├── .DS_Store ├── .gitignore ├── .vscode └── settings.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── deploy ├── .DS_Store ├── 1_optional-metrics-vpc.template.yaml ├── 2_metrics-analytics.template.yaml ├── 3_metrics.sql ├── 4_setup-s3-quicksight.py └── artifacts │ └── metrics_redshift_jsonpath.json ├── images ├── Architecture.png ├── DeployArchitecture_1.png ├── DeployArchitecture_2.png ├── DeployVPC.png ├── MetricGenerator.png ├── MetricsPerUser.png ├── Quicksight.png ├── QuicksightCalculatedField.png ├── RawMetrics.png ├── Redshift.png ├── SetupQuicksight.png ├── Sidebar.png ├── SystemUsage_1.png ├── SystemUsage_2.png ├── SystemUsage_3.png └── TopNFeature.png ├── metrics-generator └── application-metrics-generator.py └── metrics-java-lib ├── .classpath ├── .project ├── .settings ├── org.eclipse.core.resources.prefs ├── org.eclipse.jdt.apt.core.prefs ├── org.eclipse.jdt.core.prefs └── org.eclipse.m2e.core.prefs ├── pom.xml ├── src ├── main │ ├── java │ │ └── com │ │ │ └── amazonaws │ │ │ └── saas │ │ │ ├── metricsmanager │ │ │ ├── FirehosePublishService.java │ │ │ ├── MetricsPublisher.java │ │ │ ├── MetricsPublisherFactory.java │ │ │ ├── PropertiesUtil.java │ │ │ ├── builder │ │ │ │ ├── MetricBuilder.java │ │ │ │ ├── MetricEventBuilder.java │ │ │ │ └── TenantBuilder.java │ │ │ └── entities │ │ │ │ ├── CountMetric.java │ │ │ │ ├── ExecutionTimeMetric.java │ │ │ │ ├── Metric.java │ │ │ │ ├── MetricEvent.java │ │ │ │ ├── StorageMetric.java │ │ │ │ └── Tenant.java │ │ │ ├── sampleclient.java │ │ │ └── tokenmanager │ │ │ ├── JwtTokenManager.java │ │ │ └── TokenInterface.java │ └── resources │ │ ├── lib-config.properties │ │ └── logback.xml └── test │ └── java │ └── com │ └── amazonaws │ └── saas │ ├── metricsmanager │ ├── MetricsPublisherFactoryTest.java │ ├── MetricsPublisherTest.java │ └── PropertiesUtilTest.java │ └── tokenmanager │ └── JwtTokenServiceTest.java └── target └── classes └── logback.xml /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .idea 3 | *.iml 4 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "java.configuration.updateBuildConfiguration": "automatic" 3 | } -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *master* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | 61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
62 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Reference Implementation to track Usage & Consumption of a Multi-tenant, SaaS based Application using Metrics 2 | Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Capturing and Visualizing Metrics inside a Multi-Tenant SaaS Application 2 | 3 | ## Introduction 4 | In a Multi-tenant environment, metrics allows you to track, aggregate, and analyze tenant activity. Metrics are the lifeblood of a Software-as-a-Service (SaaS) application, driving both the business and technical aspects of SaaS. This repository contains code to setup a reference implementation of a metrics & analytics solution, which can be used to visualize the usage and consumption across various tenants, and tenant tiers, within your SaaS application. These metrics can then be used for various purposes, such as optimizing tenant experience and ultimately calculating cost per tenant. These metrics will also eventually have a direct impact on the pricing and tiering strategy of your SaaS application. 5 | 6 | ## Architecture 7 | Below diagram depicts the architecture of this solution. In this case, "Metric producer" is your SaaS application. It is the responsibility of your SaaS Application to produce tenant level metrics. Later in this document, we will also demonstrate a sample client to generate some sample metrics for us and a recommended JSON structure to compose the metric record, along with tenant information. Finally, we will also look at some of the graphs, created using Amazon QuickSight, to analyze these multi-tenant metrics. 8 | 9 | The architecture below allows the SaaS application to send metrics to a Kinesis Firehose delivery stream. The stream then ingests these metrics into an Amazon Redshift table. The table has been designed to capture tenant level information. Finally, QuickSight has been used to visualize these metrics at various levels. 10 | 11 |

<img src="images/Architecture.png" alt="Architecture Overview"/>
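The "metric producer" piece is deliberately simple: it only has to write JSON records (in the shape described under "Generating sample metrics" below) to the Firehose delivery stream created by this solution. As a rough illustration, here is a minimal Python sketch using boto3; the delivery stream name is a placeholder and should be replaced with the "RedshiftDeliveryStream" output of the deployed stack.

```python
import json
import time

import boto3  # assumes boto3 is installed and AWS credentials are configured

firehose = boto3.client("firehose")

# A single metric record in the JSON shape expected by the "metrics" table.
metric_record = {
    "type": "Application",
    "workload": "PhotoApplication",
    "context": "PhotoUploadService",
    "tenant": {"id": "tenant-id-1", "name": "tenant-name-a", "tier": "standard"},
    "metric": {"name": "DataTransfer", "unit": "MB", "value": 810},
    "timestamp": str(int(time.time())),
    "metadata": {"user": "user-4", "resource": "load-balancer"},
}

# Placeholder name: use the "RedshiftDeliveryStream" output of the deployed stack.
firehose.put_record(
    DeliveryStreamName="<your-metrics-delivery-stream>",
    Record={"Data": (json.dumps(metric_record) + "\n").encode("utf-8")},
)
```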

12 | 13 | ## Prerequisites to deploy and run the solution 14 | You need an AWS account in order to deploy the CloudFormation stack associated with this architecture. In order to test the stack and generate a few sample metric records, we have created a sample client using Python. You will need Python 3.7 or above installed on your machine to run this client. Alternatively, you can use AWS Cloud9 to clone this repository and run the sample client from inside your Cloud9 terminal. 15 | 16 | ## Deployment Steps 17 | ### 1. Deploy the VPC and Subnets (Optional) 18 | You will need a VPC with 2 public subnets to deploy this solution. You can skip this step if you already have a VPC that meets these requirements and want to use that VPC to deploy the infrastructure. Otherwise, deploy the CloudFormation stack named "1_optional-metrics-vpc.template.yaml", residing inside the "deploy" folder of this repository. 19 | 20 | The easiest way to deploy is to use the "CloudFormation" service inside the AWS Console. As shown below, you will be asked to provide the value for the 2nd octet of the VPC CIDR block. Provide that parameter and click "Next". You can leave the rest of the values at their defaults in the following screens and then finally click "Create Stack". 21 | 22 |

<img src="images/DeployVPC.png" alt="Deploy the VPC and Subnets"/>
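If you prefer to script this step instead of clicking through the console, a rough boto3 sketch could look like the following, run from the root of the cloned repository; the stack name and ClassB value are illustrative placeholders.

```python
import boto3

cfn = boto3.client("cloudformation")

# Read the VPC template from the local clone of this repository.
with open("deploy/1_optional-metrics-vpc.template.yaml") as f:
    template_body = f.read()

# Stack name and ClassB (the 2nd octet of the VPC CIDR) are placeholders.
cfn.create_stack(
    StackName="metrics-analytics-vpc",
    TemplateBody=template_body,
    Parameters=[{"ParameterKey": "ClassB", "ParameterValue": "0"}],
)

# Wait for the VPC and subnets to be created before moving to the next step.
cfn.get_waiter("stack_create_complete").wait(StackName="metrics-analytics-vpc")
```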

23 | 24 | 25 | ### 2. Deploy the Metrics & Analytics Architecture 26 | Use the "2_metrics-analytics.template.yaml" file, inside the "deploy" folder, to deploy the architecture used for this solution. This template will deploy the following infrastructure inside your AWS account. 27 | 28 | * A Kinesis Data Firehose delivery stream to accept the incoming metrics and ingest them into a Redshift cluster, along with the corresponding CloudWatch log group and log stream. 29 | * A Redshift cluster inside the VPC, spanning the 2 public subnets you select. Redshift needs to be deployed in public subnets in order to use it with Kinesis Firehose. 30 | * A security group for Redshift, which only allows ingress from the Firehose and QuickSight IP addresses. 31 | * An S3 bucket needed by Firehose to ingest data into Redshift. We have set a lifecycle policy on this S3 bucket to expire the temporary/intermediate metrics data stored in it after 7 days. 32 | * An encryption key inside KMS to encrypt data within Firehose and S3. 33 | 34 | The screenshots below capture the parameters used by CloudFormation while deploying this stack inside the AWS console. 35 | 36 | Each parameter has a description related to its usage in the architecture. You will have to select the VPC and subnets where you want to deploy the Redshift cluster. If you deployed the VPC in Step 1, these will be the VPC and subnets from the previous step. You can choose to encrypt data at rest if you are concerned about the security of the data. Most of the other parameters have defaults and can be modified as per your needs. One parameter that must be provided is the password for your Redshift cluster. Make sure to note down the "Master user name" and "Master user password", as you will need these to access your cluster. 37 | 38 |

<img src="images/DeployArchitecture_1.png" alt="Architecture"/>

39 |

<img src="images/DeployArchitecture_2.png" alt="Architecture"/>

40 | 41 | Click "Next" once you have provided all the parameters. You can leave the rest of the values at their defaults in the following screens and then finally click "Create Stack". 42 | 43 | ### 3. Create the Metrics Table inside Redshift 44 | We will now create the metrics table inside the newly created Redshift cluster. The table definition is saved in the "3_metrics.sql" file, in the "deploy" folder. To create this table, navigate to the Amazon Redshift console and select the "EDITOR" menu from the sidebar on the left. Then log in to the newly created cluster, using the username and password provided while deploying the CloudFormation stack. You can then use the query editor to create the "metrics" table. 45 | 46 |

Metrics Table
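If you would rather script this step, the same table can typically be created through the Amazon Redshift Data API instead of the query editor. The sketch below is illustrative: the cluster identifier is the value in the "RedshiftCluster" output of the stack, and the database and user are whatever you chose while deploying the CloudFormation stack (defaults shown).

```python
import boto3

redshift_data = boto3.client("redshift-data")

# Table definition shipped with this repository.
with open("deploy/3_metrics.sql") as f:
    create_table_sql = f.read()

# Placeholders: cluster id from the stack outputs, plus the database name and
# master user name you chose while deploying the stack (defaults shown here).
redshift_data.execute_statement(
    ClusterIdentifier="<your-redshift-cluster-id>",
    Database="metricsdb",
    DbUser="metricsadmin",
    Sql=create_table_sql,
)
```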

47 | 48 | ### 4. Setup Amazon QuickSight 49 | We have used Amazon QuickSight to visualize the metrics data. In order to use QuickSight, you will first need to register for QuickSight inside your AWS account. If you haven't done this before, follow the instructions at this link to proceed: https://docs.aws.amazon.com/quicksight/latest/user/setup-quicksight-for-existing-aws-account.html 50 | 51 | Make sure to allow QuickSight to access Redshift while signing up. Alternatively, you can do that inside QuickSight using the following steps: 52 | * Click on the top right corner where your username is displayed. 53 | * Click Manage QuickSight. 54 | * Click on Security and Permissions. 55 | * Under the "QuickSight access to AWS services" section, click the "Add or remove" button. 56 | * Select "Amazon Redshift" from the list. 57 | * Click Update. 58 | 59 |

<img src="images/SetupQuicksight.png" alt="Setup QuickSight"/>
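Before moving on, you can optionally confirm that the QuickSight APIs used by the setup script in the next step can see your account and its users; a small illustrative boto3 check:

```python
import boto3

account_id = boto3.client("sts").get_caller_identity()["Account"]

# The setup script in step 5 looks up users in the "default" namespace, so this
# should list at least the user created during QuickSight sign-up.
quicksight = boto3.client("quicksight")
response = quicksight.list_users(AwsAccountId=account_id, Namespace="default")
print([user["UserName"] for user in response["UserList"]])
```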

60 | 61 | ### 5. Deploy S3 artifacts and create Redshift connection inside QuickSight 62 | Redshift uses a "JSONPaths" file to map JSON attributes to table columns. Since we are sending JSON data to Firehose, we need this file so that Firehose can use it to map the JSON attributes to the "metrics" table. Finally, we also need to set up a data source and data set inside QuickSight, which can be used to connect to the Redshift cluster. We will use the "4_setup-s3-quicksight.py" script, inside the "deploy" folder, to perform these two actions. 63 | 64 | ``` 65 | python3 4_setup-s3-quicksight.py 66 | ``` 67 |

Setup QuickSight

68 | 69 | You will be asked to provide 8 inputs, each with appropriate defaults. 70 | 71 | "Enter region associated with QuickSight account" - Defaulted to the current session. Use this to provide the region where you have deployed your CloudFormation stack and registered your QuickSight account.
72 | "Enter AWS Access Key associated with QuickSight account" - Defaulted to the current session. Use this to provide the Access Key to access the deployed CloudFormation stack and QuickSight account.
73 | "Enter AWS Secret Key associated with QuickSight account" - Defaulted to the current session. Use this to provide the Secret Key to access the deployed CloudFormation stack and QuickSight account.
74 | "QuickSight user name" - Defaulted to the QuickSight user created while registering for QuickSight. You can leave it at the default value, unless you want another user to have access to the QuickSight data source and data set.
75 | "Enter 0 to provide Redshift connection details manually. Enter 1 to read the connection details from the deployed Cloudformation stack" - Leave it at 1, unless you want to specify all the cluster connection details manually.
76 | "Cloudformation stack name for Metrics & Analytics" - Stack name for the deployed architecture. We will get the Outputs from this stack and use them to set up the Redshift connection inside QuickSight.
77 | "Redshift user name for accessing metrics data" - Username for the Redshift Cluster.
78 | "Redshift password for accessing metrics data" - Password for the Redshift Cluster.
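Once the script completes, a quick way to confirm that the data source and data set were registered is to list them with boto3 (a sketch; the script names both objects with a "MetricsDataSource-"/"MetricsDataSet-" prefix followed by a random suffix):

```python
import boto3

quicksight = boto3.client("quicksight")
account_id = boto3.client("sts").get_caller_identity()["Account"]

# The setup script creates "MetricsDataSource-<random>" and "MetricsDataSet-<random>".
for source in quicksight.list_data_sources(AwsAccountId=account_id)["DataSources"]:
    print(source["DataSourceId"], source["Type"])

for dataset in quicksight.list_data_sets(AwsAccountId=account_id)["DataSetSummaries"]:
    print(dataset["DataSetId"])
```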
79 | 80 | This completes the deployment of the architecture and all the associated components. 81 | 82 | ## Generating sample metrics 83 | Before we generate some sample metrics, it is important to understand the structure of the multi-tenant JSON. This JSON structure directly correlates to the table that we created above inside Redshift. Below is an example JSON record. 84 | ```json 85 | { 86 | "type": "Application", 87 | "workload": "PhotoApplication", 88 | "context": "PhotoUploadService", 89 | "tenant": { 90 | "id": "tenant-id-1", 91 | "name": "tenant-name-a", 92 | "tier": "standard" 93 | }, 94 | "metric": { 95 | "name": "DataTransfer", 96 | "unit": "MB", 97 | "value": 810 98 | }, 99 | "timestamp": "1593734382", 100 | "metadata": { 101 | "user": "user-4", 102 | "resource": "load-balancer" 103 | } 104 | 105 | } 106 | ``` 107 | Here is the description of each element of this JSON. 108 | - "type": This is defaulted to "Application" in our case. But, in case you want to extend the usage of this architecture to ingest system level metrics or business KPIs, you can add "System" or "KPI" as a type as well. 109 | - "workload": A workload is a collection of resources and code that delivers business value, such as a customer-facing application or a backend process. This could be your SaaS application. In some cases, you might have multiple SaaS applications being delivered to the same set of tenants. Adding this attribute allows us to aggregate metrics from multiple applications in a single place. 110 | - "context": This captures a feature inside your SaaS application. You might allow tenants to sign up for multiple features based upon their tiers and want to capture the usage across these features. 111 | - "tenant": This is basically your tenant information. We have de-normalized this entity a bit and pass the tenant_id, tenant_name and tenant_tier as part of the JSON. 112 | - "metric": This is your actual metric information. Above, we have shown "DataTransfer" as an example. Our sample metric generator has a few more examples, like "Storage" and "ExecutionTime". It is up to the product owners and architects of the SaaS application to decide what relevant metrics they want to capture to understand the tenant consumption patterns. 113 | - "timestamp": This denotes the time when the metric was recorded. 114 | - "metadata": This can be used to send any other key-value pairs which provide useful information about the tenant and/or metric. In the above example, we are providing the actual user information and the resource for which this metric is relevant. But the implementation of this attribute can vary significantly, based upon the use case. 115 | 116 | Let's now use our sample metrics generator to generate some sample metrics. 117 | 118 | ``` 119 | cd metrics-generator 120 | python3 application-metrics-generator.py 121 | ``` 122 |

Setup QuickSight

123 | 124 | You will be asked to provide 8 inputs, each with appropriate defaults. 125 | 126 | "How many metrics?" - Number of sample metrics to be generated.
127 | "Enter start date for metrics?" - Start date of the first metric record.
128 | "Number of days?" - The number of days over which you want the metrics to be spread out, starting from the "Start Date".
129 | "Batch size for Kinesis?" - The sample metrics generator sends records to Kinesis in batches to improve performance. This parameter controls the batch size.
130 | "Enter region for the deployed metrics stack " - Defaulted to the current session. Use this to provide the region where you have deployed your CloudFormation stack.
131 | "Enter AWS Access Key for the deployed metrics stack" - Defaulted to the current session. Use this to provide the Access Key to access the deployed CloudFormation stack.
132 | "Enter AWS Secret Key for the deployed metrics stack" - Defaulted to the current session. Use this to provide the Secret Key to access the deployed CloudFormation stack.
133 | "Enter Kinesis stream name" - This will default to the deployed Kinesis delivery stream. You can change it if needed.
134 | 135 | This will now push the metric records to Kinesis Firehose, which will then ingest them into Redshift. Depending upon the Buffer interval and Buffer size you provided while deploying the stack, it can take anywhere between 60 and 900 seconds (1 to 15 minutes) for the records to show up in Redshift. This is because Kinesis Firehose keeps those records in its memory and only sends them to Redshift once it hits the Buffer interval or Buffer size. 136 | 137 | You can now start exploring the metric data inside Redshift, using the query editor. 138 | 139 | ## Example Java Client Implementation to send metrics from your SaaS Application 140 | Your goal should be to use a standard way to send metric data inside your SaaS application. To demonstrate this, this code repository comes with a sample Java client implementation to ingest data into Kinesis Firehose. You can refer to this Java client, as a reference implementation, to understand how your SaaS application can potentially send metrics data to the deployed architecture. 141 | 142 | This library uses Maven. To build the artifacts, use the following commands. 143 | ``` 144 | cd metrics-java-lib 145 | mvn clean package 146 | ``` 147 | 148 | "sampleclient.java" inside the "metrics-java-lib\src\main\java\com\amazonaws\saas" folder provides an example of sending metrics to the Kinesis Firehose stream created by the CloudFormation stack. 149 | 150 | The basic assumption here is that your application has authenticated against an identity provider and in return has got a JWT token. This JWT token carries the tenant context in its payload. The sample client uses the "MetricsPublisherFactory" inside the "metricsmanager" folder to get a reference to the MetricsPublisher class. You can use either the "getPublisher" or the "getBatchPublisher" method to do that. The only difference between the two methods is that getBatchPublisher buffers the metrics in memory, until the configured batch size is reached, before pushing them to Firehose. On the other hand, getPublisher pushes data to Firehose immediately. 151 | 152 | The next step is simply to push the metric event using the "publishMetricEvent" method. This method takes the actual Metric, the JWT token (in order to extract the tenant context) and a HashMap (one or more key-value pairs). The HashMap can be used to send any additional information that doesn't belong to either the Metric or the Tenant. This could be things like user information, as an example. "publishMetricEvent" extracts the tenant information from the JWT token and publishes it to Firehose along with the actual metric and metadata (key-value pairs). 153 | 154 | Here is the sample code from "sampleclient.java" that does it: 155 | ``` 156 | metricPublisher.publishMetricEvent(new ExecutionTimeMetric(100L), jwtToken, new HashMap<>()); 157 | ``` 158 | 159 | You can use the command below to run the sample client and send data to the metrics infrastructure you deployed earlier. Make sure to change the "kinesis.stream.name" property inside the "metrics-java-lib\src\main\resources" folder to the actual Kinesis Firehose stream name that was deployed as part of the CloudFormation stack. 160 | ``` 161 | java -jar target/metrics-java-lib-1.2.0-jar-with-dependencies.jar 162 | ``` 163 | 164 | ## Sample Multi-Tenant Dashboard 165 | So far, we have deployed the architecture, set up QuickSight, and sent some sample data to the deployed stack. Now comes the most important part: visualizing the data using QuickSight.
This is what the business and technical owners of your SaaS application will use to see tenant-level trends and make some of their important decisions. 166 | 167 | As of now, QuickSight doesn't allow you to create graphs using APIs. Because of this limitation, this architecture doesn't come with any pre-built/canned graphs to visualize those tenant-level metrics. But below are some screenshots of a few sample graphs that you can create using the data source and data set created as part of the deployment above. 168 | 169 | In this case, we have created a QuickSight dashboard with four tabs, namely "System Usage", "Top N Features", "Metrics Per User" and "Raw Metrics". 170 | 171 | #### System Usage 172 | This is by far the most important tab and presents lots of useful information about your SaaS application's usage and consumption. Assume you have one or more SaaS applications, comprising more than one workload, that provide data storage services to your tenants. Let's take the few use cases below, as examples, to demonstrate how you can visualize your metrics data in this scenario. 173 | 174 | 1) You might want to know how much storage is being used across your SaaS applications/workloads. 175 | 2) You might want to know how much storage is being used by each feature within each SaaS application/workload. 176 | 3) Your tenants are categorized into tiers (free, basic, standard, premier) and you want to know how much storage is consumed across these tiers. 177 | 4) You want to know the consumption at the tenant level to see your busy tenants. 178 | 5) You want to see overall storage by date. 179 | 6) You want to see overall storage by date and tier. 180 | 181 | The above are a few examples, and the way metrics are visualized depends on the use case of the SaaS application. You can also use the consumption-per-tenant metrics to apportion the overall system usage by tenant. If your pricing model depends on the actual consumption, this is a useful metric for billing your tenants. 182 | 183 | Here are some of the screenshots demonstrating these use cases, along with a sample aggregation query sketched right after this paragraph. 184 | 185 |
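As a concrete illustration of use cases 3 and 6 (storage by tier and by date), the sketch below runs an aggregation through the Redshift Data API; the equivalent SQL can just as well be pasted into the Redshift query editor, and the cluster identifier, database and user are placeholders for the values used while deploying the stack.

```python
import boto3

redshift_data = boto3.client("redshift-data")

# Storage consumed per tenant tier and day, mirroring use cases 3 and 6 above.
# (The generator's "Storage" metric name is assumed; adjust to your own metrics.)
sql = """
    SELECT tenant_tier,
           TRUNC(timerecorded) AS usage_date,
           SUM(metric_value)   AS total_storage
    FROM public.metrics
    WHERE metric_name = 'Storage'
    GROUP BY tenant_tier, TRUNC(timerecorded)
    ORDER BY usage_date, tenant_tier;
"""

response = redshift_data.execute_statement(
    ClusterIdentifier="<your-redshift-cluster-id>",
    Database="metricsdb",
    DbUser="metricsadmin",
    Sql=sql,
)
# Fetch the rows later with get_statement_result, using the returned statement id.
print(response["Id"])
```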

186 | 187 |

188 | 189 |

190 | 191 | #### Top N Features 192 | This tab is used for visualizing your top used features by tenants and tenant tiers. 193 | 194 |

195 | 196 | #### Metrics Per User 197 | In some cases, you might want to know which user is consuming the most within a tenant. The screenshot below uses the "user" attribute inside the "meta_data" attribute of the JSON. Here we have created a calculated field inside the QuickSight dataset, something like this: 198 | 199 |
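A calculated field like that can be built with QuickSight's parseJson function; an expression along the lines of `parseJson({meta_data}, "$.user")` (illustrative only; adapt it to your dataset) extracts the user from the serialized meta_data column.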

200 | 201 | Below is the Metrics per user graph. 202 |

203 | 204 | #### Raw Metrics 205 | The screenshot below shows the raw view of the metrics in a table format. 206 | 207 |

208 | 209 | ## Steps to Clean-up 210 | Use the steps below to clean up your account: 211 | 1. Delete the CloudFormation stack for the Metrics & Analytics architecture and then the CloudFormation stack used to create the VPC and subnets, using the CloudFormation console. 212 | 2. Delete the S3 bucket created to store the intermediate Redshift files and the JSONPath file, using the S3 console. 213 | 3. If you want to close your QuickSight account, you can do so by following the instructions at this link: https://docs.aws.amazon.com/quicksight/latest/user/closing-account.html 214 | 4. If you want to keep the QuickSight account, you can instead just delete the corresponding dashboard, data source and data set created to visualize the metrics data.
215 | a) https://docs.aws.amazon.com/quicksight/latest/user/deleting-a-dashboard.html
216 | b) https://docs.aws.amazon.com/quicksight/latest/user/delete-a-data-set.html
217 | c) https://docs.aws.amazon.com/QuickSight/latest/user/delete-a-data-source.html 218 | 219 | 220 | ## Security 221 | 222 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 223 | 224 | ## License 225 | 226 | This library is licensed under the MIT-0 License. See the LICENSE file. 227 | 228 | -------------------------------------------------------------------------------- /deploy/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/deploy/.DS_Store -------------------------------------------------------------------------------- /deploy/1_optional-metrics-vpc.template.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | AWSTemplateFormatVersion: '2010-09-09' 5 | 6 | Description: 'Cloudformation Template to create a VPC with 2 public subnets in 2 AZs needed for Metrics & Analytics Solution.' 7 | 8 | Metadata: 9 | 'AWS::CloudFormation::Interface': 10 | ParameterGroups: 11 | - Label: 12 | default: 'VPC Parameters' 13 | Parameters: 14 | - ClassB 15 | ParameterLabels: 16 | ClassB: 17 | default: ClassB 2nd Octet 18 | 19 | Parameters: 20 | ClassB: 21 | Description: 'Specify the 2nd Octet of IPv4 CIDR block for the VPC (10.XXX.0.0/16) in the range [0-255]' 22 | Type: Number 23 | Default: 0 24 | ConstraintDescription: 'Must be in the range [0-255]' 25 | MinValue: 0 26 | MaxValue: 255 27 | 28 | Resources: 29 | VPC: 30 | Type: 'AWS::EC2::VPC' 31 | Properties: 32 | CidrBlock: !Sub '10.${ClassB}.0.0/16' 33 | EnableDnsSupport: true 34 | EnableDnsHostnames: true 35 | InstanceTenancy: default 36 | Tags: 37 | - Key: Name 38 | Value: !Sub '${AWS::StackName}-VPC' 39 | 40 | InternetGateway: 41 | Type: 'AWS::EC2::InternetGateway' 42 | Properties: 43 | Tags: 44 | - Key: Name 45 | Value: !Sub '10.${ClassB}.0.0/16' 46 | 47 | VPCGatewayAttachment: 48 | Type: 'AWS::EC2::VPCGatewayAttachment' 49 | Properties: 50 | VpcId: !Ref VPC 51 | InternetGatewayId: !Ref InternetGateway 52 | 53 | NATEIPA: 54 | DependsOn: VPCGatewayAttachment 55 | Type: AWS::EC2::EIP 56 | Properties: 57 | Domain: vpc 58 | 59 | NATEIPB: 60 | DependsOn: VPCGatewayAttachment 61 | Type: AWS::EC2::EIP 62 | Properties: 63 | Domain: vpc 64 | 65 | SubnetAPublic: 66 | Type: 'AWS::EC2::Subnet' 67 | Properties: 68 | AvailabilityZone: !Select [0, !GetAZs ''] 69 | CidrBlock: !Sub '10.${ClassB}.0.0/20' 70 | MapPublicIpOnLaunch: true 71 | VpcId: !Ref VPC 72 | Tags: 73 | - Key: Name 74 | Value: !Join 75 | - '_' 76 | - - 'Metrics-Analytics' 77 | - !Select [0, !GetAZs ''] 78 | - 'Public' 79 | - Key: Reach 80 | Value: public 81 | 82 | SubnetBPublic: 83 | Type: 'AWS::EC2::Subnet' 84 | Properties: 85 | AvailabilityZone: !Select [1, !GetAZs ''] 86 | CidrBlock: !Sub '10.${ClassB}.16.0/20' 87 | MapPublicIpOnLaunch: true 88 | VpcId: !Ref VPC 89 | Tags: 90 | - Key: Name 91 | Value: !Join 92 | - '_' 93 | - - 'Metrics-Analytics' 94 | - !Select [1, !GetAZs ''] 95 | - 'Public' 96 | - Key: Reach 97 | Value: public 98 | 99 | RouteTablePublic: 100 | Type: 'AWS::EC2::RouteTable' 101 | Properties: 102 | VpcId: !Ref VPC 103 | Tags: 104 | - Key: Name 105 | Value: !Join 106 | - '_' 107 | - - !Sub '10.${ClassB}.0.0/16' 108 | - 'Public' 109 | 110 | RouteTableAssociationAPublic: 111 | Type: 
'AWS::EC2::SubnetRouteTableAssociation' 112 | Properties: 113 | SubnetId: !Ref SubnetAPublic 114 | RouteTableId: !Ref RouteTablePublic 115 | 116 | RouteTableAssociationBPublic: 117 | Type: 'AWS::EC2::SubnetRouteTableAssociation' 118 | Properties: 119 | SubnetId: !Ref SubnetBPublic 120 | RouteTableId: !Ref RouteTablePublic 121 | 122 | RouteTablePublicInternetRoute: 123 | Type: 'AWS::EC2::Route' 124 | DependsOn: VPCGatewayAttachment 125 | Properties: 126 | RouteTableId: !Ref RouteTablePublic 127 | DestinationCidrBlock: '0.0.0.0/0' 128 | GatewayId: !Ref InternetGateway 129 | 130 | NetworkAclPublic: 131 | Type: 'AWS::EC2::NetworkAcl' 132 | Properties: 133 | VpcId: !Ref VPC 134 | Tags: 135 | - Key: Name 136 | Value: !Join 137 | - '_' 138 | - - !Sub '10.${ClassB}.0.0/16' 139 | - 'NACL' 140 | - 'Public' 141 | 142 | SubnetNetworkAclAssociationAPublic: 143 | Type: 'AWS::EC2::SubnetNetworkAclAssociation' 144 | Properties: 145 | SubnetId: !Ref SubnetAPublic 146 | NetworkAclId: !Ref NetworkAclPublic 147 | 148 | SubnetNetworkAclAssociationBPublic: 149 | Type: 'AWS::EC2::SubnetNetworkAclAssociation' 150 | Properties: 151 | SubnetId: !Ref SubnetBPublic 152 | NetworkAclId: !Ref NetworkAclPublic 153 | 154 | NetworkAclEntryInPublicAllowAll: 155 | Type: 'AWS::EC2::NetworkAclEntry' 156 | Properties: 157 | NetworkAclId: !Ref NetworkAclPublic 158 | RuleNumber: 99 159 | Protocol: -1 160 | RuleAction: allow 161 | Egress: false 162 | CidrBlock: '0.0.0.0/0' 163 | 164 | NetworkAclEntryOutPublicAllowAll: 165 | Type: 'AWS::EC2::NetworkAclEntry' 166 | Properties: 167 | NetworkAclId: !Ref NetworkAclPublic 168 | RuleNumber: 99 169 | Protocol: -1 170 | RuleAction: allow 171 | Egress: true 172 | CidrBlock: '0.0.0.0/0' 173 | 174 | 175 | Outputs: 176 | TemplateID: 177 | Description: 'Template ID' 178 | Value: 'VPC-3AZs' 179 | 180 | StackName: 181 | Description: 'Stack name' 182 | Value: !Sub '${AWS::StackName}' 183 | 184 | VPC: 185 | Description: 'VPC' 186 | Value: !Ref VPC 187 | Export: 188 | Name: !Sub '${AWS::StackName}-VPC' 189 | 190 | ClassB: 191 | Description: 'Class B' 192 | Value: !Ref ClassB 193 | Export: 194 | Name: !Sub '${AWS::StackName}-ClassB' 195 | 196 | CidrBlock: 197 | Description: 'The set of IP addresses for the VPC' 198 | Value: !GetAtt 'VPC.CidrBlock' 199 | Export: 200 | Name: !Sub '${AWS::StackName}-CidrBlock' 201 | 202 | AZs: 203 | Description: 'AZs' 204 | Value: 2 205 | Export: 206 | Name: !Sub '${AWS::StackName}-AZs' 207 | 208 | AZA: 209 | Description: 'AZ of A' 210 | Value: !Select [0, !GetAZs ''] 211 | Export: 212 | Name: !Sub '${AWS::StackName}-AZA' 213 | 214 | AZB: 215 | Description: 'AZ of B' 216 | Value: !Select [1, !GetAZs ''] 217 | Export: 218 | Name: !Sub '${AWS::StackName}-AZB' 219 | 220 | SubnetsPublic: 221 | Description: 'Subnets public' 222 | Value: !Join [',', [!Ref SubnetAPublic, !Ref SubnetBPublic]] 223 | Export: 224 | Name: !Sub '${AWS::StackName}-SubnetsPublic' 225 | 226 | RouteTablesPublic: 227 | Description: 'Route tables public' 228 | Value: !Ref RouteTablePublic 229 | Export: 230 | Name: !Sub '${AWS::StackName}-RouteTablePublic' 231 | 232 | SubnetAPublic: 233 | Description: 'Subnet A public' 234 | Value: !Ref SubnetAPublic 235 | Export: 236 | Name: !Sub '${AWS::StackName}-SubnetAPublic' 237 | 238 | SubnetBPublic: 239 | Description: 'Subnet B public' 240 | Value: !Ref SubnetBPublic 241 | Export: 242 | Name: !Sub '${AWS::StackName}-SubnetBPublic' 243 | -------------------------------------------------------------------------------- /deploy/2_metrics-analytics.template.yaml: 
-------------------------------------------------------------------------------- 1 | ## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | AWSTemplateFormatVersion: '2010-09-09' 5 | Description: >- 6 | Template deploys a Metrics & Analytics Solution that Integrates Amazon Kinesis Firehose, S3 and Amazon Redshift in an existing VPC. 7 | Metadata: 8 | AWS::CloudFormation::Interface: 9 | ParameterGroups: 10 | - Label: 11 | default: Network configuration 12 | Parameters: 13 | - VPC 14 | - Label: 15 | default: Subnet configuration 16 | Parameters: 17 | - PublicSubnetA 18 | - PublicSubnetB 19 | - Label: 20 | default: Encrypt data configuration 21 | Parameters: 22 | - EncryptData 23 | - Label: 24 | default: Amazon Redshift cluster configuration 25 | Parameters: 26 | - DatabaseName 27 | - ClusterType 28 | - NumberOfNodes 29 | - NodeType 30 | - RedshiftPortNumber 31 | - Label: 32 | default: Amazon Redshift configuration for Amazon Kinesis Data Firehose 33 | Parameters: 34 | - MasterUser 35 | - MasterUserPassword 36 | - RedshiftTableName 37 | - RedshiftBufferInterval 38 | - RedshiftBufferSize 39 | 40 | ParameterLabels: 41 | VPC: 42 | default: Existing VPC ID 43 | PublicSubnetA: 44 | default: Existing public subnet ID in AZ-1 45 | PublicSubnetB: 46 | default: Existing public subnet ID in AZ-2 47 | DatabaseName: 48 | default: Database name 49 | ClusterType: 50 | default: Cluster type 51 | NumberOfNodes: 52 | default: Number of nodes 53 | NodeType: 54 | default: Node type 55 | EncryptData: 56 | default: Encrypt data at rest 57 | MasterUser: 58 | default: Master user name 59 | MasterUserPassword: 60 | default: Master user password 61 | RedshiftTableName: 62 | default: Table name 63 | RedshiftBufferInterval: 64 | default: Buffer interval 65 | RedshiftBufferSize: 66 | default: Buffer size 67 | RedshiftPortNumber: 68 | default: Redshift port number 69 | 70 | Parameters: 71 | VPC: 72 | Type: AWS::EC2::VPC::Id 73 | Description: Choose an existing VPC 74 | PublicSubnetA: 75 | Type: AWS::EC2::Subnet::Id 76 | Description: The public subnet in Availability Zone 1 77 | PublicSubnetB: 78 | Type: AWS::EC2::Subnet::Id 79 | Description: The public subnet in Availability Zone 2 80 | EncryptData: 81 | Description: Set to yes to encrypt the data as it leaves your Amazon Kinesis Data Firehose delivery stream. 82 | Type: String 83 | AllowedValues: 84 | - 'yes' 85 | - 'no' 86 | Default: 'no' 87 | DatabaseName: 88 | Description: The name of the first database to be created when the Amazon Redshift cluster is created. 89 | Type: String 90 | Default: 'metricsdb' 91 | AllowedPattern: "([a-z]|[0-9])+" 92 | ClusterType: 93 | Description: The type of Amazon Redshift cluster. 94 | Type: String 95 | Default: multi-node 96 | AllowedValues: 97 | - single-node 98 | - multi-node 99 | NumberOfNodes: 100 | Description: The number of compute nodes in the Amazon Redshift cluster. For multi-node clusters, the NumberOfNodes parameter must be greater than 1. 101 | Type: Number 102 | Default: '2' 103 | NodeType: 104 | Description: The type of Amazon Redshift node to be provisioned. 105 | Type: String 106 | Default: dc2.large 107 | AllowedValues: 108 | - dc2.large 109 | - dc2.8xlarge 110 | - ra3.4xlarge 111 | - ra3.16xlarge 112 | MasterUserPassword: 113 | Description: The master user password for the Amazon Redshift cluster. Must contain one number and no special characters. 
114 | NoEcho: 'true' 115 | Type: String 116 | MasterUser: 117 | Description: The name of the master user of the Amazon Redshift cluster. 118 | Type: String 119 | Default: metricsadmin 120 | RedshiftBufferInterval: 121 | Description: The number of seconds to buffer data before delivering to Amazon Redshift (60 to 900). 122 | Type: Number 123 | Default: 300 124 | MinValue: 60 125 | MaxValue: 900 126 | RedshiftBufferSize: 127 | Description: MB of data to buffer before delivering to Amazon Redshift (1 to 128). 128 | Type: Number 129 | Default: 5 130 | MinValue: 1 131 | MaxValue: 128 132 | RedshiftTableName: 133 | Description: The name of the table in the Amazon Redshift cluster. 134 | Type: String 135 | Default: Metrics 136 | RedshiftPortNumber: 137 | Description: The Amazon Redshift publicly accessible port number. 138 | Type: String 139 | Default: '8200' 140 | Mappings: 141 | RegionMap: 142 | us-east-1: 143 | RedshiftInboundTraffic: 52.70.63.192/27 144 | us-east-2: 145 | RedshiftInboundTraffic: 13.58.135.96/27 146 | us-west-2: 147 | RedshiftInboundTraffic: 52.89.255.224/27 148 | us-west-1: 149 | RedshiftInboundTraffic: 13.57.135.192/27 150 | eu-central-1: 151 | RedshiftInboundTraffic: 35.158.127.160/27 152 | ca-central-1: 153 | RedshiftInboundTraffic: 35.183.92.128/27 154 | eu-west-1: 155 | RedshiftInboundTraffic: 52.19.239.192/27 156 | eu-west-2: 157 | RedshiftInboundTraffic: 18.130.1.96/27 158 | eu-west-3: 159 | RedshiftInboundTraffic: 35.180.1.96/27 160 | ap-southeast-1: 161 | RedshiftInboundTraffic: 13.228.64.192/27 162 | ap-southeast-2: 163 | RedshiftInboundTraffic: 13.210.67.224/27 164 | ap-northeast-1: 165 | RedshiftInboundTraffic: 13.113.196.224/27 166 | ap-northeast-2: 167 | RedshiftInboundTraffic: 13.209.1.64/27 168 | ap-south-1: 169 | RedshiftInboundTraffic: 13.232.67.32/27 170 | sa-east-1: 171 | RedshiftInboundTraffic: 18.228.1.128/27 172 | AWSQuickSightIPMap: 173 | ap-northeast-1: 174 | QuickSightIP: 13.113.244.32/27 175 | ap-southeast-1: 176 | QuickSightIP: 13.229.254.0/27 177 | ap-southeast-2: 178 | QuickSightIP: 54.153.249.96/27 179 | eu-central-1: 180 | QuickSightIP: 35.158.127.192/27 181 | eu-west-1: 182 | QuickSightIP: 52.210.255.224/27 183 | eu-west-2: 184 | QuickSightIP: 35.177.218.0/27 185 | us-east-1: 186 | QuickSightIP: 52.23.63.224/27 187 | us-east-2: 188 | QuickSightIP: 52.15.247.160/27 189 | us-west-2: 190 | QuickSightIP: 54.70.204.128/27 191 | us-west-1: 192 | QuickSightIP: none 193 | ca-central-1: 194 | QuickSightIP: none 195 | eu-west-3: 196 | QuickSightIP: none 197 | eu-north-1: 198 | QuickSightIP: none 199 | ap-east-1: 200 | QuickSightIP: none 201 | ap-northeast-2: 202 | QuickSightIP: none 203 | ap-northeast-3: 204 | QuickSightIP: none 205 | ap-south-1: 206 | QuickSightIP: none 207 | me-south-1: 208 | QuickSightIP: none 209 | sa-east-1: 210 | QuickSightIP: none 211 | 212 | Conditions: 213 | IsMultiNodeCluster: 214 | Fn::Equals: 215 | - Ref: ClusterType 216 | - multi-node 217 | EncryptData: !Equals 218 | - !Ref 'EncryptData' 219 | - 'yes' 220 | NoEncryption: !Equals 221 | - !Ref 'EncryptData' 222 | - 'no' 223 | isQuickSightRegionIP: 224 | !Not [!Equals [!FindInMap [ AWSQuickSightIPMap, !Ref "AWS::Region", QuickSightIP ], "none"]] 225 | 226 | Resources: 227 | EncryptionKey: 228 | Type: AWS::KMS::Key 229 | Condition: EncryptData 230 | Properties: 231 | Description: KMS key generated to encrypt Kinesis data. 
232 | EnableKeyRotation: true 233 | KeyPolicy: 234 | Id: KMS key policy 235 | Version: '2012-10-17' 236 | Statement: 237 | - Sid: Enable IAM User Permissions 238 | Effect: Allow 239 | Principal: 240 | AWS: 241 | - !Join 242 | - '' 243 | - - 'arn:aws:iam::' 244 | - !Ref 'AWS::AccountId' 245 | - :root 246 | Action: kms:* 247 | Resource: '*' 248 | - Sid: Allow access for Key Administrators 249 | Effect: Allow 250 | Principal: 251 | AWS: 252 | - !Join 253 | - '' 254 | - - 'arn:aws:iam::' 255 | - !Ref 'AWS::AccountId' 256 | - :root 257 | Action: 258 | - kms:Create* 259 | - kms:Describe* 260 | - kms:Enable* 261 | - kms:List* 262 | - kms:Put* 263 | - kms:Update* 264 | - kms:Revoke* 265 | - kms:Disable* 266 | - kms:Get* 267 | - kms:Delete* 268 | - kms:ScheduleKeyDeletion 269 | - kms:CancelKeyDeletion 270 | Resource: '*' 271 | - Sid: Allow use of the key 272 | Effect: Allow 273 | Principal: 274 | AWS: 275 | - !Join 276 | - '' 277 | - - 'arn:aws:iam::' 278 | - !Ref 'AWS::AccountId' 279 | - :root 280 | Action: 281 | - kms:Encrypt 282 | - kms:Decrypt 283 | - kms:ReEncrypt* 284 | - kms:GenerateDataKey* 285 | - kms:DescribeKey 286 | Resource: '*' 287 | - Sid: Allow attachment of persistent resources 288 | Effect: Allow 289 | Principal: 290 | AWS: 291 | - !Join 292 | - '' 293 | - - 'arn:aws:iam::' 294 | - !Ref 'AWS::AccountId' 295 | - :root 296 | Action: 297 | - kms:CreateGrant 298 | - kms:ListGrants 299 | - kms:RevokeGrant 300 | Resource: '*' 301 | Condition: 302 | Bool: 303 | kms:GrantIsForAWSResource: true 304 | KMSAlias: 305 | Type: AWS::KMS::Alias 306 | Condition: EncryptData 307 | Properties: 308 | AliasName: !Join 309 | - '' 310 | - - alias/key- 311 | - !Ref 'AWS::StackName' 312 | TargetKeyId: !GetAtt 'EncryptionKey.Arn' 313 | MetricsBucket: 314 | Type: AWS::S3::Bucket 315 | DeletionPolicy: Retain 316 | Properties: 317 | LifecycleConfiguration: 318 | Rules: 319 | - Id: DeleteRedshiftDelivery 320 | Prefix: 'RedshiftDelivery' 321 | Status: 'Enabled' 322 | ExpirationInDays: 7 323 | RedshiftCluster: 324 | Type: AWS::Redshift::Cluster 325 | DependsOn: RedshiftClusterRole 326 | Properties: 327 | ClusterType: !Ref ClusterType 328 | NumberOfNodes: 329 | Fn::If: 330 | - IsMultiNodeCluster 331 | - Ref: NumberOfNodes 332 | - Ref: AWS::NoValue 333 | NodeType: 334 | Ref: NodeType 335 | DBName: 336 | Ref: DatabaseName 337 | IamRoles: 338 | - !GetAtt RedshiftClusterRole.Arn 339 | MasterUsername: 340 | Ref: MasterUser 341 | MasterUserPassword: 342 | Ref: MasterUserPassword 343 | ClusterParameterGroupName: 344 | Ref: RedshiftClusterParameterGroup 345 | VpcSecurityGroupIds: 346 | - Ref: RSDefaultSG 347 | ClusterSubnetGroupName: 348 | Ref: RedshiftClusterSubnetGroup 349 | PubliclyAccessible: true 350 | Port: 351 | Ref: RedshiftPortNumber 352 | RedshiftClusterParameterGroup: 353 | Type: AWS::Redshift::ClusterParameterGroup 354 | Properties: 355 | Description: Cluster parameter group 356 | ParameterGroupFamily: redshift-1.0 357 | Parameters: 358 | - ParameterName: enable_user_activity_logging 359 | ParameterValue: 'true' 360 | RedshiftClusterSubnetGroup: 361 | Type: AWS::Redshift::ClusterSubnetGroup 362 | Properties: 363 | Description: Cluster subnet group 364 | SubnetIds: 365 | - Ref: PublicSubnetA 366 | - Ref: PublicSubnetB 367 | RSDefaultSG: 368 | Type: AWS::EC2::SecurityGroup 369 | Properties: 370 | GroupDescription: RSSecurity group 371 | SecurityGroupIngress: 372 | - CidrIp: !FindInMap 373 | - RegionMap 374 | - !Ref 'AWS::Region' 375 | - RedshiftInboundTraffic 376 | FromPort: 377 | Ref: RedshiftPortNumber 378 | 
ToPort: 379 | Ref: RedshiftPortNumber 380 | IpProtocol: tcp 381 | Description: Kinesis Data Firehose CIDR block 382 | VpcId: !Ref VPC 383 | QSingressRule: 384 | Type: AWS::EC2::SecurityGroupIngress 385 | Condition: isQuickSightRegionIP 386 | Properties: 387 | CidrIp: !FindInMap 388 | - AWSQuickSightIPMap 389 | - !Ref 'AWS::Region' 390 | - QuickSightIP 391 | Description: Amazon QuickSight access 392 | FromPort: 393 | Ref: RedshiftPortNumber 394 | ToPort: 395 | Ref: RedshiftPortNumber 396 | IpProtocol: tcp 397 | GroupId: !GetAtt RSDefaultSG.GroupId 398 | RedshiftClusterRole: 399 | Type: AWS::IAM::Role 400 | Properties: 401 | AssumeRolePolicyDocument: 402 | Version: '2012-10-17' 403 | Statement: 404 | - Effect: Allow 405 | Principal: 406 | Service: 407 | - redshift.amazonaws.com 408 | Action: 409 | - sts:AssumeRole 410 | Condition: 411 | StringEquals: 412 | sts:ExternalId: !Ref 'AWS::AccountId' 413 | Policies: 414 | - PolicyName: !Join 415 | - '' 416 | - - Redshift-Delivery- 417 | - !Ref 'AWS::StackName' 418 | PolicyDocument: 419 | Version: '2012-10-17' 420 | Statement: 421 | - Sid: '' 422 | Effect: Allow 423 | Action: 424 | - s3:GetBucketLocation 425 | - s3:GetObject 426 | - s3:ListBucket 427 | - s3:ListBucketMultipartUploads 428 | - s3:GetBucketAcl 429 | - s3:ListAllMyBuckets 430 | Resource: 431 | - !Join 432 | - '' 433 | - - 'arn:aws:s3:::' 434 | - !Ref 'MetricsBucket' 435 | - !Join 436 | - '' 437 | - - 'arn:aws:s3:::' 438 | - !Ref 'MetricsBucket' 439 | - /* 440 | FirehoseDeliveryRole: 441 | Type: AWS::IAM::Role 442 | Properties: 443 | AssumeRolePolicyDocument: 444 | Version: '2012-10-17' 445 | Statement: 446 | - Effect: Allow 447 | Principal: 448 | Service: 449 | - firehose.amazonaws.com 450 | Action: 451 | - sts:AssumeRole 452 | Condition: 453 | StringEquals: 454 | sts:ExternalId: !Ref 'AWS::AccountId' 455 | Policies: 456 | - PolicyName: !Join 457 | - '' 458 | - - Redshift-Delivery- 459 | - !Ref 'AWS::StackName' 460 | PolicyDocument: 461 | Version: '2012-10-17' 462 | Statement: 463 | - Sid: '' 464 | Effect: Allow 465 | Action: 466 | - s3:AbortMultipartUpload 467 | - s3:GetBucketLocation 468 | - s3:GetObject 469 | - s3:ListBucket 470 | - s3:ListBucketMultipartUploads 471 | - s3:PutObject 472 | - s3:GetBucketAcl 473 | - s3:ListAllMyBuckets 474 | Resource: 475 | - !Join 476 | - '' 477 | - - 'arn:aws:s3:::' 478 | - !Ref 'MetricsBucket' 479 | - !Join 480 | - '' 481 | - - 'arn:aws:s3:::' 482 | - !Ref 'MetricsBucket' 483 | - /* 484 | - Sid: '' 485 | Effect: Allow 486 | Action: 487 | - kms:Decrypt 488 | - kms:GenerateDataKey 489 | Resource: 490 | - !If 491 | - NoEncryption 492 | - !Join 493 | - '' 494 | - - 'arn:aws:kms:' 495 | - !Ref 'AWS::Region' 496 | - ':' 497 | - !Ref 'AWS::AccountId' 498 | - :key/placeholder-kms-id 499 | - !GetAtt 'EncryptionKey.Arn' 500 | Condition: 501 | StringEquals: 502 | kms:ViaService: !Join 503 | - '' 504 | - - s3. 
505 | - !Ref 'AWS::Region' 506 | - .amazonaws.com 507 | StringLike: 508 | kms:EncryptionContext:aws:s3:arn: !Join 509 | - '' 510 | - - 'arn:aws:s3:::' 511 | - !Ref 'MetricsBucket' 512 | - /RedshiftDelivery/* 513 | - Sid: '' 514 | Effect: Allow 515 | Action: 516 | - logs:PutLogEvents 517 | Resource: 518 | - '*' 519 | - Sid: '' 520 | Effect: Allow 521 | Action: 522 | - kinesis:Get* 523 | - kinesis:Describe* 524 | - kinesis:ListShards 525 | - lambda:InvokeFunction 526 | - lambda:GetFunctionConfiguration 527 | - glue:Get* 528 | Resource: 529 | - '*' 530 | MetricsStream: 531 | Type: AWS::KinesisFirehose::DeliveryStream 532 | DependsOn: FirehoseDeliveryRole 533 | Properties: 534 | RedshiftDestinationConfiguration: 535 | ClusterJDBCURL: !Sub "jdbc:redshift://${RedshiftCluster.Endpoint.Address}:${RedshiftCluster.Endpoint.Port}/${DatabaseName}" 536 | CopyCommand: 537 | CopyOptions: !Join 538 | - '' 539 | - - "gzip compupdate off STATUPDATE ON TIMEFORMAT 'epochsecs' " 540 | - "json 's3://" 541 | - !Ref 'MetricsBucket' 542 | - "/metrics_redshift_jsonpath.json'" 543 | DataTableName: !Ref 'RedshiftTableName' 544 | Username: !Ref 'MasterUser' 545 | Password: !Ref 'MasterUserPassword' 546 | CloudWatchLoggingOptions: 547 | Enabled: true 548 | LogGroupName: !Ref RSCloudwatchLogsGroup 549 | LogStreamName: !Ref RSLogStream 550 | RoleARN: !GetAtt 'FirehoseDeliveryRole.Arn' 551 | S3Configuration: 552 | BucketARN: !Join 553 | - '' 554 | - - 'arn:aws:s3:::' 555 | - !Ref 'MetricsBucket' 556 | RoleARN: !GetAtt 'FirehoseDeliveryRole.Arn' 557 | BufferingHints: 558 | IntervalInSeconds: !Ref 'RedshiftBufferInterval' 559 | SizeInMBs: !Ref 'RedshiftBufferSize' 560 | CompressionFormat: GZIP 561 | EncryptionConfiguration: 562 | KMSEncryptionConfig: !If 563 | - NoEncryption 564 | - !Ref 'AWS::NoValue' 565 | - AWSKMSKeyARN: !GetAtt 'EncryptionKey.Arn' 566 | NoEncryptionConfig: !If 567 | - NoEncryption 568 | - NoEncryption 569 | - !Ref 'AWS::NoValue' 570 | Prefix: RedshiftDelivery/ 571 | RSCloudwatchLogsGroup: 572 | Type: AWS::Logs::LogGroup 573 | Properties: 574 | RetentionInDays: 365 575 | RSLogStream: 576 | Type: AWS::Logs::LogStream 577 | Properties: 578 | LogGroupName: !Ref RSCloudwatchLogsGroup 579 | LogStreamName: redshift 580 | 581 | Outputs: 582 | VPCID: 583 | Value: !Ref VPC 584 | PublicSubnetA: 585 | Value: !Ref PublicSubnetA 586 | PublicSubnetB: 587 | Value: !Ref PublicSubnetB 588 | S3Bucket: 589 | Value: !Ref MetricsBucket 590 | RedshiftCluster: 591 | Value: !Ref RedshiftCluster 592 | RedshiftDeliveryStream: 593 | Value: !Ref MetricsStream 594 | RedshiftEndpointAddress: 595 | Value: !Sub "${RedshiftCluster.Endpoint.Address}" 596 | RedshiftEndpointPort: 597 | Value: !Sub "${RedshiftCluster.Endpoint.Port}" 598 | RedshiftDatabaseName: 599 | Value: !Ref DatabaseName 600 | RedshiftJDBCURL: 601 | Value: !Sub "jdbc:redshift://${RedshiftCluster.Endpoint.Address}:${RedshiftCluster.Endpoint.Port}/${DatabaseName}" 602 | 603 | -------------------------------------------------------------------------------- /deploy/3_metrics.sql: -------------------------------------------------------------------------------- 1 | -- Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | -- SPDX-License-Identifier: MIT-0 3 | 4 | CREATE TABLE IF NOT EXISTS public.metrics 5 | ( 6 | "type" VARCHAR(256) ENCODE lzo 7 | ,workload VARCHAR(256) ENCODE lzo 8 | ,context VARCHAR(256) ENCODE lzo 9 | ,tenant_id VARCHAR(256) ENCODE lzo 10 | ,tenant_name VARCHAR(256) ENCODE lzo 11 | ,tenant_tier VARCHAR(256) ENCODE lzo 12 | ,timerecorded TIMESTAMP WITH TIME ZONE ENCODE az64 13 | ,metric_name VARCHAR(256) ENCODE lzo 14 | ,metric_unit VARCHAR(256) ENCODE lzo 15 | ,metric_value NUMERIC(18,0) ENCODE az64 16 | ,meta_data VARCHAR(256) ENCODE lzo 17 | ) 18 | DISTSTYLE AUTO 19 | ; 20 | -------------------------------------------------------------------------------- /deploy/4_setup-s3-quicksight.py: -------------------------------------------------------------------------------- 1 | ## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | import time 5 | import boto3 6 | import random 7 | import string 8 | 9 | def SetupS3(client, s3_bucket_name): 10 | client.upload_file(Bucket=s3_bucket_name,Key='metrics_redshift_jsonpath.json', Filename='./artifacts/metrics_redshift_jsonpath.json') 11 | 12 | def SetupQuicksight(client, account_id, quicksight_user_arn, random_string, redshift_host, redshift_port, redshift_db_name, redshift_cluster_id, redshift_user_name, redshift_password): 13 | datasource_response = client.create_data_source( 14 | AwsAccountId=account_id, 15 | DataSourceId='MetricsDataSource-' + random_string, 16 | Name='MetricsDataSource-' + random_string, 17 | Type='REDSHIFT', 18 | DataSourceParameters={ 19 | 'RedshiftParameters': { 20 | 'Host': redshift_host_name, 21 | 'Port': redshift_port, 22 | 'Database': redshift_db_name, 23 | 'ClusterId': redshift_cluster_id 24 | } 25 | }, 26 | Credentials={ 27 | 'CredentialPair': { 28 | 'Username': redshift_user_name, 29 | 'Password': redshift_password 30 | } 31 | }, 32 | Permissions=[ 33 | { 34 | 'Principal': quicksight_user_arn, 35 | 'Actions': ["quicksight:DescribeDataSource","quicksight:DescribeDataSourcePermissions","quicksight:PassDataSource","quicksight:UpdateDataSource","quicksight:DeleteDataSource","quicksight:UpdateDataSourcePermissions"] 36 | }, 37 | ], 38 | 39 | SslProperties={ 40 | 'DisableSsl': False 41 | }, 42 | Tags=[ 43 | { 44 | 'Key': 'Name', 45 | 'Value': 'Metrics-Analytics' 46 | }, 47 | ] 48 | ) 49 | 50 | datasource_arn = datasource_response.get('Arn') 51 | 52 | time.sleep(1) 53 | 54 | response = client.create_data_set( 55 | AwsAccountId= account_id, 56 | DataSetId='MetricsDataSet-' + random_string, 57 | Name='MetricsDataSet-' + random_string, 58 | PhysicalTableMap={ 59 | 'string': { 60 | 'RelationalTable': { 61 | 'DataSourceArn': datasource_arn, 62 | 'Schema': 'public', 63 | 'Name': 'metrics', 64 | 'InputColumns': [ 65 | { 66 | 'Name': 'type', 67 | 'Type': 'STRING' 68 | }, 69 | { 70 | 'Name': 'workload', 71 | 'Type': 'STRING' 72 | }, 73 | { 74 | 'Name': 'context', 75 | 'Type': 'STRING' 76 | }, 77 | { 78 | 'Name': 'tenant_id', 79 | 'Type': 'STRING' 80 | }, 81 | { 82 | 'Name': 'tenant_name', 83 | 'Type': 'STRING' 84 | }, 85 | { 86 | 'Name': 'tenant_tier', 87 | 'Type': 'STRING' 88 | }, 89 | { 90 | 'Name': 'timerecorded', 91 | 'Type': 'DATETIME' 92 | }, 93 | { 94 | 'Name': 'metric_name', 95 | 'Type': 'STRING' 96 | }, 97 | { 98 | 'Name': 'metric_unit', 99 | 'Type': 'STRING' 100 | }, 101 | { 102 | 'Name': 'metric_value', 103 | 'Type': 'INTEGER' 104 | }, 105 | { 106 | 'Name': 'meta_data', 107 | 'Type': 'STRING' 108 | }, 109 | ] 110 | } 111 | } 112 | }, 113 | 
ImportMode='DIRECT_QUERY', 114 | Permissions=[ 115 | { 116 | 'Principal': quicksight_user_arn, 117 | 'Actions': ["quicksight:DescribeDataSet","quicksight:DescribeDataSetPermissions","quicksight:PassDataSet","quicksight:DescribeIngestion","quicksight:ListIngestions","quicksight:UpdateDataSet","quicksight:DeleteDataSet","quicksight:CreateIngestion","quicksight:CancelIngestion","quicksight:UpdateDataSetPermissions"] 118 | }, 119 | ], 120 | Tags=[ 121 | { 122 | 'Key': 'Name', 123 | 'Value': 'Metrics-Analytics' 124 | }, 125 | ] 126 | ) 127 | 128 | 129 | def randomString(stringLength=8): 130 | letters = string.ascii_lowercase 131 | return ''.join(random.choice(letters) for i in range(stringLength)) 132 | 133 | def input_with_default(msg, default): 134 | value = input(msg + " [" + default + "] : ") 135 | if value == "": 136 | value = default 137 | return value 138 | 139 | if __name__ == "__main__": 140 | try: 141 | current_session = boto3.session.Session() 142 | current_credentials = current_session.get_credentials().get_frozen_credentials() 143 | 144 | region = input_with_default("Enter region associated with Quicksight account", current_session.region_name) 145 | access_key = input_with_default("Enter AWS Access Key associated with Quicksight account", current_credentials.access_key) 146 | secret_key = input_with_default("Enter AWS Secret Key associated with Quicksight account", current_credentials.secret_key) 147 | 148 | sts_client = boto3.client('sts', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) 149 | quicksight_client = boto3.client('quicksight', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) 150 | cfn_client = boto3.client('cloudformation', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) 151 | s3_client = boto3.client('s3', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) 152 | 153 | aws_account_id = sts_client.get_caller_identity().get('Account') 154 | 155 | response = quicksight_client.list_users( 156 | AwsAccountId=aws_account_id, 157 | Namespace='default' 158 | ) 159 | 160 | quick_user_name = input_with_default("Quicksight user name", response['UserList'][0].get('UserName')) 161 | 162 | redshift_response = int(input_with_default("Enter 0 to provide Redshift connection details manually. 
Enter 1 to read the connection details from the deployed Cloudformation stack", "1")) 163 | 164 | if (redshift_response == 0): 165 | redshift_host_name = input("Redshift host name for accessing metrics data: ") 166 | redshift_cluster_id = input("Redshift cluster id for accessing metrics data: ") 167 | redshift_port_number = int(input_with_default("Redshift cluster port number", "8200")) 168 | redshift_database_name = input_with_default("Redshift cluster database name", "metricsdb") 169 | else: 170 | cfn_name = input("Cloudformation stack name for Metrics & Analytics: ") 171 | outputs = cfn_client.describe_stacks(StackName=cfn_name)['Stacks'][0]['Outputs'] 172 | 173 | for output in outputs: 174 | if output['OutputKey'] == 'RedshiftCluster': 175 | redshift_cluster_id = output['OutputValue'] 176 | if output['OutputKey'] == 'RedshiftEndpointAddress': 177 | redshift_host_name = output['OutputValue'] 178 | if output['OutputKey'] == 'RedshiftEndpointPort': 179 | redshift_port_number = int(output['OutputValue']) 180 | if output['OutputKey'] == 'RedshiftDatabaseName': 181 | redshift_database_name = output['OutputValue'] 182 | if output['OutputKey'] == 'S3Bucket': 183 | s3_bucket_name = output['OutputValue'] 184 | 185 | quicksight_user = quicksight_client.describe_user( 186 | UserName=quick_user_name, 187 | AwsAccountId=aws_account_id, 188 | Namespace='default' 189 | ) 190 | 191 | quicksight_user_arn = (quicksight_user.get('User').get('Arn')) 192 | 193 | redshift_user_name = input("Redshift user name for accessing metrics data: ") 194 | redshift_password = input("Redshift password for accessing metrics data: ") 195 | 196 | SetupS3(client=s3_client, s3_bucket_name=s3_bucket_name) 197 | 198 | SetupQuicksight(client=quicksight_client,account_id=aws_account_id, quicksight_user_arn=quicksight_user_arn, random_string=randomString(), 199 | redshift_host=redshift_host_name, redshift_port=redshift_port_number, redshift_db_name=redshift_database_name, 200 | redshift_cluster_id=redshift_cluster_id, redshift_user_name=redshift_user_name, redshift_password=redshift_password) 201 | 202 | except Exception as e: 203 | print("error occurred") 204 | print(e) 205 | raise -------------------------------------------------------------------------------- /deploy/artifacts/metrics_redshift_jsonpath.json: -------------------------------------------------------------------------------- 1 | { 2 | "jsonpaths": [ 3 | "$.type", 4 | "$.workload", 5 | "$.context", 6 | "$.tenant.id", 7 | "$.tenant.name", 8 | "$.tenant.tier", 9 | "$.timestamp", 10 | "$.metric.name", 11 | "$.metric.unit", 12 | "$.metric.value", 13 | "$.metadata" 14 | ] 15 | } -------------------------------------------------------------------------------- /images/Architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/Architecture.png -------------------------------------------------------------------------------- /images/DeployArchitecture_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/DeployArchitecture_1.png -------------------------------------------------------------------------------- /images/DeployArchitecture_2.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/DeployArchitecture_2.png -------------------------------------------------------------------------------- /images/DeployVPC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/DeployVPC.png -------------------------------------------------------------------------------- /images/MetricGenerator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/MetricGenerator.png -------------------------------------------------------------------------------- /images/MetricsPerUser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/MetricsPerUser.png -------------------------------------------------------------------------------- /images/Quicksight.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/Quicksight.png -------------------------------------------------------------------------------- /images/QuicksightCalculatedField.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/QuicksightCalculatedField.png -------------------------------------------------------------------------------- /images/RawMetrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/RawMetrics.png -------------------------------------------------------------------------------- /images/Redshift.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/Redshift.png -------------------------------------------------------------------------------- /images/SetupQuicksight.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/SetupQuicksight.png -------------------------------------------------------------------------------- /images/Sidebar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/Sidebar.png -------------------------------------------------------------------------------- /images/SystemUsage_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/SystemUsage_1.png -------------------------------------------------------------------------------- /images/SystemUsage_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/SystemUsage_2.png -------------------------------------------------------------------------------- /images/SystemUsage_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/SystemUsage_3.png -------------------------------------------------------------------------------- /images/TopNFeature.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-saas-factory-ref-solution-metrics-analytics/df1f2a0d154e1a5bb0c6b1786b0a2c380b08751a/images/TopNFeature.png -------------------------------------------------------------------------------- /metrics-generator/application-metrics-generator.py: -------------------------------------------------------------------------------- 1 | ## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | import sys 5 | import os 6 | import json 7 | import random 8 | import datetime 9 | import boto3 10 | 11 | start_time = datetime.datetime.now() 12 | no_of_days = 30 13 | 14 | workload_contexts_distribution = [2, 15, 20, 18, 30, 15] 15 | workload_contexts = { 16 | "OnBoardingApplication": ['TenantCreation', 'UserCreation'], 17 | "AuthApplication": ['Login', 'Logout', 'PasswordReset'], 18 | "PhotoApplication": ['PhotoUpload', 'PhotoEdit', 'PhotoDelete'], 19 | "MessagingApplication": ['SendMessage', 'SendBulkMessages', 'DeleteMessages', 'ArchiveMessages'], 20 | "ProductApplication": ['ViewProduct', 'ViewProductDetails', 'AddNewProduct', 'DeleteProduct', 'UpdateProduct'], 21 | "BatchWorkload": ['ActiveProductsReport', 'DailyTransactionReport', 'DailyInventoryReport', 'DailySalesReport'] 22 | } 23 | 24 | tenant_distribution = [5, 10, 20, 15, 2, 3, 10, 5, 25, 5] 25 | regular_tenants = [ 26 | {"id": "tenant-id-1", "name":"tenant-name-a", "tier":"standard"}, 27 | {"id": "tenant-id-2", "name":"tenant-name-b", "tier":"premium"}, 28 | {"id": "tenant-id-3", "name":"tenant-name-c", "tier":"basic"}, 29 | {"id": "tenant-id-4", "name":"tenant-name-d", "tier":"basic"}, 30 | {"id": "tenant-id-5", "name":"tenant-name-e", "tier":"standard"}, 31 | {"id": "tenant-id-6", "name":"tenant-name-f", "tier":"standard"}, 32 | {"id": "tenant-id-7", "name":"tenant-name-g", "tier":"free"}, 33 | {"id": "tenant-id-8", "name":"tenant-name-h", "tier":"free"}, 34 | {"id": "tenant-id-9", "name":"tenant-name-i", "tier":"basic"}, 35 | {"id": "tenant-id-0", "name":"tenant-name-j", "tier":"free"} 36 | ] 37 | 38 | user_distribution = [10, 40, 20, 30] 39 | users = ('user-1', 'user-2', 'user-3', 'user-4') 40 | 41 | resource_metrics = { 42 | "s3": ["Storage", "DataTransfer"], 43 | "load-balancer": ["ExecutionTime"], 44 | "lambda": ["ExecutionTime"], 45 | "dynamo-db": ["Storage", "ExecutionTime", "DataTransfer"], 46 | "rds": ["Storage", "ExecutionTime", "DataTransfer"] 47 | } 48 | 49 | def 
generate_random_metric_value(metric_name): 50 | metric = {} 51 | if metric_name == "Storage": 52 | metric = {'name' : 'Storage', 'unit' : 'MB', 'value' : random.randrange(50, 5000, 100)} 53 | elif metric_name == "ExecutionTime": 54 | metric = {'name' : 'ExecutionTime', 'unit' : 'MilliSeconds', 'value' : random.randrange(100, 5000, 200)} 55 | elif metric_name == "DataTransfer": 56 | metric = {'name' : 'DataTransfer', 'unit' : 'MB', 'value' : random.randrange(10, 3000, 200)} 57 | 58 | return metric 59 | 60 | def event_time(): 61 | random_days = random.randint(1, no_of_days) 62 | prev_days = start_time + datetime.timedelta(days=random_days) 63 | random_minute = random.randint(0, 59) 64 | random_hour = random.randint(0, 23) 65 | time = prev_days + datetime.timedelta(hours=random_hour) + datetime.timedelta(minutes=random_minute) 66 | return int(time.timestamp()) 67 | 68 | def input_with_default(msg, default): 69 | value = input(msg + " [" + default + "] : ") 70 | if value == "": 71 | value = default 72 | return value 73 | 74 | def generate_metric_for(workload, tenants): 75 | selected_context = random.choice(workload_contexts[workload]) 76 | selected_resource = random.choice(list(resource_metrics.keys())) 77 | selected_metric_name = random.choice(resource_metrics[selected_resource]) 78 | random_metric_value = generate_random_metric_value(selected_metric_name) 79 | 80 | application_metric = { 81 | 'type' : 'Application', 82 | 'workload': workload, 83 | 'context': selected_context, 84 | 'tenant' : random.choices(tenants, tenant_distribution)[0], 85 | 'metric': random_metric_value, 86 | 'timestamp': event_time(), 87 | 'metadata' : {'user' : random.choices(users, user_distribution)[0], 'resource': selected_resource} 88 | } 89 | #print(application_metric) 90 | return application_metric 91 | 92 | def generate_metrics_for(tenants, no_of_metrics, stream_name, batch_size): 93 | metrics_batch = [] 94 | 95 | for m in range(no_of_metrics): 96 | selected_workload = random.choices(list(workload_contexts.keys()), workload_contexts_distribution)[0] 97 | 98 | if len(metrics_batch) < batch_size: 99 | print("Generating Metric for: " + selected_workload) 100 | metric = generate_metric_for(selected_workload, tenants) 101 | metrics_batch.append({'Data' : json.dumps(metric)}) 102 | else: 103 | write_data_to_firehose(stream_name, metrics_batch) 104 | display("Processed batch of " + str(batch_size)) 105 | metrics_batch = [] 106 | 107 | if len(metrics_batch) > 0: 108 | write_data_to_firehose(stream_name, metrics_batch) 109 | display("Processed batch of " + str(len(metrics_batch))) 110 | 111 | def display(msg): 112 | print("___________________________________") 113 | print(msg + "...") 114 | print("___________________________________") 115 | 116 | def write_data_to_firehose(stream_name, metrics_batch): 117 | response = firehose_client.put_record_batch( 118 | DeliveryStreamName = stream_name, 119 | Records = metrics_batch 120 | ) 121 | 122 | if __name__ == "__main__": 123 | try: 124 | number_of_metrics_to_generate = int(input_with_default("How many metrics? ", "10000")) 125 | start_at = input_with_default("Enter start date for metrics? ", "2020-01-01") 126 | start_time = datetime.datetime.strptime(start_at, "%Y-%m-%d") 127 | no_of_days = int(input_with_default("Number of days? ", "30")) 128 | batch_size = int(input_with_default("Batch size for Kinesis? 
", "25")) 129 | 130 | current_session = boto3.session.Session() 131 | current_credentials = current_session.get_credentials().get_frozen_credentials() 132 | 133 | region = input_with_default("Enter region for the deployed metrics stack", current_session.region_name) 134 | access_key = input_with_default("Enter AWS Access Key for the deployed metrics stack", current_credentials.access_key) 135 | secret_key = input_with_default("Enter AWS Secret Key for the deployed metrics stack", current_credentials.secret_key) 136 | 137 | firehose_client = boto3.client('firehose', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key) 138 | streams = firehose_client.list_delivery_streams() 139 | 140 | stream_name = input_with_default("Enter Kinesis stream name: ", [stream for stream in streams['DeliveryStreamNames'] if 'MetricsStream' in stream][0]) 141 | 142 | 143 | display("Generating Event Metrics: " + str(number_of_metrics_to_generate)) 144 | generate_metrics_for(regular_tenants, number_of_metrics_to_generate, stream_name, batch_size) 145 | 146 | except Exception as e: 147 | print("error occured") 148 | print(e) 149 | raise 150 | -------------------------------------------------------------------------------- /metrics-java-lib/.classpath: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /metrics-java-lib/.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | metrics-java-sdk 4 | 5 | 6 | 7 | 8 | 9 | org.eclipse.jdt.core.javabuilder 10 | 11 | 12 | 13 | 14 | org.eclipse.m2e.core.maven2Builder 15 | 16 | 17 | 18 | 19 | 20 | org.eclipse.jdt.core.javanature 21 | org.eclipse.m2e.core.maven2Nature 22 | 23 | 24 | 25 | 1603317638622 26 | 27 | 30 28 | 29 | org.eclipse.core.resources.regexFilterMatcher 30 | node_modules|.git|__CREATED_BY_JAVA_LANGUAGE_SERVER__ 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /metrics-java-lib/.settings/org.eclipse.core.resources.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | encoding//src/main/java=UTF-8 3 | encoding//src/main/resources=UTF-8 4 | encoding//src/test/java=UTF-8 5 | encoding/=UTF-8 6 | -------------------------------------------------------------------------------- /metrics-java-lib/.settings/org.eclipse.jdt.apt.core.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | org.eclipse.jdt.apt.aptEnabled=false 3 | -------------------------------------------------------------------------------- /metrics-java-lib/.settings/org.eclipse.jdt.core.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 3 | org.eclipse.jdt.core.compiler.compliance=1.8 4 | org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled 5 | org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning 6 | org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=ignore 7 | org.eclipse.jdt.core.compiler.processAnnotations=disabled 8 | 
org.eclipse.jdt.core.compiler.release=disabled 9 | org.eclipse.jdt.core.compiler.source=1.8 10 | -------------------------------------------------------------------------------- /metrics-java-lib/.settings/org.eclipse.m2e.core.prefs: -------------------------------------------------------------------------------- 1 | activeProfiles= 2 | eclipse.preferences.version=1 3 | resolveWorkspaceProjects=true 4 | version=1 5 | -------------------------------------------------------------------------------- /metrics-java-lib/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.amazonaws.saas 8 | metrics-java-lib 9 | 1.2.0 10 | 11 | 12 | MIT No Attribution License (MIT-0) 13 | https://spdx.org/licenses/MIT-0.html 14 | 15 | 16 | SaaSFactoryMetricsAnalytics 17 | Sample Java library which can be used to send Multi-Tenant metrics data 18 | 19 | 20 | UTF-8 21 | 1.8 22 | 23 | 24 | 25 | 26 | 27 | org.apache.maven.plugins 28 | maven-compiler-plugin 29 | 3.8.1 30 | 31 | ${java.version} 32 | ${java.version} 33 | 34 | 35 | 36 | org.apache.maven.plugins 37 | maven-assembly-plugin 38 | 3.1.1 39 | 40 | 41 | 42 | jar-with-dependencies 43 | 44 | 45 | 46 | com.amazonaws.saas.sampleclient 47 | 48 | 49 | 50 | 51 | 52 | 53 | make-assembly 54 | package 55 | 56 | single 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | org.bitbucket.b_c 68 | jose4j 69 | 0.7.2 70 | 71 | 72 | 73 | software.amazon.awssdk 74 | firehose 75 | 2.13.12 76 | 77 | 78 | 79 | software.amazon.awssdk 80 | netty-nio-client 81 | 2.13.12 82 | 83 | 84 | 85 | com.fasterxml.jackson.core 86 | jackson-core 87 | 2.11.0 88 | 89 | 90 | 91 | com.fasterxml.jackson.core 92 | jackson-annotations 93 | 2.11.0 94 | 95 | 96 | 97 | org.slf4j 98 | slf4j-api 99 | 1.7.30 100 | 101 | 102 | 103 | ch.qos.logback 104 | logback-classic 105 | 1.2.3 106 | runtime 107 | 108 | 109 | 110 | junit 111 | junit 112 | 4.13.1 113 | test 114 | 115 | 116 | 117 | org.jmock 118 | jmock-junit4 119 | 2.12.0 120 | test 121 | 122 | 123 | 124 | 125 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/FirehosePublishService.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import com.amazonaws.saas.metricsmanager.entities.MetricEvent; 6 | import com.fasterxml.jackson.databind.ObjectMapper; 7 | import io.netty.util.CharsetUtil; 8 | import java.util.ArrayList; 9 | import java.util.Collections; 10 | import java.util.List; 11 | 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | import software.amazon.awssdk.core.SdkBytes; 15 | import software.amazon.awssdk.regions.Region; 16 | import software.amazon.awssdk.services.firehose.FirehoseClient; 17 | import software.amazon.awssdk.services.firehose.model.PutRecordBatchRequest; 18 | import software.amazon.awssdk.services.firehose.model.Record; 19 | 20 | /** 21 | * FirehosePublishService is used to log the metric event by sending it to kinsis data firehose. 22 | * It can be used in a batch and single mode of communication. 23 | * In log running tasks batch mode is preferred, batch size and time window can be configured via 24 | * properties files. 
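 * For example, this sample's lib-config.properties sets {@code batch.size=25} and {@code flush.time.window.in.seconds=30}.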
25 | */ 26 | public class FirehosePublishService { 27 | private final Logger logger = LoggerFactory.getLogger(FirehosePublishService.class); 28 | 29 | public static final int DEFAULT_FLUSH_TIME_IN_SECS = 30; 30 | private final FirehoseClient firehose; 31 | private final String streamName; 32 | private final int bufferSize; 33 | private List recordBuffer; 34 | private Long startTime; 35 | private int flushTimeWindowInSeconds; 36 | 37 | protected FirehosePublishService(String kinesisStreamName, Region region, int batchSize, int flushTimeWindow) { 38 | this.bufferSize = batchSize; 39 | this.streamName = kinesisStreamName; 40 | this.flushTimeWindowInSeconds = flushTimeWindow; 41 | this.initializeBuffer(); 42 | this.firehose = getFirehoseClientIn(region); 43 | } 44 | 45 | public void publishEvent(MetricEvent event) { 46 | String eventJsonString = ""; 47 | 48 | try { 49 | eventJsonString = (new ObjectMapper()).writeValueAsString(event); 50 | logger.debug(String.format("Metric Event Json: %s", eventJsonString)); 51 | Record record = Record.builder().data(SdkBytes.fromByteArray(eventJsonString.getBytes(CharsetUtil.UTF_8))).build(); 52 | recordBuffer.add(record); 53 | 54 | synchronized (this) { 55 | if (shouldSendToKinesis()) { 56 | writeToKinesisFirehose(); 57 | } 58 | } 59 | } catch (Exception exception) { 60 | logger.debug(String.format("Error: Unable to log metric: %s", eventJsonString), exception); 61 | } 62 | 63 | } 64 | 65 | public int getBufferSize() { 66 | return bufferSize; 67 | } 68 | 69 | public void shutdown() { 70 | logger.debug("Clean shutdown, sending buffer data to kinesis"); 71 | writeToKinesisFirehose(); 72 | } 73 | 74 | protected FirehoseClient getFirehoseClientIn(Region region) { 75 | return FirehoseClient.builder().region(region).build(); 76 | } 77 | 78 | protected boolean shouldSendToKinesis() { 79 | long elapsedTime = (System.currentTimeMillis() - startTime) / 1000L; 80 | if (recordBuffer.size() >= bufferSize) { 81 | logger.debug("Buffer full, writing to kinesis"); 82 | return true; 83 | } else if (elapsedTime >= flushTimeWindowInSeconds) { 84 | logger.debug("Time elapsed, writing to kinesis"); 85 | return true; 86 | } else { 87 | return false; 88 | } 89 | } 90 | 91 | protected void writeToKinesisFirehose() { 92 | logger.debug(streamName); 93 | firehose.putRecordBatch(PutRecordBatchRequest.builder().deliveryStreamName(streamName).records(recordBuffer).build()); 94 | initializeBuffer(); 95 | } 96 | 97 | private void initializeBuffer() { 98 | logger.debug("Initializing Buffer"); 99 | startTime = System.currentTimeMillis(); 100 | recordBuffer = Collections.synchronizedList(new ArrayList<>()); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/MetricsPublisher.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | import software.amazon.awssdk.regions.Region; 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | import com.amazonaws.saas.metricsmanager.builder.MetricEventBuilder; 12 | import com.amazonaws.saas.metricsmanager.entities.Metric; 13 | import com.amazonaws.saas.metricsmanager.entities.MetricEvent; 14 | import com.amazonaws.saas.metricsmanager.entities.Tenant; 15 | import com.amazonaws.saas.tokenmanager.TokenInterface; 16 | 17 | /** 18 | * This is a sample wrapper for FirehosePublishService and uses JWT tokens to extract tenant context. 19 | */ 20 | public class MetricsPublisher { 21 | private static final Logger logger = LoggerFactory.getLogger(MetricsPublisher.class); 22 | 23 | private TokenInterface tokenService; 24 | private FirehosePublishService firehosePublisher; 25 | private static PropertiesUtil propUtil = new PropertiesUtil(); 26 | 27 | public MetricsPublisher() { 28 | int batchSize = Integer.parseInt(propUtil.getPropertyOrDefault("batch.size", "25")); 29 | initializeFirehosePublishService(batchSize); 30 | } 31 | 32 | public MetricsPublisher(int batchSize) { 33 | initializeFirehosePublishService(batchSize); 34 | } 35 | 36 | /** 37 | * This method is used to log different types of metrics 38 | * It will extract tenant data from jwtToken. 39 | * @param metric 40 | * @param jwtToken 41 | */ 42 | public void publishMetricEvent(Metric metric, String jwtToken) { 43 | MetricEvent event = buildMetricEvent(metric, jwtToken, new HashMap<>()); 44 | logger.debug(String.format("Logging Metric Event: %s", metric)); 45 | firehosePublisher.publishEvent(event); 46 | } 47 | 48 | 49 | /** 50 | * This method is used to log different types of metrics 51 | * If there is meta-data that needs to be part of the 52 | * metric payload then it can be sent as a key:value pair in Map. 53 | * It will extract tenant data from jwtToken. 
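 * <p>Illustrative call (the metric value, token variable and meta-data entries below are examples only):
 * {@code publisher.publishMetricEvent(new StorageMetric(512L), jwtToken, Collections.singletonMap("user", "user-1"));}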
54 | * @param metric 55 | * @param jwtToken 56 | * @param metaData 57 | */ 58 | public void publishMetricEvent(Metric metric, String jwtToken, Map metaData) { 59 | MetricEvent event = buildMetricEvent(metric, jwtToken, metaData); 60 | logger.debug(String.format("Logging Metric Event: %s", metric)); 61 | firehosePublisher.publishEvent(event); 62 | } 63 | 64 | public void setTokenService(TokenInterface tokenService) { 65 | this.tokenService = tokenService; 66 | } 67 | 68 | protected MetricEvent buildMetricEvent(Metric metric, String jwtToken, Map metaData) { 69 | Tenant Tenant = tokenService.extractTenantFrom(jwtToken); 70 | 71 | String workload = propUtil.getPropertyOrDefault("workload", "No Workload Info In ENV Variable."); 72 | 73 | return new MetricEventBuilder() 74 | .withType(MetricEvent.Type.Application) 75 | .withWorkload(workload) 76 | .withMetric(metric) 77 | .withTenant(Tenant) 78 | .withMetaData(metaData) 79 | .build(); 80 | } 81 | 82 | private void initializeFirehosePublishService(int batchSize) { 83 | logger.debug("Initializing the service to publish to firehose"); 84 | int flushTimeWindow = Integer.parseInt(propUtil.getPropertyOrDefault("flush.time.window.in.seconds", "5")); 85 | String kinesisStreamName = propUtil.getPropertyOrDefault("kinesis.stream.name", "Metrics"); 86 | Region region = Region.of(propUtil.getPropertyOrDefault("aws.region", "us-east-1")); 87 | firehosePublisher = new FirehosePublishService(kinesisStreamName, region, batchSize, flushTimeWindow); 88 | } 89 | 90 | 91 | } 92 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/MetricsPublisherFactory.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import com.amazonaws.saas.tokenmanager.TokenInterface; 6 | /** 7 | * This is a factory class used to create and configure a batch or single publisher. 8 | */ 9 | public class MetricsPublisherFactory { 10 | 11 | private static MetricsPublisher publisher; 12 | private static MetricsPublisher batchPublisher; 13 | /** 14 | * This method is used to initialize and get a simple publisher, which will send the metric event to 15 | * kinesis data firehose as soon as it is received. This should be used in environments like lambda. 16 | * @return Metricspublisher 17 | */ 18 | public synchronized static MetricsPublisher getPublisher(TokenInterface tokenService) { 19 | if(publisher == null) { 20 | publisher = new MetricsPublisher(1); 21 | publisher.setTokenService(tokenService); 22 | } 23 | return publisher; 24 | } 25 | 26 | /** 27 | * This method is used to initialize and get a batch publisher, which will keep collecting metric events 28 | * in cache. When the buffer is full or the specified time has elapsed then it will send 29 | * the batch to kinesis data firehose. This should be used in long running processes such as applications running 30 | * in containers or EC2. 
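 * <p>A minimal sketch (assumes a JwtTokenManager already configured with the verification key, as in sampleclient.java,
 * and a {@code jwtToken} issued for the current tenant):
 * <pre>{@code
 * MetricsPublisher publisher = MetricsPublisherFactory.getBatchPublisher(new JwtTokenManager());
 * publisher.publishMetricEvent(new CountMetric(1L), jwtToken);
 * // buffered records are flushed to Firehose once batch.size events accumulate
 * // or flush.time.window.in.seconds has elapsed at the next publish call
 * }</pre>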
31 | * @return MetricsPublisher 32 | */ 33 | public synchronized static MetricsPublisher getBatchPublisher(TokenInterface tokenService) { 34 | if(batchPublisher == null) { 35 | batchPublisher = new MetricsPublisher(); 36 | batchPublisher.setTokenService(tokenService); 37 | } 38 | return batchPublisher; 39 | } 40 | 41 | 42 | } 43 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/PropertiesUtil.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import java.util.Properties; 9 | 10 | /** 11 | * PropertiesUtil is used to load the properties file 12 | * It will get the property by name or return the provided default value 13 | * if the property is not found. 14 | */ 15 | public class PropertiesUtil { 16 | 17 | public static final Properties properties = new Properties(); 18 | private static final Logger logger = LoggerFactory.getLogger(PropertiesUtil.class); 19 | 20 | public PropertiesUtil() { 21 | this("lib-config.properties"); 22 | } 23 | 24 | public PropertiesUtil(String propertiesFileName) { 25 | try { 26 | properties.load(getClass().getClassLoader().getResourceAsStream(propertiesFileName)); 27 | } catch (Exception e) { 28 | String message = "Unable to load properties file"; 29 | logger.error(String.format("ERROR: %s", message)); 30 | throw new RuntimeException(message); 31 | } 32 | } 33 | 34 | public String getPropertyOrDefault(String propertyName, String defaultValue) { 35 | if (isNotNullOrEmpty(propertyName)) { 36 | String propertyValue = (String) properties.get(propertyName); 37 | if(isNullOrEmpty(propertyValue)) { 38 | propertyValue = defaultValue; 39 | } 40 | return propertyValue; 41 | } 42 | return defaultValue; 43 | } 44 | 45 | 46 | private boolean isNotNullOrEmpty (String value) { 47 | return !isNullOrEmpty(value); 48 | } 49 | 50 | private boolean isNullOrEmpty (String value) { 51 | return value == null || "".equals(value); 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/builder/MetricBuilder.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.builder; 4 | 5 | import com.amazonaws.saas.metricsmanager.entities.Metric; 6 | 7 | public class MetricBuilder { 8 | private Metric metric = new Metric(); 9 | 10 | public MetricBuilder withName(String name) { 11 | this.metric.setName(name); 12 | return this; 13 | } 14 | 15 | public MetricBuilder withValue(Long value) { 16 | this.metric.setValue(value); 17 | return this; 18 | } 19 | 20 | public MetricBuilder withUnit(String unit) { 21 | this.metric.setUnit(unit); 22 | return this; 23 | } 24 | 25 | public Metric build() { 26 | return this.metric; 27 | } 28 | } 29 | 30 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/builder/MetricEventBuilder.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates.
All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.builder; 4 | 5 | import java.util.Map; 6 | 7 | import com.amazonaws.saas.metricsmanager.entities.Metric; 8 | import com.amazonaws.saas.metricsmanager.entities.MetricEvent; 9 | import com.amazonaws.saas.metricsmanager.entities.Tenant; 10 | import com.amazonaws.saas.metricsmanager.entities.MetricEvent.Type; 11 | 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | public class MetricEventBuilder { 16 | private static final Logger logger = LoggerFactory.getLogger(MetricEventBuilder.class); 17 | private MetricEvent metricEvent; 18 | 19 | public MetricEventBuilder() { 20 | metricEvent = new MetricEvent(); 21 | } 22 | 23 | public MetricEventBuilder withType(Type type) { 24 | this.metricEvent.setType(type); 25 | return this; 26 | } 27 | 28 | public MetricEventBuilder withWorkload(String workload) { 29 | this.metricEvent.setWorkload(workload); 30 | return this; 31 | } 32 | 33 | public MetricEventBuilder withContext(String context) { 34 | this.metricEvent.setContext(context); 35 | return this; 36 | } 37 | 38 | public MetricEventBuilder withTenant(Tenant Tenant) { 39 | this.metricEvent.setTenant(Tenant); 40 | return this; 41 | } 42 | 43 | public MetricEventBuilder withMetaData(Map metaData) { 44 | this.metricEvent.setMetaData(metaData); 45 | return this; 46 | } 47 | 48 | public MetricEventBuilder addMetaData(String key, String value) { 49 | this.metricEvent.getMetaData().put(key, value); 50 | return this; 51 | } 52 | 53 | public MetricEventBuilder withMetric(Metric metric) { 54 | this.metricEvent.setMetric(metric); 55 | return this; 56 | } 57 | 58 | public MetricEvent build() { 59 | if (this.metricEvent.isValid()) { 60 | return this.metricEvent; 61 | } else { 62 | logger.debug("Error: MetricEvent is missing required data"); 63 | return null; 64 | } 65 | } 66 | } 67 | 68 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/builder/TenantBuilder.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.builder; 4 | 5 | import com.amazonaws.saas.metricsmanager.entities.Tenant; 6 | 7 | public class TenantBuilder { 8 | private Tenant Tenant = new Tenant(); 9 | 10 | public TenantBuilder() { 11 | } 12 | 13 | public TenantBuilder withId(String id) { 14 | this.Tenant.setId(id); 15 | return this; 16 | } 17 | 18 | public TenantBuilder withName(String name) { 19 | this.Tenant.setName(name); 20 | return this; 21 | } 22 | 23 | public TenantBuilder withTier(String tier) { 24 | this.Tenant.setTier(tier); 25 | return this; 26 | } 27 | 28 | 29 | public Tenant build() { 30 | return this.Tenant; 31 | } 32 | 33 | public Tenant empty() { 34 | return new Tenant(); 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/CountMetric.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | public class CountMetric extends Metric { 6 | 7 | public static final String METRIC_NAME = "Count"; 8 | public static final String DEFAULT_UNIT = "unit"; 9 | 10 | public CountMetric(Long value) { 11 | super(METRIC_NAME, DEFAULT_UNIT, value); 12 | } 13 | 14 | 15 | } 16 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/ExecutionTimeMetric.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | public class ExecutionTimeMetric extends Metric { 6 | 7 | public static final String METRIC_NAME = "ExecutionTime"; 8 | public static final String DEFAULT_UNIT = "msec"; 9 | 10 | public ExecutionTimeMetric(Long value) { 11 | super(METRIC_NAME, DEFAULT_UNIT, value); 12 | } 13 | 14 | public ExecutionTimeMetric(String unit, Long value) { 15 | super(METRIC_NAME, unit, value); 16 | } 17 | 18 | 19 | } 20 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/Metric.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | import com.fasterxml.jackson.annotation.JsonGetter; 6 | import com.fasterxml.jackson.annotation.JsonIgnore; 7 | 8 | import java.util.Objects; 9 | 10 | public class Metric { 11 | 12 | private String name; 13 | private String unit; 14 | private Long value; 15 | 16 | public Metric() { 17 | } 18 | 19 | public Metric(String name, String unit, Long value) { 20 | this.name = name; 21 | this.unit = unit; 22 | this.value = value; 23 | } 24 | 25 | @JsonIgnore 26 | public boolean isValid() { 27 | return this.getName() != null && !this.getUnit().isEmpty() && this.getValue() != null; 28 | } 29 | 30 | @JsonGetter("name") 31 | public String getName() { 32 | return this.name; 33 | } 34 | 35 | public void setName(String name) { 36 | this.name = name; 37 | } 38 | 39 | @JsonGetter("unit") 40 | public String getUnit() { 41 | return this.unit; 42 | } 43 | 44 | public void setUnit(String unit) { 45 | this.unit = unit; 46 | } 47 | 48 | @JsonGetter("value") 49 | public Long getValue() { 50 | return this.value; 51 | } 52 | 53 | public void setValue(Long value) { 54 | this.value = value; 55 | } 56 | 57 | public String toString() { 58 | return "Metric{name=" + this.name + ", unit='" + this.unit + '\'' + ", value=" + this.value + '}'; 59 | } 60 | 61 | public boolean equals(Object o) { 62 | if (this == o) { 63 | return true; 64 | } else if (o != null && this.getClass() == o.getClass()) { 65 | Metric metric = (Metric)o; 66 | return this.name == metric.name && Objects.equals(this.unit, metric.unit) && Objects.equals(this.value, metric.value); 67 | } else { 68 | return false; 69 | } 70 | } 71 | 72 | public int hashCode() { 73 | return Objects.hash(new Object[]{this.name, this.unit, this.value}); 74 | } 75 | 76 | 77 | } 78 | 79 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/MetricEvent.java: 
-------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | import com.fasterxml.jackson.annotation.JsonGetter; 6 | import com.fasterxml.jackson.annotation.JsonIgnore; 7 | 8 | import java.time.Instant; 9 | import java.util.HashMap; 10 | import java.util.Map; 11 | import java.util.Objects; 12 | 13 | public class MetricEvent { 14 | 15 | public static enum Type { 16 | Application, 17 | System; 18 | } 19 | 20 | private MetricEvent.Type type; 21 | private String workload; 22 | private String context; 23 | private Tenant Tenant; 24 | private Map metaData; 25 | private Metric metric; 26 | private Long timestamp; 27 | 28 | public MetricEvent() { 29 | this.type = MetricEvent.Type.Application; 30 | this.timestamp = Instant.now().getEpochSecond(); 31 | this.metaData = new HashMap<>(); 32 | this.Tenant = new Tenant(); 33 | this.metric = new Metric(); 34 | } 35 | 36 | @JsonIgnore 37 | public boolean isValid() { 38 | return !this.getWorkload().isEmpty() && this.Tenant.isValid() && this.metric.isValid(); 39 | } 40 | 41 | public void setType(MetricEvent.Type type) { 42 | this.type = type; 43 | } 44 | 45 | @JsonGetter("type") 46 | public MetricEvent.Type getType() { 47 | return this.type; 48 | } 49 | 50 | @JsonGetter("workload") 51 | public String getWorkload() { 52 | return this.workload; 53 | } 54 | 55 | public void setWorkload(String workload) { 56 | this.workload = workload; 57 | } 58 | 59 | @JsonGetter("context") 60 | public String getContext() { 61 | return this.context; 62 | } 63 | 64 | public void setContext(String context) { 65 | this.context = context; 66 | } 67 | 68 | @JsonGetter("tenant") 69 | public Tenant getTenant() { 70 | return this.Tenant; 71 | } 72 | 73 | public void setTenant(Tenant Tenant) { 74 | this.Tenant = Tenant; 75 | } 76 | 77 | @JsonGetter("meta-data") 78 | public Map getMetaData() { 79 | return this.metaData; 80 | } 81 | 82 | public void setMetaData(Map metaData) { 83 | this.metaData = metaData; 84 | } 85 | 86 | @JsonGetter("metric") 87 | public Metric getMetric() { 88 | return this.metric; 89 | } 90 | 91 | public void setMetric(Metric metric) { 92 | this.metric = metric; 93 | } 94 | 95 | @JsonGetter("timestamp") 96 | public Long getTimestamp() { 97 | return this.timestamp; 98 | } 99 | 100 | public void setTimestamp(Long timestamp) { 101 | this.timestamp = timestamp; 102 | } 103 | 104 | @Override 105 | public String toString() { 106 | return "MetricEvent{" + 107 | "type=" + type + 108 | ", workload='" + workload + '\'' + 109 | ", context='" + context + '\'' + 110 | ", tenant=" + Tenant + 111 | ", metaData=" + metaData + 112 | ", metric=" + metric + 113 | ", timestamp=" + timestamp + 114 | '}'; 115 | } 116 | 117 | public boolean equals(Object o) { 118 | if (this == o) { 119 | return true; 120 | } else if (o != null && this.getClass() == o.getClass()) { 121 | MetricEvent that = (MetricEvent)o; 122 | return this.type == that.type && Objects.equals(this.workload, that.workload) && Objects.equals(this.context, that.context) && Objects.equals(this.Tenant, that.Tenant) && Objects.equals(this.metaData, that.metaData) && Objects.equals(this.metric, that.metric) && Objects.equals(this.timestamp, that.timestamp); 123 | } else { 124 | return false; 125 | } 126 | } 127 | 128 | public int hashCode() { 129 | return Objects.hash(new Object[]{this.type, this.workload, this.context, this.Tenant, 
this.metaData, this.metric, this.timestamp}); 130 | } 131 | 132 | } 133 | 134 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/StorageMetric.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | public class StorageMetric extends Metric { 6 | 7 | public static final String METRIC_NAME = "Storage"; 8 | public static final String DEFAULT_UNIT = "MB"; 9 | 10 | public StorageMetric(Long value) { 11 | super(METRIC_NAME, DEFAULT_UNIT, value); 12 | } 13 | 14 | public StorageMetric(String unit, Long value) { 15 | super(METRIC_NAME, unit, value); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/metricsmanager/entities/Tenant.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager.entities; 4 | 5 | 6 | import com.fasterxml.jackson.annotation.JsonGetter; 7 | import com.fasterxml.jackson.annotation.JsonIgnore; 8 | import java.util.Objects; 9 | 10 | public class Tenant { 11 | 12 | private String id; 13 | private String name; 14 | private String tier; 15 | 16 | public Tenant() { 17 | } 18 | 19 | @JsonIgnore 20 | public boolean isValid() { 21 | return this.getId() != null; 22 | } 23 | 24 | @JsonGetter("id") 25 | public String getId() { 26 | return this.id; 27 | } 28 | 29 | public void setId(String id) { 30 | this.id = id; 31 | } 32 | 33 | @JsonGetter("name") 34 | public String getName() { 35 | return this.name; 36 | } 37 | 38 | public void setName(String name) { 39 | this.name = name; 40 | } 41 | 42 | @JsonGetter("tier") 43 | public String getTier() { 44 | return this.tier; 45 | } 46 | 47 | public void setTier(String tier) { 48 | this.tier = tier; 49 | } 50 | 51 | public String toString() { 52 | return "Tenant{id='" + this.id + '\'' + ", name='" + this.name + '\'' + ", tier=" + this.tier + '}'; 53 | } 54 | 55 | @Override 56 | public boolean equals(Object o) { 57 | if (this == o) return true; 58 | if (o == null || getClass() != o.getClass()) return false; 59 | Tenant Tenant = (Tenant) o; 60 | return Objects.equals(id, Tenant.id) && 61 | Objects.equals(name, Tenant.name) && 62 | Objects.equals(tier, Tenant.tier); 63 | } 64 | 65 | @Override 66 | public int hashCode() { 67 | return Objects.hash(id, name, tier); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/sampleclient.java: -------------------------------------------------------------------------------- 1 | package com.amazonaws.saas; 2 | 3 | import org.jose4j.jwk.RsaJsonWebKey; 4 | import org.jose4j.jws.AlgorithmIdentifiers; 5 | import org.jose4j.jws.JsonWebSignature; 6 | import org.jose4j.jwt.JwtClaims; 7 | import org.jose4j.jwk.RsaJwkGenerator; 8 | 9 | import com.amazonaws.saas.metricsmanager.MetricsPublisherFactory; 10 | import com.amazonaws.saas.metricsmanager.MetricsPublisher; 11 | import com.amazonaws.saas.metricsmanager.entities.*; 12 | import com.amazonaws.saas.tokenmanager.*; 13 | import com.amazonaws.saas.metricsmanager.builder.TenantBuilder; 14 
| 15 | import java.util.HashMap; 16 | 17 | public class sampleclient { 18 | 19 | private static JwtTokenManager jwtTokenManager = new JwtTokenManager(); 20 | private static final MetricsPublisher metricPublisher = MetricsPublisherFactory.getPublisher(jwtTokenManager); 21 | private static RsaJsonWebKey rsaJsonWebKey; 22 | 23 | public static void main(String[] args) throws Exception{ 24 | rsaJsonWebKey = RsaJwkGenerator.generateJwk(2048); 25 | rsaJsonWebKey.setKeyId("k1"); 26 | jwtTokenManager.setRsaJsonWebKey(rsaJsonWebKey); 27 | 28 | Tenant tenant = new TenantBuilder().withId("Tenant1").withName("My First Tenant").withTier("Free").build(); 29 | String jwtToken = issueTokenFor(tenant); 30 | 31 | metricPublisher.publishMetricEvent(new ExecutionTimeMetric(100L), jwtToken, new HashMap<>()); 32 | } 33 | 34 | 35 | private static String issueTokenFor(Tenant Tenant) throws Exception { 36 | JwtClaims claims = createJwtClaimsFor(Tenant); 37 | 38 | return createSignedJwtTokenFor(claims); 39 | } 40 | 41 | private static JwtClaims createJwtClaimsFor(Tenant Tenant) { 42 | JwtClaims claims = new JwtClaims(); 43 | claims.setIssuer("Issuer"); 44 | claims.setAudience("Audience"); 45 | claims.setExpirationTimeMinutesInTheFuture(10); 46 | claims.setGeneratedJwtId(); 47 | claims.setIssuedAtToNow(); 48 | claims.setNotBeforeMinutesInThePast(2); 49 | claims.setSubject("subject"); 50 | claims.setClaim("tenant-id", Tenant.getId()); 51 | claims.setClaim("tenant-name", Tenant.getName()); 52 | claims.setClaim("tenant-tier", Tenant.getTier()); 53 | return claims; 54 | } 55 | 56 | private static String createSignedJwtTokenFor(JwtClaims claims) throws Exception { 57 | JsonWebSignature jws = new JsonWebSignature(); 58 | jws.setPayload(claims.toJson()); 59 | jws.setKey(rsaJsonWebKey.getPrivateKey()); 60 | jws.setKeyIdHeaderValue(rsaJsonWebKey.getKeyId()); 61 | jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256); 62 | return jws.getCompactSerialization(); 63 | } 64 | 65 | } 66 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/tokenmanager/JwtTokenManager.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.tokenmanager; 4 | 5 | import com.amazonaws.saas.metricsmanager.builder.TenantBuilder; 6 | import com.amazonaws.saas.metricsmanager.entities.Tenant; 7 | import com.amazonaws.saas.metricsmanager.PropertiesUtil; 8 | import com.amazonaws.saas.tokenmanager.TokenInterface; 9 | 10 | import org.jose4j.jwa.AlgorithmConstraints; 11 | import org.jose4j.jwk.RsaJsonWebKey; 12 | import org.jose4j.jws.AlgorithmIdentifiers; 13 | import org.jose4j.jwt.JwtClaims; 14 | import org.jose4j.jwt.consumer.JwtConsumer; 15 | import org.jose4j.jwt.consumer.JwtConsumerBuilder; 16 | import org.slf4j.Logger; 17 | import org.slf4j.LoggerFactory; 18 | 19 | /** 20 | * This is a sample implementation of JwtTokenManager based on jose4j library 21 | */ 22 | public class JwtTokenManager implements TokenInterface{ 23 | 24 | private static final Logger logger = LoggerFactory.getLogger(JwtTokenManager.class); 25 | private static PropertiesUtil propUtil = new PropertiesUtil(); 26 | public static final String TENANT_ID_DEFAULT_CLAIM_NAME = "tenant-id"; 27 | public static final String TENANT_NAME_DEFAULT_CLAIM_NAME = "tenant-name"; 28 | public static final String TENANT_TIER_DEFAULT_CLAIM_NAME = "tenant-tier"; 29 | private String issuer; 30 | private String audience; 31 | private RsaJsonWebKey rsaJsonWebKey; 32 | 33 | public JwtTokenManager() { 34 | issuer = propUtil.getPropertyOrDefault("issuer", "Issuer"); 35 | audience = propUtil.getPropertyOrDefault("audience", "Audience"); 36 | } 37 | 38 | /** 39 | * The implementation of this method relies on jose4j library and the jwtTokens signed by Rsa algorithm. 40 | * It returns tenant context extracted from the jwtToken. 41 | * @param jwtToken 42 | * @return Tenant 43 | */ 44 | @Override 45 | public Tenant extractTenantFrom(String jwtToken) { 46 | JwtClaims jwtClaim = extractJwtClaim(jwtToken); 47 | if (jwtClaim != null) { 48 | String tenantId = extractTenantIdFrom(jwtClaim); 49 | String tenantName = extractTenantNameFrom(jwtClaim); 50 | String tenantTier = extractTenantTierFrom(jwtClaim); 51 | return new TenantBuilder() 52 | .withId(tenantId) 53 | .withName(tenantName) 54 | .withTier(tenantTier) 55 | .build(); 56 | } else { 57 | return new TenantBuilder().empty(); 58 | } 59 | } 60 | 61 | public void setRsaJsonWebKey(RsaJsonWebKey rsaJsonWebKey) { 62 | this.rsaJsonWebKey = rsaJsonWebKey; 63 | } 64 | 65 | 66 | private JwtClaims extractJwtClaim(String jwt) { 67 | JwtConsumer jwtConsumer = new JwtConsumerBuilder() 68 | .setRequireExpirationTime() 69 | .setAllowedClockSkewInSeconds(30) 70 | .setRequireSubject() 71 | .setExpectedIssuer(issuer) 72 | .setExpectedAudience(audience) 73 | .setVerificationKey(rsaJsonWebKey.getKey()) 74 | .setJwsAlgorithmConstraints( 75 | AlgorithmConstraints.ConstraintType.PERMIT, AlgorithmIdentifiers.RSA_USING_SHA256) 76 | .build(); 77 | 78 | try { 79 | return jwtConsumer.processToClaims(jwt); 80 | } 81 | catch (Exception e) { 82 | logger.error("Error: Unable to process JWT Token", e); 83 | } 84 | return null; 85 | } 86 | 87 | private String extractTenantIdFrom(JwtClaims jwtClaims) { 88 | String claimName = propUtil.getPropertyOrDefault("tenant.id.claim.field", TENANT_ID_DEFAULT_CLAIM_NAME); 89 | return (String) jwtClaims.getClaimValue(claimName); 90 | } 91 | 92 | private String extractTenantNameFrom(JwtClaims jwtClaims) { 93 | String claimName = propUtil.getPropertyOrDefault("tenant.name.claim.field", TENANT_NAME_DEFAULT_CLAIM_NAME); 94 | return (String) jwtClaims.getClaimValue(claimName); 95 | 
} 96 | 97 | private String extractTenantTierFrom(JwtClaims jwtClaims) { 98 | String claimName = propUtil.getPropertyOrDefault("tenant.tier.claim.field", TENANT_TIER_DEFAULT_CLAIM_NAME); 99 | return (String) jwtClaims.getClaimValue(claimName); 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/java/com/amazonaws/saas/tokenmanager/TokenInterface.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.tokenmanager; 4 | 5 | import com.amazonaws.saas.metricsmanager.entities.Tenant; 6 | 7 | public interface TokenInterface { 8 | 9 | public Tenant extractTenantFrom(String token); 10 | } -------------------------------------------------------------------------------- /metrics-java-lib/src/main/resources/lib-config.properties: -------------------------------------------------------------------------------- 1 | # JWT Claim Field Names 2 | Tenant.id.claim.field=tenant-id 3 | Tenant.name.claim.field=tenant-name 4 | Tenant.tier.claim.field=tenant-tier 5 | user.id.claim.field=user-id 6 | 7 | # Kinesis Stream Config 8 | kinesis.stream.name=Metrics-Analytcics-Nov2020-MetricsStream-TPC9CIoZrfOh 9 | aws.region=us-east-1 10 | 11 | # Application Config 12 | workload=Application Name 13 | 14 | # Logger Config 15 | batch.size=25 16 | flush.time.window.in.seconds=30 17 | -------------------------------------------------------------------------------- /metrics-java-lib/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | System.out 5 | 6 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /metrics-java-lib/src/test/java/com/amazonaws/saas/metricsmanager/MetricsPublisherFactoryTest.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import org.junit.Test; 6 | 7 | import static org.junit.Assert.*; 8 | 9 | import com.amazonaws.saas.tokenmanager.TokenInterface; 10 | import com.amazonaws.saas.tokenmanager.JwtTokenManager; 11 | 12 | public class MetricsPublisherFactoryTest { 13 | private TokenInterface tokenService = new JwtTokenManager(); 14 | @Test 15 | public void testMultipleMetricLoggerCreationShouldReturnTheSameObject() { 16 | assertEquals(MetricsPublisherFactory.getPublisher(tokenService), MetricsPublisherFactory.getPublisher(tokenService)); 17 | } 18 | 19 | @Test 20 | public void testMultipleBatchMetricLoggerCreationShouldReturnTheSameObject() { 21 | assertEquals(MetricsPublisherFactory.getBatchPublisher(tokenService), MetricsPublisherFactory.getBatchPublisher(tokenService)); 22 | } 23 | 24 | @Test 25 | public void testBatchMetricLoggerShouldDifferentThanMetricLogger() { 26 | assertNotEquals(MetricsPublisherFactory.getBatchPublisher(tokenService), MetricsPublisherFactory.getPublisher(tokenService)); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /metrics-java-lib/src/test/java/com/amazonaws/saas/metricsmanager/MetricsPublisherTest.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.saas.metricsmanager; 4 | 5 | import org.jmock.Expectations; 6 | import org.jmock.Mockery; 7 | import org.junit.Before; 8 | import org.junit.Test; 9 | 10 | import java.util.HashMap; 11 | 12 | import com.amazonaws.saas.metricsmanager.MetricsPublisher; 13 | import com.amazonaws.saas.metricsmanager.MetricsPublisherFactory; 14 | import com.amazonaws.saas.metricsmanager.builder.TenantBuilder; 15 | import com.amazonaws.saas.metricsmanager.entities.*; 16 | import com.amazonaws.saas.tokenmanager.TokenInterface; 17 | 18 | import static org.junit.Assert.*; 19 | 20 | public class MetricsPublisherTest { 21 | 22 | private MetricsPublisher metricPublisher; 23 | private final Mockery context = new Mockery(); 24 | private final TokenInterface mockJwtService = context.mock(TokenInterface.class); 25 | 26 | @Before 27 | public void setup() { 28 | metricPublisher = MetricsPublisherFactory.getPublisher(mockJwtService); 29 | } 30 | 31 | @Test 32 | public void testMetricLogging() { 33 | String jwtToken = "jwtToken"; 34 | Tenant expectedTenant = new TenantBuilder().withId("123").withName("XYZ").withTier("Free").build(); 35 | Metric expectedMetric = new ExecutionTimeMetric(100L); 36 | 37 | context.checking(new Expectations() {{ 38 | oneOf (mockJwtService).extractTenantFrom(jwtToken); 39 | will(returnValue(expectedTenant)); 40 | }}); 41 | 42 | MetricEvent event = metricPublisher.buildMetricEvent(expectedMetric, jwtToken, new HashMap<>()); 43 | assertEquals(expectedTenant, event.getTenant()); 44 | assertEquals(expectedMetric, event.getMetric()); 45 | } 46 | 47 | } 48 | -------------------------------------------------------------------------------- /metrics-java-lib/src/test/java/com/amazonaws/saas/metricsmanager/PropertiesUtilTest.java: -------------------------------------------------------------------------------- 1 | package com.amazonaws.saas.metricsmanager; 2 | 3 | import org.junit.Test; 4 | 5 | import static org.junit.Assert.assertEquals; 6 | 7 | public class PropertiesUtilTest { 8 | 9 | private PropertiesUtil propertiesUtil = new 
--------------------------------------------------------------------------------
/metrics-java-lib/src/test/java/com/amazonaws/saas/metricsmanager/PropertiesUtilTest.java:
--------------------------------------------------------------------------------
package com.amazonaws.saas.metricsmanager;

import org.junit.Test;

import static org.junit.Assert.assertEquals;

public class PropertiesUtilTest {

    private PropertiesUtil propertiesUtil = new PropertiesUtil();

    @Test(expected = RuntimeException.class)
    public void testConfigFileNotFound() {
        new PropertiesUtil("xyz");
    }

    @Test()
    public void testConfigFileFound() {
        new PropertiesUtil();
    }

    @Test()
    public void loadPropertyFromFile() {
        String propertyValue = propertiesUtil.getPropertyOrDefault("aws.region", "DefaultRegion");
        assertEquals("us-east-1", propertyValue);
    }

    @Test()
    public void whenPropertyIsNotFoundInFile() {
        String propertyValue = propertiesUtil.getPropertyOrDefault("kinesis.streams.name", "DefaultStreamName");
        assertEquals("DefaultStreamName", propertyValue);
    }

    @Test
    public void testLibraryProperties() throws Exception {
        assertEquals("tenant-id", propertiesUtil.getPropertyOrDefault("Tenant.id.claim.field", "None"));
        assertEquals("tenant-name", propertiesUtil.getPropertyOrDefault("Tenant.name.claim.field", "None"));
        assertEquals("tenant-tier", propertiesUtil.getPropertyOrDefault("Tenant.tier.claim.field", "None"));
        assertEquals("user-id", propertiesUtil.getPropertyOrDefault("user.id.claim.field", "None"));
        assertEquals("us-east-1", propertiesUtil.getPropertyOrDefault("aws.region", "None"));
        assertEquals("Application Name", propertiesUtil.getPropertyOrDefault("workload", "None"));
        assertEquals("25", propertiesUtil.getPropertyOrDefault("batch.size", "None"));
        assertEquals("30", propertiesUtil.getPropertyOrDefault("flush.time.window.in.seconds", "None"));
    }
}
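PropertiesUtilTest documents the lookup contract: getPropertyOrDefault returns the value from lib-config.properties when the key is present and otherwise falls back to the supplied default. A minimal sketch of a consumer that relies on this fallback is below; the class name and the default values ("10", "60") are illustrative, not part of the library.

// Illustrative consumer of PropertiesUtil; not part of the library.
package com.amazonaws.saas.metricsmanager;

public class BatchingConfig {

    private final PropertiesUtil properties = new PropertiesUtil();

    // Resolves to 25 with the shipped lib-config.properties.
    public int batchSize() {
        return Integer.parseInt(properties.getPropertyOrDefault("batch.size", "10"));
    }

    // Resolves to 30 with the shipped lib-config.properties.
    public int flushWindowSeconds() {
        return Integer.parseInt(properties.getPropertyOrDefault("flush.time.window.in.seconds", "60"));
    }
}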
--------------------------------------------------------------------------------
/metrics-java-lib/src/test/java/com/amazonaws/saas/tokenmanager/JwtTokenServiceTest.java:
--------------------------------------------------------------------------------
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0
package com.amazonaws.saas.tokenmanager;

import org.jose4j.jwk.RsaJsonWebKey;
import org.jose4j.jws.AlgorithmIdentifiers;
import org.jose4j.jws.JsonWebSignature;
import org.jose4j.jwt.JwtClaims;
import org.jose4j.jwk.RsaJwkGenerator;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;

import com.amazonaws.saas.metricsmanager.builder.TenantBuilder;
import com.amazonaws.saas.metricsmanager.entities.Tenant;

public class JwtTokenServiceTest {

    private JwtTokenManager jwtTokenManager;
    private RsaJsonWebKey rsaJsonWebKey;

    @Before
    public void setup() throws Exception {
        jwtTokenManager = new JwtTokenManager();

        rsaJsonWebKey = RsaJwkGenerator.generateJwk(2048);
        rsaJsonWebKey.setKeyId("k1");
        jwtTokenManager.setRsaJsonWebKey(rsaJsonWebKey);
    }

    @Test
    public void testIssueAndVerificationOfJwtToken() throws Exception {
        Tenant expectedTenant = new TenantBuilder().withId("123").withName("XYZ").withTier("Free").build();
        String jwtToken = issueTokenFor(expectedTenant);
        assertNotNull(jwtToken);

        Tenant tenant = jwtTokenManager.extractTenantFrom(jwtToken);

        assertEquals(expectedTenant, tenant);
    }

    String issueTokenFor(Tenant tenant) throws Exception {
        JwtClaims claims = createJwtClaimsFor(tenant);

        return createSignedJwtTokenFor(claims);
    }

    private JwtClaims createJwtClaimsFor(Tenant tenant) {
        JwtClaims claims = new JwtClaims();
        claims.setIssuer("Issuer");
        claims.setAudience("Audience");
        claims.setExpirationTimeMinutesInTheFuture(10);
        claims.setGeneratedJwtId();
        claims.setIssuedAtToNow();
        claims.setNotBeforeMinutesInThePast(2);
        claims.setSubject("subject");
        claims.setClaim("tenant-id", tenant.getId());
        claims.setClaim("tenant-name", tenant.getName());
        claims.setClaim("tenant-tier", tenant.getTier());
        return claims;
    }

    private String createSignedJwtTokenFor(JwtClaims claims) throws Exception {
        JsonWebSignature jws = new JsonWebSignature();
        jws.setPayload(claims.toJson());
        jws.setKey(rsaJsonWebKey.getPrivateKey());
        jws.setKeyIdHeaderValue(rsaJsonWebKey.getKeyId());
        jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
        return jws.getCompactSerialization();
    }

}
--------------------------------------------------------------------------------
/metrics-java-lib/target/classes/logback.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <target>System.out</target>
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
    </root>
</configuration>
--------------------------------------------------------------------------------