├── .gitignore
├── LICENSE
├── README.md
├── cover.png
├── env.template.sh
└── src
    ├── .dockerignore
    ├── .gitignore
    ├── __init__.py
    ├── account_open
    │   ├── .gitignore
    │   ├── README.md
    │   ├── __init__.py
    │   ├── account_utils
    │   │   ├── __init__.py
    │   │   └── account_open_parameter_name.py
    │   ├── app.py
    │   ├── cdk.json
    │   ├── functions
    │   │   ├── new_account.py
    │   │   └── requirements.txt
    │   ├── requirements-dev.txt
    │   ├── requirements.txt
    │   ├── source.bat
    │   ├── stacks
    │   │   ├── __init__.py
    │   │   ├── datasource_stack.py
    │   │   ├── failover_stack.py
    │   │   └── process_stack.py
    │   └── tests
    │       ├── __init__.py
    │       ├── account-load-test.yml
    │       ├── account_invalid.json
    │       ├── account_poison_pill.json
    │       ├── account_valid.json
    │       ├── failover.txt
    │       ├── onboarding_test_client.py
    │       └── unit
    │           ├── __init__.py
    │           └── test_ch_x_stack.py
    ├── environment-setup
    │   ├── __init__.py
    │   ├── app.py
    │   ├── cdk.json
    │   ├── lambda
    │   │   ├── __init__.py
    │   │   └── hello_resilience.py
    │   ├── requirements.txt
    │   └── stacks
    │       ├── __init__.py
    │       └── hello_resilience_stack.py
    ├── frontend
    │   ├── .gitignore
    │   ├── AdobeStock_4K.mov
    │   ├── README.md
    │   ├── __init__.py
    │   ├── app.py
    │   ├── cdk.json
    │   ├── python
    │   │   └── index.py
    │   ├── requirements-dev.txt
    │   ├── requirements.txt
    │   ├── source.bat
    │   ├── stacks
    │   │   ├── __init__.py
    │   │   ├── front_end_canary_stack.py
    │   │   ├── front_end_rum_stack.py
    │   │   ├── front_end_secondary_bucket_stack.py
    │   │   └── front_end_website_stack.py
    │   ├── tests
    │   │   ├── __init__.py
    │   │   ├── frontend-website-load-test.yml
    │   │   ├── onboarding_test_client.py
    │   │   ├── package.json
    │   │   └── unit
    │   │       ├── __init__.py
    │   │       └── test_front_end_stack.py
    │   ├── utils
    │   │   ├── __init__.py
    │   │   └── front_end_parameter_enum.py
    │   └── website
    │       ├── .gitignore
    │       ├── README.md
    │       ├── __init__.py
    │       ├── configure_website_environments.py
    │       ├── index.html
    │       ├── jsconfig.json
    │       ├── package.json
    │       ├── public
    │       │   ├── AvailableTrade.png
    │       │   └── favicon.ico
    │       ├── src
    │       │   ├── App.vue
    │       │   ├── assets
    │       │   │   ├── base.css
    │       │   │   └── main.css
    │       │   ├── main.js
    │       │   ├── router
    │       │   │   └── index.js
    │       │   ├── stores
    │       │   │   ├── customer.js
    │       │   │   ├── degrade.js
    │       │   │   └── user_monitor.js
    │       │   └── views
    │       │       ├── AccountOpenView.vue
    │       │       ├── HomeView.vue
    │       │       ├── InsightsView.vue
    │       │       ├── TradeStockView.vue
    │       │       └── UtilitiesView.vue
    │       └── vite.config.js
    ├── recovery
    │   ├── .gitignore
    │   ├── README.md
    │   ├── app.py
    │   ├── cdk.json
    │   ├── requirements-dev.txt
    │   ├── requirements.txt
    │   ├── source.bat
    │   ├── stacks
    │   │   ├── __init__.py
    │   │   ├── orchestration_primary_stack.py
    │   │   ├── orchestration_route53_stack.py
    │   │   └── orchestration_secondary_stack.py
    │   └── tests
    │       ├── __init__.py
    │       └── unit
    │           ├── __init__.py
    │           └── test_recovery_stack.py
    └── trade-stock
        ├── .gitignore
        ├── README.md
        ├── __init__.py
        ├── app.py
        ├── cdk.json
        ├── confirms_api
        │   ├── Dockerfile
        │   ├── confirms_api.py
        │   ├── gunicorn_logging.conf
        │   ├── requirements.txt
        │   └── trade_parameter_name.py
        ├── order_api
        │   ├── Dockerfile
        │   ├── README.md
        │   ├── data_objects.py
        │   ├── gunicorn_logging.conf
        │   ├── order_api.py
        │   ├── requirements.txt
        │   ├── requirements.txt.frozen
        │   ├── response.json
        │   └── trade_parameter_name.py
        ├── requirements-dev.txt
        ├── requirements.txt
        ├── seed
        │   ├── customers.csv
        │   └── stocks.csv
        ├── shell_scripts
        │   ├── create_api_user.sh
        │   ├── curl_confirms_endpoint.sh
        │   ├── curl_order_endpoint.sh
        │   ├── install_admin_client_packages.sh
        │   ├── load_db_admin_session.sh
        │   ├── load_db_order_api_session.sh
        │   ├── order_trade.sh
        │   ├── stress_order_api.sh
        │   └── warm_order_api.sh
        ├── source.bat
        ├── sql_scripts
        │   ├── load_seed_data.sql
        │   └── schema.sql
        ├── test.py
        ├── tests
        │   ├── __init__.py
        │   ├── trade-stock-stress-test.yml
        │   ├── trade_request.json
        │   ├── trade_stock_test_client.py
        │   └── unit
        │       ├── __init__.py
        │       └── test_trade_stock_stack.py
        ├── trade_stock
        │   ├── __init__.py
        │   ├── public_api_stack.py
        │   ├── trade_confirms_stack.py
        │   ├── trade_database.py
        │   ├── trade_database_secondary_stack.py
        │   ├── trade_order_stack.py
        │   └── vpc_stack.py
        └── trade_utils
            ├── __init__.py
            ├── private_lb_extension.py
            ├── trade_parameter_name.py
            └── x_ray_extension.py
/.gitignore:
--------------------------------------------------------------------------------
1 | **/venv
2 | **/.venv
3 | **/*bkp
4 | **/*dtmp
5 | **/*DS_Store
6 | **/*idea
7 | **/.vscode
8 | env.sh
9 | **/node_modules
10 | /package.json
11 | /package-lock.json
12 | **/~*.*x
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # engineering-resilient-systems-on-aws
2 |
3 | This repository holds the companion source code for the O'Reilly book "Engineering Resilient Systems on AWS", ISBN 9781098162429, by Kevin Schwarz, Jennifer Moran, and Nate Bachmeier.
4 |
5 | The source code is now available!
6 |
7 | We'll add updates to this README document when we have changes.
8 |
9 | The Kindle and print versions are available! You can purchase or order both on Amazon: http://amzn.to/4ftNR1j
10 |
11 | 
12 |
--------------------------------------------------------------------------------
/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/cover.png
--------------------------------------------------------------------------------
/env.template.sh:
--------------------------------------------------------------------------------
1 | # copy this file to env.sh, fill in the values, then run `source env.sh` before doing any CDK deployment actions
2 |
3 | export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
4 | export AWS_PRIMARY_REGION=us-east-1
5 | export AWS_SECONDARY_REGION=us-west-2
6 |
7 | export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/src/trade-stock:`pwd`/src/account_open:`pwd`/src/frontend
8 |
9 | export AWS_DOMAIN_NAME= # update with your CloudFront URL (after deploying the front end) or your custom AWS hosted domain
10 |
11 | export JSII_SILENCE_WARNING_UNTESTED_NODE_VERSION=1
--------------------------------------------------------------------------------
/src/.dockerignore:
--------------------------------------------------------------------------------
1 | .env/**
2 | **/__pycache__/**
3 | cdk*
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | **/.env
2 | **/__pycache__
3 | **/cdk.context.json
4 | **/cdk.out
5 | **/*DS_Store
6 | **/*idea
7 | **/cdk.context.json
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/__init__.py
--------------------------------------------------------------------------------
/src/account_open/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | package-lock.json
3 | __pycache__
4 | .pytest_cache
5 | .venv
6 | *.egg-info
7 |
8 | # CDK asset staging directory
9 | .cdk.staging
10 | cdk.out
11 |
--------------------------------------------------------------------------------
/src/account_open/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Welcome to your CDK Python project!
3 |
4 | This is a blank project for CDK development with Python.
5 |
6 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
7 |
8 | This project is set up like a standard Python project. The initialization
9 | process also creates a virtualenv within this project, stored under the `.venv`
10 | directory. To create the virtualenv it assumes that there is a `python3`
11 | (or `python` for Windows) executable in your path with access to the `venv`
12 | package. If for any reason the automatic creation of the virtualenv fails,
13 | you can create the virtualenv manually.
14 |
15 | To manually create a virtualenv on MacOS and Linux:
16 |
17 | ```
18 | $ python3 -m venv .venv
19 | ```
20 |
21 | After the init process completes and the virtualenv is created, you can use the following
22 | step to activate your virtualenv.
23 |
24 | ```
25 | $ source .venv/bin/activate
26 | ```
27 |
28 | If you are on a Windows platform, you would activate the virtualenv like this:
29 |
30 | ```
31 | % .venv\Scripts\activate.bat
32 | ```
33 |
34 | Once the virtualenv is activated, you can install the required dependencies.
35 |
36 | ```
37 | $ pip install -r requirements.txt
38 | ```
39 |
40 | At this point you can now synthesize the CloudFormation template for this code.
41 |
42 | ```
43 | $ cdk synth
44 | ```
45 |
46 | To add additional dependencies, for example other CDK libraries, just add
47 | them to your `setup.py` file and rerun the `pip install -r requirements.txt`
48 | command.
49 |
50 | ## Useful commands
51 |
52 | * `cdk ls` list all stacks in the app
53 | * `cdk synth` emits the synthesized CloudFormation template
54 | * `cdk deploy` deploy this stack to your default AWS account/region
55 | * `cdk diff` compare deployed stack with current state
56 | * `cdk docs` open CDK documentation
57 |
58 | Enjoy!
59 |
--------------------------------------------------------------------------------
/src/account_open/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/account_open/__init__.py
--------------------------------------------------------------------------------
/src/account_open/account_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/account_open/account_utils/__init__.py
--------------------------------------------------------------------------------
/src/account_open/account_utils/account_open_parameter_name.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class AccountOpenParameterName(Enum):
5 | ACCOUNT_OPEN_REGIONAL_ENDPOINT = 'AccountOpenRegionalEndpoint_' # to use this, append the region name
6 | ACCOUNT_OPEN_GLOBAL_ENDPOINT = 'AccountOpenGlobalEndpoint' # to use this, you need a domain and a hosted zone
7 |
--------------------------------------------------------------------------------
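
A minimal sketch of how a caller might resolve the regional endpoint from the parameter names above, assuming the `AccountOpenRegionalEndpoint_<region>` parameter has already been written by ProcessStack (see process_stack.py below); the helper function itself is illustrative and not part of the repository:

```
# Illustrative only: resolve the Account Open endpoint stored in SSM Parameter Store.
# Assumes ProcessStack has written AccountOpenRegionalEndpoint_<region> and that
# src/account_open is on PYTHONPATH (see env.template.sh).
import os

import boto3
from account_utils.account_open_parameter_name import AccountOpenParameterName


def get_account_open_endpoint(region: str) -> str:
    ssm = boto3.client("ssm", region_name=region)
    name = f"{AccountOpenParameterName.ACCOUNT_OPEN_REGIONAL_ENDPOINT.value}{region}"
    return ssm.get_parameter(Name=name)["Parameter"]["Value"]


if __name__ == "__main__":
    print(get_account_open_endpoint(os.getenv("AWS_PRIMARY_REGION", "us-east-1")))
```
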
/src/account_open/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import aws_cdk as cdk
4 | from stacks.process_stack import ProcessStack
5 | from stacks.datasource_stack import DatasourceStack
6 | from stacks.failover_stack import FailoverStack
7 |
8 | app = cdk.App()
9 | account = os.getenv('AWS_ACCOUNT_ID')
10 | primary_region = os.getenv('AWS_PRIMARY_REGION')
11 | secondary_region = os.getenv('AWS_SECONDARY_REGION')
12 | idempotency_table = 'idempotency'
13 | accounts_table = 'brokerage_accounts'
14 |
15 | failover = FailoverStack(app, "FailoverStack", env=cdk.Environment(account=account, region=secondary_region))
16 |
17 | multi_region = DatasourceStack(app, "DatasourceStack",
18 | env=cdk.Environment(account=account, region=primary_region),
19 | secondary_region=secondary_region,
20 | idempotency_table=idempotency_table,
21 | accounts_table=accounts_table)
22 |
23 | primary = ProcessStack(app, "ProcessStack-primary",
24 | env=cdk.Environment(account=account, region=primary_region),
25 | recovery_region=False,
26 | idempotency_table=idempotency_table,
27 | accounts_table=accounts_table, failover_bucket=failover.failover_bucket.bucket_name)
28 |
29 | secondary = ProcessStack(app, "ProcessStack-secondary",
30 | env=cdk.Environment(account=account, region=secondary_region),
31 | recovery_region=True,
32 | idempotency_table=idempotency_table,
33 | accounts_table=accounts_table, failover_bucket=failover.failover_bucket.bucket_name)
34 |
35 | primary.topic.add_subscription(cdk.aws_sns_subscriptions.SqsSubscription(primary.queue))
36 | primary.topic.add_subscription(cdk.aws_sns_subscriptions.SqsSubscription(secondary.queue))
37 |
38 | #secondary.topic.add_subscription(cdk.aws_sns_subscriptions.SqsSubscription(primary.queue))
39 | # add the subscription post deploy if desired, cyclic reference for CDK so can't do it here
40 | secondary.topic.add_subscription(cdk.aws_sns_subscriptions.SqsSubscription(secondary.queue))
41 |
42 | multi_region.idempotency_table.grant_read_write_data(primary.new_account_function)
43 | multi_region.new_account_table.grant_read_write_data(primary.new_account_function)
44 |
45 | multi_region.idempotency_table.replica(secondary_region).grant_read_write_data(secondary.new_account_function)
46 | multi_region.new_account_table.replica(secondary_region).grant_read_write_data(secondary.new_account_function)
47 |
48 | failover.failover_bucket.grant_read(primary.new_account_function)
49 | failover.failover_bucket.grant_read(secondary.new_account_function)
50 |
51 | app.synth()
52 |
--------------------------------------------------------------------------------
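
The commented-out subscription above cannot be created inside the app without a cyclic cross-stack reference, so one option is to add it after deployment through the SDK. A hedged sketch, with placeholder ARNs you would look up from the deployed stacks; note the primary queue's access policy must also allow the secondary topic to deliver to it (the CDK-managed subscriptions handle that part automatically):

```
# Illustrative post-deploy step: subscribe the primary region's queue to the
# secondary region's topic, the subscription that app.py intentionally omits.
# Both ARNs are placeholders; look them up from the deployed stacks first.
import boto3

SECONDARY_TOPIC_ARN = "arn:aws:sns:us-west-2:111111111111:NewAccountTopic-example"  # placeholder
PRIMARY_QUEUE_ARN = "arn:aws:sqs:us-east-1:111111111111:NewAccountQueue-example"    # placeholder

sns = boto3.client("sns", region_name="us-west-2")  # client must target the topic's region
sns.subscribe(
    TopicArn=SECONDARY_TOPIC_ARN,
    Protocol="sqs",
    Endpoint=PRIMARY_QUEUE_ARN,
    Attributes={"RawMessageDelivery": "false"},  # new_account.py parses the SNS envelope
)
```
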
/src/account_open/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "requirements*.txt",
11 | "source.bat",
12 | "**/__init__.py",
13 | "**/__pycache__",
14 | "tests"
15 | ]
16 | },
17 | "context": {
18 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
19 | "@aws-cdk/core:checkSecretUsage": true,
20 | "@aws-cdk/core:target-partitions": [
21 | "aws",
22 | "aws-cn"
23 | ],
24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
26 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
27 | "@aws-cdk/aws-iam:minimizePolicies": true,
28 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
29 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
30 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
31 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
32 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
33 | "@aws-cdk/core:enablePartitionLiterals": true,
34 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
35 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
36 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
39 | "@aws-cdk/aws-route53-patters:useCertificate": true,
40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
46 | "@aws-cdk/aws-redshift:columnId": true,
47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
50 | "@aws-cdk/aws-kms:aliasNameRef": true,
51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true,
54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/src/account_open/functions/new_account.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | from dataclasses import dataclass, field
4 | from uuid import uuid4
5 | from aws_lambda_powertools import Logger
6 | import json
7 | import boto3
8 | from aws_lambda_powertools.utilities.idempotency import (
9 | DynamoDBPersistenceLayer,
10 | IdempotencyConfig,
11 | idempotent_function
12 | )
13 | from aws_lambda_powertools import Tracer
14 | from aws_lambda_powertools import Metrics
15 | from aws_lambda_powertools.metrics import MetricUnit
16 | from aws_lambda_powertools.utilities.typing import LambdaContext
17 | from aws_lambda_powertools.utilities.data_classes import event_source, SQSEvent
18 | from aws_lambda_powertools.utilities.idempotency.serialization.dataclass import DataclassSerializer
19 | from boto3.dynamodb.conditions import Key
20 |
21 |
22 | logger = Logger()
23 | metrics = Metrics()
24 | tracer = Tracer()
25 | accounts_table = os.getenv("ACCOUNTS_TABLE")
26 | idempotency_table = os.getenv("IDEMPOTENCY_TABLE")
27 | recovery_region = os.getenv("RECOVERY_REGION") == "True"
28 | persistence_store = DynamoDBPersistenceLayer(table_name=idempotency_table)
29 | config = IdempotencyConfig(event_key_jmespath="request_token",
30 | raise_on_no_idempotency_key=True,
31 | expires_after_seconds=60 * 60 * 3)
32 | ddb_client = boto3.resource("dynamodb")
33 | s3_client = boto3.client("s3")
34 | failover_bucket = os.getenv("FAILOVER_BUCKET")
35 |
36 |
37 | @tracer.capture_lambda_handler
38 | @metrics.log_metrics
39 | @event_source(data_class=SQSEvent)
40 | def handler(event: SQSEvent, context: LambdaContext):
41 | config.register_lambda_context(context)
42 | recovery_mode = in_recovery_mode()
43 | logger.info(f'recovery_mode: {recovery_mode}')
44 | batch_item_failures = []
45 | sqs_batch_response = {}
46 | active_in_recovery = recovery_region and not recovery_mode
47 | passive_in_primary = not recovery_region and recovery_mode
48 | for record in event.records:
49 | logger.debug(f'record: {record}')
50 | body = json.loads(record["body"])
51 | message = json.loads(body['Message'])
52 | green_test = "greentest_" in message["user_id"]
53 | if (active_in_recovery or passive_in_primary) and not green_test:
54 | logger.info("nothing to do, message must be processed in active region")
55 | table = ddb_client.Table(accounts_table)
56 | logger.debug(f'finding record {message}')
57 | try:
58 | logger.debug('query dynamo') # must throw an error if not found
59 | user_id = message['user_id']
60 | request_token = message['request_token']
61 | response = table.query(
62 | IndexName='user_request',
63 | KeyConditionExpression=Key('user_id').eq(user_id) & Key('request_token').eq(
64 | request_token), ProjectionExpression='account_id')
65 | if len(response['Items']) < 1:
66 | raise Exception("account record not found")
67 | logger.debug(
68 | f'safe to purge message, found {response}')
69 | except Exception as e:
70 | logger.error(f'failed to process: {e}')
71 | batch_item_failures.append({"itemIdentifier": record.message_id})
72 | else: # current region is the active region, process messages
73 | logger.info("active region, attempting to create new account")
74 | try:
75 | # raise Exception("kaboom!!!") # forced failure
76 | logger.debug(f'body: {body}')
77 | account: Account = create_brokerage_account(account_event=message)
78 | except Exception as exc:
79 | logger.error(f'failed to create account: {exc}')
80 | batch_item_failures.append({"itemIdentifier": record.message_id})
81 | metrics.add_metric(name="NewAccountFailure", unit=MetricUnit.Count, value=1)
82 |
83 | sqs_batch_response["batchItemFailures"] = batch_item_failures
84 | return sqs_batch_response
85 |
86 |
87 | def in_recovery_mode():
88 | # ToDo: list has paging, so want to change this to just get the object. it will throw error, so need clean try/except flow
89 | objects = s3_client.list_objects_v2(Bucket=failover_bucket)
90 | for obj in objects.get('Contents', []):
91 | if 'failover.txt' in obj['Key']:
92 | return True
93 | return False
94 |
95 |
96 | @dataclass
97 | class Beneficiary:
98 | name: str
99 | percent: int
100 |
101 |
102 | @dataclass
103 | class Suitability:
104 | liquidity: str
105 | time_horizon: str
106 | risk_tolerance: str
107 |
108 |
109 | @dataclass
110 | class Instructions:
111 | dividends: str
112 |
113 |
114 | @dataclass
115 | class Account:
116 | customer_first_name: str
117 | customer_last_name: str
118 | account_type: str
119 | comment: str
120 | beneficiaries: list[Beneficiary]
121 | suitability: Suitability
122 | instructions: Instructions
123 | request_token: str
124 | user_id: str
125 | account_id: str = field(default_factory=lambda: f"{uuid4()}")
126 |
127 |
128 | @idempotent_function(data_keyword_argument='account_event', config=config, persistence_store=persistence_store,
129 | output_serializer=DataclassSerializer)
130 | def create_brokerage_account(account_event: dict) -> Account:
131 | account: Account = Account(**account_event)
132 | account_serialized = DataclassSerializer(Account).to_dict(data=account)
133 | logger.debug(f'serialized account {account_serialized}')
134 | table = ddb_client.Table(accounts_table)
135 | result = table.put_item(Item=account_serialized)
136 | logger.debug(f'ddb put result{result}')
137 | metrics.add_metric(name="NewAccountOpened", unit=MetricUnit.Count, value=1)
138 | tracer.put_annotation("account_id", account.account_id)
139 | return account
140 |
--------------------------------------------------------------------------------
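
A hedged sketch of the ToDo noted in `in_recovery_mode` above: rather than listing the bucket (which pages), probe the single flag object directly and treat a missing key as "not in recovery":

```
# Illustrative alternative to in_recovery_mode(): HEAD the single failover flag
# object instead of listing the bucket, treating a missing key as "not failed over".
import boto3
from botocore.exceptions import ClientError

s3_client = boto3.client("s3")


def in_recovery_mode(bucket: str, key: str = "failover.txt") -> bool:
    try:
        s3_client.head_object(Bucket=bucket, Key=key)
        return True
    except ClientError as error:
        if error.response["Error"]["Code"] in ("404", "NotFound", "NoSuchKey"):
            return False
        raise  # surface anything else (permissions, throttling) rather than masking it
```
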
/src/account_open/functions/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3
2 | aws_lambda_powertools
--------------------------------------------------------------------------------
/src/account_open/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | pytest==6.2.5
2 | requests
3 |
--------------------------------------------------------------------------------
/src/account_open/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk-lib==2.136.0
2 | constructs>=10.0.0,<11.0.0
3 | aws-lambda-powertools>=2.40.1
--------------------------------------------------------------------------------
/src/account_open/source.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | rem The sole purpose of this script is to make the command
4 | rem
5 | rem source .venv/bin/activate
6 | rem
7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows.
8 | rem On Windows, this command just runs this batch file (the argument is ignored).
9 | rem
10 | rem Now we don't need to document a Windows command for activating a virtualenv.
11 |
12 | echo Executing .venv\Scripts\activate.bat for you
13 | .venv\Scripts\activate.bat
14 |
--------------------------------------------------------------------------------
/src/account_open/stacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/account_open/stacks/__init__.py
--------------------------------------------------------------------------------
/src/account_open/stacks/datasource_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_dynamodb as dynamodb
4 | )
5 | import aws_cdk as cdk
6 | from constructs import Construct
7 |
8 |
9 | class DatasourceStack(Stack):
10 | def __init__(self, scope: Construct, construct_id: str, secondary_region: str,
11 | idempotency_table: str, accounts_table: str, **kwargs) -> None:
12 | super().__init__(scope, construct_id, **kwargs)
13 |
14 | self.idempotency_table = dynamodb.TableV2(
15 | self, 'GlobalIdempotency', table_name=idempotency_table,
16 | partition_key={'name': 'id', 'type': dynamodb.AttributeType.STRING},
17 | deletion_protection=True,
18 | replicas=[dynamodb.ReplicaTableProps(region=secondary_region)],
19 | contributor_insights=True,
20 | removal_policy=cdk.RemovalPolicy.RETAIN,
21 | time_to_live_attribute="expiration",
22 | point_in_time_recovery=True)
23 |
24 | self.new_account_table = dynamodb.TableV2(
25 | self, 'GlobalBrokerageAccounts', table_name=accounts_table,
26 | partition_key={'name': 'user_id',
27 | 'type': dynamodb.AttributeType.STRING},
28 | sort_key={'name': 'account_id',
29 | 'type': dynamodb.AttributeType.STRING},
30 | deletion_protection=True,
31 | replicas=[dynamodb.ReplicaTableProps(region=secondary_region)],
32 | contributor_insights=True,
33 | removal_policy=cdk.RemovalPolicy.RETAIN,
34 | point_in_time_recovery=True,)
35 |
36 | self.new_account_table.add_global_secondary_index(
37 | partition_key=dynamodb.Attribute(name='user_id', type=dynamodb.AttributeType.STRING),
38 | sort_key=dynamodb.Attribute(name='request_token', type=dynamodb.AttributeType.STRING),
39 | index_name='user_request'
40 | )
41 |
--------------------------------------------------------------------------------
/src/account_open/stacks/failover_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_s3 as s3,
4 | aws_iam as iam
5 | )
6 | import aws_cdk as cdk
7 | from constructs import Construct
8 |
9 |
10 | class FailoverStack(Stack):
11 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
12 | super().__init__(scope, construct_id, **kwargs)
13 |
14 | self.failover_bucket = s3.Bucket(self, "failover_bucket",
15 | bucket_name=f'failover-bucket-{self.region}-{self.account}')
16 |
17 | self.failover_bucket.add_to_resource_policy(
18 | iam.PolicyStatement(
19 | principals=[
20 | iam.ServicePrincipal("lambda.amazonaws.com"),
21 | iam.ServicePrincipal("ssm.amazonaws.com")
22 | ],
23 | actions=["s3:PutObject"],
24 | resources=[self.failover_bucket.arn_for_objects("*")],
25 | )
26 | )
27 |
28 | cdk.CfnOutput(self, "Failover Bucket", value=self.failover_bucket.bucket_name)
29 |
--------------------------------------------------------------------------------
/src/account_open/stacks/process_stack.py:
--------------------------------------------------------------------------------
1 | import os
2 | from aws_cdk import (
3 | Stack,
4 | aws_sns as sns,
5 | aws_sqs as sqs,
6 | aws_lambda as lambda_,
7 | aws_lambda_event_sources as eventsources,
8 | aws_iam as iam,
9 | aws_apigateway as apigateway,
10 | aws_logs as logs,
11 | aws_ssm as ssm,
12 | )
13 | import aws_cdk as cdk
14 | from constructs import Construct
15 |
16 |
17 | class ProcessStack(Stack):
18 |
19 | def __init__(self, scope: Construct, construct_id: str, recovery_region: bool,
20 | idempotency_table: str, accounts_table: str, failover_bucket: str, **kwargs) -> None:
21 | super().__init__(scope, construct_id, **kwargs)
22 |
23 | self.topic = sns.Topic(self, "NewAccountTopic", display_name="New Account Opening",
24 | topic_name=cdk.PhysicalName.GENERATE_IF_NEEDED)
25 |
26 | function_timeout_seconds = 5
27 |
28 | dlq = sqs.Queue(self, "DLQ_NewAccountQueue", encryption=sqs.QueueEncryption.UNENCRYPTED,
29 | visibility_timeout=cdk.Duration.seconds(5))
30 | dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=3, queue=dlq)
31 | self.queue = sqs.Queue(self, "NewAccountQueue", dead_letter_queue=dead_letter_queue,
32 | encryption=sqs.QueueEncryption.UNENCRYPTED,
33 | visibility_timeout=cdk.Duration.seconds(
34 | 6 * function_timeout_seconds))
35 |
36 | self.new_account_function = lambda_.Function(
37 | self, "New Account", runtime=lambda_.Runtime.PYTHON_3_9, handler="new_account.handler",
38 | code=lambda_.Code.from_asset(os.path.join(os.path.dirname("./functions/new_account.py"))),
39 | environment={"RECOVERY_REGION": str(recovery_region), 'POWERTOOLS_SERVICE_NAME': "AccountOpen",
40 | 'POWERTOOLS_METRICS_NAMESPACE': 'ResilientBrokerage',
41 | 'IDEMPOTENCY_TABLE': idempotency_table, 'ACCOUNTS_TABLE': accounts_table,
42 | 'FAILOVER_BUCKET': failover_bucket, 'LOG_LEVEL': 'DEBUG'},
43 | tracing=lambda_.Tracing.ACTIVE, timeout=cdk.Duration.seconds(function_timeout_seconds),
44 | log_retention=cdk.aws_logs.RetentionDays.FIVE_DAYS
45 | )
46 | self.new_account_function.add_event_source(
47 | eventsources.SqsEventSource(self.queue, batch_size=10, max_concurrency=15,
48 | report_batch_item_failures=True,
49 | max_batching_window=cdk.Duration.seconds(1)))
50 | self.new_account_function.add_layers(
51 | lambda_.LayerVersion.from_layer_version_arn(
52 | self, id='lambdapowertools',
53 | layer_version_arn=
54 | f"arn:aws:lambda:{cdk.Stack.of(self).region}:017000801446:layer:AWSLambdaPowertoolsPythonV2:45"))
55 |
56 | gateway_execution_role = iam.Role(self, "GatewayExecutionRole",
57 | assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
58 | self.topic.grant_publish(gateway_execution_role)
59 |
60 | api_log_group = logs.LogGroup(self, "NewAccountApiLogs")
61 |
62 | api = apigateway.RestApi(self, "NewAccountApi", endpoint_types=[apigateway.EndpointType.REGIONAL],
63 | default_cors_preflight_options=apigateway.CorsOptions(
64 | allow_methods=['PUT', 'OPTIONS'],
65 | allow_headers=['Content-Type',
66 | 'Cache-Control',
67 | 'Authorization'],
68 | allow_origins=apigateway.Cors.ALL_ORIGINS
69 | ),
70 | deploy_options=apigateway.StageOptions(
71 | stage_name="prod",
72 | access_log_destination=apigateway.LogGroupLogDestination(api_log_group),
73 | logging_level=apigateway.MethodLoggingLevel.INFO,
74 | data_trace_enabled=True,
75 | metrics_enabled=True,
76 | tracing_enabled=True,
77 | access_log_format=apigateway.AccessLogFormat.json_with_standard_fields(caller=True, http_method=True, ip=True,protocol=True,request_time=True, resource_path=True, response_length=True, status=True, user=True),
78 | throttling_rate_limit=100,
79 | throttling_burst_limit=25,
80 | ), cloud_watch_role=True)
81 | sns_integration = apigateway.AwsIntegration(
82 | service="sns",
83 | # path=f"{self.account}/{self.topic.topic_name}"
84 | path=f"{self.account}/{self.topic.topic_name}",
85 | integration_http_method="POST",
86 | options=apigateway.IntegrationOptions(
87 | credentials_role=gateway_execution_role,
88 | timeout=cdk.Duration.seconds(2),
89 | passthrough_behavior=apigateway.PassthroughBehavior.NEVER,
90 | request_parameters={'integration.request.header.Content-Type': "'application/x-www-form-urlencoded'"},
91 | request_templates={
92 | "application/json":
93 | f"Action=Publish&TopicArn=$util.urlEncode('{self.topic.topic_arn}')&Message"
94 | f"=$util.urlEncode($input.body)"
95 | },
96 | integration_responses=[
97 | apigateway.IntegrationResponse(
98 | status_code="200",
99 | response_templates={"application/json": '{"status": "message added to topic"}'},
100 | response_parameters={
101 | 'method.response.header.Access-Control-Allow-Headers': "'Access-Control-Allow-Origin,Content-Length,Content-Type,Date,X-Amz-Apigw-Id,X-Amzn-Requestid,X-Amzn-Trace-Id'",
102 | 'method.response.header.Access-Control-Allow-Methods': "'OPTIONS,PUT'",
103 | 'method.response.header.Access-Control-Allow-Origin': "'*'"} # * for local dev only,
104 | # deploy with proper domain for production
105 | ),
106 | apigateway.IntegrationResponse(
107 | status_code="400",
108 | selection_pattern="^\[Error\].*",
109 | response_templates={
110 | "application/json": "{\"state\":\"error\",\"message\":\"$util.escapeJavaScript($input.path('$.errorMessage'))\"}",
111 | }
112 | )
113 | ]
114 | )
115 | )
116 |
117 | account_model = apigateway.Model(
118 | self, "account-schema",
119 | rest_api=api,
120 | content_type="application/json",
121 | description="validate account open json",
122 | model_name="Account",
123 | schema=apigateway.JsonSchema(
124 | schema=apigateway.JsonSchemaVersion.DRAFT4,
125 | title="AccountRequest",
126 | type=apigateway.JsonSchemaType.OBJECT,
127 | properties={
128 | "request_token": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
129 | "customer_first_name": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
130 | "customer_last_name": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
131 | "account_type": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
132 | "comment": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
133 | "beneficiaries": apigateway.JsonSchema(
134 | type=apigateway.JsonSchemaType.ARRAY,
135 | properties={"name": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
136 | "percentage": apigateway.JsonSchema(type=apigateway.JsonSchemaType.INTEGER)}),
137 | "suitability": {
138 | "liquidity": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
139 | "time_horizon": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
140 | "risk_tolerance": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
141 | },
142 | "instructions": {
143 | "dividends": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING)
144 | }
145 | },
146 | required=["request_token", "user_id", "account_type"]
147 | )
148 | )
149 |
150 | api.root.add_method("PUT", sns_integration, method_responses=[
151 | apigateway.MethodResponse(status_code="200",
152 | response_parameters={"method.response.header.Access-Control-Allow-Headers": True,
153 | "method.response.header.Access-Control-Allow-Methods": True,
154 | "method.response.header.Access-Control-Allow-Origin": True}),
155 | apigateway.MethodResponse(status_code="400")],
156 | api_key_required=False,
157 | request_validator=apigateway.RequestValidator(
158 | self,
159 | "AccountValidator",
160 | rest_api=api,
161 | validate_request_body=True,
162 | validate_request_parameters=False,
163 | request_validator_name="AccountValidation"),
164 | request_models={"application/json": account_model})
165 |
166 | # ToDo create the custom metric for account failures (api count - account create) > 0
167 | # ToDo put the custom metric on a dashboard
168 | # replication latency, function latency, messages visible, and a few other things you'd want to monitor
169 | # api.metric_count
170 |
171 | cdk.CfnOutput(self, "SNS topic", value=self.topic.topic_arn)
172 |
173 | ssm.StringParameter(self, "AccountOpenRegionalEndpoint",
174 | description="Account Open Regional Endpoint",
175 | parameter_name=f"AccountOpenRegionalEndpoint_{self.region}",
176 | string_value=api.url)
177 |
178 | # Write the APIID arn to a ssm parameter named NewAccountAPIID
179 | ssm.StringParameter(self, "NewAccountAPIID",
180 | parameter_name="NewAccountAPIID",
181 | string_value=api.rest_api_id,
182 | )
183 |
--------------------------------------------------------------------------------
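
One hedged way to sketch the two ToDos above is a CloudWatch math expression that compares the API's request count against the `NewAccountOpened` custom metric the Lambda function emits, alarmed and graphed on a dashboard. This fragment would live inside `ProcessStack.__init__` (it uses `self` and the `api` variable); the construct IDs, periods, and FILL-based expression are illustrative choices, not the book's implementation:

```
# Illustrative fragment for the ToDos above; the import goes at the top of process_stack.py.
from aws_cdk import aws_cloudwatch as cloudwatch

api_count = api.metric_count(period=cdk.Duration.minutes(5))
accounts_opened = cloudwatch.Metric(
    namespace="ResilientBrokerage",
    metric_name="NewAccountOpened",
    period=cdk.Duration.minutes(5),
    statistic="Sum",
)
# Requests that reached the API but never produced an account record.
lost_requests = cloudwatch.MathExpression(
    expression="FILL(apiCount, 0) - FILL(opened, 0)",
    using_metrics={"apiCount": api_count, "opened": accounts_opened},
    period=cdk.Duration.minutes(5),
)
lost_requests.create_alarm(
    self, "AccountOpenLostRequestsAlarm",
    threshold=0,
    evaluation_periods=3,
    comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
)
dashboard = cloudwatch.Dashboard(self, "AccountOpenDashboard")
dashboard.add_widgets(
    cloudwatch.GraphWidget(
        title="API requests vs. accounts opened",
        left=[api_count, accounts_opened, lost_requests],
    )
)
```
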
/src/account_open/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/account_open/tests/__init__.py
--------------------------------------------------------------------------------
/src/account_open/tests/account-load-test.yml:
--------------------------------------------------------------------------------
1 | config:
2 | # The target is the deployed Account Open API endpoint,
3 | # passed in via the url variable
4 | target: "{{ url }}"
5 | phases:
6 | - duration: 60
7 | arrivalRate: 1
8 | rampTo: 2
9 | name: Warm up phase
10 | - duration: 60
11 | arrivalRate: 2
12 | rampTo: 4
13 | name: Ramp up load
14 | - duration: 30
15 | arrivalRate: 10
16 | rampTo: 30
17 | name: Spike phase
18 | # Load & configure a couple of useful plugins
19 | # https://docs.art/reference/extensions
20 | plugins:
21 | ensure: { }
22 | apdex: { }
23 | metrics-by-endpoint: { }
24 | apdex:
25 | threshold: 100
26 | ensure:
27 | thresholds:
28 | - http.response_time.p99: 200
29 | - http.response_time.p95: 150
30 | scenarios:
31 | - flow:
32 | - loop:
33 | - put:
34 | url: "/prod/"
35 | json:
36 | customer_first_name: "load"
37 | customer_last_name: "test"
38 | account_type: "performance"
39 | comment: "don't stop me now"
40 | request_token: "{{$randomNumber(1,1000000000000000)}}"
41 | user_id: "loaduser{{$randomNumber(1,10000)}}"
42 | beneficiaries:
43 | - name: "bene1"
44 | percentage: 63
45 | - name: "bene2"
46 | percentage: 27
47 | suitability:
48 | "liquidity": "high"
49 | "time_horizon": "7 years"
50 | "risk_tolerance": "low"
51 | "instructions":
52 | "dividends": "pay-out-all"
53 | expect:
54 | - statusCode: 200
55 | count: 100
--------------------------------------------------------------------------------
/src/account_open/tests/account_invalid.json:
--------------------------------------------------------------------------------
1 | {
2 | "customer_first_name": "kevin",
3 | "customer_last_name": "coding",
4 | "comment": "AWSome Investing - consider account as a uuid generated before submitting & used for idempotency",
5 | "beneficiaries": [
6 | {
7 | "name": "Joe",
8 | "percentage": 75
9 | },
10 | {
11 | "name": "Sarah",
12 | "percentage": 25
13 | }
14 | ],
15 | "suitability": {
16 | "liquidity": "low",
17 | "time_horizon": "20 years",
18 | "risk_tolerance": "high"
19 | },
20 | "instructions": {
21 | "dividends": "reinvest-all"
22 | }
23 | }
--------------------------------------------------------------------------------
/src/account_open/tests/account_poison_pill.json:
--------------------------------------------------------------------------------
1 | {
2 | "customer_first_name": "xxx",
3 | "customer_last_name": "yyyy",
4 | "account_type": "poison pill",
5 | "comment": "you're going to have to purge this message",
6 | "suitability": {
7 | "liquidity": "low",
8 | "time_horizon": "20 years",
9 | "risk_tolerance": "high"
10 | },
11 | "instructions": {
12 | "dividends": "reinvest-all"
13 | }
14 | }
--------------------------------------------------------------------------------
/src/account_open/tests/account_valid.json:
--------------------------------------------------------------------------------
1 | {
2 | "customer_first_name": "kevin",
3 | "customer_last_name": "coding",
4 | "account_type": "moneymaker",
5 | "comment": "AWSome Investing - consider account as a uuid generated before submitting & used for idempotency",
6 | "beneficiaries": [
7 | {
8 | "name": "Joe",
9 | "percentage": 75
10 | },
11 | {
12 | "name": "Sarah",
13 | "percentage": 25
14 | }
15 | ],
16 | "suitability": {
17 | "liquidity": "low",
18 | "time_horizon": "20 years",
19 | "risk_tolerance": "high"
20 | },
21 | "instructions": {
22 | "dividends": "reinvest-all"
23 | }
24 | }
--------------------------------------------------------------------------------
/src/account_open/tests/failover.txt:
--------------------------------------------------------------------------------
1 | Placing this file into the DR failover bucket will cause the system to fail over to the secondary region.
--------------------------------------------------------------------------------
/src/account_open/tests/onboarding_test_client.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.config import Config
3 | import json
4 | import os
5 | import argparse
6 | from argparse import RawTextHelpFormatter
7 | import requests
8 | import random
9 | from uuid import uuid4
10 |
11 | parser = argparse.ArgumentParser(prog="Account Open Test Client",
12 | description="Test Account Open Resiliency",
13 | formatter_class=RawTextHelpFormatter)
14 | parser.add_argument(
15 | "--test",
16 | help='''Choose a test to run
17 | 1/ Submit a valid request.
18 | 2/ Submit an invalid request missing account_type.
19 | 3/ Retries - see text, use CDK deployments.
20 | 4/ Test load throttling.
21 | 5/ Poison pill, test DLQ.
22 | 6/ Switch to secondary region.
23 | 7/ Switch back to primary.
24 | 8/ Test in recovery/green region.''',
25 | type=int,
26 | required=True)
27 | parser.add_argument("--request_token",
28 | help="Use specified request token instead of generating at random. Good for idempotency testing.",
29 | type=str,
30 | default='')
31 | parser.add_argument("--user_id",
32 | help="Create accounts for specified user_id instead of randomly generating a user_id.",
33 | type=str,
34 | default='')
35 |
36 |
37 | def in_recovery_mode(s3_client, failover_bucket_name):
38 | objects = s3_client.list_objects_v2(Bucket=failover_bucket_name)
39 | for obj in objects.get('Contents', []):
40 | if 'failover.txt' in obj['Key']:
41 | return True
42 | return False
43 |
44 |
45 | def get_url(primary):
46 | region = 'AWS_PRIMARY_REGION' if primary is True else 'AWS_SECONDARY_REGION'
47 | stack = "ProcessStack-primary" if primary is True else "ProcessStack-secondary"
48 | endpoint = None
49 | cf = boto3.client("cloudformation", config=Config(region_name=os.getenv(region)))
50 | stacks = cf.describe_stacks(
51 | StackName=stack) # might use an arg to toggle primary or recovery
52 | outputs = stacks["Stacks"][0]["Outputs"]
53 | for output in outputs:
54 | if "NewAccountApiEndpoint" in output["OutputKey"]:
55 | endpoint = output["OutputValue"]
56 | print(endpoint)
57 | return endpoint
58 |
59 |
60 | def request_account(file, endpoint):
61 | payload = json.load(open(file))
62 | payload["request_token"] = args.request_token if len(
63 | args.request_token) > 2 else f"{uuid4()}"
64 | payload["user_id"] = args.user_id if len(args.user_id) > 2 else f"user{random.randrange(999)}"
65 | print(payload)
66 | r = requests.put(endpoint, json=payload)
67 | print(r)
68 |
69 |
70 | args = parser.parse_args()
71 |
72 | test = args.test
73 | filename = "failover.txt"
74 |
75 | if test == 1:
76 | request_account('account_valid.json', get_url(True))
77 | elif test == 2:
78 | request_account('account_invalid.json', get_url(True))
79 | elif test == 3:
80 | print("Use CDK deployments to test retries, this test client does not support the use case")
81 | elif test == 4:
82 | command = "artillery run account-load-test.yml --variables '{ \"url\": \"{url}\" }'".replace(
83 | "{url}", get_url(True).replace('/prod/', ''))
84 | print(command)
85 | os.system(command)
86 | elif test == 5:
87 | request_account('account_poison_pill.json', get_url(True))
88 | elif test == 6:
89 | s3 = boto3.client("s3", config=Config(region_name=os.getenv('AWS_SECONDARY_REGION'))) # secondary?
90 | failover_bucket = f"failover-bucket-{os.getenv('AWS_SECONDARY_REGION')}-{os.getenv('AWS_ACCOUNT_ID')}"
91 | print(failover_bucket)
92 | if in_recovery_mode(s3, failover_bucket):
93 | print("already in secondary region")
94 | else:
95 |
96 | with open(filename, "rb") as f:
97 | response = s3.upload_fileobj(f, failover_bucket, filename)
98 | print("Switched to secondary region")
99 | elif test == 7:
100 | s3 = boto3.client("s3", config=Config(region_name=os.getenv('AWS_SECONDARY_REGION'))) # secondary?
101 | failover_bucket = f"failover-bucket-{os.getenv('AWS_SECONDARY_REGION')}-{os.getenv('AWS_ACCOUNT_ID')}"
102 | if not in_recovery_mode(s3, failover_bucket):
103 | print("Primary region is already active")
104 | else:
105 | s3.delete_object(Bucket=failover_bucket, Key=filename)
106 | print("Switched to primary region")
107 | elif test == 8:
108 | request_account('account_valid.json', get_url(False))
109 | else:
110 | print("invalid test case, please try again")
111 |
--------------------------------------------------------------------------------
/src/account_open/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/account_open/tests/unit/__init__.py
--------------------------------------------------------------------------------
/src/account_open/tests/unit/test_ch_x_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as core
2 | import aws_cdk.assertions as assertions
3 |
4 | from ch_x.ch_x_stack import ChXStack
5 |
6 |
7 | # example tests. To run these tests, uncomment this file along with the example
8 | # resource in stacks/process_stack.py
9 | def test_sqs_queue_created():
10 | app = core.App()
11 | stack = ChXStack(app, "ch-x")
12 | template = assertions.Template.from_stack(stack)
13 |
14 | # template.has_resource_properties("AWS::SQS::Queue", {
15 | # "VisibilityTimeout": 300
16 | # })
17 |
--------------------------------------------------------------------------------
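
The placeholder above still imports the `ch_x` module generated by `cdk init`. A hedged sketch of what a working assertion against the real ProcessStack might look like, assuming tests run with `src/account_open` on PYTHONPATH; the account and region are placeholders, and the asserted visibility timeout is the 6 x 5-second value configured in process_stack.py:

```
# Illustrative replacement for the placeholder test: synthesize ProcessStack and
# assert the main queue's visibility timeout (6 x the 5-second function timeout).
import aws_cdk as core
import aws_cdk.assertions as assertions
from stacks.process_stack import ProcessStack


def test_new_account_queue_created():
    app = core.App()
    stack = ProcessStack(
        app, "ProcessStack-test",
        env=core.Environment(account="111111111111", region="us-east-1"),  # placeholders
        recovery_region=False,
        idempotency_table="idempotency",
        accounts_table="brokerage_accounts",
        failover_bucket="failover-bucket-test",
    )
    template = assertions.Template.from_stack(stack)
    template.has_resource_properties("AWS::SQS::Queue", {"VisibilityTimeout": 30})
```
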
/src/environment-setup/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/environment-setup/__init__.py
--------------------------------------------------------------------------------
/src/environment-setup/app.py:
--------------------------------------------------------------------------------
1 | import os
2 | import aws_cdk as cdk
3 | from stacks.hello_resilience_stack import HelloResilienceStack
4 |
5 | account = os.getenv('AWS_ACCOUNT_ID')
6 | primary_region = os.getenv('AWS_PRIMARY_REGION')
7 |
8 | app = cdk.App()
9 | HelloResilienceStack(
10 | app, "HelloResilienceStack",
11 | env=cdk.Environment(account=account, region=primary_region))
12 | app.synth()
13 |
--------------------------------------------------------------------------------
/src/environment-setup/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "requirements*.txt",
11 | "source.bat",
12 | "**/__init__.py",
13 | "**/__pycache__",
14 | "tests"
15 | ]
16 | },
17 | "context": {
18 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
19 | "@aws-cdk/core:checkSecretUsage": true,
20 | "@aws-cdk/core:target-partitions": [
21 | "aws",
22 | "aws-cn"
23 | ],
24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
26 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
27 | "@aws-cdk/aws-iam:minimizePolicies": true,
28 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
29 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
30 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
31 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
32 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
33 | "@aws-cdk/core:enablePartitionLiterals": true,
34 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
35 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
36 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
39 | "@aws-cdk/aws-route53-patters:useCertificate": true,
40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
46 | "@aws-cdk/aws-redshift:columnId": true,
47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
50 | "@aws-cdk/aws-kms:aliasNameRef": true,
51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true,
54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/src/environment-setup/lambda/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/environment-setup/lambda/__init__.py
--------------------------------------------------------------------------------
/src/environment-setup/lambda/hello_resilience.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
4 | def handler(event, context):
5 | print('request: {}'.format(json.dumps(event)))
6 | return {
7 | 'statusCode': 200,
8 | 'headers': {
9 | 'Content-Type': 'text/plain'
10 | },
11 | 'body': 'Hello, Resilience! You have hit {}\n'.format(event['path'])
12 | }
13 |
--------------------------------------------------------------------------------
/src/environment-setup/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk-lib==2.99.0
2 | constructs>=10.0.0,<11.0.0
3 | boto3>=1.36
4 | requests>=2.32
5 |
--------------------------------------------------------------------------------
/src/environment-setup/stacks/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/environment-setup/stacks/hello_resilience_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_lambda as _lambda,
4 | aws_apigateway as apigateway
5 | )
6 | import aws_cdk as cdk
7 | from constructs import Construct
8 |
9 |
10 | class HelloResilienceStack(Stack):
11 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
12 | super().__init__(scope, construct_id, **kwargs)
13 |
14 | hello_resilience = _lambda.Function(
15 | self, "HelloResilience",
16 | runtime=_lambda.Runtime.PYTHON_3_9,
17 | code=_lambda.Code.from_asset("lambda"),
18 | handler='hello_resilience.handler'
19 | )
20 |
21 | hello_resilience_api = apigateway.RestApi(
22 | self, "HelloResilienceApi",
23 | deploy_options=apigateway.StageOptions(
24 | data_trace_enabled=True,
25 | tracing_enabled=True
26 | ))
27 |
28 | hello_resilience_endpoint = hello_resilience_api.root.add_resource(
29 | "getHello")
30 | hello_resilience_endpoint.add_method(
31 | "GET",
32 | apigateway.LambdaIntegration(hello_resilience))
33 |
34 | cdk.CfnOutput(self, "HelloResilienceEndpoint",
35 | value=hello_resilience_api.url_for_path("/getHello/"))
36 |
--------------------------------------------------------------------------------
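
A hedged smoke-test sketch for the stack above, in the spirit of the `boto3` and `requests` pins in environment-setup/requirements.txt: it resolves the `HelloResilienceEndpoint` output from the deployed stack and calls it. The script itself is not part of the repository:

```
# Illustrative smoke test: look up the HelloResilienceEndpoint output from the
# deployed HelloResilienceStack and issue a GET against it.
# Assumes env.sh has been sourced so AWS_PRIMARY_REGION is set.
import os

import boto3
import requests

cf = boto3.client("cloudformation", region_name=os.getenv("AWS_PRIMARY_REGION"))
outputs = cf.describe_stacks(StackName="HelloResilienceStack")["Stacks"][0]["Outputs"]
endpoint = next(o["OutputValue"] for o in outputs if o["OutputKey"] == "HelloResilienceEndpoint")

response = requests.get(endpoint, timeout=10)
print(response.status_code, response.text.strip())
```
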
/src/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | package-lock.json
3 | __pycache__
4 | .pytest_cache
5 | .venv
6 | *.egg-info
7 |
8 | # CDK asset staging directory
9 | .cdk.staging
10 | cdk.out
11 |
--------------------------------------------------------------------------------
/src/frontend/AdobeStock_4K.mov:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/AdobeStock_4K.mov
--------------------------------------------------------------------------------
/src/frontend/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Welcome to your CDK Python project!
3 |
4 | This is a blank project for CDK development with Python.
5 |
6 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
7 |
8 | This project is set up like a standard Python project. The initialization
9 | process also creates a virtualenv within this project, stored under the `.venv`
10 | directory. To create the virtualenv it assumes that there is a `python3`
11 | (or `python` for Windows) executable in your path with access to the `venv`
12 | package. If for any reason the automatic creation of the virtualenv fails,
13 | you can create the virtualenv manually.
14 |
15 | To manually create a virtualenv on MacOS and Linux:
16 |
17 | ```
18 | $ python3 -m venv .venv
19 | ```
20 |
21 | After the init process completes and the virtualenv is created, you can use the following
22 | step to activate your virtualenv.
23 |
24 | ```
25 | $ source .venv/bin/activate
26 | ```
27 |
28 | If you are on a Windows platform, you would activate the virtualenv like this:
29 |
30 | ```
31 | % .venv\Scripts\activate.bat
32 | ```
33 |
34 | Once the virtualenv is activated, you can install the required dependencies.
35 |
36 | ```
37 | $ pip install -r requirements.txt
38 | ```
39 |
40 | At this point you can now synthesize the CloudFormation template for this code.
41 |
42 | ```
43 | $ cdk synth
44 | ```
45 |
46 | To add additional dependencies, for example other CDK libraries, just add
47 | them to your `setup.py` file and rerun the `pip install -r requirements.txt`
48 | command.
49 |
50 | ## Useful commands
51 |
52 | * `cdk ls` list all stacks in the app
53 | * `cdk synth` emits the synthesized CloudFormation template
54 | * `cdk deploy` deploy this stack to your default AWS account/region
55 | * `cdk diff` compare deployed stack with current state
56 | * `cdk docs` open CDK documentation
57 |
58 | Enjoy!
59 |
--------------------------------------------------------------------------------
/src/frontend/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/__init__.py
--------------------------------------------------------------------------------
/src/frontend/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import aws_cdk as cdk
4 | from stacks.front_end_canary_stack import FrontEndCanaryStack
5 | from stacks.front_end_website_stack import FrontEndWebsiteStack
6 | from stacks.front_end_secondary_bucket_stack import FrontEndSecondaryBucketStack
7 | from stacks.front_end_rum_stack import FrontEndRumStack
8 |
9 | account = os.getenv('AWS_ACCOUNT_ID')
10 | primary_region = os.getenv('AWS_PRIMARY_REGION')
11 | secondary_region = os.getenv('AWS_SECONDARY_REGION')
12 | website_domain_name = os.getenv('AWS_DOMAIN_NAME')
13 | primary_environment = cdk.Environment(account=account, region=primary_region)
14 | secondary_environment = cdk.Environment(account=account, region=secondary_region)
15 |
16 | app = cdk.App()
17 |
18 | FrontEndSecondaryBucketStack(app, "FrontEnd-BucketStack-Secondary", env=secondary_environment)
19 | FrontEndWebsiteStack(app, "FrontEnd-WebsiteStack", env=primary_environment, domain_name=website_domain_name, secondary_region=secondary_region)
20 | FrontEndCanaryStack(app, "FrontEnd-CanaryStack-Primary", env=primary_environment, endpoint_url=website_domain_name)
21 | FrontEndCanaryStack(app, "FrontEnd-CanaryStack-Secondary", env=secondary_environment, endpoint_url=website_domain_name)
22 | FrontEndRumStack(app, "FrontEnd-RumStack", env=primary_environment, domain_name=website_domain_name)
23 |
24 | app.synth()
25 |
--------------------------------------------------------------------------------
/src/frontend/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "requirements*.txt",
11 | "source.bat",
12 | "**/__init__.py",
13 | "**/__pycache__",
14 | "tests"
15 | ]
16 | },
17 | "context": {
18 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
19 | "@aws-cdk/core:checkSecretUsage": true,
20 | "@aws-cdk/core:target-partitions": [
21 | "aws",
22 | "aws-cn"
23 | ],
24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
26 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
27 | "@aws-cdk/aws-iam:minimizePolicies": true,
28 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
29 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
30 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
31 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
32 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
33 | "@aws-cdk/core:enablePartitionLiterals": true,
34 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
35 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
36 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
39 | "@aws-cdk/aws-route53-patters:useCertificate": true,
40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
46 | "@aws-cdk/aws-redshift:columnId": true,
47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
50 | "@aws-cdk/aws-kms:aliasNameRef": true,
51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true,
54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
60 | "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true,
61 | "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true,
62 | "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true,
63 | "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/src/frontend/python/index.py:
--------------------------------------------------------------------------------
1 | from selenium.webdriver.common.by import By
2 | from aws_synthetics.selenium import synthetics_webdriver as syn_webdriver
3 | from aws_synthetics.common import synthetics_logger as logger
4 | import os
5 |
6 | def main():
7 | url = os.environ['ENDPOINT_URL']
8 |
9 | # Set screenshot option
10 | takeScreenshot = True
11 |
12 | browser = syn_webdriver.Chrome()
13 | browser.get(url)
14 |
15 | if takeScreenshot:
16 | browser.save_screenshot("loaded.png")
17 |
18 | response_code = syn_webdriver.get_http_response(url)
19 | if not response_code or response_code < 200 or response_code > 299:
20 | raise Exception("Failed to load page!")
21 | logger.info("Canary successfully executed.")
22 |
23 |
24 | def handler(event, context):
25 | # user defined log statements using synthetics_logger
26 | logger.info("Selenium Python heartbeat canary.")
27 | return main()
28 |
--------------------------------------------------------------------------------
/src/frontend/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | pytest==6.2.5
2 |
--------------------------------------------------------------------------------
/src/frontend/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk-lib==2.144.0
2 | constructs>=10.0.0,<11.0.0
3 | aws_solutions_constructs.aws_cloudfront_s3
4 | aws_solutions_constructs.aws_wafwebacl_cloudfront
5 |
6 |
--------------------------------------------------------------------------------
/src/frontend/source.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | rem The sole purpose of this script is to make the command
4 | rem
5 | rem source .venv/bin/activate
6 | rem
7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows.
8 | rem On Windows, this command just runs this batch file (the argument is ignored).
9 | rem
10 | rem Now we don't need to document a Windows command for activating a virtualenv.
11 |
12 | echo Executing .venv\Scripts\activate.bat for you
13 | .venv\Scripts\activate.bat
14 |
--------------------------------------------------------------------------------
/src/frontend/stacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/stacks/__init__.py
--------------------------------------------------------------------------------
/src/frontend/stacks/front_end_canary_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_synthetics as synthetics,
4 | aws_iam as iam,
5 | aws_s3 as s3,
6 | Duration,
7 | RemovalPolicy
8 | )
9 | from aws_cdk.aws_cloudwatch import (
10 | Alarm,
11 | ComparisonOperator,
12 | Statistic,
13 | )
14 | from constructs import Construct
15 | import os
16 |
17 | class FrontEndCanaryStack(Stack):
18 |
19 |
20 | def __init__(self, scope: Construct, id: str, endpoint_url: str, **kwargs) -> None:
21 | super().__init__(scope, id, **kwargs)
22 |
23 | assets_bucket = s3.Bucket(self, 'CanaryAssetsBucket',
24 | # bucket_name='canary-assets-bucket',
25 | versioned=False,
26 | encryption=s3.BucketEncryption.S3_MANAGED,
27 | removal_policy=RemovalPolicy.DESTROY,
28 | auto_delete_objects=True
29 | )
30 |
31 | canary_role = iam.Role(self, f"canary-{self.account}-{self.region}-role",
32 | assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
33 | description='Canary IAM Role'
34 | )
35 |
36 | canary_role.add_to_policy(iam.PolicyStatement(
37 | resources=['*'],
38 | actions=['s3:ListAllMyBuckets'],
39 | effect=iam.Effect.ALLOW
40 | ))
41 |
42 | canary_role.add_to_policy(iam.PolicyStatement(
43 | resources=[f"{assets_bucket.bucket_arn}/*"],
44 | actions=["kms:GenerateDataKey"],
45 | effect=iam.Effect.ALLOW
46 | ))
47 |
48 | canary_role.add_to_policy(
49 | iam.PolicyStatement(
50 | resources=[
51 | f"{assets_bucket.bucket_arn}",
52 | f"{assets_bucket.bucket_arn}/*"
53 | ],
54 | actions=['s3:*'],
55 | effect=iam.Effect.ALLOW,
56 | )
57 | )
58 |
59 | canary_role.add_to_policy(
60 | iam.PolicyStatement(
61 | resources=['*'],
62 | actions=['cloudwatch:PutMetricData'],
63 | effect=iam.Effect.ALLOW,
64 | conditions={
65 | "StringEquals": {
66 | "cloudwatch:namespace": "CloudWatchSynthetics",
67 | },
68 | },
69 | )
70 | )
71 | with open('python/index.py', 'r') as file:
72 | code_as_string = file.read()
73 | canary = synthetics.Canary(self, 'FrontEndCanary',
74 | canary_name=f"canary-web-{self.region}",
75 | role=canary_role,
76 | schedule=synthetics.Schedule.rate(Duration.minutes(5)),
77 | artifacts_bucket_location={'bucket': assets_bucket},
78 | environment_variables={'ENDPOINT_URL': f"https://{endpoint_url}"},
79 | runtime = synthetics.Runtime.SYNTHETICS_PYTHON_SELENIUM_3_0,
80 | test=synthetics.Test.custom(
81 | code=synthetics.Code.from_inline(code_as_string),
82 | handler='index.handler'
83 | )
84 | )
85 |
86 | # Get the metric for successful canary runs
87 | success_metric = canary.metric_success_percent()
88 |
89 | # Set up CloudWatch Alarm for success rate
90 | Alarm(
91 | self,
92 | "CanarySuccessAlarm",
93 | alarm_name=f"Synthetics-Alarm-canary-web-{self.region}",
94 | metric=success_metric,
95 | evaluation_periods=2, # Evaluate over 2 periods
96 | threshold=90, # Threshold for success rate (90%)
97 | comparison_operator=ComparisonOperator.LESS_THAN_THRESHOLD,
98 | alarm_description="Canary successful run rate fell below 90%",
99 | )
100 |
101 |
102 |
103 |
--------------------------------------------------------------------------------
/src/frontend/stacks/front_end_rum_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as cdk
2 | from aws_cdk import (
3 | Stack,
4 | aws_rum as rum,
5 | aws_cognito as cognito,
6 | aws_iam as iam,
7 | aws_cloudwatch as cloudwatch
8 | )
9 | from constructs import Construct
10 |
11 |
12 | class FrontEndRumStack(Stack):
13 |
14 | def __init__(self, scope: Construct, construct_id: str, domain_name: str, **kwargs) -> None:
15 | super().__init__(scope, construct_id, **kwargs)
16 |
17 | rum_identity_pool = cognito.CfnIdentityPool(self, 'FrontEndRumPool', allow_unauthenticated_identities=True)
18 | rum_federated_role = iam.Role(self, "RumRole",
19 | assumed_by=iam.FederatedPrincipal("cognito-identity.amazonaws.com", {
20 | 'StringEquals': {
21 | "cognito-identity.amazonaws.com:aud": rum_identity_pool.ref
22 | },
23 | "ForAnyValue:StringLike": {
24 | "cognito-identity.amazonaws.com:amr": "unauthenticated"
25 | }
26 | }, assume_role_action="sts:AssumeRoleWithWebIdentity"))
27 |
28 | local_application_monitor_name = "local_resilient_front_end"
29 | deployed_application_monitor_name = "deployed_resilient_front_end"
30 | rum_federated_role.add_to_policy(iam.PolicyStatement(
31 | effect=iam.Effect.ALLOW, actions=['rum:PutRumEvents'],
32 | resources=[f'arn:aws:rum:{self.region}:{self.account}:appmonitor/{local_application_monitor_name}',
33 | f'arn:aws:rum:{self.region}:{self.account}:appmonitor/{deployed_application_monitor_name}'])
34 | )
35 |
36 | cognito.CfnIdentityPoolRoleAttachment(self, 'FrontEndRumRole', identity_pool_id=rum_identity_pool.ref,
37 | roles={
38 | "unauthenticated": rum_federated_role.role_arn
39 | }
40 | )
41 |
42 | local = rum.CfnAppMonitor(self, "FrontEndRumMonitorLocal", domain='localhost',
43 | name=local_application_monitor_name,
44 | app_monitor_configuration=rum.CfnAppMonitor.AppMonitorConfigurationProperty(
45 | allow_cookies=True,
46 | enable_x_ray=True,
47 | session_sample_rate=1,
48 | telemetries=['errors', 'performance', 'http'],
49 | identity_pool_id=rum_identity_pool.ref,
50 | guest_role_arn=rum_federated_role.role_arn
51 | ),
52 | cw_log_enabled=True)
53 | cdk.CfnOutput(self, "LocalRum-AppMonitorId", value=local.attr_id)
54 |
55 | local_js_error_metric = cloudwatch.Metric(metric_name="JsErrorCount", namespace="AWS/RUM",
56 | dimensions_map={"application_name": local.name})
57 | cloudwatch.Alarm(self, "LocalRumJavascriptErrorsAlarm", metric=local_js_error_metric,
58 | threshold=5,
59 | evaluation_periods=3,
60 | datapoints_to_alarm=1)
61 |
62 | hosted_domain = False
63 |
64 | if len(domain_name) > 1 and "cloudfront" not in domain_name:
65 | hosted_domain = True
66 |
67 | if hosted_domain:
68 | deployed = rum.CfnAppMonitor(self, "FrontEndRumMonitor", domain=domain_name,
69 | name=deployed_application_monitor_name,
70 | app_monitor_configuration=rum.CfnAppMonitor.AppMonitorConfigurationProperty(
71 | allow_cookies=True,
72 | enable_x_ray=True,
73 | session_sample_rate=1,
74 | telemetries=['errors', 'performance', 'http'],
75 | identity_pool_id=rum_identity_pool.ref,
76 | guest_role_arn=rum_federated_role.role_arn
77 | ),
78 | cw_log_enabled=True)
79 | cdk.CfnOutput(self, "DeployedRum-AppMonitorId", value=deployed.attr_id)
80 | prod_js_error_metric = cloudwatch.Metric(metric_name="JsErrorCount", namespace="AWS/RUM",
81 | dimensions_map={"application_name": deployed.name})
82 | cloudwatch.Alarm(self, "ProdRumJavascriptErrorsAlarm", metric=prod_js_error_metric,
83 | threshold=5,
84 | evaluation_periods=3,
85 | datapoints_to_alarm=1)
86 |
87 |
--------------------------------------------------------------------------------
/src/frontend/stacks/front_end_secondary_bucket_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | aws_s3 as s3,
3 | aws_iam as iam,
4 | )
5 | import aws_cdk as cdk
6 | from constructs import Construct
7 |
8 |
9 | class FrontEndSecondaryBucketStack(cdk.Stack):
10 |
11 | def __init__(self, scope: Construct, id: str, **kwargs) -> None:
12 | super().__init__(scope, id, **kwargs)
13 |
14 | website_bucket = s3.Bucket(self, 'SecondaryS3Bucket',
15 | bucket_name=f'website-{self.account}-{self.region}',
16 | versioned=True,
17 | removal_policy=cdk.RemovalPolicy.DESTROY,
18 | auto_delete_objects=True,
19 | enforce_ssl=True,
20 | server_access_logs_bucket=s3.Bucket(self,f"ServerAccessLogsBucket-{self.account}-{self.region}")
21 | )
22 |
23 | website_bucket.add_to_resource_policy(iam.PolicyStatement(
24 | effect=iam.Effect.ALLOW,
25 | principals=[iam.ServicePrincipal('cloudfront.amazonaws.com')],
26 | actions=["s3:GetObject"],
27 | resources=[website_bucket.arn_for_objects("*")]
28 | ))
29 |
--------------------------------------------------------------------------------
/src/frontend/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/tests/__init__.py
--------------------------------------------------------------------------------
/src/frontend/tests/frontend-website-load-test.yml:
--------------------------------------------------------------------------------
1 | config:
2 |   # The target URL is supplied at runtime via the "url" variable
3 |   # (the test client passes the deployed website endpoint)
4 | target: "{{ url }}"
5 | phases:
6 | - duration: 60
7 | arrivalRate: 1
8 | rampTo: 2
9 | name: Warm up phase
10 | - duration: 60
11 | arrivalRate: 2
12 | rampTo: 4
13 | name: Ramp up load
14 | - duration: 30
15 | arrivalRate: 10
16 | rampTo: 30
17 | name: Spike phase
18 | # Load & configure a couple of useful plugins
19 | # https://docs.art/reference/extensions
20 | plugins:
21 | ensure: { }
22 | apdex: { }
23 | metrics-by-endpoint: { }
24 | apdex:
25 | threshold: 100
26 | ensure:
27 | thresholds:
28 | - http.response_time.p99: 100
29 | - http.response_time.p95: 75
30 | scenarios:
31 | - flow:
32 | - loop:
33 | - get:
34 | url: "/"
35 | expect:
36 | - statusCode: 200
37 | count: 200
38 |
--------------------------------------------------------------------------------
/src/frontend/tests/onboarding_test_client.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.config import Config
3 | import json
4 | import os
5 | import argparse
6 | from argparse import RawTextHelpFormatter
7 | import requests
8 | import random
9 | from uuid import uuid4
10 |
11 | parser = argparse.ArgumentParser(prog="Front End Website Test Client",
12 | description="Test Front End Website Resiliency",
13 | formatter_class=RawTextHelpFormatter)
14 | parser.add_argument(
15 | "--test",
16 | help='''Choose a test to run
17 | 1/ Load Test''',
18 | type=int,
19 | required=True)
20 |
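# Example (assumed invocation): run from src/frontend/tests so frontend-website-load-test.yml
# resolves, with Artillery installed and AWS credentials/regions exported:
#   python onboarding_test_client.py --test 1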
21 | def get_url(primary):
22 | region = 'AWS_PRIMARY_REGION'
23 | print(region)
24 | stack = "FrontEnd-WebsiteStack"
25 | print(stack)
26 | endpoint = None
27 | cf = boto3.client("cloudformation", config=Config(region_name=os.getenv(region)))
28 | stacks = cf.describe_stacks(StackName=stack) # might use an arg to toggle primary or recovery
29 | outputs = stacks["Stacks"][0]["Outputs"]
30 | for output in outputs:
31 | if "WebsiteURL" in output["OutputKey"]:
32 | endpoint = output["OutputValue"]
33 | print(endpoint)
34 | return endpoint
35 |
36 |
37 | args = parser.parse_args()
38 |
39 | test = args.test
40 |
41 | if test == 1:
42 | command = "artillery run frontend-website-load-test.yml --variables '{ \"url\": \"{url}\" }'".replace(
43 | "{url}", get_url(True))
44 | print(command)
45 | os.system(command)
46 | else:
47 | print("invalid test case, please try again")
48 |
--------------------------------------------------------------------------------
/src/frontend/tests/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "boto3": "^0.0.1",
4 | "requests": "^0.3.0"
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/src/frontend/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/tests/unit/__init__.py
--------------------------------------------------------------------------------
/src/frontend/tests/unit/test_front_end_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as core
2 | import aws_cdk.assertions as assertions
3 |
4 | from front_end.front_end_stack import FrontEndStack
5 |
6 | # example tests. To run these tests, uncomment this file along with the example
7 | # resource in front_end/front_end_stack.py
8 | def test_sqs_queue_created():
9 | app = core.App()
10 | stack = FrontEndStack(app, "front-end")
11 | template = assertions.Template.from_stack(stack)
12 |
13 | # template.has_resource_properties("AWS::SQS::Queue", {
14 | # "VisibilityTimeout": 300
15 | # })
16 |
--------------------------------------------------------------------------------
/src/frontend/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/utils/__init__.py
--------------------------------------------------------------------------------
/src/frontend/utils/front_end_parameter_enum.py:
--------------------------------------------------------------------------------
1 | # front end constants we need to share between stacks, configurations, etc.
2 |
3 |
4 | VITE_TRADE_STOCK_API_ENDPOINT = ""
--------------------------------------------------------------------------------
/src/frontend/website/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | .DS_Store
12 | **/dist*
13 | dist-ssr
14 | coverage
15 | *.local
16 |
17 | /cypress/videos/
18 | /cypress/screenshots/
19 |
20 | # Editor directories and files
21 | .vscode/*
22 | !.vscode/extensions.json
23 | .idea
24 | *.suo
25 | *.ntvs*
26 | *.njsproj
27 | *.sln
28 | *.sw?
29 |
30 | *.tsbuildinfo
31 |
32 | # environment specific Vite config
33 | *.env.*
34 |
35 |
--------------------------------------------------------------------------------
/src/frontend/website/README.md:
--------------------------------------------------------------------------------
1 | # portal-integrated-ui
2 |
3 | TODO: you need to set up environment configs
4 | 1. for local development, you'll build the config by hand
5 | 2. for deployments, you'll build it with Python during the CDK deployment, fetching all the parameters from the Parameter Store; this requires every service to register its endpoints in the Parameter Store. This helps resilience because the deployment fails if expected parameters are not configured, and parameters are never managed by hand (except locally). Once you code this, you can probably reuse the script for local too (see the example below).
6 |
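A minimal sketch of generating the `.env.development` and `.env.production` files with the helper script in this directory (assuming your AWS credentials and the `AWS_PRIMARY_REGION` variable are already exported):

```sh
# use the regional API endpoints registered in Parameter Store
python configure_website_environments.py n

# or build against a custom domain (the domain shown is hypothetical)
python configure_website_environments.py y example.com
```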
7 | ## Customize configuration
8 |
9 | https://cli.vuejs.org/guide/mode-and-env.html#modes
10 | See [Vite Configuration Reference](https://vitejs.dev/config/).
11 |
12 | ## build out CDK deployment configuration, sync with Jen on this.
13 |
14 | ## Project Setup
15 |
16 | ```sh
17 | npm install
18 | ```
19 |
20 | ### Compile and Hot-Reload for Development
21 |
22 | ```sh
23 | npm run dev
24 | ```
25 |
26 | ### Compile and Minify for Production
27 |
28 | ```sh
29 | npm run build
30 | ```
31 |
--------------------------------------------------------------------------------
/src/frontend/website/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/website/__init__.py
--------------------------------------------------------------------------------
/src/frontend/website/configure_website_environments.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import boto3
3 | from botocore.config import Config
4 | import os
5 | from trade_utils.trade_parameter_name import TradeParameterName
6 | from account_utils.account_open_parameter_name import AccountOpenParameterName
7 | from enum import Enum
8 |
9 |
10 | class ViteConfigEnum(Enum):
11 | VITE_NEW_ACCOUNT_ENDPOINT = 'VITE_NEW_ACCOUNT_ENDPOINT'
12 | VITE_TRADE_STOCK_ENDPOINT = 'VITE_TRADE_STOCK_ENDPOINT'
13 | VITE_RUM_APPLICATION_ID = 'VITE_RUM_APPLICATION_ID'
14 |
15 |
16 | def generate_config_files(parameter_api, parameter_domain_name):
17 | """Generates .env.development and .env.production files based on parameters.
18 |
19 | Args:
20 | parameter_api (str): 'y' to use custom domain name, 'n' to use regional endpoints
21 | parameter_domain_name (str): Domain name
22 | """
23 |
24 | lines = []
25 |
26 | ssm_client = boto3.client('ssm')
27 | primary_region = os.getenv('AWS_PRIMARY_REGION')
28 |
29 | ## Check if parameter_api is equal to 'n'
30 | if parameter_api == 'n':
31 | account_open_endpoint = ssm_client.get_parameter(
32 | Name=f"{AccountOpenParameterName.ACCOUNT_OPEN_REGIONAL_ENDPOINT.value}{primary_region}")[
33 | 'Parameter']['Value']
34 | trade_order_endpoint = ssm_client.get_parameter(
35 | Name=TradeParameterName.TRADE_ORDER_API_ENDPOINT.value)['Parameter']['Value']
36 | else:
37 |         if not parameter_domain_name:
38 | print("Error: Domain name is required when using parameter = 'y'")
39 | sys.exit(1)
40 | account_open_endpoint = 'https://api-account.' + parameter_domain_name + '/prod/'
41 | trade_order_endpoint = 'https://api-trade.' + parameter_domain_name + '/resilient/'
42 |
43 | lines.append(f"{ViteConfigEnum.VITE_NEW_ACCOUNT_ENDPOINT.value}={account_open_endpoint}\n")
44 | lines.append(f"{ViteConfigEnum.VITE_TRADE_STOCK_ENDPOINT.value}={trade_order_endpoint}\n")
45 |
46 |     cloudformation_client = boto3.client("cloudformation", config=Config(region_name=primary_region))
47 |     outputs = []
48 |     try:
49 |         stack = cloudformation_client.describe_stacks(StackName="FrontEnd-RumStack")
50 |         outputs = stack["Stacks"][0]["Outputs"]
51 |     except Exception:
52 |         print("warning: RUM stack not deployed")
53 |
54 | dev_lines = lines.copy()
55 | prod_lines = lines.copy()
56 |
57 | for output in outputs:
58 | if "LocalRumAppMonitorId" in output["OutputKey"]:
59 | endpoint = output["OutputValue"]
60 | dev_lines.append(f"{ViteConfigEnum.VITE_RUM_APPLICATION_ID.value}={endpoint}\n")
61 |
62 | if "DeployedRumAppMonitorId" in output["OutputKey"]:
63 | endpoint = output["OutputValue"]
64 | prod_lines.append(f"{ViteConfigEnum.VITE_RUM_APPLICATION_ID.value}={endpoint}\n")
65 |
66 | with open(".env.development", "w") as dev_config:
67 | dev_config.writelines(dev_lines)
68 |
69 | with open(".env.production", "w") as prod_config:
70 | prod_config.writelines(prod_lines)
71 |
72 |
73 | if __name__ == "__main__":
74 |     if len(sys.argv) < 2:  # Check for at least one parameter
75 |         print("Error: At least one parameter is required ('y'/'n' for using a custom domain name)")
76 |         sys.exit(1)
77 | elif len(sys.argv) == 2: # If only one parameter is given
78 | generate_config_files(sys.argv[1], None) # Pass only custom endpoint creation flag
79 | else:
80 | generate_config_files(sys.argv[1], sys.argv[2]) # Pass both parameters
--------------------------------------------------------------------------------
/src/frontend/website/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | AvailableTrade
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/src/frontend/website/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "paths": {
4 | "@/*": ["./src/*"]
5 | }
6 | },
7 | "exclude": ["node_modules", "dist"]
8 | }
9 |
--------------------------------------------------------------------------------
/src/frontend/website/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "portal-integrated-ui",
3 | "version": "0.0.0",
4 | "private": true,
5 | "type": "module",
6 | "scripts": {
7 | "configure": "python configure_website_environments.py",
8 | "dev": "vite",
9 | "build": "vite build",
10 | "preview": "vite preview"
11 | },
12 | "dependencies": {
13 | "aws-rum-web": "^1.17.2",
14 | "cors": "^2.8.5",
15 | "dotenv": "^16.4.5",
16 | "pinia": "^2.1.7",
17 | "process": "^0.11.10",
18 | "uuid": "^9.0.1",
19 | "vee-validate": "^4.12.6",
20 | "vue": "^3.4.15",
21 | "vue-router": "^4.2.5",
22 | "yup": "^1.4.0"
23 | },
24 | "devDependencies": {
25 | "@types/uuid": "^9.0.8",
26 | "@vitejs/plugin-vue": "^5.0.3",
27 | "vite": "^5.0.11"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/src/frontend/website/public/AvailableTrade.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/website/public/AvailableTrade.png
--------------------------------------------------------------------------------
/src/frontend/website/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/frontend/website/public/favicon.ico
--------------------------------------------------------------------------------
/src/frontend/website/src/App.vue:
--------------------------------------------------------------------------------
1 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | Resilient Systems on AWS
16 | |
17 | |
18 | |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/src/frontend/website/src/assets/base.css:
--------------------------------------------------------------------------------
1 | /* color palette from */
2 | :root {
3 | --vt-c-white: #ffffff;
4 | --vt-c-white-soft: #f8f8f8;
5 | --vt-c-white-mute: #f2f2f2;
6 |
7 | --vt-c-black: #181818;
8 | --vt-c-black-soft: #222222;
9 | --vt-c-black-mute: #282828;
10 |
11 | --vt-c-indigo: #2c3e50;
12 |
13 | --vt-c-divider-light-1: rgba(60, 60, 60, 0.29);
14 | --vt-c-divider-light-2: rgba(60, 60, 60, 0.12);
15 | --vt-c-divider-dark-1: rgba(84, 84, 84, 0.65);
16 | --vt-c-divider-dark-2: rgba(84, 84, 84, 0.48);
17 |
18 | --vt-c-text-light-1: var(--vt-c-indigo);
19 | --vt-c-text-light-2: rgba(60, 60, 60, 0.66);
20 | --vt-c-text-dark-1: var(--vt-c-white);
21 | --vt-c-text-dark-2: rgba(235, 235, 235, 0.64);
22 | }
23 |
24 | /* semantic color variables for this project */
25 | :root {
26 | --color-background: var(--vt-c-white);
27 | --color-background-soft: var(--vt-c-white-soft);
28 | --color-background-mute: var(--vt-c-white-mute);
29 |
30 | --color-border: var(--vt-c-divider-light-2);
31 | --color-border-hover: var(--vt-c-divider-light-1);
32 |
33 | --color-heading: var(--vt-c-text-light-1);
34 | --color-text: var(--vt-c-text-light-1);
35 |
36 | --section-gap: 160px;
37 | }
38 |
39 | @media (prefers-color-scheme: dark) {
40 | :root {
41 | --color-background: var(--vt-c-black);
42 | --color-background-soft: var(--vt-c-black-soft);
43 | --color-background-mute: var(--vt-c-black-mute);
44 |
45 | --color-border: var(--vt-c-divider-dark-2);
46 | --color-border-hover: var(--vt-c-divider-dark-1);
47 |
48 | --color-heading: var(--vt-c-text-dark-1);
49 | --color-text: var(--vt-c-text-dark-2);
50 | }
51 | }
52 |
53 | *,
54 | *::before,
55 | *::after {
56 | box-sizing: border-box;
57 | margin: 0;
58 | font-weight: normal;
59 | }
60 |
61 | body {
62 | min-height: 100vh;
63 | color: var(--color-text);
64 | background: var(--color-background);
65 | transition:
66 | color 0.5s,
67 | background-color 0.5s;
68 | line-height: 1.6;
69 | font-family:
70 | Inter,
71 | -apple-system,
72 | BlinkMacSystemFont,
73 | 'Segoe UI',
74 | Roboto,
75 | Oxygen,
76 | Ubuntu,
77 | Cantarell,
78 | 'Fira Sans',
79 | 'Droid Sans',
80 | 'Helvetica Neue',
81 | sans-serif;
82 | font-size: 15px;
83 | text-rendering: optimizeLegibility;
84 | -webkit-font-smoothing: antialiased;
85 | -moz-osx-font-smoothing: grayscale;
86 | }
87 |
--------------------------------------------------------------------------------
/src/frontend/website/src/assets/main.css:
--------------------------------------------------------------------------------
1 | @import './base.css';
2 |
3 | #app {
4 | max-width: 1280px;
5 | margin: 0 auto;
6 | padding: 2rem;
7 | font-weight: normal;
8 | }
9 |
10 | .page-header {
11 | color: midnightblue;
12 | }
13 |
14 | .header-nav {
15 | font-size: 22px;
16 | font-weight: bold;
17 | }
18 |
19 | .error {
20 | color: crimson;
21 | font-weight: bold;
22 | }
23 |
24 | a,
25 | .green {
26 | text-decoration: none;
27 | color: hsla(245, 100%, 21%, 1);
28 | transition: 0.4s;
29 | padding: 3px;
30 | }
31 |
32 | @media (min-width: 1024px) {
33 | body {
34 | display: block;
35 | place-items: center;
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/frontend/website/src/main.js:
--------------------------------------------------------------------------------
1 | import './assets/main.css'
2 |
3 | import { createApp } from 'vue'
4 | import { createPinia } from 'pinia'
5 |
6 | import App from './App.vue'
7 | import router from './router'
8 |
9 | const app = createApp(App)
10 |
11 | app.use(createPinia())
12 | app.use(router)
13 |
14 | app.mount('#app')
15 |
--------------------------------------------------------------------------------
/src/frontend/website/src/router/index.js:
--------------------------------------------------------------------------------
1 | import { createRouter, createWebHistory } from 'vue-router'
2 |
3 | const router = createRouter({
4 | history: createWebHistory(import.meta.env.BASE_URL),
5 | routes: [
6 | {
7 | path: '/',
8 | name: 'home',
9 | component: () => import('../views/HomeView.vue')
10 | },
11 | {
12 | path: '/insights',
13 | name: 'insights',
14 | component: () => import('../views/InsightsView.vue')
15 | },
16 | {
17 | path: '/account_open',
18 | name: 'account_open',
19 | component: () => import('../views/AccountOpenView.vue')
20 | },
21 | {
22 | path: '/trade_stock',
23 | name: 'trade_stock',
24 | component: () => import('../views/TradeStockView.vue')
25 | },
26 | {
27 | path: '/utilities',
28 | name: 'utilities',
29 | component: () => import('../views/UtilitiesView.vue')
30 | }
31 | ]
32 | })
33 |
34 | export default router
35 |
--------------------------------------------------------------------------------
/src/frontend/website/src/stores/customer.js:
--------------------------------------------------------------------------------
1 | import { defineStore} from "pinia";
2 |
3 |
4 | export const useCustomerStore = defineStore('customer', {
5 | state: () => ({
6 | first_name: '', last_name: '', registered: false, account_request_token: '', user_id: ''
7 | }),
8 | getters: {
9 | firstName(state) {
10 | return state.first_name;
11 | },
12 | lastName(state) {
13 | return state.last_name;
14 | },
15 | accountReqeustToken(state) {
16 | return state.account_request_token;
17 | },
18 | userId(state) {
19 | return state.user_id;
20 | },
21 | isRegistered(state) {
22 | return state.registered;
23 | }
24 | },
25 | actions: {
26 | refresh(customer_record) {
27 | console.log("refresh", customer_record);
28 | this.first_name = customer_record.first_name;
29 | this.last_name = customer_record.last_name;
30 | this.account_request_token = customer_record.request_token;
31 | this.user_id = customer_record.user_id;
32 | },
33 | confirmRegistration() {
34 | this.registered = true;
35 | }
36 | },
37 | })
--------------------------------------------------------------------------------
/src/frontend/website/src/stores/degrade.js:
--------------------------------------------------------------------------------
1 | import {defineStore} from "pinia";
2 |
3 | export const useDegradingStore = defineStore('degrade', {
4 | state: () => ({
5 | account_open_available: true, account_open_failure_count: 0
6 | }),
7 | getters: {
8 | accountOpenAvailable(state) {
9 | return state.account_open_available;
10 | }
11 | },
12 | actions: {
13 |         monitorAccountOpenAvailability() {
14 |             // run one heartbeat immediately (it updates availability itself), then poll every five seconds
15 |             this.accountOpenHeartbeat()
16 |             setInterval(this.accountOpenHeartbeat, 5000)
17 |         },
18 | async accountOpenHeartbeat() {
19 | let ok = true
20 | try {
21 | let response = await this.optionsHeartbeat(import.meta.env.VITE_NEW_ACCOUNT_ENDPOINT)
22 | ok = response.ok
23 | } catch (error) {
24 | console.error(error)
25 | ok = false
26 | //this.account_open_available = false;
27 | }
28 | if (ok) {
29 | this.account_open_available = true;
30 | this.account_open_failure_count = 0;
31 | } else {
32 | this.account_open_failure_count += 1;
33 | if (this.account_open_failure_count > 5) {
34 | this.account_open_available = false;
35 | }
36 | }
37 | },
38 |         async optionsHeartbeat(endpoint) {
39 |             // attach an abort signal so the heartbeat fails fast instead of hanging
40 |             const controller = new AbortController();
41 |             const timeout = 1000;
42 |             setTimeout(() => controller.abort(), timeout)
43 |             return fetch(endpoint, {
44 |                 method: "OPTIONS",
45 |                 mode: "cors",
46 |                 cache: "no-cache",
47 |                 signal: controller.signal,
48 |             })
49 |         }
50 | },
51 | })
--------------------------------------------------------------------------------
/src/frontend/website/src/stores/user_monitor.js:
--------------------------------------------------------------------------------
1 | import {ref, computed} from 'vue'
2 | import {defineStore} from 'pinia'
3 | import {AwsRum} from 'aws-rum-web';
4 |
5 | const config = {
6 | sessionSampleRate: 1,
7 | identityPoolId: "us-east-1:7d6deffb-1f1d-4bb9-8266-2d1e88d8016d",
8 | endpoint: "https://dataplane.rum.us-east-1.amazonaws.com",
9 | telemetries: ["errors", "performance", "http"],
10 | allowCookies: true,
11 | enableXRay: true
12 | }
13 | const APPLICATION_ID = import.meta.env.VITE_RUM_APPLICATION_ID;
14 | const APPLICATION_VERSION = '1.0.0';
15 | const APPLICATION_REGION = 'us-east-1';
16 | export const useUserMonitorStore = defineStore('user_monitor', {
17 | state: () => ({
18 | aws_rum: new AwsRum(APPLICATION_ID, APPLICATION_VERSION,APPLICATION_REGION,config)
19 | }),
20 | getters: {
21 | monitor(state) {
22 | return state.aws_rum;
23 | }
24 | },
25 | actions: {
26 | /**
27 | * Manually record a page view.
28 |      * Rum records page views added to HTML 5 history by default, so it is rare you'd need to do this
29 | * @param page
30 | */
31 | recordPageView(page) {
32 | console.log("recording page", page)
33 | this.aws_rum.recordPageView(page)
34 | },
35 |
36 |     /**
37 | * Record errors.
38 | * Allows you to monitor, measure and alarm on UI errors.
39 | * Use this either with the vuejs onErrorCaptured lifecycle hook, or in catch blocks.
40 | * It is very common that you'd need to use this.
41 | * @param error
42 | */
43 | recordError(error) {
44 | console.log("recording page", error)
45 | this.aws_rum.recordError(error)
46 | }
47 |
48 | }
49 | })
50 |
--------------------------------------------------------------------------------
/src/frontend/website/src/views/AccountOpenView.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
Welcome {{ customer.firstName }} {{ customer.lastName }}, you're ready to trade!
8 |
9 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 | Our account open service is currently unavailable. We'll be back soon.
137 | In the meantime, you can still place trades, gain insights and use utilities.
138 | If you need to open your account now, you can still do so with our agent-based telephone support.
139 | Please call (555) GET-WISE [(555) 438-9473]. Hold times may vary.
140 |
141 |
142 |
143 |
144 |
145 |
--------------------------------------------------------------------------------
/src/frontend/website/src/views/HomeView.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
Welcome to the AvailableTrade electronic trading application
8 |
You are currently registered as: {{ customer.firstName }}
9 |
To get started, open a new account
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/src/frontend/website/src/views/InsightsView.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
Current Customer: {{ customer.firstName }}
8 |
This page is a placeholder for Trading Insights
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/src/frontend/website/src/views/TradeStockView.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
Hello {{ customer.firstName }} {{ customer.lastName }}
6 |
You are ready to place a trade. For demonstration purposes, several of your trade parameters are fixed.
7 | Simply specify how many shares you'd like to buy, then submit the trade.
8 |
{{ submit_failure }}
9 |
{{ trade_message }}
10 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | To get started with trading, open an account, then come back here.
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/src/frontend/website/src/views/UtilitiesView.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
You are currently registered as: {{ customer.firstName }}
8 |
This page throws a JavaScript error. How can you monitor, track and address client-side errors?
9 |
Your application is being served from {{ response }}
10 |
11 |
12 |
13 |
14 |
15 |
59 |
--------------------------------------------------------------------------------
/src/frontend/website/vite.config.js:
--------------------------------------------------------------------------------
1 | import { fileURLToPath, URL } from 'node:url'
2 |
3 | import { defineConfig } from 'vite'
4 | import vue from '@vitejs/plugin-vue'
5 | // https://vitejs.dev/config/
6 | export default defineConfig({
7 | plugins: [
8 | vue()
9 | ],
10 | resolve: {
11 | alias: {
12 | '@': fileURLToPath(new URL('./src', import.meta.url))
13 | }
14 | },
15 | server: {
16 | cors: true
17 | },
18 | build: {
19 | minify: false
20 | }
21 | })
22 |
--------------------------------------------------------------------------------
/src/recovery/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | package-lock.json
3 | __pycache__
4 | .pytest_cache
5 | .venv
6 | *.egg-info
7 |
8 | # CDK asset staging directory
9 | .cdk.staging
10 | cdk.out
11 |
--------------------------------------------------------------------------------
/src/recovery/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Welcome to your CDK Python project!
3 |
4 | This is a blank project for CDK development with Python.
5 |
6 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
7 |
8 | This project is set up like a standard Python project. The initialization
9 | process also creates a virtualenv within this project, stored under the `.venv`
10 | directory. To create the virtualenv it assumes that there is a `python3`
11 | (or `python` for Windows) executable in your path with access to the `venv`
12 | package. If for any reason the automatic creation of the virtualenv fails,
13 | you can create the virtualenv manually.
14 |
15 | To manually create a virtualenv on MacOS and Linux:
16 |
17 | ```
18 | $ python3 -m venv .venv
19 | ```
20 |
21 | After the init process completes and the virtualenv is created, you can use the following
22 | step to activate your virtualenv.
23 |
24 | ```
25 | $ source .venv/bin/activate
26 | ```
27 |
28 | If you are on a Windows platform, you would activate the virtualenv like this:
29 |
30 | ```
31 | % .venv\Scripts\activate.bat
32 | ```
33 |
34 | Once the virtualenv is activated, you can install the required dependencies.
35 |
36 | ```
37 | $ pip install -r requirements.txt
38 | ```
39 |
40 | At this point you can now synthesize the CloudFormation template for this code.
41 |
42 | ```
43 | $ cdk synth
44 | ```
45 |
46 | To add additional dependencies, for example other CDK libraries, just add
47 | them to your `requirements.txt` file and rerun the `pip install -r requirements.txt`
48 | command.
49 |
50 | ## Useful commands
51 |
52 | * `cdk ls` list all stacks in the app
53 | * `cdk synth` emits the synthesized CloudFormation template
54 | * `cdk deploy` deploy this stack to your default AWS account/region
55 | * `cdk diff` compare deployed stack with current state
56 | * `cdk docs` open CDK documentation
57 |
58 | Enjoy!
59 |
--------------------------------------------------------------------------------
/src/recovery/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import aws_cdk as cdk
4 | from stacks.orchestration_primary_stack import OrchestrationPrimaryStack
5 | from stacks.orchestration_secondary_stack import OrchestrationSecondaryStack
6 | from stacks.orchestration_route53_stack import OrchestrationRoute53Stack
7 | app = cdk.App()
8 |
9 | account = os.getenv('AWS_ACCOUNT_ID')
10 | primary_region = os.getenv('AWS_PRIMARY_REGION')
11 | secondary_region = os.getenv('AWS_SECONDARY_REGION')
12 | website_domain_name = os.getenv('AWS_DOMAIN_NAME')
13 |
14 | primary_environment = cdk.Environment(account=account, region=primary_region)
15 | secondary_environment = cdk.Environment(account=account, region=secondary_region)
16 |
17 | OrchestrationSecondaryStack(app, "Orchestration-Secondary-Stack", env=secondary_environment, domain_name=website_domain_name)
18 | OrchestrationPrimaryStack(app, "Orchestration-Primary-Stack", env=primary_environment, domain_name=website_domain_name)
19 | OrchestrationRoute53Stack(app, "Orchestration-Route53-Primary-Stack", env=primary_environment, domain_name=website_domain_name, is_primary=True)
20 | OrchestrationRoute53Stack(app, "Orchestration-Route53-Secondary-Stack", env=secondary_environment, domain_name=website_domain_name, is_primary=False)
21 |
22 | app.synth()
23 |
--------------------------------------------------------------------------------
/src/recovery/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "requirements*.txt",
11 | "source.bat",
12 | "**/__init__.py",
13 | "**/__pycache__",
14 | "tests"
15 | ]
16 | },
17 | "context": {
18 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
19 | "@aws-cdk/core:checkSecretUsage": true,
20 | "@aws-cdk/core:target-partitions": [
21 | "aws",
22 | "aws-cn"
23 | ],
24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
26 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
27 | "@aws-cdk/aws-iam:minimizePolicies": true,
28 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
29 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
30 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
31 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
32 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
33 | "@aws-cdk/core:enablePartitionLiterals": true,
34 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
35 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
36 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
39 | "@aws-cdk/aws-route53-patters:useCertificate": true,
40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
46 | "@aws-cdk/aws-redshift:columnId": true,
47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
50 | "@aws-cdk/aws-kms:aliasNameRef": true,
51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true,
54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
60 | "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true,
61 | "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true,
62 | "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true,
63 | "@aws-cdk/aws-codepipeline:defaultPipelineTypeToV2": true,
64 | "@aws-cdk/aws-kms:reduceCrossAccountRegionPolicyScope": true,
65 | "@aws-cdk/aws-eks:nodegroupNameAttribute": true,
66 | "@aws-cdk/aws-ec2:ebsDefaultGp3Volume": true,
67 | "@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": true
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/src/recovery/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | pytest==6.2.5
2 |
--------------------------------------------------------------------------------
/src/recovery/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk-lib==2.144.0
2 | constructs>=10.0.0,<11.0.0
3 | boto3
4 | botocore
5 | PyYAML
--------------------------------------------------------------------------------
/src/recovery/source.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | rem The sole purpose of this script is to make the command
4 | rem
5 | rem source .venv/bin/activate
6 | rem
7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows.
8 | rem On Windows, this command just runs this batch file (the argument is ignored).
9 | rem
10 | rem Now we don't need to document a Windows command for activating a virtualenv.
11 |
12 | echo Executing .venv\Scripts\activate.bat for you
13 | .venv\Scripts\activate.bat
14 |
--------------------------------------------------------------------------------
/src/recovery/stacks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/recovery/stacks/__init__.py
--------------------------------------------------------------------------------
/src/recovery/stacks/orchestration_primary_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_certificatemanager as acm,
4 | aws_ssm as ssm,
5 | aws_apigateway as apigateway,
6 | )
7 | from constructs import Construct
8 |
9 | class OrchestrationPrimaryStack(Stack):
10 | def __init__(self, scope: Construct, id: str, domain_name: str, **kwargs) -> None:
11 | super().__init__(scope, id, **kwargs)
12 |
13 | ## Retrieve ACM Certificate for custom domain
14 | certificate = acm.Certificate.from_certificate_arn(
15 | self, "Certificate",
16 | certificate_arn=ssm.StringParameter.value_for_string_parameter(
17 | self, "CertificateARN"
18 | ),
19 | )
20 |
21 |         ## Retrieve the ssm parameter named NewAccountAPIID
22 | account_api_id = ssm.StringParameter.value_for_string_parameter(
23 | self, "NewAccountAPIID"
24 | )
25 |
26 | ## Get a reference to the APIG
27 | account_api = apigateway.RestApi.from_rest_api_id(
28 | self, "NewAccountApi-Primary",
29 | rest_api_id=account_api_id,
30 | )
31 |
32 | ## Create the custom domain name
33 | account_api.add_domain_name("NewAccountCustomDomain-Primary",
34 | domain_name=f"api-account.{domain_name}",
35 | certificate=certificate,
36 | )
37 |
38 |         ## Retrieve the ssm parameter named TradeStockAPIID
39 | trade_api_id = ssm.StringParameter.value_for_string_parameter(
40 | self, "TradeStockAPIID"
41 | )
42 |
43 | ## Get a reference to the APIG
44 | trade_api = apigateway.RestApi.from_rest_api_id(
45 | self, "TradeStockAPI-Primary",
46 | rest_api_id=trade_api_id,
47 | )
48 |
49 | ## Create the custom domain name
50 | trade_api.add_domain_name("TradeStockCustomDomain-Primary",
51 | domain_name=f"api-trade.{domain_name}",
52 | certificate=certificate,
53 | )
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/src/recovery/stacks/orchestration_route53_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | aws_route53 as route53,
4 | )
5 | from constructs import Construct
6 |
7 | import boto3
8 |
9 | class OrchestrationRoute53Stack(Stack):
10 | def __init__(self, scope: Construct, id: str, domain_name: str, is_primary: bool, **kwargs) -> None:
11 | super().__init__(scope, id, **kwargs)
12 |
13 | try:
14 | # Function to fetch API Gateway info
15 | def get_api_gateway_info(custom_domain_name):
16 | api_gw_client = boto3.client('apigateway', region_name=self.region) # Create client with correct region
17 | domain_name_info = api_gw_client.get_domain_name(domainName=custom_domain_name)
18 | return (
19 | domain_name_info.get('regionalDomainName'),
20 | domain_name_info.get('regionalHostedZoneId')
21 | )
22 |
23 | account_regional_domain_name, account_regional_hosted_zone = get_api_gateway_info(f"api-account.{domain_name}")
24 | trade_regional_domain_name, trade_regional_hosted_zone = get_api_gateway_info(f"api-trade.{domain_name}")
25 | hosted_zone = route53.HostedZone.from_lookup(self, "HostedZone", domain_name=domain_name)
26 | if is_primary:
27 | ## This deploys but fails for Resource handler returned message: "Invalid request provided: AWS::Route53::HealthCheck" (RequestToken: 7876d7e6-097c-8f18-a982-ecc66dc635e8, HandlerErrorCode: InvalidRequest)
28 | health_check = route53.CfnHealthCheck(
29 | self,
30 | "AvailableTradeCloudWatchHealthCheck",
31 | health_check_config=route53.CfnHealthCheck.HealthCheckConfigProperty(
32 | type="CLOUDWATCH_METRIC",
33 | alarm_identifier=route53.CfnHealthCheck.AlarmIdentifierProperty(
34 | name="AvailableTradeFailoverAlarm",
35 | region="us-west-2"
36 | ),
37 | insufficient_data_health_status="LastKnownStatus", # Or choose 'HEALTHY' or 'UNHEALTHY'
38 | ),
39 | )
40 | # Create record sets (loop for DRYness)
41 | for subdomain, regional_domain_name, regional_hosted_zone_id in [
42 | ("api-account", account_regional_domain_name, account_regional_hosted_zone),
43 | ("api-trade", trade_regional_domain_name, trade_regional_hosted_zone),
44 | ]:
45 | # Use ApiGateway helper for automatic alias target creation
46 | target = route53.CfnRecordSet.AliasTargetProperty(
47 | dns_name=regional_domain_name,
48 | hosted_zone_id=regional_hosted_zone_id,
49 | evaluate_target_health=True if is_primary else False
50 | )
51 |
52 | ## Create the primary record set for both api-account and api-trade
53 | route53.CfnRecordSet(
54 | self, f"{subdomain.replace('-', '')}Record",
55 | hosted_zone_id=hosted_zone.hosted_zone_id,
56 | name=f"{subdomain}.{domain_name}",
57 | type="A",
58 | failover="PRIMARY" if is_primary else "SECONDARY",
59 | health_check_id=health_check.ref if is_primary else None,
60 | set_identifier=f"{subdomain.replace('-', '')}Record-" + self.region,
61 | alias_target=target
62 | )
63 | except Exception as e:
64 | print(f"An unexpected error occurred: {e}")
65 |
--------------------------------------------------------------------------------
/src/recovery/stacks/orchestration_secondary_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | Stack,
3 | Duration,
4 | aws_certificatemanager as acm,
5 | aws_route53 as route53,
6 | aws_ssm as ssm,
7 | aws_apigateway as apigateway,
8 | aws_sns as sns,
9 | aws_cloudwatch as cloudwatch,
10 | aws_cloudwatch_actions as cw_actions,
11 | aws_iam as iam,
12 | aws_s3 as s3
13 | )
14 | from constructs import Construct
15 | import os
16 | import yaml
17 |
18 | class OrchestrationSecondaryStack(Stack):
19 | def __init__(self, scope: Construct, id: str, domain_name: str, **kwargs) -> None:
20 | super().__init__(scope, id, **kwargs)
21 |
22 |         ## Retrieve the secondary cluster ARN to use in the SSM document
23 | secondary_cluster_arn = ssm.StringParameter.value_for_string_parameter(
24 | self, "trade_rds_secondary_cluster_arn"
25 | )
26 | ## Create the bucket name
27 | bucket_name = f"failover-bucket-{os.getenv('AWS_SECONDARY_REGION')}-{os.getenv('AWS_ACCOUNT_ID')}"
28 |
29 | # Retrieve bucket by name
30 | bucket = s3.Bucket.from_bucket_name(self, "S3FailoverBucket", bucket_name)
31 |
32 | # IAM role for SSM Automation
33 | ssm_automation_role = iam.Role(
34 | self, "SSMAutomationRole",
35 | assumed_by=iam.ServicePrincipal("ssm.amazonaws.com"),
36 | description="Role to allow SSM Automation to run documents and access S3",
37 | )
38 |
39 | # Allow SSM Automation to execute the specified document
40 | ssm_automation_role.add_to_policy(
41 | iam.PolicyStatement(
42 | actions=["ssm:StartAutomationExecution"],
43 | resources=["*"],
44 | )
45 | )
46 |
47 | # Allow the SSM document to upload objects to the S3 bucket
48 | ssm_automation_role.add_to_policy(
49 | iam.PolicyStatement(
50 | actions=["s3:PutObject"],
51 | resources=[
52 | bucket.bucket_arn, # Allow access to the bucket itself
53 | f"{bucket.bucket_arn}/*" # Allow access to all objects within the bucket
54 | ],
55 | )
56 | )
57 | # Allow the SSM document to execute the failover automation
58 | ssm_automation_role.add_to_policy(
59 | iam.PolicyStatement(
60 | actions=["rds:SwitchoverGlobalCluster", "rds:FailoverGlobalCluster"],
61 | resources=["*"],
62 | )
63 | )
64 | # Allow the SSM document to execute the failover automation
65 | ssm_automation_role.add_to_policy(
66 | iam.PolicyStatement(
67 | actions=["cloudwatch:PutMetricData"],
68 | resources=["*"],
69 | )
70 | )
71 |
72 | yaml_doc = f""" # Use an f-string to interpolate the variable
73 | schemaVersion: '0.3'
74 | description: "Failover Document"
75 | assumeRole: "{ssm_automation_role.role_arn}"
76 | mainSteps:
77 | - name: SwitchoverGlobalCluster
78 | action: aws:executeAwsApi
79 | nextStep: SToPFile
80 | inputs:
81 | Service: rds
82 | Api: SwitchoverGlobalCluster
83 | GlobalClusterIdentifier: global-trade-cluster
84 | TargetDbClusterIdentifier: {secondary_cluster_arn}
85 | - name: SToPFile
86 | action: aws:executeScript
87 | nextStep: PutMetricData
88 | inputs:
89 | Runtime: python3.10
90 | Handler: script_handler
91 | Script: |
92 | def script_handler(event, context):
93 | import boto3
94 | # Create the text file
95 | with open("failover.txt", "w") as f:
96 | f.write("This is a test file for failover.")
97 | # Upload the file to the S3 bucket
98 | s3 = boto3.client('s3')
99 | bucket_name = "{bucket_name}" # Use the bucket name from the CDK variable
100 | file_name = "failover.txt"
101 | s3.upload_file(file_name, bucket_name, file_name)
102 | - name: PutMetricData
103 | action: aws:executeAwsApi
104 | isEnd: true
105 | inputs:
106 | Service: cloudwatch
107 | Api: PutMetricData
108 | MetricData:
109 | - MetricName: AvailableTradeFailoverMetric
110 | Value: 3
111 | Namespace: AvailableTrade
112 | """
113 | # Create the AvailableTradeFailoverAutomation SSM Document
114 | ssm.CfnDocument(
115 | self, "AvailableTradeFailoverAutomation",
116 | content=yaml.safe_load(yaml_doc),
117 | document_type="Automation",
118 | )
119 |
120 | ## Create an Alarm with a custom metric that will be used to signal a failover
121 | alarm = cloudwatch.Alarm(
122 | self,
123 | "AvailableTradeFailoverAlarm",
124 | metric=cloudwatch.Metric(
125 | namespace="AvailableTrade",
126 | metric_name="AvailableTradeFailoverMetric",
127 | dimensions_map={},
128 | statistic="Maximum",
129 | period=Duration.minutes(1),
130 | ),
131 | alarm_name="AvailableTradeFailoverAlarm",
132 | threshold=2,
133 | evaluation_periods=1,
134 | comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
135 | treat_missing_data=cloudwatch.TreatMissingData.IGNORE
136 | )
137 |
138 | # Create an SNS topic to be used with the CloudWatch Alarm
139 | alarm_topic = sns.Topic(self, "AvailableTradeFailoverAlarmTopic")
140 | alarm.add_alarm_action(cw_actions.SnsAction(alarm_topic))
141 |
142 | # Create a certificate with multiple domain names (api-account and api-trade)
143 | hosted_zone = route53.HostedZone.from_lookup(self, "HostedZone", domain_name=domain_name)
144 | certificate = acm.Certificate(self, "Certificate",
145 | domain_name=domain_name,
146 | validation=acm.CertificateValidation.from_dns(hosted_zone),
147 | subject_alternative_names=[domain_name, f"api-account.{domain_name}", f"api-trade.{domain_name}"],
148 | )
149 | ## Retrieve ssm parameter NewAccountAPIID
150 | account_api_id = ssm.StringParameter.value_for_string_parameter(
151 | self, "NewAccountAPIID"
152 | )
153 |
154 | ## Get a reference to the APIG
155 | account_api = apigateway.RestApi.from_rest_api_id(
156 | self, "NewAccountApi-Secondary",
157 | rest_api_id=account_api_id,
158 | )
159 |
160 | ## Create the custom domain name
161 | account_api.add_domain_name("NewAccountCustomDomain-Secondary",
162 | domain_name=f"api-account.{domain_name}",
163 | certificate=certificate,
164 | )
165 |
166 | ## Retrieve ssm parameter TradeStockAPIID
167 | trade_api_id = ssm.StringParameter.value_for_string_parameter(
168 | self, "TradeStockAPIID"
169 | )
170 |
171 | ## Get a reference to the APIG
172 | trade_api = apigateway.RestApi.from_rest_api_id(
173 | self, "TradeStockAPI-Secondary",
174 | rest_api_id=trade_api_id,
175 | )
176 |
177 | ## Create the custom domain name
178 | trade_api.add_domain_name("TradeStockCustomDomain-Secondary",
179 | domain_name=f"api-trade.{domain_name}",
180 | certificate=certificate,
181 | )
182 |
183 |
184 |
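
The stack above wires the whole failover path together: the SSM Automation document switches over the Aurora global cluster, drops `failover.txt` into the S3 failover bucket, and publishes `AvailableTradeFailoverMetric`, which trips `AvailableTradeFailoverAlarm`. Below is a minimal boto3 sketch of driving that runbook by hand; the document's physical name is auto-generated by CloudFormation, so the name shown is a hypothetical placeholder you would look up first.

```python
# Sketch only, not part of the stack. Assumes AWS credentials and the
# AWS_SECONDARY_REGION environment variable used elsewhere in the repo.
import os
import boto3

region = os.getenv("AWS_SECONDARY_REGION")
ssm = boto3.client("ssm", region_name=region)
cloudwatch = boto3.client("cloudwatch", region_name=region)

# Start the runbook: switchover, write failover.txt to S3, publish the metric.
# "AvailableTradeFailoverAutomation-XXXX" is a placeholder; the real document
# name is generated by CloudFormation and must be looked up.
execution_id = ssm.start_automation_execution(
    DocumentName="AvailableTradeFailoverAutomation-XXXX"
)["AutomationExecutionId"]
print("automation execution:", execution_id)

# The last step publishes a value of 3, which is above the alarm threshold of 2,
# so the alarm should transition to ALARM within about a minute.
for alarm in cloudwatch.describe_alarms(
        AlarmNames=["AvailableTradeFailoverAlarm"])["MetricAlarms"]:
    print(alarm["AlarmName"], alarm["StateValue"])
```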
--------------------------------------------------------------------------------
/src/recovery/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/recovery/tests/__init__.py
--------------------------------------------------------------------------------
/src/recovery/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/recovery/tests/unit/__init__.py
--------------------------------------------------------------------------------
/src/recovery/tests/unit/test_recovery_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as core
2 | import aws_cdk.assertions as assertions
3 |
4 | from recovery.recovery_stack import RecoveryStack
5 |
6 | # example tests. To run these tests, uncomment this file along with the example
7 | # resource in recovery/recovery_stack.py
8 | def test_sqs_queue_created():
9 | app = core.App()
10 | stack = RecoveryStack(app, "recovery")
11 | template = assertions.Template.from_stack(stack)
12 |
13 | # template.has_resource_properties("AWS::SQS::Queue", {
14 | # "VisibilityTimeout": 300
15 | # })
16 |
--------------------------------------------------------------------------------
/src/trade-stock/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | package-lock.json
3 | __pycache__
4 | .pytest_cache
5 | .venv
6 | *.egg-info
7 |
8 | # CDK asset staging directory
9 | .cdk.staging
10 | cdk.out
11 |
--------------------------------------------------------------------------------
/src/trade-stock/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Welcome to your CDK Python project!
3 |
4 | This is a blank project for CDK development with Python.
5 |
6 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
7 |
8 | This project is set up like a standard Python project. The initialization
9 | process also creates a virtualenv within this project, stored under the `.venv`
10 | directory. To create the virtualenv it assumes that there is a `python3`
11 | (or `python` for Windows) executable in your path with access to the `venv`
12 | package. If for any reason the automatic creation of the virtualenv fails,
13 | you can create the virtualenv manually.
14 |
15 | To manually create a virtualenv on MacOS and Linux:
16 |
17 | ```
18 | $ python3 -m venv .venv
19 | ```
20 |
21 | After the init process completes and the virtualenv is created, you can use the following
22 | step to activate your virtualenv.
23 |
24 | ```
25 | $ source .venv/bin/activate
26 | ```
27 |
28 | If you are on a Windows platform, you would activate the virtualenv like this:
29 |
30 | ```
31 | % .venv\Scripts\activate.bat
32 | ```
33 |
34 | Once the virtualenv is activated, you can install the required dependencies.
35 |
36 | ```
37 | $ pip install -r requirements.txt
38 | ```
39 |
40 | At this point you can now synthesize the CloudFormation template for this code.
41 |
42 | ```
43 | $ cdk synth
44 | ```
45 |
46 | To add additional dependencies, for example other CDK libraries, just add
47 | them to your `requirements.txt` file and rerun the `pip install -r requirements.txt`
48 | command.
49 |
50 | ## Useful commands
51 |
52 | * `cdk ls` list all stacks in the app
53 | * `cdk synth` emits the synthesized CloudFormation template
54 | * `cdk deploy` deploy this stack to your default AWS account/region
55 | * `cdk diff` compare deployed stack with current state
56 | * `cdk docs` open CDK documentation
57 |
58 | Enjoy!
59 |
--------------------------------------------------------------------------------
/src/trade-stock/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/trade-stock/__init__.py
--------------------------------------------------------------------------------
/src/trade-stock/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import aws_cdk as cdk
4 | from aws_cdk import (
5 | aws_rds as rds,
6 | )
7 | from trade_stock.vpc_stack import VpcStack
8 | from trade_stock.trade_order_stack import TradeOrderStack
9 | from trade_stock.trade_confirms_stack import TradeConfirmsStack
10 | from trade_stock.public_api_stack import PublicApiStack
11 | from trade_stock.trade_database import TradeDatabaseStack
12 | from trade_stock.trade_database_secondary_stack import TradeDatabaseSecondaryStack
13 |
14 | app = cdk.App()
15 | account = os.getenv('AWS_ACCOUNT_ID')
16 | primary_region = os.getenv('AWS_PRIMARY_REGION')
17 | secondary_region = os.getenv('AWS_SECONDARY_REGION')
18 |
19 | primary_env = cdk.Environment(account=account, region=primary_region)
20 | primary_vpc_stack = VpcStack(app, "TradeVpcStackPrimary", env=primary_env)
21 | primary_database = TradeDatabaseStack(app, "TradeDatabaseStackPrimary", env=primary_env,
22 | vpc=primary_vpc_stack.vpc, secondary_region=secondary_region)
23 | primary_order_api_stack = TradeOrderStack(app, "TradeOrderStackPrimary", env=primary_env,
24 | vpc=primary_vpc_stack.vpc,
25 | task_role=primary_database.task_role)
26 | primary_confirms_api_stack = TradeConfirmsStack(app, "TradeConfirmsStackPrimary", env=primary_env,
27 | vpc=primary_vpc_stack.vpc)
28 | primary_api = PublicApiStack(app, "TradeStockApiGatewayStackPrimary", env=primary_env,
29 | private_lb=primary_order_api_stack.private_lb,
30 | resource_name="trade")
31 |
32 | secondary_env = cdk.Environment(account=account, region=secondary_region)
33 | secondary_vpc_stack = VpcStack(app, "TradeVpcStackSecondary", env=secondary_env)
34 | secondary_database = TradeDatabaseSecondaryStack(app, "TradeDatabaseStackSecondary", env=secondary_env,
35 | vpc=secondary_vpc_stack.vpc)
36 |
37 | secondary_confirms_api_stack = TradeConfirmsStack(app, "TradeConfirmsStackSecondary", env=secondary_env,
38 | vpc=secondary_vpc_stack.vpc)
39 | secondary_order_api_stack = TradeOrderStack(app, "TradeOrderStackSecondary", env=secondary_env,
40 | vpc=secondary_vpc_stack.vpc,
41 | task_role=secondary_database.task_role)
42 | secondary_api = PublicApiStack(app, "TradeStockApiGatewayStackSecondary", env=secondary_env,
43 | private_lb=secondary_order_api_stack.private_lb,
44 | resource_name="trade")
45 |
46 | app.synth()
47 |
--------------------------------------------------------------------------------
/src/trade-stock/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py",
3 | "watch": {
4 | "include": [
5 | "**"
6 | ],
7 | "exclude": [
8 | "README.md",
9 | "cdk*.json",
10 | "requirements*.txt",
11 | "source.bat",
12 | "**/__init__.py",
13 | "**/__pycache__",
14 | "tests"
15 | ]
16 | },
17 | "context": {
18 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
19 | "@aws-cdk/core:checkSecretUsage": true,
20 | "@aws-cdk/core:target-partitions": [
21 | "aws",
22 | "aws-cn"
23 | ],
24 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
25 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
26 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
27 | "@aws-cdk/aws-iam:minimizePolicies": true,
28 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
29 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
30 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
31 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
32 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
33 | "@aws-cdk/core:enablePartitionLiterals": true,
34 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
35 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
36 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
39 | "@aws-cdk/aws-route53-patters:useCertificate": true,
40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
46 | "@aws-cdk/aws-redshift:columnId": true,
47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
50 | "@aws-cdk/aws-kms:aliasNameRef": true,
51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true,
54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
60 | "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/src/trade-stock/confirms_api/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:3-slim as build
2 | # for local only, if using mac M1
3 | #FROM public.ecr.aws/docker/library/python:slim as build
4 |
5 | WORKDIR /usr/app
6 | COPY requirements.txt .
7 | RUN pip install -r requirements.txt # --no-cache-dir
8 |
9 | COPY . .
10 |
11 | # curl "$ECS_CONTAINER_METADATA_URI/task" -o "$HOME/results.json"
12 | #cat "$HOME/results.json"
13 |
14 | CMD [ "gunicorn", "--bind", "0.0.0.0:80", "--log-config", "gunicorn_logging.conf", "--timeout", "1", "confirms_api:app" ]
15 |
16 |
17 | # local testing
18 | # docker image build -t confirms .
19 | # docker run -p 80:80 -d confirms
--------------------------------------------------------------------------------
/src/trade-stock/confirms_api/confirms_api.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from flask import Flask, Response
3 | from flask_api import status
4 | from trade_parameter_name import TradeParameterName
5 | import boto3
6 | import logging
7 | from pythonjsonlogger import jsonlogger
8 | import time
9 | import requests
10 | import os
11 |
12 |
13 | class ConfirmsMaintenanceError(RuntimeError):
14 | pass
15 |
16 |
17 | class ConfirmsProcessingException(RuntimeError):
18 | pass
19 |
20 |
21 | app = Flask(__name__)
22 | count: int = 0
23 |
24 | json_handler = logging.StreamHandler()
25 | formatter = jsonlogger.JsonFormatter('%(levelname)s %(lineno)d %(asctime)s %(task)-32s %(az)-10s - %(message)s')
26 | json_handler.setFormatter(formatter)
27 | logging.basicConfig(handlers=[json_handler], level=logging.INFO)
28 | logger = logging.getLogger('confirms')
29 |
30 | meta_data_uri = os.environ.get('ECS_CONTAINER_METADATA_URI_V4')
31 | tr = requests.get("{}/task".format(meta_data_uri)).json()
32 | availability_zone_ = tr['AvailabilityZone']
33 | d = {'az': availability_zone_}
34 |
35 |
36 | def get_exchange_status(request_count: int, force: bool = False) -> str:
37 | """
38 | get_exchange_status is a fault-injection hook for the simulated third-party off-platform confirms service.
39 | When the exchange status is not AVAILABLE, the service will hard fail.
40 | Test full outages with circuit breakers and graceful degradation.
41 | """
42 | global exchange_status
43 | if force or request_count % 10 == 0:
44 | ssm_client = boto3.client('ssm')
45 | exchange_status = ssm_client.get_parameter(
46 | Name=TradeParameterName.TRADE_CONFIRMS_EXCHANGE_STATUS.value)['Parameter']['Value']
47 | return exchange_status
48 |
49 |
50 | def get_exchange_glitch_factor(request_count: int, force: bool = False) -> str:
51 | """
52 | get_exchange_glitch_factor is a fault-injection hook for the simulated third-party off-platform confirms service.
53 | When the glitch_factor is ON, the service will return intermittent errors, or gray failures.
54 | Test non-deterministic/intermittent error behavior like network issues, impaired instances, and resource exhaustion.
55 | """
56 | global glitch_factor
57 | if force or request_count % 10 == 0:
58 | ssm_client = boto3.client('ssm')
59 | glitch_factor = ssm_client.get_parameter(
60 | Name=TradeParameterName.TRADE_CONFIRMS_GLITCH_FACTOR.value)['Parameter']['Value']
61 | return glitch_factor
62 |
63 |
64 | exchange_status = get_exchange_status(0, True)
65 | glitch_factor = get_exchange_glitch_factor(0, True)
66 |
67 |
68 | @app.route("/")
69 | def health():
70 | """
71 | A simple health check for the load balancers that indicates the Flask application is up and running.
72 | """
73 | logger.info("Call to / it's OK", extra=d)
74 | return Response(response="OK", status=status.HTTP_200_OK)
75 |
76 |
77 | @app.route("/exchange-health/")
78 | def exchange_health():
79 | """
80 | A deep health check that indicates whether the exchange is open for trading and running normally.
81 | """
82 | response_message = "Exchange is {} and glitch factor is {}.".format(exchange_status, glitch_factor)
83 | available = exchange_status == "AVAILABLE" or glitch_factor == "OFF"
84 | response_status = status.HTTP_200_OK if available else status.HTTP_503_SERVICE_UNAVAILABLE
85 | logger.info("Call to /exchange_health/ response is {}".format(response_message), extra=d)
86 | return Response(response_message, status=response_status, mimetype="text/plain")
87 |
88 |
89 | @app.route("/confirm-trade/", methods=["POST", "GET", "PUT"])
90 | def confirm_trade():
91 | """
92 | Simulates placing a trade order with a third-party off-platform exchange.
93 | Demonstrates glitches and outages for resilience chaos testing.
94 | """
95 | global count
96 | count += 1
97 | try:
98 | if get_exchange_status(count) != "AVAILABLE":
99 | raise ConfirmsMaintenanceError("ConfirmsMaintenanceError: Exchange is not available, come back later!")
100 | if get_exchange_glitch_factor(count) == "ON" and count % 3 == 0:
101 | raise ConfirmsProcessingException("ConfirmsProcessingException: Processing error, please try again...")
102 | except ConfirmsMaintenanceError as e:
103 | logger.error("ConfirmsMaintenanceError: Exchange status: {}".format(exchange_status), extra=d)
104 | return Response(e.__str__(), status=status.HTTP_503_SERVICE_UNAVAILABLE)
105 | except ConfirmsProcessingException as e:
106 | logger.error("ConfirmsProcessingException: glitch_factor is: {}".format(glitch_factor), extra=d)
107 | return Response(e.__str__(), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
108 | time.sleep(.1) # delay to simulate doing some work
109 |
110 | return Response("Trade Confirmed", status=status.HTTP_200_OK)
111 |
112 |
113 | if __name__ == "__main__":
114 | logger.info("Starting Confirms Flask app", extra=d)
115 | app.run(debug=False, host="0.0.0.0", port=80)
116 |
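
The two fault-injection hooks above poll SSM Parameter Store on every 10th request, so chaos experiments come down to flipping two parameters. A minimal sketch, assuming default AWS credentials and the parameter names from `trade_parameter_name.py`:

```python
# Sketch only. Any exchange status other than "AVAILABLE" simulates a full
# outage; glitch factor "ON" simulates intermittent (gray) failures.
# "MAINTENANCE" is just an illustrative non-AVAILABLE value.
import boto3

ssm = boto3.client("ssm")


def set_confirms_faults(exchange_status="AVAILABLE", glitch_factor="OFF"):
    ssm.put_parameter(Name="trade_confirms_exchange_status",
                      Value=exchange_status, Type="String", Overwrite=True)
    ssm.put_parameter(Name="trade_confirms_glitch_factor",
                      Value=glitch_factor, Type="String", Overwrite=True)


set_confirms_faults(exchange_status="MAINTENANCE")  # hard outage
set_confirms_faults(glitch_factor="ON")             # gray failures
set_confirms_faults()                               # restore normal operation
```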
--------------------------------------------------------------------------------
/src/trade-stock/confirms_api/gunicorn_logging.conf:
--------------------------------------------------------------------------------
1 | [loggers]
2 | keys=root, gunicorn.error
3 |
4 | [handlers]
5 | keys=console
6 |
7 | [formatters]
8 | keys=json
9 |
10 | [logger_root]
11 | level=INFO
12 | handlers=console
13 |
14 | [logger_gunicorn.error]
15 | level=INFO
16 | handlers=console
17 | propagate=0
18 | qualname=gunicorn.error
19 |
20 | [handler_console]
21 | class=StreamHandler
22 | formatter=json
23 | args=(sys.stdout, )
24 |
25 | [formatter_json]
26 | class=pythonjsonlogger.jsonlogger.JsonFormatter
27 | format=%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s
--------------------------------------------------------------------------------
/src/trade-stock/confirms_api/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==2.3.2
2 | Flask-API==3.0.post1
3 | werkzeug==3.0.6
4 | python-json-logger
5 | gunicorn
6 | requests
7 | boto3
--------------------------------------------------------------------------------
/src/trade-stock/confirms_api/trade_parameter_name.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class TradeParameterName(Enum):
5 | TRADE_CONFIRMS_ENDPOINT = 'trade_confirms_endpoint'
6 | TRADE_CONFIRMS_EXCHANGE_STATUS = 'trade_confirms_exchange_status'
7 | TRADE_CONFIRMS_GLITCH_FACTOR = 'trade_confirms_glitch_factor'
8 | TRADE_ORDER_ENDPOINT = 'trade_order_endpoint'
9 | TRADE_ORDER_API_ENDPOINT = 'trade_order_global_endpoint'
10 | TRADE_RDS_PROXY_ENDPOINT = 'trade_rds_proxy_endpoint'
11 | TRADE_RDS_PROXY_READ_ONLY_ENDPOINT = 'trade_rds_proxy_read_only_endpoint'
12 | TRADE_DATABASE_SECRET_ID = 'trade_db_secret_id'
13 | TRADE_ORDER_API_SECRET_ID = 'trade_order_api_secret_id'
14 | TRADE_RDS_SECONDARY_CLUSTER_ARN = 'trade_rds_secondary_cluster_arn'
15 |
--------------------------------------------------------------------------------
/src/trade-stock/order_api/Dockerfile:
--------------------------------------------------------------------------------
1 | #FROM public.ecr.aws/docker/library/python:slim
2 | FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:3.9-slim as build
3 |
4 | WORKDIR /usr/app
5 | COPY requirements.txt .
6 | RUN pip install -r requirements.txt # --no-cache-dir
7 |
8 | COPY . .
9 | CMD [ "gunicorn", "--bind", "0.0.0.0:80", "--log-config", "gunicorn_logging.conf", "--timeout", "2", "order_api:app" ]
10 |
11 | #, "--workers", "3"
--------------------------------------------------------------------------------
/src/trade-stock/order_api/README.md:
--------------------------------------------------------------------------------
1 |
 2 | Building and testing your Docker image
 3 | 
 4 | First, code your Flask app locally and test it out:
 5 | `order_api.py`
 6 | 
 7 | If everything checks out, consider freezing `requirements.txt`.
 8 | 
 9 | Next, build the Docker image (tag it to match the run command below):
10 | `docker build -t trading_api .`
11 | 
12 | Review the images:
13 | `docker images -a`
14 | 
15 | Finally, run your image and test it out:
16 | `docker run -d --name trading_api -p 80:80 trading_api:latest`
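
A minimal local smoke test, assuming the container is running as above and that the order API serves the same `/db-health/` route the repo's shell scripts curl; locally the database check may fail, but any HTTP response confirms gunicorn and Flask are up:

```python
# Sketch only: hit the locally running container started by the docker run
# command above. The /db-health/ path is an assumption taken from the repo's
# shell scripts, not from this README.
import requests

resp = requests.get("http://localhost:80/db-health/", timeout=5)
print(resp.status_code, resp.text[:200])
```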
--------------------------------------------------------------------------------
/src/trade-stock/order_api/data_objects.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import enum
3 | from sqlalchemy.orm import DeclarativeBase
4 | from sqlalchemy.orm import Mapped
5 | from sqlalchemy.orm import mapped_column
6 | from sqlalchemy import ForeignKey
7 | from sqlalchemy import String
8 |
9 |
10 | class Base(DeclarativeBase):
11 | pass
12 |
13 |
14 | class Customer(Base):
15 | __tablename__ = "customer"
16 |
17 | id: Mapped[int] = mapped_column(primary_key=True)
18 | first_name: Mapped[str] = mapped_column(String(250))
19 | last_name: Mapped[str] = mapped_column(String(250))
20 | created_on: Mapped[datetime]
21 | updated_on: Mapped[datetime]
22 |
23 | def as_dict(self):
24 | return {c.name: getattr(self, c.name) for c in self.__table__.columns}
25 |
26 |
27 | class Symbol(Base):
28 | __tablename__ = "symbol"
29 |
30 | id: Mapped[int] = mapped_column(primary_key=True)
31 | ticker: Mapped[str] = mapped_column(String(25), primary_key=True)
32 | open: Mapped[float]
33 | high: Mapped[float]
34 | low: Mapped[float]
35 | close: Mapped[float]
36 | volume: Mapped[int]
37 | created_on: Mapped[datetime]
38 | updated_on: Mapped[datetime]
39 |
40 | def as_dict(self):
41 | return {c.name: getattr(self, c.name) for c in self.__table__.columns}
42 |
43 |
44 | class TransactionType(str, enum.Enum):
45 | buy = "buy"
46 | sell = "sell"
47 |
48 |
49 | class TradeState(str, enum.Enum):
50 | submitted = "submitted"
51 | pending = "pending"
52 | rejected = "rejected"
53 | filled = "filled"
54 | aborted = "aborted"
55 |
56 |
57 | class Activity(Base):
58 | __tablename__ = "activity"
59 |
60 | id: Mapped[int] = mapped_column(primary_key=True)
61 | request_id: Mapped[str] = mapped_column(String(250), unique=True)
62 | customer_id: Mapped[int] = mapped_column(ForeignKey("customer.id"), name="customer_id")
63 | symbol_ticker: Mapped[int] = mapped_column(ForeignKey("symbol.id"), name="symbol_ticker")
64 | type: Mapped[TransactionType] = mapped_column(name="type")
65 | status: Mapped[TradeState] = mapped_column(name="status")
66 | current_price: Mapped[float]
67 | share_count: Mapped[float]
68 |
69 | def as_dict(self):
70 | return {c.name: getattr(self, c.name) for c in self.__table__.columns}
71 |
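
These models mirror the tables created by `sql_scripts/schema.sql`. A minimal sketch of recording a trade with them, assuming a reachable PostgreSQL database; in the deployed service the connection details come from the RDS Proxy endpoint and Secrets Manager secret referenced in `trade_parameter_name.py`, so the connection URL here is only a placeholder.

```python
# Sketch only: the connection string is a placeholder, not the real proxy
# endpoint or credentials.
import uuid
from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session

from data_objects import Activity, Symbol, TradeState, TransactionType

engine = create_engine("postgresql+psycopg2://user:password@localhost/trade")

with Session(engine) as session:
    # Look up a seeded symbol and record a buy against it.
    symbol = session.scalars(select(Symbol).where(Symbol.ticker == "IPAY")).one()
    trade = Activity(
        request_id=str(uuid.uuid4()),
        customer_id=4,
        symbol_ticker=symbol.id,
        type=TransactionType.buy,
        status=TradeState.submitted,
        current_price=symbol.close,
        share_count=100,
    )
    session.add(trade)
    session.commit()
    print(trade.as_dict())
```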
--------------------------------------------------------------------------------
/src/trade-stock/order_api/gunicorn_logging.conf:
--------------------------------------------------------------------------------
1 | [loggers]
2 | keys=root, gunicorn.error
3 |
4 | [handlers]
5 | keys=console
6 |
7 | [formatters]
8 | keys=json
9 |
10 | [logger_root]
11 | level=INFO
12 | handlers=console
13 |
14 | [logger_gunicorn.error]
15 | level=INFO
16 | handlers=console
17 | propagate=0
18 | qualname=gunicorn.error
19 |
20 | [handler_console]
21 | class=StreamHandler
22 | formatter=json
23 | args=(sys.stdout, )
24 |
25 | [formatter_json]
26 | class=pythonjsonlogger.jsonlogger.JsonFormatter
27 | format=%(asctime)s %(threadName)s %(name)s %(levelname)s %(message)s
--------------------------------------------------------------------------------
/src/trade-stock/order_api/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==2.3.2
2 | Flask-API==3.0.post1
3 | boto3
4 | aws-secretsmanager-caching
5 | psycopg2-binary
6 | sqlalchemy
7 | werkzeug==3.0.6
8 | gunicorn
9 | circuitbreaker
10 | requests
11 | retry
12 | python-json-logger
13 |
--------------------------------------------------------------------------------
/src/trade-stock/order_api/requirements.txt.frozen:
--------------------------------------------------------------------------------
1 | attrs==23.1.0
2 | aws-cdk-lib==2.114.1
3 | aws-cdk.asset-awscli-v1==2.2.201
4 | aws-cdk.asset-kubectl-v20==2.1.2
5 | aws-cdk.asset-node-proxy-agent-v6==2.0.1
6 | aws-secretsmanager-caching==1.1.1.5
7 | aws-xray-sdk==2.12.1
8 | blinker==1.7.0
9 | boto3==1.34.11
10 | botocore==1.34.11
11 | cattrs==23.2.3
12 | cdk-ecs-service-extensions==2.0.0
13 | circuitbreaker==2.0.0
14 | click==8.1.7
15 | constructs==10.3.0
16 | decorator==5.1.1
17 | Flask==2.3.0
18 | Flask-API==3.0.post1
19 | gunicorn==21.2.0
20 | importlib-resources==6.1.1
21 | iniconfig==2.0.0
22 | itsdangerous==2.1.2
23 | Jinja2==3.1.2
24 | jmespath==1.0.1
25 | jsii==1.93.0
26 | MarkupSafe==2.1.3
27 | packaging==23.2
28 | pluggy==1.3.0
29 | psycopg2-binary==2.9.9
30 | publication==0.0.3
31 | py==1.11.0
32 | pytest==6.2.5
33 | python-dateutil==2.8.2
34 | retry==0.9.2
35 | s3transfer==0.10.0
36 | six==1.16.0
37 | SQLAlchemy==2.0.24
38 | toml==0.10.2
39 | typeguard==2.13.3
40 | typing_extensions==4.9.0
41 | urllib3==2.0.7
42 | Werkzeug==2.3.8
43 | wrapt==1.16.0
44 |
--------------------------------------------------------------------------------
/src/trade-stock/order_api/response.json:
--------------------------------------------------------------------------------
1 | {
2 | "pets": [
3 | {
4 | "id": 1,
5 | "type": "dog",
6 | "price": 249.99
7 | },
8 | {
9 | "id": 2,
10 | "type": "cat",
11 | "price": 124.99
12 | },
13 | {
14 | "id": 3,
15 | "type": "fish",
16 | "price": 0.99
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/src/trade-stock/order_api/trade_parameter_name.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class TradeParameterName(Enum):
5 | TRADE_CONFIRMS_ENDPOINT = 'trade_confirms_endpoint'
6 | TRADE_CONFIRMS_EXCHANGE_STATUS = 'trade_confirms_exchange_status'
7 | TRADE_CONFIRMS_GLITCH_FACTOR = 'trade_confirms_glitch_factor'
8 | TRADE_ORDER_ENDPOINT = 'trade_order_endpoint'
9 | TRADE_ORDER_API_ENDPOINT = 'trade_order_global_endpoint'
10 | TRADE_RDS_PROXY_ENDPOINT = 'trade_rds_proxy_endpoint'
11 | TRADE_RDS_PROXY_READ_ONLY_ENDPOINT = 'trade_rds_proxy_read_only_endpoint'
12 | TRADE_DATABASE_SECRET_ID = 'trade_db_secret_id'
13 | TRADE_ORDER_API_SECRET_ID = 'trade_order_api_secret_id'
14 | TRADE_RDS_SECONDARY_CLUSTER_ARN = 'trade_rds_secondary_cluster_arn'
15 |
--------------------------------------------------------------------------------
/src/trade-stock/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | pytest==6.2.5
2 |
--------------------------------------------------------------------------------
/src/trade-stock/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk-lib>=2.148.1
2 | constructs>=10.0.0,<11.0.0
3 | cdk-ecs-service-extensions>=2.0.0
--------------------------------------------------------------------------------
/src/trade-stock/seed/customers.csv:
--------------------------------------------------------------------------------
1 | first_name,last_name
2 | sally,mae
3 | freddie,mac
4 | jp,morgan
5 | kevin,schwarz
6 | nate,bachmeier
7 | jennifer,moran
--------------------------------------------------------------------------------
/src/trade-stock/seed/stocks.csv:
--------------------------------------------------------------------------------
1 | ticker,volume,open,close,high,low
2 | BCC,248274,61.68,61.99,62.565,61.41
3 | CLDX,882958,35.03,34.78,35.81,34.39
4 | TRND,2084,26.73,26.7662,26.7662,26.7201
5 | BFS,47818,37.15,37.25,37.68,36.885
6 | NHTC,8284,4.9,4.85,4.92,4.85
7 | HIMS,1830853,9.59,9.91,10.11,9.557
8 | ETHO,981,49.9,50.0959,50.0959,49.86
9 | FSBC,37318,21.15,21.3,21.35,21
10 | IPAY,19344,40.26,40.06,40.3,39.94
11 | STLD,1168999,110.58,109.23,111.6801,108.91
12 | CEPU,295494,5.67,5.7,5.8,5.58
13 | BKSY,376877,1.41,1.37,1.41,1.36
14 | CAC,48232,37.54,37.75,37.915,37.17
15 | BACK,401789,0.16,0.151,0.16,0.15
16 | THCH,132609,3.52,3.45,3.5711,3.43
17 | SPCE,6937554,4.04,3.9,4.09,3.815
18 | DM,1666843,2.07,2.09,2.15,2.05
19 | IIGV,265,23.06,23.08,23.08,23.06
20 | IAI,63648,89.29,89.29,89.41,88.63
21 | AOSL,193321,25.64,25.62,25.72,25
22 | IMNN,6173,1.38,1.33,1.38,1.32
23 | ADMP,743019,0.1199,0.115,0.1242,0.1118
24 | BBAX,163318,48.12,48.23,48.3082,48.04
25 | BCAN,16771,2.22,2.24,2.28,2.22
26 | GWRE,380125,76.3,76.44,76.735,75.81
27 | FLTB,8763,48.67,48.4,48.67,48.35
28 | CFGpE,77463,20.28,20.06,20.47,19.99
29 | ATAI,1791732,1.4,1.33,1.41,1.3
30 | RNRpF,4221,21.88,21.85,21.99,21.82
31 | BSJN,217663,23.44,23.435,23.46,23.423
32 | KPLTW,7458,0.0918,0.069601,0.0918,0.062999
33 | SPNTpB,31714,22,21.6,22,21.6
34 | SMMV,43048,33.96,34.14,34.1983,33.93
35 | CDAK,12021331,0.1952,0.19,0.21,0.165
36 | TRUP,1212683,38.4,37.64,40.07,37.55
37 | CHSCO,17496,25.69,26,26.04,25.69
38 | AMTB,69688,22.26,22.25,22.4,21.99
39 | CAT,1801975,218.15,220.16,220.56,217.71
40 | OCCI,13904,9.12,9.12,9.14,9.06
41 | PRO,89772,25.48,25.8,25.93,25.44
42 | TIGO,88117,18.24,18.09,18.33,18.03
43 | IYR,5155837,80.01,80.32,80.76,79.75
44 | TNPpD,7628,24.64,24.69,24.755,24.55
45 | IMRX,50073,10.33,9.38,10.69,9.3
46 | EELV,261806,23.17,23.21,23.24,23.17
47 | MNKD,1549361,4.03,4.01,4.08,3.975
48 | WOOF,4466267,7.95,8.47,8.475,7.945
49 | HTFC,3500,23.95,24.2,24.2,22.85
50 | IZM,86175,3.46,3.27,3.46,2.96
51 | FEZ,835367,43.12,43.21,43.3099,43.05
52 | ZEUS,87565,51.36,50.63,52.24,50.51
53 | ERF,1187123,14.23,14.36,14.62,14.19
54 | APCA.WS,1245,0.11,0.1109,0.1109,0.11
55 | RGR,60472,55.27,55.69,55.75,55.2
56 | SDOW,7030935,28.46,28.5,28.7692,28.09
57 | SIX,941918,24.39,24.36,24.87,24.265
58 | OLPX,1766005,3.97,4.01,4.08,3.95
59 | MOAT,447473,70.8,70.82,70.99,70.4
60 | ZNTL,383253,17.51,16.92,17.92,16.88
61 | ACA,243860,60.52,61.37,61.58,60.52
62 | FSM,4508565,3.59,3.74,3.75,3.54
63 | EM,84234,1.12,1.0284,1.13,1
64 | DAVA,128479,65.24,63.84,65.815,63.76
65 | YELL,712519,1.98,1.875,1.99,1.87
66 | YSG,1303891,1.46,1.45,1.48,1.4
67 | ETY,147291,11.4,11.35,11.4,11.32
68 | EMD,79074,8.57,8.45,8.59,8.45
69 | MCBS,17615,17.06,16.92,17.235,16.82
70 | SESG,372,39.24,39.3911,39.3911,39.24
71 | GPK,3184423,24.84,25.15,25.165,24.72
72 | FEN,91554,12.34,12.57,12.5899,12.34
73 | EOI,66726,14.91,14.85,14.93,14.75
74 | WCCpA,12392,26.82,26.71,26.99,26.65
75 | DSTL,41186,41.78,41.79,41.855,41.59
76 | SGML,2212233,35.99,36.95,39.04,35.42
77 | TYO,49476,12.26,12.21,12.3,12.197
78 | VIGI,213572,71.86,71.99,72.0035,71.73
79 | CHD,908585,86.3,86.4,86.75,86.07
80 | JPEM,22169,49.97,49.96,50.0599,49.89
81 | AAIC,65169,2.73,2.67,2.77,2.67
82 | SLDP,1036592,2.78,2.78,2.835,2.72
83 | SHOP,8872483,44.98,44.31,45.39,44
84 | AGCO,474492,128.72,129.18,130.18,127.91
85 | HVT,95860,31.17,31.3,31.86,31.02
86 | AVRO,504794,1.1,1.05,1.18,1.03
87 | DTEA,174525,0.5253,0.5175,0.5253,0.51
88 | CVI,1207795,31.16,32.32,32.455,31.16
89 | MBB,1243961,94.15,94.14,94.37,94.06
90 | AMBA,372385,76.58,74.36,76.58,73
91 | CRAI,80637,108.74,109.35,109.88,107.56
92 | BSGM,335495,1.13,1.08,1.1985,1.05
93 | MANU,1485229,22.22,22.47,22.82,21.97
94 | TRKA,30345101,0.213,0.2,0.2212,0.1972
95 | CRDO,2047460,8.83,8.88,8.95,8.69
96 | MIXT,4812,7.89,8.19,8.2,7.89
97 | SBI,2870,7.84,7.84,7.84,7.8102
98 | EPC,322128,41.86,42.25,42.36,41.65
99 | ERH,12553,10.23,10.38,10.448,10.23
100 | SARK,911701,43.02,43.58,43.81,42.8
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/create_api_user.sh:
--------------------------------------------------------------------------------
1 | export API_SECRET_ID=$(aws ssm get-parameter --name trade_order_api_secret_id \
2 | --region us-east-1 | jq -r .Parameter.Value)
3 | export API_SECRET=`aws secretsmanager get-secret-value \
4 | --secret-id $API_SECRET_ID --region us-east-1 | jq -r '.SecretString'`
5 | export API_PASSWORD="`echo $API_SECRET | jq -r '.password'`"
6 | export API_USER="`echo $API_SECRET | jq -r '.username'`"
7 | psql -c "create user $API_USER with password '$API_PASSWORD';"
8 | psql -c \
9 | "grant select,insert,update,delete on all tables in schema public to $API_USER;"
10 | psql -c \
11 | "grant usage,select on all sequences in schema public to $API_USER;"
12 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/curl_confirms_endpoint.sh:
--------------------------------------------------------------------------------
1 | export CONFIRMS_ENDPOINT=$(aws ssm get-parameter \
2 | --name trade_confirms_endpoint --region us-east-1 | jq -r .Parameter.Value)
3 | curl $CONFIRMS_ENDPOINT
4 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/curl_order_endpoint.sh:
--------------------------------------------------------------------------------
1 | export ORDER_ENDPOINT=$(aws ssm get-parameter --name trade_order_endpoint \
2 | --region us-east-1 | jq -r .Parameter.Value)
3 | curl $ORDER_ENDPOINT/db-health/
4 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/install_admin_client_packages.sh:
--------------------------------------------------------------------------------
1 | cd ~
2 | bash
3 | sudo yum update -y
4 | sudo yum install jq -y
5 | sudo amazon-linux-extras enable postgresql14
6 |
7 | sudo yum install parallel -y
8 | 
9 | sudo yum clean metadata
10 | sudo yum install postgresql-server -y
11 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/load_db_admin_session.sh:
--------------------------------------------------------------------------------
1 | export ADMIN_SECRET_ID=$(aws ssm get-parameter --name trade_db_secret_id \
2 | --region us-east-1 | jq -r .Parameter.Value)
3 | export ADMIN_SECRET=`aws secretsmanager get-secret-value \
4 | --secret-id $ADMIN_SECRET_ID --region us-east-1 | jq -r '.SecretString'`
5 | export PGUSER="`echo $ADMIN_SECRET | jq -r '.username'`"
6 | export PGPASSWORD="`echo $ADMIN_SECRET | jq -r '.password'`"
7 | export PGHOST="`echo $ADMIN_SECRET | jq -r '.host'`"
8 | export PGDATABASE="`echo $ADMIN_SECRET | jq -r '.dbname'`"
9 | export PGPORT="`echo $ADMIN_SECRET | jq -r '.port'`"
10 | psql -c "select version(),AURORA_VERSION();"
11 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/load_db_order_api_session.sh:
--------------------------------------------------------------------------------
1 | export API_SECRET_ID=$(aws ssm get-parameter --name trade_order_api_secret_id \
2 | --region us-east-1 | jq -r .Parameter.Value)
3 | export API_SECRET=`aws secretsmanager get-secret-value \
4 | --secret-id $API_SECRET_ID --region us-east-1 | jq -r '.SecretString'`
5 | export PGUSER="`echo $API_SECRET | jq -r '.username'`"
6 | export PGPASSWORD="`echo $API_SECRET | jq -r '.password'`"
7 | export PGHOST="`echo $API_SECRET | jq -r '.host'`"
8 | export PGDATABASE="`echo $API_SECRET | jq -r '.dbname'`"
9 | export PGPORT="`echo $API_SECRET | jq -r '.port'`"
10 | psql -c "select version(),AURORA_VERSION();"
11 | psql -c "select current_role;"
12 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/order_trade.sh:
--------------------------------------------------------------------------------
1 | bash
2 | cd ~
3 | export ORDER_ENDPOINT=$(aws ssm get-parameter --name trade_order_endpoint \
4 | --region us-east-1 | jq -r .Parameter.Value)
5 |
6 | cat <<'EOF' > trade.sh
7 | curl \
8 | --request POST \
9 | --header "Content-Type: application/json" \
10 | --data '{"request_id": "'$(uuidgen)'", "customer_id": "4", "ticker": "IPAY",
11 | "transaction_type": "buy", "current_price": 40.06,
12 | "share_count": '$RANDOM'}' \
13 | $ORDER_ENDPOINT/trade/
14 | EOF
15 |
16 | chmod u+x trade.sh
17 | watch -d -n 1 ./trade.sh
18 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/stress_order_api.sh:
--------------------------------------------------------------------------------
1 | export ORDER_ENDPOINT=$(aws ssm get-parameter \
2 | --name trade_order_endpoint --region us-east-1 | jq -r .Parameter.Value)
3 | watch -n .1 time curl $ORDER_ENDPOINT/db-stress/ &
4 |
--------------------------------------------------------------------------------
/src/trade-stock/shell_scripts/warm_order_api.sh:
--------------------------------------------------------------------------------
1 | export ORDER_ENDPOINT=$(aws ssm get-parameter --name trade_order_endpoint \
2 | --region us-east-1 | jq -r .Parameter.Value)
3 | for i in {1..100}; do time curl $ORDER_ENDPOINT/db-health/; done
4 |
--------------------------------------------------------------------------------
/src/trade-stock/source.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | rem The sole purpose of this script is to make the command
4 | rem
5 | rem source .venv/bin/activate
6 | rem
7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows.
8 | rem On Windows, this command just runs this batch file (the argument is ignored).
9 | rem
10 | rem Now we don't need to document a Windows command for activating a virtualenv.
11 |
12 | echo Executing .venv\Scripts\activate.bat for you
13 | .venv\Scripts\activate.bat
14 |
--------------------------------------------------------------------------------
/src/trade-stock/sql_scripts/load_seed_data.sql:
--------------------------------------------------------------------------------
1 | \COPY symbol(ticker,volume,open,close,high,low)
2 | FROM '~/seed/stocks.csv' DELIMITER ',' CSV HEADER;
3 | \COPY customer(first_name,last_name)
4 | FROM '~/seed/customers.csv' DELIMITER ',' CSV HEADER;
5 |
--------------------------------------------------------------------------------
/src/trade-stock/sql_scripts/schema.sql:
--------------------------------------------------------------------------------
1 | CREATE TYPE trade_state AS ENUM ('submitted', 'pending', 'rejected', 'filled');
2 | CREATE TYPE transaction_type AS ENUM ('buy', 'sell');
3 |
4 | CREATE TABLE customer (
5 | id serial PRIMARY KEY,
6 | first_name VARCHAR ( 250 ) NOT NULL,
7 | last_name VARCHAR ( 250 ) NOT NULL,
8 | created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
9 | updated_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
10 | );
11 | CREATE TABLE symbol (
12 | id serial PRIMARY KEY,
13 | ticker VARCHAR ( 25 ) NOT NULL,
14 | open numeric NOT NULL CHECK (open > 0),
15 | high numeric NOT NULL CHECK (high > 0),
16 | low numeric NOT NULL CHECK (low > 0),
17 | close numeric NOT NULL CHECK (close > 0),
18 | volume integer NOT NULL CHECK (volume > 0),
19 | created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
20 | updated_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
21 | );
22 | CREATE TABLE activity (
23 | id serial PRIMARY KEY,
24 | request_id VARCHAR ( 40 ) UNIQUE NOT NULL,
25 | customer_id INTEGER REFERENCES customer (id),
26 | symbol_ticker INTEGER REFERENCES symbol (id),
27 | type transaction_type NOT NULL,
28 | current_price numeric NOT NULL CHECK (current_price > 0),
29 | share_count numeric NOT NULL CHECK (share_count > 0),
30 | status trade_state NOT NULL,
31 | created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
32 | updated_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
33 | );
34 |
35 | CREATE FUNCTION update_updated_on()
36 | RETURNS TRIGGER AS $$
37 | BEGIN
38 | NEW.updated_on = now();
39 | RETURN NEW;
40 | END;
41 | $$ language plpgsql;
42 |
43 | CREATE TRIGGER update_symbol_updated_on
44 | BEFORE UPDATE
45 | ON
46 | symbol
47 | FOR EACH ROW
48 | EXECUTE PROCEDURE update_updated_on();
49 | CREATE TRIGGER update_activity_updated_on
50 | BEFORE UPDATE
51 | ON
52 | activity
53 | FOR EACH ROW
54 | EXECUTE PROCEDURE update_updated_on();
55 | CREATE TRIGGER update_customer_updated_on
56 | BEFORE UPDATE
57 | ON
58 | customer
59 | FOR EACH ROW
60 | EXECUTE PROCEDURE update_updated_on();
61 |
--------------------------------------------------------------------------------
/src/trade-stock/test.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import circuitbreaker
4 | from circuitbreaker import circuit
5 | import retry
6 | import requests
7 | from pythonjsonlogger import jsonlogger
8 |
9 |
10 | # decorate with circuit breaker: it will see the failures, and once there are enough failures the circuit will open
11 | @circuit(failure_threshold=5, expected_exception=RuntimeError)
12 | def do_stuff() -> requests.Response:
13 | print("trying to get stock")
14 | response = requests.get("https://ipinfo.io/shit")
15 | if response.status_code != 200:
16 | raise RuntimeError("took a shit")
17 | return response
18 |
19 |
20 | print(circuitbreaker.CircuitBreakerMonitor.get_circuits())
21 | print(circuitbreaker.CircuitBreakerMonitor.get_closed())
22 | print(circuitbreaker.CircuitBreakerMonitor.get_open())
23 | # monitor the circuits, if it's open, don't even bother.
24 | try:
25 | print("trying")
26 | response = retry.api.retry_call(do_stuff, backoff=0, jitter=0.1, tries=2)
27 | print(response)
28 | except:
29 | pass
30 |
31 | try:
32 | print("trying")
33 | response = retry.api.retry_call(do_stuff, backoff=0, jitter=0.1, tries=2)
34 | print(response)
35 | except:
36 | pass
37 |
38 | try:
39 | print("trying")
40 | response = retry.api.retry_call(do_stuff, backoff=0, jitter=0.1, tries=2)
41 | print(response)
42 | except:
43 | pass
44 |
45 |
46 | try:
47 | print("trying")
48 | response = retry.api.retry_call(do_stuff, backoff=0, jitter=0.1, tries=2)
49 | print(response)
50 | except:
51 | pass
52 |
53 | print(circuitbreaker.CircuitBreakerMonitor.get('do_stuff').open_until)
54 |
55 |
56 | @circuit(failure_threshold=2, expected_exception=RuntimeError)
57 | def is_registered():
58 | print("make it stop")
59 |
60 |
61 | print(circuitbreaker.CircuitBreakerMonitor.get('is_registered').state)
62 |
63 |
64 |
65 | d = {'task': 'foo', 'az': 'us-east-1'}
66 |
67 |
68 | json_handler = logging.StreamHandler()
69 | formatter = jsonlogger.JsonFormatter('%(levelname)s %(lineno)d %(asctime)s %(task)-32s %(az)-10s - %(message)s')
70 | json_handler.setFormatter(formatter)
71 | logging.basicConfig(handlers=[json_handler], level=logging.INFO)
72 | logger = logging.getLogger('orders')
73 | logger.info("logs in json format", extra=d)
--------------------------------------------------------------------------------
/src/trade-stock/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/trade-stock/tests/__init__.py
--------------------------------------------------------------------------------
/src/trade-stock/tests/trade-stock-stress-test.yml:
--------------------------------------------------------------------------------
1 | config:
2 | # The target URL is injected at run time via --variables
3 | # (see tests/trade_stock_test_client.py)
4 | target: "{{ url }}"
5 | phases:
6 | - duration: 500
7 | arrivalRate: 1
8 | rampTo: 2
9 | name: Warm up phase
10 | - duration: 500
11 | arrivalRate: 2
12 | rampTo: 8
13 | name: Ramp up load
14 | - duration: 1000
15 | arrivalRate: 5
16 | rampTo: 15
17 | name: Spike phase
18 | # Load & configure a couple of useful plugins
19 | # https://docs.art/reference/extensions
20 | plugins:
21 | ensure: { }
22 | apdex: { }
23 | metrics-by-endpoint: { }
24 | apdex:
25 | threshold: 100
26 | ensure:
27 | thresholds:
28 | - http.response_time.p99: 100
29 | - http.response_time.p95: 75
30 | scenarios:
31 | - flow:
32 | - loop:
33 | - get:
34 | url: "/db-stress/"
35 | expect:
36 | - statusCode: 200
37 | count: 200
--------------------------------------------------------------------------------
/src/trade-stock/tests/trade_request.json:
--------------------------------------------------------------------------------
1 | {
2 | "request_id": "NEED TO ADD THIS FROM A TEST CLIENT",
3 | "customer_id": "4",
4 | "symbol_ticker": "IPAY",
5 | "transaction_type": "buy",
6 | "current_price": "40.06",
7 | "share_count": "100"
8 | }
--------------------------------------------------------------------------------
/src/trade-stock/tests/trade_stock_test_client.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.config import Config
3 | import os
4 | import argparse
5 | from argparse import RawTextHelpFormatter
6 | from trade_utils.trade_parameter_name import TradeParameterName
7 | from circuitbreaker import circuit
8 | import requests
9 | from circuitbreaker import CircuitBreakerMonitor
10 |
11 | parser = argparse.ArgumentParser(prog="Trade Stock Test Client",
12 | description="Test Trade Stock Resiliency",
13 | formatter_class=RawTextHelpFormatter)
14 | parser.add_argument(
15 | "--test",
16 | help='''Choose a test to run
17 | 1/ Stress Test''',
18 | type=int,
19 | required=True)
20 |
21 |
22 | @circuit(failure_threshold=10, expected_exception=requests.exceptions.HTTPError, recovery_timeout=60)
23 | def make_call():
24 | rsp = requests.get("https://digital.nhs.uk/developer/api-catalogue/hello-word")
25 | rsp.raise_for_status()
26 | return rsp
27 |
28 |
29 | def get_url():
30 | region = 'AWS_PRIMARY_REGION'
31 | endpoint = None
32 | ssm = boto3.client("ssm", config=Config(region_name=os.getenv(region)))
33 | endpoint = ssm.get_parameter(Name=TradeParameterName.TRADE_ORDER_API_ENDPOINT.value)['Parameter']['Value']
34 | print(endpoint)
35 | return endpoint
36 |
37 |
38 | args = parser.parse_args()
39 |
40 | test = args.test
41 |
42 | if test == 1:
43 | command = "artillery run trade-stock-stress-test.yml --variables '{ \"url\": \"{url}\" }'".replace(
44 | "{url}", get_url())
45 | print(command)
46 | os.system(command)
47 | elif test == 2:
48 | for i in range(1, 20):
49 | print(CircuitBreakerMonitor.get("make_call").state)
50 | try:
51 | print(make_call())
52 | except:
53 | print("error!")
54 |
55 | else:
56 | print("invalid test case, please try again")
57 |
--------------------------------------------------------------------------------
/src/trade-stock/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/trade-stock/tests/unit/__init__.py
--------------------------------------------------------------------------------
/src/trade-stock/tests/unit/test_trade_stock_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as core
2 | import aws_cdk.assertions as assertions
3 |
4 | from trade_stock.trade_stock_stack import TradeStockStack
5 |
6 |
7 | # example tests. To run these tests, uncomment this file along with the example
8 | # resource in trade_stock/trade_stock_stack.py
9 | def test_sqs_queue_created():
10 | app = core.App()
11 | stack = TradeStockStack(app, 'trade-stock')
12 | template = assertions.Template.from_stack(stack)
13 |
14 | # template.has_resource_properties("AWS::SQS::Queue", {
15 | # "VisibilityTimeout": 300
16 | # })
17 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/trade-stock/trade_stock/__init__.py
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/public_api_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | aws_apigateway as apigateway,
3 | aws_logs as cw_logs,
4 | aws_ssm as ssm,
5 | )
6 | from constructs import Construct
7 | import aws_cdk as cdk
8 | from trade_utils.trade_parameter_name import TradeParameterName
9 |
10 |
11 | class PublicApiStack(cdk.Stack):
12 | def __init__(self, scope: Construct, construct_id: str, private_lb, resource_name: str, **kwargs) -> None:
13 | super().__init__(scope, construct_id, **kwargs)
14 |
15 | api_logs = cw_logs.LogGroup(self, "TradeStockApiLogs")
16 | restful_trades = apigateway.RestApi(
17 | self,
18 | "TradeStockApi", endpoint_types=[apigateway.EndpointType.REGIONAL],
19 | default_cors_preflight_options=apigateway.CorsOptions(
20 | allow_methods=['PUT', 'GET', 'OPTIONS'],
21 | allow_headers=['Content-Type',
22 | 'Cache-Control',
23 | 'Authorization'],
24 | allow_origins=apigateway.Cors.ALL_ORIGINS
25 | ),
26 | deploy_options=apigateway.StageOptions(
27 | stage_name="resilient",
28 | access_log_destination=apigateway.LogGroupLogDestination(api_logs),
29 | logging_level=apigateway.MethodLoggingLevel.INFO,
30 | access_log_format=apigateway.AccessLogFormat.clf(),
31 | data_trace_enabled=True,
32 | metrics_enabled=True,
33 | tracing_enabled=True,
34 | throttling_rate_limit=10000,
35 | throttling_burst_limit=500,
36 | ), cloud_watch_role=True,
37 | )
38 |
39 | trade_response_model = restful_trades.add_model(
40 | "TradeResponseModel",
41 | content_type="application/json",
42 | model_name="TradeResponseModel",
43 | schema=apigateway.JsonSchema(
44 | schema=apigateway.JsonSchemaVersion.DRAFT4,
45 | title="trade",
46 | type=apigateway.JsonSchemaType.OBJECT,
47 | properties={
48 | "id": apigateway.JsonSchema(type=apigateway.JsonSchemaType.INTEGER),
49 | "type": apigateway.JsonSchema(type=apigateway.JsonSchemaType.STRING),
50 | "price": apigateway.JsonSchema(type=apigateway.JsonSchemaType.INTEGER),
51 | },
52 | ),
53 | )
54 |
55 | api_vpc_link = apigateway.VpcLink(self, "ApiVpcLink", targets=[private_lb.nlb])
56 |
57 | api_integration = apigateway.Integration(
58 | type=apigateway.IntegrationType.HTTP,
59 | options=apigateway.IntegrationOptions(
60 | connection_type=apigateway.ConnectionType.VPC_LINK,
61 | vpc_link=api_vpc_link,
62 | integration_responses=[apigateway.IntegrationResponse(
63 | status_code="200",
64 | response_parameters={
65 | 'method.response.header.Access-Control-Allow-Headers': "'Access-Control-Allow-Origin,Content-Length,Content-Type,Date,X-Amz-Apigw-Id,X-Amzn-Requestid,X-Amzn-Trace-Id'",
66 | 'method.response.header.Access-Control-Allow-Methods': "'OPTIONS,PUT'",
67 | 'method.response.header.Access-Control-Allow-Origin': "'*'"}
68 | )],
69 | passthrough_behavior=apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES,
70 | timeout=cdk.Duration.seconds(2),
71 | request_parameters={'integration.request.header.Content-Type': "'application/json'"}
72 | ),
73 | integration_http_method="POST",
74 | uri="http://{}/{}/".format(
75 | private_lb.nlb.load_balancer_dns_name, resource_name
76 | ),
77 | )
78 |
79 | api_resource = restful_trades.root.add_resource(resource_name)
80 | api_resource.add_method(
81 | "PUT",
82 | api_integration,
83 | method_responses=[
84 | apigateway.MethodResponse(
85 | status_code="200",
86 | response_parameters={
87 | "method.response.header.Content-Type": True,
88 | "method.response.header.Content-Length": True,
89 | "method.response.header.Connection": True,
90 | "method.response.header.Server": True,
91 | 'method.response.header.Access-Control-Allow-Headers': True,
92 | 'method.response.header.Access-Control-Allow-Methods': True,
93 | 'method.response.header.Access-Control-Allow-Origin': True,
94 | },
95 | # Validate the schema on the response
96 | response_models={"application/json": trade_response_model},
97 | )
98 | ],
99 | )
100 |
101 | health_resource = restful_trades.root.add_resource("region-az")
102 | health_integration = apigateway.Integration(
103 | type=apigateway.IntegrationType.HTTP,
104 | options=apigateway.IntegrationOptions(
105 | connection_type=apigateway.ConnectionType.VPC_LINK,
106 | vpc_link=api_vpc_link,
107 | integration_responses=[
108 | apigateway.IntegrationResponse(
109 | status_code="200",
110 | response_parameters={
111 | 'method.response.header.Access-Control-Allow-Headers': "'Access-Control-Allow-Origin,Content-Length,Content-Type,Date,X-Amz-Apigw-Id,X-Amzn-Requestid,X-Amzn-Trace-Id'",
112 | 'method.response.header.Access-Control-Allow-Methods': "'OPTIONS,GET'",
113 | 'method.response.header.Access-Control-Allow-Origin': "'*'"}
114 | ),
115 | apigateway.IntegrationResponse(
116 | status_code="400",
117 | selection_pattern=r"^\[Error\].*",
118 | response_templates={
119 | "application/json": "{\"state\":\"error\",\"message\":\"$util.escapeJavaScript($input.path('$.errorMessage'))\"}",
120 | }
121 | ),
122 | apigateway.IntegrationResponse(
123 | status_code="500",
124 | selection_pattern=r"^\[Error\].*",
125 | response_templates={
126 | "application/json": "{\"state\":\"error\",\"message\":\"$util.escapeJavaScript($input.path('$.errorMessage'))\"}",
127 | }
128 | )
129 | ],
130 | passthrough_behavior=apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES,
131 | ),
132 | integration_http_method="GET",
133 | uri="http://{}/region-az/".format(private_lb.nlb.load_balancer_dns_name),
134 | )
135 | health_resource.add_method(
136 | "GET",
137 | health_integration,
138 | method_responses=[
139 | apigateway.MethodResponse(status_code="200",
140 | response_parameters={
141 | "method.response.header.Content-Type": True,
142 | "method.response.header.Content-Length": True,
143 | "method.response.header.Connection": True,
144 | "method.response.header.Server": True,
145 | 'method.response.header.Access-Control-Allow-Headers': True,
146 | 'method.response.header.Access-Control-Allow-Methods': True,
147 | 'method.response.header.Access-Control-Allow-Origin': True,
148 | }),
149 | apigateway.MethodResponse(status_code="400"),
150 | apigateway.MethodResponse(status_code="500"),
151 | ],
152 | )
153 |
154 | stress_resource = restful_trades.root.add_resource("db-stress")
155 | stress_integration = apigateway.Integration(
156 | type=apigateway.IntegrationType.HTTP,
157 | options=apigateway.IntegrationOptions(
158 | connection_type=apigateway.ConnectionType.VPC_LINK,
159 | vpc_link=api_vpc_link,
160 | integration_responses=[
161 | apigateway.IntegrationResponse(
162 | status_code="200",
163 | ),
164 | apigateway.IntegrationResponse(
165 | status_code="400",
166 | selection_pattern=r"^\[Error\].*",
167 | response_templates={
168 | "application/json": "{\"state\":\"error\",\"message\":\"$util.escapeJavaScript($input.path('$.errorMessage'))\"}",
169 | }
170 | ),
171 | apigateway.IntegrationResponse(
172 | status_code="500",
173 | selection_pattern=r"^\[Error\].*",
174 | response_templates={
175 | "application/json": "{\"state\":\"error\",\"message\":\"$util.escapeJavaScript($input.path('$.errorMessage'))\"}",
176 | }
177 | )
178 | ],
179 | passthrough_behavior=apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES,
180 | ),
181 | integration_http_method="GET",
182 | uri="http://{}/db-health/".format(private_lb.nlb.load_balancer_dns_name),
183 | )
184 |
185 | stress_resource.add_method(
186 | "GET",
187 | stress_integration,
188 | method_responses=[
189 | apigateway.MethodResponse(status_code="200",
190 | response_parameters={
191 | "method.response.header.Content-Type": True,
192 | "method.response.header.Content-Length": True,
193 | "method.response.header.Connection": True,
194 | "method.response.header.Server": True
195 | }),
196 | apigateway.MethodResponse(status_code="400"),
197 | apigateway.MethodResponse(status_code="500"),
198 | ],
199 | )
200 |
201 | ssm.StringParameter(self, "OrderApiDNSEndpoint",
202 | description="Order API DNS endpoint",
203 | parameter_name=TradeParameterName.TRADE_ORDER_API_ENDPOINT.value,
204 | string_value=restful_trades.url)
205 |
206 | ssm.StringParameter(self, "TradeStockAPIID",
207 | parameter_name="TradeStockAPIID",
208 | string_value=restful_trades.rest_api_id,
209 | )
210 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/trade_confirms_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | aws_ecs as ecs,
3 | aws_ec2 as ec2,
4 | aws_ecr_assets as ecr,
5 | aws_ssm as ssm,
6 | aws_iam as iam
7 | )
8 | import os
9 | import shutil
10 | import aws_cdk as cdk
11 | from aws_cdk import Stack
12 | from cdk_ecs_service_extensions import (
13 | Container,
14 | Environment,
15 | Service,
16 | ServiceDescription,
17 | EnvironmentCapacityType,
18 | AutoScalingOptions
19 | )
20 | from trade_utils.private_lb_extension import PrivateAlbExtension
21 | from trade_utils.trade_parameter_name import TradeParameterName
22 | from trade_utils.x_ray_extension import XRayExtension
23 | from constructs import Construct
24 |
25 |
26 | class TradeConfirmsStack(Stack):
27 |
28 | def __init__(self, scope: Construct, construct_id: str, vpc: ec2.Vpc, **kwargs) -> None:
29 | super().__init__(scope, construct_id, **kwargs)
30 |
31 |         # To enable X-Ray, pull the amazon/aws-xray-daemon image from Docker Hub,
32 |         # push it to your own ECR repository, then uncomment the XRayExtension below.
33 |
34 | confirms_api_image = ecr.DockerImageAsset(self, 'confirms_api_image',
35 | directory=os.path.join(os.path.dirname('.'), 'confirms_api'))
36 | cluster = ecs.Cluster(self, "TradeConfirmsCluster", container_insights=True, vpc=vpc)
37 | service = ServiceDescription()
38 | container = Container(cpu=256, memory_mib=512, traffic_port=80,
39 | image=ecs.ContainerImage.from_docker_image_asset(asset=confirms_api_image)
40 | )
41 |
42 |
43 |
44 | service.add(container)
45 | self.private_lb = PrivateAlbExtension()
46 | service.add(self.private_lb)
47 |         # service.add(XRayExtension(image_id="<your-ecr-xray-image-uri>"))  # enable once the X-Ray image is copied into ECR
48 |
49 | environment = Environment(self, "TradeConfirmsDev", vpc=vpc, cluster=cluster,
50 | capacity_type=EnvironmentCapacityType.FARGATE)
51 |
52 | task_role = iam.Role(self, "TradeConfirmsTaskRole",
53 | assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
54 | task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["ssm:GetParameter"]))
55 |
56 | self.service = Service(self, "ConfirmsApi", environment=environment,
57 | service_description=service,
58 | desired_count=len(vpc.availability_zones) * 2,
59 | auto_scale_task_count=AutoScalingOptions(
60 | min_task_count=len(vpc.availability_zones),
61 | max_task_count=len(vpc.availability_zones) * 5,
62 | target_cpu_utilization=70,
63 | target_memory_utilization=50),
64 | task_role=task_role)
65 |
66 | ssm.StringParameter(self, "ConfirmsApiDNSEndpoint",
67 | description="Confirms API DNS endpoint",
68 | parameter_name=TradeParameterName.TRADE_CONFIRMS_ENDPOINT.value,
69 | string_value=self.private_lb.alb.load_balancer_dns_name)
70 |
71 | ssm.StringParameter(self, "ConfirmsExchangeStatus",
72 | description="Indicates if the confirms exchange is available to accept trade requests",
73 | parameter_name=TradeParameterName.TRADE_CONFIRMS_EXCHANGE_STATUS.value,
74 | string_value="AVAILABLE")
75 |
76 | ssm.StringParameter(self, "ConfirmsGlitchFactor",
77 | description="Indicates how often the confirms api is glitching",
78 | parameter_name=TradeParameterName.TRADE_CONFIRMS_GLITCH_FACTOR.value,
79 | string_value="OFF")
80 |
81 | shutil.copy("trade_utils/trade_parameter_name.py", "confirms_api/trade_parameter_name.py")
82 |
83 | cdk.CfnOutput(self, "ConfirmsAPI.ALB.DNSEndpoint", value=self.private_lb.alb.load_balancer_dns_name)
84 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/trade_database.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | aws_ec2 as ec2,
3 | aws_rds as rds,
4 | aws_iam as iam,
5 | aws_ssm as ssm
6 | )
7 | from constructs import Construct
8 | import aws_cdk as cdk
9 | from cdk_ecs_service_extensions import Service
10 |
11 | from trade_utils.trade_parameter_name import TradeParameterName
12 |
13 |
14 | class TradeDatabaseStack(cdk.Stack):
15 |
16 | def __init__(self, scope: Construct, construct_id: str, vpc: ec2.Vpc, secondary_region: str, **kwargs) -> None:
17 | super().__init__(scope, construct_id, **kwargs)
18 |
19 | readers = []
20 | index = 1
21 | for az in vpc.availability_zones:
22 | reader = rds.ClusterInstance.serverless_v2("readerV2-{}".format(index), scale_with_writer=index == 1,
23 | enable_performance_insights=True)
24 |
25 | readers.append(reader)
26 | index += 1
27 |
28 | writer = rds.ClusterInstance.serverless_v2("writer", publicly_accessible=False,
29 | enable_performance_insights=True)
30 | database_name = 'trades'
31 | cluster_admin = "clusteradmin"
32 |
33 | parameter_group = rds.ParameterGroup(
34 | self,
35 | "ParameterGroup",
36 | engine=rds.DatabaseClusterEngine.aurora_postgres(
37 | version=rds.AuroraPostgresEngineVersion.VER_16_2
38 | ),
39 | parameters={
40 | "max_connections": "28"
41 | }
42 | )
43 |
44 | self.cluster = rds.DatabaseCluster(self, "TradeCluster",
45 | engine=rds.DatabaseClusterEngine.aurora_postgres(
46 | version=rds.AuroraPostgresEngineVersion.VER_16_2),
47 | credentials=rds.Credentials.from_generated_secret(cluster_admin),
48 | writer=writer,
49 | readers=readers,
50 | serverless_v2_min_capacity=0.5, serverless_v2_max_capacity=2,
51 | storage_type=rds.DBClusterStorageType.AURORA_IOPT1,
52 | storage_encrypted=False,
53 | vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
54 | vpc=vpc, default_database_name=database_name, cluster_identifier='stock',
55 | backup=rds.BackupProps(retention=cdk.Duration.days(15)),
56 | parameter_group=parameter_group,
57 | preferred_maintenance_window="Sun:23:45-Mon:00:15",
58 | cloudwatch_logs_exports=["postgresql"],
59 | cloudwatch_logs_retention=cdk.aws_logs.RetentionDays.TWO_WEEKS)
60 |
61 | global_cluster = rds.CfnGlobalCluster(
62 | self, "global-trade-cluster",
63 | deletion_protection=False,
64 | global_cluster_identifier='global-trade-cluster',
65 | source_db_cluster_identifier=self.cluster.cluster_identifier
66 | )
67 |
68 | order_api_user_name = "order_api_user"
69 | order_api_secret = rds.DatabaseSecret(self, "order_api_secret", username=order_api_user_name,
70 | secret_name="order_api_db_secret", master_secret=self.cluster.secret,
71 | exclude_characters="{}[]()'\"", dbname=database_name)
72 | order_api_secret.attach(self.cluster)
73 | order_api_secret.add_replica_region(secondary_region)
74 | self.cluster.add_rotation_single_user(automatically_after=cdk.Duration.days(1))
75 | self.cluster.add_rotation_multi_user(order_api_user_name, automatically_after=cdk.Duration.days(1),
76 | secret=order_api_secret)
77 |
78 | self.cluster.metric_serverless_database_capacity(period=cdk.Duration.minutes(10)).create_alarm(self, "capacity",
79 | threshold=1.5,
80 | evaluation_periods=3)
81 | self.cluster.metric_acu_utilization(period=cdk.Duration.minutes(10)).create_alarm(self, "alarm",
82 | evaluation_periods=3,
83 | threshold=90)
84 |
85 | self.cluster_credentials = self.cluster.secret
86 | admin_client = ec2.BastionHostLinux(self, "AdminClient", instance_name="AdminClient", vpc=vpc,
87 | require_imdsv2=True,
88 | subnet_selection=ec2.SubnetSelection(
89 | subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))
90 |
91 | proxy = self.cluster.add_proxy("proxy", borrow_timeout=cdk.Duration.seconds(30), max_connections_percent=95,
92 | secrets=[order_api_secret], vpc=vpc, db_proxy_name="TradeProxy")
93 |
94 | # allow admin actions
95 | admin_client.role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["secretsmanager:ListSecrets"]))
96 | admin_client.role.add_to_policy(
97 | iam.PolicyStatement(resources=["*"], actions=["elasticloadbalancing:DescribeLoadBalancers"]))
98 | admin_client.role.add_to_policy(
99 | iam.PolicyStatement(resources=["*"], actions=["ssm:GetParameter", "ssm:PutParameter"]))
100 | self.cluster_credentials.grant_read(admin_client.role)
101 | order_api_secret.grant_read(admin_client.role)
102 | proxy.grant_connect(admin_client.role, cluster_admin)
103 | proxy.grant_connect(admin_client.role, order_api_user_name)
104 | self.cluster.connections.allow_from(admin_client, ec2.Port.tcp(5432))
105 | proxy.connections.allow_from(admin_client, ec2.Port.tcp(5432))
106 |
107 | # allow DML from order api
108 | self.task_role = iam.Role(self, "TradingApiTaskRole",
109 | role_name=cdk.PhysicalName.GENERATE_IF_NEEDED,
110 | assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
111 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["secretsmanager:ListSecrets"]))
112 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["ssm:GetParameter"]))
113 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["cloudwatch:PutMetricData"]))
114 | order_api_secret.grant_read(self.task_role)
115 | proxy.grant_connect(self.task_role, order_api_user_name)
116 | self.cluster.connections.allow_from(ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(5432))
117 | proxy.connections.allow_from(ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(5432))
118 |         # Preferable to the CIDR-wide rule above, but allowing from the ECS service here triggers a (seemingly spurious) cyclic-reference error, which is confusing.
119 | # self.cluster.connections.allow_from(service.ecs_service, ec2.Port.tcp(5432))
120 |
121 | ro_proxy_sg = ec2.SecurityGroup(self, "TradeProxyReadOnlySG", vpc=vpc)
122 |         ro_proxy_sg.add_ingress_rule(ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(5432),
123 |                                      "access from any IP in the VPC for ECS tasks")
124 | reader_endpoint = rds.CfnDBProxyEndpoint(self, 'TradeProxyReadOnlyEndpoint', db_proxy_name=proxy.db_proxy_name,
125 | target_role='READ_ONLY',
126 | db_proxy_endpoint_name='TradeProxyReadOnlyEndpoint',
127 | vpc_subnet_ids=vpc.select_subnets(
128 | subnet_type=ec2.SubnetType.PRIVATE_ISOLATED).subnet_ids,
129 | vpc_security_group_ids=[ro_proxy_sg.security_group_id]
130 | )
131 | reader_endpoint.node.add_dependency(proxy)
132 |
133 | ssm.StringParameter(self, "TradeDatabaseSecretId",
134 | description="Trade Database Secret Id",
135 | parameter_name=TradeParameterName.TRADE_DATABASE_SECRET_ID.value,
136 | string_value=self.cluster.secret.secret_name)
137 |
138 | ssm.StringParameter(self, "TradeOrderApiDbSecretId",
139 | description="Order API Database Secret Id",
140 | parameter_name=TradeParameterName.TRADE_ORDER_API_SECRET_ID.value,
141 | string_value=order_api_secret.secret_name)
142 |
143 | ssm.StringParameter(self, "TradeRdsProxyEndpoint",
144 | description="Trade Database Proxy Endpoint",
145 | parameter_name=TradeParameterName.TRADE_RDS_PROXY_ENDPOINT.value,
146 | string_value=proxy.endpoint)
147 |
148 | ssm.StringParameter(self, "TradeRdsProxyReadOnlyEndpoint",
149 | description="Trade Database Proxy Read Only Endpoint",
150 | parameter_name=TradeParameterName.TRADE_RDS_PROXY_READ_ONLY_ENDPOINT.value,
151 | string_value=reader_endpoint.attr_endpoint)
152 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/trade_database_secondary_stack.py:
--------------------------------------------------------------------------------
1 | from aws_cdk import (
2 | aws_ec2 as ec2,
3 | aws_rds as rds,
4 | aws_iam as iam,
5 | aws_ssm as ssm,
6 | aws_secretsmanager as secretsmanager
7 | )
8 | from constructs import Construct
9 | import aws_cdk as cdk
10 | from cdk_ecs_service_extensions import Service
11 |
12 | from trade_utils.trade_parameter_name import TradeParameterName
13 |
14 |
15 | class TradeDatabaseSecondaryStack(cdk.Stack):
16 | def __init__(self, scope: Construct, construct_id: str, vpc: ec2.Vpc, **kwargs) -> None:
17 | super().__init__(scope, construct_id, **kwargs)
18 |
19 | readers = []
20 | index = 1
21 | for az in vpc.availability_zones:
22 | reader = rds.ClusterInstance.serverless_v2("readerV2-{}".format(index), scale_with_writer=index == 1,
23 | enable_performance_insights=True)
24 | readers.append(reader)
25 | index += 1
26 |
27 | writer = rds.ClusterInstance.serverless_v2("writer", publicly_accessible=False,
28 | enable_performance_insights=True)
29 |
30 | self.cluster = rds.DatabaseCluster(self, "TradeCluster",
31 | engine=rds.DatabaseClusterEngine.aurora_postgres(
32 | version=rds.AuroraPostgresEngineVersion.VER_16_2),
33 | writer=writer,
34 | readers=readers,
35 | serverless_v2_min_capacity=0.5, serverless_v2_max_capacity=2,
36 | storage_type=rds.DBClusterStorageType.AURORA_IOPT1,
37 | storage_encrypted=False,
38 | vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
39 | vpc=vpc,
40 | cluster_identifier='stock',
41 | backup=rds.BackupProps(retention=cdk.Duration.days(15)),
42 | preferred_maintenance_window="Sun:23:45-Mon:00:15",
43 | cloudwatch_logs_exports=["postgresql"],
44 | cloudwatch_logs_retention=cdk.aws_logs.RetentionDays.TWO_WEEKS)
45 |
46 | cfn_cluster = self.cluster.node.default_child
47 | cfn_cluster.global_cluster_identifier = "global-trade-cluster"
48 | cfn_cluster.master_username = None
49 | cfn_cluster.master_user_password = None
50 |
51 | order_api_user_name = "order_api_user"
52 |
53 | self.task_role = iam.Role(self, "TradingApiTaskRole",
54 | role_name=cdk.PhysicalName.GENERATE_IF_NEEDED,
55 | assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
56 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["secretsmanager:ListSecrets"]))
57 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["ssm:GetParameter"]))
58 | self.task_role.add_to_policy(iam.PolicyStatement(resources=["*"], actions=["cloudwatch:PutMetricData"]))
59 |
60 | order_api_db_secret = secretsmanager.Secret.from_secret_name_v2(self, "order_api_db_secret",
61 | "order_api_db_secret")
62 | order_api_db_secret.grant_read(self.task_role)
63 |
64 | proxy = self.cluster.add_proxy("proxy", borrow_timeout=cdk.Duration.seconds(30), max_connections_percent=95,
65 | secrets=[order_api_db_secret], vpc=vpc, db_proxy_name="TradeProxy")
66 |
67 | proxy.grant_connect(self.task_role, order_api_user_name)
68 | self.cluster.connections.allow_from(ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(5432))
69 | proxy.connections.allow_from(ec2.Peer.ipv4(vpc.vpc_cidr_block), ec2.Port.tcp(5432))
70 |
71 | reader_endpoint = rds.CfnDBProxyEndpoint(self, 'TradeProxyReadOnlyEndpoint', db_proxy_name=proxy.db_proxy_name,
72 | target_role='READ_ONLY',
73 | db_proxy_endpoint_name='TradeProxyReadOnlyEndpoint',
74 | vpc_subnet_ids=vpc.select_subnets(
75 | subnet_type=ec2.SubnetType.PRIVATE_ISOLATED).subnet_ids
76 | )
77 | reader_endpoint.node.add_dependency(proxy)
78 |
79 | ssm.StringParameter(self, "TradeOrderApiDbSecretId",
80 | description="Order API Database Secret Id",
81 | parameter_name=TradeParameterName.TRADE_ORDER_API_SECRET_ID.value,
82 | string_value=order_api_db_secret.secret_name)
83 |
84 | ssm.StringParameter(self, "TradeRdsProxyEndpoint",
85 | description="Trade Database Proxy Endpoint",
86 | parameter_name=TradeParameterName.TRADE_RDS_PROXY_ENDPOINT.value,
87 | string_value=proxy.endpoint)
88 |
89 | ssm.StringParameter(self, "TradeRdsProxyReadOnlyEndpoint",
90 | description="Trade Database Proxy Read Only Endpoint",
91 | parameter_name=TradeParameterName.TRADE_RDS_PROXY_READ_ONLY_ENDPOINT.value,
92 | string_value=reader_endpoint.attr_endpoint)
93 |
94 | ssm.StringParameter(self, "TradeRdsSecondaryClusterArn",
95 | description="Trade Database Secondary Cluster ARN",
96 | parameter_name=TradeParameterName.TRADE_RDS_SECONDARY_CLUSTER_ARN.value,
97 | string_value=self.cluster.cluster_arn)
98 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_stock/vpc_stack.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as cdk
2 | from aws_cdk import (
3 | aws_ec2 as ec2
4 | )
5 | from aws_cdk import Stack
6 | from constructs import Construct
7 |
8 | # ToDo: consider making this a dict and attaching an iam.PolicyStatement() endpoint policy to each, to keep access locked down
9 | interface_endpoints = ['ecr.dkr', 'ecr.api', 'xray', 'logs', 'ssm', 'ssmmessages', 'ec2messages', 'secretsmanager',
10 |                        'elasticloadbalancing', 'monitoring']  # guardduty-data
11 | # TODO: watch out for this if you have GuardDuty turned on with your private VPC:
12 | # 1. the aws-guardduty-agent-fargate container was attempting to download and failing;
13 | #    I had to disable GuardDuty to deploy successfully.
14 | # CannotPullContainerError: pull image manifest has been retried 1 time(s):
15 | # failed to resolve ref 593207742271.dkr.ecr.us-east-1.amazonaws.com/aws-guardduty-agent-fargate:v1.0.1-Fg_x86_64:
16 | # pulling from host 593207742271.dkr.ecr.us-east-1.amazonaws.com failed
17 |
18 |
19 |
20 | class VpcStack(Stack):
21 |
22 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
23 | super().__init__(scope, construct_id, **kwargs)
24 |
25 | self.vpc: ec2.IVpc = ec2.Vpc(self, "VPC", max_azs=3, subnet_configuration=[
26 | ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED, name="Service", cidr_mask=24),
27 | ])
28 | self.vpc.add_flow_log("TradeStockVpcFlow")
29 | vpc_peer = ec2.Peer.ipv4(self.vpc.vpc_cidr_block)
30 | endpoint_sg = ec2.SecurityGroup(self, "endpoint_sg", security_group_name="InterfaceEndpointsSG",
31 |                                         description="allow access to VPC Endpoints", vpc=self.vpc,
32 | allow_all_outbound=True)
33 | cdk.Tags.of(endpoint_sg).add("Name", "InterfaceEndpoints")
34 | endpoint_sg.add_ingress_rule(vpc_peer, ec2.Port.tcp(443))
35 | endpoint_sg.add_ingress_rule(vpc_peer, ec2.Port.tcp(80))
36 |
37 | # ToDo: endpoint policies should be in place before you go to production.
38 |
39 | for endpoint in interface_endpoints:
40 | self.vpc.add_interface_endpoint(
41 | endpoint,
42 | service=ec2.InterfaceVpcEndpointAwsService(endpoint, port=443),
43 | private_dns_enabled=True,
44 | security_groups=[endpoint_sg])
45 | # interface_endpoint.add_to_policy(iam.PolicyStatement())
46 | # support here https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html
47 |
48 | self.vpc.add_gateway_endpoint("S3Endpoint", service=ec2.GatewayVpcEndpointAwsService.S3)
49 | # ToDo: gateway_endpoint.add_to_policy(iam.PolicyStatement()) -- allow for ECR and Amazon Linux policies
50 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/engineering-resilient-systems-on-aws/AvailableTrade/d67282410fb02c938a6b1d628cd018ff89b2cc48/src/trade-stock/trade_utils/__init__.py
--------------------------------------------------------------------------------
/src/trade-stock/trade_utils/private_lb_extension.py:
--------------------------------------------------------------------------------
1 | import builtins
2 | import typing
3 | import aws_cdk as cdk
4 | from aws_cdk import (
5 | Duration,
6 | aws_elasticloadbalancingv2 as elb,
7 | aws_ecs as ecs,
8 | aws_ec2 as ec2,
9 | aws_elasticloadbalancingv2_targets as targets,
10 | )
11 | from cdk_ecs_service_extensions import (
12 | HttpLoadBalancerProps,
13 | Service,
14 | ServiceExtension,
15 | ServiceBuild
16 | )
17 | from constructs import Construct
18 |
19 |
20 | class PrivateAlbExtension(ServiceExtension):
21 | def __init__(self, requests_per_target=None):
22 | super().__init__("load-balancer")
23 | self.alb_listener = None
24 | self.alb = None
25 | self.nlb = None
26 | self.nlb_listener = None
27 | self.requests_per_target = requests_per_target
28 | self.props = HttpLoadBalancerProps(requests_per_target=self.requests_per_target)
29 |
30 | def prehook(self, parent: Service, scope: Construct) -> None:
31 | self._parent_service = parent
32 | self._scope = scope
33 | self.alb = elb.ApplicationLoadBalancer(
34 | scope,
35 | "{}-private-alb".format(self._parent_service.id),
36 | vpc=self._parent_service.vpc,
37 | internet_facing=False,
38 | vpc_subnets=ec2.SubnetSelection(
39 | subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
40 | ),
41 | cross_zone_enabled=True
42 | )
43 |
44 | self.alb_listener = self.alb.add_listener(
45 | "{}-alb-listener".format(self._parent_service.id), port=80, open=True
46 | )
47 |
48 | self.nlb = elb.NetworkLoadBalancer(
49 | scope,
50 | "{}-private-nlb".format(self._parent_service.id),
51 | vpc=self._parent_service.vpc,
52 | internet_facing=False,
53 | cross_zone_enabled=True,
54 | vpc_subnets=ec2.SubnetSelection(
55 | subnet_type=ec2.SubnetType.PRIVATE_ISOLATED # TODO - for sidecar docker images
56 | ),
57 | )
58 |
59 | target_group = elb.NetworkTargetGroup(
60 | scope,
61 | "{}-alb-target_group".format(self._parent_service.id),
62 | port=80,
63 | target_type=elb.TargetType.ALB,
64 | protocol=elb.Protocol.TCP,
65 | health_check=elb.HealthCheck(enabled=True),
66 | vpc=self._parent_service.vpc,
67 | targets=[targets.AlbTarget(self.alb, 80)],
68 | )
69 |
70 | self.nlb_listener = self.nlb.add_listener(
71 | "{}-nlb-listener".format(self._parent_service.id),
72 | port=80,
73 | default_action=elb.NetworkListenerAction.forward([target_group]),
74 | )
75 |
76 | cdk.CfnOutput(
77 | scope,
78 | "{}-nlb-dns-output".format(self._parent_service.id),
79 | value=self.nlb.load_balancer_dns_name,
80 | )
81 |
82 | def use_service(
83 | self, service: typing.Union[ecs.Ec2Service, ecs.FargateService]
84 | ) -> None:
85 |
86 | target_group = self.alb_listener.add_targets(
87 | self._parent_service.id,
88 | deregistration_delay=Duration.seconds(10),
89 | port=80,
90 | targets=[service]
91 | # health_check=elb.HealthCheck(path='/health/') # customize health check here if desired
92 | )
93 | target_group.set_attribute("load_balancing.cross_zone.enabled", "true")
94 |
95 | if self.requests_per_target:
96 | if not self._parent_service.scalable_task_count:
97 | raise Exception(
98 | "Auto scaling target for the service {} hasn't been configured. Please use Service construct to configure 'minTaskCount' and 'maxTaskCount'.".format(
99 | self._parent_service.id
100 | )
101 | )
102 | self._parent_service.scalable_task_count.scale_on_request_count(
103 | "{}-target-request-count-{}".format(
104 | self._parent_service.id, self.requests_per_target
105 | ),
106 | requests_per_target=self.requests_per_target,
107 | target_group=target_group,
108 | )
109 | self._parent_service.enable_auto_scaling_policy()
110 | self._parent_service.ecs_service.enable_deployment_alarms()
111 |
112 | def modify_service_props(
113 | self,
114 | *,
115 | cluster: ecs.ICluster,
116 | task_definition: ecs.TaskDefinition,
117 | assign_public_ip: typing.Optional[builtins.bool] = None,
118 | cloud_map_options: typing.Optional[ecs.CloudMapOptions] = None,
119 | desired_count: int = None,
120 | health_check_grace_period: typing.Optional[Duration] = None,
121 | max_healthy_percent: int = None,
122 | min_healthy_percent: int = None
123 | ) -> ServiceBuild:
124 | build = ServiceBuild(
125 | cluster=cluster,
126 | task_definition=task_definition,
127 | assign_public_ip=False, # override
128 | cloud_map_options=cloud_map_options,
129 | desired_count=desired_count,
130 | health_check_grace_period=Duration.minutes(1),
131 | max_healthy_percent=max_healthy_percent,
132 | min_healthy_percent=min_healthy_percent,
133 | )
134 | return build
135 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_utils/trade_parameter_name.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class TradeParameterName(Enum):
5 | TRADE_CONFIRMS_ENDPOINT = 'trade_confirms_endpoint'
6 | TRADE_CONFIRMS_EXCHANGE_STATUS = 'trade_confirms_exchange_status'
7 | TRADE_CONFIRMS_GLITCH_FACTOR = 'trade_confirms_glitch_factor'
8 | TRADE_ORDER_ENDPOINT = 'trade_order_endpoint'
9 | TRADE_ORDER_API_ENDPOINT = 'trade_order_global_endpoint'
10 | TRADE_RDS_PROXY_ENDPOINT = 'trade_rds_proxy_endpoint'
11 | TRADE_RDS_PROXY_READ_ONLY_ENDPOINT = 'trade_rds_proxy_read_only_endpoint'
12 | TRADE_DATABASE_SECRET_ID = 'trade_db_secret_id'
13 | TRADE_ORDER_API_SECRET_ID = 'trade_order_api_secret_id'
14 | TRADE_RDS_SECONDARY_CLUSTER_ARN = 'trade_rds_secondary_cluster_arn'
15 |
--------------------------------------------------------------------------------
/src/trade-stock/trade_utils/x_ray_extension.py:
--------------------------------------------------------------------------------
1 | import aws_cdk as cdk
2 | from constructs import Construct
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_iam as iam,
6 | Stack
7 | )
8 | from cdk_ecs_service_extensions import (
9 | Service,
10 | ServiceExtension,
11 | )
12 |
13 |
14 | class XRayExtension(ServiceExtension):
15 | def __init__(self, image_id: str, requests_per_target=None):
16 | super().__init__("xray-sidecar")
17 | self.requests_per_target = requests_per_target
18 | self.image_id = image_id
19 |
20 | def prehook(self, service: Service, scope: Construct) -> None:
21 | self._parent_service = service
22 | self._scope = scope
23 |
24 | # docker pull amazon/aws-xray-daemon
25 | # aws ecr get-login-password --region $AWS_PRIMARY_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_PRIMARY_REGION.amazonaws.com
26 | # aws ecr create-repository --repository-name amazon/aws-xray-daemon
27 | # docker tag amazon/aws-xray-daemon $AWS_ACCOUNT_ID.dkr.ecr.$AWS_PRIMARY_REGION.amazonaws.com/amazon/aws-xray-daemon:latest
28 | # docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_PRIMARY_REGION.amazonaws.com/amazon/aws-xray-daemon:latest
29 |
30 | def use_task_definition(self, task_definition: ecs.TaskDefinition) -> None:
31 | self.container = task_definition.add_container('xray',
32 | image=ecs.ContainerImage.from_registry(self.image_id),
33 | essential=True,
34 | memory_reservation_mib=256,
35 | environment={
36 | 'AWS_REGION': Stack.of(self._parent_service).region},
37 | health_check=ecs.HealthCheck(
38 | command=[
39 | 'CMD-SHELL',
40 | 'curl -s http://localhost:2000'
41 | ],
42 | start_period=cdk.Duration.seconds(10),
43 | interval=cdk.Duration.seconds(5),
44 | timeout=cdk.Duration.seconds(2),
45 | retries=3
46 | ),
47 | logging=ecs.AwsLogDriver(stream_prefix='xray'),
48 | user='1337')
49 | task_definition.task_role.add_managed_policy(
50 | iam.ManagedPolicy.from_aws_managed_policy_name('AWSXRayDaemonWriteAccess'))
51 |         # ToDo: grant pull permissions for the ECR repository holding the X-Ray image right here,
52 |         #       then delete the stack and redeploy cleanly.
53 |         # The image was built for Apple Silicon (arm64) and fails with "exec /xray: exec format error";
54 |         #       build or pull it for linux/amd64 (like the other image), or test the pull/tag/push steps from CloudShell.
55 |
56 | def resolve_container_dependencies(self) -> None:
57 | if not self.container:
58 | raise Exception('The container dependency hook was called before the container was created')
59 |
--------------------------------------------------------------------------------