├── .editorconfig ├── .github └── workflows │ ├── maintenance.yaml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── CHANGELOG.rst ├── CODE_OF_CONDUCT.rst ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── RELEASE.rst ├── docs ├── Makefile ├── _static │ └── images │ │ ├── troposphere_favicon.png │ │ ├── troposphere_logo.png │ │ └── troposphere_logo_small.png ├── apis │ ├── examples.rst │ ├── examples_toc.rst │ ├── tests.rst │ ├── tests_toc.rst │ ├── troposphere.helpers.rst │ ├── troposphere.openstack.rst │ ├── troposphere.rst │ └── troposphere_toc.rst ├── changelog.rst ├── code_of_conduct.rst ├── conf.py ├── contributing.md ├── index.rst ├── license.rst ├── quick_start.rst └── requirements.txt ├── examples ├── ApiGateway.py ├── ApplicationAutoScalingSample.py ├── ApplicationELB.py ├── ApplicationLB_FixedActions.py ├── AuroraServerlessRDS_SecretsManager.py ├── Autoscaling.py ├── AutoscalingHTTPRequests.py ├── Backup.py ├── Batch.py ├── BatchEventSnsLambda.py ├── CertificateManagerSample.py ├── ClassExtensions.py ├── CloudFormation_Init_ConfigSet.py ├── CloudFront_Distribution_S3.py ├── CloudFront_StreamingDistribution_S3.py ├── CloudTrail.py ├── CloudWatchEventsSample.py ├── CodeBuild.py ├── CodeDeploy.py ├── CodePipeline.py ├── CustomResource.py ├── Dlm.py ├── DynamoDB_Table.py ├── DynamoDB_Table_With_GSI_And_NonKeyAttributes_Projection.py ├── DynamoDB_Table_With_GlobalSecondaryIndex.py ├── DynamoDB_Table_With_KinesisStreamSpecification.py ├── DynamoDB_Tables_OnDemand.py ├── EC2Conditions.py ├── EC2InstanceSample.py ├── EC2_Remove_Ephemeral_Drive.py ├── ECRSample.py ├── ECSCluster.py ├── ECSFargate.py ├── EFS.py ├── ELBSample.py ├── EMR_Cluster.py ├── ElastiCacheRedis.py ├── ElasticBeanstalk_Nodejs_Sample.py ├── ElasticsearchDomain.py ├── Firehose_with_Redshift.py ├── IAM_Policies_SNS_Publish_To_SQS.py ├── IAM_Roles_and_InstanceProfiles.py ├── IAM_Users_Groups_and_Policies.py ├── IAM_Users_snippet.py ├── IoTAnalytics.py ├── IoTSample.py ├── Kinesis_Stream.py ├── Lambda.py ├── Mediapackage.py ├── Metadata.py ├── MskCluster.py ├── NatGateway.py ├── NetworkLB.py ├── OpenStack_AutoScaling.py ├── OpenStack_Server.py ├── OpsWorksSnippet.py ├── RDS_Snapshot_On_Delete.py ├── RDS_VPC.py ├── RDS_with_DBParameterGroup.py ├── Redshift.py ├── RedshiftClusterInVpc.py ├── RedshiftServerless.py ├── Route53_A.py ├── Route53_CNAME.py ├── Route53_RoundRobin.py ├── S3_Bucket.py ├── S3_Bucket_With_AccelerateConfiguration.py ├── S3_Bucket_With_Versioning_And_Lifecycle_Rules.py ├── S3_Website_Bucket_With_Retain_On_Delete.py ├── SQSDLQ.py ├── SQSEncrypt.py ├── SQS_With_CloudWatch_Alarms.py ├── SSMExample.py ├── Secretsmanager.py ├── Secretsmanager_Rds.py ├── Serverless_Api_Backend.py ├── Serverless_Deployment_Preference.py ├── Serverless_S3_Processor.py ├── VPC_EC2_Instance_With_Multiple_Dynamic_IPAddresses.py ├── VPC_With_VPN_Connection.py ├── VPC_single_instance_in_subnet.py ├── VpnEndpoint.py ├── WAF_Common_Attacks_Sample.py ├── WAF_Regional_Common_Attacks_Sample.py └── WaitObject.py ├── package-lock.json ├── package.json ├── pyproject.toml ├── requirements-dev.txt ├── requirements.txt ├── resources_aws.md ├── resources_openstack.md ├── scripts ├── cfn ├── cfn2py ├── gen.py ├── newgen.py ├── patches │ ├── __init__.py │ ├── amazonmq.py │ ├── apigateway.py │ ├── appflow.py │ ├── appsync.py │ ├── autoscaling.py │ ├── awslambda.py │ ├── batch.py │ ├── bedrock.py │ ├── budgets.py │ ├── cloudfront.py │ ├── cloudwatch.py │ ├── codebuild.py │ ├── codecommit.py │ ├── codedeploy.py │ ├── codepipeline.py │ ├── cognito.py │ ├── config.py │ ├── connect.py │ ├── databrew.py │ ├── datapipeline.py │ ├── datasync.py │ ├── dynamodb.py │ ├── ec2.py │ ├── ecr.py │ ├── ecs.py │ ├── efs.py │ ├── elasticbeanstalk.py │ ├── elasticloadbalancing.py │ ├── elasticloadbalancingv2.py │ ├── emr.py │ ├── emrserverless.py │ ├── events.py │ ├── firehose.py │ ├── fms.py │ ├── fsx.py │ ├── gamelift.py │ ├── glue.py │ ├── greengrassv2.py │ ├── groundstation.py │ ├── guardduty.py │ ├── iam.py │ ├── imagebuilder.py │ ├── iotanalytics.py │ ├── iotsitewise.py │ ├── iotwireless.py │ ├── lakeformation.py │ ├── lex.py │ ├── lightsail.py │ ├── macie.py │ ├── mediaconnect.py │ ├── mediapackage.py │ ├── memorydb.py │ ├── msk.py │ ├── networkfirewall.py │ ├── networkmanager.py │ ├── opsworks.py │ ├── pinpoint.py │ ├── qbusiness.py │ ├── quicksight.py │ ├── rds.py │ ├── redshift.py │ ├── redshiftserverless.py │ ├── route53.py │ ├── s3.py │ ├── s3outposts.py │ ├── sagemaker.py │ ├── ses.py │ ├── sns.py │ ├── sqs.py │ ├── ssm.py │ ├── stepfunctions.py │ ├── transfer.py │ ├── waf.py │ ├── wafregional.py │ ├── wafv2.py │ └── workspaces.py └── regen ├── setup.cfg ├── setup.py ├── tests ├── __init__.py ├── examples_output │ ├── ApiGateway.template │ ├── ApplicationAutoScalingSample.template │ ├── ApplicationELB.template │ ├── ApplicationLB_FixedActions.template │ ├── AuroraServerlessRDS_SecretsManager.template │ ├── Autoscaling.template │ ├── AutoscalingHTTPRequests.template │ ├── Backup.template │ ├── Batch.template │ ├── BatchEventSnsLambda.template │ ├── CertificateManagerSample.template │ ├── ClassExtensions.template │ ├── CloudFormation_Init_ConfigSet.template │ ├── CloudFront_Distribution_S3.template │ ├── CloudFront_StreamingDistribution_S3.template │ ├── CloudTrail.template │ ├── CloudWatchEventsSample.template │ ├── CodeBuild.template │ ├── CodeDeploy.template │ ├── CodePipeline.template │ ├── CustomResource.template │ ├── Dlm.template │ ├── DynamoDB2_Table.template │ ├── DynamoDB2_Table_With_GSI_And_NonKeyAttributes_Projection.template │ ├── DynamoDB2_Table_With_GlobalSecondaryIndex.template │ ├── DynamoDB_Table.template │ ├── DynamoDB_Table_With_GSI_And_NonKeyAttributes_Projection.template │ ├── DynamoDB_Table_With_GlobalSecondaryIndex.template │ ├── DynamoDB_Table_With_KinesisStreamSpecification.template │ ├── DynamoDB_Tables_OnDemand.template │ ├── EC2Conditions.template │ ├── EC2InstanceSample.template │ ├── EC2_Remove_Ephemeral_Drive.template │ ├── ECRSample.template │ ├── ECSCluster.template │ ├── ECSFargate.template │ ├── EFS.template │ ├── ELBSample.template │ ├── EMR_Cluster.template │ ├── ElastiCacheRedis.template │ ├── ElasticBeanstalk_Nodejs_Sample.template │ ├── ElasticsearchDomain.template │ ├── Firehose_with_Redshift.template │ ├── IAM_Policies_SNS_Publish_To_SQS.template │ ├── IAM_Roles_and_InstanceProfiles.template │ ├── IAM_Users_Groups_and_Policies.template │ ├── IAM_Users_snippet.template │ ├── IoTAnalytics.template │ ├── IoTSample.template │ ├── Kinesis_Stream.template │ ├── Lambda.template │ ├── Mediapackage.template │ ├── Metadata.template │ ├── MskCluster.template │ ├── NatGateway.template │ ├── NetworkLB.template │ ├── OpenStack_AutoScaling.template │ ├── OpenStack_Server.template │ ├── OpsWorksSnippet.template │ ├── RDS_Snapshot_On_Delete.template │ ├── RDS_VPC.template │ ├── RDS_with_DBParameterGroup.template │ ├── Redshift.template │ ├── RedshiftClusterInVpc.template │ ├── RedshiftServerless.template │ ├── Route53_A.template │ ├── Route53_CNAME.template │ ├── Route53_RoundRobin.template │ ├── S3_Bucket.template │ ├── S3_Bucket_With_AccelerateConfiguration.template │ ├── S3_Bucket_With_Versioning_And_Lifecycle_Rules.template │ ├── S3_Website_Bucket_With_Retain_On_Delete.template │ ├── SQSDLQ.template │ ├── SQSEncrypt.template │ ├── SQS_With_CloudWatch_Alarms.template │ ├── SSMExample.template │ ├── Secretsmanager.template │ ├── Secretsmanager_Rds.template │ ├── Serverless_Api_Backend.template │ ├── Serverless_Deployment_Preference.template │ ├── Serverless_S3_Processor.template │ ├── VPC_EC2_Instance_With_Multiple_Dynamic_IPAddresses.template │ ├── VPC_With_VPN_Connection.template │ ├── VPC_single_instance_in_subnet.template │ ├── VpnEndpoint.template │ ├── WAF_Common_Attacks_Sample.template │ ├── WAF_Regional_Common_Attacks_Sample.template │ ├── WaitObject.template │ └── __init__.template ├── test_apigateway.py ├── test_apigatewayv2.py ├── test_appconfig.py ├── test_appsync.py ├── test_asg.py ├── test_awslambda.py ├── test_basic.py ├── test_cloudformation.py ├── test_cloudfront.py ├── test_cloudwatch.py ├── test_codebuild.py ├── test_codecommit.py ├── test_config.py ├── test_dict.py ├── test_dlm.py ├── test_ec2.py ├── test_ecr.py ├── test_ecs.py ├── test_efs.py ├── test_eks.py ├── test_elasticloadbalancerv2.py ├── test_emr.py ├── test_examples.py ├── test_examples_template_generator.py ├── test_findinmap.py ├── test_fsx.py ├── test_guardduty.py ├── test_int_type.py ├── test_iottwinmaker.py ├── test_language_extensions.py ├── test_logs.py ├── test_mappings.py ├── test_networkfirewall.py ├── test_opensearchservice.py ├── test_opsworks.py ├── test_parameters.py ├── test_policies.py ├── test_rds.py ├── test_resiliencehub.py ├── test_route53.py ├── test_s3.py ├── test_scheduler.py ├── test_serverless.py ├── test_sqs.py ├── test_ssm.py ├── test_stepfunctions.py ├── test_tags.py ├── test_template.py ├── test_template_generator.py ├── test_userdata.py ├── test_validators.py ├── test_wafv2.py ├── test_yaml.py └── userdata_test_scripts │ ├── char_escaping.sh │ ├── empty.sh │ ├── one_line.sh │ └── simple.sh ├── tox.ini └── troposphere ├── __init__.py ├── accessanalyzer.py ├── acmpca.py ├── amazonmq.py ├── amplify.py ├── analytics.py ├── apigateway.py ├── apigatewayv2.py ├── appconfig.py ├── appflow.py ├── appintegrations.py ├── applicationautoscaling.py ├── applicationinsights.py ├── applicationsignals.py ├── appmesh.py ├── apprunner.py ├── appstream.py ├── appsync.py ├── apptest.py ├── aps.py ├── arczonalshift.py ├── ask.py ├── athena.py ├── auditmanager.py ├── autoscaling.py ├── autoscalingplans.py ├── awslambda.py ├── b2bi.py ├── backup.py ├── backupgateway.py ├── batch.py ├── bcmdataexports.py ├── bedrock.py ├── billingconductor.py ├── budgets.py ├── cassandra.py ├── ce.py ├── certificatemanager.py ├── chatbot.py ├── cleanrooms.py ├── cleanroomsml.py ├── cloud9.py ├── cloudformation.py ├── cloudfront.py ├── cloudtrail.py ├── cloudwatch.py ├── codeartifact.py ├── codebuild.py ├── codecommit.py ├── codeconnections.py ├── codedeploy.py ├── codeguruprofiler.py ├── codegurureviewer.py ├── codepipeline.py ├── codestar.py ├── codestarconnections.py ├── codestarnotifications.py ├── cognito.py ├── compat.py ├── comprehend.py ├── config.py ├── connect.py ├── connectcampaigns.py ├── connectcampaignsv2.py ├── constants.py ├── controltower.py ├── cur.py ├── customerprofiles.py ├── databrew.py ├── datapipeline.py ├── datasync.py ├── datazone.py ├── dax.py ├── deadline.py ├── detective.py ├── devopsguru.py ├── directoryservice.py ├── dlm.py ├── dms.py ├── docdb.py ├── docdbelastic.py ├── dynamodb.py ├── ec2.py ├── ecr.py ├── ecs.py ├── efs.py ├── eks.py ├── elasticache.py ├── elasticbeanstalk.py ├── elasticloadbalancing.py ├── elasticloadbalancingv2.py ├── elasticsearch.py ├── emr.py ├── emrcontainers.py ├── emrserverless.py ├── entityresolution.py ├── events.py ├── eventschemas.py ├── evidently.py ├── finspace.py ├── firehose.py ├── fis.py ├── fms.py ├── forecast.py ├── frauddetector.py ├── fsx.py ├── gamelift.py ├── globalaccelerator.py ├── glue.py ├── grafana.py ├── greengrass.py ├── greengrassv2.py ├── groundstation.py ├── guardduty.py ├── healthimaging.py ├── healthlake.py ├── helpers ├── __init__.py └── userdata.py ├── iam.py ├── identitystore.py ├── imagebuilder.py ├── inspector.py ├── inspectorv2.py ├── internetmonitor.py ├── invoicing.py ├── iot.py ├── iotanalytics.py ├── iotcoredeviceadvisor.py ├── iotevents.py ├── iotfleethub.py ├── iotfleetwise.py ├── iotsitewise.py ├── iotthingsgraph.py ├── iottwinmaker.py ├── iotwireless.py ├── ivs.py ├── ivschat.py ├── kafkaconnect.py ├── kendra.py ├── kendraranking.py ├── kinesis.py ├── kinesisanalyticsv2.py ├── kinesisvideo.py ├── kms.py ├── lakeformation.py ├── launchwizard.py ├── lex.py ├── licensemanager.py ├── lightsail.py ├── location.py ├── logs.py ├── lookoutequipment.py ├── lookoutmetrics.py ├── lookoutvision.py ├── m2.py ├── macie.py ├── managedblockchain.py ├── mediaconnect.py ├── mediaconvert.py ├── medialive.py ├── mediapackage.py ├── mediapackagev2.py ├── mediastore.py ├── mediatailor.py ├── memorydb.py ├── msk.py ├── mwaa.py ├── neptune.py ├── neptunegraph.py ├── networkfirewall.py ├── networkmanager.py ├── oam.py ├── omics.py ├── opensearchserverless.py ├── opensearchservice.py ├── openstack ├── __init__.py ├── heat.py ├── neutron.py └── nova.py ├── opsworks.py ├── opsworkscm.py ├── organizations.py ├── osis.py ├── panorama.py ├── paymentcryptography.py ├── pcaconnectorad.py ├── pcaconnectorscep.py ├── pcs.py ├── personalize.py ├── pinpoint.py ├── pinpointemail.py ├── pipes.py ├── policies.py ├── proton.py ├── qbusiness.py ├── qldb.py ├── quicksight.py ├── ram.py ├── rbin.py ├── rds.py ├── redshift.py ├── redshiftserverless.py ├── refactorspaces.py ├── rekognition.py ├── resiliencehub.py ├── resourceexplorer2.py ├── resourcegroups.py ├── robomaker.py ├── rolesanywhere.py ├── route53.py ├── route53profiles.py ├── route53recoverycontrol.py ├── route53recoveryreadiness.py ├── route53resolver.py ├── rum.py ├── s3.py ├── s3express.py ├── s3objectlambda.py ├── s3outposts.py ├── s3tables.py ├── sagemaker.py ├── scheduler.py ├── sdb.py ├── secretsmanager.py ├── securityhub.py ├── securitylake.py ├── serverless.py ├── servicecatalog.py ├── servicecatalogappregistry.py ├── servicediscovery.py ├── ses.py ├── shield.py ├── signer.py ├── simspaceweaver.py ├── sns.py ├── sqs.py ├── ssm.py ├── ssmcontacts.py ├── ssmincidents.py ├── ssmquicksetup.py ├── sso.py ├── stepfunctions.py ├── supportapp.py ├── synthetics.py ├── systemsmanagersap.py ├── template_generator.py ├── timestream.py ├── transfer.py ├── type_defs ├── __init__.py ├── compat.py └── protocols.py ├── utils.py ├── validators ├── __init__.py ├── acmpca.py ├── amazonmq.py ├── apigateway.py ├── apigatewayv2.py ├── appconfig.py ├── appmesh.py ├── appstream.py ├── appsync.py ├── athena.py ├── autoscaling.py ├── autoscalingplans.py ├── awslambda.py ├── backup.py ├── batch.py ├── cassandra.py ├── certificatemanager.py ├── chatbot.py ├── cloudformation.py ├── cloudfront.py ├── cloudwatch.py ├── codeartifact.py ├── codebuild.py ├── codecommit.py ├── codedeploy.py ├── codestarconnections.py ├── cognito.py ├── config.py ├── dlm.py ├── dms.py ├── dynamodb.py ├── ec2.py ├── ecr.py ├── ecs.py ├── efs.py ├── eks.py ├── elasticache.py ├── elasticbeanstalk.py ├── elasticloadbalancing.py ├── elasticloadbalancingv2.py ├── elasticsearch.py ├── emr.py ├── firehose.py ├── fms.py ├── fsx.py ├── globalaccelerator.py ├── glue.py ├── groundstation.py ├── iam.py ├── imagebuilder.py ├── iot.py ├── iottwinmaker.py ├── kinesis.py ├── kinesisanalyticsv2.py ├── kms.py ├── lex.py ├── logs.py ├── macie.py ├── mediastore.py ├── networkfirewall.py ├── opensearchservice.py ├── opsworks.py ├── opsworkscm.py ├── organizations.py ├── rds.py ├── rekognition.py ├── resiliencehub.py ├── resourcegroups.py ├── route53.py ├── route53resolver.py ├── s3.py ├── scheduler.py ├── secretsmanager.py ├── servicecatalog.py ├── sns.py ├── sqs.py ├── ssm.py ├── synthetics.py ├── transfer.py ├── waf.py ├── wafregional.py └── wafv2.py ├── verifiedpermissions.py ├── voiceid.py ├── vpclattice.py ├── waf.py ├── wafregional.py ├── wafv2.py ├── wisdom.py ├── workspaces.py ├── workspacesthinclient.py ├── workspacesweb.py └── xray.py /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | indent_style = space 8 | indent_size = 4 9 | 10 | [*.py] 11 | max_line_length = 79 12 | 13 | [*.{toml,yaml,yml}] 14 | indent_size = 2 15 | 16 | [Makefile] 17 | indent_style = tab 18 | 19 | [{package,package-lock}.json] 20 | indent_size = 2 21 | -------------------------------------------------------------------------------- /.github/workflows/maintenance.yaml: -------------------------------------------------------------------------------- 1 | name: Automated Maintenance 2 | on: 3 | schedule: 4 | - cron: '0 13 * * *' 5 | workflow_dispatch: # Enables on-demand/manual triggering 6 | jobs: 7 | job: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - uses: actions/setup-python@v5 12 | with: 13 | python-version: "3.10" 14 | - run: | 15 | npm install 16 | npx pyright --version 17 | python -m pip install -r requirements-dev.txt 18 | make spec 19 | make regen 20 | make test 21 | make lint 22 | - name: Set spec version variable 23 | id: vars 24 | run: | 25 | echo "pr_title=Updates from spec version $(jq -r .ResourceSpecificationVersion CloudFormationResourceSpecification.json)" >> $GITHUB_OUTPUT 26 | - uses: peter-evans/create-pull-request@v6 27 | with: 28 | commit-message: | 29 | ${{ steps.vars.outputs.pr_title }} 30 | title: ${{ steps.vars.outputs.pr_title }} 31 | delete-branch: true 32 | body: | 33 | Autogenerated by maintenance action 34 | 35 | If tests are stuck on https://github.com/peter-evans/create-pull-request/issues/48: 36 | ["Manually close pull requests and immediately reopen them. This will enable `on: pull_request` workflows to run and be added as checks."](https://github.com/peter-evans/create-pull-request/blob/master/docs/concepts-guidelines.md#triggering-further-workflow-runs) 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # troposphere stuff 4 | autogen 5 | CloudFormationResourceSpecification* 6 | spec 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Packages 12 | *.egg 13 | *.eggs 14 | *.egg-info 15 | dist 16 | build 17 | eggs 18 | parts 19 | bin 20 | var 21 | sdist 22 | develop-eggs 23 | .installed.cfg 24 | lib 25 | lib64 26 | 27 | # Installer logs 28 | pip-log.txt 29 | 30 | # Unit test / coverage reports 31 | .coverage 32 | .tox 33 | nosetests.xml 34 | 35 | # Translations 36 | *.mo 37 | 38 | # Mr Developer 39 | .mr.developer.cfg 40 | .project 41 | .pydevproject 42 | .idea 43 | .venv 44 | 45 | # Vim 46 | *.sw* 47 | 48 | # Jetbrains 49 | *.iml 50 | 51 | # Git 52 | *.diff 53 | *.patch 54 | 55 | # Sphinx 56 | docs/_build/ 57 | 58 | # VSCode 59 | .vscode 60 | 61 | .ipynb_checkpoints 62 | */.ipynb_checkpoints/* 63 | *.ipynb 64 | 65 | .DS_Store 66 | node_modules 67 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: '24.4.2' 4 | hooks: 5 | - id: black 6 | 7 | - repo: https://github.com/timothycrosley/isort 8 | rev: '5.13.2' 9 | hooks: 10 | - id: isort 11 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | 8 | # Build from the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/conf.py 11 | 12 | # Specify the dependencies 13 | python: 14 | install: 15 | - requirements: docs/requirements.txt 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2017, Mark Peek 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 14 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 17 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 19 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 20 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 21 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 22 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 23 | POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.rst 3 | include .gitignore 4 | include requirements.txt 5 | recursive-include examples *.py 6 | -------------------------------------------------------------------------------- /RELEASE.rst: -------------------------------------------------------------------------------- 1 | Releasing 2 | ========= 3 | 4 | Steps to release a new version 5 | ------------------------------ 6 | 7 | - Change version in troposphere/\_\_init\_\_.py 8 | - Change version in docs/conf.py 9 | - Update CHANGELOG.md with changes made since last release 10 | - Verify release installs on Python 3: ``make release-test`` 11 | - Create a signed tag: ``git tag --sign -m "Release 1.1.1" 1.1.1`` 12 | - Build the distribution: python -m build --sdist --wheel . 13 | - Use twine to check the release: twine check dist/troposphere-1.1.1*[.whl,.gz] 14 | - Upload using twine: twine upload -s dist/troposphere-1.1.1*[.whl,.gz] 15 | - Push commits: ``git push`` 16 | - Push tag: ``git push --tags`` 17 | - Update github release page: https://github.com/cloudtools/troposphere/releases 18 | 19 | 20 | Helper to create CHANGELOG entries 21 | ---------------------------------- 22 | 23 | ``git log --reverse --pretty=format:"%s" | tail -100 | sed 's/^/* /'`` 24 | 25 | Helper to list supported resources 26 | ---------------------------------- 27 | 28 | ``grep -h 'resource_type = "AWS::' troposphere/* | sed 's/[ ]*resource_type = "'// | cut -f1-3 -d: | sort | uniq | sed 's/^/- /'`` 29 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | touch ../examples/__init__.py 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | rm -rf ../examples/__init__.py 22 | 23 | index: 24 | # sphinx-apidoc won't overwrite existing files, so remove them first 25 | rm -rf ./apis 26 | 27 | # sphinx-apidoc won't generate a package without a __init__ file 28 | # however, the troposphere tests fail if there is a __init__ file 29 | # so, create the file, create the docs and then remove __init__.py 30 | # sleep is there to ensure the system has time to place the file 31 | # before sphinx-apidoc is run 32 | touch ../examples/__init__.py 33 | sphinx-apidoc -o ./apis ../examples --tocfile examples_toc 34 | sphinx-apidoc -o ./apis ../troposphere --tocfile troposphere_toc 35 | sphinx-apidoc -o ./apis ../tests --tocfile tests_toc 36 | rm -rf ../examples/__init__.py 37 | -------------------------------------------------------------------------------- /docs/_static/images/troposphere_favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/docs/_static/images/troposphere_favicon.png -------------------------------------------------------------------------------- /docs/_static/images/troposphere_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/docs/_static/images/troposphere_logo.png -------------------------------------------------------------------------------- /docs/_static/images/troposphere_logo_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/docs/_static/images/troposphere_logo_small.png -------------------------------------------------------------------------------- /docs/apis/examples_toc.rst: -------------------------------------------------------------------------------- 1 | examples 2 | ======== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | examples 8 | -------------------------------------------------------------------------------- /docs/apis/tests_toc.rst: -------------------------------------------------------------------------------- 1 | tests 2 | ===== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | tests 8 | -------------------------------------------------------------------------------- /docs/apis/troposphere.helpers.rst: -------------------------------------------------------------------------------- 1 | troposphere.helpers package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | troposphere.helpers.userdata module 8 | ----------------------------------- 9 | 10 | .. automodule:: troposphere.helpers.userdata 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: troposphere.helpers 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /docs/apis/troposphere.openstack.rst: -------------------------------------------------------------------------------- 1 | troposphere.openstack package 2 | ============================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | troposphere.openstack.heat module 8 | --------------------------------- 9 | 10 | .. automodule:: troposphere.openstack.heat 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | troposphere.openstack.neutron module 16 | ------------------------------------ 17 | 18 | .. automodule:: troposphere.openstack.neutron 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | troposphere.openstack.nova module 24 | --------------------------------- 25 | 26 | .. automodule:: troposphere.openstack.nova 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: troposphere.openstack 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /docs/apis/troposphere_toc.rst: -------------------------------------------------------------------------------- 1 | troposphere 2 | =========== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | troposphere 8 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | .. include:: ../CHANGELOG.rst 5 | -------------------------------------------------------------------------------- /docs/code_of_conduct.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../CODE_OF_CONDUCT.rst 3 | -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | ```{include} ../CONTRIBUTING.md 2 | ``` 3 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. troposphere documentation master file, created by 2 | sphinx-quickstart on Fri Feb 1 14:27:25 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. include:: ../README.rst 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | :caption: Table of Contents 11 | 12 | quick_start 13 | apis/examples_toc 14 | apis/troposphere_toc 15 | apis/tests_toc 16 | 17 | changelog 18 | contributing 19 | code_of_conduct 20 | license 21 | 22 | Indices and tables 23 | ================== 24 | 25 | * :ref:`genindex` 26 | * :ref:`modindex` 27 | * :ref:`search` 28 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ======= 3 | 4 | .. include:: ../LICENSE 5 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | awacs>=2.0.0 2 | cfn_flip>=1.0.2 3 | myst_parser==2.0.0 4 | sphinx==6.2.1 5 | sphinx_rtd_theme==1.2.2 6 | readthedocs-sphinx-search==0.3.2 7 | -------------------------------------------------------------------------------- /examples/ApplicationAutoScalingSample.py: -------------------------------------------------------------------------------- 1 | from troposphere import Ref, Template 2 | from troposphere.applicationautoscaling import ( 3 | ScalableTarget, 4 | ScalingPolicy, 5 | StepAdjustment, 6 | StepScalingPolicyConfiguration, 7 | ) 8 | 9 | t = Template() 10 | 11 | scalable_target = ScalableTarget( 12 | "scalableTarget", 13 | MaxCapacity=2, 14 | MinCapacity=1, 15 | ResourceId="service/ecsStack", 16 | RoleARN="Access Management (IAM) role", 17 | ScalableDimension="ecs:service:DesiredCount", 18 | ServiceNamespace="ecs", 19 | ) 20 | 21 | scaling_policy = ScalingPolicy( 22 | "scalingPolicy", 23 | PolicyName="AStepPolicy", 24 | PolicyType="StepScaling", 25 | ScalingTargetId=Ref(scalable_target), 26 | StepScalingPolicyConfiguration=StepScalingPolicyConfiguration( 27 | AdjustmentType="PercentChangeInCapacity", 28 | Cooldown=60, 29 | MetricAggregationType="Average", 30 | StepAdjustments=[ 31 | StepAdjustment( 32 | MetricIntervalLowerBound=0, 33 | ScalingAdjustment=200, 34 | ), 35 | ], 36 | ), 37 | ) 38 | 39 | t = Template() 40 | 41 | t.add_resource(scalable_target) 42 | t.add_resource(scaling_policy) 43 | 44 | print(t.to_json()) 45 | -------------------------------------------------------------------------------- /examples/CertificateManagerSample.py: -------------------------------------------------------------------------------- 1 | from troposphere import Template 2 | from troposphere.certificatemanager import Certificate, DomainValidationOption 3 | 4 | t = Template() 5 | 6 | t.add_resource( 7 | Certificate( 8 | "mycert", 9 | DomainName="example.com", 10 | DomainValidationOptions=[ 11 | DomainValidationOption( 12 | DomainName="example.com", 13 | ValidationDomain="example.com", 14 | ), 15 | ], 16 | Tags=[ 17 | {"Key": "tag-key", "Value": "tag-value"}, 18 | ], 19 | ) 20 | ) 21 | 22 | print(t.to_json()) 23 | -------------------------------------------------------------------------------- /examples/ClassExtensions.py: -------------------------------------------------------------------------------- 1 | import troposphere 2 | import troposphere.ec2 3 | 4 | template = troposphere.Template() 5 | 6 | 7 | class TrustyInstance(troposphere.ec2.Instance): 8 | ImageId = "ami-xxxx" 9 | Monitoring = True 10 | 11 | 12 | class FrontendInstance(TrustyInstance): 13 | SecurityGroups = ["frontend"] 14 | InstanceType = "t1.micro" 15 | 16 | 17 | class ProcessingInstance(TrustyInstance): 18 | SecurityGroups = ["processing"] 19 | InstanceType = "m3.large" 20 | 21 | 22 | template.add_resource(FrontendInstance("jones1")) 23 | template.add_resource(ProcessingInstance("williams1", InstanceType="m2.large")) 24 | 25 | print(template.to_json()) 26 | -------------------------------------------------------------------------------- /examples/CodeBuild.py: -------------------------------------------------------------------------------- 1 | from troposphere import Template 2 | from troposphere.codebuild import Artifacts, Environment, Project, Source 3 | 4 | template = Template() 5 | template.set_version("2010-09-09") 6 | 7 | artifacts = Artifacts(Type="NO_ARTIFACTS") 8 | 9 | environment = Environment( 10 | ComputeType="BUILD_GENERAL1_SMALL", 11 | Image="aws/codebuild/java:openjdk-8", 12 | Type="LINUX_CONTAINER", 13 | EnvironmentVariables=[{"Name": "APP_NAME", "Value": "demo"}], 14 | ) 15 | 16 | source = Source( 17 | Location="codebuild-demo-test/0123ab9a371ebf0187b0fe5614fbb72c", Type="S3" 18 | ) 19 | 20 | project = Project( 21 | "DemoProject", 22 | Artifacts=artifacts, 23 | Environment=environment, 24 | Name="DemoProject", 25 | ServiceRole="arn:aws:iam::0123456789:role/codebuild-role", 26 | Source=source, 27 | ) 28 | template.add_resource(project) 29 | 30 | print(template.to_json()) 31 | -------------------------------------------------------------------------------- /examples/CustomResource.py: -------------------------------------------------------------------------------- 1 | from troposphere import Join, Ref, Template 2 | from troposphere.cloudformation import AWSCustomObject 3 | 4 | 5 | class CustomPlacementGroup(AWSCustomObject): 6 | resource_type = "Custom::PlacementGroup" 7 | 8 | props = {"ServiceToken": (str, True), "PlacementGroupName": (str, True)} 9 | 10 | 11 | t = Template() 12 | 13 | t.set_description( 14 | "Example template showing how a Lambda Function CustomResource might look" 15 | "For information on AWS Lambda-backed Custom Resources see:" 16 | "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/" 17 | "template-custom-resources-lambda.html" 18 | ) 19 | 20 | placementgroup_a = t.add_resource( 21 | CustomPlacementGroup( 22 | "ClusterGroup", 23 | ServiceToken=Join( 24 | "", 25 | [ 26 | "arn:aws:lambda:", 27 | Ref("AWS::Region"), 28 | ":", 29 | Ref("AWS::AccountId"), 30 | ":function:cfnPlacementGroup", 31 | ], 32 | ), 33 | PlacementGroupName="ExampleClusterGroup", 34 | ) 35 | ) 36 | 37 | print(t.to_json()) 38 | -------------------------------------------------------------------------------- /examples/ECRSample.py: -------------------------------------------------------------------------------- 1 | import awacs.ecr as ecr 2 | import awacs.iam as iam 3 | from awacs.aws import Allow, AWSPrincipal, PolicyDocument, Statement 4 | 5 | from troposphere import Template 6 | from troposphere.ecr import Repository 7 | 8 | t = Template() 9 | 10 | t.add_resource( 11 | Repository( 12 | "MyRepository", 13 | RepositoryName="test-repository", 14 | RepositoryPolicyText=PolicyDocument( 15 | Version="2008-10-17", 16 | Statement=[ 17 | Statement( 18 | Sid="AllowPushPull", 19 | Effect=Allow, 20 | Principal=AWSPrincipal( 21 | [ 22 | iam.ARN(account="123456789012", resource="user/Bob"), 23 | iam.ARN(account="123456789012", resource="user/Alice"), 24 | ] 25 | ), 26 | Action=[ 27 | ecr.GetDownloadUrlForLayer, 28 | ecr.BatchGetImage, 29 | ecr.BatchCheckLayerAvailability, 30 | ecr.PutImage, 31 | ecr.InitiateLayerUpload, 32 | ecr.UploadLayerPart, 33 | ecr.CompleteLayerUpload, 34 | ], 35 | ), 36 | ], 37 | ), 38 | ) 39 | ) 40 | 41 | print(t.to_json()) 42 | -------------------------------------------------------------------------------- /examples/ECSFargate.py: -------------------------------------------------------------------------------- 1 | from troposphere import Parameter, Ref, Template 2 | from troposphere.ecs import ( 3 | AwsvpcConfiguration, 4 | Cluster, 5 | ContainerDefinition, 6 | NetworkConfiguration, 7 | PortMapping, 8 | Service, 9 | TaskDefinition, 10 | ) 11 | 12 | t = Template() 13 | t.set_version("2010-09-09") 14 | t.add_parameter( 15 | Parameter( 16 | "Subnet", 17 | Type="AWS::EC2::Subnet::Id", 18 | Description="A VPC subnet ID for the container.", 19 | ) 20 | ) 21 | 22 | cluster = t.add_resource(Cluster("Cluster")) 23 | 24 | task_definition = t.add_resource( 25 | TaskDefinition( 26 | "TaskDefinition", 27 | RequiresCompatibilities=["FARGATE"], 28 | Cpu="256", 29 | Memory="512", 30 | NetworkMode="awsvpc", 31 | ContainerDefinitions=[ 32 | ContainerDefinition( 33 | Name="nginx", 34 | Image="nginx", 35 | Essential=True, 36 | PortMappings=[PortMapping(ContainerPort=80)], 37 | ) 38 | ], 39 | ) 40 | ) 41 | 42 | service = t.add_resource( 43 | Service( 44 | "NginxService", 45 | Cluster=Ref(cluster), 46 | DesiredCount=1, 47 | TaskDefinition=Ref(task_definition), 48 | LaunchType="FARGATE", 49 | NetworkConfiguration=NetworkConfiguration( 50 | AwsvpcConfiguration=AwsvpcConfiguration(Subnets=[Ref("Subnet")]) 51 | ), 52 | ) 53 | ) 54 | 55 | print(t.to_json()) 56 | -------------------------------------------------------------------------------- /examples/IAM_Roles_and_InstanceProfiles.py: -------------------------------------------------------------------------------- 1 | from awacs.aws import Allow, PolicyDocument, Principal, Statement 2 | from awacs.sts import AssumeRole 3 | 4 | from troposphere import Ref, Template 5 | from troposphere.iam import InstanceProfile, Role 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "AWS CloudFormation Sample Template: This template " 11 | "demonstrates the creation of IAM Roles and " 12 | "InstanceProfiles." 13 | ) 14 | 15 | cfnrole = t.add_resource( 16 | Role( 17 | "CFNRole", 18 | AssumeRolePolicyDocument=PolicyDocument( 19 | Statement=[ 20 | Statement( 21 | Effect=Allow, 22 | Action=[AssumeRole], 23 | Principal=Principal("Service", ["ec2.amazonaws.com"]), 24 | ) 25 | ] 26 | ), 27 | ) 28 | ) 29 | 30 | cfninstanceprofile = t.add_resource( 31 | InstanceProfile("CFNInstanceProfile", Roles=[Ref(cfnrole)]) 32 | ) 33 | 34 | print(t.to_json()) 35 | -------------------------------------------------------------------------------- /examples/Kinesis_Stream.py: -------------------------------------------------------------------------------- 1 | # This is an example of a Kinesis Stream 2 | 3 | import troposphere.kinesis as kinesis 4 | from troposphere import Output, Ref, Template 5 | 6 | template = Template() 7 | 8 | kinesis_stream = template.add_resource(kinesis.Stream("TestStream", ShardCount=1)) 9 | 10 | template.add_output( 11 | [ 12 | Output( 13 | "StreamName", 14 | Description="Stream Name (Physical ID)", 15 | Value=Ref(kinesis_stream), 16 | ), 17 | ] 18 | ) 19 | 20 | print(template.to_json()) 21 | -------------------------------------------------------------------------------- /examples/Mediapackage.py: -------------------------------------------------------------------------------- 1 | from troposphere import Template 2 | from troposphere.mediapackage import Channel, OriginEndpoint, OriginEndpointHlsPackage 3 | 4 | t = Template() 5 | t.set_version() 6 | 7 | t.add_resource(Channel("MediaPackage", Id="MediaPackageChannel")) 8 | 9 | t.add_resource( 10 | OriginEndpoint( 11 | "MediaPackageOriginEndpoint", 12 | ChannelId="MediaPackageChannel", 13 | Description="MediaPackage HLS endpoint", 14 | HlsPackage=OriginEndpointHlsPackage( 15 | ProgramDateTimeIntervalSeconds=0, 16 | PlaylistWindowSeconds=60, 17 | PlaylistType="NONE", 18 | IncludeIframeOnlyStream=False, 19 | SegmentDurationSeconds=6, 20 | UseAudioRenditionGroup=False, 21 | ), 22 | Id="MediaPackageOriginEndpoint", 23 | ManifestName="MediaPackageOriginEndpoint", 24 | Origination="ALLOW", 25 | ) 26 | ) 27 | 28 | print(t.to_json()) 29 | -------------------------------------------------------------------------------- /examples/Metadata.py: -------------------------------------------------------------------------------- 1 | from troposphere import Template 2 | 3 | t = Template() 4 | 5 | t.set_description("Example to show adding a Metadata section to the template") 6 | t.set_metadata( 7 | { 8 | "Comments": "Initial Draft", 9 | "LastUpdated": "Jan 1st 2015", 10 | "UpdatedBy": "First Last", 11 | "Version": "V1.0", 12 | } 13 | ) 14 | 15 | print(t.to_json()) 16 | -------------------------------------------------------------------------------- /examples/RDS_Snapshot_On_Delete.py: -------------------------------------------------------------------------------- 1 | # Converted from RDS_Snapshot_On_Delete.template located at: 2 | # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 | 4 | from troposphere import GetAtt, Join, Output, Template 5 | from troposphere.rds import DBInstance 6 | 7 | t = Template() 8 | 9 | t.set_version("2010-09-09") 10 | 11 | t.set_description( 12 | "AWS CloudFormation Sample Template RDS_Snapshot_On_Delete: Sample " 13 | "template showing how to create an RDS DBInstance that is snapshotted on " 14 | "stack deletion. **WARNING** This template creates an Amazon RDS database " 15 | "instance. When the stack is deleted a database snpshot will be left in " 16 | "your account. You will be billed for the AWS resources used if you " 17 | "create a stack from this template." 18 | ) 19 | MyDB = t.add_resource( 20 | DBInstance( 21 | "MyDB", 22 | Engine="MySQL", 23 | MasterUsername="myName", 24 | MasterUserPassword="myPassword", 25 | AllocatedStorage="5", 26 | DBInstanceClass="db.m1.small", 27 | DBName="MyDatabase", 28 | ) 29 | ) 30 | 31 | JDBCConnectionString = t.add_output( 32 | Output( 33 | "JDBCConnectionString", 34 | Description="JDBC connection string for the database", 35 | Value=Join( 36 | "", 37 | [ 38 | "jdbc:mysql://", 39 | GetAtt(MyDB, "Endpoint.Address"), 40 | ":", 41 | GetAtt(MyDB, "Endpoint.Port"), 42 | "/MyDatabase", 43 | ], 44 | ), 45 | ) 46 | ) 47 | 48 | print(t.to_json()) 49 | -------------------------------------------------------------------------------- /examples/Route53_CNAME.py: -------------------------------------------------------------------------------- 1 | # Converted from Route53_CNAME.template located at: 2 | # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 | 4 | from troposphere import Join, Output, Parameter, Ref, Template 5 | from troposphere.route53 import RecordSetType 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "AWS CloudFormation Sample Template Route53_CNAME: Sample template " 11 | "showing how to create an Amazon Route 53 CNAME record. It assumes that " 12 | "you already have a Hosted Zone registered with Amazon Route 53. " 13 | "**WARNING** This template creates an Amazon EC2 instance. " 14 | "You will be billed for the AWS resources used if you create " 15 | "a stack from this template." 16 | ) 17 | 18 | hostedzone = t.add_parameter( 19 | Parameter( 20 | "HostedZone", 21 | Description="The DNS name of an existing Amazon Route 53 hosted zone", 22 | Type="String", 23 | ) 24 | ) 25 | 26 | myDNSRecord = t.add_resource( 27 | RecordSetType( 28 | "myDNSRecord", 29 | HostedZoneName=Join("", [Ref(hostedzone), "."]), 30 | Comment="CNAME redirect to aws.amazon.com.", 31 | Name=Join( 32 | "", 33 | [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".", Ref(hostedzone), "."], 34 | ), 35 | Type="CNAME", 36 | TTL="900", 37 | ResourceRecords=["aws.amazon.com"], 38 | ) 39 | ) 40 | 41 | 42 | t.add_output(Output("DomainName", Value=Ref(myDNSRecord))) 43 | 44 | print(t.to_json()) 45 | -------------------------------------------------------------------------------- /examples/S3_Bucket.py: -------------------------------------------------------------------------------- 1 | # Converted from S3_Bucket.template located at: 2 | # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 | 4 | from troposphere import Output, Ref, Template 5 | from troposphere.s3 import Bucket, PublicRead 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "AWS CloudFormation Sample Template S3_Bucket: Sample template showing " 11 | "how to create a publicly accessible S3 bucket. " 12 | "**WARNING** This template creates an Amazon S3 Bucket. " 13 | "You will be billed for the AWS resources used if you create " 14 | "a stack from this template." 15 | ) 16 | 17 | s3bucket = t.add_resource( 18 | Bucket( 19 | "S3Bucket", 20 | AccessControl=PublicRead, 21 | ) 22 | ) 23 | 24 | t.add_output( 25 | Output( 26 | "BucketName", 27 | Value=Ref(s3bucket), 28 | Description="Name of S3 bucket to hold website content", 29 | ) 30 | ) 31 | 32 | print(t.to_json()) 33 | -------------------------------------------------------------------------------- /examples/S3_Bucket_With_AccelerateConfiguration.py: -------------------------------------------------------------------------------- 1 | # Converted from S3_Bucket.template located at: 2 | # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 | 4 | from troposphere import Output, Ref, Template 5 | from troposphere.s3 import AccelerateConfiguration, Bucket, PublicRead 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "AWS CloudFormation Sample Template S3_Bucket: Sample template showing :" 11 | "How to create a publicly accessible S3 bucket. " 12 | "How to enable S3 Transfer Acceleration. " 13 | "**WARNING** This template creates an Amazon S3 Bucket. " 14 | "You will be billed for the AWS resources used if you create " 15 | "a stack from this template." 16 | ) 17 | 18 | s3bucket = t.add_resource( 19 | Bucket( 20 | "S3Bucket", 21 | # Make public Read 22 | AccessControl=PublicRead, 23 | # Enable s3 Transfer Acceleration 24 | AccelerateConfiguration=AccelerateConfiguration( 25 | AccelerationStatus="Enabled", 26 | ), 27 | ) 28 | ) 29 | 30 | t.add_output( 31 | Output( 32 | "BucketName", 33 | Value=Ref(s3bucket), 34 | Description="Name of S3 bucket with s3 transfer acceleration enabled", 35 | ) 36 | ) 37 | 38 | print(t.to_json()) 39 | -------------------------------------------------------------------------------- /examples/SQSEncrypt.py: -------------------------------------------------------------------------------- 1 | # Converted from SQS_With_CloudWatch_Alarms.template located at: 2 | # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 | 4 | from troposphere import GetAtt, Output, Ref, Template 5 | from troposphere.sqs import Queue 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "AWS CloudFormation Sample Template SQS: Sample template showing how to " 11 | "create an SQS queue with Server Side Encryption. **WARNING** This " 12 | "template creates Amazon SQS Queues. You will be billed for the AWS " 13 | "resources used if you create a stack from this template." 14 | ) 15 | 16 | mysourcequeue = t.add_resource( 17 | Queue("MySourceQueue", KmsMasterKeyId="testing", KmsDataKeyReusePeriodSeconds=60) 18 | ) 19 | 20 | t.add_output( 21 | [ 22 | Output( 23 | "SourceQueueURL", 24 | Description="URL of the source queue", 25 | Value=Ref(mysourcequeue), 26 | ), 27 | Output( 28 | "SourceQueueARN", 29 | Description="ARN of the source queue", 30 | Value=GetAtt(mysourcequeue, "Arn"), 31 | ), 32 | ] 33 | ) 34 | 35 | print(t.to_json()) 36 | -------------------------------------------------------------------------------- /examples/Secretsmanager.py: -------------------------------------------------------------------------------- 1 | from troposphere import Tags, Template 2 | from troposphere.secretsmanager import GenerateSecretString, Secret 3 | 4 | t = Template() 5 | t.set_version("2010-09-09") 6 | 7 | MySecret = t.add_resource( 8 | Secret( 9 | "MySecret", 10 | Name="MySecret", 11 | Description="This is an autogenerated secret", 12 | GenerateSecretString=GenerateSecretString( 13 | SecretStringTemplate='{"username":"test-user"}', 14 | GenerateStringKey="password", 15 | PasswordLength=30, 16 | ), 17 | Tags=Tags(Appname="AppA"), 18 | ) 19 | ) 20 | 21 | print(t.to_json()) 22 | -------------------------------------------------------------------------------- /examples/Serverless_Deployment_Preference.py: -------------------------------------------------------------------------------- 1 | # Converted from s3_processor located at: 2 | # https://github.com/awslabs/serverless-application-model/blob/dbc54b5d0cd31bf5cebd16d765b74aee9eb34641/examples/2016-10-31/s3_processor/template.yaml 3 | 4 | from troposphere import Template 5 | from troposphere.serverless import DeploymentPreference, Function 6 | 7 | t = Template() 8 | 9 | t.set_description( 10 | "A function that uses the configured traffic shifting type " 11 | "for a canary deployment." 12 | ) 13 | 14 | t.set_transform("AWS::Serverless-2016-10-31") 15 | 16 | t.add_resource( 17 | Function( 18 | "Function", 19 | Handler="index.handler", 20 | Runtime="nodejs6.10", 21 | CodeUri="s3:///function.zip", 22 | AutoPublishAlias="live", 23 | DeploymentPreference=DeploymentPreference( 24 | Enabled=True, Type="Canary10Percent5Minutes" 25 | ), 26 | ) 27 | ) 28 | 29 | print(t.to_json()) 30 | -------------------------------------------------------------------------------- /examples/Serverless_S3_Processor.py: -------------------------------------------------------------------------------- 1 | # Converted from s3_processor located at: 2 | # https://github.com/awslabs/serverless-application-model/blob/dbc54b5d0cd31bf5cebd16d765b74aee9eb34641/examples/2016-10-31/s3_processor/template.yaml 3 | 4 | from troposphere import Ref, Template 5 | from troposphere.s3 import Bucket 6 | from troposphere.serverless import Function, S3Event 7 | 8 | t = Template() 9 | 10 | t.set_description( 11 | "A function is triggered off an upload to a bucket. It logs the content " 12 | "type of the uploaded object." 13 | ) 14 | 15 | t.set_transform("AWS::Serverless-2016-10-31") 16 | 17 | 18 | s3_bucket = t.add_resource(Bucket("Bucket")) 19 | 20 | t.add_resource( 21 | Function( 22 | "ProcessorFunction", 23 | Handler="index.handler", 24 | Runtime="nodejs4.3", 25 | CodeUri="s3:///s3_processor.zip", 26 | Policies="AmazonS3ReadOnlyAccess", 27 | Events={ 28 | "PhotoUpload": S3Event( 29 | "PhotoUpload", Bucket=Ref(s3_bucket), Events=["s3:ObjectCreated:*"] 30 | ) 31 | }, 32 | ) 33 | ) 34 | 35 | print(t.to_json()) 36 | -------------------------------------------------------------------------------- /examples/VpnEndpoint.py: -------------------------------------------------------------------------------- 1 | import troposphere.ec2 as ec2 2 | from troposphere import Tags, Template 3 | 4 | t = Template() 5 | 6 | t.add_resource( 7 | ec2.ClientVpnEndpoint( 8 | "myClientVpnEndpoint", 9 | AuthenticationOptions=[ 10 | ec2.ClientAuthenticationRequest( 11 | Type="directory-service-authentication", 12 | ActiveDirectory=ec2.DirectoryServiceAuthenticationRequest( 13 | DirectoryId="d-926example" 14 | ), 15 | ) 16 | ], 17 | ClientCidrBlock="10.0.0.0/22", 18 | ConnectionLogOptions=ec2.ConnectionLogOptions(Enabled=False), 19 | Description="My Client VPN Endpoint", 20 | DnsServers=["11.11.0.1"], 21 | ServerCertificateArn=( 22 | "arn:aws:acm:us-east-1:111122223333:certificate/" 23 | "12345678-1234-1234-1234-123456789012" 24 | ), 25 | TagSpecifications=[ 26 | ec2.TagSpecifications( 27 | ResourceType="client-vpn-endpoint", 28 | Tags=Tags(Purpose="Production"), 29 | ) 30 | ], 31 | TransportProtocol="udp", 32 | ) 33 | ) 34 | 35 | print(t.to_json()) 36 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "troposphere", 3 | "version": "0.0.0", 4 | "lockfileVersion": 2, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "troposphere", 9 | "version": "0.0.0", 10 | "license": "BSD-3-Clause", 11 | "devDependencies": { 12 | "pyright": "^1.1.261" 13 | }, 14 | "engines": { 15 | "node": ">=16.0.0", 16 | "npm": ">=7.0.0" 17 | } 18 | }, 19 | "node_modules/pyright": { 20 | "version": "1.1.261", 21 | "resolved": "https://registry.npmjs.org/pyright/-/pyright-1.1.261.tgz", 22 | "integrity": "sha512-p+5OCS104OQmpy4xxHjv8pRCxdC1TA14NOf3q8GjqzrBThFORtUFYe4ZDJrmNLqi58QWIXoBkqaKf0s1NPScFA==", 23 | "dev": true, 24 | "bin": { 25 | "pyright": "index.js", 26 | "pyright-langserver": "langserver.index.js" 27 | }, 28 | "engines": { 29 | "node": ">=12.0.0" 30 | } 31 | } 32 | }, 33 | "dependencies": { 34 | "pyright": { 35 | "version": "1.1.261", 36 | "resolved": "https://registry.npmjs.org/pyright/-/pyright-1.1.261.tgz", 37 | "integrity": "sha512-p+5OCS104OQmpy4xxHjv8pRCxdC1TA14NOf3q8GjqzrBThFORtUFYe4ZDJrmNLqi58QWIXoBkqaKf0s1NPScFA==", 38 | "dev": true 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "troposphere", 3 | "version": "0.0.0", 4 | "directories": { 5 | "doc": "docs", 6 | "example": "examples", 7 | "test": "tests" 8 | }, 9 | "scripts": { 10 | "py-type-check": "pyright --venv-path ./" 11 | }, 12 | "repository": { 13 | "type": "git", 14 | "url": "git+https://github.com/cloudtools/troposphere.git" 15 | }, 16 | "license": "BSD-3-Clause", 17 | "bugs": { 18 | "url": "https://github.com/cloudtools/troposphere/issues" 19 | }, 20 | "homepage": "https://github.com/cloudtools/troposphere#readme", 21 | "devDependencies": { 22 | "pyright": "^1.1.261" 23 | }, 24 | "engines": { 25 | "npm": ">=7.0.0", 26 | "node": ">=16.0.0" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.pyright] 2 | include = [ 3 | "**/troposphere", 4 | ] 5 | pythonPlatform = "All" 6 | pythonVersion = "3.8" 7 | reportDuplicateImport = "none" 8 | reportImportCycles = "none" 9 | reportIncompatibleMethodOverride = "warning" 10 | reportMissingTypeStubs = "none" 11 | reportPrivateUsage = "none" 12 | reportUnknownMemberType = "none" 13 | reportUnnecessaryIsInstance = "none" 14 | reportUnusedImport = "none" 15 | reportUnusedVariable = "none" 16 | reportWildcardImportFromLibrary = "none" 17 | strict = [ 18 | "**/__init__.py" 19 | ] 20 | strictParameterNoneValue = false 21 | typeCheckingMode = "off" 22 | useLibraryCodeForTypes = true 23 | venv = ".venv" 24 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # troposphere 2 | # ------------------------------------- 3 | 4 | setuptools 5 | 6 | # ---- Need libraries installed by setup.py 7 | cfn_flip>=1.0.2 8 | awacs>=2.0.0 9 | 10 | # ---- Use jsonpatch to fixup spec files 11 | jsonpatch 12 | 13 | # ---- Documentation Libraries 14 | sphinx==6.2.1 15 | sphinx_rtd_theme==1.2.2 16 | 17 | # ---- S/W Engineering Libraries 18 | 19 | # ---- ---- Styling and Linting 20 | flake8>=4.0.1 21 | pycodestyle>=2.4 22 | pydocstyle>=3.0 23 | pyflakes>=2.1 24 | pylint>=1.9 25 | pre-commit>=2.16 26 | 27 | # ---- ---- Testing 28 | pytest>=4.1 29 | pytest-cov>=2.6 30 | 31 | black==24.4.2 32 | build==0.5.1 33 | click==8.0.4 34 | isort==5.13.2 35 | tox==3.23.1 36 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # troposphere 2 | # ------------------------------------- 3 | 4 | # ---- Need libraries installed by setup.py 5 | cfn_flip>=1.0.2 6 | -------------------------------------------------------------------------------- /resources_openstack.md: -------------------------------------------------------------------------------- 1 | # Currently supported OpenStack resource types 2 | 3 | - [OS::Neutron::Firewall](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::Firewall) 4 | - [OS::Neutron::FirewallPolicy](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::FirewallPolicy) 5 | - [OS::Neutron::FirewallRule](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::FirewallRule) 6 | - [OS::Neutron::FloatingIP](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::FloatingIP) 7 | - [OS::Neutron::FloatingIPAssociation](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::FloatingIPAssociation) 8 | - OS::Neutron::HealthMonitor 9 | - OS::Neutron::Pool 10 | - OS::Neutron::LoadBalancer 11 | - [OS::Neutron::Net](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::Net) 12 | - OS::Neutron::PoolMember 13 | - [OS::Neutron::Port](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::Port) 14 | - [OS::Neutron::SecurityGroup`](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Neutron::SecurityGroup) 15 | - OS::Nova::FloatingIP 16 | - OS::Nova::FloatingIPAssociation 17 | - [OS::Nova::KeyPair](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Nova::KeyPair) 18 | - [OS::Nova::Server](https://docs.openstack.org/heat/latest/template_guide/openstack.html#OS::Nova::Server) 19 | -------------------------------------------------------------------------------- /scripts/patches/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/scripts/patches/__init__.py -------------------------------------------------------------------------------- /scripts/patches/amazonmq.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::AmazonMQ::Broker.LogList to AWS::AmazonMQ::Broker.LogsConfiguration - backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::AmazonMQ::Broker.LogList", 6 | "path": "/PropertyTypes/AWS::AmazonMQ::Broker.LogsConfiguration", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::AmazonMQ::Broker/Properties/Logs/Type", 11 | "value": "LogsConfiguration", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/apigateway.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::ApiGateway::Stage.CanarySetting", 5 | "path": "/PropertyTypes/AWS::ApiGateway::Stage.StageCanarySetting", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::ApiGateway::Stage/Properties/CanarySetting/Type", 10 | "value": "StageCanarySetting", 11 | }, 12 | # backward compatibility 13 | { 14 | "op": "replace", 15 | "path": "/PropertyTypes/AWS::ApiGateway::Deployment.StageDescription/Properties/CanarySetting/Type", 16 | "value": "DeploymentCanarySettings", 17 | }, 18 | # Technically there are two different EndpointConfiguration but keep using 19 | # this one for backward compatibility 20 | { 21 | "op": "remove", 22 | "path": "/PropertyTypes/AWS::ApiGateway::DomainName.EndpointConfiguration", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/appsync.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::SageMaker::Device.Device to AWS::SageMaker::Device.DeviceProperty 3 | { 4 | "op": "replace", 5 | "path": "/ResourceTypes/AWS::AppSync::GraphQLApi/Properties/AdditionalAuthenticationProviders/Type", 6 | "value": "List", 7 | }, 8 | { 9 | "op": "add", 10 | "path": "/ResourceTypes/AWS::AppSync::GraphQLApi/Properties/AdditionalAuthenticationProviders/ItemType", 11 | "value": "AdditionalAuthenticationProvider", 12 | }, 13 | { 14 | "op": "replace", 15 | "path": "/ResourceTypes/AWS::AppSync::GraphQLApi/Properties/Tags/Type", 16 | "value": "List", 17 | }, 18 | { 19 | "op": "add", 20 | "path": "/ResourceTypes/AWS::AppSync::GraphQLApi/Properties/Tags/ItemType", 21 | "value": "Tag", 22 | }, 23 | ] 24 | -------------------------------------------------------------------------------- /scripts/patches/autoscaling.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::AutoScaling::AutoScalingGroup.NotificationConfiguration", 5 | "path": "/PropertyTypes/AWS::AutoScaling::AutoScalingGroup.NotificationConfigurations", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::AutoScaling::AutoScalingGroup/Properties/NotificationConfigurations/ItemType", 10 | "value": "NotificationConfigurations", 11 | }, 12 | { 13 | "op": "remove", 14 | "path": "/PropertyTypes/AWS::AutoScaling::AutoScalingGroup.TagProperty", 15 | }, 16 | { 17 | "op": "remove", 18 | "path": "/ResourceTypes/AWS::AutoScaling::AutoScalingGroup/Properties/Tags/ItemType", 19 | }, 20 | { 21 | "op": "move", 22 | "from": "/PropertyTypes/AWS::AutoScaling::ScalingPolicy.StepAdjustment", 23 | "path": "/PropertyTypes/AWS::AutoScaling::ScalingPolicy.StepAdjustments", 24 | }, 25 | { 26 | "op": "replace", 27 | "path": "/ResourceTypes/AWS::AutoScaling::ScalingPolicy/Properties/StepAdjustments/ItemType", 28 | "value": "StepAdjustments", 29 | }, 30 | { 31 | "op": "move", 32 | "from": "/PropertyTypes/AWS::AutoScaling::LaunchConfiguration.BlockDevice", 33 | "path": "/PropertyTypes/AWS::AutoScaling::LaunchConfiguration.EBSBlockDevice", 34 | }, 35 | { 36 | "op": "replace", 37 | "path": "/PropertyTypes/AWS::AutoScaling::LaunchConfiguration.BlockDeviceMapping/Properties/Ebs/Type", 38 | "value": "EBSBlockDevice", 39 | }, 40 | ] 41 | -------------------------------------------------------------------------------- /scripts/patches/awslambda.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Remove AWS::Lambda::EventSourceMapping.DestinationConfig and prefer AWS::Lambda::EventInvokeConfig.DestinationConfig 3 | # which includes OnSuccess 4 | { 5 | "op": "remove", 6 | "path": "/PropertyTypes/AWS::Lambda::EventSourceMapping.DestinationConfig", 7 | }, 8 | { 9 | "op": "remove", 10 | "path": "/PropertyTypes/AWS::Lambda::EventSourceMapping.OnFailure", 11 | }, 12 | # Rename VpcConfig to VPCConfig - backward compatibility 13 | { 14 | "op": "move", 15 | "from": "/PropertyTypes/AWS::Lambda::Function.VpcConfig", 16 | "path": "/PropertyTypes/AWS::Lambda::Function.VPCConfig", 17 | }, 18 | # backward compatibility 19 | { 20 | "op": "replace", 21 | "path": "/ResourceTypes/AWS::Lambda::Function/Properties/VpcConfig/Type", 22 | "value": "VPCConfig", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/bedrock.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "remove", 4 | "path": "/PropertyTypes/AWS::Bedrock::Flow.PromptTemplateConfiguration", 5 | }, 6 | { 7 | "op": "remove", 8 | "path": "/PropertyTypes/AWS::Bedrock::FlowVersion.PromptTemplateConfiguration", 9 | }, 10 | { 11 | "op": "remove", 12 | "path": "/PropertyTypes/AWS::Bedrock::PromptVersion.PromptTemplateConfiguration", 13 | }, 14 | ] 15 | -------------------------------------------------------------------------------- /scripts/patches/budgets.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Fix different Subscriber properties 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::Budgets::BudgetsAction.Subscriber", 6 | "path": "/PropertyTypes/AWS::Budgets::BudgetsAction.ActionSubscriber", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::Budgets::BudgetsAction/Properties/Subscribers/ItemType", 11 | "value": "ActionSubscriber", 12 | }, 13 | { 14 | "op": "replace", 15 | "path": "/PropertyTypes/AWS::Budgets::Budget.Expression/Properties/And/ItemType", 16 | "value": "object", 17 | }, 18 | { 19 | "op": "replace", 20 | "path": "/PropertyTypes/AWS::Budgets::Budget.Expression/Properties/Not/Type", 21 | "value": "object", 22 | }, 23 | { 24 | "op": "replace", 25 | "path": "/PropertyTypes/AWS::Budgets::Budget.Expression/Properties/Or/ItemType", 26 | "value": "object", 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /scripts/patches/cloudwatch.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::CloudWatch::Alarm.Dimension", 5 | "path": "/PropertyTypes/AWS::CloudWatch::Alarm.MetricDimension", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::CloudWatch::Alarm/Properties/Dimensions/ItemType", 10 | "value": "MetricDimension", 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/PropertyTypes/AWS::CloudWatch::Alarm.Metric/Properties/Dimensions/ItemType", 15 | "value": "MetricDimension", 16 | }, 17 | { 18 | "op": "remove", 19 | "path": "/PropertyTypes/AWS::CloudWatch::AnomalyDetector.Dimension", 20 | }, 21 | { 22 | "op": "replace", 23 | "path": "/ResourceTypes/AWS::CloudWatch::AnomalyDetector/Properties/Dimensions/ItemType", 24 | "value": "MetricDimension", 25 | }, 26 | { 27 | "op": "replace", 28 | "path": "/PropertyTypes/AWS::CloudWatch::AnomalyDetector.Metric/Properties/Dimensions/ItemType", 29 | "value": "MetricDimension", 30 | }, 31 | { 32 | "op": "replace", 33 | "path": "/PropertyTypes/AWS::CloudWatch::AnomalyDetector.SingleMetricAnomalyDetector/Properties/Dimensions/ItemType", 34 | "value": "MetricDimension", 35 | }, 36 | ] 37 | -------------------------------------------------------------------------------- /scripts/patches/codebuild.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Fixup to account for FilterGroup 3 | { 4 | "op": "add", 5 | "path": "/PropertyTypes/AWS::CodeBuild::Project.ProjectTriggers/Properties/FilterGroups/PrimitiveType", 6 | "value": "list", 7 | }, 8 | # backward compatibility 9 | { 10 | "op": "move", 11 | "from": "/PropertyTypes/AWS::CodeBuild::Project.CloudWatchLogsConfig", 12 | "path": "/PropertyTypes/AWS::CodeBuild::Project.CloudWatchLogs", 13 | }, 14 | # backward compatibility 15 | { 16 | "op": "replace", 17 | "path": "/PropertyTypes/AWS::CodeBuild::Project.LogsConfig/Properties/CloudWatchLogs/Type", 18 | "value": "CloudWatchLogs", 19 | }, 20 | # backward compatibility 21 | { 22 | "op": "move", 23 | "from": "/PropertyTypes/AWS::CodeBuild::Project.S3LogsConfig", 24 | "path": "/PropertyTypes/AWS::CodeBuild::Project.S3Logs", 25 | }, 26 | # backward compatibility 27 | { 28 | "op": "replace", 29 | "path": "/PropertyTypes/AWS::CodeBuild::Project.LogsConfig/Properties/S3Logs/Type", 30 | "value": "S3Logs", 31 | }, 32 | ] 33 | -------------------------------------------------------------------------------- /scripts/patches/codecommit.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backwards compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::CodeCommit::Repository.RepositoryTrigger", 6 | "path": "/PropertyTypes/AWS::CodeCommit::Repository.Trigger", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::CodeCommit::Repository/Properties/Triggers/ItemType", 11 | "value": "Trigger", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/cognito.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::Cognito::IdentityPoolRoleAttachment.RulesConfigurationType", 5 | "path": "/PropertyTypes/AWS::Cognito::IdentityPoolRoleAttachment.RulesConfiguration", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::Cognito::IdentityPoolRoleAttachment.RoleMapping/Properties/RulesConfiguration/Type", 10 | "value": "RulesConfiguration", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/config.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::Config::ConfigRule.SourceDetail", 5 | "path": "/PropertyTypes/AWS::Config::ConfigRule.SourceDetails", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::Config::ConfigRule.Source/Properties/SourceDetails/ItemType", 10 | "value": "SourceDetails", 11 | }, 12 | { 13 | "op": "move", 14 | "from": "/PropertyTypes/AWS::Config::ConfigurationAggregator.AccountAggregationSource", 15 | "path": "/PropertyTypes/AWS::Config::ConfigurationAggregator.AccountAggregationSources", 16 | }, 17 | { 18 | "op": "replace", 19 | "path": "/ResourceTypes/AWS::Config::ConfigurationAggregator/Properties/AccountAggregationSources/ItemType", 20 | "value": "AccountAggregationSources", 21 | }, 22 | ] 23 | -------------------------------------------------------------------------------- /scripts/patches/databrew.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "add", 4 | "path": "/PropertyTypes/AWS::DataBrew::Recipe.Action/Properties/Parameters/Type", 5 | "value": "RecipeParameters", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::DataBrew::Job.StatisticOverride/Properties/Parameters/Type", 10 | "value": "Map", 11 | }, 12 | # Rename AWS::DataBrew::Job.Recipe to AWS::DataBrew::Job.JobRecipe due to conflict with Recipe resource name 13 | { 14 | "op": "move", 15 | "from": "/PropertyTypes/AWS::DataBrew::Job.Recipe", 16 | "path": "/PropertyTypes/AWS::DataBrew::Job.JobRecipe", 17 | }, 18 | { 19 | "op": "replace", 20 | "path": "/ResourceTypes/AWS::DataBrew::Job/Properties/Recipe/Type", 21 | "value": "JobRecipe", 22 | }, 23 | # Rename AWS::DataBrew::Job.S3Location to AWS::DataBrew::Job.JobS3Location 24 | { 25 | "op": "move", 26 | "from": "/PropertyTypes/AWS::DataBrew::Job.S3Location", 27 | "path": "/PropertyTypes/AWS::DataBrew::Job.JobS3Location", 28 | }, 29 | { 30 | "op": "replace", 31 | "path": "/PropertyTypes/AWS::DataBrew::Job.S3TableOutputOptions/Properties/Location/Type", 32 | "value": "JobS3Location", 33 | }, 34 | ] 35 | -------------------------------------------------------------------------------- /scripts/patches/datapipeline.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::DataPipeline::Pipeline.ParameterAttribute", 6 | "path": "/PropertyTypes/AWS::DataPipeline::Pipeline.ParameterObjectAttribute", 7 | }, 8 | # backward compatibility 9 | { 10 | "op": "replace", 11 | "path": "/PropertyTypes/AWS::DataPipeline::Pipeline.ParameterObject/Properties/Attributes/ItemType", 12 | "value": "ParameterObjectAttribute", 13 | }, 14 | # backward compatibility 15 | { 16 | "op": "move", 17 | "from": "/PropertyTypes/AWS::DataPipeline::Pipeline.Field", 18 | "path": "/PropertyTypes/AWS::DataPipeline::Pipeline.ObjectField", 19 | }, 20 | # backward compatibility 21 | { 22 | "op": "replace", 23 | "path": "/PropertyTypes/AWS::DataPipeline::Pipeline.PipelineObject/Properties/Fields/ItemType", 24 | "value": "ObjectField", 25 | }, 26 | ] 27 | -------------------------------------------------------------------------------- /scripts/patches/datasync.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::DataSync::LocationFSxONTAP.Protocol to AWS::DataSync::LocationFSxONTAP.ONTAPProtocol 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::DataSync::LocationFSxONTAP.Protocol", 6 | "path": "/PropertyTypes/AWS::DataSync::LocationFSxONTAP.ONTAPProtocol", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::DataSync::LocationFSxONTAP/Properties/Protocol/Type", 11 | "value": "ONTAPProtocol", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/dynamodb.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # duplicate GlobalSecondaryIndex 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::DynamoDB::GlobalTable.GlobalSecondaryIndex", 6 | "path": "/PropertyTypes/AWS::DynamoDB::GlobalTable.GlobalTableGlobalSecondaryIndex", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::DynamoDB::GlobalTable/Properties/GlobalSecondaryIndexes/ItemType", 11 | "value": "GlobalTableGlobalSecondaryIndex", 12 | }, 13 | # duplicate SSESpecification 14 | { 15 | "op": "move", 16 | "from": "/PropertyTypes/AWS::DynamoDB::GlobalTable.SSESpecification", 17 | "path": "/PropertyTypes/AWS::DynamoDB::GlobalTable.GlobalTableSSESpecification", 18 | }, 19 | { 20 | "op": "replace", 21 | "path": "/ResourceTypes/AWS::DynamoDB::GlobalTable/Properties/SSESpecification/Type", 22 | "value": "GlobalTableSSESpecification", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/ecr.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::ECR::ReplicationConfiguration.ReplicationConfiguration to AWS::ECR::ReplicationConfiguration.ReplicationConfigurationProperty 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::ECR::ReplicationConfiguration.ReplicationConfiguration", 6 | "path": "/PropertyTypes/AWS::ECR::ReplicationConfiguration.ReplicationConfigurationProperty", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::ECR::ReplicationConfiguration/Properties/ReplicationConfiguration/Type", 11 | "value": "ReplicationConfigurationProperty", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/efs.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/ResourceTypes/AWS::EFS::AccessPoint/Properties/AccessPointTags/ItemType", 5 | "value": "Tag", 6 | }, 7 | # Remove unused AccessPointTag (remapped to troposphere Tags) 8 | { 9 | "op": "remove", 10 | "path": "/PropertyTypes/AWS::EFS::AccessPoint.AccessPointTag", 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/ResourceTypes/AWS::EFS::FileSystem/Properties/FileSystemTags/ItemType", 15 | "value": "Tag", 16 | }, 17 | # Remove unused ElasticFileSystemTag (remapped to troposphere Tags) 18 | { 19 | "op": "remove", 20 | "path": "/PropertyTypes/AWS::EFS::FileSystem.ElasticFileSystemTag", 21 | }, 22 | ] 23 | -------------------------------------------------------------------------------- /scripts/patches/elasticbeanstalk.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename ElasticBeanstalk::ConfigurationTemplate ConfigurationOptionSetting to OptionSetting - backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting", 6 | "path": "/PropertyTypes/AWS::ElasticBeanstalk::ConfigurationTemplate.OptionSetting", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::ElasticBeanstalk::ConfigurationTemplate/Properties/OptionSettings/ItemType", 11 | "value": "OptionSetting", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/emrserverless.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "remove", 4 | "path": "/PropertyTypes/AWS::EMRServerless::Application.ConfigurationObject/Properties/Configurations", 5 | }, 6 | ] 7 | -------------------------------------------------------------------------------- /scripts/patches/events.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/ResourceTypes/AWS::Events::EventBus/Properties/Tags/ItemType", 5 | "value": "Tags", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/fms.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/ResourceTypes/AWS::FMS::Policy/Properties/ResourceTags/ItemType", 5 | "value": "Tag", 6 | }, 7 | { 8 | "op": "remove", 9 | "path": "/PropertyTypes/AWS::FMS::Policy.ResourceTag", 10 | }, 11 | { 12 | "op": "replace", 13 | "path": "/ResourceTypes/AWS::FMS::Policy/Properties/Tags/ItemType", 14 | "value": "Tag", 15 | }, 16 | { 17 | "op": "remove", 18 | "path": "/PropertyTypes/AWS::FMS::Policy.PolicyTag", 19 | }, 20 | ] 21 | -------------------------------------------------------------------------------- /scripts/patches/fsx.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration", 6 | "path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type", 11 | "value": "VolumeOntapConfiguration", 12 | }, 13 | # Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name 14 | { 15 | "op": "move", 16 | "from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration", 17 | "path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration", 18 | }, 19 | { 20 | "op": "replace", 21 | "path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type", 22 | "value": "VolumeOpenZFSConfiguration", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/gamelift.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "add", 4 | "path": "/ResourceTypes/AWS::GameLift::Fleet/Properties/AnywhereConfiguration/Type", 5 | "value": "AnywhereConfiguration", 6 | }, 7 | { 8 | # Prefer ContainerFleet.LocationConfiguration over GameLift::Fleet.LocationConfiguration 9 | "op": "remove", 10 | "path": "/PropertyTypes/AWS::GameLift::Fleet.LocationConfiguration", 11 | }, 12 | { 13 | # Prefer GameLift::ContainerFleet.ScalingPolicy over GameLift::Fleet.ScalingPolicy 14 | "op": "remove", 15 | "path": "/PropertyTypes/AWS::GameLift::ContainerFleet.ScalingPolicy", 16 | }, 17 | ] 18 | -------------------------------------------------------------------------------- /scripts/patches/glue.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/PropertyTypes/AWS::Glue::SecurityConfiguration.EncryptionConfiguration/Properties/S3Encryptions/Type", 5 | "value": "List", 6 | }, 7 | { 8 | "op": "add", 9 | "path": "/PropertyTypes/AWS::Glue::SecurityConfiguration.EncryptionConfiguration/Properties/S3Encryptions/ItemType", 10 | "value": "S3Encryption", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/greengrassv2.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "remove", 4 | "path": "/PropertyTypes/AWS::GreengrassV2::Deployment.IoTJobRateIncreaseCriteria", 5 | }, 6 | { 7 | "op": "add", 8 | "path": "/PropertyTypes/AWS::GreengrassV2::Deployment.IoTJobRateIncreaseCriteria", 9 | "value": { 10 | "Properties": { 11 | "NumberOfNotifiedThings": { 12 | "PrimitiveType": "Integer", 13 | "Required": False, 14 | }, 15 | "NumberOfSucceededThings": { 16 | "PrimitiveType": "Integer", 17 | "Required": False, 18 | }, 19 | }, 20 | }, 21 | }, 22 | ] 23 | -------------------------------------------------------------------------------- /scripts/patches/groundstation.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::GroundStation::Config.FrequencyBandwidth", 5 | "path": "/PropertyTypes/AWS::GroundStation::Config.Bandwidth", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::GroundStation::Config.SpectrumConfig/Properties/Bandwidth/Type", 10 | "value": "Bandwidth", 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/PropertyTypes/AWS::GroundStation::Config.AntennaUplinkConfig/Properties/SpectrumConfig/Type", 15 | "value": "SpectrumConfig", 16 | }, 17 | { 18 | "op": "remove", 19 | "path": "/PropertyTypes/AWS::GroundStation::Config.UplinkSpectrumConfig", 20 | }, 21 | ] 22 | -------------------------------------------------------------------------------- /scripts/patches/guardduty.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/ResourceTypes/AWS::GuardDuty::Filter/Properties/Tags/ItemType", 5 | "value": "Tags", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/iam.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/ResourceTypes/AWS::IAM::Policy", 6 | "path": "/ResourceTypes/AWS::IAM::PolicyType::Policy", 7 | }, 8 | ] 9 | -------------------------------------------------------------------------------- /scripts/patches/imagebuilder.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::ImageBuilder::ContainerRecipe.ComponentConfiguration to AWS::ImageBuilder::ContainerRecipe.ContainerComponentConfiguration 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::ImageBuilder::ContainerRecipe.ComponentConfiguration", 6 | "path": "/PropertyTypes/AWS::ImageBuilder::ContainerRecipe.ContainerComponentConfiguration", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::ImageBuilder::ContainerRecipe/Properties/Components/ItemType", 11 | "value": "ContainerComponentConfiguration", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/iotanalytics.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::IoTAnalytics::Dataset.Filter", 5 | "path": "/PropertyTypes/AWS::IoTAnalytics::Dataset.QueryActionFilter", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::IoTAnalytics::Dataset.QueryAction/Properties/Filters/ItemType", 10 | "value": "QueryActionFilter", 11 | }, 12 | { 13 | "op": "move", 14 | "from": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.Channel", 15 | "path": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.ActivityChannel", 16 | }, 17 | { 18 | "op": "replace", 19 | "path": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.Activity/Properties/Channel/Type", 20 | "value": "ActivityChannel", 21 | }, 22 | { 23 | "op": "move", 24 | "from": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.Datastore", 25 | "path": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.ActivityDatastore", 26 | }, 27 | { 28 | "op": "replace", 29 | "path": "/PropertyTypes/AWS::IoTAnalytics::Pipeline.Activity/Properties/Datastore/Type", 30 | "value": "ActivityDatastore", 31 | }, 32 | ] 33 | -------------------------------------------------------------------------------- /scripts/patches/iotsitewise.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::IoTSiteWise::AccessPolicy.Portal to AWS::IoTSiteWise::AccessPolicy.PortalProperty due to conflict with Portal resource name 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.Portal", 6 | "path": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.PortalProperty", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.AccessPolicyResource/Properties/Portal/Type", 11 | "value": "PortalProperty", 12 | }, 13 | # Rename AWS::IoTSiteWise::AccessPolicy.Project to AWS::IoTSiteWise::AccessPolicy.ProjectProperty due to conflict with Project resource name 14 | { 15 | "op": "move", 16 | "from": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.Project", 17 | "path": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.ProjectProperty", 18 | }, 19 | { 20 | "op": "replace", 21 | "path": "/PropertyTypes/AWS::IoTSiteWise::AccessPolicy.AccessPolicyResource/Properties/Project/Type", 22 | "value": "ProjectProperty", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/iotwireless.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::IoTWireless::FuotaTask.LoRaWAN", 5 | "path": "/PropertyTypes/AWS::IoTWireless::FuotaTask.FuotaTaskLoRaWAN", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::IoTWireless::FuotaTask/Properties/LoRaWAN/Type", 10 | "value": "FuotaTaskLoRaWAN", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/lex.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Replace AWS::Lex::ResourcePolicy.Policy from Type "Policy" to Map 3 | # Note: a policy validator will replace this as well. 4 | { 5 | "op": "add", 6 | "path": "/ResourceTypes/AWS::Lex::Bot/Properties/BotTags/PrimitiveType", 7 | "value": "Tags", 8 | }, 9 | { 10 | "op": "add", 11 | "path": "/ResourceTypes/AWS::Lex::Bot/Properties/TestBotAliasTags/PrimitiveType", 12 | "value": "Tags", 13 | }, 14 | { 15 | "op": "add", 16 | "path": "/ResourceTypes/AWS::Lex::BotAlias/Properties/BotAliasTags/PrimitiveType", 17 | "value": "Tags", 18 | }, 19 | { 20 | "op": "add", 21 | "path": "/PropertyTypes/AWS::Lex::Bot.SlotValueOverride/Properties/Values/PrimitiveItemType", 22 | "value": "String", 23 | }, 24 | { 25 | "op": "remove", 26 | "path": "/PropertyTypes/AWS::Lex::Bot.SlotValueOverride/Properties/Values/ItemType", 27 | }, 28 | ] 29 | -------------------------------------------------------------------------------- /scripts/patches/lightsail.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::Lightsail::Instance.Disk to AWS::Lightsail::Instance.DiskProperty 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::Lightsail::Instance.Disk", 6 | "path": "/PropertyTypes/AWS::Lightsail::Instance.DiskProperty", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/PropertyTypes/AWS::Lightsail::Instance.Hardware/Properties/Disks/ItemType", 11 | "value": "DiskProperty", 12 | }, 13 | ] 14 | -------------------------------------------------------------------------------- /scripts/patches/macie.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/PropertyTypes/AWS::Macie::FindingsFilter.FindingCriteria/Properties/Criterion/Type", 5 | "value": "Map", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/mediaconnect.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::MediaConnect::FlowOutput.Encryption", 5 | "path": "/PropertyTypes/AWS::MediaConnect::FlowOutput.FlowOutputEncryption", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::MediaConnect::FlowOutput/Properties/Encryption/Type", 10 | "value": "FlowOutputEncryption", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/memorydb.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Add AWS::MemoryDB::Cluster Endpoint 3 | { 4 | "op": "add", 5 | "path": "/ResourceTypes/AWS::MemoryDB::Cluster/Properties/ClusterEndpoint", 6 | "value": {"Type": "Endpoint"}, 7 | }, 8 | ] 9 | -------------------------------------------------------------------------------- /scripts/patches/msk.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "remove", 4 | "path": "/PropertyTypes/AWS::MSK::ServerlessCluster.ClientAuthentication", 5 | }, 6 | { 7 | "op": "remove", 8 | "path": "/PropertyTypes/AWS::MSK::ServerlessCluster.Sasl", 9 | }, 10 | ] 11 | -------------------------------------------------------------------------------- /scripts/patches/networkfirewall.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::NetworkFirewall::RuleGroup.RuleGroup", 5 | "path": "/PropertyTypes/AWS::NetworkFirewall::RuleGroup.RuleGroupProperty", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::NetworkFirewall::RuleGroup/Properties/RuleGroup/Type", 10 | "value": "RuleGroupProperty", 11 | }, 12 | { 13 | "op": "move", 14 | "from": "/PropertyTypes/AWS::NetworkFirewall::FirewallPolicy.FirewallPolicy", 15 | "path": "/PropertyTypes/AWS::NetworkFirewall::FirewallPolicy.FirewallPolicyProperty", 16 | }, 17 | { 18 | "op": "replace", 19 | "path": "/ResourceTypes/AWS::NetworkFirewall::FirewallPolicy/Properties/FirewallPolicy/Type", 20 | "value": "FirewallPolicyProperty", 21 | }, 22 | { 23 | "op": "move", 24 | "from": "/PropertyTypes/AWS::NetworkFirewall::LoggingConfiguration.LoggingConfiguration", 25 | "path": "/PropertyTypes/AWS::NetworkFirewall::LoggingConfiguration.LoggingConfigurationProperty", 26 | }, 27 | { 28 | "op": "replace", 29 | "path": "/ResourceTypes/AWS::NetworkFirewall::LoggingConfiguration/Properties/LoggingConfiguration/Type", 30 | "value": "LoggingConfigurationProperty", 31 | }, 32 | ] 33 | -------------------------------------------------------------------------------- /scripts/patches/networkmanager.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "remove", 4 | "path": "/PropertyTypes/AWS::NetworkManager::CoreNetwork.CoreNetworkNetworkFunctionGroup", 5 | }, 6 | ] 7 | -------------------------------------------------------------------------------- /scripts/patches/opsworks.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::OpsWorks::App.EnvironmentVariable", 6 | "path": "/PropertyTypes/AWS::OpsWorks::App.Environment", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::OpsWorks::App/Properties/Environment/ItemType", 11 | "value": "Environment", 12 | }, 13 | # backward compatibility 14 | { 15 | "op": "move", 16 | "from": "/PropertyTypes/AWS::OpsWorks::Layer.LifecycleEventConfiguration", 17 | "path": "/PropertyTypes/AWS::OpsWorks::Layer.LifeCycleConfiguration", 18 | }, 19 | { 20 | "op": "replace", 21 | "path": "/ResourceTypes/AWS::OpsWorks::Layer/Properties/LifecycleEventConfiguration/Type", 22 | "value": "LifeCycleConfiguration", 23 | }, 24 | ] 25 | -------------------------------------------------------------------------------- /scripts/patches/pinpoint.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility - prefer Campaign.Limits over ApplicationSettings.Limits 3 | { 4 | "op": "remove", 5 | "path": "/PropertyTypes/AWS::Pinpoint::ApplicationSettings.Limits", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/qbusiness.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "replace", 4 | "path": "/PropertyTypes/AWS::QBusiness::DataAccessor.AttributeFilter/Properties/AndAllFilters/ItemType", 5 | "value": "object", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/PropertyTypes/AWS::QBusiness::DataAccessor.AttributeFilter/Properties/NotFilter/Type", 10 | "value": "object", 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/PropertyTypes/AWS::QBusiness::DataAccessor.AttributeFilter/Properties/OrAllFilters/ItemType", 15 | "value": "object", 16 | }, 17 | ] 18 | -------------------------------------------------------------------------------- /scripts/patches/quicksight.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Remove attribute property OutputColumn and Sheets 3 | { 4 | "op": "remove", 5 | "path": "/PropertyTypes/AWS::QuickSight::DataSet.OutputColumn", 6 | }, 7 | { 8 | "op": "remove", 9 | "path": "/PropertyTypes/AWS::QuickSight::Analysis.Sheet", 10 | }, 11 | { 12 | "op": "remove", 13 | "path": "/PropertyTypes/AWS::QuickSight::Dashboard.DashboardVersion", 14 | }, 15 | { 16 | "op": "remove", 17 | "path": "/PropertyTypes/AWS::QuickSight::Template.Visual", 18 | }, 19 | ] 20 | -------------------------------------------------------------------------------- /scripts/patches/rds.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility - these fake out the template generator for the 3 | # examples. Need to find a better long term fix. 4 | { 5 | "op": "add", 6 | "path": "/ResourceTypes/AWS::RDS::DBInstance/Properties/DBSecurityGroups/PrimitiveType", 7 | "value": "list", 8 | }, 9 | { 10 | "op": "add", 11 | "path": "/ResourceTypes/AWS::RDS::DBSubnetGroup/Properties/SubnetIds/PrimitiveType", 12 | "value": "list", 13 | }, 14 | # Spec 193.0.0 removed these properties, so add them back in 15 | { 16 | "op": "add", 17 | "path": "/ResourceTypes/AWS::RDS::DBInstance/Properties/CertificateDetails", 18 | "value": { 19 | "Type": "CertificateDetails", 20 | "Required": False, 21 | }, 22 | }, 23 | { 24 | "op": "add", 25 | "path": "/ResourceTypes/AWS::RDS::DBInstance/Properties/Endpoint", 26 | "value": { 27 | "Type": "Endpoint", 28 | "Required": False, 29 | }, 30 | }, 31 | { 32 | "op": "add", 33 | "path": "/ResourceTypes/AWS::RDS::GlobalCluster/Properties/GlobalEndpoint", 34 | "value": { 35 | "Type": "GlobalEndpoint", 36 | "Required": False, 37 | }, 38 | }, 39 | ] 40 | -------------------------------------------------------------------------------- /scripts/patches/redshift.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::Redshift::ClusterParameterGroup.Parameter", 5 | "path": "/PropertyTypes/AWS::Redshift::ClusterParameterGroup.AmazonRedshiftParameter", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::Redshift::ClusterParameterGroup/Properties/Parameters/ItemType", 10 | "value": "AmazonRedshiftParameter", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/redshiftserverless.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "add", 4 | "path": "/ResourceTypes/AWS::RedshiftServerless::Workgroup/Properties/Workgroup", 5 | "value": {"Type": "Workgroup"}, 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/s3outposts.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Remove attribute property NetworkInterface 3 | { 4 | "op": "remove", 5 | "path": "/PropertyTypes/AWS::S3Outposts::Endpoint.NetworkInterface", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/patches/ses.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Rename AWS::SES::Template.Template to AWS::SES::Template.EmailTemplate - backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/PropertyTypes/AWS::SES::Template.Template", 6 | "path": "/PropertyTypes/AWS::SES::Template.EmailTemplate", 7 | }, 8 | { 9 | "op": "replace", 10 | "path": "/ResourceTypes/AWS::SES::Template/Properties/Template/Type", 11 | "value": "EmailTemplate", 12 | }, 13 | # Resolve conflict between AWS::SES::ReceiptRule.Rule and AWS::SES::MailManagerRuleSet.Rule 14 | { 15 | "op": "move", 16 | "from": "/PropertyTypes/AWS::SES::MailManagerRuleSet.Rule", 17 | "path": "/PropertyTypes/AWS::SES::MailManagerRuleSet.MailManagerRule", 18 | }, 19 | { 20 | "op": "replace", 21 | "path": "/ResourceTypes/AWS::SES::MailManagerRuleSet/Properties/Rules/ItemType", 22 | "value": "MailManagerRule", 23 | }, 24 | # Resolve conflict between AWS::SES::ReceiptRule.S3Actio and AWS::SES::MailManagerRuleSet.S3Action 25 | { 26 | "op": "move", 27 | "from": "/PropertyTypes/AWS::SES::MailManagerRuleSet.S3Action", 28 | "path": "/PropertyTypes/AWS::SES::MailManagerRuleSet.MailManagerS3Action", 29 | }, 30 | { 31 | "op": "replace", 32 | "path": "/PropertyTypes/AWS::SES::MailManagerRuleSet.RuleAction/Properties/WriteToS3/Type", 33 | "value": "MailManagerS3Action", 34 | }, 35 | ] 36 | -------------------------------------------------------------------------------- /scripts/patches/sns.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # backward compatibility 3 | { 4 | "op": "move", 5 | "from": "/ResourceTypes/AWS::SNS::Subscription", 6 | "path": "/ResourceTypes/AWS::SNS::SubscriptionResource::Subscription", 7 | }, 8 | ] 9 | -------------------------------------------------------------------------------- /scripts/patches/sqs.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Add RedrivePolicy for backward compatibility 3 | { 4 | "op": "add", 5 | "path": "/PropertyTypes/AWS::SQS::Queue.RedrivePolicy", 6 | "value": { 7 | "Properties": { 8 | "deadLetterTargetArn": {"PrimitiveType": "String", "Required": False}, 9 | "maxReceiveCount": {"PrimitiveType": "Integer", "Required": False}, 10 | }, 11 | }, 12 | }, 13 | { 14 | "op": "remove", 15 | "path": "/ResourceTypes/AWS::SQS::Queue/Properties/RedrivePolicy/PrimitiveType", 16 | }, 17 | { 18 | "op": "add", 19 | "path": "/ResourceTypes/AWS::SQS::Queue/Properties/RedrivePolicy/Type", 20 | "value": "RedrivePolicy", 21 | }, 22 | ] 23 | -------------------------------------------------------------------------------- /scripts/patches/ssm.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "add", 4 | "path": "/PropertyTypes/AWS::SSM::PatchBaseline.Rule/Properties/ApproveUntilDate/PrimitiveType", 5 | "value": "String", 6 | }, 7 | # backward compatibility 8 | { 9 | "op": "move", 10 | "from": "/PropertyTypes/AWS::SSM::Association.Target", 11 | "path": "/PropertyTypes/AWS::SSM::Association.Targets", 12 | }, 13 | # backward compatibility 14 | { 15 | "op": "replace", 16 | "path": "/ResourceTypes/AWS::SSM::Association/Properties/Targets/ItemType", 17 | "value": "Targets", 18 | }, 19 | # backward compatibility 20 | { 21 | "op": "replace", 22 | "path": "/ResourceTypes/AWS::SSM::MaintenanceWindowTarget/Properties/Targets/ItemType", 23 | "value": "Targets", 24 | }, 25 | # backward compatibility 26 | { 27 | "op": "replace", 28 | "path": "/ResourceTypes/AWS::SSM::MaintenanceWindowTask/Properties/Targets/ItemType", 29 | "value": "Targets", 30 | }, 31 | # backward compatibility 32 | { 33 | "op": "remove", 34 | "path": "/PropertyTypes/AWS::SSM::MaintenanceWindowTask.Target", 35 | }, 36 | ] 37 | -------------------------------------------------------------------------------- /scripts/patches/stepfunctions.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "add", 4 | "path": "/ResourceTypes/AWS::StepFunctions::StateMachine/Properties/Definition/PrimitiveType", 5 | "value": "Json", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::StepFunctions::Activity/Properties/Tags/ItemType", 10 | "value": "Tag", 11 | }, 12 | { 13 | "op": "remove", 14 | "path": "/PropertyTypes/AWS::StepFunctions::Activity.TagsEntry", 15 | }, 16 | { 17 | "op": "replace", 18 | "path": "/ResourceTypes/AWS::StepFunctions::StateMachine/Properties/Tags/ItemType", 19 | "value": "Tag", 20 | }, 21 | { 22 | "op": "remove", 23 | "path": "/PropertyTypes/AWS::StepFunctions::StateMachine.TagsEntry", 24 | }, 25 | ] 26 | -------------------------------------------------------------------------------- /scripts/patches/transfer.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | { 3 | "op": "move", 4 | "from": "/PropertyTypes/AWS::Transfer::WebApp.IdentityProviderDetails", 5 | "path": "/PropertyTypes/AWS::Transfer::WebApp.WebAppIdentityProviderDetails", 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/ResourceTypes/AWS::Transfer::WebApp/Properties/IdentityProviderDetails/Type", 10 | "value": "WebAppIdentityProviderDetails", 11 | }, 12 | ] 13 | -------------------------------------------------------------------------------- /scripts/patches/workspaces.py: -------------------------------------------------------------------------------- 1 | patches = [ 2 | # Remove attribute property ConnectionAliasAssociation 3 | { 4 | "op": "remove", 5 | "path": "/PropertyTypes/AWS::WorkSpaces::ConnectionAlias.ConnectionAliasAssociation", 6 | }, 7 | ] 8 | -------------------------------------------------------------------------------- /scripts/regen: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | while getopts "v" option; do 6 | case "${option}" in 7 | v) 8 | verbose=-v 9 | ;; 10 | esac 11 | done 12 | 13 | grep -l PropsDictType troposphere/*.py | \ 14 | cut -d/ -f2 | \ 15 | cut -d. -f1 | \ 16 | sed -e '/^__init__/d' -e '/^constants/d' -e '/^policies/d' | \ 17 | sed -e '/^serverless/d' -e '/^template_generator/d' -e '/^utils/d' | \ 18 | sed 's/^analytics$/kinesisanalytics/' | \ 19 | sed 's/awslambda/lambda/' | \ 20 | sed 's/^firehose$/kinesisfirehose/' | \ 21 | sort > autogen 22 | python3 scripts/gen.py --filelist autogen -d troposphere ${verbose} 23 | make fix 24 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/tests/__init__.py -------------------------------------------------------------------------------- /tests/examples_output/ApplicationAutoScalingSample.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "scalableTarget": { 4 | "Properties": { 5 | "MaxCapacity": 2, 6 | "MinCapacity": 1, 7 | "ResourceId": "service/ecsStack", 8 | "RoleARN": "Access Management (IAM) role", 9 | "ScalableDimension": "ecs:service:DesiredCount", 10 | "ServiceNamespace": "ecs" 11 | }, 12 | "Type": "AWS::ApplicationAutoScaling::ScalableTarget" 13 | }, 14 | "scalingPolicy": { 15 | "Properties": { 16 | "PolicyName": "AStepPolicy", 17 | "PolicyType": "StepScaling", 18 | "ScalingTargetId": { 19 | "Ref": "scalableTarget" 20 | }, 21 | "StepScalingPolicyConfiguration": { 22 | "AdjustmentType": "PercentChangeInCapacity", 23 | "Cooldown": 60, 24 | "MetricAggregationType": "Average", 25 | "StepAdjustments": [ 26 | { 27 | "MetricIntervalLowerBound": 0, 28 | "ScalingAdjustment": 200 29 | } 30 | ] 31 | } 32 | }, 33 | "Type": "AWS::ApplicationAutoScaling::ScalingPolicy" 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /tests/examples_output/CertificateManagerSample.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "mycert": { 4 | "Properties": { 5 | "DomainName": "example.com", 6 | "DomainValidationOptions": [ 7 | { 8 | "DomainName": "example.com", 9 | "ValidationDomain": "example.com" 10 | } 11 | ], 12 | "Tags": [ 13 | { 14 | "Key": "tag-key", 15 | "Value": "tag-value" 16 | } 17 | ] 18 | }, 19 | "Type": "AWS::CertificateManager::Certificate" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /tests/examples_output/ClassExtensions.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "jones1": { 4 | "Properties": { 5 | "ImageId": "ami-xxxx", 6 | "InstanceType": "t1.micro", 7 | "Monitoring": true, 8 | "SecurityGroups": [ 9 | "frontend" 10 | ] 11 | }, 12 | "Type": "AWS::EC2::Instance" 13 | }, 14 | "williams1": { 15 | "Properties": { 16 | "ImageId": "ami-xxxx", 17 | "InstanceType": "m2.large", 18 | "Monitoring": true, 19 | "SecurityGroups": [ 20 | "processing" 21 | ] 22 | }, 23 | "Type": "AWS::EC2::Instance" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /tests/examples_output/CloudFront_Distribution_S3.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template CloudFront_S3: Sample template showing how to create an Amazon CloudFront distribution using an S3 origin. **WARNING** This template creates a CloudFront distribution. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "DistributionId": { 5 | "Value": { 6 | "Ref": "myDistribution" 7 | } 8 | }, 9 | "DistributionName": { 10 | "Value": { 11 | "Fn::Join": [ 12 | "", 13 | [ 14 | "http://", 15 | { 16 | "Fn::GetAtt": [ 17 | "myDistribution", 18 | "DomainName" 19 | ] 20 | } 21 | ] 22 | ] 23 | } 24 | } 25 | }, 26 | "Parameters": { 27 | "S3DNSName": { 28 | "Description": "The DNS name of an existing S3 bucket to use as the Cloudfront distribution origin", 29 | "Type": "String" 30 | } 31 | }, 32 | "Resources": { 33 | "myDistribution": { 34 | "Properties": { 35 | "DistributionConfig": { 36 | "DefaultCacheBehavior": { 37 | "ForwardedValues": { 38 | "QueryString": false 39 | }, 40 | "TargetOriginId": "Origin 1", 41 | "ViewerProtocolPolicy": "allow-all" 42 | }, 43 | "Enabled": true, 44 | "HttpVersion": "http2", 45 | "Origins": [ 46 | { 47 | "DomainName": { 48 | "Ref": "S3DNSName" 49 | }, 50 | "Id": "Origin 1", 51 | "S3OriginConfig": {} 52 | } 53 | ] 54 | } 55 | }, 56 | "Type": "AWS::CloudFront::Distribution" 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /tests/examples_output/CloudFront_StreamingDistribution_S3.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template CloudFront_S3: Sample template showing how to create an Amazon CloudFront Streaming distribution using an S3 origin. **WARNING** This template creates a CloudFront distribution. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "DistributionId": { 5 | "Value": { 6 | "Ref": "myDistribution" 7 | } 8 | }, 9 | "DistributionName": { 10 | "Value": { 11 | "Fn::Join": [ 12 | "", 13 | [ 14 | "http://", 15 | { 16 | "Fn::GetAtt": [ 17 | "myDistribution", 18 | "DomainName" 19 | ] 20 | } 21 | ] 22 | ] 23 | } 24 | } 25 | }, 26 | "Parameters": { 27 | "S3DNSName": { 28 | "Description": "The DNS name of an existing S3 bucket to use as the Cloudfront distribution origin", 29 | "Type": "String" 30 | } 31 | }, 32 | "Resources": { 33 | "myDistribution": { 34 | "Properties": { 35 | "StreamingDistributionConfig": { 36 | "Comment": "Streaming distribution", 37 | "Enabled": true, 38 | "S3Origin": { 39 | "DomainName": { 40 | "Ref": "S3DNSName" 41 | } 42 | }, 43 | "TrustedSigners": { 44 | "Enabled": false 45 | } 46 | } 47 | }, 48 | "Type": "AWS::CloudFront::StreamingDistribution" 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /tests/examples_output/CodeBuild.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Resources": { 4 | "DemoProject": { 5 | "Properties": { 6 | "Artifacts": { 7 | "Type": "NO_ARTIFACTS" 8 | }, 9 | "Environment": { 10 | "ComputeType": "BUILD_GENERAL1_SMALL", 11 | "EnvironmentVariables": [ 12 | { 13 | "Name": "APP_NAME", 14 | "Value": "demo" 15 | } 16 | ], 17 | "Image": "aws/codebuild/java:openjdk-8", 18 | "Type": "LINUX_CONTAINER" 19 | }, 20 | "Name": "DemoProject", 21 | "ServiceRole": "arn:aws:iam::0123456789:role/codebuild-role", 22 | "Source": { 23 | "Location": "codebuild-demo-test/0123ab9a371ebf0187b0fe5614fbb72c", 24 | "Type": "S3" 25 | } 26 | }, 27 | "Type": "AWS::CodeBuild::Project" 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /tests/examples_output/CustomResource.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "Example template showing how a Lambda Function CustomResource might lookFor information on AWS Lambda-backed Custom Resources see:http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-custom-resources-lambda.html", 3 | "Resources": { 4 | "ClusterGroup": { 5 | "Properties": { 6 | "PlacementGroupName": "ExampleClusterGroup", 7 | "ServiceToken": { 8 | "Fn::Join": [ 9 | "", 10 | [ 11 | "arn:aws:lambda:", 12 | { 13 | "Ref": "AWS::Region" 14 | }, 15 | ":", 16 | { 17 | "Ref": "AWS::AccountId" 18 | }, 19 | ":function:cfnPlacementGroup" 20 | ] 21 | ] 22 | } 23 | }, 24 | "Type": "Custom::PlacementGroup" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /tests/examples_output/Dlm.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Resources": { 4 | "DlmRole": { 5 | "Properties": { 6 | "AssumeRolePolicyDocument": { 7 | "Statement": [ 8 | { 9 | "Action": [ 10 | "sts:AssumeRole" 11 | ], 12 | "Effect": "Allow", 13 | "Principal": { 14 | "Service": [ 15 | "ec2.amazonaws.com" 16 | ] 17 | } 18 | } 19 | ] 20 | } 21 | }, 22 | "Type": "AWS::IAM::Role" 23 | }, 24 | "LifecyclePolicy": { 25 | "Properties": { 26 | "Description": "Daily backup", 27 | "ExecutionRoleArn": { 28 | "Fn::GetAtt": [ 29 | "DlmRole", 30 | "Arn" 31 | ] 32 | }, 33 | "PolicyDetails": { 34 | "ResourceTypes": [ 35 | "VOLUME" 36 | ], 37 | "Schedules": [ 38 | { 39 | "CopyTags": true, 40 | "CreateRule": { 41 | "Interval": 12, 42 | "IntervalUnit": "HOURS", 43 | "Times": [ 44 | "13:00" 45 | ] 46 | }, 47 | "Name": "Daily Snapshots", 48 | "RetainRule": { 49 | "Count": 1 50 | }, 51 | "TagsToAdd": [ 52 | { 53 | "Key": "type", 54 | "Value": "DailySnapshot" 55 | } 56 | ] 57 | } 58 | ], 59 | "TargetTags": [ 60 | { 61 | "Key": "Backup", 62 | "Value": "True" 63 | } 64 | ] 65 | }, 66 | "State": "ENABLED" 67 | }, 68 | "Type": "AWS::DLM::LifecyclePolicy" 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /tests/examples_output/ECRSample.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "MyRepository": { 4 | "Properties": { 5 | "RepositoryName": "test-repository", 6 | "RepositoryPolicyText": { 7 | "Statement": [ 8 | { 9 | "Action": [ 10 | "ecr:GetDownloadUrlForLayer", 11 | "ecr:BatchGetImage", 12 | "ecr:BatchCheckLayerAvailability", 13 | "ecr:PutImage", 14 | "ecr:InitiateLayerUpload", 15 | "ecr:UploadLayerPart", 16 | "ecr:CompleteLayerUpload" 17 | ], 18 | "Effect": "Allow", 19 | "Principal": { 20 | "AWS": [ 21 | "arn:aws:iam::123456789012:user/Bob", 22 | "arn:aws:iam::123456789012:user/Alice" 23 | ] 24 | }, 25 | "Sid": "AllowPushPull" 26 | } 27 | ], 28 | "Version": "2008-10-17" 29 | } 30 | }, 31 | "Type": "AWS::ECR::Repository" 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /tests/examples_output/ECSFargate.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Parameters": { 4 | "Subnet": { 5 | "Description": "A VPC subnet ID for the container.", 6 | "Type": "AWS::EC2::Subnet::Id" 7 | } 8 | }, 9 | "Resources": { 10 | "Cluster": { 11 | "Type": "AWS::ECS::Cluster" 12 | }, 13 | "NginxService": { 14 | "Properties": { 15 | "Cluster": { 16 | "Ref": "Cluster" 17 | }, 18 | "DesiredCount": 1, 19 | "LaunchType": "FARGATE", 20 | "NetworkConfiguration": { 21 | "AwsvpcConfiguration": { 22 | "Subnets": [ 23 | { 24 | "Ref": "Subnet" 25 | } 26 | ] 27 | } 28 | }, 29 | "TaskDefinition": { 30 | "Ref": "TaskDefinition" 31 | } 32 | }, 33 | "Type": "AWS::ECS::Service" 34 | }, 35 | "TaskDefinition": { 36 | "Properties": { 37 | "ContainerDefinitions": [ 38 | { 39 | "Essential": true, 40 | "Image": "nginx", 41 | "Name": "nginx", 42 | "PortMappings": [ 43 | { 44 | "ContainerPort": 80 45 | } 46 | ] 47 | } 48 | ], 49 | "Cpu": "256", 50 | "Memory": "512", 51 | "NetworkMode": "awsvpc", 52 | "RequiresCompatibilities": [ 53 | "FARGATE" 54 | ] 55 | }, 56 | "Type": "AWS::ECS::TaskDefinition" 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /tests/examples_output/ElasticsearchDomain.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "Elasticsearch Domain example", 3 | "Resources": { 4 | "ElasticsearchDomain": { 5 | "Properties": { 6 | "AccessPolicies": { 7 | "Statement": [ 8 | { 9 | "Action": "es:*", 10 | "Effect": "Allow", 11 | "Principal": { 12 | "AWS": "*" 13 | }, 14 | "Resource": "*" 15 | } 16 | ], 17 | "Version": "2012-10-17" 18 | }, 19 | "AdvancedOptions": { 20 | "rest.action.multi.allow_explicit_index": true 21 | }, 22 | "DomainName": "ExampleElasticsearchDomain", 23 | "EBSOptions": { 24 | "EBSEnabled": true, 25 | "Iops": 0, 26 | "VolumeSize": 20, 27 | "VolumeType": "gp2" 28 | }, 29 | "ElasticsearchClusterConfig": { 30 | "DedicatedMasterCount": 3, 31 | "DedicatedMasterEnabled": true, 32 | "DedicatedMasterType": "m3.medium.elasticsearch", 33 | "InstanceCount": 2, 34 | "InstanceType": "m3.medium.elasticsearch", 35 | "ZoneAwarenessEnabled": true 36 | }, 37 | "SnapshotOptions": { 38 | "AutomatedSnapshotStartHour": 0 39 | }, 40 | "VPCOptions": { 41 | "SecurityGroupIds": [ 42 | "sg-04cf048c" 43 | ], 44 | "SubnetIds": [ 45 | "subnet-4f2bb123" 46 | ] 47 | } 48 | }, 49 | "Type": "AWS::Elasticsearch::Domain" 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /tests/examples_output/IAM_Roles_and_InstanceProfiles.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template: This template demonstrates the creation of IAM Roles and InstanceProfiles.", 3 | "Resources": { 4 | "CFNInstanceProfile": { 5 | "Properties": { 6 | "Roles": [ 7 | { 8 | "Ref": "CFNRole" 9 | } 10 | ] 11 | }, 12 | "Type": "AWS::IAM::InstanceProfile" 13 | }, 14 | "CFNRole": { 15 | "Properties": { 16 | "AssumeRolePolicyDocument": { 17 | "Statement": [ 18 | { 19 | "Action": [ 20 | "sts:AssumeRole" 21 | ], 22 | "Effect": "Allow", 23 | "Principal": { 24 | "Service": [ 25 | "ec2.amazonaws.com" 26 | ] 27 | } 28 | } 29 | ] 30 | } 31 | }, 32 | "Type": "AWS::IAM::Role" 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /tests/examples_output/Kinesis_Stream.template: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "StreamName": { 4 | "Description": "Stream Name (Physical ID)", 5 | "Value": { 6 | "Ref": "TestStream" 7 | } 8 | } 9 | }, 10 | "Resources": { 11 | "TestStream": { 12 | "Properties": { 13 | "ShardCount": 1 14 | }, 15 | "Type": "AWS::Kinesis::Stream" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tests/examples_output/Mediapackage.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Resources": { 4 | "MediaPackage": { 5 | "Properties": { 6 | "Id": "MediaPackageChannel" 7 | }, 8 | "Type": "AWS::MediaPackage::Channel" 9 | }, 10 | "MediaPackageOriginEndpoint": { 11 | "Properties": { 12 | "ChannelId": "MediaPackageChannel", 13 | "Description": "MediaPackage HLS endpoint", 14 | "HlsPackage": { 15 | "IncludeIframeOnlyStream": false, 16 | "PlaylistType": "NONE", 17 | "PlaylistWindowSeconds": 60, 18 | "ProgramDateTimeIntervalSeconds": 0, 19 | "SegmentDurationSeconds": 6, 20 | "UseAudioRenditionGroup": false 21 | }, 22 | "Id": "MediaPackageOriginEndpoint", 23 | "ManifestName": "MediaPackageOriginEndpoint", 24 | "Origination": "ALLOW" 25 | }, 26 | "Type": "AWS::MediaPackage::OriginEndpoint" 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /tests/examples_output/Metadata.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "Example to show adding a Metadata section to the template", 3 | "Metadata": { 4 | "Comments": "Initial Draft", 5 | "LastUpdated": "Jan 1st 2015", 6 | "UpdatedBy": "First Last", 7 | "Version": "V1.0" 8 | }, 9 | "Resources": {} 10 | } 11 | -------------------------------------------------------------------------------- /tests/examples_output/MskCluster.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "TestCluster": { 4 | "Properties": { 5 | "BrokerNodeGroupInfo": { 6 | "BrokerAZDistribution": "DEFAULT", 7 | "ClientSubnets": [ 8 | "subnet-ce49ff7bcd", 9 | "subnet-2541a68474", 10 | "subnet-1d6b6f39da" 11 | ], 12 | "InstanceType": "kafka.m5.large", 13 | "SecurityGroups": [ 14 | "sg-c73ebda3" 15 | ], 16 | "StorageInfo": { 17 | "EBSStorageInfo": { 18 | "VolumeSize": 100 19 | } 20 | } 21 | }, 22 | "ClientAuthentication": { 23 | "Tls": { 24 | "CertificateAuthorityArnList": [ 25 | "ReplaceWithCAArn" 26 | ] 27 | } 28 | }, 29 | "ClusterName": "MyMskCluster", 30 | "ConfigurationInfo": { 31 | "Arn": "arn:aws:kafka:us-east-1:123456789012:configuration/example-configuration-name/abcdabcd-1234-abcd-1234-abcd123e8e8e-1", 32 | "Revision": 1 33 | }, 34 | "EncryptionInfo": { 35 | "EncryptionAtRest": { 36 | "DataVolumeKMSKeyId": "ReplaceWithKmsKeyArn" 37 | }, 38 | "EncryptionInTransit": { 39 | "ClientBroker": "TLS", 40 | "InCluster": true 41 | } 42 | }, 43 | "EnhancedMonitoring": "PER_BROKER", 44 | "KafkaVersion": "2.1.0", 45 | "NumberOfBrokerNodes": 3, 46 | "Tags": { 47 | "MyTagName": "MyTagValue" 48 | } 49 | }, 50 | "Type": "AWS::MSK::Cluster" 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /tests/examples_output/RDS_Snapshot_On_Delete.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS CloudFormation Sample Template RDS_Snapshot_On_Delete: Sample template showing how to create an RDS DBInstance that is snapshotted on stack deletion. **WARNING** This template creates an Amazon RDS database instance. When the stack is deleted a database snpshot will be left in your account. You will be billed for the AWS resources used if you create a stack from this template.", 4 | "Outputs": { 5 | "JDBCConnectionString": { 6 | "Description": "JDBC connection string for the database", 7 | "Value": { 8 | "Fn::Join": [ 9 | "", 10 | [ 11 | "jdbc:mysql://", 12 | { 13 | "Fn::GetAtt": [ 14 | "MyDB", 15 | "Endpoint.Address" 16 | ] 17 | }, 18 | ":", 19 | { 20 | "Fn::GetAtt": [ 21 | "MyDB", 22 | "Endpoint.Port" 23 | ] 24 | }, 25 | "/MyDatabase" 26 | ] 27 | ] 28 | } 29 | } 30 | }, 31 | "Resources": { 32 | "MyDB": { 33 | "Properties": { 34 | "AllocatedStorage": "5", 35 | "DBInstanceClass": "db.m1.small", 36 | "DBName": "MyDatabase", 37 | "Engine": "MySQL", 38 | "MasterUserPassword": "myPassword", 39 | "MasterUsername": "myName" 40 | }, 41 | "Type": "AWS::RDS::DBInstance" 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /tests/examples_output/RedshiftServerless.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "RedshiftServerless: Template and module example", 3 | "Resources": { 4 | "RedshiftServerlessNamespace": { 5 | "Properties": { 6 | "AdminUserPassword": "{{{{resolve:ssm:/redshift_admin_password}}}}", 7 | "AdminUsername": "{{{{resolve:ssm:/redshift_admin_username}}}}", 8 | "DbName": "dev", 9 | "DefaultIamRoleArn": "arn:aws:iam::123456789123:role/service-role/AmazonRedshift-CommandsAccessRole-123451234512345", 10 | "IamRoles": [], 11 | "NamespaceName": "serverless" 12 | }, 13 | "Type": "AWS::RedshiftServerless::Namespace" 14 | }, 15 | "RedshiftServerlessWorkgroup": { 16 | "Properties": { 17 | "ConfigParameters": [ 18 | { 19 | "ParameterKey": "enable_user_activity_logging", 20 | "ParameterValue": "true" 21 | } 22 | ], 23 | "EnhancedVpcRouting": true, 24 | "NamespaceName": "serverless", 25 | "PubliclyAccessible": false, 26 | "SecurityGroupIds": [ 27 | "sg-12345123451234567" 28 | ], 29 | "SubnetIds": [ 30 | "subnet-12345678912345678", 31 | "subnet-98765432198765432" 32 | ], 33 | "WorkgroupName": "RedshiftServerlessWorkgroup" 34 | }, 35 | "Type": "AWS::RedshiftServerless::Workgroup" 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /tests/examples_output/Route53_CNAME.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template Route53_CNAME: Sample template showing how to create an Amazon Route 53 CNAME record. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "DomainName": { 5 | "Value": { 6 | "Ref": "myDNSRecord" 7 | } 8 | } 9 | }, 10 | "Parameters": { 11 | "HostedZone": { 12 | "Description": "The DNS name of an existing Amazon Route 53 hosted zone", 13 | "Type": "String" 14 | } 15 | }, 16 | "Resources": { 17 | "myDNSRecord": { 18 | "Properties": { 19 | "Comment": "CNAME redirect to aws.amazon.com.", 20 | "HostedZoneName": { 21 | "Fn::Join": [ 22 | "", 23 | [ 24 | { 25 | "Ref": "HostedZone" 26 | }, 27 | "." 28 | ] 29 | ] 30 | }, 31 | "Name": { 32 | "Fn::Join": [ 33 | "", 34 | [ 35 | { 36 | "Ref": "AWS::StackName" 37 | }, 38 | ".", 39 | { 40 | "Ref": "AWS::Region" 41 | }, 42 | ".", 43 | { 44 | "Ref": "HostedZone" 45 | }, 46 | "." 47 | ] 48 | ] 49 | }, 50 | "ResourceRecords": [ 51 | "aws.amazon.com" 52 | ], 53 | "TTL": "900", 54 | "Type": "CNAME" 55 | }, 56 | "Type": "AWS::Route53::RecordSet" 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /tests/examples_output/S3_Bucket.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template S3_Bucket: Sample template showing how to create a publicly accessible S3 bucket. **WARNING** This template creates an Amazon S3 Bucket. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "BucketName": { 5 | "Description": "Name of S3 bucket to hold website content", 6 | "Value": { 7 | "Ref": "S3Bucket" 8 | } 9 | } 10 | }, 11 | "Resources": { 12 | "S3Bucket": { 13 | "Properties": { 14 | "AccessControl": "PublicRead" 15 | }, 16 | "Type": "AWS::S3::Bucket" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /tests/examples_output/S3_Bucket_With_AccelerateConfiguration.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template S3_Bucket: Sample template showing :How to create a publicly accessible S3 bucket. How to enable S3 Transfer Acceleration. **WARNING** This template creates an Amazon S3 Bucket. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "BucketName": { 5 | "Description": "Name of S3 bucket with s3 transfer acceleration enabled", 6 | "Value": { 7 | "Ref": "S3Bucket" 8 | } 9 | } 10 | }, 11 | "Resources": { 12 | "S3Bucket": { 13 | "Properties": { 14 | "AccelerateConfiguration": { 15 | "AccelerationStatus": "Enabled" 16 | }, 17 | "AccessControl": "PublicRead" 18 | }, 19 | "Type": "AWS::S3::Bucket" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /tests/examples_output/S3_Website_Bucket_With_Retain_On_Delete.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template S3_Website_Bucket_With_Retain_On_Delete: Sample template showing how to create a publicly accessible S3 bucket configured for website access with a deletion policy of retail on delete. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "S3BucketSecureURL": { 5 | "Description": "Name of S3 bucket to hold website content", 6 | "Value": { 7 | "Fn::Join": [ 8 | "", 9 | [ 10 | "http://", 11 | { 12 | "Fn::GetAtt": [ 13 | "S3Bucket", 14 | "DomainName" 15 | ] 16 | } 17 | ] 18 | ] 19 | } 20 | }, 21 | "WebsiteURL": { 22 | "Description": "URL for website hosted on S3", 23 | "Value": { 24 | "Fn::GetAtt": [ 25 | "S3Bucket", 26 | "WebsiteURL" 27 | ] 28 | } 29 | } 30 | }, 31 | "Resources": { 32 | "S3Bucket": { 33 | "Properties": { 34 | "AccessControl": "PublicRead", 35 | "WebsiteConfiguration": { 36 | "ErrorDocument": "error.html", 37 | "IndexDocument": "index.html" 38 | } 39 | }, 40 | "Type": "AWS::S3::Bucket" 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /tests/examples_output/SQSDLQ.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template SQS: Sample template showing how to create an SQS queue with a dead letter queue. **WARNING** This template creates Amazon SQS Queues. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "DeadLetterQueueARN": { 5 | "Description": "ARN of the dead letter queue", 6 | "Value": { 7 | "Fn::GetAtt": [ 8 | "MyDeadLetterQueue", 9 | "Arn" 10 | ] 11 | } 12 | }, 13 | "DeadLetterQueueURL": { 14 | "Description": "URL of the dead letter queue", 15 | "Value": { 16 | "Ref": "MyDeadLetterQueue" 17 | } 18 | }, 19 | "SourceQueueARN": { 20 | "Description": "ARN of the source queue", 21 | "Value": { 22 | "Fn::GetAtt": [ 23 | "MySourceQueue", 24 | "Arn" 25 | ] 26 | } 27 | }, 28 | "SourceQueueURL": { 29 | "Description": "URL of the source queue", 30 | "Value": { 31 | "Ref": "MySourceQueue" 32 | } 33 | } 34 | }, 35 | "Resources": { 36 | "MyDeadLetterQueue": { 37 | "Type": "AWS::SQS::Queue" 38 | }, 39 | "MySourceQueue": { 40 | "Properties": { 41 | "RedrivePolicy": { 42 | "deadLetterTargetArn": { 43 | "Fn::GetAtt": [ 44 | "MyDeadLetterQueue", 45 | "Arn" 46 | ] 47 | }, 48 | "maxReceiveCount": "5" 49 | } 50 | }, 51 | "Type": "AWS::SQS::Queue" 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /tests/examples_output/SQSEncrypt.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "AWS CloudFormation Sample Template SQS: Sample template showing how to create an SQS queue with Server Side Encryption. **WARNING** This template creates Amazon SQS Queues. You will be billed for the AWS resources used if you create a stack from this template.", 3 | "Outputs": { 4 | "SourceQueueARN": { 5 | "Description": "ARN of the source queue", 6 | "Value": { 7 | "Fn::GetAtt": [ 8 | "MySourceQueue", 9 | "Arn" 10 | ] 11 | } 12 | }, 13 | "SourceQueueURL": { 14 | "Description": "URL of the source queue", 15 | "Value": { 16 | "Ref": "MySourceQueue" 17 | } 18 | } 19 | }, 20 | "Resources": { 21 | "MySourceQueue": { 22 | "Properties": { 23 | "KmsDataKeyReusePeriodSeconds": 60, 24 | "KmsMasterKeyId": "testing" 25 | }, 26 | "Type": "AWS::SQS::Queue" 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /tests/examples_output/Secretsmanager.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Resources": { 4 | "MySecret": { 5 | "Properties": { 6 | "Description": "This is an autogenerated secret", 7 | "GenerateSecretString": { 8 | "GenerateStringKey": "password", 9 | "PasswordLength": 30, 10 | "SecretStringTemplate": "{\"username\":\"test-user\"}" 11 | }, 12 | "Name": "MySecret", 13 | "Tags": [ 14 | { 15 | "Key": "Appname", 16 | "Value": "AppA" 17 | } 18 | ] 19 | }, 20 | "Type": "AWS::SecretsManager::Secret" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /tests/examples_output/Secretsmanager_Rds.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Resources": { 4 | "DbSecret": { 5 | "Properties": { 6 | "Description": "This is the RDS instance master password", 7 | "GenerateSecretString": { 8 | "GenerateStringKey": "password", 9 | "PasswordLength": 30, 10 | "SecretStringTemplate": "{\"username\":\"admin\"}" 11 | }, 12 | "Name": "DbSecret" 13 | }, 14 | "Type": "AWS::SecretsManager::Secret" 15 | }, 16 | "Instance": { 17 | "Properties": { 18 | "AllocatedStorage": "20", 19 | "DBInstanceClass": "db.t2.micro", 20 | "DBInstanceIdentifier": "TestInstance", 21 | "Engine": "mysql", 22 | "MasterUserPassword": { 23 | "Fn::Join": [ 24 | "", 25 | [ 26 | "{{resolve:secretsmanager:", 27 | { 28 | "Ref": "DbSecret" 29 | }, 30 | ":SecretString:password}}" 31 | ] 32 | ] 33 | }, 34 | "MasterUsername": { 35 | "Fn::Join": [ 36 | "", 37 | [ 38 | "{{resolve:secretsmanager:", 39 | { 40 | "Ref": "DbSecret" 41 | }, 42 | ":SecretString:username}}" 43 | ] 44 | ] 45 | } 46 | }, 47 | "Type": "AWS::RDS::DBInstance" 48 | }, 49 | "SecretRDSInstanceAttachment": { 50 | "Properties": { 51 | "SecretId": { 52 | "Ref": "DbSecret" 53 | }, 54 | "TargetId": { 55 | "Ref": "Instance" 56 | }, 57 | "TargetType": "AWS::RDS::DBInstance" 58 | }, 59 | "Type": "AWS::SecretsManager::SecretTargetAttachment" 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /tests/examples_output/Serverless_Deployment_Preference.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "A function that uses the configured traffic shifting type for a canary deployment.", 3 | "Resources": { 4 | "Function": { 5 | "Properties": { 6 | "AutoPublishAlias": "live", 7 | "CodeUri": "s3:///function.zip", 8 | "DeploymentPreference": { 9 | "Enabled": true, 10 | "Type": "Canary10Percent5Minutes" 11 | }, 12 | "Handler": "index.handler", 13 | "Runtime": "nodejs6.10" 14 | }, 15 | "Type": "AWS::Serverless::Function" 16 | } 17 | }, 18 | "Transform": "AWS::Serverless-2016-10-31" 19 | } 20 | -------------------------------------------------------------------------------- /tests/examples_output/Serverless_S3_Processor.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "A function is triggered off an upload to a bucket. It logs the content type of the uploaded object.", 3 | "Resources": { 4 | "Bucket": { 5 | "Type": "AWS::S3::Bucket" 6 | }, 7 | "ProcessorFunction": { 8 | "Properties": { 9 | "CodeUri": "s3:///s3_processor.zip", 10 | "Events": { 11 | "PhotoUpload": { 12 | "Properties": { 13 | "Bucket": { 14 | "Ref": "Bucket" 15 | }, 16 | "Events": [ 17 | "s3:ObjectCreated:*" 18 | ] 19 | }, 20 | "Type": "S3" 21 | } 22 | }, 23 | "Handler": "index.handler", 24 | "Policies": "AmazonS3ReadOnlyAccess", 25 | "Runtime": "nodejs4.3" 26 | }, 27 | "Type": "AWS::Serverless::Function" 28 | } 29 | }, 30 | "Transform": "AWS::Serverless-2016-10-31" 31 | } 32 | -------------------------------------------------------------------------------- /tests/examples_output/VpnEndpoint.template: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "myClientVpnEndpoint": { 4 | "Properties": { 5 | "AuthenticationOptions": [ 6 | { 7 | "ActiveDirectory": { 8 | "DirectoryId": "d-926example" 9 | }, 10 | "Type": "directory-service-authentication" 11 | } 12 | ], 13 | "ClientCidrBlock": "10.0.0.0/22", 14 | "ConnectionLogOptions": { 15 | "Enabled": false 16 | }, 17 | "Description": "My Client VPN Endpoint", 18 | "DnsServers": [ 19 | "11.11.0.1" 20 | ], 21 | "ServerCertificateArn": "arn:aws:acm:us-east-1:111122223333:certificate/12345678-1234-1234-1234-123456789012", 22 | "TagSpecifications": [ 23 | { 24 | "ResourceType": "client-vpn-endpoint", 25 | "Tags": [ 26 | { 27 | "Key": "Purpose", 28 | "Value": "Production" 29 | } 30 | ] 31 | } 32 | ], 33 | "TransportProtocol": "udp" 34 | }, 35 | "Type": "AWS::EC2::ClientVpnEndpoint" 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /tests/examples_output/WaitObject.template: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "Example template showing how the WaitCondition and WaitConditionHandle are configured. With this template, the stack will not complete until either the WaitCondition timeout occurs, or you manually signal the WaitCondition object using the URL created by the WaitConditionHandle. You can use CURL or some other equivalent mechanism to signal the WaitCondition. To find the URL, use cfn-describe-stack-resources or the AWS Management Console to display the PhysicalResourceId of the WaitConditionHandle - this is the URL to use to signal. For details of the signal request see the AWS CloudFormation User Guide at http://docs.amazonwebservices.com/AWSCloudFormation/latest/UserGuide/", 3 | "Outputs": { 4 | "ApplicationData": { 5 | "Description": "The data passed back as part of signalling the WaitCondition", 6 | "Value": { 7 | "Fn::GetAtt": [ 8 | "myWaitCondition", 9 | "Data" 10 | ] 11 | } 12 | } 13 | }, 14 | "Resources": { 15 | "myWaitCondition": { 16 | "Properties": { 17 | "Handle": { 18 | "Ref": "myWaitHandle" 19 | }, 20 | "Timeout": "300" 21 | }, 22 | "Type": "AWS::CloudFormation::WaitCondition" 23 | }, 24 | "myWaitHandle": { 25 | "Type": "AWS::CloudFormation::WaitConditionHandle" 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /tests/examples_output/__init__.template: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/tests/examples_output/__init__.template -------------------------------------------------------------------------------- /tests/test_cloudformation.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import Ref 4 | from troposphere.cloudformation import WaitCondition, WaitConditionHandle 5 | from troposphere.policies import CreationPolicy, ResourceSignal 6 | 7 | 8 | class TestWaitCondition(unittest.TestCase): 9 | def test_CreationPolicy(self): 10 | w = WaitCondition( 11 | "mycondition", 12 | CreationPolicy=CreationPolicy( 13 | ResourceSignal=ResourceSignal(Timeout="PT15M") 14 | ), 15 | ) 16 | w.validate() 17 | 18 | def test_CreationPolicyWithProps(self): 19 | w = WaitCondition( 20 | "mycondition", 21 | Count=10, 22 | CreationPolicy=CreationPolicy( 23 | ResourceSignal=ResourceSignal(Timeout="PT15M") 24 | ), 25 | ) 26 | with self.assertRaises(ValueError): 27 | w.validate() 28 | 29 | def test_RequiredProps(self): 30 | handle = WaitConditionHandle("myWaitHandle") 31 | w = WaitCondition( 32 | "mycondition", 33 | Handle=Ref(handle), 34 | Timeout="300", 35 | ) 36 | w.validate() 37 | 38 | 39 | if __name__ == "__main__": 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /tests/test_cloudfront.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import troposphere.cloudfront as cloudfront 4 | from troposphere import If 5 | 6 | 7 | class TestCorsConfig(unittest.TestCase): 8 | def test_allowmethods(self): 9 | cloudfront.AccessControlAllowMethods(Items=[]) 10 | cloudfront.AccessControlAllowMethods(Items=["GET", "POST"]) 11 | cloudfront.AccessControlAllowMethods( 12 | Items=If("SomeCondition", ["GET"], ["POST"]) 13 | ) 14 | with self.assertRaises(ValueError): 15 | cloudfront.AccessControlAllowMethods(Items=["get"]) 16 | with self.assertRaises(ValueError): 17 | cloudfront.AccessControlAllowMethods(Items=["GET", "get", "PUT"]) 18 | with self.assertRaises(TypeError): 19 | cloudfront.AccessControlAllowMethods(Items=10) 20 | with self.assertRaises(TypeError): 21 | cloudfront.AccessControlAllowMethods(Items="GET") 22 | 23 | 24 | if __name__ == "__main__": 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /tests/test_codecommit.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import troposphere.codecommit as cc 4 | 5 | 6 | class TestCodeCommit(unittest.TestCase): 7 | def test_trigger(self): 8 | trigger = cc.Trigger( 9 | DestinationArn="arn:random", 10 | Events=[ 11 | "all", 12 | ], 13 | Name="trigger name", 14 | ) 15 | trigger.to_dict() 16 | 17 | trigger = cc.Trigger( 18 | DestinationArn="arn:random", 19 | Events=[ 20 | "updateReference", 21 | "createReference", 22 | "deleteReference", 23 | ], 24 | Name="trigger name", 25 | ) 26 | trigger.to_dict() 27 | 28 | trigger = cc.Trigger( 29 | DestinationArn="arn:random", 30 | Events=[ 31 | "all", 32 | "deleteReference", 33 | ], 34 | Name="trigger name", 35 | ) 36 | with self.assertRaisesRegex(ValueError, "Trigger events: all"): 37 | trigger.to_dict() 38 | 39 | trigger = cc.Trigger( 40 | DestinationArn="arn:random", 41 | Events=[ 42 | "deleteReference", 43 | "foobar", 44 | ], 45 | Name="trigger name", 46 | ) 47 | with self.assertRaisesRegex(ValueError, "invalid event foobar"): 48 | trigger.to_dict() 49 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.config import ONE_HOUR, SourceDetails 4 | 5 | 6 | class TestConfig(unittest.TestCase): 7 | def test_SourceDetails(self): 8 | SourceDetails( 9 | EventSource="esource", 10 | MaximumExecutionFrequency=ONE_HOUR, 11 | MessageType="mtype", 12 | ).to_dict() 13 | 14 | def test_invalid_SourceDetails_MaximumExecutionFrequency(self): 15 | with self.assertRaises(ValueError): 16 | SourceDetails( 17 | EventSource="esource", 18 | MaximumExecutionFrequency="foo", 19 | MessageType="mtype", 20 | ).to_dict() 21 | 22 | 23 | if __name__ == "__main__": 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /tests/test_dlm.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.dlm import CreateRule 4 | 5 | 6 | class TestDlmCreateRule(unittest.TestCase): 7 | def test_createrule_interval_bad_value(self): 8 | with self.assertRaisesRegex(ValueError, "Interval must be one of"): 9 | CreateRule("CreateRule", Interval=25) 10 | 11 | def test_createrule_intervalunit_bad_value(self): 12 | with self.assertRaisesRegex(ValueError, "Interval unit must be one of"): 13 | CreateRule("CreateRule", Interval=24, IntervalUnit="HOUR") 14 | 15 | def test_createrule(self): 16 | CreateRule("CreateRule", Interval=24, IntervalUnit="HOURS") 17 | -------------------------------------------------------------------------------- /tests/test_ecr.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import troposphere.ecr as ecr 4 | from troposphere import Tags 5 | 6 | 7 | class TestECS(unittest.TestCase): 8 | def test_ecr_with_tags(self): 9 | repo = ecr.Repository( 10 | "ECRRepo", 11 | RepositoryName="myrepo", 12 | Tags=Tags(Name="myrepo"), 13 | ) 14 | repo.to_dict() 15 | -------------------------------------------------------------------------------- /tests/test_examples.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | import re 4 | import sys 5 | 6 | 7 | def test_example(input_filename, expected_output): 8 | saved = sys.stdout 9 | stdout = io.StringIO() 10 | try: 11 | sys.stdout = stdout 12 | with open(input_filename) as f: 13 | code = compile(f.read(), input_filename, "exec") 14 | exec(code, {"__name__": "__main__"}) 15 | finally: 16 | sys.stdout = saved 17 | # rewind fake stdout so we can read it 18 | stdout.seek(0) 19 | actual_output = stdout.read() 20 | assert str(expected_output) == str(actual_output) 21 | 22 | 23 | def load_example_tests(): 24 | # Filter out all *.py files from the examples directory 25 | examples = "examples" 26 | regex = re.compile(r".py$", re.I) 27 | example_filenames = filter(regex.search, os.listdir(examples)) 28 | 29 | results = [] 30 | for f in example_filenames: 31 | expected_output = open("tests/examples_output/%s.template" % f[:-3]).read() 32 | results.append((examples + "/" + f, expected_output)) 33 | 34 | return results 35 | 36 | 37 | def pytest_generate_tests(metafunc): 38 | example_data = load_example_tests() 39 | metafunc.parametrize("input_filename,expected_output", example_data) 40 | -------------------------------------------------------------------------------- /tests/test_findinmap.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import BaseAWSObject, FindInMap 4 | 5 | expected_find_in_map = {"Fn::FindInMap": ["m", "t", "s"]} 6 | 7 | expected_find_in_map_with_default = { 8 | "Fn::FindInMap": ["m", "t", "s", {"DefaultValue": "d"}] 9 | } 10 | 11 | map_object = BaseAWSObject(title="m") 12 | 13 | 14 | class TestFindInMap(unittest.TestCase): 15 | def test_find_in_map(self): 16 | find_in_map = FindInMap(mapname="m", toplevelkey="t", secondlevelkey="s") 17 | self.assertEqual(find_in_map.to_dict(), expected_find_in_map) 18 | 19 | def test_find_in_map_with_object(self): 20 | find_in_map = FindInMap(mapname=map_object, toplevelkey="t", secondlevelkey="s") 21 | self.assertEqual(find_in_map.to_dict(), expected_find_in_map) 22 | 23 | def test_find_in_map_with_default(self): 24 | find_in_map = FindInMap( 25 | mapname="m", toplevelkey="t", secondlevelkey="s", defaultvalue="d" 26 | ) 27 | self.assertEqual(find_in_map.to_dict(), expected_find_in_map_with_default) 28 | 29 | def test_find_in_map_with_object_and_default(self): 30 | find_in_map = FindInMap( 31 | mapname=map_object, toplevelkey="t", secondlevelkey="s", defaultvalue="d" 32 | ) 33 | self.assertEqual(find_in_map.to_dict(), expected_find_in_map_with_default) 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /tests/test_fsx.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.fsx import FileSystem 4 | 5 | 6 | class TestFSx(unittest.TestCase): 7 | def test_FileSystem(self): 8 | FileSystem( 9 | "filesystem", 10 | FileSystemType="type", 11 | StorageType="HDD", 12 | SubnetIds=["subnet"], 13 | ).to_dict() 14 | 15 | def test_invalid_storagetype(self): 16 | with self.assertRaises(ValueError): 17 | FileSystem( 18 | "filesystem", 19 | FileSystemType="type", 20 | StorageType="floppy", 21 | SubnetIds=["subnet"], 22 | ).to_dict() 23 | 24 | 25 | if __name__ == "__main__": 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/test_iottwinmaker.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import Name 4 | from troposphere.iottwinmaker import DataType, DataValue 5 | 6 | 7 | class TestPlacementTemplate(unittest.TestCase): 8 | def test_datatype_nestedtype(self): 9 | with self.assertRaises(TypeError): 10 | DataType(NestedType="foo") 11 | 12 | DataType(NestedType=DataType()) 13 | DataType(NestedType=Name("foo")) 14 | 15 | def test_datavalue_listvalue(self): 16 | with self.assertRaises(TypeError): 17 | DataValue(ListValue="foo") 18 | 19 | with self.assertRaises(TypeError): 20 | DataValue(ListValue=["foo"]) 21 | 22 | DataValue(ListValue=[DataValue(), Name("foo")]) 23 | -------------------------------------------------------------------------------- /tests/test_mappings.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import Template 4 | 5 | single_mapping = """\ 6 | { 7 | "Mappings": { 8 | "map": { 9 | "n": "v" 10 | } 11 | }, 12 | "Resources": {} 13 | }""" 14 | 15 | multiple_mappings = """\ 16 | { 17 | "Mappings": { 18 | "map": { 19 | "k1": { 20 | "n1": "v1" 21 | }, 22 | "k2": { 23 | "n2": "v2" 24 | } 25 | } 26 | }, 27 | "Resources": {} 28 | }""" 29 | 30 | 31 | class TestMappings(unittest.TestCase): 32 | def test_single_mapping(self): 33 | template = Template() 34 | template.add_mapping("map", {"n": "v"}) 35 | json = template.to_json() 36 | self.assertEqual(single_mapping, json) 37 | 38 | def test_multiple_mappings(self): 39 | template = Template() 40 | template.add_mapping("map", {"k1": {"n1": "v1"}}) 41 | template.add_mapping("map", {"k2": {"n2": "v2"}}) 42 | json = template.to_json() 43 | self.assertEqual(multiple_mappings, json) 44 | 45 | 46 | if __name__ == "__main__": 47 | unittest.main() 48 | -------------------------------------------------------------------------------- /tests/test_networkfirewall.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.networkfirewall import RuleGroup 4 | 5 | 6 | class TestNetworkFirewall(unittest.TestCase): 7 | def test_RuleGroup(self): 8 | RuleGroup( 9 | "rulegroup", 10 | Capacity="10", 11 | RuleGroupName="foobar", 12 | Type="STATEFUL", 13 | ).to_dict() 14 | 15 | def test_invalid_RuleGroup(self): 16 | with self.assertRaises(ValueError): 17 | RuleGroup( 18 | "rulegroup", 19 | Capacity="10", 20 | RuleGroupName="foobar", 21 | Type="NOSTATE", 22 | ).to_dict() 23 | 24 | 25 | if __name__ == "__main__": 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/test_opensearchservice.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import troposphere.opensearchservice as opensearchservice 4 | 5 | 6 | class TestOpenSearchServiceValidators(unittest.TestCase): 7 | def test_validate_search_service_engine_version(self): 8 | valid_values = [ 9 | "OpenSearch_1.1", 10 | "OpenSearch_10.123", 11 | "Elasticsearch_7.10", 12 | "Elasticsearch_6.5", 13 | ] 14 | for x in valid_values: 15 | opensearchservice.validate_search_service_engine_version(x) 16 | 17 | invalid_values = [ 18 | "openSearch_1.1", 19 | "apenSearch_10.123", 20 | "elasticsearch_7.10", 21 | "Elasticsearch_x.x", 22 | "latest", 23 | ] 24 | for x in invalid_values: 25 | with self.assertRaises(ValueError): 26 | opensearchservice.validate_search_service_engine_version(x) 27 | -------------------------------------------------------------------------------- /tests/test_parameters.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import Parameter, Ref 4 | 5 | 6 | class TestInitArguments(unittest.TestCase): 7 | def test_title_max_length(self): 8 | title = "i" * 256 9 | with self.assertRaises(ValueError): 10 | Parameter(title, Type="String") 11 | 12 | def test_ref_can_be_requested(self): 13 | param = Parameter("title", Type="String") 14 | reference = param.ref() 15 | 16 | self.assertIsInstance(reference, Ref) 17 | self.assertDictEqual(reference.data, {"Ref": "title"}) 18 | 19 | 20 | class TestParameterValidator(unittest.TestCase): 21 | def test_allowed_pattern_for_number(self): 22 | with self.assertRaises(ValueError): 23 | Parameter("Foo", Type="Number", AllowedPattern="^[a-zA-Z0-9]*$").validate() 24 | 25 | def test_allowed_pattern_for_comma_delimited_list_and_string(self): 26 | Parameter( 27 | "Foo", 28 | Type="CommaDelimitedList", 29 | AllowedPattern="^[A-Z]{2}$", 30 | Default="", 31 | ).validate() 32 | 33 | Parameter( 34 | "Foo", 35 | Type="String", 36 | AllowedPattern="^[A-Z]{2}$", 37 | Default="", 38 | ).validate() 39 | 40 | def test_aws_specific_type(self): 41 | Parameter( 42 | "Foo", 43 | Type="AWS::EC2::KeyPair::KeyName", 44 | Default="", 45 | ).validate() 46 | 47 | 48 | if __name__ == "__main__": 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /tests/test_resiliencehub.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.resiliencehub import FailurePolicy, ResiliencyPolicy 4 | 5 | 6 | class TestResiliencyPolicy(unittest.TestCase): 7 | def test_ResiliencyPolicy(self): 8 | ResiliencyPolicy( 9 | "policy", 10 | Policy={"Hardware": FailurePolicy(RpoInSecs=10, RtoInSecs=10)}, 11 | PolicyName="foo", 12 | Tier="MissionCritical", 13 | ).to_dict() 14 | 15 | def test_invalid_policy_key(self): 16 | with self.assertRaises(ValueError): 17 | ResiliencyPolicy( 18 | "policy", 19 | Policy={"Foo": FailurePolicy(RpoInSecs=10, RtoInSecs=10)}, 20 | PolicyName="foo", 21 | Tier="MissionCritical", 22 | ).to_dict() 23 | 24 | def test_invalid_policy_value(self): 25 | with self.assertRaises(ValueError): 26 | ResiliencyPolicy( 27 | "policy", 28 | Policy={"Hardware": 10}, 29 | PolicyName="foo", 30 | Tier="MissionCritical", 31 | ).to_dict() 32 | 33 | def test_invalid_policy_tier(self): 34 | with self.assertRaises(ValueError): 35 | ResiliencyPolicy( 36 | "policy", 37 | Policy={"Hardware": FailurePolicy(RpoInSecs=10, RtoInSecs=10)}, 38 | PolicyName="foo", 39 | Tier="foobar", 40 | ).to_dict() 41 | 42 | 43 | if __name__ == "__main__": 44 | unittest.main() 45 | -------------------------------------------------------------------------------- /tests/test_route53.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.route53 import AliasTarget 4 | 5 | 6 | class TestAliasTarget(unittest.TestCase): 7 | def test_bucket_template(self): 8 | AliasTarget("zone", "dnsname", True) 9 | AliasTarget(hostedzoneid="zone", dnsname="dnsname", evaluatetargethealth=True) 10 | AliasTarget(HostedZoneId="zone", DNSName="dnsname", EvaluateTargetHealth=True) 11 | 12 | 13 | if __name__ == "__main__": 14 | unittest.main() 15 | -------------------------------------------------------------------------------- /tests/test_scheduler.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.scheduler import EcsParameters, FlexibleTimeWindow 4 | 5 | 6 | class TestSchedule(unittest.TestCase): 7 | def test_flexible_time_window_mode(self): 8 | for invalid_value in ("", "not-valid"): 9 | with self.assertRaises(ValueError): 10 | FlexibleTimeWindow(Mode=invalid_value).to_dict() 11 | 12 | for valid_value in ("OFF", "FLEXIBLE"): 13 | FlexibleTimeWindow(Mode=valid_value).to_dict() 14 | 15 | def test_ecsparameters_tags(self): 16 | with self.assertRaises(ValueError): 17 | EcsParameters( 18 | TaskDefinitionArn="some-arn", 19 | Tags={"tag1": "value1"}, 20 | ).to_dict() 21 | 22 | EcsParameters(TaskDefinitionArn="some-arn").to_dict() 23 | 24 | 25 | if __name__ == "__main__": 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/test_sqs.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere import Join 4 | from troposphere.sqs import Queue 5 | 6 | 7 | class TestQueue(unittest.TestCase): 8 | def test_QueueName(self): 9 | Queue( 10 | "q", 11 | FifoQueue=False, 12 | ).validate() 13 | 14 | Queue( 15 | "q", 16 | FifoQueue=True, 17 | QueueName="foobar.fifo", 18 | ).validate() 19 | 20 | Queue( 21 | "q", 22 | FifoQueue=True, 23 | QueueName=Join("foo", "bar"), 24 | ).validate() 25 | 26 | Queue( 27 | "q", 28 | FifoQueue=True, 29 | ).validate() 30 | 31 | with self.assertRaises(ValueError): 32 | Queue( 33 | "q", 34 | FifoQueue=True, 35 | QueueName="foobar", 36 | ).validate() 37 | 38 | 39 | if __name__ == "__main__": 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /tests/test_ssm.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.ssm import Document 4 | 5 | 6 | class TestQueue(unittest.TestCase): 7 | def test_Document(self): 8 | # dict 9 | Document("title", Content={"foo": "bar"}).validate() 10 | # Valid yaml and json 11 | Document("title", Content='{"foo": "bar"}').validate() 12 | Document("title", Content="{foo: bar}").validate() 13 | # Invalid json/yaml 14 | with self.assertRaises(ValueError): 15 | Document("title", Content="*foo").validate() 16 | 17 | 18 | if __name__ == "__main__": 19 | unittest.main() 20 | -------------------------------------------------------------------------------- /tests/test_stepfunctions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from troposphere.stepfunctions import Activity, StateMachine 4 | 5 | 6 | class TestStepFunctions(unittest.TestCase): 7 | def test_activity(self): 8 | activity = Activity( 9 | "myactivity", 10 | Name="testactivity", 11 | ) 12 | self.assertEqual(activity.Name, "testactivity") 13 | 14 | def test_statemachine(self): 15 | statemachine = StateMachine( 16 | "mystatemachine", 17 | DefinitionString="testdefinitionstring", 18 | RoleArn="testinrolearn", 19 | ) 20 | self.assertEqual(statemachine.RoleArn, "testinrolearn") 21 | 22 | def test_statemachine_missing_parameter(self): 23 | StateMachine( 24 | "mystatemachine", 25 | DefinitionString="testdefinitionstring", 26 | ) 27 | self.assertTrue(AttributeError) 28 | 29 | 30 | if __name__ == "__main__": 31 | unittest.main() 32 | -------------------------------------------------------------------------------- /tests/userdata_test_scripts/char_escaping.sh: -------------------------------------------------------------------------------- 1 | \n 2 | \ 3 | 4 | ? 5 | "" 6 | 7 | <> 8 | -------------------------------------------------------------------------------- /tests/userdata_test_scripts/empty.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/tests/userdata_test_scripts/empty.sh -------------------------------------------------------------------------------- /tests/userdata_test_scripts/one_line.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -------------------------------------------------------------------------------- /tests/userdata_test_scripts/simple.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Hello world" -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox:tox] 2 | envlist = 3 | pyflakes, 4 | pycodestyle, 5 | tests 6 | 7 | [testenv] 8 | basepython = python2.7 9 | 10 | [testenv:pyflakes] 11 | deps = pyflakes 12 | commands = 13 | pyflakes troposphere/ scripts/ tests/ 14 | 15 | [testenv:pycodestyle] 16 | deps = pycodestyle 17 | commands = 18 | pycodestyle --show-source --show-pep8 troposphere/ scripts/ tests/ 19 | 20 | [testenv:tests] 21 | commands = 22 | python setup.py test 23 | -------------------------------------------------------------------------------- /troposphere/backupgateway.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | 11 | 12 | class Hypervisor(AWSObject): 13 | """ 14 | `Hypervisor `__ 15 | """ 16 | 17 | resource_type = "AWS::BackupGateway::Hypervisor" 18 | 19 | props: PropsDictType = { 20 | "Host": (str, False), 21 | "KmsKeyArn": (str, False), 22 | "LogGroupArn": (str, False), 23 | "Name": (str, False), 24 | "Password": (str, False), 25 | "Tags": (Tags, False), 26 | "Username": (str, False), 27 | } 28 | -------------------------------------------------------------------------------- /troposphere/cloud9.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType, Tags 10 | from .validators import integer 11 | 12 | 13 | class Repository(AWSProperty): 14 | """ 15 | `Repository `__ 16 | """ 17 | 18 | props: PropsDictType = { 19 | "PathComponent": (str, True), 20 | "RepositoryUrl": (str, True), 21 | } 22 | 23 | 24 | class EnvironmentEC2(AWSObject): 25 | """ 26 | `EnvironmentEC2 `__ 27 | """ 28 | 29 | resource_type = "AWS::Cloud9::EnvironmentEC2" 30 | 31 | props: PropsDictType = { 32 | "AutomaticStopTimeMinutes": (integer, False), 33 | "ConnectionType": (str, False), 34 | "Description": (str, False), 35 | "ImageId": (str, True), 36 | "InstanceType": (str, True), 37 | "Name": (str, False), 38 | "OwnerArn": (str, False), 39 | "Repositories": ([Repository], False), 40 | "SubnetId": (str, False), 41 | "Tags": (Tags, False), 42 | } 43 | -------------------------------------------------------------------------------- /troposphere/codeconnections.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | 11 | 12 | class Connection(AWSObject): 13 | """ 14 | `Connection `__ 15 | """ 16 | 17 | resource_type = "AWS::CodeConnections::Connection" 18 | 19 | props: PropsDictType = { 20 | "ConnectionName": (str, True), 21 | "HostArn": (str, False), 22 | "ProviderType": (str, False), 23 | "Tags": (Tags, False), 24 | } 25 | -------------------------------------------------------------------------------- /troposphere/codeguruprofiler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType, Tags 10 | 11 | 12 | class AgentPermissions(AWSProperty): 13 | """ 14 | `AgentPermissions `__ 15 | """ 16 | 17 | props: PropsDictType = { 18 | "Principals": ([str], True), 19 | } 20 | 21 | 22 | class Channel(AWSProperty): 23 | """ 24 | `Channel `__ 25 | """ 26 | 27 | props: PropsDictType = { 28 | "channelId": (str, False), 29 | "channelUri": (str, True), 30 | } 31 | 32 | 33 | class ProfilingGroup(AWSObject): 34 | """ 35 | `ProfilingGroup `__ 36 | """ 37 | 38 | resource_type = "AWS::CodeGuruProfiler::ProfilingGroup" 39 | 40 | props: PropsDictType = { 41 | "AgentPermissions": (AgentPermissions, False), 42 | "AnomalyDetectionNotificationConfiguration": ([Channel], False), 43 | "ComputePlatform": (str, False), 44 | "ProfilingGroupName": (str, True), 45 | "Tags": (Tags, False), 46 | } 47 | -------------------------------------------------------------------------------- /troposphere/codegurureviewer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | 11 | 12 | class RepositoryAssociation(AWSObject): 13 | """ 14 | `RepositoryAssociation `__ 15 | """ 16 | 17 | resource_type = "AWS::CodeGuruReviewer::RepositoryAssociation" 18 | 19 | props: PropsDictType = { 20 | "BucketName": (str, False), 21 | "ConnectionArn": (str, False), 22 | "Name": (str, True), 23 | "Owner": (str, False), 24 | "Tags": (Tags, False), 25 | "Type": (str, True), 26 | } 27 | -------------------------------------------------------------------------------- /troposphere/codestar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | from .validators import boolean 11 | 12 | 13 | class S3(AWSProperty): 14 | """ 15 | `S3 `__ 16 | """ 17 | 18 | props: PropsDictType = { 19 | "Bucket": (str, True), 20 | "Key": (str, True), 21 | "ObjectVersion": (str, False), 22 | } 23 | 24 | 25 | class Code(AWSProperty): 26 | """ 27 | `Code `__ 28 | """ 29 | 30 | props: PropsDictType = { 31 | "S3": (S3, True), 32 | } 33 | 34 | 35 | class GitHubRepository(AWSObject): 36 | """ 37 | `GitHubRepository `__ 38 | """ 39 | 40 | resource_type = "AWS::CodeStar::GitHubRepository" 41 | 42 | props: PropsDictType = { 43 | "Code": (Code, False), 44 | "ConnectionArn": (str, False), 45 | "EnableIssues": (boolean, False), 46 | "IsPrivate": (boolean, False), 47 | "RepositoryAccessToken": (str, False), 48 | "RepositoryDescription": (str, False), 49 | "RepositoryName": (str, True), 50 | "RepositoryOwner": (str, True), 51 | } 52 | -------------------------------------------------------------------------------- /troposphere/codestarnotifications.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | 11 | 12 | class Target(AWSProperty): 13 | """ 14 | `Target `__ 15 | """ 16 | 17 | props: PropsDictType = { 18 | "TargetAddress": (str, True), 19 | "TargetType": (str, True), 20 | } 21 | 22 | 23 | class NotificationRule(AWSObject): 24 | """ 25 | `NotificationRule `__ 26 | """ 27 | 28 | resource_type = "AWS::CodeStarNotifications::NotificationRule" 29 | 30 | props: PropsDictType = { 31 | "CreatedBy": (str, False), 32 | "DetailType": (str, True), 33 | "EventTypeId": (str, False), 34 | "EventTypeIds": ([str], True), 35 | "Name": (str, True), 36 | "Resource": (str, True), 37 | "Status": (str, False), 38 | "Tags": (dict, False), 39 | "TargetAddress": (str, False), 40 | "Targets": ([Target], True), 41 | } 42 | -------------------------------------------------------------------------------- /troposphere/compat.py: -------------------------------------------------------------------------------- 1 | try: 2 | from awacs.aws import Policy, PolicyDocument 3 | 4 | policytypes = (dict, Policy, PolicyDocument) # type: tuple 5 | except ImportError: 6 | try: 7 | # A future release of awacs might remove `Policy` in which case 8 | # `PolicyDocument` should still be supported. This ensures forward 9 | # compatibility of current releases of troposphere with future 10 | # releases of awacs. 11 | from awacs.aws import PolicyDocument 12 | 13 | policytypes = (dict, PolicyDocument) 14 | except ImportError: 15 | policytypes = (dict,) 16 | 17 | 18 | def validate_policytype(policy): 19 | if not isinstance(policy, policytypes): 20 | raise TypeError( 21 | f"Invalid policy type: is {type(policy)}, expected {policytypes}" 22 | ) 23 | 24 | return policy 25 | 26 | 27 | __all__ = ["policytypes", "validate_policytype"] 28 | -------------------------------------------------------------------------------- /troposphere/cur.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType 10 | from .validators import boolean 11 | 12 | 13 | class ReportDefinition(AWSObject): 14 | """ 15 | `ReportDefinition `__ 16 | """ 17 | 18 | resource_type = "AWS::CUR::ReportDefinition" 19 | 20 | props: PropsDictType = { 21 | "AdditionalArtifacts": ([str], False), 22 | "AdditionalSchemaElements": ([str], False), 23 | "BillingViewArn": (str, False), 24 | "Compression": (str, True), 25 | "Format": (str, True), 26 | "RefreshClosedReports": (boolean, True), 27 | "ReportName": (str, True), 28 | "ReportVersioning": (str, True), 29 | "S3Bucket": (str, True), 30 | "S3Prefix": (str, True), 31 | "S3Region": (str, True), 32 | "TimeUnit": (str, True), 33 | } 34 | -------------------------------------------------------------------------------- /troposphere/detective.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | from .validators import boolean 11 | 12 | 13 | class Graph(AWSObject): 14 | """ 15 | `Graph `__ 16 | """ 17 | 18 | resource_type = "AWS::Detective::Graph" 19 | 20 | props: PropsDictType = { 21 | "AutoEnableMembers": (boolean, False), 22 | "Tags": (Tags, False), 23 | } 24 | 25 | 26 | class MemberInvitation(AWSObject): 27 | """ 28 | `MemberInvitation `__ 29 | """ 30 | 31 | resource_type = "AWS::Detective::MemberInvitation" 32 | 33 | props: PropsDictType = { 34 | "DisableEmailNotification": (boolean, False), 35 | "GraphArn": (str, True), 36 | "MemberEmailAddress": (str, True), 37 | "MemberId": (str, True), 38 | "Message": (str, False), 39 | } 40 | 41 | 42 | class OrganizationAdmin(AWSObject): 43 | """ 44 | `OrganizationAdmin `__ 45 | """ 46 | 47 | resource_type = "AWS::Detective::OrganizationAdmin" 48 | 49 | props: PropsDictType = { 50 | "AccountId": (str, True), 51 | } 52 | -------------------------------------------------------------------------------- /troposphere/docdbelastic.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | from .validators import integer 11 | 12 | 13 | class Cluster(AWSObject): 14 | """ 15 | `Cluster `__ 16 | """ 17 | 18 | resource_type = "AWS::DocDBElastic::Cluster" 19 | 20 | props: PropsDictType = { 21 | "AdminUserName": (str, True), 22 | "AdminUserPassword": (str, False), 23 | "AuthType": (str, True), 24 | "BackupRetentionPeriod": (integer, False), 25 | "ClusterName": (str, True), 26 | "KmsKeyId": (str, False), 27 | "PreferredBackupWindow": (str, False), 28 | "PreferredMaintenanceWindow": (str, False), 29 | "ShardCapacity": (integer, True), 30 | "ShardCount": (integer, True), 31 | "ShardInstanceCount": (integer, False), 32 | "SubnetIds": ([str], False), 33 | "Tags": (Tags, False), 34 | "VpcSecurityGroupIds": ([str], False), 35 | } 36 | -------------------------------------------------------------------------------- /troposphere/healthimaging.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType 10 | 11 | 12 | class Datastore(AWSObject): 13 | """ 14 | `Datastore `__ 15 | """ 16 | 17 | resource_type = "AWS::HealthImaging::Datastore" 18 | 19 | props: PropsDictType = { 20 | "DatastoreName": (str, False), 21 | "KmsKeyArn": (str, False), 22 | "Tags": (dict, False), 23 | } 24 | -------------------------------------------------------------------------------- /troposphere/helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudtools/troposphere/923567a7aa8803ddd17310c01339dd5cc8538fd2/troposphere/helpers/__init__.py -------------------------------------------------------------------------------- /troposphere/identitystore.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | 11 | 12 | class Group(AWSObject): 13 | """ 14 | `Group `__ 15 | """ 16 | 17 | resource_type = "AWS::IdentityStore::Group" 18 | 19 | props: PropsDictType = { 20 | "Description": (str, False), 21 | "DisplayName": (str, True), 22 | "IdentityStoreId": (str, True), 23 | } 24 | 25 | 26 | class MemberId(AWSProperty): 27 | """ 28 | `MemberId `__ 29 | """ 30 | 31 | props: PropsDictType = { 32 | "UserId": (str, True), 33 | } 34 | 35 | 36 | class GroupMembership(AWSObject): 37 | """ 38 | `GroupMembership `__ 39 | """ 40 | 41 | resource_type = "AWS::IdentityStore::GroupMembership" 42 | 43 | props: PropsDictType = { 44 | "GroupId": (str, True), 45 | "IdentityStoreId": (str, True), 46 | "MemberId": (MemberId, True), 47 | } 48 | -------------------------------------------------------------------------------- /troposphere/invoicing.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | from .validators import boolean 11 | 12 | 13 | class ResourceTag(AWSProperty): 14 | """ 15 | `ResourceTag `__ 16 | """ 17 | 18 | props: PropsDictType = { 19 | "Key": (str, True), 20 | "Value": (str, True), 21 | } 22 | 23 | 24 | class Rule(AWSProperty): 25 | """ 26 | `Rule `__ 27 | """ 28 | 29 | props: PropsDictType = { 30 | "LinkedAccounts": ([str], True), 31 | } 32 | 33 | 34 | class InvoiceUnit(AWSObject): 35 | """ 36 | `InvoiceUnit `__ 37 | """ 38 | 39 | resource_type = "AWS::Invoicing::InvoiceUnit" 40 | 41 | props: PropsDictType = { 42 | "Description": (str, False), 43 | "InvoiceReceiver": (str, True), 44 | "Name": (str, True), 45 | "ResourceTags": ([ResourceTag], False), 46 | "Rule": (Rule, True), 47 | "TaxInheritanceDisabled": (boolean, False), 48 | } 49 | -------------------------------------------------------------------------------- /troposphere/iotfleethub.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | 11 | 12 | class Application(AWSObject): 13 | """ 14 | `Application `__ 15 | """ 16 | 17 | resource_type = "AWS::IoTFleetHub::Application" 18 | 19 | props: PropsDictType = { 20 | "ApplicationDescription": (str, False), 21 | "ApplicationName": (str, True), 22 | "RoleArn": (str, True), 23 | "Tags": (Tags, False), 24 | } 25 | -------------------------------------------------------------------------------- /troposphere/iotthingsgraph.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | from .validators import double 11 | 12 | 13 | class DefinitionDocument(AWSProperty): 14 | """ 15 | `DefinitionDocument `__ 16 | """ 17 | 18 | props: PropsDictType = { 19 | "Language": (str, True), 20 | "Text": (str, True), 21 | } 22 | 23 | 24 | class FlowTemplate(AWSObject): 25 | """ 26 | `FlowTemplate `__ 27 | """ 28 | 29 | resource_type = "AWS::IoTThingsGraph::FlowTemplate" 30 | 31 | props: PropsDictType = { 32 | "CompatibleNamespaceVersion": (double, False), 33 | "Definition": (DefinitionDocument, True), 34 | } 35 | -------------------------------------------------------------------------------- /troposphere/kendraranking.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType, Tags 10 | from .validators import integer 11 | 12 | 13 | class CapacityUnitsConfiguration(AWSProperty): 14 | """ 15 | `CapacityUnitsConfiguration `__ 16 | """ 17 | 18 | props: PropsDictType = { 19 | "RescoreCapacityUnits": (integer, True), 20 | } 21 | 22 | 23 | class ExecutionPlan(AWSObject): 24 | """ 25 | `ExecutionPlan `__ 26 | """ 27 | 28 | resource_type = "AWS::KendraRanking::ExecutionPlan" 29 | 30 | props: PropsDictType = { 31 | "CapacityUnits": (CapacityUnitsConfiguration, False), 32 | "Description": (str, False), 33 | "Name": (str, True), 34 | "Tags": (Tags, False), 35 | } 36 | -------------------------------------------------------------------------------- /troposphere/kinesisvideo.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | from .validators import integer 11 | 12 | 13 | class SignalingChannel(AWSObject): 14 | """ 15 | `SignalingChannel `__ 16 | """ 17 | 18 | resource_type = "AWS::KinesisVideo::SignalingChannel" 19 | 20 | props: PropsDictType = { 21 | "MessageTtlSeconds": (integer, False), 22 | "Name": (str, False), 23 | "Tags": (Tags, False), 24 | "Type": (str, False), 25 | } 26 | 27 | 28 | class Stream(AWSObject): 29 | """ 30 | `Stream `__ 31 | """ 32 | 33 | resource_type = "AWS::KinesisVideo::Stream" 34 | 35 | props: PropsDictType = { 36 | "DataRetentionInHours": (integer, False), 37 | "DeviceName": (str, False), 38 | "KmsKeyId": (str, False), 39 | "MediaType": (str, False), 40 | "Name": (str, False), 41 | "Tags": (Tags, False), 42 | } 43 | -------------------------------------------------------------------------------- /troposphere/launchwizard.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | 11 | 12 | class Deployment(AWSObject): 13 | """ 14 | `Deployment `__ 15 | """ 16 | 17 | resource_type = "AWS::LaunchWizard::Deployment" 18 | 19 | props: PropsDictType = { 20 | "DeploymentPatternName": (str, True), 21 | "Name": (str, True), 22 | "Specifications": (dict, False), 23 | "Tags": (Tags, False), 24 | "WorkloadName": (str, True), 25 | } 26 | -------------------------------------------------------------------------------- /troposphere/lookoutvision.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType 10 | 11 | 12 | class Project(AWSObject): 13 | """ 14 | `Project `__ 15 | """ 16 | 17 | resource_type = "AWS::LookoutVision::Project" 18 | 19 | props: PropsDictType = { 20 | "ProjectName": (str, True), 21 | } 22 | -------------------------------------------------------------------------------- /troposphere/openstack/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | OpenStack 3 | --------- 4 | 5 | The package to support OpenStack templates using troposphere. 6 | """ 7 | -------------------------------------------------------------------------------- /troposphere/ram.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType, Tags 10 | from .validators import boolean 11 | 12 | 13 | class Permission(AWSObject): 14 | """ 15 | `Permission `__ 16 | """ 17 | 18 | resource_type = "AWS::RAM::Permission" 19 | 20 | props: PropsDictType = { 21 | "Name": (str, True), 22 | "PolicyTemplate": (dict, True), 23 | "ResourceType": (str, True), 24 | "Tags": (Tags, False), 25 | } 26 | 27 | 28 | class ResourceShare(AWSObject): 29 | """ 30 | `ResourceShare `__ 31 | """ 32 | 33 | resource_type = "AWS::RAM::ResourceShare" 34 | 35 | props: PropsDictType = { 36 | "AllowExternalPrincipals": (boolean, False), 37 | "Name": (str, True), 38 | "PermissionArns": ([str], False), 39 | "Principals": ([str], False), 40 | "ResourceArns": ([str], False), 41 | "Sources": ([str], False), 42 | "Tags": (Tags, False), 43 | } 44 | -------------------------------------------------------------------------------- /troposphere/sdb.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, PropsDictType 10 | 11 | 12 | class Domain(AWSObject): 13 | """ 14 | `Domain `__ 15 | """ 16 | 17 | resource_type = "AWS::SDB::Domain" 18 | 19 | props: PropsDictType = { 20 | "Description": (str, False), 21 | } 22 | -------------------------------------------------------------------------------- /troposphere/simspaceweaver.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType 10 | 11 | 12 | class S3Location(AWSProperty): 13 | """ 14 | `S3Location `__ 15 | """ 16 | 17 | props: PropsDictType = { 18 | "BucketName": (str, True), 19 | "ObjectKey": (str, True), 20 | } 21 | 22 | 23 | class Simulation(AWSObject): 24 | """ 25 | `Simulation `__ 26 | """ 27 | 28 | resource_type = "AWS::SimSpaceWeaver::Simulation" 29 | 30 | props: PropsDictType = { 31 | "MaximumDuration": (str, False), 32 | "Name": (str, True), 33 | "RoleArn": (str, True), 34 | "SchemaS3Location": (S3Location, False), 35 | "SnapshotS3Location": (S3Location, False), 36 | } 37 | -------------------------------------------------------------------------------- /troposphere/type_defs/__init__.py: -------------------------------------------------------------------------------- 1 | """Type definitions.""" 2 | -------------------------------------------------------------------------------- /troposphere/type_defs/compat.py: -------------------------------------------------------------------------------- 1 | """Type definition backward compatibility.""" 2 | 3 | # flake8: noqa 4 | from __future__ import annotations 5 | 6 | from typing import Final, Literal, Protocol, SupportsIndex 7 | -------------------------------------------------------------------------------- /troposphere/type_defs/protocols.py: -------------------------------------------------------------------------------- 1 | """Protocols.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Any, Dict 6 | 7 | from .compat import Protocol 8 | 9 | 10 | class JSONreprProtocol(Protocol): 11 | def JSONrepr(self, *__args: Any, **__kwargs: Any) -> Dict[str, Any]: 12 | raise NotImplementedError 13 | 14 | 15 | class ToDictProtocol(Protocol): 16 | def to_dict(self) -> Dict[str, Any]: 17 | raise NotImplementedError 18 | -------------------------------------------------------------------------------- /troposphere/utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def _tail_print(e): 5 | print("%s %s %s" % (e.resource_status, e.resource_type, e.event_id)) 6 | 7 | 8 | def get_events(conn, stackname): 9 | """Get the events in batches and return in chronological order""" 10 | next = None 11 | event_list = [] 12 | while 1: 13 | events = conn.describe_stack_events(stackname, next) 14 | event_list.append(events) 15 | if events.next_token is None: 16 | break 17 | next = events.next_token 18 | time.sleep(1) 19 | return reversed(sum(event_list, [])) 20 | 21 | 22 | def tail(conn, stack_name, log_func=_tail_print, sleep_time=5, include_initial=True): 23 | """Show and then tail the event log""" 24 | # First dump the full list of events in chronological order and keep 25 | # track of the events we've seen already 26 | seen = set() 27 | initial_events = get_events(conn, stack_name) 28 | for e in initial_events: 29 | if include_initial: 30 | log_func(e) 31 | seen.add(e.event_id) 32 | 33 | # Now keep looping through and dump the new events 34 | while 1: 35 | events = get_events(conn, stack_name) 36 | for e in events: 37 | if e.event_id not in seen: 38 | log_func(e) 39 | seen.add(e.event_id) 40 | time.sleep(sleep_time) 41 | -------------------------------------------------------------------------------- /troposphere/validators/amazonmq.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def validate_tags_or_list(x): 11 | """ 12 | Property: Broker.Tags 13 | """ 14 | return tags_or_list(x) 15 | -------------------------------------------------------------------------------- /troposphere/validators/appconfig.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_growth_type(growth_type): 8 | """ 9 | Property: DeploymentStrategy.GrowthType 10 | """ 11 | 12 | VALID_GROWTH_TYPES = ("LINEAR",) 13 | 14 | if growth_type not in VALID_GROWTH_TYPES: 15 | raise ValueError( 16 | "DeploymentStrategy GrowthType must be one of: %s" 17 | % ", ".join(VALID_GROWTH_TYPES) 18 | ) 19 | return growth_type 20 | 21 | 22 | def validate_replicate_to(replicate_to): 23 | """ 24 | Property: DeploymentStrategy.ReplicateTo 25 | """ 26 | 27 | VALID_REPLICATION_DESTINATION = ("NONE", "SSM_DOCUMENT") 28 | 29 | if replicate_to not in VALID_REPLICATION_DESTINATION: 30 | raise ValueError( 31 | "DeploymentStrategy ReplicateTo must be one of: %s" 32 | % ", ".join(VALID_REPLICATION_DESTINATION) 33 | ) 34 | return replicate_to 35 | 36 | 37 | def validate_validator_type(validator_type): 38 | """ 39 | Property: Validators.Type 40 | """ 41 | 42 | VALID_VALIDATOR_TYPE = ("JSON_SCHEMA", "LAMBDA") 43 | 44 | if validator_type not in VALID_VALIDATOR_TYPE: 45 | raise ValueError( 46 | "ConfigurationProfile Validator Type must be one of: %s" 47 | % ", ".join(VALID_VALIDATOR_TYPE) # NOQA 48 | ) 49 | return validator_type 50 | -------------------------------------------------------------------------------- /troposphere/validators/appmesh.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_listenertls_mode(listenertls_mode): 8 | """ 9 | Validate Mode for ListernerTls 10 | Property: ListenerTls.Mode 11 | """ 12 | 13 | VALID_LISTENERTLS_MODE = ("STRICT", "PERMISSIVE", "DISABLED") 14 | 15 | if listenertls_mode not in VALID_LISTENERTLS_MODE: 16 | raise ValueError( 17 | "ListernerTls Mode must be one of: %s" % ", ".join(VALID_LISTENERTLS_MODE) 18 | ) 19 | return listenertls_mode 20 | -------------------------------------------------------------------------------- /troposphere/validators/appstream.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def validate_tags_or_list(x): 11 | """ 12 | Property: AppBlock.Tags 13 | Property: Application.Tags 14 | Property: Fleet.Tags 15 | Property: ImageBuilder.Tags 16 | Property: Stack.Tags 17 | """ 18 | return tags_or_list(x) 19 | -------------------------------------------------------------------------------- /troposphere/validators/appsync.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2021, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def resolver_kind_validator(x): 8 | """ 9 | Property: Resolver.Kind 10 | """ 11 | valid_types = ["UNIT", "PIPELINE"] 12 | if x not in valid_types: 13 | raise ValueError("Kind must be one of: %s" % ", ".join(valid_types)) 14 | return x 15 | -------------------------------------------------------------------------------- /troposphere/validators/athena.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_workgroup_state(workgroup_state): 8 | """ 9 | Validate State for Workgroup 10 | Property: WorkGroup.State 11 | """ 12 | 13 | VALID_WORKGROUP_STATE = ("ENABLED", "DISABLED") 14 | 15 | if workgroup_state not in VALID_WORKGROUP_STATE: 16 | raise ValueError( 17 | "Workgroup State must be one of: %s" % ", ".join(VALID_WORKGROUP_STATE) 18 | ) 19 | return workgroup_state 20 | 21 | 22 | def validate_encryptionoption(encryption_option): 23 | """ 24 | Validate EncryptionOption for EncryptionConfiguration 25 | Property: EncryptionConfiguration.EncryptionOption 26 | """ 27 | 28 | VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION = [ 29 | "CSE_KMS", 30 | "SSE_KMS", 31 | "SSE_S3", 32 | ] 33 | 34 | if encryption_option not in VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION: 35 | raise ValueError( 36 | "EncryptionConfiguration EncryptionOption must be one of: %s" 37 | % ", ".join(VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION) # NOQA 38 | ) 39 | return encryption_option 40 | -------------------------------------------------------------------------------- /troposphere/validators/backup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | import re 8 | 9 | from .. import If 10 | from . import exactly_one, json_checker 11 | 12 | 13 | def validate_json_checker(x): 14 | """ 15 | Property: BackupVault.AccessPolicy 16 | """ 17 | return json_checker(x) 18 | 19 | 20 | def backup_vault_name(name): 21 | """ 22 | Property: BackupVault.BackupVaultName 23 | """ 24 | vault_name_re = re.compile(r"^[a-zA-Z0-9\-\_\.]{1,50}$") # noqa 25 | if vault_name_re.match(name): 26 | return name 27 | else: 28 | raise ValueError("%s is not a valid backup vault name" % name) 29 | 30 | 31 | def validate_backup_selection(self): 32 | """ 33 | Class: BackupSelectionResourceType 34 | """ 35 | conds = [ 36 | "ListOfTags", 37 | "Resources", 38 | ] 39 | 40 | def check_if(names, props): 41 | validated = [] 42 | for name in names: 43 | validated.append(name in props and isinstance(props[name], If)) 44 | return all(validated) 45 | 46 | if check_if(conds, self.properties): 47 | return 48 | 49 | exactly_one(self.__class__.__name__, self.properties, conds) 50 | -------------------------------------------------------------------------------- /troposphere/validators/cassandra.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_clusteringkeycolumn_orderby(clusteringkeycolumn_orderby): 8 | """ 9 | Property: ClusteringKeyColumn.OrderBy 10 | """ 11 | 12 | VALID_CLUSTERINGKEYCOLUMN_ORDERBY = ("ASC", "DESC") 13 | 14 | if clusteringkeycolumn_orderby not in VALID_CLUSTERINGKEYCOLUMN_ORDERBY: 15 | raise ValueError( 16 | "ClusteringKeyColumn OrderBy must be one of: %s" 17 | % ", ".join(VALID_CLUSTERINGKEYCOLUMN_ORDERBY) 18 | ) 19 | return clusteringkeycolumn_orderby 20 | 21 | 22 | def validate_billingmode_mode(billingmode_mode): 23 | """ 24 | Property: BillingMode.Mode 25 | """ 26 | 27 | VALID_BILLINGMODE_MODE = ("ON_DEMAND", "PROVISIONED") 28 | 29 | if billingmode_mode not in VALID_BILLINGMODE_MODE: 30 | raise ValueError( 31 | "BillingMode Mode must be one of: %s" % ", ".join(VALID_BILLINGMODE_MODE) 32 | ) 33 | return billingmode_mode 34 | -------------------------------------------------------------------------------- /troposphere/validators/certificatemanager.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def validate_tags_or_list(x): 11 | """ 12 | Property: Certificate.Tags 13 | """ 14 | return tags_or_list(x) 15 | -------------------------------------------------------------------------------- /troposphere/validators/chatbot.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_logginglevel(slackchannelconfiguration_logginglevel): 8 | """ 9 | Validate LoggingLevel for SlackChannelConfiguration 10 | Property: SlackChannelConfiguration.LoggingLevel 11 | """ 12 | 13 | VALID_SLACKCHANNELCONFIGURATION_LOGGINGLEVEL = ("ERROR", "INFO", "NONE") 14 | 15 | if ( 16 | slackchannelconfiguration_logginglevel 17 | not in VALID_SLACKCHANNELCONFIGURATION_LOGGINGLEVEL 18 | ): 19 | raise ValueError( 20 | "SlackChannelConfiguration LoggingLevel must be one of: %s" 21 | % ", ".join(VALID_SLACKCHANNELCONFIGURATION_LOGGINGLEVEL) 22 | ) 23 | return slackchannelconfiguration_logginglevel 24 | -------------------------------------------------------------------------------- /troposphere/validators/codeartifact.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | 9 | 10 | def policytypes(policy): 11 | """ 12 | Property: Domain.PermissionsPolicyDocument 13 | Property: Repository.PermissionsPolicyDocument 14 | """ 15 | return validate_policytype(policy) 16 | -------------------------------------------------------------------------------- /troposphere/validators/codecommit.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from .. import AWSHelperFn 8 | 9 | 10 | def validate_trigger(self): 11 | """ 12 | Class: Trigger 13 | """ 14 | valid = [ 15 | "all", 16 | "createReference", 17 | "deleteReference", 18 | "updateReference", 19 | ] 20 | events = self.properties.get("Events") 21 | if events and not isinstance(events, AWSHelperFn): 22 | if "all" in events and len(events) != 1: 23 | raise ValueError("Trigger events: all must be used alone") 24 | else: 25 | for e in events: 26 | if e not in valid and not isinstance(e, AWSHelperFn): 27 | raise ValueError("Trigger: invalid event %s" % e) 28 | -------------------------------------------------------------------------------- /troposphere/validators/codestarconnections.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_connection_providertype(connection_providertype): 8 | """ 9 | Validate ProviderType for Connection 10 | Property: Connection.ProviderType 11 | """ 12 | 13 | VALID_CONNECTION_PROVIDERTYPE = ["Bitbucket", "GitHub", "GitHubEnterpriseServer"] 14 | 15 | if connection_providertype not in VALID_CONNECTION_PROVIDERTYPE: 16 | raise ValueError( 17 | "Connection ProviderType must be one of: %s" 18 | % ", ".join(VALID_CONNECTION_PROVIDERTYPE) 19 | ) 20 | return connection_providertype 21 | -------------------------------------------------------------------------------- /troposphere/validators/cognito.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_recoveryoption_name(recoveryoption_name): 8 | """ 9 | Validate Name for RecoveryOption 10 | Property: RecoveryOption.Name 11 | """ 12 | 13 | VALID_RECOVERYOPTION_NAME = ( 14 | "admin_only", 15 | "verified_email", 16 | "verified_phone_number", 17 | ) 18 | 19 | if recoveryoption_name not in VALID_RECOVERYOPTION_NAME: 20 | raise ValueError( 21 | "RecoveryOption Name must be one of: %s" 22 | % ", ".join(VALID_RECOVERYOPTION_NAME) 23 | ) 24 | return recoveryoption_name 25 | -------------------------------------------------------------------------------- /troposphere/validators/config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..type_defs.compat import Final 8 | 9 | ONE_HOUR: Final = "One_Hour" 10 | THREE_HOURS: Final = "Three_Hours" 11 | SIX_HOURS: Final = "Six_Hours" 12 | TWELVE_HOURS: Final = "Twelve_Hours" 13 | TWENTYFOUR_HOURS: Final = "TwentyFour_Hours" 14 | 15 | 16 | def validate_source_details(self): 17 | """ 18 | Class: SourceDetails 19 | """ 20 | 21 | valid_freqs = [ 22 | ONE_HOUR, 23 | THREE_HOURS, 24 | SIX_HOURS, 25 | TWELVE_HOURS, 26 | TWENTYFOUR_HOURS, 27 | ] 28 | freq = self.properties.get("MaximumExecutionFrequency") 29 | if freq and freq not in valid_freqs: 30 | raise ValueError( 31 | "MaximumExecutionFrequency (given: %s) must be one of: %s" 32 | % (freq, ", ".join(valid_freqs)) 33 | ) 34 | -------------------------------------------------------------------------------- /troposphere/validators/dlm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def validate_tags_or_list(x): 11 | """ 12 | Property: LifecyclePolicy.Tags 13 | Property: PolicyDetails.TargetTags 14 | Property: Schedule.TagsToAdd 15 | """ 16 | return tags_or_list(x) 17 | 18 | 19 | def validate_interval(interval): 20 | """ 21 | Interval validation rule. 22 | Property: CreateRule.Interval 23 | """ 24 | 25 | VALID_INTERVALS = (1, 2, 3, 4, 6, 8, 12, 24) 26 | 27 | if interval not in VALID_INTERVALS: 28 | raise ValueError( 29 | "Interval must be one of : %s" 30 | % ", ".join([str(i) for i in VALID_INTERVALS]) 31 | ) 32 | return interval 33 | 34 | 35 | def validate_interval_unit(interval_unit): 36 | """ 37 | Interval unit validation rule. 38 | Property: CreateRule.IntervalUnit 39 | """ 40 | 41 | VALID_INTERVAL_UNITS = ("HOURS",) 42 | 43 | if interval_unit not in VALID_INTERVAL_UNITS: 44 | raise ValueError( 45 | "Interval unit must be one of : %s" % ", ".join(VALID_INTERVAL_UNITS) 46 | ) 47 | return interval_unit 48 | 49 | 50 | def validate_state(state): 51 | """ 52 | State validation rule. 53 | Property: LifecyclePolicy.State 54 | """ 55 | 56 | VALID_STATES = ("ENABLED", "DISABLED") 57 | 58 | if state not in VALID_STATES: 59 | raise ValueError("State must be one of : %s" % ", ".join(VALID_STATES)) 60 | return state 61 | -------------------------------------------------------------------------------- /troposphere/validators/dms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..type_defs.compat import Final 8 | from . import network_port 9 | 10 | CDC: Final = "cdc" 11 | FULL_LOAD: Final = "full-load" 12 | FULL_LOAD_AND_CDC: Final = "full-load-and-cdc" 13 | 14 | 15 | def validate_network_port(x): 16 | """ 17 | Property: Endpoint.Port 18 | Property: MongoDbSettings.Port 19 | Property: RedisSettings.Port 20 | """ 21 | return network_port(x) 22 | -------------------------------------------------------------------------------- /troposphere/validators/ecr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2021, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | 9 | 10 | def policytypes(policy): 11 | """ 12 | Property: PublicRepository.RepositoryPolicyText 13 | Property: RegistryPolicy.PolicyText 14 | Property: Repository.RepositoryPolicyText 15 | """ 16 | return validate_policytype(policy) 17 | -------------------------------------------------------------------------------- /troposphere/validators/efs.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | from . import one_of 7 | 8 | Bursting = "bursting" 9 | Elastic = "elastic" 10 | Provisioned = "provisioned" 11 | 12 | 13 | def throughput_mode_validator(mode): 14 | """ 15 | Property: FileSystem.ThroughputMode 16 | """ 17 | valid_modes = [Bursting, Elastic, Provisioned] 18 | if mode not in valid_modes: 19 | raise ValueError( 20 | 'ThroughputMode must be one of: "%s"' % (", ".join(valid_modes)) 21 | ) 22 | return mode 23 | 24 | 25 | def provisioned_throughput_validator(throughput): 26 | """ 27 | Property: FileSystem.ProvisionedThroughputInMibps 28 | """ 29 | if throughput < 0.0: 30 | raise ValueError( 31 | "ProvisionedThroughputInMibps must be greater than or equal to 0.0" 32 | ) 33 | return throughput 34 | 35 | 36 | def validate_backup_policy(self): 37 | """ 38 | Class: BackupPolicy 39 | """ 40 | 41 | conds = ["DISABLED", "DISABLING", "ENABLED", "ENABLING"] 42 | one_of(self.__class__.__name__, self.properties, "Status", conds) 43 | -------------------------------------------------------------------------------- /troposphere/validators/elasticbeanstalk.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | WebServer = "WebServer" 8 | Worker = "Worker" 9 | WebServerType = "Standard" 10 | WorkerType = "SQS/HTTP" 11 | 12 | 13 | def validate_tier_name(name): 14 | """ 15 | Property: Tier.Name 16 | """ 17 | valid_names = [WebServer, Worker] 18 | if name not in valid_names: 19 | raise ValueError("Tier name needs to be one of %r" % valid_names) 20 | return name 21 | 22 | 23 | def validate_tier_type(tier_type): 24 | """ 25 | Property: Tier.Type 26 | """ 27 | valid_types = [WebServerType, WorkerType] 28 | if tier_type not in valid_types: 29 | raise ValueError("Tier type needs to be one of %r" % valid_types) 30 | return tier_type 31 | -------------------------------------------------------------------------------- /troposphere/validators/elasticloadbalancing.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import elb_name, integer_range, network_port, tags_or_list 8 | 9 | 10 | def validate_int_to_str(x): 11 | """ 12 | Backward compatibility - field was int and now str. 13 | Property: HealthCheck.Interval 14 | Property: HealthCheck.Timeout 15 | """ 16 | 17 | if isinstance(x, int): 18 | return str(x) 19 | if isinstance(x, str): 20 | return str(int(x)) 21 | 22 | raise TypeError(f"Value {x} of type {type(x)} must be either int or str") 23 | 24 | 25 | def validate_elb_name(x): 26 | """ 27 | Property: LoadBalancer.LoadBalancerName 28 | """ 29 | return elb_name(x) 30 | 31 | 32 | def validate_network_port(x): 33 | """ 34 | Property: Listener.InstancePort 35 | Property: Listener.LoadBalancerPort 36 | """ 37 | return network_port(x) 38 | 39 | 40 | def validate_tags_or_list(x): 41 | """ 42 | Property: LoadBalancer.Tags 43 | """ 44 | return tags_or_list(x) 45 | 46 | 47 | def validate_threshold(port): 48 | """ 49 | Property: HealthCheck.HealthyThreshold 50 | Property: HealthCheck.UnhealthyThreshold 51 | """ 52 | return integer_range(2, 10)(port) 53 | -------------------------------------------------------------------------------- /troposphere/validators/fms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import json_checker 8 | 9 | 10 | def validate_json_checker(x): 11 | """ 12 | Property: Policy.SecurityServicePolicyData 13 | """ 14 | return json_checker(x) 15 | -------------------------------------------------------------------------------- /troposphere/validators/globalaccelerator.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def accelerator_ipaddresstype(type): 8 | """ 9 | Property: Accelerator.IpAddressType 10 | """ 11 | 12 | valid_types = ["IPV4"] 13 | if type not in valid_types: 14 | raise ValueError( 15 | 'IpAddressType must be one of: "%s"' % (", ".join(valid_types)) 16 | ) 17 | return type 18 | 19 | 20 | def endpointgroup_healthcheckprotocol(protocol): 21 | """ 22 | Property: EndpointGroup.HealthCheckProtocol 23 | """ 24 | 25 | valid_protocols = ["HTTP", "HTTPS", "TCP"] 26 | if protocol not in valid_protocols: 27 | raise ValueError( 28 | 'HealthCheckProtocol must be one of: "%s"' % (", ".join(valid_protocols)) 29 | ) 30 | return protocol 31 | 32 | 33 | def listener_clientaffinity(affinity): 34 | """ 35 | Property: Listener.ClientAffinity 36 | """ 37 | valid_affinities = ["NONE", "SOURCE_IP"] 38 | if affinity not in valid_affinities: 39 | raise ValueError( 40 | 'ClientAffinity must be one of: "%s"' % (", ".join(valid_affinities)) 41 | ) 42 | return affinity 43 | 44 | 45 | def listener_protocol(protocol): 46 | """ 47 | Property: Listener.Protocol 48 | """ 49 | valid_protocols = ["TCP", "UDP"] 50 | if protocol not in valid_protocols: 51 | raise ValueError('Protocol must be one of: "%s"' % (", ".join(valid_protocols))) 52 | return protocol 53 | -------------------------------------------------------------------------------- /troposphere/validators/groundstation.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import json_checker 8 | 9 | 10 | def validate_json_checker(x): 11 | """ 12 | Property: DecodeConfig.UnvalidatedJSON 13 | Property: DemodulationConfig.UnvalidatedJSON 14 | """ 15 | return json_checker(x) 16 | -------------------------------------------------------------------------------- /troposphere/validators/iot.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | from . import json_checker 9 | 10 | 11 | def policytypes(policy): 12 | """ 13 | Property: Policy.PolicyDocument 14 | """ 15 | return validate_policytype(policy) 16 | 17 | 18 | def validate_json_checker(x): 19 | """ 20 | Property: JobTemplate.AbortConfig 21 | Property: JobTemplate.JobExecutionsRolloutConfig 22 | Property: JobTemplate.TimeoutConfig 23 | """ 24 | return json_checker(x) 25 | -------------------------------------------------------------------------------- /troposphere/validators/iottwinmaker.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_listvalue(values): 8 | """ 9 | Property: DataValue.ListValue 10 | """ 11 | from .. import AWSHelperFn 12 | from ..iottwinmaker import DataValue 13 | 14 | if not isinstance(values, list): 15 | raise TypeError("ListValue must be a list", list) 16 | 17 | for v in values: 18 | if not isinstance(v, (DataValue, AWSHelperFn)): 19 | raise TypeError( 20 | "ListValue must contain DataValue or AWSHelperFn", 21 | AWSHelperFn, 22 | DataValue, 23 | ) 24 | 25 | 26 | def validate_nestedtypel(value): 27 | """ 28 | Property: DataType.NestedType 29 | """ 30 | from .. import AWSHelperFn 31 | from ..iottwinmaker import DataType 32 | 33 | if not isinstance(value, (DataType, AWSHelperFn)): 34 | raise TypeError( 35 | "NestedType must be either DataType or AWSHelperFn", AWSHelperFn, DataType 36 | ) 37 | -------------------------------------------------------------------------------- /troposphere/validators/kinesis.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2021, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def kinesis_stream_mode(mode): 11 | """ 12 | Property: StreamModeDetails.StreamMode 13 | """ 14 | valid_modes = ["ON_DEMAND", "PROVISIONED"] 15 | if mode not in valid_modes: 16 | raise ValueError('ContentType must be one of: "%s"' % (", ".join(valid_modes))) 17 | return mode 18 | 19 | 20 | def validate_tags_or_list(x): 21 | """ 22 | Property: Stream.Tags 23 | """ 24 | return tags_or_list(x) 25 | -------------------------------------------------------------------------------- /troposphere/validators/kinesisanalyticsv2.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_runtime_environment(runtime_environment): 8 | """ 9 | Validate RuntimeEnvironment for Application 10 | Property: Application.RuntimeEnvironment 11 | """ 12 | 13 | VALID_RUNTIME_ENVIRONMENTS = ( 14 | "FLINK-1_6", 15 | "FLINK-1_8", 16 | "FLINK-1_11", 17 | "FLINK-1_13", 18 | "FLINK-1_15", 19 | "FLINK-1_18", 20 | "FLINK-1_19", 21 | "FLINK-1_20", 22 | "SQL-1_0", 23 | "ZEPPELIN-FLINK-1_0", 24 | "ZEPPELIN-FLINK-2_0", 25 | "ZEPPELIN-FLINK-3_0", 26 | ) 27 | 28 | if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS: 29 | raise ValueError( 30 | "Application RuntimeEnvironment must be one of: %s" 31 | % ", ".join(VALID_RUNTIME_ENVIRONMENTS) 32 | ) 33 | return runtime_environment 34 | -------------------------------------------------------------------------------- /troposphere/validators/kms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | from . import integer_range, tags_or_list 9 | 10 | 11 | def policytypes(policy): 12 | """ 13 | Property: Key.KeyPolicy 14 | """ 15 | return validate_policytype(policy) 16 | 17 | 18 | def key_usage_type(key): 19 | """ 20 | Property: Key.KeyUsage 21 | """ 22 | valid_values = ["ENCRYPT_DECRYPT", "SIGN_VERIFY"] 23 | if key not in valid_values: 24 | raise ValueError('KeyUsage must be one of: "%s"' % (", ".join(valid_values))) 25 | return key 26 | 27 | 28 | def validate_pending_window_in_days(port): 29 | """ 30 | Property: Key.PendingWindowInDays 31 | """ 32 | return integer_range(7, 30)(port) 33 | 34 | 35 | def validate_tags_or_list(x): 36 | """ 37 | Property: Key.Tags 38 | """ 39 | return tags_or_list(x) 40 | -------------------------------------------------------------------------------- /troposphere/validators/lex.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | 9 | 10 | def policytypes(policy): 11 | """ 12 | Property: ResourcePolicy.Policy 13 | """ 14 | return validate_policytype(policy) 15 | -------------------------------------------------------------------------------- /troposphere/validators/macie.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def findingsfilter_action(action): 8 | """ 9 | Property: FindingsFilter.Action 10 | """ 11 | 12 | valid_actions = ["ARCHIVE", "NOOP"] 13 | 14 | if action not in valid_actions: 15 | raise ValueError('Action must be one of: "%s"' % (", ".join(valid_actions))) 16 | return action 17 | 18 | 19 | def session_findingpublishingfrequency(frequency): 20 | """ 21 | Property: Session.FindingPublishingFrequency 22 | """ 23 | 24 | valid_frequencies = ["FIFTEEN_MINUTES", "ONE_HOUR", "SIX_HOURS"] 25 | 26 | if frequency not in valid_frequencies: 27 | raise ValueError( 28 | 'FindingPublishingFrequency must be one of: "%s"' 29 | % (", ".join(valid_frequencies)) 30 | ) 31 | return frequency 32 | 33 | 34 | def session_status(status): 35 | """ 36 | Property: Session.Status 37 | """ 38 | 39 | valid_status = ["ENABLED", "DISABLED"] 40 | 41 | if status not in valid_status: 42 | raise ValueError('Status must be one of: "%s"' % (", ".join(valid_status))) 43 | return status 44 | -------------------------------------------------------------------------------- /troposphere/validators/mediastore.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def containerlevelmetrics_status(status): 8 | """ 9 | Property: MetricPolicy.ContainerLevelMetrics 10 | """ 11 | valid_status = ["DISABLED", "ENABLED"] 12 | if status not in valid_status: 13 | raise ValueError( 14 | 'ContainerLevelMetrics must be one of: "%s"' % (", ".join(valid_status)) 15 | ) 16 | return status 17 | -------------------------------------------------------------------------------- /troposphere/validators/networkfirewall.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2021, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_rule_group_type(rule_group_type): 8 | """ 9 | Validate Type for RuleGroup 10 | Property: RuleGroup.Type 11 | """ 12 | 13 | VALID_RULE_GROUP_TYPES = ("STATEFUL", "STATELESS") 14 | if rule_group_type not in VALID_RULE_GROUP_TYPES: 15 | raise ValueError( 16 | "RuleGroup Type must be one of %s" % ", ".join(VALID_RULE_GROUP_TYPES) 17 | ) 18 | return rule_group_type 19 | -------------------------------------------------------------------------------- /troposphere/validators/opensearchservice.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | import re 7 | 8 | 9 | def validate_search_service_engine_version(engine_version): 10 | """ 11 | Validate Engine Version for OpenSearchServiceDomain. 12 | The value must be in the format OpenSearch_X.Y or Elasticsearch_X.Y 13 | Property: Domain.EngineVersion 14 | """ 15 | 16 | engine_version_check = re.compile(r"^(OpenSearch_|Elasticsearch_)\d{1,5}.\d{1,5}") 17 | if engine_version_check.match(engine_version) is None: 18 | raise ValueError( 19 | "OpenSearch EngineVersion must be in the format OpenSearch_X.Y or Elasticsearch_X.Y" 20 | ) 21 | return engine_version 22 | -------------------------------------------------------------------------------- /troposphere/validators/opsworkscm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import tags_or_list 8 | 9 | 10 | def validate_tags_or_list(x): 11 | """ 12 | Property: Server.Tags 13 | """ 14 | return tags_or_list(x) 15 | -------------------------------------------------------------------------------- /troposphere/validators/organizations.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_policy_type(policy_type): 8 | """ 9 | Property: Policy.Type 10 | """ 11 | valid_types = [ 12 | "AISERVICES_OPT_OUT_POLICY", 13 | "BACKUP_POLICY", 14 | "CHATBOT_POLICY", 15 | "DECLARATIVE_POLICY_EC2", 16 | "RESOURCE_CONTROL_POLICY", 17 | "SERVICE_CONTROL_POLICY", 18 | "TAG_POLICY", 19 | ] 20 | if policy_type not in valid_types: 21 | raise ValueError("Type must be one of: %s" % ", ".join(valid_types)) 22 | return policy_type 23 | -------------------------------------------------------------------------------- /troposphere/validators/rekognition.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_PolygonRegionsOfInterest(polygons): 8 | """ 9 | Property: StreamProcessor.PolygonRegionsOfInterest 10 | """ 11 | from ..rekognition import Point 12 | 13 | if not isinstance(polygons, list): 14 | raise TypeError("PolygonRegionsOfInterest must be a list") 15 | 16 | all_lists = all(isinstance(item, list) for item in polygons) 17 | if not all_lists: 18 | raise TypeError("PolygonRegionsOfInterest must be a list of lists") 19 | 20 | all_points = all( 21 | isinstance(point, Point) for sublist in polygons for point in sublist 22 | ) 23 | if not all_points: 24 | raise TypeError("PolygonRegionsOfInterest must be a list of lists of ponts") 25 | -------------------------------------------------------------------------------- /troposphere/validators/resiliencehub.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2021, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_resiliencypolicy_policy(policy): 8 | """ 9 | Validate Type for Policy 10 | Property: ResiliencyPolicy.Policy 11 | """ 12 | from ..resiliencehub import FailurePolicy 13 | 14 | VALID_POLICY_KEYS = ("Software", "Hardware", "AZ", "Region") 15 | 16 | if not isinstance(policy, dict): 17 | raise ValueError("Policy must be a dict") 18 | 19 | for k, v in policy.items(): 20 | if k not in VALID_POLICY_KEYS: 21 | policy_keys = ", ".join(VALID_POLICY_KEYS) 22 | raise ValueError(f"Policy key must be one of {policy_keys}") 23 | 24 | if not isinstance(v, FailurePolicy): 25 | raise ValueError("Policy value must be FailurePolicy") 26 | 27 | return policy 28 | 29 | 30 | def validate_resiliencypolicy_tier(tier): 31 | """ 32 | Validate Type for Tier 33 | Property: ResiliencyPolicy.Tier 34 | """ 35 | 36 | VALID_TIER_VALUES = ( 37 | "MissionCritical", 38 | "Critical", 39 | "Important", 40 | "CoreServices", 41 | "NonCritical", 42 | ) 43 | 44 | if tier not in VALID_TIER_VALUES: 45 | tier_values = ", ".join(VALID_TIER_VALUES) 46 | raise ValueError(f"Tier must be one of {tier_values}") 47 | 48 | return tier 49 | -------------------------------------------------------------------------------- /troposphere/validators/resourcegroups.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def resourcequery_type(type): 8 | """ 9 | Property: ResourceQuery.Type 10 | """ 11 | 12 | valid_types = ["TAG_FILTERS_1_0", "CLOUDFORMATION_STACK_1_0"] 13 | 14 | if type not in valid_types: 15 | raise ValueError('Type must be one of: "%s"' % (", ".join(valid_types))) 16 | return type 17 | -------------------------------------------------------------------------------- /troposphere/validators/route53.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from .. import AWSProperty 8 | from . import boolean 9 | 10 | 11 | class AliasTarget(AWSProperty): 12 | """ 13 | Export: 14 | """ 15 | 16 | props = { 17 | "HostedZoneId": (str, True), 18 | "DNSName": (str, True), 19 | "EvaluateTargetHealth": (boolean, False), 20 | } 21 | 22 | def __init__( 23 | self, hostedzoneid=None, dnsname=None, evaluatetargethealth=None, **kwargs 24 | ): 25 | # provided for backward compatibility 26 | if hostedzoneid is not None: 27 | kwargs["HostedZoneId"] = hostedzoneid 28 | if dnsname is not None: 29 | kwargs["DNSName"] = dnsname 30 | if evaluatetargethealth is not None: 31 | kwargs["EvaluateTargetHealth"] = evaluatetargethealth 32 | super().__init__(**kwargs) 33 | -------------------------------------------------------------------------------- /troposphere/validators/route53resolver.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_ruletype(ruletype): 8 | """ 9 | Validate RuleType for ResolverRule. 10 | Property: ResolverRule.RuleType 11 | """ 12 | 13 | VALID_RULETYPES = ("SYSTEM", "FORWARD") 14 | 15 | if ruletype not in VALID_RULETYPES: 16 | raise ValueError("Rule type must be one of: %s" % ", ".join(VALID_RULETYPES)) 17 | return ruletype 18 | -------------------------------------------------------------------------------- /troposphere/validators/scheduler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, Guy Taylor 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_flexibletimewindow_mode(flexibletimewindow_mode): 8 | """ 9 | Validate State for FlexibleTimeWindow 10 | Property: FlexibleTimeWindow.Mode 11 | """ 12 | 13 | valid_modes = ["OFF", "FLEXIBLE"] 14 | 15 | if flexibletimewindow_mode not in valid_modes: 16 | raise ValueError("{} is not a valid mode".format(flexibletimewindow_mode)) 17 | return flexibletimewindow_mode 18 | 19 | 20 | def validate_ecsparameters_tags(ecsparameters_tags): 21 | """ 22 | Validate State for EcsParameters 23 | Property: EcsParameters.Tags 24 | """ 25 | 26 | if ecsparameters_tags is not None: 27 | raise ValueError( 28 | "EcsParameters Tags must be None as the TagMap property is not currently supported by AWS CloudFormation" 29 | ) 30 | return ecsparameters_tags 31 | -------------------------------------------------------------------------------- /troposphere/validators/secretsmanager.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | from . import tags_or_list 9 | 10 | 11 | def policytypes(policy): 12 | """ 13 | Property: ResourcePolicy.ResourcePolicy 14 | """ 15 | return validate_policytype(policy) 16 | 17 | 18 | def validate_tags_or_list(x): 19 | """ 20 | Property: Secret.Tags 21 | """ 22 | return tags_or_list(x) 23 | 24 | 25 | def validate_target_types(target_type): 26 | """ 27 | Target types validation rule. 28 | Property: SecretTargetAttachment.TargetType 29 | """ 30 | 31 | VALID_TARGET_TYPES = ( 32 | "AWS::RDS::DBInstance", 33 | "AWS::RDS::DBCluster", 34 | "AWS::Redshift::Cluster", 35 | "AWS::DocDB::DBInstance", 36 | "AWS::DocDB::DBCluster", 37 | ) 38 | 39 | if target_type not in VALID_TARGET_TYPES: 40 | raise ValueError( 41 | "Target type must be one of : %s" % ", ".join(VALID_TARGET_TYPES) 42 | ) 43 | return target_type 44 | -------------------------------------------------------------------------------- /troposphere/validators/servicecatalog.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_tag_update(update): 8 | """ 9 | Property: ResourceUpdateConstraint.TagUpdateOnProvisionedProduct 10 | """ 11 | valid_tag_update_values = [ 12 | "ALLOWED", 13 | "NOT_ALLOWED", 14 | ] 15 | if update not in valid_tag_update_values: 16 | raise ValueError("{} is not a valid tag update value".format(update)) 17 | return update 18 | -------------------------------------------------------------------------------- /troposphere/validators/sns.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | 9 | 10 | def policytypes(policy): 11 | """ 12 | Property: TopicPolicy.PolicyDocument 13 | """ 14 | return validate_policytype(policy) 15 | -------------------------------------------------------------------------------- /troposphere/validators/sqs.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from ..compat import validate_policytype 8 | 9 | 10 | def policytypes(policy): 11 | """ 12 | Property: QueuePolicy.PolicyDocument 13 | """ 14 | return validate_policytype(policy) 15 | 16 | 17 | def validate_queue(self): 18 | """ 19 | Class: Queue 20 | """ 21 | from .. import AWSHelperFn 22 | 23 | if self.properties.get("FifoQueue"): 24 | queuename = self.properties.get("QueueName") 25 | if queuename is None or isinstance(queuename, AWSHelperFn): 26 | pass 27 | elif not queuename.endswith(".fifo"): 28 | raise ValueError( 29 | "SQS: FIFO queues need to provide a " "QueueName that ends with '.fifo'" 30 | ) 31 | -------------------------------------------------------------------------------- /troposphere/validators/synthetics.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def canary_runtime_version(runtime_version): 8 | """ 9 | Property: Canary.RuntimeVersion 10 | """ 11 | 12 | valid_runtime_versions = [ 13 | "syn-nodejs-playwright-1.0", 14 | "syn-nodejs-puppeteer-4.0", 15 | "syn-nodejs-puppeteer-5.0", 16 | "syn-nodejs-puppeteer-5.1", 17 | "syn-nodejs-puppeteer-5.2", 18 | "syn-nodejs-puppeteer-6.0", 19 | "syn-nodejs-puppeteer-6.1", 20 | "syn-nodejs-puppeteer-6.2", 21 | "syn-nodejs-puppeteer-7.0", 22 | "syn-nodejs-puppeteer-8.0", 23 | "syn-nodejs-puppeteer-9.0", 24 | "syn-nodejs-puppeteer-9.1", 25 | "syn-python-selenium-1.0", 26 | "syn-python-selenium-1.1", 27 | "syn-python-selenium-1.2", 28 | "syn-python-selenium-1.3", 29 | "syn-python-selenium-2.0", 30 | "syn-python-selenium-2.1", 31 | "syn-python-selenium-3.0", 32 | "syn-python-selenium-4.0", 33 | "syn-python-selenium-4.1", 34 | ] 35 | if runtime_version not in valid_runtime_versions: 36 | raise ValueError( 37 | 'RuntimeVersion must be one of: "%s"' % (", ".join(valid_runtime_versions)) 38 | ) 39 | return runtime_version 40 | -------------------------------------------------------------------------------- /troposphere/validators/transfer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | def validate_homedirectory_type(homedirectory_type): 8 | """ 9 | Validate HomeDirectoryType for User 10 | Property: User.HomeDirectoryType 11 | """ 12 | 13 | VALID_HOMEDIRECTORY_TYPE = ("LOGICAL", "PATH") 14 | 15 | if homedirectory_type not in VALID_HOMEDIRECTORY_TYPE: # NOQA 16 | raise ValueError( 17 | "User HomeDirectoryType must be one of: %s" 18 | % ", ".join(VALID_HOMEDIRECTORY_TYPE) # NOQA 19 | ) 20 | return homedirectory_type 21 | -------------------------------------------------------------------------------- /troposphere/validators/waf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import waf_action_type 8 | 9 | 10 | def validate_waf_action_type(action): 11 | """ 12 | Property: Action.Type 13 | """ 14 | return waf_action_type(action) 15 | -------------------------------------------------------------------------------- /troposphere/validators/wafregional.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2022, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | 6 | 7 | from . import waf_action_type 8 | 9 | 10 | def validate_waf_action_type(action): 11 | """ 12 | Property: Action.Type 13 | """ 14 | return waf_action_type(action) 15 | -------------------------------------------------------------------------------- /troposphere/voiceid.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2025, Mark Peek 2 | # All rights reserved. 3 | # 4 | # See LICENSE file for full license. 5 | # 6 | # *** Do not modify - this file is autogenerated *** 7 | 8 | 9 | from . import AWSObject, AWSProperty, PropsDictType, Tags 10 | 11 | 12 | class ServerSideEncryptionConfiguration(AWSProperty): 13 | """ 14 | `ServerSideEncryptionConfiguration `__ 15 | """ 16 | 17 | props: PropsDictType = { 18 | "KmsKeyId": (str, True), 19 | } 20 | 21 | 22 | class Domain(AWSObject): 23 | """ 24 | `Domain `__ 25 | """ 26 | 27 | resource_type = "AWS::VoiceID::Domain" 28 | 29 | props: PropsDictType = { 30 | "Description": (str, False), 31 | "Name": (str, True), 32 | "ServerSideEncryptionConfiguration": (ServerSideEncryptionConfiguration, True), 33 | "Tags": (Tags, False), 34 | } 35 | --------------------------------------------------------------------------------