├── .github
│   └── workflows
│       ├── publish-to-pypi.yml
│       └── publish-to-test-pypi.yml
├── .gitignore
├── CHANGES.txt
├── Dockerfile
├── Jenkinsfile
├── LICENSE.txt
├── README.md
├── deploy-sam-template.sh
├── requirements.txt
├── serverless.yml
├── setup.py
├── shelvery
│   ├── __init__.py
│   ├── aws_helper.py
│   ├── backup_resource.py
│   ├── documentdb_backup.py
│   ├── ebs_backup.py
│   ├── ec2_backup.py
│   ├── ec2ami_backup.py
│   ├── engine.py
│   ├── entity_resource.py
│   ├── factory.py
│   ├── notifications.py
│   ├── queue.py
│   ├── rds_backup.py
│   ├── rds_cluster_backup.py
│   ├── redshift_backup.py
│   ├── runtime_config.py
│   └── shelvery_invoker.py
├── shelvery_cli
│   ├── __init__.py
│   ├── __main__.py
│   └── shelver_cli_main.py
├── shelvery_lambda
│   ├── __init__.py
│   └── lambda_handler.py
├── shelvery_tests
│   ├── __init__.py
│   ├── cleanup_functions.py
│   ├── cloudformation-unittest.yaml
│   ├── conftest.py
│   ├── data_bucket_test.py
│   ├── docdb_integration_test.py
│   ├── docdb_pull_test.py
│   ├── ebs_integration_test.py
│   ├── ebs_pull_test.py
│   ├── ec2ami_integration_test.py
│   ├── ec2ami_pull_test.py
│   ├── engine_test.py
│   ├── pytest.ini
│   ├── rds_cluster_integration_test.py
│   ├── rds_cluster_pull_test.py
│   ├── rds_integration_test.py
│   ├── rds_pull_test.py
│   ├── resources.py
│   ├── s3data_integration_test.py
│   ├── test_functions.py
│   └── zname_transformation_test.py
└── template.yaml

--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
name: PyPI Release

on:
  release:
    types: [created]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install wheel twine
          pip install -r requirements.txt
      - name: Build distribution package
        run: |
          python setup.py sdist bdist_wheel
      - name: Upload to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          twine upload --verbose dist/*

--------------------------------------------------------------------------------
/.github/workflows/publish-to-test-pypi.yml:
--------------------------------------------------------------------------------
name: Test PyPI Release

on:
  push:
    branches:
      - master

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install wheel twine
          pip install -r requirements.txt
      - name: Build distribution package
        run: |
          python setup.py sdist bdist_wheel
      - name: Upload to Test PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.TEST_PYPI_API_TOKEN }}
        run: |
          twine upload --verbose --repository-url https://test.pypi.org/legacy/ dist/*
      - name: Install package from TestPyPI
        run: |
          python -m pip install --index-url https://test.pypi.org/simple/ --no-deps shelvery

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
lib
.idea
shelvery.iml
*__pycache__*
nosetests.xml
MANIFEST
build
dist
shelvery.egg-info
play.sh
.serverless
.noseids
out.txt
.pytest_cache

packaged-template.yaml
/shelvery-aws-backups.iml

--------------------------------------------------------------------------------
/CHANGES.txt:
--------------------------------------------------------------------------------
v0.1.0, 6/11/2017 -- Initial release.
v0.2.0, 14/11/2017 -- Control behaviour via resource tags. Multiple levels of configuration. RDS Cleanup.
v0.3.0, 20/12/2017 -- RDS Cluster backups added
v0.3.1, 12/02/2018 -- Fixed bug related to cleanup of yearly backups
v0.3.2, 12/02/2018 -- Added error handling of cleanup on per-backup basis. Allowing legacy datetime format in backup tags
v0.4.0, 26/04/2018 -- Support for single resource backup. Support for Ec2 AMIs. Integration testing. Single thread mode.
v0.4.1, 27/04/2018 -- Updating scm repository information, no source code changes
v0.4.2, 27/04/2018 -- RDS and Cluster - fallback to regular snapshot if no automated backups. Bugfix - backup cleanup when resource is not available.
v0.4.3, 27/04/2018 -- Bugfix - unmarking rds snapshots as shelvery managed
v0.5.0, 21/08/2018 -- New features - Pull shared backups

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.11

COPY requirements.txt requirements.txt

RUN pip install prospector && \
    pip install -r requirements.txt && \
    useradd -ms /bin/bash jenkins

ENV PATH /home/jenkins/.local/bin:$PATH

--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
@Library('ciinabox') _

pipeline {

  agent {
    dockerfile {
      filename 'Dockerfile'
      label 'docker'
    }
  }

  stages {

    stage('Notify slack') {
      steps {
        slackSend color: '#70A1F0',
          message: "Shelvery pipeline started\n*Branch:* ${env.BRANCH_NAME}\n*Commit:* ${env.GIT_COMMIT}\n*Build:* <${env.BUILD_URL}|${env.BUILD_NUMBER}>"
      }
    }

    stage('Static Code Analysis') {
      steps {
        script {
          def prospectorStatus = sh script: "prospector", returnStatus: true
          if (prospectorStatus != 0) {
            // ignore failures here for now until issues are resolved
            echo "prospector failed with status code ${prospectorStatus}"
          }
        }
      }
    }

    stage('Unit Tests') {
      when { changeRequest target: 'master' }
      steps {
        script {
          // Source account
          withAWS(role: env.SHELVERY_TEST_ROLE, region: 'ap-southeast-2') {
            sh "pwd"
            dir('shelvery_tests') {
              def pytestStatus = sh script: "pytest -s -v -m source --source ${env.OPS_ACCOUNT_ID} --destination ${env.DEV_ACCOUNT_ID} --junit-xml=pytest_unit.xml", returnStatus: true
              junit 'pytest_unit.xml'

              if (pytestStatus != 0) {
                currentBuild.result = 'FAILURE'
                error("Shelvery unit tests failed with exit code ${pytestStatus}")
              }
            }
          }
        }
        script {
          // Destination account
          withAWS(role: env.SHELVERY_TEST_ROLE, roleAccount: env.DEV_ACCOUNT_ID, region: 'ap-southeast-2') {
            sh "pwd"
            dir('shelvery_tests') {
              def pytestStatus = sh script: "pytest -s -v -m destination --source ${env.OPS_ACCOUNT_ID} --destination ${env.DEV_ACCOUNT_ID} --junit-xml=pytest_unit.xml", returnStatus: true
              junit 'pytest_unit.xml'

              if (pytestStatus != 0) {
                currentBuild.result = 'FAILURE'
                error("Shelvery unit tests failed with exit code ${pytestStatus}")
              }
            }
          }
        }
      }
    }

    stage('CLI Utility Test') {
      steps {
        sh "python setup.py build install --user"
        script {
          // 'shelvery --version' is expected to exit with code 254;
          // any other exit code indicates a broken CLI entry point
          def shelveryCliStatus = sh script: "shelvery --version", returnStatus: true

          if (shelveryCliStatus != 254) {
            currentBuild.result = 'FAILURE'
            error("Shelvery CLI test failed with exit code ${shelveryCliStatus}")
          }
        }
      }
    }

    stage('Package') {
      steps {
        sh "python3 setup.py sdist"
        stash name: 'archive', includes: 'dist/*'
      }
    }

    stage('Release S3') {
      steps {
        unstash name: 'archive'

        script {
          def fileName = shellOut('cd $WORKSPACE/dist && ls -1 shelvery-*.tar.gz')
          def safebranch = env.BRANCH_NAME.replace("/", "_")
          def releaseFileName = env.BRANCH_NAME == 'master' ? fileName : fileName.replace('.tar.gz', "-${safebranch}.tar.gz")
          env["SHELVERY_S3_RELEASE"] = "https://${env.SHELVERY_DIST_BUCKET}.s3.amazonaws.com/release/${releaseFileName}"
          s3Upload(bucket: env.SHELVERY_DIST_BUCKET, file: "dist/${fileName}", path: "release/${releaseFileName}")
        }
      }
      post {
        success {
          slackSend color: '#00FF00', message: "built new shelvery release for branch ${env.BRANCH_NAME} and published to ${env.SHELVERY_S3_RELEASE}"
        }
      }
    }

  }

  post {
    success {
      slackSend color: '#00FF00',
        message: "Shelvery ${env.BRANCH_NAME} build <${env.BUILD_URL}|${env.BUILD_NUMBER}> successfully completed"
    }
    failure {
      slackSend color: '#FF0000',
        message: "Shelvery ${env.BRANCH_NAME} build <${env.BUILD_URL}|${env.BUILD_NUMBER}> failed"
    }
  }
}

--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
Copyright (c) 2017 Base2Services

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/deploy-sam-template.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

SHELVERY_VERSION=0.9.13

# set DOCKERUSERID to the current user; can be overridden with -u UID
DOCKERUSERID="-u $(id -u)"

if [[ $1 == "help" ]]; then
  echo """
Usage:
  ./deploy-sam-template.sh -b my-s3-bucket -r us-west-2    # deploy latest shelvery version in the us-west-2 region
  ./deploy-sam-template.sh -b my-s3-bucket -l true -p true # package and deploy the current git branch

Options:
  -b BUCKET                    # s3 bucket to deploy the sam package to
  [-v VERSION]                 # set the shelvery version to deploy, defaults to $SHELVERY_VERSION
  [-r REGION]                  # AWS region to deploy shelvery, if not set it will get from the aws config or environment
  [-p true] BOOLEAN            # Build and package shelvery from the current branch. Use with '-l true' to deploy the package.
  [-l true] BOOLEAN            # install shelvery from a local dist build in the ./dist/shelvery-\${SHELVERY_VERSION}.tar.gz
  [-o KEY1=VALUE1,KEY2=VALUE2] # Override cloudformation template parameters with a comma separated string of key value pairs
                               # e.g. -o ShelveryRdsBackupMode=RDS_CREATE_SNAPSHOT,ShelveryEncryptCopy=true
  [-u UID]                     # Set the docker user id, defaults to $DOCKERUSERID
"""
  exit 1
fi

while getopts ":b:r:u:l:p:v:o:" opt; do
  case $opt in
    b)
      BUCKET=$OPTARG
      ;;
    v)
      SHELVERY_VERSION=$OPTARG
      ;;
    r)
      REGION=$OPTARG
      ;;
    l)
      LOCAL_INSTALL=$OPTARG
      ;;
    p)
      PACKAGE=$OPTARG
      ;;
    o)
      PARAM_OVERRIDES=$OPTARG
      ;;
    u)
      DOCKERUSERID=" -u $OPTARG"
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done

if [ -z ${BUCKET+x} ]; then
  echo "Source bucket not set with -b"
  exit 1
fi

if [ ! -z ${REGION} ]; then
  REGION="--region $REGION"
fi

rm -rf lib/*
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

if [[ ${PACKAGE} == 'true' ]]; then
  docker run --rm -v $DIR:/build -w /build $DOCKERUSERID python:3 python setup.py sdist
fi

if [[ ${LOCAL_INSTALL} == 'true' ]]; then
  echo "Installing shelvery $SHELVERY_VERSION from local sdist"
  docker run --rm -v $DIR:/dst -w /dst $DOCKERUSERID python:3 pip install ./dist/shelvery-${SHELVERY_VERSION}.tar.gz -t lib
else
  echo "Installing shelvery $SHELVERY_VERSION from pypi"
  docker run --rm -v $DIR:/dst -w /dst $DOCKERUSERID python:3 pip install shelvery==$SHELVERY_VERSION -t lib
fi

echo "packaging lambdas"
cd lib
zip shelvery.zip -r ./*
cd ..

PARAM_OPTS=""

if [ ! -z ${PARAM_OVERRIDES} ]; then
  PARAMS=($(echo ${PARAM_OVERRIDES} | tr ',' "\n"))
  PARAM_OPTS="--parameter-overrides"
  for p in "${PARAMS[@]}"
  do
    KEY=$(echo $p | cut -d'=' -f1)
    VALUE=$(echo $p | cut -d'=' -f2)
    PARAM_OPTS="${PARAM_OPTS} ParameterKey=${KEY},ParameterValue=${VALUE}"
  done
fi

echo "packaging cloudformation"
sam package --template-file template.yaml --s3-bucket $BUCKET --s3-prefix cloudformation/shelvery --output-template-file packaged-template.yaml $REGION

echo "updating/creating cloudformation stack shelvery"
sam deploy --template-file ./packaged-template.yaml --stack-name shelvery --capabilities CAPABILITY_IAM $PARAM_OPTS $REGION

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
python-dateutil
boto3
pytest
pyyaml

--------------------------------------------------------------------------------
/serverless.yml:
--------------------------------------------------------------------------------
service:
  name: shelvery

provider:
  name: aws
  runtime: python3.11
  iamRoleStatements:
    # read only ec2, rds
    - Effect: Allow
      Action:
        - 'ec2:Describe*'
        - 'rds:Describe*'
        - 'rds:ListTagsForResource'
      Resource: '*'
    # manage ebs snapshots and tags
    - Effect: Allow
      Action:
        - ec2:CreateSnapshot
        - ec2:ModifySnapshotAttribute
        - ec2:ResetSnapshotAttribute
        - ec2:DeleteSnapshot
        - ec2:DescribeTags
        - ec2:CreateTags
        - ec2:DeleteTags
        - ec2:CopySnapshot
      Resource: '*'
    # manage rds snapshots and tags
    - Effect: Allow
      Action:
        - rds:ModifyDBSnapshotAttribute
        - rds:ModifyDBClusterSnapshotAttribute
        - rds:CopyDBSnapshot
        - rds:CopyDBClusterSnapshot
        - rds:DeleteDBSnapshot
        - rds:DeleteDBClusterSnapshot
        - rds:CreateDBSnapshot
        - rds:CreateDBClusterSnapshot
        - rds:AddTagsToResource
        - rds:RemoveTagsFromResource
      Resource: '*'
    - Effect: Allow
      Action:
        - 'lambda:Invoke*'
      Resource:
        'Fn::Join':
          - ''
          - - 'arn:aws:lambda:'
            - Ref: 'AWS::Region'
            - ':'
            - Ref: 'AWS::AccountId'
            - ':function:shelvery'
    # manage AMIs (limited)
    - Effect: Allow
      Action:
        - ec2:CopyImage
        - ec2:CreateImage
        - ec2:DeregisterImage
        - ec2:DescribeImageAttribute
        - ec2:DescribeImages
        - ec2:DescribeInstances
        - ec2:ModifyImageAttribute
        - ec2:DeleteSnapshot
      Resource: '*'
    - Effect: Allow
      Action:
        - s3:Get*
        - s3:List*
        - s3:CreateBucket
        - s3:PutBucketPolicy
        - s3:PutBucketAcl
        - s3:PutObject
        - s3:PutObjectAcl
        - s3:DeleteObject
      Resource: '*'
    - Effect: Allow
      Action:
        - sns:Publish*
      Resource: '*'

package:
  include:
    - shelvery/*
    - shelvery_lambda/*
  exclude:
    - shelvery_cli/*
    - tests/**
    - build/**
    - dist/**
    - Jenkinsfile
    - nosetests.xml
    - setup.py
    - shelvery.egg-info/**
  excludeDevDependencies: true

functions:
  shelvery:
    handler: shelvery_lambda/lambda_handler.lambda_handler
    name: shelvery
    runtime: python3.11
    timeout: 300
    events:
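      # The schedule expressions below use the six-field AWS cron format:
      # cron(minutes hours day-of-month month day-of-week year); '?' means
      # "no specific value" and is required in either the day-of-month or
      # day-of-week field, e.g. "cron(0 1 ? * * *)" fires daily at 01:00 UTC.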
* * *)" 108 | enabled: true 109 | input: 110 | backup_type: ebs 111 | action: create_data_buckets 112 | # create ebs 113 | - schedule: 114 | rate: "cron(0 1 ? * * *)" 115 | enabled: true 116 | input: 117 | backup_type: ebs 118 | action: create_backups 119 | # create rds 120 | - schedule: 121 | rate: "cron(0 1 ? * * *)" 122 | enabled: true 123 | input: 124 | backup_type: rds 125 | action: create_backups 126 | # create rds cluster 127 | - schedule: 128 | rate: "cron(0 1 ? * * *)" 129 | enabled: true 130 | input: 131 | backup_type: rds_cluster 132 | action: create_backups 133 | # create ec2 amis 134 | - schedule: 135 | rate: "cron(0 1 ? * * *)" 136 | enabled: true 137 | input: 138 | backup_type: ec2ami 139 | action: create_backups 140 | 141 | ## Clean backups schedule ## 142 | 143 | # clean ebs 144 | - schedule: 145 | rate: "cron(0 2 ? * * *)" 146 | enabled: true 147 | input: 148 | backup_type: ebs 149 | action: clean_backups 150 | # clean rds 151 | - schedule: 152 | rate: "cron(0 2 ? * * *)" 153 | enabled: true 154 | input: 155 | backup_type: rds 156 | action: clean_backups 157 | # clean rds cluster 158 | - schedule: 159 | rate: "cron(0 2 ? * * *)" 160 | enabled: true 161 | input: 162 | backup_type: rds_cluster 163 | action: clean_backups 164 | # clean amis 165 | - schedule: 166 | rate: "cron(0 2 ? * * *)" 167 | enabled: true 168 | input: 169 | backup_type: ec2ami 170 | action: clean_backups 171 | 172 | ### Pull shared backups #### 173 | 174 | # pull ebs 175 | - schedule: 176 | rate: "cron(0 2 ? * * *)" 177 | enabled: true 178 | input: 179 | backup_type: ebs 180 | action: pull_shared_backups 181 | # pull rds 182 | - schedule: 183 | rate: "cron(0 2 ? * * *)" 184 | enabled: true 185 | input: 186 | backup_type: rds 187 | action: pull_shared_backups 188 | # pull rds cluster 189 | - schedule: 190 | rate: "cron(0 2 ? * * *)" 191 | enabled: true 192 | input: 193 | backup_type: rds_cluster 194 | action: pull_shared_backups 195 | # pull amis 196 | - schedule: 197 | rate: "cron(0 2 ? 
* * *)" 198 | enabled: true 199 | input: 200 | backup_type: ec2ami 201 | action: pull_shared_backups 202 | environment: 203 | shelvery_keep_daily_backups: ${env:shelvery_keep_daily_backups,'14'} 204 | shelvery_keep_weekly_backups: ${env:shelvery_keep_weekly_backups,'8'} 205 | shelvery_keep_monthly_backups: ${env:shelvery_keep_monthly_backups,'12'} 206 | shelvery_keep_yearly_backups: ${env:shelvery_keep_yearly_backups,'10'} 207 | shelvery_dr_regions: ${env:shelvery_dr_regions,''} 208 | shelvery_share_aws_account_ids: ${env:shelvery_share_aws_account_ids,''} 209 | shelvery_source_aws_account_ids: ${env:shelvery_source_aws_account_ids,''} 210 | shelvery_rds_backup_mode: ${env:shelvery_rds_backup_mode,'RDS_COPY_AUTOMATED_SNAPSHOT'} 211 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup(name='shelvery', version='0.9.13', author='Base2Services R&D', 4 | author_email='itsupport@base2services.com', 5 | url='http://github.com/base2Services/shelvery-aws-backups', 6 | classifiers=[ 7 | 'Development Status :: 3 - Alpha', 8 | 'Programming Language :: Python :: 3.6', 9 | 'Intended Audience :: System Administrators', 10 | 'Intended Audience :: Information Technology', 11 | 'License :: OSI Approved :: MIT License', 12 | 'Topic :: System :: Archiving :: Backup', 13 | ], 14 | keywords='aws backup lambda ebs rds ami', 15 | packages=['shelvery', 'shelvery_cli', 'shelvery_lambda'], 16 | install_requires=['boto3', 'python-dateutil', 'pyyaml'], 17 | python_requires='>=3.6', 18 | description='Backup manager for AWS EBS and AWS RDS services', 19 | entry_points={ 20 | 'console_scripts': ['shelvery = shelvery_cli.__main__:main'], 21 | }) 22 | -------------------------------------------------------------------------------- /shelvery/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.9.13' 2 | LAMBDA_WAIT_ITERATION = 'lambda_wait_iteration' 3 | S3_DATA_PREFIX = 'backups' 4 | SHELVERY_DO_BACKUP_TAGS = ['True', 'true', '1', 'TRUE'] 5 | -------------------------------------------------------------------------------- /shelvery/aws_helper.py: -------------------------------------------------------------------------------- 1 | import json 2 | import boto3 3 | from botocore.config import Config 4 | 5 | from shelvery.runtime_config import RuntimeConfig 6 | from shelvery import S3_DATA_PREFIX 7 | 8 | class AwsHelper: 9 | 10 | 11 | @staticmethod 12 | def get_shelvery_bucket_policy(owner_id, share_account_ids, bucket_name): 13 | """ 14 | Returns bucket policy allowing all destination accounts access to shared 15 | paths 16 | :param share_account_ids: 17 | :param bucket_name: 18 | :return: 19 | """ 20 | policy_stmt = [{ 21 | 'Effect': 'Allow', 22 | 'Principal':{'AWS':f"arn:aws:iam::{owner_id}:root"} , 23 | 'Action': 's3:*', 24 | 'Resource': [ 25 | f"arn:aws:s3:::{bucket_name}", 26 | f"arn:aws:s3:::{bucket_name}/*", 27 | ] 28 | }] 29 | if share_account_ids is not None: 30 | for shared_account_id in share_account_ids: 31 | policy_stmt.append({ 32 | 'Effect': 'Allow', 33 | 'Principal':{'AWS':f"arn:aws:iam::{shared_account_id}:root"} , 34 | 'Action': ['s3:Get*', 's3:List*'], 35 | 'Resource': f"arn:aws:s3:::{bucket_name}" 36 | }) 37 | policy_stmt.append({ 38 | 'Effect': 'Allow', 39 | 'Principal':{'AWS':f"arn:aws:iam::{shared_account_id}:root"} , 40 | 'Action': 's3:*', 41 | 'Resource': 
f"arn:aws:s3:::{bucket_name}/{S3_DATA_PREFIX}/shared/{shared_account_id}*" 42 | }) 43 | return json.dumps({'Version': '2012-10-17', 'Id': 'shelvery-generated', 'Statement': policy_stmt}, separators=(',', ':')) 44 | 45 | @staticmethod 46 | def local_account_id(): 47 | return AwsHelper.boto3_client('sts').get_caller_identity()['Account'] 48 | 49 | @staticmethod 50 | def local_region(): 51 | return boto3.session.Session().region_name 52 | 53 | @staticmethod 54 | def boto3_retry_config(): 55 | return RuntimeConfig.boto3_retry_times() 56 | 57 | @staticmethod 58 | def boto3_sts(arn,external_id): 59 | sts_client = boto3.client('sts',config=Config(retries={'max_attempts':AwsHelper.boto3_retry_config()})) 60 | if external_id is not None: 61 | assumedRoleObject = sts_client.assume_role( 62 | RoleArn=arn, 63 | RoleSessionName="shelvery-runtime", 64 | ExternalId=external_id 65 | ) 66 | else: 67 | assumedRoleObject = sts_client.assume_role( 68 | RoleArn=arn, 69 | RoleSessionName="shelvery-runtime" 70 | ) 71 | 72 | return assumedRoleObject['Credentials'] 73 | 74 | @staticmethod 75 | def boto3_client(service_name, region_name = None, arn = None, external_id = None): 76 | if region_name is None: 77 | region_name = AwsHelper.local_region() 78 | 79 | if arn is not None: 80 | credentials = AwsHelper.boto3_sts(arn,external_id) 81 | client = boto3.client(service_name, 82 | aws_access_key_id=credentials['AccessKeyId'], 83 | aws_secret_access_key=credentials['SecretAccessKey'], 84 | aws_session_token=credentials['SessionToken'], 85 | region_name=region_name, 86 | config=Config(retries={'max_attempts':AwsHelper.boto3_retry_config()})) 87 | else: 88 | client = boto3.client(service_name, 89 | region_name=region_name, 90 | config=Config(retries={'max_attempts':AwsHelper.boto3_retry_config()})) 91 | 92 | return client 93 | 94 | def boto3_session(service_name, region_name = None, arn = None, external_id = None): 95 | if arn is not None: 96 | credentials = AwsHelper.boto3_sts(arn,external_id) 97 | session = boto3.session.Session(region_name=region_name, 98 | aws_access_key_id=credentials['AccessKeyId'], 99 | aws_secret_access_key=credentials['SecretAccessKey'], 100 | aws_session_token=credentials['SessionToken'], 101 | ).resource(service_name) 102 | else: 103 | session = boto3.session.Session(region_name=region_name).resource(service_name) 104 | 105 | return session 106 | -------------------------------------------------------------------------------- /shelvery/backup_resource.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import re 3 | import copy 4 | from datetime import datetime 5 | from typing import Dict 6 | 7 | from dateutil.relativedelta import relativedelta 8 | from datetime import timedelta 9 | 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery.entity_resource import EntityResource 12 | from shelvery.runtime_config import RuntimeConfig 13 | import boto3 14 | 15 | class BackupResource: 16 | """Model representing single backup""" 17 | 18 | BACKUP_MARKER_TAG = 'backup' 19 | TIMESTAMP_FORMAT = '%Y-%m-%d-%H%M' 20 | TIMESTAMP_FORMAT_LEGACY = '%Y%m%d-%H%M' 21 | 22 | RETENTION_DAILY = 'daily' 23 | RETENTION_WEEKLY = 'weekly' 24 | RETENTION_MONTHLY = 'monthly' 25 | RETENTION_YEARLY = 'yearly' 26 | 27 | def __init__(self, tag_prefix, entity_resource: EntityResource, construct=False, copy_resource_tags=True, exluded_resource_tag_keys=[], resource_properties={}): 28 | """Construct new backup resource out of entity resource (e.g. 
ebs volume).""" 29 | # if object manually created 30 | if construct: 31 | return 32 | 33 | # current date 34 | self.date_created = datetime.utcnow() 35 | self.account_id = AwsHelper.local_account_id() 36 | 37 | # determine retention period 38 | if self.date_created.day == 1: 39 | if self.date_created.month == 1: 40 | self.retention_type = self.RETENTION_YEARLY 41 | else: 42 | self.retention_type = self.RETENTION_MONTHLY 43 | elif self.date_created.weekday() == 6: 44 | self.retention_type = self.RETENTION_WEEKLY 45 | else: 46 | self.retention_type = self.RETENTION_DAILY 47 | 48 | # determine backup name. Hash of resource id is added to support creating backups 49 | # with resources having a same name 50 | if 'Name' in entity_resource.tags: 51 | name = entity_resource.tags['Name'] 52 | name = name + '-' + hashlib.md5(entity_resource.resource_id.encode('utf-8')).hexdigest()[0:6] 53 | else: 54 | name = entity_resource.resource_id 55 | 56 | # replace anything that is not alphanumeric to hyphen 57 | # do not allow two hyphens next to each other 58 | name = re.sub('[^a-zA-Z0-9\-]', '-', name) 59 | name = re.sub('\-+','-',name) 60 | date_formatted = self.date_created.strftime(self.TIMESTAMP_FORMAT) 61 | self.name = f"{name}-{date_formatted}-{self.retention_type}" 62 | 63 | self.entity_id = entity_resource.resource_id 64 | self.entity_resource = entity_resource 65 | self.__region = entity_resource.resource_region 66 | 67 | self.tags = { 68 | 'Name': self.name, 69 | "shelvery:tag_name": tag_prefix, 70 | f"{tag_prefix}:date_created": date_formatted, 71 | f"{tag_prefix}:src_account": self.account_id, 72 | f"{tag_prefix}:name": self.name, 73 | f"{tag_prefix}:region": entity_resource.resource_region, 74 | f"{tag_prefix}:retention_type": self.retention_type, 75 | f"{tag_prefix}:entity_id": entity_resource.resource_id, 76 | f"{tag_prefix}:{self.BACKUP_MARKER_TAG}": 'true' 77 | } 78 | 79 | resource_tags = self.entity_resource_tags() 80 | 81 | if f"{tag_prefix}:config:shelvery_encrypt_copy" in resource_tags: 82 | self.tags[f"{tag_prefix}:config:shelvery_encrypt_copy"] = resource_tags[f"{tag_prefix}:config:shelvery_encrypt_copy"] 83 | 84 | if f"{tag_prefix}:config:shelvery_copy_kms_key_id" in resource_tags: 85 | self.tags[f"{tag_prefix}:config:shelvery_copy_kms_key_id"] = resource_tags[f"{tag_prefix}:config:shelvery_copy_kms_key_id"] 86 | 87 | if copy_resource_tags: 88 | for key, value in self.entity_resource_tags().items(): 89 | if key == 'Name': 90 | self.tags["ResourceName"] = value 91 | elif not any(exc_tag in key for exc_tag in exluded_resource_tag_keys): 92 | self.tags[key] = value 93 | 94 | self.backup_id = None 95 | self.expire_date = None 96 | self.date_deleted = None 97 | self.resource_properties = resource_properties 98 | 99 | def cross_account_copy(self, new_backup_id): 100 | backup = copy.deepcopy(self) 101 | 102 | # backup name and retention type are copied 103 | backup.backup_id = new_backup_id 104 | backup.region = AwsHelper.local_region() 105 | backup.account_id = AwsHelper.local_account_id() 106 | 107 | tag_prefix = self.tags['shelvery:tag_name'] 108 | backup.tags[f"{tag_prefix}:region"] = backup.region 109 | backup.tags[f"{tag_prefix}:date_copied"] = datetime.utcnow().strftime(self.TIMESTAMP_FORMAT) 110 | backup.tags[f"{tag_prefix}:dst_account"] = backup.account_id 111 | backup.tags[f"{tag_prefix}:src_region"] = self.region 112 | backup.tags[f"{tag_prefix}:region"] = backup.region 113 | backup.tags[f"{tag_prefix}:dr_copy"] = 'false' 114 | backup.tags[f"{tag_prefix}:cross_account_copy"] = 

    def cross_account_copy(self, new_backup_id):
        backup = copy.deepcopy(self)

        # backup name and retention type are copied
        backup.backup_id = new_backup_id
        backup.region = AwsHelper.local_region()
        backup.account_id = AwsHelper.local_account_id()

        tag_prefix = self.tags['shelvery:tag_name']
        backup.tags[f"{tag_prefix}:region"] = backup.region
        backup.tags[f"{tag_prefix}:date_copied"] = datetime.utcnow().strftime(self.TIMESTAMP_FORMAT)
        backup.tags[f"{tag_prefix}:dst_account"] = backup.account_id
        backup.tags[f"{tag_prefix}:src_region"] = self.region
        backup.tags[f"{tag_prefix}:dr_copy"] = 'false'
        backup.tags[f"{tag_prefix}:cross_account_copy"] = 'true'
        backup.tags[f"{tag_prefix}:dr_regions"] = ''
        backup.tags[f"{tag_prefix}:dr_copies"] = ''

        return backup

    @classmethod
    def construct(cls,
                  tag_prefix: str,
                  backup_id: str,
                  tags: Dict):
        """
        Construct a BackupResource object from a backup id and the aws tags
        stored by shelvery
        """

        obj = BackupResource(None, None, True)
        obj.entity_resource = None
        obj.entity_id = None
        obj.backup_id = backup_id
        obj.tags = tags

        # read properties from tags
        obj.retention_type = tags[f"{tag_prefix}:retention_type"]
        obj.name = tags[f"{tag_prefix}:name"]

        if f"{tag_prefix}:entity_id" in tags:
            obj.entity_id = tags[f"{tag_prefix}:entity_id"]

        try:
            obj.date_created = datetime.strptime(tags[f"{tag_prefix}:date_created"], cls.TIMESTAMP_FORMAT)
        except Exception as e:
            if 'does not match format' in str(e):
                str_date = tags[f"{tag_prefix}:date_created"]
                print(f"Failed to read {str_date} as date, trying legacy format {cls.TIMESTAMP_FORMAT_LEGACY}")
                obj.date_created = datetime.strptime(tags[f"{tag_prefix}:date_created"], cls.TIMESTAMP_FORMAT_LEGACY)

        obj.region = tags[f"{tag_prefix}:region"]
        if f"{tag_prefix}:src_account" in tags:
            obj.account_id = tags[f"{tag_prefix}:src_account"]
        else:
            obj.account_id = AwsHelper.local_account_id()

        return obj

    def entity_resource_tags(self):
        return self.entity_resource.tags if self.entity_resource is not None else {}

    def calculate_expire_date(self, engine, custom_retention_types=None):
        """Determine expire date, based on 'retention_type' tag"""
        if self.retention_type == BackupResource.RETENTION_DAILY:
            expire_date = self.date_created + timedelta(
                days=RuntimeConfig.get_keep_daily(self.entity_resource_tags(), engine))
        elif self.retention_type == BackupResource.RETENTION_WEEKLY:
            expire_date = self.date_created + relativedelta(
                weeks=RuntimeConfig.get_keep_weekly(self.entity_resource_tags(), engine))
        elif self.retention_type == BackupResource.RETENTION_MONTHLY:
            expire_date = self.date_created + relativedelta(
                months=RuntimeConfig.get_keep_monthly(self.entity_resource_tags(), engine))
        elif self.retention_type == BackupResource.RETENTION_YEARLY:
            expire_date = self.date_created + relativedelta(
                years=RuntimeConfig.get_keep_yearly(self.entity_resource_tags(), engine))
        elif custom_retention_types is not None and self.retention_type in custom_retention_types:
            expire_date = self.date_created + timedelta(
                seconds=custom_retention_types[self.retention_type])
        else:
            # we don't want backups existing forever
            raise Exception(f"Unknown retention period '{self.retention_type}' for backup '{self.backup_id}'")

        self.expire_date = expire_date

    def is_stale(self, engine, custom_retention_types=None):
        self.calculate_expire_date(engine, custom_retention_types)
        now = datetime.now(self.date_created.tzinfo)
        return now > self.expire_date

    @property
    def region(self):
        return self.__region

    @region.setter
    def region(self, region: str):
        self.__region = region

    def set_retention_type(self, retention_type: str):
        self.retention_type = retention_type
        self.name = '-'.join(self.name.split('-')[0:-1]) + f"-{retention_type}"
        self.tags[f"{self.tags['shelvery:tag_name']}:name"] = self.name
        self.tags['Name'] = self.name
        self.tags[f"{self.tags['shelvery:tag_name']}:retention_type"] = retention_type

    @property
    def boto3_tags(self):
        tags = self.tags
        return list(map(lambda k: {'Key': k, 'Value': tags[k]}, tags))

    @staticmethod
    def dict_from_boto3_tags(boto3_tags):
        return dict(map(lambda t: (t['Key'], t['Value']), boto3_tags))

--------------------------------------------------------------------------------
/shelvery/documentdb_backup.py:
--------------------------------------------------------------------------------
import boto3

from shelvery.runtime_config import RuntimeConfig
from shelvery.backup_resource import BackupResource
from shelvery.engine import ShelveryEngine, SHELVERY_DO_BACKUP_TAGS
from shelvery.entity_resource import EntityResource

from typing import Dict, List
from botocore.errorfactory import ClientError
from shelvery.aws_helper import AwsHelper


class ShelveryDocumentDbBackup(ShelveryEngine):
    def is_backup_available(self, backup_region: str, backup_id: str) -> bool:
        docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn,
                                              external_id=self.role_external_id)
        snapshots = docdb_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
        return snapshots['DBClusterSnapshots'][0]['Status'] == 'available'

    def get_resource_type(self) -> str:
        return 'DocumentDb'

    def backup_resource(self, backup_resource: BackupResource) -> BackupResource:
        if RuntimeConfig.get_docdb_mode(backup_resource.entity_resource.tags,
                                        self) == RuntimeConfig.DOCDB_CREATE_SNAPSHOT:
            return self.backup_from_cluster(backup_resource)
        if RuntimeConfig.get_docdb_mode(backup_resource.entity_resource.tags,
                                        self) == RuntimeConfig.DOCDB_COPY_AUTOMATED_SNAPSHOT:
            return self.backup_from_latest_automated(backup_resource)

        raise Exception(f"Only {RuntimeConfig.DOCDB_COPY_AUTOMATED_SNAPSHOT} and "
                        f"{RuntimeConfig.DOCDB_CREATE_SNAPSHOT} documentdb backup "
                        f"modes are supported - set the documentdb backup mode using the docdb_backup_mode configuration option")

    def backup_from_latest_automated(self, backup_resource: BackupResource):
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        auto_snapshots = docdb_client.describe_db_cluster_snapshots(
            DBClusterIdentifier=backup_resource.entity_id,
            SnapshotType='automated',
            # API always returns in date descending order, and we only need the last one
            MaxRecords=20
        )
        auto_snapshots = sorted(auto_snapshots['DBClusterSnapshots'], key=lambda k: k['SnapshotCreateTime'],
                                reverse=True)

        # if there are no automated snapshots at all, fall back to creating
        # a snapshot directly on the cluster
        if len(auto_snapshots) == 0:
            self.logger.info(f"There is no latest automated backup for cluster {backup_resource.entity_id},"
                             f" falling back to DOCDB_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster...")
            return self.backup_from_cluster(backup_resource)

        automated_snapshot_id = auto_snapshots[0]['DBClusterSnapshotIdentifier']
        response = docdb_client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=automated_snapshot_id,
            TargetDBClusterSnapshotIdentifier=backup_resource.name,
            CopyTags=False
        )
        backup_resource.resource_properties = response['DBClusterSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource

    def backup_from_cluster(self, backup_resource):
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        response = docdb_client.create_db_cluster_snapshot(
            DBClusterSnapshotIdentifier=backup_resource.name,
            DBClusterIdentifier=backup_resource.entity_id
        )
        backup_resource.resource_properties = response['DBClusterSnapshot']
        backup_resource.backup_id = backup_resource.name
        return backup_resource

    def delete_backup(self, backup_resource: BackupResource):
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        docdb_client.delete_db_cluster_snapshot(
            DBClusterSnapshotIdentifier=backup_resource.backup_id
        )

    def tag_backup_resource(self, backup_resource: BackupResource):
        regional_docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_resource.region, arn=self.role_arn,
                                                       external_id=self.role_external_id)
        snapshots = regional_docdb_client.describe_db_cluster_snapshots(
            DBClusterSnapshotIdentifier=backup_resource.backup_id)
        snapshot_arn = snapshots['DBClusterSnapshots'][0]['DBClusterSnapshotArn']
        # commas are not valid in rds/docdb tag values, replace them with spaces
        tags = list(map(lambda k: {'Key': k, 'Value': backup_resource.tags[k].replace(',', ' ')}, backup_resource.tags))
        regional_docdb_client.add_tags_to_resource(
            ResourceName=snapshot_arn,
            Tags=tags
        )

    def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]:
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)

        # collect all snapshots
        all_snapshots = self.collect_all_snapshots(docdb_client)

        # filter ones backed up with shelvery
        all_backups = self.get_shelvery_backups_only(all_snapshots, backup_tag_prefix, docdb_client)

        return all_backups

    def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
        docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn,
                                              external_id=self.role_external_id)
        docdb_client.modify_db_cluster_snapshot_attribute(
            DBClusterSnapshotIdentifier=backup_id,
            AttributeName='restore',
            ValuesToAdd=[aws_account_id]
        )

    def copy_backup_to_region(self, backup_id: str, region: str) -> str:
        local_region = boto3.session.Session().region_name
        client_local = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        docdb_client = AwsHelper.boto3_client('docdb', region_name=region)
        snapshots = client_local.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
        snapshot = snapshots['DBClusterSnapshots'][0]
        docdb_client.copy_db_cluster_snapshot(
            SourceDBClusterSnapshotIdentifier=snapshot['DBClusterSnapshotArn'],
            TargetDBClusterSnapshotIdentifier=backup_id,
            SourceRegion=local_region,
            # tags are created explicitly
            CopyTags=False
        )
        return backup_id

    def create_encrypted_backup(self, backup_id: str, kms_key: str, region: str) -> str:
        return backup_id

    def copy_shared_backup(self, source_account: str, source_backup: BackupResource):
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        # copying of tags happens outside this method
        source_arn = f"arn:aws:rds:{source_backup.region}:{source_backup.account_id}:cluster-snapshot:{source_backup.backup_id}"

        params = {
            'SourceDBClusterSnapshotIdentifier': source_arn,
            'SourceRegion': source_backup.region,
            'CopyTags': False,
            'TargetDBClusterSnapshotIdentifier': source_backup.backup_id
        }

        # If the backup is encrypted, include the KMS key ID in the request.
        if source_backup.resource_properties['StorageEncrypted']:
            kms_key = source_backup.resource_properties['KmsKeyId']
            self.logger.info(f"Snapshot {source_backup.backup_id} is encrypted with the kms key {kms_key}")

            copy_kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.tags, self)
            # if a new key is provided by config, encrypt the copy with the new kms key
            if copy_kms_key:
                self.logger.info(
                    f"Snapshot {source_backup.backup_id} will be copied and encrypted with the kms key {copy_kms_key}")
                kms_key = copy_kms_key

            params['KmsKeyId'] = kms_key
        else:
            # if the backup is not encrypted and encrypt_copy is enabled, encrypt the backup with the provided kms key
            if RuntimeConfig.get_encrypt_copy(source_backup.tags, self):
                kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.tags, self)
                if kms_key is not None:
                    self.logger.info(
                        f"Snapshot {source_backup.backup_id} is not encrypted. Encrypting the copy with KMS key {kms_key}")
                    params['KmsKeyId'] = kms_key

        snap = docdb_client.copy_db_cluster_snapshot(**params)
        return snap['DBClusterSnapshot']['DBClusterSnapshotIdentifier']
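
    # Summary of the copy/encryption decision above:
    # - encrypted source + shelvery_copy_kms_key_id configured -> copy re-encrypted with that key
    # - encrypted source, no copy key configured               -> copy keeps the source KmsKeyId
    # - unencrypted source + shelvery_encrypt_copy enabled     -> copy encrypted with the configured key
    # - unencrypted source otherwise                           -> plain unencrypted copy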

    def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource:
        docdb_client = AwsHelper.boto3_client('docdb', region_name=backup_region, arn=self.role_arn,
                                              external_id=self.role_external_id)
        snapshots = docdb_client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=backup_id)
        snapshot = snapshots['DBClusterSnapshots'][0]
        tags = docdb_client.list_tags_for_resource(ResourceName=snapshot['DBClusterSnapshotArn']).get('TagList', [])
        d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
        resource = BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags)
        resource.resource_properties = snapshot
        return resource

    def get_engine_type(self) -> str:
        return 'docdb'

    def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]:
        # region and api client
        local_region = boto3.session.Session().region_name
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)

        # list of models returned from api
        db_cluster_entities = []

        db_clusters = self.get_all_clusters(docdb_client)

        # collect tags and check whether the cluster is tagged with the marker tag
        for instance in db_clusters:
            tags = docdb_client.list_tags_for_resource(ResourceName=instance['DBClusterArn']).get('TagList', [])

            # convert api response to dictionary
            d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))

            # check if marker tag is present
            if tag_name in d_tags and d_tags[tag_name] in SHELVERY_DO_BACKUP_TAGS:
                resource = EntityResource(instance['DBClusterIdentifier'],
                                          local_region,
                                          instance['ClusterCreateTime'],
                                          d_tags)
                db_cluster_entities.append(resource)

        return db_cluster_entities

    def get_all_clusters(self, docdb_client):
        """
        Get all DocumentDb clusters within region for given boto3 client
        :param docdb_client: boto3 DocumentDb service
        :return: all DocumentDb clusters within region for given boto3 client
        """
        # list of resource models
        db_clusters = []
        # temporary list of api models, as calls are batched
        engine_filter = [{'Name': 'engine', 'Values': ['docdb']}]
        temp_clusters = docdb_client.describe_db_clusters(Filters=engine_filter)
        db_clusters.extend(temp_clusters['DBClusters'])
        # collect remaining pages, keeping the engine filter so that paginated
        # results do not start including non-docdb clusters
        while 'Marker' in temp_clusters:
            temp_clusters = docdb_client.describe_db_clusters(Filters=engine_filter, Marker=temp_clusters['Marker'])
            db_clusters.extend(temp_clusters['DBClusters'])

        return db_clusters

    def get_shelvery_backups_only(self, all_snapshots, backup_tag_prefix, docdb_client):
        """
        :param all_snapshots: all snapshots within region
        :param backup_tag_prefix: prefix of shelvery backup system
        :param docdb_client: amazon boto3 DocumentDb client
        :return: snapshots created using shelvery
        """
        all_backups = []
        marker_tag = f"{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}"
        for snap in all_snapshots:
            tags = docdb_client.list_tags_for_resource(ResourceName=snap['DBClusterSnapshotArn']).get('TagList', [])
            self.logger.info(f"Checking DocumentDb Snapshot {snap['DBClusterSnapshotIdentifier']}")
            d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
            if marker_tag in d_tags:
                if d_tags[marker_tag] in SHELVERY_DO_BACKUP_TAGS:
                    backup_resource = BackupResource.construct(backup_tag_prefix, snap['DBClusterSnapshotIdentifier'],
                                                               d_tags)
                    backup_resource.entity_resource = snap['EntityResource']
                    backup_resource.entity_id = snap['EntityResource'].resource_id

                    all_backups.append(backup_resource)

        return all_backups

    def collect_all_snapshots(self, docdb_client):
        """
        :param docdb_client:
        :return: All snapshots within region for docdb_client
        """
        all_snapshots = []

        self.logger.info("Collecting DB cluster snapshots...")
        tmp_snapshots = docdb_client.describe_db_cluster_snapshots(SnapshotType='manual')
        all_snapshots.extend(tmp_snapshots['DBClusterSnapshots'])

        while 'Marker' in tmp_snapshots:
            self.logger.info(
                f"Collected {len(tmp_snapshots['DBClusterSnapshots'])} manual snapshots. Continuing collection...")
            tmp_snapshots = docdb_client.describe_db_cluster_snapshots(SnapshotType='manual',
                                                                       Marker=tmp_snapshots['Marker'])
            all_snapshots.extend(tmp_snapshots['DBClusterSnapshots'])

        self.logger.info(f"Collected {len(all_snapshots)} manual snapshots.")
        self.populate_snap_entity_resource(all_snapshots)

        return all_snapshots

    def populate_snap_entity_resource(self, all_snapshots):
        cluster_ids = []

        for snap in all_snapshots:
            if snap['DBClusterIdentifier'] not in cluster_ids:
                cluster_ids.append(snap['DBClusterIdentifier'])

        entities = {}
        docdb_client = AwsHelper.boto3_client('docdb', arn=self.role_arn, external_id=self.role_external_id)
        local_region = boto3.session.Session().region_name

        for cluster_id in cluster_ids:
            try:
                self.logger.info(f"Collecting tags from DB cluster {cluster_id} ...")
                docdb_instance = docdb_client.describe_db_clusters(DBClusterIdentifier=cluster_id)['DBClusters'][0]
                tags = docdb_client.list_tags_for_resource(ResourceName=docdb_instance['DBClusterArn']).get('TagList', [])
                d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags))
                docdb_entity = EntityResource(cluster_id,
                                              local_region,
                                              docdb_instance['ClusterCreateTime'],
                                              d_tags)
                entities[cluster_id] = docdb_entity
            except ClientError as e:
                if 'DBClusterNotFoundFault' in str(type(e)):
                    entities[cluster_id] = EntityResource.empty()
                    entities[cluster_id].resource_id = cluster_id
                else:
                    raise e

        for snap in all_snapshots:
            if snap['DBClusterIdentifier'] in entities:
                snap['EntityResource'] = entities[snap['DBClusterIdentifier']]

--------------------------------------------------------------------------------
/shelvery/ebs_backup.py:
--------------------------------------------------------------------------------
import boto3

from typing import List

from botocore.exceptions import ClientError
from shelvery.aws_helper import AwsHelper
from shelvery.engine import SHELVERY_DO_BACKUP_TAGS
from shelvery.ec2_backup import ShelveryEC2Backup
from shelvery.entity_resource import EntityResource
from shelvery.backup_resource import BackupResource


class ShelveryEBSBackup(ShelveryEC2Backup):
    """Shelvery engine implementation for EBS data backups"""

    def __init__(self):
        ShelveryEC2Backup.__init__(self)

    def delete_backup(self, backup_resource: BackupResource):
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        ec2client.delete_snapshot(SnapshotId=backup_resource.backup_id)

    def get_existing_backups(self, tag_prefix: str) -> List[BackupResource]:
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        # lookup snapshots by tags
        snapshots = ec2client.describe_snapshots(Filters=[
            {'Name': f"tag:{tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}", 'Values': ['true']}
        ])
        backups = []

        # create backup resource objects
        for snap in snapshots['Snapshots']:
            snap_tags = dict(map(lambda t: (t['Key'], t['Value']), snap['Tags']))
            if f"{tag_prefix}:ami_id" in snap_tags:
                self.logger.info(f"EBS snapshot {snap['SnapshotId']} created by AMI shelvery backup, skipping...")
                continue

            backup = BackupResource.construct(
                tag_prefix=tag_prefix,
                backup_id=snap['SnapshotId'],
                tags=snap_tags
            )
            # legacy code - entity id should be picked up from tags
            if backup.entity_id is None:
                self.logger.info(f"Backup entity id not found in tags, using VolumeId {snap['VolumeId']}")
                backup.entity_id = snap['VolumeId']
            backups.append(backup)

        self.populate_volume_information(backups)

        return backups

    def get_engine_type(self) -> str:
        return 'ebs'

    def get_resource_type(self) -> str:
        return 'ec2 volume'

    def backup_resource(self, backup_resource: BackupResource) -> BackupResource:
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        # create snapshot
        snap = ec2client.create_snapshot(
            VolumeId=backup_resource.entity_id,
            Description=backup_resource.name
        )
        backup_resource.backup_id = snap['SnapshotId']
        return backup_resource

    def get_backup_resource(self, region: str, backup_id: str) -> BackupResource:
        ec2 = AwsHelper.boto3_session('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id)
        snapshot = ec2.Snapshot(backup_id)
        d_tags = dict(map(lambda t: (t['Key'], t['Value']), snapshot.tags))
        return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags)

    def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]:
        volumes = self.collect_volumes(tag_name)
        return list(
            map(
                lambda vol: EntityResource(
                    resource_id=vol['VolumeId'],
                    resource_region=self.region,
                    date_created=vol['CreateTime'],
                    tags=dict(map(lambda t: (t['Key'], t['Value']), vol['Tags']))
                ),
                volumes
            )
        )

    def is_backup_available(self, region: str, backup_id: str) -> bool:
        try:
            regional_client = AwsHelper.boto3_client('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id)
            snapshot = regional_client.describe_snapshots(SnapshotIds=[backup_id])['Snapshots'][0]
            complete = snapshot['State'] == 'completed'
            self.logger.info(f"{backup_id} is {snapshot['Progress']} complete")
            return complete
        except Exception as e:
            self.logger.warning(f"Problem getting status of ec2 snapshot {backup_id}: {e}")
            return False

    def copy_backup_to_region(self, backup_id: str, region: str):
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        snapshot = ec2client.describe_snapshots(SnapshotIds=[backup_id])['Snapshots'][0]
        regional_client = AwsHelper.boto3_client('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id)
        copy_snapshot_response = regional_client.copy_snapshot(SourceSnapshotId=backup_id,
                                                               SourceRegion=ec2client._client_config.region_name,
                                                               DestinationRegion=region,
                                                               Description=snapshot['Description'])

        # return id of newly created snapshot in dr region
        return copy_snapshot_response['SnapshotId']

    def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
        ec2 = AwsHelper.boto3_session('ec2', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id)
        snapshot = ec2.Snapshot(backup_id)
        snapshot.modify_attribute(Attribute='createVolumePermission',
                                  CreateVolumePermission={
                                      'Add': [{'UserId': aws_account_id}]
                                  },
                                  UserIds=[aws_account_id],
                                  OperationType='add')

    def copy_shared_backup(self, source_account: str, source_backup: BackupResource):
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        snap = ec2client.copy_snapshot(
            SourceSnapshotId=source_backup.backup_id,
            SourceRegion=source_backup.region
        )
        return snap['SnapshotId']

    def create_encrypted_backup(self, backup_id: str, kms_key: str, region: str) -> str:
        return backup_id

    # collect all volumes tagged with given tag, in paginated manner
    def collect_volumes(self, tag_name: str):
        load_volumes = True
        next_token = ''
        all_volumes = []
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        while load_volumes:
            tagged_volumes = ec2client.describe_volumes(
                Filters=[{'Name': f"tag:{tag_name}", 'Values': SHELVERY_DO_BACKUP_TAGS}],
                NextToken=next_token
            )
            all_volumes = all_volumes + tagged_volumes['Volumes']
            if 'NextToken' in tagged_volumes and len(tagged_volumes['NextToken']) > 0:
                load_volumes = True
                next_token = tagged_volumes['NextToken']
            else:
                load_volumes = False

        return all_volumes

    def populate_volume_information(self, backups):
        volume_ids = []
        volumes = {}
        ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id)
        local_region = boto3.session.Session().region_name

        # create list of all volume ids
        for backup in backups:
            if backup.entity_id not in volume_ids:
                volume_ids.append(backup.entity_id)

        # populate map volumeid->volume if present
        for volume_id in volume_ids:
            try:
                volume = ec2client.describe_volumes(VolumeIds=[volume_id])['Volumes'][0]
                d_tags = dict(map(lambda t: (t['Key'], t['Value']), volume['Tags']))
                volumes[volume_id] = EntityResource(volume_id, local_region, volume['CreateTime'], d_tags)
            except ClientError as e:
                if 'InvalidVolume.NotFound' in str(e):
                    volumes[volume_id] = EntityResource.empty()
                    volumes[volume_id].resource_id = volume_id
                else:
                    raise e

        # add info to backup resource objects
        for backup in backups:
            if backup.entity_id in volumes:
                backup.entity_resource = volumes[backup.entity_id]
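
# Illustrative usage (create_backups/clean_backups/pull_shared_backups are
# provided by the ShelveryEngine base class in engine.py, not shown in this
# file; the action names match the lambda inputs in serverless.yml):
#
#     engine = ShelveryEBSBackup()
#     engine.create_backups()  # snapshot all volumes carrying the shelvery backup tag
#     engine.clean_backups()   # delete snapshots whose retention period has expired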

--------------------------------------------------------------------------------
/shelvery/ec2_backup.py:
--------------------------------------------------------------------------------
import boto3

from shelvery.backup_resource import BackupResource
from shelvery.engine import ShelveryEngine
from shelvery.entity_resource import EntityResource
from shelvery.aws_helper import AwsHelper
from typing import Dict, List


class ShelveryEC2Backup(ShelveryEngine):
    """Parent class sharing common functionality for AMI and EBS backups"""

    def __init__(self):
        ShelveryEngine.__init__(self)
        # default region will be picked up in AwsHelper.boto3_client call
        self.region = boto3.session.Session().region_name

    def tag_backup_resource(self, backup_resource: BackupResource):
        regional_client = AwsHelper.boto3_client('ec2', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id)
        regional_client.create_tags(
            Resources=[backup_resource.backup_id],
            Tags=list(map(lambda k: {'Key': k, 'Value': backup_resource.tags[k]}, backup_resource.tags))
        )

    def delete_backup(self, backup_resource: BackupResource):
        pass

    def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]:
        pass

    def get_resource_type(self) -> str:
        pass

    def backup_resource(self, backup_resource: BackupResource):
        pass

    def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]:
        pass

    def is_backup_available(self, backup_region: str, backup_id: str) -> bool:
        pass

    def copy_backup_to_region(self, backup_id: str, region: str) -> str:
        pass

    def get_backup_resource(self, region: str, backup_id: str) -> BackupResource:
        pass

    def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str):
        pass
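
# Note: the empty method bodies above act as placeholders - ShelveryEBSBackup
# and ShelveryEC2AMIBackup subclass ShelveryEC2Backup and override them with
# the EBS snapshot and AMI specific implementations respectively.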
{'Name': f"tag:{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}", 'Values': ['true']} 35 | ])['Images'] 36 | backups = [] 37 | instances = dict(map( 38 | lambda x: (x.resource_id, x), 39 | self._get_all_entities() 40 | )) 41 | for ami in amis: 42 | backup = BackupResource.construct(backup_tag_prefix, 43 | ami['ImageId'], 44 | dict(map(lambda x: (x['Key'], x['Value']), ami['Tags']))) 45 | 46 | if backup.entity_id in instances: 47 | backup.entity_resource = instances[backup.entity_id] 48 | 49 | backups.append(backup) 50 | 51 | return backups 52 | 53 | def get_resource_type(self) -> str: 54 | return 'Amazon Machine Image' 55 | 56 | def get_engine_type(self) -> str: 57 | return 'ec2ami' 58 | 59 | def copy_shared_backup(self, source_account: str, source_backup: BackupResource): 60 | ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id) 61 | ami = ec2client.copy_image( 62 | ClientToken=f"{AwsHelper.local_account_id()}{source_account}{source_backup.backup_id}", 63 | SourceImageId=source_backup.backup_id, 64 | SourceRegion=source_backup.region, 65 | Name=source_backup.backup_id 66 | ) 67 | return ami['ImageId'] 68 | 69 | def backup_resource(self, backup_resource: BackupResource): 70 | regional_client = AwsHelper.boto3_client('ec2', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 71 | ami = regional_client.create_image( 72 | NoReboot=True, 73 | Name=backup_resource.name, 74 | Description=f"Shelvery created backup for {backup_resource.entity_id}", 75 | InstanceId=backup_resource.entity_id, 76 | 77 | ) 78 | backup_resource.backup_id = ami['ImageId'] 79 | return backup_resource 80 | 81 | def tag_backup_resource(self, backup_resource: BackupResource): 82 | regional_client = AwsHelper.boto3_client('ec2', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 83 | regional_client.create_tags( 84 | Resources=[backup_resource.backup_id], 85 | Tags=list(map(lambda k: {'Key': k, 'Value': backup_resource.tags[k]}, backup_resource.tags)) 86 | ) 87 | snapshots = self._get_snapshots_from_ami(backup_resource) 88 | # tag all snapshots associated with the ami 89 | backup_resource.tags[f"{backup_resource.tags['shelvery:tag_name']}:ami_id"] = backup_resource.backup_id 90 | self.logger.info(f"Tagging {len(snapshots)} AMI snapshots: {snapshots}") 91 | regional_client.create_tags( 92 | Resources=snapshots, 93 | Tags=list(map(lambda k: {'Key': k, 'Value': backup_resource.tags[k]}, backup_resource.tags)) 94 | ) 95 | 96 | def _get_snapshots_from_ami(self, backup_resource: BackupResource, retry=0): 97 | # we're going to sleep here real quick because it always takes half a second for the snapshots to become available 98 | sleep(0.5) 99 | regional_client = AwsHelper.boto3_client('ec2', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 100 | response = regional_client.describe_images( 101 | ImageIds=[backup_resource.backup_id] 102 | ) 103 | 104 | block_device_mappings = 0 105 | snapshots = [] 106 | for image in response['Images']: 107 | if 'BlockDeviceMappings' in image: 108 | block_device_mappings = len(image['BlockDeviceMappings']) 109 | self.logger.info(f"Found {block_device_mappings} Block Device Mappings for {backup_resource.backup_id}") 110 | for bdm in image['BlockDeviceMappings']: 111 | if 'Ebs' in bdm: 112 | if 'SnapshotId' in bdm['Ebs']: 113 | snapshots.append(bdm['Ebs']['SnapshotId']) 114 | 115 | # Then we'll retry a few times here if it takes a bit 
longer 116 | if len(snapshots) < block_device_mappings and retry < 3: 117 | retry += 1 118 | self.logger.info(f"Not all snapshots created yet, will try again. Retry count {retry}") 119 | sleep(0.5) 120 | snapshots = self._get_snapshots_from_ami(backup_resource, retry=retry) 121 | 122 | return snapshots 123 | 124 | def _get_all_entities(self) -> List[EntityResource]: 125 | ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id) 126 | instances = ec2client.describe_instances() 127 | while 'NextToken' in instances: 128 | page = ec2client.describe_instances(NextToken=instances.pop('NextToken'))  # pop the token so the loop can terminate 129 | instances['Reservations'].extend(page['Reservations']) 130 | if 'NextToken' in page: instances['NextToken'] = page['NextToken'] 131 | return self._convert_instances_to_entities(instances) 132 | 133 | def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]: 134 | ec2client = AwsHelper.boto3_client('ec2', arn=self.role_arn, external_id=self.role_external_id) 135 | instances = ec2client.describe_instances( 136 | Filters=[ 137 | { 138 | 'Name': f"tag:{tag_name}", 139 | 'Values': SHELVERY_DO_BACKUP_TAGS 140 | } 141 | ] 142 | ) 143 | while 'NextToken' in instances: 144 | page = ec2client.describe_instances( 145 | Filters=[ 146 | {'Name': f"tag:{tag_name}", 'Values': SHELVERY_DO_BACKUP_TAGS} 147 | ], 148 | NextToken=instances.pop('NextToken') 149 | ) 150 | instances['Reservations'].extend(page['Reservations']) 151 | if 'NextToken' in page: 152 | instances['NextToken'] = page['NextToken'] 153 | 154 | return self._convert_instances_to_entities(instances) 155 | 156 | @staticmethod 157 | def _convert_instances_to_entities(instances): 158 | """ 159 | Params: 160 | instances: the describe-instances response dict, containing a 'Reservations' list 161 | """ 162 | local_region = boto3.session.Session().region_name 163 | 164 | entities = [] 165 | for reservation in instances['Reservations']: 166 | for instance in reservation['Instances']: 167 | tags = {} 168 | if 'Tags' in instance: 169 | tags = dict(map(lambda tag: (tag['Key'], tag['Value']), instance['Tags'])) 170 | entities.append(EntityResource(resource_id=instance['InstanceId'], resource_region=local_region, date_created=instance['LaunchTime'], tags=tags)) 171 | 172 | return entities 173 | 174 | def is_backup_available(self, backup_region: str, backup_id: str) -> bool: 175 | regional_client = AwsHelper.boto3_client('ec2', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 176 | ami = regional_client.describe_images(ImageIds=[backup_id]) 177 | if len(ami['Images']) > 0: 178 | return ami['Images'][0]['State'] == 'available' 179 | 180 | return False 181 | 182 | def copy_backup_to_region(self, backup_id: str, region: str) -> str: 183 | local_region = boto3.session.Session().region_name 184 | local_client = AwsHelper.boto3_client('ec2', region_name=local_region, arn=self.role_arn, external_id=self.role_external_id) 185 | regional_client = AwsHelper.boto3_client('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id) 186 | ami = local_client.describe_images(ImageIds=[backup_id])['Images'][0] 187 | idempotency_token = f"shelverycopy{backup_id.replace('-','')}to{region.replace('-','')}" 188 | return regional_client.copy_image(Name=ami['Name'], 189 | ClientToken=idempotency_token, 190 | Description=f"Shelvery copy of {backup_id} to {region} from {local_region}", 191 | SourceImageId=backup_id, 192 | SourceRegion=local_region 193 | )['ImageId'] 194 | 195 | def get_backup_resource(self, region: str, backup_id: str) -> BackupResource: 196 | ec2client = AwsHelper.boto3_client('ec2', region_name=region, arn=self.role_arn, external_id=self.role_external_id) 197 | ami = 
ec2client.describe_images(ImageIds=[backup_id])['Images'][0] 198 | 199 | d_tags = dict(map(lambda x: (x['Key'], x['Value']), ami['Tags'])) 200 | backup_tag_prefix = d_tags['shelvery:tag_name'] 201 | 202 | backup = BackupResource.construct(backup_tag_prefix, backup_id, d_tags) 203 | return backup 204 | 205 | def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str): 206 | ec2 = AwsHelper.boto3_session('ec2', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 207 | image = ec2.Image(backup_id) 208 | image.modify_attribute(Attribute='launchPermission', 209 | LaunchPermission={ 210 | 'Add': [{'UserId': aws_account_id}] 211 | }, 212 | UserIds=[aws_account_id], 213 | OperationType='add') 214 | for bdm in image.block_device_mappings: 215 | if 'Ebs' in bdm: 216 | snap_id = bdm['Ebs']['SnapshotId'] 217 | snapshot = ec2.Snapshot(snap_id) 218 | snapshot.modify_attribute(Attribute='createVolumePermission', 219 | CreateVolumePermission={ 220 | 'Add': [{'UserId': aws_account_id}] 221 | }, 222 | UserIds=[aws_account_id], 223 | OperationType='add') 224 | 225 | def create_encrypted_backup(self, backup_id: str, kms_key: str, region: str) -> str: 226 | return backup_id 227 | -------------------------------------------------------------------------------- /shelvery/entity_resource.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Dict 3 | import boto3 4 | 5 | 6 | class EntityResource: 7 | """Represents entity such as ec2 volume, instance or rds instance""" 8 | 9 | def __init__(self, resource_id: str, resource_region: str, date_created: datetime, tags: Dict): 10 | self.resource_id = resource_id 11 | self.date_created = date_created 12 | self.tags = tags 13 | self.resource_region = resource_region 14 | 15 | @classmethod 16 | def empty(cls): 17 | local_region = boto3.session.Session().region_name 18 | resource = EntityResource(None, local_region, None, {}) 19 | return resource 20 | -------------------------------------------------------------------------------- /shelvery/factory.py: -------------------------------------------------------------------------------- 1 | from shelvery.ebs_backup import ShelveryEBSBackup 2 | from shelvery.engine import ShelveryEngine 3 | from shelvery.rds_backup import ShelveryRDSBackup 4 | from shelvery.rds_cluster_backup import ShelveryRDSClusterBackup 5 | from shelvery.ec2ami_backup import ShelveryEC2AMIBackup 6 | from shelvery.redshift_backup import ShelveryRedshiftBackup 7 | from shelvery.documentdb_backup import ShelveryDocumentDbBackup 8 | 9 | class ShelveryFactory: 10 | 11 | @classmethod 12 | def get_shelvery_instance(cls, type: str) -> ShelveryEngine: 13 | if type == 'ebs': 14 | return ShelveryEBSBackup() 15 | 16 | if type == 'rds': 17 | return ShelveryRDSBackup() 18 | 19 | if type == 'rds_cluster': 20 | return ShelveryRDSClusterBackup() 21 | 22 | if type == 'ec2ami': 23 | return ShelveryEC2AMIBackup() 24 | 25 | if type == 'redshift': 26 | return ShelveryRedshiftBackup() 27 | 28 | if type == 'docdb': 29 | return ShelveryDocumentDbBackup() 30 | -------------------------------------------------------------------------------- /shelvery/notifications.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import logging 4 | from shelvery.aws_helper import AwsHelper 5 | from datetime import datetime 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class 
ShelveryNotification: 11 | 12 | def __init__(self, topic_arn): 13 | self.topic_arn = topic_arn 14 | logger.info("Initialized notification service") 15 | self.sns = AwsHelper.boto3_client('sns') 16 | 17 | def notify(self, message): 18 | if isinstance(message, dict): 19 | message['Timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC") 20 | message = json.dumps(message) 21 | 22 | if self.topic_arn is not None and self.topic_arn.startswith('arn:aws:sns'): 23 | try: 24 | self.sns.publish( 25 | TopicArn=self.topic_arn, 26 | Message=message 27 | ) 28 | except: 29 | logger.exception('Failed publishing to SNS Topic') 30 | logger.error(f"Message:{message}") 31 | -------------------------------------------------------------------------------- /shelvery/queue.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import logging 4 | from shelvery.aws_helper import AwsHelper 5 | from datetime import datetime 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class ShelveryQueue: 11 | 12 | def __init__(self, queue_url, wait_period): 13 | self.queue_url = queue_url 14 | # Max wait time is 900, if is set to greater, set value to 900 15 | self.wait_period = int(wait_period) if int(wait_period) < 900 else 900 16 | logger.info(f"Initialized sqs service with message delay of {self.wait_period} seconds") 17 | self.sqs = AwsHelper.boto3_client('sqs') 18 | 19 | def send(self, message): 20 | if isinstance(message, dict): 21 | message['Timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC") 22 | message = json.dumps(message) 23 | 24 | if self.queue_url is not None: 25 | try: 26 | response = self.sqs.send_message( 27 | QueueUrl=self.queue_url, 28 | DelaySeconds=self.wait_period, 29 | MessageBody=message 30 | ) 31 | except: 32 | logger.exception('Failed to send message to sqs queue') 33 | logger.error(f"Message:{message}") 34 | -------------------------------------------------------------------------------- /shelvery/rds_backup.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | 3 | from shelvery.runtime_config import RuntimeConfig 4 | from shelvery.backup_resource import BackupResource 5 | from shelvery.engine import ShelveryEngine, SHELVERY_DO_BACKUP_TAGS 6 | from shelvery.entity_resource import EntityResource 7 | 8 | from typing import Dict, List 9 | from botocore.errorfactory import ClientError 10 | from shelvery.aws_helper import AwsHelper 11 | 12 | class ShelveryRDSBackup(ShelveryEngine): 13 | def is_backup_available(self, backup_region: str, backup_id: str) -> bool: 14 | rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 15 | snapshots = rds_client.describe_db_snapshots(DBSnapshotIdentifier=backup_id) 16 | return snapshots['DBSnapshots'][0]['Status'] == 'available' 17 | 18 | def get_resource_type(self) -> str: 19 | return 'RDS Instance' 20 | 21 | def backup_resource(self, backup_resource: BackupResource) -> BackupResource: 22 | if RuntimeConfig.get_rds_mode(backup_resource.entity_resource.tags, self) == RuntimeConfig.RDS_CREATE_SNAPSHOT: 23 | return self.backup_from_instance(backup_resource) 24 | if RuntimeConfig.get_rds_mode(backup_resource.entity_resource.tags, 25 | self) == RuntimeConfig.RDS_COPY_AUTOMATED_SNAPSHOT: 26 | return self.backup_from_latest_automated(backup_resource) 27 | 28 | raise Exception(f"Only {RuntimeConfig.RDS_COPY_AUTOMATED_SNAPSHOT} and " 29 | 
f"{RuntimeConfig.RDS_CREATE_SNAPSHOT} rds backup " 30 | f"modes supported - set rds backup mode using rds_backup_mode configuration option ") 31 | 32 | def backup_from_latest_automated(self, backup_resource: BackupResource): 33 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 34 | response = rds_client.describe_db_snapshots( 35 | DBInstanceIdentifier=backup_resource.entity_id, 36 | SnapshotType='automated', 37 | # API always returns in date descending order, and we only need last one 38 | MaxRecords=20 39 | ) 40 | # filter out any snapshots that could be in progress 41 | available_snapshots = [snap for snap in response['DBSnapshots'] if snap['Status'] == 'available'] 42 | auto_snapshots = sorted(available_snapshots, key=lambda k: k['SnapshotCreateTime'], reverse=True) 43 | 44 | if len(auto_snapshots) == 0: 45 | self.logger.info(f"There is no latest automated backup for cluster {backup_resource.entity_id}," 46 | f" fallback to RDS_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster...") 47 | return self.backup_from_instance(backup_resource) 48 | 49 | automated_snapshot_id = auto_snapshots[0]['DBSnapshotIdentifier'] 50 | response = rds_client.copy_db_snapshot( 51 | SourceDBSnapshotIdentifier=automated_snapshot_id, 52 | TargetDBSnapshotIdentifier=backup_resource.name, 53 | CopyTags=False 54 | ) 55 | backup_resource.resource_properties = response['DBSnapshot'] 56 | backup_resource.backup_id = backup_resource.name 57 | return backup_resource 58 | 59 | def backup_from_instance(self, backup_resource): 60 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 61 | response = rds_client.create_db_snapshot( 62 | DBSnapshotIdentifier=backup_resource.name, 63 | DBInstanceIdentifier=backup_resource.entity_id 64 | ) 65 | backup_resource.resource_properties = response['DBSnapshot'] 66 | backup_resource.backup_id = backup_resource.name 67 | return backup_resource 68 | 69 | def delete_backup(self, backup_resource: BackupResource): 70 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn) 71 | rds_client.delete_db_snapshot( 72 | DBSnapshotIdentifier=backup_resource.backup_id 73 | ) 74 | 75 | def tag_backup_resource(self, backup_resource: BackupResource): 76 | regional_rds_client = AwsHelper.boto3_client('rds', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 77 | snapshots = regional_rds_client.describe_db_snapshots(DBSnapshotIdentifier=backup_resource.backup_id) 78 | snapshot_arn = snapshots['DBSnapshots'][0]['DBSnapshotArn'] 79 | regional_rds_client.add_tags_to_resource( 80 | ResourceName=snapshot_arn, 81 | Tags=list( 82 | map(lambda k: {'Key': k, 'Value': backup_resource.tags[k].replace(',', ' ')}, backup_resource.tags)) 83 | ) 84 | 85 | def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]: 86 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 87 | 88 | # collect all snapshots 89 | all_snapshots = self.collect_all_snapshots(rds_client) 90 | 91 | # filter ones backed up with shelvery 92 | all_backups = self.get_shelvery_backups_only(all_snapshots, backup_tag_prefix, rds_client) 93 | 94 | return all_backups 95 | 96 | def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str): 97 | rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 98 | rds_client.modify_db_snapshot_attribute( 99 
| DBSnapshotIdentifier=backup_id, 100 | AttributeName='restore', 101 | ValuesToAdd=[aws_account_id] 102 | ) 103 | 104 | def copy_backup_to_region(self, backup_id: str, region: str) -> str: 105 | local_region = boto3.session.Session().region_name 106 | client_local = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 107 | rds_client = AwsHelper.boto3_client('rds', region_name=region, arn=self.role_arn, external_id=self.role_external_id) 108 | snapshots = client_local.describe_db_snapshots(DBSnapshotIdentifier=backup_id) 109 | snapshot = snapshots['DBSnapshots'][0] 110 | rds_client.copy_db_snapshot( 111 | SourceDBSnapshotIdentifier=snapshot['DBSnapshotArn'], 112 | TargetDBSnapshotIdentifier=backup_id, 113 | SourceRegion=local_region, 114 | # tags are created explicitly 115 | CopyTags=False 116 | ) 117 | return backup_id 118 | 119 | def snapshot_exists(self, client, backup_id): 120 | try: 121 | response = client.describe_db_snapshots(DBSnapshotIdentifier=backup_id) 122 | snapshots = response.get('DBSnapshots', []) 123 | return bool(snapshots) 124 | except ClientError as e: 125 | if e.response['Error']['Code'] == 'DBSnapshotNotFound' or e.response['Error']['Code'] == 'DBSnapshotNotFoundFault': 126 | return False 127 | else: 128 | print(e.response['Error']['Code']) 129 | raise e 130 | 131 | def create_encrypted_backup(self, backup_id: str, kms_key: str, region: str) -> str: 132 | local_region = boto3.session.Session().region_name 133 | client_local = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 134 | rds_client = AwsHelper.boto3_client('rds', region_name=region, arn=self.role_arn, external_id=self.role_external_id) 135 | snapshots = client_local.describe_db_snapshots(DBSnapshotIdentifier=backup_id) 136 | snapshot = snapshots['DBSnapshots'][0] 137 | backup_id = f'{backup_id}-re-encrypted' 138 | 139 | if self.snapshot_exists(rds_client, backup_id): 140 | return backup_id 141 | 142 | rds_client_params = { 143 | 'SourceDBSnapshotIdentifier': snapshot['DBSnapshotArn'], 144 | 'TargetDBSnapshotIdentifier': backup_id, 145 | 'SourceRegion': local_region, 146 | 'CopyTags': True, 147 | 'KmsKeyId': kms_key, 148 | } 149 | rds_client.copy_db_snapshot(**rds_client_params) 150 | return backup_id 151 | 152 | def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource: 153 | rds_client = AwsHelper.boto3_client('rds', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 154 | snapshots = rds_client.describe_db_snapshots(DBSnapshotIdentifier=backup_id) 155 | snapshot = snapshots['DBSnapshots'][0] 156 | tags = snapshot.get('TagList', []) 157 | d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags)) 158 | resource = BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags) 159 | resource.resource_properties = snapshot 160 | return resource 161 | 162 | def get_engine_type(self) -> str: 163 | return 'rds' 164 | 165 | def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]: 166 | # region and api client 167 | local_region = boto3.session.Session().region_name 168 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 169 | 170 | # list of models returned from api 171 | db_entities = [] 172 | 173 | db_instances = self.get_all_instances(rds_client) 174 | 175 | for instance in db_instances: 176 | # collect tags in check if instance tagged with marker tag 177 | tags = instance.get('TagList', []) 178 | # convert api response 
to dictionary 179 | d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags)) 180 | if 'DBClusterIdentifier' in instance: 181 | self.logger.info(f"Skipping RDS Instance {instance['DBInstanceIdentifier']} as it is part" 182 | f" of cluster {instance['DBClusterIdentifier']}") 183 | continue 184 | 185 | # check if marker tag is present 186 | if tag_name in d_tags and d_tags[tag_name] in SHELVERY_DO_BACKUP_TAGS: 187 | resource = EntityResource(instance['DBInstanceIdentifier'], 188 | local_region, 189 | instance['InstanceCreateTime'], 190 | d_tags) 191 | db_entities.append(resource) 192 | 193 | return db_entities 194 | 195 | def get_all_instances(self, rds_client): 196 | """ 197 | Get all RDS instances within region for given boto3 client 198 | :param rds_client: boto3 rds service 199 | :return: all RDS instances within region for given boto3 client 200 | """ 201 | # list of resource models 202 | db_instances = [] 203 | # temporary list of api models, as calls are batched 204 | temp_instances = rds_client.describe_db_instances() 205 | db_instances.extend(temp_instances['DBInstances']) 206 | # collect database instances 207 | while 'Marker' in temp_instances: 208 | temp_instances = rds_client.describe_db_instances(Marker=temp_instances['Marker']) 209 | db_instances.extend(temp_instances['DBInstances']) 210 | 211 | return db_instances 212 | 213 | def get_shelvery_backups_only(self, all_snapshots, backup_tag_prefix, rds_client): 214 | """ 215 | :param all_snapshots: all snapshots within region 216 | :param backup_tag_prefix: prefix of shelvery backup system 217 | :param rds_client: amazon boto3 rds client 218 | :return: snapshots created using shelvery 219 | """ 220 | all_backups = [] 221 | marker_tag = f"{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}" 222 | 223 | for snap in all_snapshots: 224 | #collect tags 225 | tags = snap.get('TagList', []) 226 | d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags)) 227 | self.logger.info(f"Checking RDS Snap {snap['DBSnapshotIdentifier']}") 228 | 229 | if marker_tag in d_tags: 230 | if d_tags[marker_tag] in SHELVERY_DO_BACKUP_TAGS: 231 | backup_resource = BackupResource.construct(backup_tag_prefix, snap['DBSnapshotIdentifier'], d_tags) 232 | backup_resource.entity_resource = snap['EntityResource'] 233 | backup_resource.entity_id = snap['EntityResource'].resource_id 234 | 235 | all_backups.append(backup_resource) 236 | 237 | return all_backups 238 | 239 | def copy_shared_backup(self, source_account: str, source_backup: BackupResource): 240 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 241 | # copying of tags happens outside this method 242 | source_arn = f"arn:aws:rds:{source_backup.region}:{source_backup.account_id}:snapshot:{source_backup.backup_id}" 243 | 244 | params = { 245 | 'SourceDBSnapshotIdentifier': source_arn, 246 | 'SourceRegion': source_backup.region, 247 | 'CopyTags': False, 248 | 'TargetDBSnapshotIdentifier': source_backup.backup_id 249 | } 250 | 251 | # If the backup is encrypted, include the KMS key ID in the request. 
252 | # We have to check the attribute to support our previous YAML file format for backup data stored in S3 253 | if hasattr(source_backup, 'resource_properties') and source_backup.resource_properties['Encrypted']: 254 | kms_key = source_backup.resource_properties['KmsKeyId'] 255 | self.logger.info(f"Snapshot {source_backup.backup_id} is encrypted with the kms key {kms_key}") 256 | 257 | copy_kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.tags, self) 258 | # if a new key is provided by config, encrypt the copy with the new kms key 259 | if copy_kms_key is not None: 260 | self.logger.info(f"Snapshot {source_backup.backup_id} will be copied and encrypted with the kms key {copy_kms_key}") 261 | kms_key = copy_kms_key 262 | 263 | params['KmsKeyId'] = kms_key 264 | else: 265 | # if the backup is not encrypted and encrypt_copy is enabled, encrypt the backup with the provided kms key 266 | if RuntimeConfig.get_encrypt_copy(source_backup.tags, self): 267 | kms_key = RuntimeConfig.get_copy_kms_key_id(source_backup.tags, self) 268 | if kms_key is not None: 269 | self.logger.info(f"Snapshot {source_backup.backup_id} is not encrypted. Encrypting the copy with KMS key {kms_key}") 270 | params['KmsKeyId'] = kms_key 271 | 272 | snap = rds_client.copy_db_snapshot(**params) 273 | return snap['DBSnapshot']['DBSnapshotIdentifier'] 274 | 275 | def collect_all_snapshots(self, rds_client): 276 | """ 277 | :param rds_client: boto3 rds client 278 | :return: All manual snapshots within region for rds_client 279 | """ 280 | all_snapshots = [] 281 | tmp_snapshots = rds_client.describe_db_snapshots(SnapshotType='manual') 282 | all_snapshots.extend(tmp_snapshots['DBSnapshots']) 283 | while 'Marker' in tmp_snapshots: 284 | tmp_snapshots = rds_client.describe_db_snapshots(SnapshotType='manual', Marker=tmp_snapshots['Marker']) 285 | all_snapshots.extend(tmp_snapshots['DBSnapshots']) 286 | 287 | self.populate_snap_entity_resource(all_snapshots) 288 | 289 | return all_snapshots 290 | 291 | def populate_snap_entity_resource(self, all_snapshots): 292 | instance_ids = [] 293 | for snap in all_snapshots: 294 | if snap['DBInstanceIdentifier'] not in instance_ids: 295 | instance_ids.append(snap['DBInstanceIdentifier']) 296 | entities = {} 297 | rds_client = AwsHelper.boto3_client('rds', arn=self.role_arn, external_id=self.role_external_id) 298 | local_region = boto3.session.Session().region_name 299 | 300 | for instance_id in instance_ids: 301 | try: 302 | rds_instance = rds_client.describe_db_instances(DBInstanceIdentifier=instance_id)['DBInstances'][0] 303 | tags = rds_client.list_tags_for_resource(ResourceName=rds_instance['DBInstanceArn']).get('TagList', []) 304 | d_tags = dict(map(lambda t: (t['Key'], t['Value']), tags)) 305 | rds_entity = EntityResource(instance_id, 306 | local_region, 307 | rds_instance['InstanceCreateTime'], 308 | d_tags) 309 | entities[instance_id] = rds_entity 310 | except ClientError as e: 311 | if 'DBInstanceNotFoundFault' in str(type(e)): 312 | entities[instance_id] = EntityResource.empty() 313 | entities[instance_id].resource_id = instance_id 314 | else: 315 | raise e 316 | 317 | for snap in all_snapshots: 318 | if snap['DBInstanceIdentifier'] in entities: 319 | snap['EntityResource'] = entities[snap['DBInstanceIdentifier']] 320 | 
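A short sketch of choosing between the two RDS backup modes used above (illustrative only; the configuration key and mode constants are defined in shelvery/runtime_config.py):

import os
from shelvery.factory import ShelveryFactory

# take snapshots directly from the instance instead of copying the latest automated snapshot
os.environ['shelvery_rds_backup_mode'] = 'RDS_CREATE_SNAPSHOT'
engine = ShelveryFactory.get_shelvery_instance('rds')
engine.create_backups()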
-------------------------------------------------------------------------------- /shelvery/redshift_backup.py: -------------------------------------------------------------------------------- 1 | import boto3, datetime 2 | from botocore.exceptions import ClientError 3 | 4 | from typing import List 5 | 6 | from shelvery.engine import SHELVERY_DO_BACKUP_TAGS 7 | from shelvery.engine import ShelveryEngine 8 | 9 | from shelvery.entity_resource import EntityResource 10 | from shelvery.runtime_config import RuntimeConfig 11 | from shelvery.backup_resource import BackupResource 12 | from shelvery.aws_helper import AwsHelper 13 | 14 | class ShelveryRedshiftBackup(ShelveryEngine): 15 | def __init__(self): 16 | ShelveryEngine.__init__(self) 17 | self.redshift_client = AwsHelper.boto3_client('redshift', arn=self.role_arn, external_id=self.role_external_id) 18 | # default region will be picked up in AwsHelper.boto3_client call 19 | self.region = boto3.session.Session().region_name 20 | 21 | def get_resource_type(self) -> str: 22 | """Returns entity type that's about to be backed up""" 23 | return 'Redshift Cluster' 24 | 25 | def get_engine_type(self) -> str: 26 | """ 27 | Return engine type, valid string to be passed to ShelveryFactory.get_shelvery_instance method 28 | """ 29 | return 'redshift' 30 | 31 | def delete_backup(self, backup_resource: BackupResource): 32 | """ 33 | Remove given backup from system 34 | """ 35 | redshift_client = AwsHelper.boto3_client('redshift', region_name=backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 36 | cluster_id = backup_resource.backup_id.split(":")[-1].split("/")[0] 37 | snapshot_id = backup_resource.backup_id.split(":")[-1].split("/")[1] 38 | 39 | response = redshift_client.describe_cluster_snapshots( 40 | SnapshotIdentifier=snapshot_id 41 | ) 42 | 43 | snapshot = next((snap for snap in response['Snapshots'] if snap['SnapshotIdentifier'] == snapshot_id), None) 44 | 45 | if snapshot is None: 46 | self.logger.error(f"Failed to delete snapshot as the snapshot {backup_resource.backup_id} doesn't exist") 47 | return 48 | if 'AccountsWithRestoreAccess' in snapshot: 49 | for shared_account in snapshot['AccountsWithRestoreAccess']: 50 | self.logger.info(f"revoking access to snapshot {backup_resource.backup_id} for account {shared_account['AccountId']} for the purpose of deletion") 51 | redshift_client.revoke_snapshot_access( 52 | SnapshotIdentifier=snapshot_id, 53 | SnapshotClusterIdentifier=cluster_id, 54 | AccountWithRestoreAccess=shared_account['AccountId'] 55 | ) 56 | 57 | try: 58 | redshift_client.delete_cluster_snapshot( 59 | SnapshotIdentifier=snapshot_id, 60 | SnapshotClusterIdentifier=cluster_id 61 | ) 62 | except ClientError as ex: 63 | if 'other accounts still have access to it' in ex.response['Error']['Message']: 64 | self.logger.exception(f"Could not delete {backup_resource.backup_id} as " 65 | f"other accounts still have access to this snapshot") 66 | return 67 | else: 68 | self.logger.error(ex.response) 69 | self.logger.exception(f"Could not delete {backup_resource.backup_id}") 70 | 
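# Illustration (identifiers below are made up): redshift backup ids are stored as full
# snapshot ARNs, which the methods in this class unpack with split(":")[-1].split("/"), e.g.
#
#   backup_id   = 'arn:aws:redshift:ap-southeast-2:123456789012:snapshot:my-cluster/my-snapshot'
#   cluster_id  -> 'my-cluster'
#   snapshot_id -> 'my-snapshot'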
71 | def get_existing_backups(self, backup_tag_prefix: str) -> List[BackupResource]: 72 | """ 73 | Collect existing backups on system of given type, marked with given tag 74 | """ 75 | local_region = boto3.session.Session().region_name 76 | marker_tag = f"{backup_tag_prefix}:{BackupResource.BACKUP_MARKER_TAG}" 77 | response = self.redshift_client.describe_cluster_snapshots( 78 | SnapshotType='manual', 79 | TagKeys=[marker_tag], 80 | TagValues=SHELVERY_DO_BACKUP_TAGS 81 | ) 82 | 83 | snapshots = response['Snapshots'] 84 | backups = [] 85 | 86 | for snap in snapshots: 87 | cluster_id = snap['ClusterIdentifier'] 88 | d_tags = BackupResource.dict_from_boto3_tags(snap['Tags']) 89 | create_time = snap['ClusterCreateTime'] 90 | redshift_entity = EntityResource(cluster_id, 91 | local_region, 92 | create_time, 93 | d_tags) 94 | backup_id = f"arn:aws:redshift:{local_region}:{snap['OwnerAccount']}" 95 | backup_id = f"{backup_id}:snapshot:{snap['ClusterIdentifier']}/{snap['SnapshotIdentifier']}" 96 | backup_resource = BackupResource.construct( 97 | backup_tag_prefix, 98 | backup_id, 99 | d_tags 100 | ) 101 | backup_resource.entity_resource = redshift_entity 102 | backup_resource.entity_id = redshift_entity.resource_id 103 | 104 | backups.append(backup_resource) 105 | 106 | return backups 107 | 108 | def get_entities_to_backup(self, tag_name: str) -> List[EntityResource]: 109 | """Get all clusters that contain `tag_name` as a tag.""" 110 | clusters = self.collect_clusters(tag_name) 111 | 112 | # TODO: To get the cluster's creation time, we need to query the "events" with the 113 | # cluster ID. 114 | 115 | entities = [] 116 | for cluster in clusters: 117 | if cluster['ClusterStatus'] != 'available': 118 | self.logger.info(f"Skipping cluster '{cluster['ClusterIdentifier']}' as its status is '{cluster['ClusterStatus']}'.") 119 | continue 120 | 121 | d_tags = BackupResource.dict_from_boto3_tags(cluster['Tags']) 122 | 123 | entity = EntityResource( 124 | resource_id=cluster['ClusterIdentifier'], 125 | resource_region=self.region, 126 | date_created=f"{datetime.datetime.utcnow():%Y-%m-%d %H:%M:%S}", 127 | tags=d_tags) 128 | entities.append(entity) 129 | 130 | return entities 131 | 132 | # collect all clusters tagged with given tag, in paginated manner 133 | def collect_clusters(self, tag_name: str): 134 | next_token = '' 135 | all_clusters = [] 136 | 137 | while True: 138 | params = {'TagKeys': [tag_name], 'TagValues': SHELVERY_DO_BACKUP_TAGS} 139 | if next_token: 140 | params['Marker'] = next_token  # pass the marker back so the next page is fetched 141 | tagged_clusters = self.redshift_client.describe_clusters(**params) 142 | all_clusters = all_clusters + tagged_clusters['Clusters'] 143 | next_token = tagged_clusters.get('Marker', '') 144 | if not next_token: 145 | break 146 | 147 | return all_clusters 148 | 149 | def backup_resource(self, backup_resource: BackupResource) -> BackupResource: 150 | """Redshift supports two modes of snapshot functions: a regular cluster snapshot and copying an existing snapshot to a different region. 
151 | """ 152 | if RuntimeConfig.get_redshift_mode(backup_resource.entity_resource.tags, self) == RuntimeConfig.REDSHIFT_CREATE_SNAPSHOT: 153 | return self.backup_from_cluster(backup_resource) 154 | if RuntimeConfig.get_redshift_mode(backup_resource.entity_resource.tags, 155 | self) == RuntimeConfig.REDSHIFT_COPY_AUTOMATED_SNAPSHOT: 156 | return self.backup_from_latest_automated(backup_resource) 157 | 158 | raise Exception(f"Only {RuntimeConfig.REDSHIFT_COPY_AUTOMATED_SNAPSHOT} and " 159 | f"{RuntimeConfig.REDSHIFT_CREATE_SNAPSHOT} redshift backup " 160 | f"modes supported - set redshift backup mode using redshift_backup_mode configuration option ") 161 | 162 | def backup_from_cluster(self, backup_resource: BackupResource): 163 | snapshot = self.redshift_client.create_cluster_snapshot( 164 | SnapshotIdentifier=backup_resource.name, 165 | ClusterIdentifier=backup_resource.entity_id, 166 | )['Snapshot'] 167 | backup_resource.backup_id = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}" 168 | backup_resource.backup_id = f"{backup_resource.backup_id}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}" 169 | return backup_resource 170 | 171 | def backup_from_latest_automated(self, backup_resource: BackupResource): 172 | auto_snapshots = self.redshift_client.describe_cluster_snapshots( 173 | ClusterIdentifier=backup_resource.entity_id, 174 | SnapshotType='automated', 175 | # API always returns in date descending order, and we only need last one 176 | MaxRecords=20 177 | ) 178 | auto_snapshots = sorted(auto_snapshots['Snapshots'], key=lambda k: k['SnapshotCreateTime'], 179 | reverse=True) 180 | 181 | if len(auto_snapshots) == 0: 182 | self.logger.error(f"There is no latest automated backup for cluster {backup_resource.entity_id}," 183 | f" fallback to REDSHIFT_CREATE_SNAPSHOT mode. Creating snapshot directly on cluster...") 184 | return self.backup_from_cluster(backup_resource) 185 | 186 | # TODO handle case when there are no latest automated backups 187 | snapshot = self.redshift_client.copy_cluster_snapshot( 188 | SourceSnapshotIdentifier=auto_snapshots[0]['SnapshotIdentifier'], 189 | SourceSnapshotClusterIdentifier=auto_snapshots[0]['ClusterIdentifier'], 190 | TargetSnapshotIdentifier=backup_resource.name 191 | )['Snapshot'] 192 | backup_resource.backup_id = f"arn:aws:redshift:{backup_resource.region}:{backup_resource.account_id}" 193 | backup_resource.backup_id = f"{backup_resource.backup_id}:snapshot:{snapshot['ClusterIdentifier']}/{snapshot['SnapshotIdentifier']}" 194 | return backup_resource 195 | 196 | def tag_backup_resource(self, backup_resource: BackupResource): 197 | """ 198 | Create backup resource tags. 199 | """ 200 | # This is unnecessary for Redshift as the tags are included when calling `backup_resource()`. 201 | redshift_client = AwsHelper.boto3_client('redshift', region_name = backup_resource.region, arn=self.role_arn, external_id=self.role_external_id) 202 | redshift_client.create_tags( 203 | ResourceName=backup_resource.backup_id, 204 | Tags=backup_resource.boto3_tags 205 | ) 206 | 207 | def copy_backup_to_region(self, backup_id: str, region: str) -> str: 208 | """ 209 | Copy a backup to another region. 210 | This enables cross-region automated backups for the Redshift cluster, so future automated backups 211 | will be replicated to `region`. 
212 | """ 213 | self.logger.warning("Redshift does not support copy of a snapshot as such to another region, " 214 | "but rather allows automatic copying of automated backups to another region" 215 | "using EnableSnapshotCopy API Call.") 216 | pass 217 | 218 | def create_encrypted_backup(self, backup_id: str, kms_key: str, region: str) -> str: 219 | return backup_id 220 | 221 | def is_backup_available(self, backup_region: str, backup_id: str) -> bool: 222 | """ 223 | Determine whether backup has completed and is available to be copied 224 | to other regions and shared with other AWS accounts 225 | """ 226 | redshift_client = AwsHelper.boto3_client('redshift', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 227 | snapshot_id = backup_id.split(":")[-1].split("/")[1] 228 | snapshots = None 229 | try: 230 | snapshots = redshift_client.describe_cluster_snapshots( 231 | SnapshotIdentifier=snapshot_id 232 | ) 233 | except ClientError as e: 234 | self.logger.warning(f"Backup {backup_id} not found") 235 | print(e.response) 236 | if e.response['Error']['Code'] == '404': 237 | return False 238 | else: 239 | self.logger.exception(f"Problem waiting for {backup_id} availability") 240 | raise e 241 | return snapshots['Snapshots'][0]['Status'] == 'available' 242 | 243 | 244 | def share_backup_with_account(self, backup_region: str, backup_id: str, aws_account_id: str): 245 | """ 246 | Share backup with another AWS Account 247 | """ 248 | redshift_client = AwsHelper.boto3_client('redshift', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 249 | snapshot_id = backup_id.split(":")[-1].split("/")[1] 250 | redshift_client.authorize_snapshot_access( 251 | SnapshotIdentifier=snapshot_id, 252 | AccountWithRestoreAccess=aws_account_id 253 | ) 254 | 255 | 256 | def get_backup_resource(self, backup_region: str, backup_id: str) -> BackupResource: 257 | """ 258 | Get Backup Resource within region, identified by its backup_id 259 | """ 260 | redshift_client = AwsHelper.boto3_client('redshift', region_name=backup_region, arn=self.role_arn, external_id=self.role_external_id) 261 | snapshot_id = backup_id.split(":")[-1].split("/")[1] 262 | snapshots = redshift_client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_id) 263 | snapshot = snapshots['Snapshots'][0] 264 | d_tags = BackupResource.dict_from_boto3_tags(snapshot['Tags']) 265 | return BackupResource.construct(d_tags['shelvery:tag_name'], backup_id, d_tags) 266 | 267 | def copy_shared_backup(self, source_account: str, source_backup: BackupResource) -> str: 268 | """ 269 | Copy Shelvery backup that has been shared from another account to account where 270 | shelvery is currently running 271 | :param source_account: 272 | :param source_backup: 273 | :return: 274 | """ 275 | self.logger.warning("Redshift does not support cross account copy of snapshots as such. 
" 276 | "Alternate way of creating snapshot copy is creating cluster out of" 277 | "shared snapshot, and then creating snapshot out of that cluster.") 278 | return source_backup.backup_id 279 | -------------------------------------------------------------------------------- /shelvery/runtime_config.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | import boto3 4 | 5 | 6 | 7 | class RuntimeConfig: 8 | """ 9 | Helper to read runtime and other values 10 | Valid environment variables are 11 | 12 | shelvery_keep_daily_backups - daily backups to keep, defaults to 14 days 13 | shelvery_keep_weekly_backups - daily backups to keep, defaults to 8 weeks 14 | shelvery_keep_monthly_backups - daily backups to keep, defaults to 12 months 15 | shelvery_keep_yearly_backups - daily backups to keep, defaults to 10 years 16 | 17 | shelvery_custom_retention_types - custom retention periods in name:seconds format, comma separated, empty (disabled) by default 18 | shelvery_current_retention_type - custom retention period applied to current create backup process 19 | shelvery_dr_regions - disaster recovery regions, comma separated, empty (disabled) by default 20 | 21 | shelvery_keep_daily_backups_dr - daily backups to keep in disaster recover region 22 | shelvery_keep_weekly_backups_dr - daily backups to keep in disaster recover region 23 | shelvery_keep_monthly_backups_dr - daily backups to keep in disaster recover region 24 | shelvery_keep_yearly_backups_dr - daily backups to keep in disaster recover region 25 | 26 | shelvery_wait_snapshot_timeout - timeout in seconds to wait for snapshot to become available 27 | before copying it to another region / sharing with other account 28 | defaults to 1200 29 | 30 | shelvery_lambda_max_wait_iterations - maximum number of wait calls to lambda function. E.g. 31 | if lambda is set to timeout in 5 minutes, and this 32 | values is set to 3, total wait time will be approx 14 minutes, 33 | as lambda is invoked recursively 20 seconds before timeout 34 | defaults to 5 35 | 36 | shelvery_share_aws_account_ids - AWS Account Ids to share backups with. Applies to both original and regional 37 | backups 38 | 39 | shelvery_source_aws_account_ids - AWS Account Ids that are sharing shelvery backups with AWS Account shelvery 40 | is running in. Used for 'pull backups' feature 41 | 42 | shelvery_bucket_name_template - Template used to create bucket name. Available keys: `{account_id}`, `{region}`. 43 | Defaults to `shelvery.data.{account_id}-{region}.base2tools` 44 | 45 | shelvery_select_entity - Filter which entities get backed up, regardless of tags 46 | 47 | shelvery_sns_topic - SNS Topics for shelvery notifications 48 | 49 | shelvery_error_sns_topic - SNS Topics for just error messages 50 | 51 | shelvery_copy_resource_tags - Copy tags from original resource 52 | shelvery_exluded_resource_tag_keys - Comma separated list of tag keys to exclude from copying from original 53 | 54 | shelvery_sqs_queue_url - re invoke shelvery through a sqs queue 55 | 56 | shelvery_sqs_queue_wait_period - wait time in seconds before re invoking shelvery [0-900] 57 | 58 | shelvery_ignore_invalid_resource_state - ignore exceptions due to the resource being in a unavailable state, 59 | such as shutdown, rebooting. 60 | 61 | shelvery_encrypt_copy - when copying a shared unencrypted snapshot, encrypt the shapshot. 62 | when enabled 'shelvery_copy_kms_key_id' must also be set. 
68 | DEFAULT_KEEP_DAILY = 14 69 | DEFAULT_KEEP_WEEKLY = 8 70 | DEFAULT_KEEP_MONTHLY = 12 71 | DEFAULT_KEEP_YEARLY = 10 72 | 73 | RDS_COPY_AUTOMATED_SNAPSHOT = 'RDS_COPY_AUTOMATED_SNAPSHOT' 74 | RDS_CREATE_SNAPSHOT = 'RDS_CREATE_SNAPSHOT' 75 | DOCDB_COPY_AUTOMATED_SNAPSHOT = 'DOCDB_COPY_AUTOMATED_SNAPSHOT' 76 | DOCDB_CREATE_SNAPSHOT = 'DOCDB_CREATE_SNAPSHOT' 77 | REDSHIFT_COPY_AUTOMATED_SNAPSHOT = 'REDSHIFT_COPY_AUTOMATED_SNAPSHOT' 78 | REDSHIFT_CREATE_SNAPSHOT = 'REDSHIFT_CREATE_SNAPSHOT' 79 | 80 | DEFAULTS = { 81 | 'shelvery_keep_daily_backups': 14, 82 | 'shelvery_keep_weekly_backups': 8, 83 | 'shelvery_keep_monthly_backups': 12, 84 | 'shelvery_keep_yearly_backups': 10, 85 | 'shelvery_custom_retention_types': None, 86 | 'shelvery_current_retention_type': None, 87 | 'shelvery_wait_snapshot_timeout': 1200, 88 | 'shelvery_lambda_max_wait_iterations': 5, 89 | 'shelvery_dr_regions': None, 90 | 'shelvery_rds_backup_mode': RDS_COPY_AUTOMATED_SNAPSHOT, 91 | 'shelvery_docdb_backup_mode': DOCDB_COPY_AUTOMATED_SNAPSHOT, 92 | 'shelvery_source_aws_account_ids': None, 93 | 'shelvery_share_aws_account_ids': None, 94 | 'shelvery_redshift_backup_mode': REDSHIFT_COPY_AUTOMATED_SNAPSHOT, 95 | 'shelvery_select_entity': None, 96 | 'shelvery_bucket_name_template': 'shelvery.data.{account_id}-{region}.base2tools', 97 | 'boto3_retries': 10, 98 | 'role_arn': None, 99 | 'role_external_id': None, 100 | 'shelvery_copy_resource_tags': True, 101 | 'shelvery_exluded_resource_tag_keys': None, 102 | 'shelvery_sqs_queue_url': None, 103 | 'shelvery_sqs_queue_wait_period': 0, 104 | 'shelvery_ignore_invalid_resource_state': False, 105 | 'shelvery_encrypt_copy': False, 106 | 'shelvery_copy_kms_key_id': None, 107 | 'shelvery_reencrypt_kms_key_id': None 108 | } 109 | 110 | @classmethod 111 | def get_conf_value(cls, key: str, resource_tags=None, lambda_payload=None): 112 | # resource tags have the highest priority 113 | if resource_tags is not None: 114 | tag_key = f"shelvery:config:{key}" 115 | if tag_key in resource_tags: 116 | return resource_tags[tag_key] 117 | 118 | # lambda payload config comes next 119 | if (lambda_payload is not None) and ('config' in lambda_payload) and (key in lambda_payload['config']): 120 | return lambda_payload['config'][key] 121 | 122 | # then environment variables 123 | if key in os.environ: 124 | return os.environ[key] 125 | 126 | # finally, coded defaults 127 | if key in cls.DEFAULTS: 128 | return cls.DEFAULTS[key] 129 | 130 | @classmethod 131 | def is_lambda_runtime(cls, engine) -> bool: 132 | return engine.aws_request_id != 0 and engine.lambda_payload is not None 133 | 134 | @classmethod 135 | def is_offload_queueing(cls, engine) -> bool: 136 | return cls.get_sqs_queue_url(engine) is not None 137 | 138 | @classmethod 139 | def get_keep_daily(cls, resource_tags=None, engine=None): 140 | return int(cls.get_conf_value('shelvery_keep_daily_backups', resource_tags, engine.lambda_payload)) 141 | 142 | @classmethod 143 | def get_keep_weekly(cls, resource_tags=None, engine=None): 144 | return int(cls.get_conf_value('shelvery_keep_weekly_backups', resource_tags, engine.lambda_payload)) 145 | 146 | @classmethod 147 | def 
get_keep_monthly(cls, resource_tags=None, engine=None): 148 | return int(cls.get_conf_value('shelvery_keep_monthly_backups', resource_tags, engine.lambda_payload)) 149 | 150 | @classmethod 151 | def get_keep_yearly(cls, resource_tags=None, engine=None): 152 | return int(cls.get_conf_value('shelvery_keep_yearly_backups', resource_tags, engine.lambda_payload)) 153 | 154 | @classmethod 155 | def get_custom_retention_types(cls, engine=None): 156 | custom_retention = cls.get_conf_value('shelvery_custom_retention_types', None, engine.lambda_payload) 157 | if custom_retention is None or custom_retention.strip() == '': 158 | return {} 159 | 160 | retentions = custom_retention.split(',') 161 | rval = {} 162 | for retention in retentions: 163 | parts = retention.split(':') 164 | if len(parts) == 2: 165 | rval[parts[0]] = int(parts[1]) 166 | return rval 167 | 168 | @classmethod 169 | def get_current_retention_type(cls, engine=None): 170 | current_retention_type = cls.get_conf_value('shelvery_current_retention_type', None, engine.lambda_payload) 171 | if current_retention_type is None or current_retention_type.strip() == '': 172 | return None 173 | return current_retention_type 174 | 175 | @classmethod 176 | def get_envvalue(cls, key: str, default_value): 177 | return os.environ[key] if key in os.environ else default_value 178 | 179 | @classmethod 180 | def get_tag_prefix(cls): 181 | return cls.get_envvalue('shelvery_tag_prefix', 'shelvery') 182 | 183 | @classmethod 184 | def get_dr_regions(cls, resource_tags, engine): 185 | regions = cls.get_conf_value('shelvery_dr_regions', resource_tags, engine.lambda_payload) 186 | return [] if regions is None else regions.split(',') 187 | 188 | @classmethod 189 | def is_started_internally(cls, engine) -> bool: 190 | # 1. running in lambda environment 191 | # 2. payload has 'is_stated_internally' key 192 | # 3. 
payload 'is_started_internally' key is set to True 193 | return cls.is_lambda_runtime(engine) \ 194 | and 'is_started_internally' in engine.lambda_payload \ 195 | and engine.lambda_payload['is_started_internally'] 196 | 197 | @classmethod 198 | def get_wait_backup_timeout(cls, shelvery): 199 | if cls.is_lambda_runtime(shelvery): 200 | return (shelvery.lambda_context.get_remaining_time_in_millis() / 1000) - 20 201 | else: 202 | return int(cls.get_conf_value('shelvery_wait_snapshot_timeout', None, shelvery.lambda_payload)) 203 | 204 | @classmethod 205 | def get_max_lambda_wait_iterations(cls): 206 | return int(cls.get_envvalue('shelvery_lambda_max_wait_iterations', '5')) 207 | 208 | @classmethod 209 | def get_share_with_accounts(cls, shelvery): 210 | # collect account from env vars 211 | accounts = cls.get_conf_value('shelvery_share_aws_account_ids', None, shelvery.lambda_payload) 212 | 213 | if accounts is not None and accounts.strip() == "": 214 | return [] 215 | 216 | # by default it is empty list 217 | accounts = accounts.split(',') if accounts is not None else [] 218 | 219 | # validate account format 220 | rval = [] 221 | for acc in accounts: 222 | if re.match('^[0-9]{12}$', acc) is None: 223 | shelvery.logger.warn(f"Account id {acc} is not 12-digit number, skipping for share") 224 | else: 225 | rval.append(acc) 226 | shelvery.logger.info(f"Collected account {acc} to share backups with") 227 | 228 | return rval 229 | 230 | @classmethod 231 | def get_source_backup_accounts(cls, shelvery): 232 | # collect account from env vars 233 | accounts = cls.get_conf_value('shelvery_source_aws_account_ids', None, shelvery.lambda_payload) 234 | 235 | if accounts is not None and accounts.strip() == "": 236 | return [] 237 | 238 | # by default it is empty list 239 | accounts = accounts.split(',') if accounts is not None else [] 240 | 241 | # validate account format 242 | rval = [] 243 | for acc in accounts: 244 | if re.match('^[0-9]{12}$', acc) is None: 245 | shelvery.logger.warn(f"Account id {acc} is not 12-digit number, skipping for share") 246 | else: 247 | rval.append(acc) 248 | shelvery.logger.info(f"Collected account {acc} to collect backups from") 249 | 250 | return rval 251 | 252 | @classmethod 253 | def get_rds_mode(cls, resource_tags, engine): 254 | return cls.get_conf_value('shelvery_rds_backup_mode', resource_tags, engine.lambda_payload) 255 | 256 | @classmethod 257 | def get_docdb_mode(cls, resource_tags, engine): 258 | return cls.get_conf_value('shelvery_docdb_backup_mode', resource_tags, engine.lambda_payload) 259 | 260 | @classmethod 261 | def get_redshift_mode(cls, resource_tags, engine): 262 | return cls.get_conf_value('shelvery_redshift_backup_mode', resource_tags, engine.lambda_payload) 263 | 264 | @classmethod 265 | def get_shelvery_select_entity(cls, engine): 266 | val = cls.get_conf_value('shelvery_select_entity', None, engine.lambda_payload) 267 | if val == '': 268 | return None 269 | return val 270 | 271 | 272 | @classmethod 273 | def get_sns_topic(cls, engine): 274 | return cls.get_conf_value('shelvery_sns_topic', None, engine.lambda_payload) 275 | 276 | @classmethod 277 | def boto3_retry_times(cls): 278 | return cls.get_conf_value('boto3_retries', None, None) 279 | 280 | @classmethod 281 | def get_error_sns_topic(cls, engine): 282 | topic = cls.get_conf_value('shelvery_error_sns_topic', None, engine.lambda_payload) 283 | if topic is None: 284 | topic = cls.get_conf_value('shelvery_sns_topic', None, engine.lambda_payload) 285 | return topic 286 | 287 | @classmethod 288 | def 
get_role_arn(cls, engine): 289 | return cls.get_conf_value('role_arn', None, engine.lambda_payload) 290 | 291 | @classmethod 292 | def get_role_external_id(cls, engine): 293 | return cls.get_conf_value('role_external_id', None, engine.lambda_payload) 294 | 295 | @classmethod 296 | def get_bucket_name_template(cls, engine): 297 | return cls.get_conf_value('shelvery_bucket_name_template', None, engine.lambda_payload) 298 | 299 | @classmethod 300 | def copy_resource_tags(cls, engine) -> bool: 301 | copy_tags = cls.get_conf_value('shelvery_copy_resource_tags', None, engine.lambda_payload) 302 | # value may arrive as a boolean default or as a string from the environment / payload 303 | if copy_tags is True or str(copy_tags).lower() in ('true', '1'): 304 | return True 305 | return False 306 | 307 | @classmethod 308 | def ignore_invalid_resource_state(cls, engine) -> bool: 309 | ignore_state = cls.get_conf_value('shelvery_ignore_invalid_resource_state', None, engine.lambda_payload) 310 | # defaults to False; only boolean True or the strings 'true'/'1' enable it 311 | if ignore_state is True or str(ignore_state).lower() in ('true', '1'): 312 | return True 313 | return False 314 | 315 | @classmethod 316 | def get_exluded_resource_tag_keys(cls, engine): 317 | # Exclude the tag prefix keys as they are not necessary, 318 | # and 'aws:' tags, as aws is a reserved tag namespace 319 | keys = [cls.get_tag_prefix(), 'aws:'] 320 | exclude = cls.get_conf_value('shelvery_exluded_resource_tag_keys', None, engine.lambda_payload) 321 | if exclude is not None: 322 | keys += exclude.split(',') 323 | return keys 324 | 325 | @classmethod 326 | def get_sqs_queue_url(cls, engine): 327 | return cls.get_conf_value('shelvery_sqs_queue_url', None, engine.lambda_payload) 328 | 329 | @classmethod 330 | def get_sqs_queue_wait_period(cls, engine): 331 | return cls.get_conf_value('shelvery_sqs_queue_wait_period', None, engine.lambda_payload) 332 | 333 | @classmethod 334 | def get_encrypt_copy(cls, resource_tags, engine): 335 | return cls.get_conf_value('shelvery_encrypt_copy', resource_tags, engine.lambda_payload) 336 | 337 | @classmethod 338 | def get_copy_kms_key_id(cls, resource_tags, engine): 339 | return cls.get_conf_value('shelvery_copy_kms_key_id', resource_tags, engine.lambda_payload) 340 | 341 | @classmethod 342 | def get_reencrypt_kms_key_id(cls, resource_tags, engine): 343 | return cls.get_conf_value('shelvery_reencrypt_kms_key_id', resource_tags, engine.lambda_payload)
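For context, a sketch of the message shape that ShelveryInvoker below produces and shelvery_lambda/lambda_handler.py consumes (the values shown are illustrative):

parameters = {
    'backup_type': 'rds',        # any engine type ShelveryFactory accepts
    'action': 'create_backups',  # name of the engine method to invoke
    'arguments': None,           # passed to the method when present
    'config': {'shelvery_keep_daily_backups': '7'}  # optional RuntimeConfig overrides
}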
-------------------------------------------------------------------------------- /shelvery/shelvery_invoker.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import os 3 | import json 4 | import logging 5 | 6 | from typing import Dict 7 | from threading import Thread 8 | 9 | from shelvery.runtime_config import RuntimeConfig 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery.queue import ShelveryQueue 12 | 13 | class ShelveryInvoker: 14 | """Helper to orchestrate execution of shelvery operations on AWS Lambda platform""" 15 | 16 | def invoke_shelvery_operation(self, engine, method_name: str, method_arguments: Dict): 17 | """ 18 | Invokes a shelvery engine operation asynchronously. 19 | If shelvery is running within a lambda environment, a new lambda function invocation will be made. If running 20 | on a server, it will start a new thread and invoke the function there. 21 | The invoked function must accept its arguments in the form of a map. 22 | """ 23 | is_lambda_context = RuntimeConfig.is_lambda_runtime(engine) 24 | is_offload_queueing = RuntimeConfig.is_offload_queueing(engine) 25 | parameters = { 26 | 'backup_type': engine.get_engine_type(), 27 | 'action': method_name, 28 | 'arguments': method_arguments 29 | } 30 | if is_lambda_context: 31 | if 'config' in engine.lambda_payload: 32 | parameters['config'] = engine.lambda_payload['config'] 33 | 34 | if is_offload_queueing: 35 | sqs = ShelveryQueue(RuntimeConfig.get_sqs_queue_url(engine), RuntimeConfig.get_sqs_queue_wait_period(engine)) 36 | sqs.send(parameters) 37 | else: 38 | parameters['is_started_internally'] = True 39 | payload = json.dumps(parameters) 40 | bytes_payload = bytearray() 41 | bytes_payload.extend(map(ord, payload)) 42 | function_name = os.environ['AWS_LAMBDA_FUNCTION_NAME'] 43 | lambda_client = AwsHelper.boto3_client('lambda') 44 | lambda_client.invoke_async(FunctionName=function_name, InvokeArgs=bytes_payload) 45 | else: 46 | resource_type = engine.get_engine_type() 47 | 48 | def execute(): 49 | from shelvery.factory import ShelveryFactory 50 | backup_engine = ShelveryFactory.get_shelvery_instance(resource_type) 51 | method = backup_engine.__getattribute__(method_name) 52 | method(method_arguments) 53 | 54 | logging.info(f"Start new thread to execute :{method_name}") 55 | if 'SHELVERY_MONO_THREAD' in os.environ and os.environ['SHELVERY_MONO_THREAD'] == "1": 56 | execute() 57 | else: 58 | thread = Thread(target=execute) 59 | thread.start() 60 | # thread.join() 61 | -------------------------------------------------------------------------------- /shelvery_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/base2Services/shelvery-aws-backups/422c92abc25be39a3bad12546db2e5421826c3d2/shelvery_cli/__init__.py -------------------------------------------------------------------------------- /shelvery_cli/__main__.py: -------------------------------------------------------------------------------- 1 | import shelvery 2 | from shelvery_cli.shelver_cli_main import ShelveryCliMain 3 | import logging 4 | import sys 5 | 6 | 7 | def setup_logging(): 8 | root = logging.getLogger() 9 | root.setLevel(logging.DEBUG) 10 | 11 | ch = logging.StreamHandler(sys.stdout) 12 | ch.setLevel(logging.DEBUG) 13 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 14 | ch.setFormatter(formatter) 15 | root.addHandler(ch) 16 | 17 | 18 | def main(args=None): 19 | """The main routine.""" 20 | 21 | print(f"Shelvery v{shelvery.__version__}") 22 | 23 | if args is None: 24 | args = sys.argv[1:] 25 | # for create_data_buckets the engine type does not matter, 26 | # probably should extract all S3 things into a separate class 27 | if len(args) == 1 and args[0] == 'create_data_buckets': 28 | args.insert(0, 'ebs') 29 | if len(args) < 2: 30 | print("""Usage: shelvery <backup_type> <action>\n\nBackup types: rds ebs rds_cluster ec2ami redshift docdb 31 | Actions:\n\tcreate_backups\n\tclean_backups\n\tcreate_data_buckets\n\tpull_shared_backups""") 32 | exit(-2) 33 | 34 | setup_logging() 35 | main_runner = ShelveryCliMain() 36 | main_runner.main(args[0], args[1]) 37 | 38 | 39 | if __name__ == "__main__": 40 | main() 41 | -------------------------------------------------------------------------------- /shelvery_cli/shelver_cli_main.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | from shelvery.factory import ShelveryFactory 3 | 4 | 5 | class ShelveryCliMain: 6 | 7 | def main(self, backup_type, action): 8 | 9 | logger = logging.getLogger() 10 | logger.setLevel(logging.INFO) 11 | 12 | # create backup engine 13 | backup_engine = ShelveryFactory.get_shelvery_instance(backup_type) 14 | method = backup_engine.__getattribute__(action) 15 | 16 | # start the action 17 | method() 18 | return 0 19 | 20 | -------------------------------------------------------------------------------- /shelvery_lambda/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/base2Services/shelvery-aws-backups/422c92abc25be39a3bad12546db2e5421826c3d2/shelvery_lambda/__init__.py -------------------------------------------------------------------------------- /shelvery_lambda/lambda_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | 4 | from shelvery.factory import ShelveryFactory 5 | 6 | def lambda_handler(event, context): 7 | 8 | logger = logging.getLogger() 9 | logger.setLevel(logging.INFO) 10 | logger.info(f"Received event\n{json.dumps(event, indent=2)}") 11 | 12 | # handle messages from SNS, SQS and CloudWatch scheduled events 13 | if 'Records' in event: 14 | for record in event['Records']: 15 | if 'Sns' in record: 16 | payload = json.loads(record['Sns']['Message']) 17 | elif 'body' in record: 18 | payload = json.loads(record['body']) 19 | else: 20 | payload = event 21 | 22 | if 'backup_type' not in payload: 23 | raise Exception("Expecting backup type in event payload in \"backup_type\" key") 24 | 25 | if 'action' not in payload: 26 | raise Exception("Expecting backup action in event payload in \"action\" key") 27 | 28 | backup_type = payload['backup_type'] 29 | action = payload['action'] 30 | 31 | # create backup engine 32 | backup_engine = ShelveryFactory.get_shelvery_instance(backup_type) 33 | backup_engine.set_lambda_environment(payload, context) 34 | 35 | method = backup_engine.__getattribute__(action) 36 | 37 | if 'arguments' in payload: 38 | method(payload['arguments']) 39 | else: 40 | method() 41 | 42 | return 0 43 | -------------------------------------------------------------------------------- /shelvery_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/base2Services/shelvery-aws-backups/422c92abc25be39a3bad12546db2e5421826c3d2/shelvery_tests/__init__.py -------------------------------------------------------------------------------- /shelvery_tests/cleanup_functions.py: -------------------------------------------------------------------------------- 1 | from shelvery.documentdb_backup import ShelveryDocumentDbBackup 2 | from shelvery.ebs_backup import ShelveryEBSBackup 3 | from shelvery.ec2ami_backup import ShelveryEC2AMIBackup 4 | from shelvery.rds_cluster_backup import ShelveryRDSClusterBackup 5 | from shelvery.rds_backup import ShelveryRDSBackup 6 | from shelvery.aws_helper import AwsHelper 7 | import boto3 8 | import os 9 | 10 | def cleanDocDBSnapshots(): 11 | print("Cleaning up DocDB Snapshots") 12 | backups_engine = ShelveryDocumentDbBackup() 13 | backups_engine.clean_backups() 14 | 15 | def cleanRdsClusterSnapshots(): 16 | print("Cleaning up RDS Cluster Snapshots") 17 | backups_engine = ShelveryRDSClusterBackup() 18 | 
backups_engine.clean_backups() 19 | 20 | def cleanRdsSnapshots(): 21 | print("Cleaning up RDS Snapshots") 22 | backups_engine = ShelveryRDSBackup() 23 | backups_engine.clean_backups() 24 | 25 | def cleanEC2Snapshots(): 26 | print("Cleaning up EC2 AMI Snapshots") 27 | backups_engine = ShelveryEC2AMIBackup() 28 | backups_engine.clean_backups() 29 | 30 | def cleanEBSSnapshots(): 31 | print("Cleaning up EBS Snapshots") 32 | backups_engine = ShelveryEBSBackup() 33 | backups_engine.clean_backups() 34 | 35 | def cleanS3Bucket(): 36 | print("Cleaning S3 Bucket") 37 | bucket_name = f"shelvery.data.{AwsHelper.local_account_id()}-ap-southeast-2.base2tools" 38 | s3 = boto3.resource('s3') 39 | bucket = s3.Bucket(bucket_name) 40 | 41 | # Delete all objects in the bucket 42 | for obj in bucket.objects.all(): 43 | obj.delete() 44 | 45 | def cleanupSnapshots(): 46 | os.environ['shelvery_custom_retention_types'] = 'shortLived:1' 47 | os.environ['shelvery_current_retention_type'] = 'shortLived' 48 | cleanDocDBSnapshots() 49 | cleanEC2Snapshots() 50 | cleanEBSSnapshots() 51 | cleanRdsClusterSnapshots() 52 | cleanRdsSnapshots() 53 | -------------------------------------------------------------------------------- /shelvery_tests/cloudformation-unittest.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | 3 | Parameters: 4 | Ami: 5 | Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id> 6 | Default: /aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2 7 | 8 | Resources: 9 | 10 | RDSInstance: 11 | Type: AWS::RDS::DBInstance 12 | Properties: 13 | DBInstanceClass: db.t3.small 14 | DBInstanceIdentifier: shelvery-test-rds 15 | Engine: postgres 16 | MasterUsername: asdf9876lkjh4312 17 | MasterUserPassword: azxcv0987poiu4321 18 | AllocatedStorage: '100' 19 | 20 | RDSClusterInstance: 21 | Type: AWS::RDS::DBCluster 22 | Properties: 23 | Engine: aurora-postgresql 24 | MasterUsername: asdf9876lkjh4312 25 | MasterUserPassword: azxcv0987poiu4321 26 | DBClusterIdentifier: shelvery-test-rds-cluster 27 | 28 | DocDBInstance: 29 | Type: AWS::DocDB::DBCluster 30 | Properties: 31 | MasterUsername: asdf9876lkjh4312 32 | MasterUserPassword: azxcv0987poiu4321 33 | StorageEncrypted: false 34 | DBClusterIdentifier: shelvery-test-docdb 35 | 36 | EC2AmiInstance: 37 | Type: AWS::EC2::Instance 38 | Properties: 39 | ImageId: 40 | Ref: Ami 41 | Tags: 42 | - Key: "Name" 43 | Value: "shelvery-test-ec2" 44 | 45 | EBSInstance: 46 | Type: AWS::EC2::Volume 47 | Properties: 48 | Encrypted: False 49 | VolumeType: gp2 50 | AvailabilityZone: !GetAtt EC2AmiInstance.AvailabilityZone 51 | Size: 1 52 | Tags: 53 | - Key: "Name" 54 | Value: "shelvery-test-ebs" 55 | 56 | -------------------------------------------------------------------------------- /shelvery_tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import boto3 3 | import os 4 | from shelvery.aws_helper import AwsHelper 5 | from botocore.exceptions import ClientError 6 | 7 | from shelvery_tests.cleanup_functions import cleanupSnapshots, cleanS3Bucket 8 | 9 | source_account = None 10 | destination_account = None 11 | 12 | def pytest_addoption(parser): 13 | parser.addoption("--destination", action="store", default="None") 14 | parser.addoption("--source", action="store", default="None") 15 | 16 | 17 | def pytest_configure(config): 18 | global source_account 19 | global destination_account 20 | 21 | source_account = config.getoption('--source') 22 | 
destination_account = config.getoption('--destination') 23 | 24 | # Add a check here on whether the stack already exists (eg: deleting/creating/available) 25 | def create_stack(cfclient): 26 | create_waiter = cfclient.get_waiter('stack_create_complete') 27 | cwd = os.getcwd() 28 | template_path = f"{cwd}/cloudformation-unittest.yaml" 29 | 30 | template = "" 31 | with open(template_path, "r") as file: 32 | template = file.read() 33 | 34 | cfclient.create_stack( 35 | StackName='shelvery-test', 36 | TemplateBody=template 37 | 38 | ) 39 | 40 | # Wait till stack is created 41 | create_waiter.wait( 42 | StackName='shelvery-test', 43 | WaiterConfig={ 44 | 'Delay': 30, 45 | 'MaxAttempts': 50 46 | } 47 | ) 48 | print('STACK CREATED') 49 | 50 | 51 | @pytest.fixture(scope="session", autouse=True) 52 | def setup(request): 53 | 54 | sts = AwsHelper.boto3_client('sts') 55 | id = str(sts.get_caller_identity()['Account']) 56 | 57 | if id == source_account: 58 | 59 | # Cleanup any existing snapshots after stack is created 60 | cleanupSnapshots() 61 | 62 | # Cleanup S3 Bucket 63 | cleanS3Bucket() 64 | 65 | def teardown(): 66 | print("Initiating Teardown") 67 | response = cfclient.delete_stack( 68 | StackName='shelvery-test', 69 | ) 70 | 71 | request.addfinalizer(teardown) 72 | 73 | cfclient = boto3.client('cloudformation') 74 | delete_waiter = cfclient.get_waiter('stack_delete_complete') 75 | 76 | # Get status of stack 77 | try: 78 | shelvery_status = cfclient.describe_stacks(StackName='shelvery-test')['Stacks'][0]['StackStatus'] 79 | if shelvery_status == 'DELETE_IN_PROGRESS' or shelvery_status == 'DELETE_COMPLETE': 80 | # Stack is deleting so wait till deleted 81 | delete_waiter.wait( 82 | StackName='shelvery-test', 83 | WaiterConfig={ 84 | 'Delay': 30, 85 | 'MaxAttempts': 50 86 | } 87 | ) 88 | # Finished deleting stack -> create new stack 89 | create_stack(cfclient=cfclient) 90 | 91 | except ClientError as error: 92 | if error.response['Error']['Code'] == 'ValidationError': 93 | # Stack does not exist so create it 94 | create_stack(cfclient=cfclient) 95 | else: 96 | raise error 97 | 98 | # Cleanup snapshots in destination account 99 | else: 100 | cleanupSnapshots() 101 | 102 | def teardown(): 103 | cleanupSnapshots() 104 | 105 | request.addfinalizer(teardown) 106 | -------------------------------------------------------------------------------- /shelvery_tests/data_bucket_test.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import json 4 | 5 | import unittest 6 | import boto3 7 | import pytest 8 | import os 9 | from shelvery.engine import ShelveryEngine 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery_tests.conftest import destination_account 12 | 13 | 14 | class DataBucketIntegrationTestCase(unittest.TestCase): 15 | """Shelvery data bucket integration tests""" 16 | 17 | 18 | @pytest.mark.source 19 | @pytest.mark.share 20 | def test_CreateDataBucket(self): 21 | 22 | s3client = AwsHelper.boto3_client('s3', region_name='ap-southeast-2') 23 | sts = AwsHelper.boto3_client('sts') 24 | id = str(sts.get_caller_identity()['Account']) 25 | os.environ['SHELVERY_MONO_THREAD'] = '1' 26 | 27 | share_with_id = destination_account 28 | os.environ["shelvery_share_aws_account_ids"] = str(share_with_id) 29 | 30 | engine = ShelveryEngine() 31 | 32 | print("Creating Data Buckets") 33 | engine.create_data_buckets() 34 | 35 | bucket_name = f"shelvery.data.{id}-ap-southeast-2.base2tools" 36
| 37 | response = s3client.get_bucket_policy( 38 | Bucket=bucket_name 39 | )['Policy'] 40 | 41 | policy = json.loads(response)['Statement'] 42 | 43 | print("Policy: " + str(policy)) 44 | 45 | valid = False 46 | 47 | # Add other checks on policy? 48 | 49 | for statement in policy: 50 | if statement['Effect'] == "Allow" and str(share_with_id) in statement['Principal']['AWS']: 51 | valid = True 52 | 53 | self.assertTrue(valid) 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /shelvery_tests/docdb_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import pytest 4 | import os 5 | from botocore.exceptions import WaiterError 6 | from shelvery.engine import ShelveryEngine 7 | from shelvery.runtime_config import RuntimeConfig 8 | from shelvery_tests.resources import DOCDB_RESOURCE_NAME, ResourceClass 9 | from shelvery_tests.test_functions import setup_source, compare_backups 10 | from shelvery.documentdb_backup import ShelveryDocumentDbBackup 11 | from shelvery.aws_helper import AwsHelper 12 | 13 | pwd = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | sys.path.append(f"{pwd}/..") 16 | sys.path.append(f"{pwd}/../shelvery") 17 | sys.path.append(f"{pwd}/shelvery") 18 | sys.path.append(f"{pwd}/lib") 19 | sys.path.append(f"{pwd}/../lib") 20 | 21 | 22 | 23 | print(f"Python lib path:\n{sys.path}") 24 | 25 | class DocDBTestClass(ResourceClass): 26 | 27 | def __init__(self): 28 | self.resource_name = DOCDB_RESOURCE_NAME 29 | self.backups_engine = ShelveryDocumentDbBackup() 30 | self.client = AwsHelper.boto3_client('docdb', region_name='ap-southeast-2') 31 | self.ARN = f"arn:aws:rds:{os.environ['AWS_DEFAULT_REGION']}:{AwsHelper.local_account_id()}:cluster:{self.resource_name}" 32 | 33 | def add_backup_tags(self): 34 | self.client.add_tags_to_resource( 35 | ResourceName=self.ARN, 36 | Tags=[{ 37 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 38 | 'Value': 'true' 39 | }, 40 | {'Key': 'Name', 41 | 'Value': self.resource_name 42 | } 43 | ] 44 | ) 45 | 46 | def wait_for_resource(self): 47 | waiter = AwsHelper.boto3_client('rds', region_name='ap-southeast-2').get_waiter('db_cluster_available') 48 | try: 49 | waiter.wait( 50 | DBClusterIdentifier=self.resource_name, 51 | WaiterConfig={ 52 | 'Delay': 30, 53 | 'MaxAttempts': 50 54 | } 55 | ) 56 | except WaiterError as error: 57 | print("Waiting for Doc DB Cluster Failed") 58 | print(error) 59 | raise error 60 | 61 | ######## Test Case 62 | class ShelveryDocDBIntegrationTestCase(unittest.TestCase): 63 | """Shelvery DocDB backups integration tests""" 64 | 65 | def id(self): 66 | return str(self.__class__) 67 | 68 | def setUp(self): 69 | # Complete initial setup 70 | self.created_snapshots = [] 71 | setup_source(self) 72 | # Instantiate resource test class 73 | docdb_test_class = DocDBTestClass() 74 | # Wait till DocDB Cluster is in an available state 75 | docdb_test_class.wait_for_resource() 76 | # Add tags to indicate backup 77 | docdb_test_class.add_backup_tags() 78 | 79 | @pytest.mark.source 80 | def test_CleanupDocDbBackup(self): 81 | print(f"Doc DB - Running cleanup test") 82 | # Create test resource class 83 | docdb_test_class = DocDBTestClass() 84 | backups_engine = docdb_test_class.backups_engine 85 | client = docdb_test_class.client 86 | # Create backups 87 | backups = backups_engine.create_backups() 88 | # Clean backups 89 | backups_engine.clean_backups() 90 | # 
Retrieve remaining backups 91 | snapshots = [ 92 | snapshot 93 | for backup in backups 94 | for snapshot in client.describe_db_cluster_snapshots( 95 | DBClusterIdentifier=docdb_test_class.resource_name, 96 | DBClusterSnapshotIdentifier=backup.backup_id 97 | )["DBClusterSnapshots"] 98 | ] 99 | print(f"Snapshots: {snapshots}") 100 | 101 | self.assertTrue(len(snapshots) == 0) 102 | 103 | @pytest.mark.source 104 | def test_CreateDocDbBackup(self): 105 | print("Running DocDB create backup test") 106 | # Instantiate test resource class 107 | docdb_test_class = DocDBTestClass() 108 | backups_engine = docdb_test_class.backups_engine 109 | 110 | # Create backups 111 | backups = backups_engine.create_backups() 112 | print(f"Created {len(backups)} backups for Doc DB cluster") 113 | 114 | # Compare backups 115 | for backup in backups: 116 | valid = compare_backups(self=self, backup=backup, backup_engine=backups_engine) 117 | 118 | # Clean backups 119 | print(f"Cleaning up DocDB Backups") 120 | backups_engine.clean_backups() 121 | 122 | #Validate backup 123 | self.assertTrue(valid, f"Backup {backup} is not valid") 124 | 125 | self.assertEqual(len(backups), 1, f"Expected 1 backup, but found {len(backups)}") 126 | 127 | @pytest.mark.source 128 | @pytest.mark.share 129 | def test_ShareDocDbBackup(self): 130 | print("Running Doc DB share backup test") 131 | 132 | # Instantiate test resource class 133 | docdb_test_class = DocDBTestClass() 134 | backups_engine = docdb_test_class.backups_engine 135 | client = docdb_test_class.client 136 | 137 | print("Creating shared backups") 138 | backups = backups_engine.create_backups() 139 | print(f"{len(backups)} shared backups created") 140 | 141 | for backup in backups: 142 | snapshot_id = backup.backup_id 143 | print(f"Checking if snapshot {snapshot_id} is shared with {self.share_with_id}") 144 | 145 | # Retrieve snapshots 146 | snapshots = client.describe_db_cluster_snapshots( 147 | DBClusterIdentifier=docdb_test_class.resource_name, 148 | DBClusterSnapshotIdentifier=backup.backup_id 149 | )["DBClusterSnapshots"] 150 | 151 | # Get attributes of snapshot 152 | attributes = client.describe_db_cluster_snapshot_attributes( 153 | DBClusterSnapshotIdentifier=snapshot_id 154 | )['DBClusterSnapshotAttributesResult']['DBClusterSnapshotAttributes'] 155 | 156 | # Check if snapshot is shared with destination account 157 | shared_with_destination = any( 158 | attr['AttributeName'] == 'restore' and self.share_with_id in attr['AttributeValues'] 159 | for attr in attributes 160 | ) 161 | 162 | # Assertions 163 | self.assertEqual(len(snapshots), 1, f"Expected 1 snapshot, but found {len(snapshots)}") 164 | self.assertTrue(shared_with_destination, f"Snapshot {snapshot_id} is not shared with {self.share_with_id}") 165 | 166 | if __name__ == '__main__': 167 | unittest.main() 168 | -------------------------------------------------------------------------------- /shelvery_tests/docdb_pull_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import pytest 5 | from shelvery_tests.docdb_integration_test import DocDBTestClass 6 | from shelvery_tests.test_functions import setup_destination 7 | from shelvery_tests.resources import DOCDB_RESOURCE_NAME 8 | 9 | 10 | pwd = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | sys.path.append(f"{pwd}/..") 13 | sys.path.append(f"{pwd}/../shelvery") 14 | sys.path.append(f"{pwd}/shelvery") 15 | sys.path.append(f"{pwd}/lib") 16 | sys.path.append(f"{pwd}/../lib") 17 
| 18 | class ShelveryDocDBPullTestCase(unittest.TestCase): 19 | 20 | @pytest.mark.destination 21 | def test_PullDocDbBackup(self): 22 | 23 | # Complete initial setup 24 | print(f"Doc DB - Running pull shared backups test") 25 | setup_destination(self) 26 | 27 | # Instantiate test resource class 28 | docdb_test_class = DocDBTestClass() 29 | backups_engine = docdb_test_class.backups_engine 30 | client = docdb_test_class.client 31 | 32 | # Clean residual existing snapshots 33 | backups_engine.clean_backups() 34 | 35 | # Pull shared backups 36 | backups_engine.pull_shared_backups() 37 | 38 | # Get post-pull snapshot count 39 | pulled_snapshots = client.describe_db_cluster_snapshots( 40 | DBClusterIdentifier=DOCDB_RESOURCE_NAME, 41 | SnapshotType='Manual' 42 | ) 43 | 44 | # Verify that only one snapshot was pulled 45 | self.assertEqual(len(pulled_snapshots['DBClusterSnapshots']), 1) 46 | 47 | @pytest.mark.cleanup 48 | def test_cleanup(self): 49 | # Instantiate test resource class 50 | docdb_test_class = DocDBTestClass() 51 | backups_engine = docdb_test_class.backups_engine 52 | # Clean backups 53 | backups_engine.clean_backups() 54 | 55 | -------------------------------------------------------------------------------- /shelvery_tests/ebs_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import pytest 4 | import os 5 | import time 6 | from botocore.exceptions import WaiterError 7 | from shelvery.engine import ShelveryEngine 8 | from shelvery.runtime_config import RuntimeConfig 9 | from shelvery_tests.test_functions import setup_source, compare_backups 10 | from shelvery.ebs_backup import ShelveryEBSBackup 11 | from shelvery.aws_helper import AwsHelper 12 | from shelvery_tests.resources import EBS_INSTANCE_RESOURCE_NAME, ResourceClass 13 | 14 | pwd = os.path.dirname(os.path.abspath(__file__)) 15 | 16 | sys.path.append(f"{pwd}/..") 17 | sys.path.append(f"{pwd}/../shelvery") 18 | sys.path.append(f"{pwd}/shelvery") 19 | sys.path.append(f"{pwd}/lib") 20 | sys.path.append(f"{pwd}/../lib") 21 | 22 | print(f"Python lib path:\n{sys.path}") 23 | 24 | class EBSTestClass(ResourceClass): 25 | 26 | def __init__(self): 27 | self.resource_name = EBS_INSTANCE_RESOURCE_NAME 28 | self.backups_engine = ShelveryEBSBackup() 29 | self.client = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2') 30 | self.resource_id = self.get_instance_id() 31 | 32 | def add_backup_tags(self): 33 | self.client.create_tags( 34 | Resources=[self.resource_id], 35 | Tags=[{ 36 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 37 | 'Value': 'true' 38 | }, 39 | {'Key': 'Name', 40 | 'Value': self.resource_name 41 | }] 42 | ) 43 | 44 | def get_instance_id(self): 45 | # Find EBS Volume 46 | search_filter = [{'Name': 'tag:Name', 'Values': [self.resource_name]}] 47 | # Get EBS volumes 48 | ebs_volumes = self.client.describe_volumes(Filters=search_filter) 49 | # Get volume ID 50 | try: 51 | return ebs_volumes['Volumes'][0]['VolumeId'] 52 | except (IndexError, KeyError): 53 | print("No EBS volumes found matching the given criteria.") 54 | return "" 55 | 56 | def wait_for_resource(self): 57 | waiter = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2').get_waiter('volume_available') 58 | try: 59 | waiter.wait( 60 | VolumeIds=[self.resource_id], 61 | WaiterConfig={ 62 | 'Delay': 30, 63 | 'MaxAttempts': 50 64 | } 65 | ) 66 | except WaiterError as error: 67 | print("Waiting for EBS Volume Failed") 68 | 
print(error) 69 | raise error 70 | 71 | class ShelveryEBSIntegrationTestCase(unittest.TestCase): 72 | """Shelvery EBS backups integration tests""" 73 | 74 | def id(self): 75 | return str(self.__class__) 76 | 77 | def setUp(self): 78 | # Complete initial setup 79 | self.created_snapshots = [] 80 | self.regional_snapshots = [] 81 | setup_source(self) 82 | # Instantiate resource test class 83 | ebs_test_class = EBSTestClass() 84 | # Wait till volume is in an available state 85 | ebs_test_class.wait_for_resource() 86 | # Add tags to indicate backup 87 | ebs_test_class.add_backup_tags() 88 | 89 | 90 | @pytest.mark.source 91 | def test_CleanupEbsBackup(self): 92 | print(f"EBS - Running cleanup test") 93 | # Create test resource class 94 | ebs_test_class = EBSTestClass() 95 | backups_engine = ebs_test_class.backups_engine 96 | client = ebs_test_class.client 97 | # Create backups 98 | backups = backups_engine.create_backups() 99 | # Clean backups 100 | backups_engine.clean_backups() 101 | # Retrieve remaining backups 102 | snapshots = [ 103 | snapshot 104 | for backup in backups 105 | for snapshot in client.describe_snapshots( 106 | Filters = [{ 107 | 'Name': 'tag:Name', 108 | 'Values': [backup.name] 109 | }] 110 | )['Snapshots'] 111 | ] 112 | print(f"Snapshots: {snapshots}") 113 | 114 | self.assertTrue(len(snapshots) == 0) 115 | 116 | 117 | @pytest.mark.source 118 | def test_CreateEbsBackup(self): 119 | print("Running EBS create backup test") 120 | # Create test resource class 121 | ebs_test_class = EBSTestClass() 122 | backups_engine = ebs_test_class.backups_engine 123 | 124 | # Create backups 125 | backups = backups_engine.create_backups() 126 | print(f"Created {len(backups)} backups for EBS Volume") 127 | 128 | # Compare backups 129 | for backup in backups: 130 | valid = compare_backups(self=self, backup=backup, backup_engine=backups_engine) 131 | 132 | # Clean backups 133 | print(f"Cleaning up EBS Backups") 134 | backups_engine.clean_backups() 135 | 136 | # Validate Backup 137 | self.assertTrue(valid, f"Backup {backup} is not valid") 138 | 139 | self.assertEqual(len(backups), 1, f"Expected 1 backup, but found {len(backups)}") 140 | 141 | @pytest.mark.source 142 | @pytest.mark.share 143 | def test_ShareEbsBackup(self): 144 | 145 | print("Running EBS share backup test") 146 | # Instantiate test resource class 147 | ebs_test_class = EBSTestClass() 148 | backups_engine = ebs_test_class.backups_engine 149 | client = ebs_test_class.client 150 | 151 | print("Creating shared backups") 152 | backups = backups_engine.create_backups() 153 | print(f"{len(backups)} shared backups created") 154 | 155 | for backup in backups: 156 | snapshot_id = backup.backup_id 157 | print(f"Checking if snapshot {snapshot_id} is shared with {self.share_with_id}") 158 | 159 | # Retrieve snapshot 160 | snapshots = client.describe_snapshots( 161 | Filters = [{ 162 | 'Name': 'tag:Name', 163 | 'Values': [backup.name] 164 | }] 165 | )['Snapshots'] 166 | 167 | snapshot_id = snapshots[0]['SnapshotId'] 168 | 169 | # retrieve the snapshot attributes 170 | response = client.describe_snapshot_attribute( 171 | SnapshotId=snapshot_id, 172 | Attribute='createVolumePermission' 173 | ) 174 | 175 | # check if the snapshot is shared with the destination account 176 | shared_with_destination = any( 177 | perm['UserId'] == self.share_with_id for perm in response.get('CreateVolumePermissions', []) 178 | ) 179 | 180 | # Assertions 181 | self.assertEqual(len(snapshots), 1, f"Expected 1 snapshot, but found {len(snapshots)}") 182 | 
self.assertTrue(shared_with_destination, f"Snapshot {snapshot_id} is not shared with {self.share_with_id}") 183 | 184 | 185 | def tearDown(self): 186 | print("Waiting 30s due to EBS Snapshot rate limit...") 187 | time.sleep(30) # EBS snapshot create rate limit, so wait ~30s 188 | 189 | if __name__ == '__main__': 190 | unittest.main() 191 | -------------------------------------------------------------------------------- /shelvery_tests/ebs_pull_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import pytest 5 | from shelvery_tests.ebs_integration_test import EBSTestClass 6 | from shelvery_tests.test_functions import setup_destination 7 | from shelvery_tests.resources import EBS_INSTANCE_RESOURCE_NAME 8 | 9 | 10 | pwd = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | sys.path.append(f"{pwd}/..") 13 | sys.path.append(f"{pwd}/../shelvery") 14 | sys.path.append(f"{pwd}/shelvery") 15 | sys.path.append(f"{pwd}/lib") 16 | sys.path.append(f"{pwd}/../lib") 17 | 18 | class ShelveryEBSPullTestCase(unittest.TestCase): 19 | 20 | @pytest.mark.destination 21 | def test_PullEBSBackup(self): 22 | 23 | # Complete initial setup 24 | print(f"EBS - Running pull shared backups test") 25 | setup_destination(self) 26 | 27 | # Create test resource class 28 | ebs_test_class = EBSTestClass() 29 | backups_engine = ebs_test_class.backups_engine 30 | client = ebs_test_class.client 31 | 32 | # Clean residual existing snapshots 33 | backups_engine.clean_backups() 34 | 35 | # Pull shared backups 36 | backups_engine.pull_shared_backups() 37 | 38 | # Get post-pull snapshot count 39 | search_filter = [{'Name':'tag:ResourceName', 40 | 'Values':[EBS_INSTANCE_RESOURCE_NAME] 41 | }] 42 | 43 | # Retrieve pulled snapshots from the shelvery-test stack 44 | snapshots = client.describe_snapshots( 45 | Filters=search_filter 46 | )['Snapshots'] 47 | 48 | # Verify that only one snapshot was pulled 49 | self.assertEqual(len(snapshots), 1) 50 | 51 | @pytest.mark.cleanup 52 | def test_cleanup(self): 53 | # Create test resource class 54 | ebs_test_class = EBSTestClass() 55 | backups_engine = ebs_test_class.backups_engine 56 | # Clean backups 57 | backups_engine.clean_backups() 58 | -------------------------------------------------------------------------------- /shelvery_tests/ec2ami_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import pytest 4 | import os 5 | from botocore.exceptions import WaiterError 6 | from shelvery.engine import ShelveryEngine 7 | from shelvery.runtime_config import RuntimeConfig 8 | from shelvery_tests.test_functions import setup_source, compare_backups 9 | from shelvery.ec2ami_backup import ShelveryEC2AMIBackup 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery_tests.resources import EC2_AMI_INSTANCE_RESOURCE_NAME, ResourceClass 12 | 13 | 14 | pwd = os.path.dirname(os.path.abspath(__file__)) 15 | 16 | sys.path.append(f"{pwd}/..") 17 | sys.path.append(f"{pwd}/../shelvery") 18 | sys.path.append(f"{pwd}/shelvery") 19 | sys.path.append(f"{pwd}/lib") 20 | sys.path.append(f"{pwd}/../lib") 21 | 22 | print(f"Python lib path:\n{sys.path}") 23 | 24 | import boto3 25 | class EC2AmiTestClass(ResourceClass): 26 | 27 | def __init__(self): 28 | self.resource_name = EC2_AMI_INSTANCE_RESOURCE_NAME 29 | self.backups_engine = ShelveryEC2AMIBackup() 30 | self.client = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2') 31 | 
self.resource_id = self.get_instance_id() 32 | 33 | def add_backup_tags(self): 34 | self.client.create_tags( 35 | Resources=[self.resource_id], 36 | Tags=[{ 37 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 38 | 'Value': 'true' 39 | }, 40 | {'Key': 'Name', 41 | 'Value': self.resource_name 42 | }] 43 | ) 44 | 45 | def get_instance_id(self): 46 | # Find EC2 instance 47 | search_filter = [ 48 | {'Name': 'tag:Name', 'Values': [EC2_AMI_INSTANCE_RESOURCE_NAME]}, 49 | {'Name': 'instance-state-name', 'Values': ['running']} 50 | ] 51 | 52 | # Get EC2 instance 53 | ec2_instance = self.client.describe_instances(Filters=search_filter) 54 | 55 | # Get instance ID 56 | try: 57 | return ec2_instance['Reservations'][0]['Instances'][0]['InstanceId'] 58 | except (IndexError, KeyError): 59 | print("No instance found matching the given criteria.") 60 | return "" 61 | 62 | def wait_for_resource(self): 63 | waiter = AwsHelper.boto3_client('ec2', region_name='ap-southeast-2').get_waiter('instance_running') 64 | try: 65 | waiter.wait( 66 | InstanceIds=[self.resource_id], 67 | WaiterConfig={ 68 | 'Delay': 30, 69 | 'MaxAttempts': 50 70 | } 71 | ) 72 | except WaiterError as error: 73 | print("Waiting for EC2 Instance Failed") 74 | print(error) 75 | raise error 76 | 77 | class ShelveryEC2AmiIntegrationTestCase(unittest.TestCase): 78 | """Shelvery EC2 AMI backups integration tests""" 79 | 80 | def id(self): 81 | return str(self.__class__) 82 | 83 | def setUp(self): 84 | # Complete initial setup 85 | self.created_snapshots = [] 86 | self.regional_snapshots = [] 87 | setup_source(self) 88 | # Instantiate resource test class 89 | ec2_ami_test_class = EC2AmiTestClass() 90 | # Wait till instance is in an available state 91 | ec2_ami_test_class.wait_for_resource() 92 | # Add tags to indicate backup 93 | ec2_ami_test_class.add_backup_tags() 94 | 95 | @pytest.mark.source 96 | def test_CleanupEC2AmiBackup(self): 97 | 98 | print(f"EC2 AMI - Running cleanup test") 99 | # Create test resource class 100 | ec2_ami_test_class = EC2AmiTestClass() 101 | backups_engine = ec2_ami_test_class.backups_engine 102 | client = ec2_ami_test_class.client 103 | # Create backups 104 | backups = backups_engine.create_backups() 105 | # Clean backups 106 | backups_engine.clean_backups() 107 | # Retrieve remaining backups 108 | snapshots = [ 109 | snapshot 110 | for backup in backups 111 | for snapshot in client.describe_snapshots( 112 | Filters = [{ 113 | 'Name': 'tag:Name', 114 | 'Values': [backup.name] 115 | }] 116 | )['Snapshots'] 117 | ] 118 | print(f"Snapshots: {snapshots}") 119 | 120 | self.assertTrue(len(snapshots) == 0) 121 | 122 | @pytest.mark.source 123 | def test_CreateEc2AmiBackup(self): 124 | print("Running EC2 AMI create backup test") 125 | # Create test resource class 126 | ec2_ami_test_class = EC2AmiTestClass() 127 | backups_engine = ec2_ami_test_class.backups_engine 128 | 129 | # Create backups 130 | backups = backups_engine.create_backups() 131 | print(f"Created {len(backups)} backups for EC2 Instance") 132 | 133 | # Compare backups 134 | for backup in backups: 135 | valid = compare_backups(self=self, backup=backup, backup_engine=backups_engine) 136 | 137 | # Clean backups 138 | print(f"Cleaning up EC2 AMI Backups") 139 | backups_engine.clean_backups() 140 | 141 | # Validate backup 142 | self.assertTrue(valid, f"Backup {backup} is not valid") 143 | 144 | self.assertEqual(len(backups), 1, f"Expected 1 backup, but found {len(backups)}") 145 | 146 | @pytest.mark.source 147 | 
@pytest.mark.share 148 | def test_ShareEc2AmiBackup(self): 149 | print("Running EC2 AMI share backup test") 150 | # Instantiate test resource class 151 | ec2_ami_test_class = EC2AmiTestClass() 152 | backups_engine = ec2_ami_test_class.backups_engine 153 | client = boto3.client('ec2') # ec2_ami_test_class.client 154 | 155 | print("Creating shared backups") 156 | backups = backups_engine.create_backups() 157 | print(f"{len(backups)} shared backups created") 158 | 159 | for backup in backups: 160 | snapshot_id = backup.backup_id 161 | print(f"Checking if snapshot {snapshot_id} is shared with {self.share_with_id}") 162 | 163 | # Retrieve snapshot 164 | snapshots = client.describe_snapshots( 165 | Filters = [{ 166 | 'Name': 'tag:Name', 167 | 'Values': [backup.name] 168 | }] 169 | )['Snapshots'] 170 | 171 | snapshot_id = snapshots[0]['SnapshotId'] 172 | 173 | # retrieve the snapshot attributes 174 | response = client.describe_snapshot_attribute( 175 | SnapshotId=snapshot_id, 176 | Attribute='createVolumePermission' 177 | ) 178 | 179 | # check if the snapshot is shared with the destination account 180 | shared_with_destination = any( 181 | perm['UserId'] == self.share_with_id for perm in response.get('CreateVolumePermissions', []) 182 | ) 183 | 184 | # Assertions 185 | self.assertEqual(len(snapshots), 1, f"Expected 1 snapshot, but found {len(snapshots)}") 186 | self.assertTrue(shared_with_destination, f"Snapshot {snapshot_id} is not shared with {self.share_with_id}") 187 | 188 | 189 | if __name__ == '__main__': 190 | unittest.main() 191 | -------------------------------------------------------------------------------- /shelvery_tests/ec2ami_pull_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import pytest 5 | from shelvery_tests.ec2ami_integration_test import EC2AmiTestClass 6 | from shelvery_tests.test_functions import setup_destination 7 | from shelvery_tests.resources import EC2_AMI_INSTANCE_RESOURCE_NAME 8 | pwd = os.path.dirname(os.path.abspath(__file__)) 9 | 10 | sys.path.append(f"{pwd}/..") 11 | sys.path.append(f"{pwd}/../shelvery") 12 | sys.path.append(f"{pwd}/shelvery") 13 | sys.path.append(f"{pwd}/lib") 14 | sys.path.append(f"{pwd}/../lib") 15 | 16 | class ShelveryEC2AmiPullTestCase(unittest.TestCase): 17 | 18 | @pytest.mark.destination 19 | def test_PullEC2Backup(self): 20 | # Complete initial setup 21 | print(f"EC2 AMI - Running pull shared backups test") 22 | setup_destination(self) 23 | 24 | # Create test resource class 25 | ec2_ami_test_class = EC2AmiTestClass() 26 | backups_engine = ec2_ami_test_class.backups_engine 27 | client = ec2_ami_test_class.client 28 | 29 | # Clean residual existing snapshots 30 | backups_engine.clean_backups() 31 | 32 | # Pull shared backups 33 | backups_engine.pull_shared_backups() 34 | 35 | # Get post-pull image count 36 | search_filter = [{'Name':'tag:ResourceName', 37 | 'Values':[EC2_AMI_INSTANCE_RESOURCE_NAME] 38 | }] 39 | 40 | # Retrieve pulled images from the shelvery-test stack 41 | amis = client.describe_images( 42 | Filters=search_filter 43 | )["Images"] 44 | 45 | # Verify that only one image was pulled 46 | self.assertEqual(len(amis), 1) 47 | 48 | @pytest.mark.cleanup 49 | def test_cleanup(self): 50 | # Create test resource class 51 | ec2_ami_test_class = EC2AmiTestClass() 52 | backups_engine = ec2_ami_test_class.backups_engine 53 | # Clean backups 54 | backups_engine.clean_backups() 55 | 
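Note: the engine_test.py file below exercises ShelveryFactory, the same entry point used by the CLI (shelvery_cli) and the Lambda handler above. As a minimal sketch of driving an engine directly, assuming valid AWS credentials and the shelvery_* environment variables shown in the tests above:

    from shelvery.factory import ShelveryFactory

    # 'ebs' maps to ShelveryEBSBackup; see the factory tests below for other types
    engine = ShelveryFactory.get_shelvery_instance('ebs')

    # snapshot every resource tagged for backup, then prune expired backups
    backups = engine.create_backups()
    engine.clean_backups()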
-------------------------------------------------------------------------------- /shelvery_tests/engine_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | import os 4 | import pytest 5 | pwd = os.path.dirname(os.path.abspath(__file__)) 6 | sys.path.append(f"{pwd}/../shelvery") 7 | sys.path.append(f"{pwd}/shelvery") 8 | sys.path.append(f"{pwd}/lib") 9 | sys.path.append(f"{pwd}/../lib") 10 | 11 | from shelvery.ebs_backup import ShelveryEBSBackup 12 | from shelvery.rds_backup import ShelveryRDSBackup 13 | from shelvery.factory import ShelveryFactory 14 | from shelvery.ec2ami_backup import ShelveryEC2AMIBackup 15 | from shelvery.rds_cluster_backup import ShelveryRDSClusterBackup 16 | 17 | print(f"Python lib path:\n{sys.path}") 18 | 19 | 20 | class ShelveryFactoryTestCase(unittest.TestCase): 21 | """Shelvery factory unit tests""" 22 | 23 | def id(self): 24 | return str(self.__class__) 25 | 26 | def setUp(self): 27 | print(f"Setting up unit tests") 28 | 29 | def tearDown(self): 30 | print(f"Tearing down unit tests") 31 | 32 | @pytest.mark.source 33 | def test_getEbsShelvery(self): 34 | instance = ShelveryFactory.get_shelvery_instance('ebs') 35 | self.assertTrue(isinstance(instance, ShelveryEBSBackup)) 36 | 37 | @pytest.mark.source 38 | def test_getRdsShelvery(self): 39 | instance = ShelveryFactory.get_shelvery_instance('rds') 40 | self.assertTrue(isinstance(instance, ShelveryRDSBackup)) 41 | 42 | @pytest.mark.source 43 | def test_getRdsClusterBackup(self): 44 | instance = ShelveryFactory.get_shelvery_instance('rds_cluster') 45 | self.assertTrue(isinstance(instance, ShelveryRDSClusterBackup)) 46 | 47 | @pytest.mark.source 48 | def test_getEc2AmiBackup(self): 49 | instance = ShelveryFactory.get_shelvery_instance('ec2ami') 50 | self.assertTrue(isinstance(instance, ShelveryEC2AMIBackup)) 51 | 52 | 53 | if __name__ == '__main__': 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /shelvery_tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | source: source account 4 | destination: destination account 5 | share: share resources 6 | cleanup: cleanup things 7 | 8 | #log_cli = true 9 | 10 | 11 | -------------------------------------------------------------------------------- /shelvery_tests/rds_cluster_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import pytest 4 | import os 5 | from botocore.exceptions import WaiterError 6 | from shelvery.engine import ShelveryEngine 7 | from shelvery.runtime_config import RuntimeConfig 8 | from shelvery_tests.test_functions import setup_source, compare_backups 9 | from shelvery.rds_cluster_backup import ShelveryRDSClusterBackup 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery_tests.resources import RDS_CLUSTER_RESOURCE_NAME, ResourceClass 12 | 13 | pwd = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | sys.path.append(f"{pwd}/..") 16 | sys.path.append(f"{pwd}/../shelvery") 17 | sys.path.append(f"{pwd}/shelvery") 18 | sys.path.append(f"{pwd}/lib") 19 | sys.path.append(f"{pwd}/../lib") 20 | 21 | print(f"Python lib path:\n{sys.path}") 22 | 23 | class RDSClusterTestClass(ResourceClass): 24 | 25 | def __init__(self): 26 | self.resource_name = RDS_CLUSTER_RESOURCE_NAME 27 | self.backups_engine = ShelveryRDSClusterBackup() 28 | 
self.client = AwsHelper.boto3_client('rds', region_name='ap-southeast-2') 29 | self.ARN = f"arn:aws:rds:{os.environ['AWS_DEFAULT_REGION']}:{AwsHelper.local_account_id()}:cluster:{self.resource_name}" 30 | 31 | def add_backup_tags(self): 32 | self.client.add_tags_to_resource( 33 | ResourceName=self.ARN, 34 | Tags=[{ 35 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 36 | 'Value': 'true' 37 | }, 38 | {'Key': 'Name', 39 | 'Value': self.resource_name 40 | } 41 | ] 42 | ) 43 | 44 | def wait_for_resource(self): 45 | waiter = AwsHelper.boto3_client('rds', region_name='ap-southeast-2').get_waiter('db_cluster_available') 46 | try: 47 | waiter.wait( 48 | DBClusterIdentifier=self.resource_name, 49 | WaiterConfig={ 50 | 'Delay': 30, 51 | 'MaxAttempts': 50 52 | } 53 | ) 54 | except WaiterError as error: 55 | print("Waiting for RDS Cluster Failed") 56 | print(error) 57 | raise error 58 | 59 | 60 | class ShelveryRDSClusterIntegrationTestCase(unittest.TestCase): 61 | """Shelvery RDS cluster backups integration tests""" 62 | 63 | def id(self): 64 | return str(self.__class__) 65 | 66 | 67 | def setUp(self): 68 | # Complete initial setup 69 | self.created_snapshots = [] 70 | setup_source(self) 71 | # Instantiate resource test class 72 | rds_cluster_test_class = RDSClusterTestClass() 73 | # Wait till RDS Cluster is in an available state 74 | rds_cluster_test_class.wait_for_resource() 75 | # Add tags to indicate backup 76 | rds_cluster_test_class.add_backup_tags() 77 | 78 | @pytest.mark.source 79 | def test_CleanupRdsClusterBackup(self): 80 | print(f"RDS Cluster - Running cleanup test") 81 | # Create test resource class 82 | rds_cluster_test_class = RDSClusterTestClass() 83 | backups_engine = rds_cluster_test_class.backups_engine 84 | client = rds_cluster_test_class.client 85 | # Create backups 86 | backups = backups_engine.create_backups() 87 | # Clean backups 88 | backups_engine.clean_backups() 89 | # Retrieve remaining backups 90 | snapshots = [ 91 | snapshot 92 | for backup in backups 93 | for snapshot in client.describe_db_cluster_snapshots( 94 | DBClusterIdentifier=rds_cluster_test_class.resource_name, 95 | DBClusterSnapshotIdentifier=backup.backup_id 96 | )["DBClusterSnapshots"] 97 | ] 98 | print(f"Snapshots: {snapshots}") 99 | 100 | self.assertTrue(len(snapshots) == 0) 101 | 102 | @pytest.mark.source 103 | def test_CreateRdsClusterBackup(self): 104 | print("Running RDS Cluster create backup test") 105 | # Instantiate test resource class 106 | rds_cluster_test_class = RDSClusterTestClass() 107 | backups_engine = rds_cluster_test_class.backups_engine 108 | 109 | # Create backups 110 | backups = backups_engine.create_backups() 111 | print(f"Created {len(backups)} backups for RDS Cluster") 112 | 113 | # Compare backups 114 | for backup in backups: 115 | valid = compare_backups(self=self, backup=backup, backup_engine=backups_engine) 116 | 117 | # Clean backups 118 | print(f"Cleaning up RDS Cluster Backups") 119 | backups_engine.clean_backups() 120 | 121 | # Validate backups 122 | self.assertTrue(valid, f"Backup {backup} is not valid") 123 | 124 | self.assertEqual(len(backups), 1, f"Expected 1 backup, but found {len(backups)}") 125 | 126 | @pytest.mark.source 127 | @pytest.mark.share 128 | def test_ShareRdsClusterBackup(self): 129 | print("Running RDS Cluster share backup test") 130 | 131 | # Instantiate test resource class 132 | rds_cluster_test_class = RDSClusterTestClass() 133 | backups_engine = rds_cluster_test_class.backups_engine 134 | client = 
rds_cluster_test_class.client 135 | 136 | print("Creating shared backups") 137 | backups = backups_engine.create_backups() 138 | print(f"{len(backups)} shared backups created") 139 | 140 | for backup in backups: 141 | snapshot_id = backup.backup_id 142 | print(f"Checking if snapshot {snapshot_id} is shared with {self.share_with_id}") 143 | 144 | # Retrieve snapshots 145 | snapshots = client.describe_db_cluster_snapshots( 146 | DBClusterIdentifier=rds_cluster_test_class.resource_name, 147 | DBClusterSnapshotIdentifier=backup.backup_id 148 | )["DBClusterSnapshots"] 149 | 150 | # Get attributes of snapshot 151 | attributes = client.describe_db_cluster_snapshot_attributes( 152 | DBClusterSnapshotIdentifier=snapshot_id 153 | )['DBClusterSnapshotAttributesResult']['DBClusterSnapshotAttributes'] 154 | 155 | # Check if snapshot is shared with destination account 156 | shared_with_destination = any( 157 | attr['AttributeName'] == 'restore' and self.share_with_id in attr['AttributeValues'] 158 | for attr in attributes 159 | ) 160 | 161 | # Assertions 162 | self.assertEqual(len(snapshots), 1, f"Expected 1 snapshot, but found {len(snapshots)}") 163 | self.assertTrue(shared_with_destination, f"Snapshot {snapshot_id} is not shared with {self.share_with_id}") 164 | 165 | 166 | if __name__ == '__main__': 167 | unittest.main() 168 | -------------------------------------------------------------------------------- /shelvery_tests/rds_cluster_pull_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import os 4 | import pytest 5 | from shelvery_tests.rds_cluster_integration_test import RDSClusterTestClass 6 | from shelvery_tests.test_functions import setup_destination 7 | from shelvery_tests.resources import RDS_CLUSTER_RESOURCE_NAME 8 | 9 | pwd = os.path.dirname(os.path.abspath(__file__)) 10 | 11 | sys.path.append(f"{pwd}/..") 12 | sys.path.append(f"{pwd}/../shelvery") 13 | sys.path.append(f"{pwd}/shelvery") 14 | sys.path.append(f"{pwd}/lib") 15 | sys.path.append(f"{pwd}/../lib") 16 | 17 | class ShelveryRDSClusterPullTestCase(unittest.TestCase): 18 | 19 | @pytest.mark.destination 20 | def test_PullRdsClusterBackup(self): 21 | 22 | # Complete initial setup 23 | print(f"RDS Cluster - Running pull shared backups test") 24 | setup_destination(self) 25 | 26 | # Instantiate test resource class 27 | rds_cluster_test_class = RDSClusterTestClass() 28 | backups_engine = rds_cluster_test_class.backups_engine 29 | client = rds_cluster_test_class.client 30 | 31 | # Clean residual existing snapshots 32 | backups_engine.clean_backups() 33 | 34 | # Pull shared backups 35 | backups_engine.pull_shared_backups() 36 | 37 | # Get post-pull snapshot count 38 | pulled_snapshots = client.describe_db_cluster_snapshots( 39 | DBClusterIdentifier=RDS_CLUSTER_RESOURCE_NAME, 40 | SnapshotType='Manual' 41 | ) 42 | 43 | # Verify that only one snapshot was pulled 44 | self.assertEqual(len(pulled_snapshots['DBClusterSnapshots']), 1) 45 | 46 | @pytest.mark.cleanup 47 | def test_cleanup(self): 48 | # Instantiate test resource class 49 | rds_cluster_test_class = RDSClusterTestClass() 50 | backups_engine = rds_cluster_test_class.backups_engine 51 | # Clean backups 52 | backups_engine.clean_backups() 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /shelvery_tests/rds_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 
import pytest 4 | import os 5 | from botocore.exceptions import WaiterError 6 | from shelvery.engine import ShelveryEngine 7 | from shelvery.runtime_config import RuntimeConfig 8 | from shelvery_tests.test_functions import setup_source, compare_backups 9 | from shelvery.rds_backup import ShelveryRDSBackup 10 | from shelvery.aws_helper import AwsHelper 11 | from shelvery_tests.resources import RDS_INSTANCE_RESOURCE_NAME, ResourceClass 12 | 13 | pwd = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | sys.path.append(f"{pwd}/..") 16 | sys.path.append(f"{pwd}/../shelvery") 17 | sys.path.append(f"{pwd}/shelvery") 18 | sys.path.append(f"{pwd}/lib") 19 | sys.path.append(f"{pwd}/../lib") 20 | 21 | print(f"Python lib path:\n{sys.path}") 22 | 23 | class RDSInstanceTestClass(ResourceClass): 24 | 25 | def __init__(self): 26 | self.resource_name = RDS_INSTANCE_RESOURCE_NAME 27 | self.backups_engine = ShelveryRDSBackup() 28 | self.client = AwsHelper.boto3_client('rds', region_name='ap-southeast-2') 29 | self.ARN = f"arn:aws:rds:{os.environ['AWS_DEFAULT_REGION']}:{AwsHelper.local_account_id()}:db:{self.resource_name}" 30 | 31 | def add_backup_tags(self): 32 | self.client.add_tags_to_resource( 33 | ResourceName=self.ARN, 34 | Tags=[{ 35 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 36 | 'Value': 'true' 37 | }, 38 | {'Key': 'Name', 39 | 'Value': self.resource_name 40 | } 41 | ] 42 | ) 43 | 44 | def wait_for_resource(self): 45 | waiter = AwsHelper.boto3_client('rds', region_name='ap-southeast-2').get_waiter('db_instance_available') 46 | try: 47 | waiter.wait( 48 | DBInstanceIdentifier=self.resource_name, 49 | WaiterConfig={ 50 | 'Delay': 30, 51 | 'MaxAttempts': 50 52 | } 53 | ) 54 | except WaiterError as error: 55 | print("Waiting for RDS Instance Failed") 56 | print(error) 57 | raise error 58 | 59 | ######## Test Case 60 | class ShelveryRDSIntegrationTestCase(unittest.TestCase): 61 | """Shelvery RDS instance backups integration tests""" 62 | 63 | def id(self): 64 | return str(self.__class__) 65 | 66 | def setUp(self): 67 | # Complete initial setup 68 | self.created_snapshots = [] 69 | setup_source(self) 70 | # Instantiate resource test class 71 | rds_instance_test_class = RDSInstanceTestClass() 72 | # Wait till RDS Instance is in an available state 73 | rds_instance_test_class.wait_for_resource() 74 | # Add tags to indicate backup 75 | rds_instance_test_class.add_backup_tags() 76 | 77 | @pytest.mark.source 78 | def test_CleanupRdsInstanceBackup(self): 79 | print(f"RDS Instance - Running cleanup test") 80 | # Create test resource class 81 | rds_instance_test_class = RDSInstanceTestClass() 82 | backups_engine = rds_instance_test_class.backups_engine 83 | client = rds_instance_test_class.client 84 | # Create backups 85 | backups = backups_engine.create_backups() 86 | # Clean backups 87 | backups_engine.clean_backups() 88 | # Retrieve remaining backups 89 | snapshots = [ 90 | snapshot 91 | for backup in backups 92 | for snapshot in client.describe_db_snapshots( 93 | DBInstanceIdentifier=rds_instance_test_class.resource_name, 94 | DBSnapshotIdentifier=backup.backup_id 95 | )["DBSnapshots"] 96 | ] 97 | print(f"Snapshots: {snapshots}") 98 | 99 | self.assertTrue(len(snapshots) == 0) 100 | 101 | @pytest.mark.source 102 | def test_CreateRdsInstanceBackup(self): 103 | print("Running RDS Instance create backup test") 104 | # Instantiate test resource class 105 | rds_instance_test_class = RDSInstanceTestClass() 106 | backups_engine = 
rds_instance_test_class.backups_engine 107 | 108 | # Create backups 109 | backups = backups_engine.create_backups() 110 | print(f"Created {len(backups)} backups for RDS Instance") 111 | 112 | # Compare backups 113 | for backup in backups: 114 | valid = compare_backups(self=self, backup=backup, backup_engine=backups_engine) 115 | 116 | # Clean backups 117 | print(f"Cleaning up RDS Instance Backups") 118 | backups_engine.clean_backups() 119 | 120 | # Validate backups 121 | self.assertTrue(valid, f"Backup {backup} is not valid") 122 | 123 | self.assertEqual(len(backups), 1, f"Expected 1 backup, but found {len(backups)}") 124 | 125 | @pytest.mark.source 126 | @pytest.mark.share 127 | def test_ShareRdsInstanceBackup(self): 128 | 129 | print("Running RDS Instance share backup test") 130 | 131 | # Instantiate test resource class 132 | rds_instance_test_class = RDSInstanceTestClass() 133 | backups_engine = rds_instance_test_class.backups_engine 134 | client = rds_instance_test_class.client 135 | 136 | print("Creating shared backups") 137 | backups = backups_engine.create_backups() 138 | print(f"{len(backups)} shared backups created") 139 | 140 | for backup in backups: 141 | snapshot_id = backup.backup_id 142 | print(f"Checking if snapshot {snapshot_id} is shared with {self.share_with_id}") 143 | 144 | # Retrieve snapshots for this backup 145 | snapshots = [ 146 | snapshot 147 | # only snapshots matching the current backup id 148 | for snapshot in client.describe_db_snapshots( 149 | DBInstanceIdentifier=rds_instance_test_class.resource_name, 150 | DBSnapshotIdentifier=backup.backup_id 151 | )["DBSnapshots"] 152 | ] 153 | 154 | # Get attributes of snapshot 155 | attributes = client.describe_db_snapshot_attributes( 156 | DBSnapshotIdentifier=snapshot_id 157 | )['DBSnapshotAttributesResult']['DBSnapshotAttributes'] 158 | 159 | # Check if snapshot is shared with destination account 160 | shared_with_destination = any( 161 | attr['AttributeName'] == 'restore' and self.share_with_id in attr['AttributeValues'] 162 | for attr in attributes 163 | ) 164 | 165 | # Assertions 166 | self.assertEqual(len(snapshots), 1, f"Expected 1 snapshot, but found {len(snapshots)}") 167 | self.assertTrue(shared_with_destination, f"Snapshot {snapshot_id} is not shared with {self.share_with_id}") 168 | 169 | if __name__ == '__main__': 170 | unittest.main() 171 | -------------------------------------------------------------------------------- /shelvery_tests/rds_pull_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | import pytest 4 | import os 5 | from shelvery_tests.rds_integration_test import RDSInstanceTestClass 6 | from shelvery_tests.test_functions import setup_destination 7 | from shelvery_tests.resources import RDS_INSTANCE_RESOURCE_NAME 8 | 9 | pwd = os.path.dirname(os.path.abspath(__file__)) 10 | 11 | sys.path.append(f"{pwd}/..") 12 | sys.path.append(f"{pwd}/../shelvery") 13 | sys.path.append(f"{pwd}/shelvery") 14 | sys.path.append(f"{pwd}/lib") 15 | sys.path.append(f"{pwd}/../lib") 16 | 17 | class ShelveryRDSPullTestCase(unittest.TestCase): 18 | 19 | @pytest.mark.destination 20 | def test_PullRdsBackup(self): 21 | 22 | # Complete initial setup 23 | print(f"RDS Instance - Running pull shared backups test") 24 | setup_destination(self) 25 | 26 | # Create test resource class 27 | rds_instance_test_class = RDSInstanceTestClass() 28 | backups_engine = rds_instance_test_class.backups_engine 29 | client = rds_instance_test_class.client 30 | 31 | # Clean residual existing snapshots 
32 | backups_engine.clean_backups() 33 | 34 | # Pull shared backups 35 | backups_engine.pull_shared_backups() 36 | 37 | # Get post-pull snapshot count 38 | pulled_snapshots = client.describe_db_snapshots( 39 | DBInstanceIdentifier=RDS_INSTANCE_RESOURCE_NAME, 40 | SnapshotType='Manual' 41 | ) 42 | 43 | # Verify that only one snapshot was pulled 44 | self.assertEqual(len(pulled_snapshots["DBSnapshots"]), 1) 45 | 46 | @pytest.mark.cleanup 47 | def test_cleanup(self): 48 | # Instantiate test resource class 49 | rds_instance_test_class = RDSInstanceTestClass() 50 | backups_engine = rds_instance_test_class.backups_engine 51 | # Clean backups 52 | backups_engine.clean_backups() 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /shelvery_tests/resources.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | DOCDB_RESOURCE_NAME = 'shelvery-test-docdb' 4 | RDS_INSTANCE_RESOURCE_NAME = 'shelvery-test-rds' 5 | RDS_CLUSTER_RESOURCE_NAME = 'shelvery-test-rds-cluster' 6 | EC2_AMI_INSTANCE_RESOURCE_NAME = 'shelvery-test-ec2' 7 | EBS_INSTANCE_RESOURCE_NAME = 'shelvery-test-ebs' 8 | class ResourceClass(ABC): 9 | 10 | def __init__(self): 11 | self.resource_name = None 12 | self.backups_engine = None 13 | self.client = None 14 | 15 | @abstractmethod 16 | def add_backup_tags(self): 17 | pass 18 | 19 | @abstractmethod 20 | def wait_for_resource(self): 21 | pass -------------------------------------------------------------------------------- /shelvery_tests/s3data_integration_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import traceback 3 | import unittest 4 | import yaml 5 | import boto3 6 | import os 7 | import time 8 | import botocore 9 | import pytest 10 | from datetime import datetime 11 | 12 | pwd = os.path.dirname(os.path.abspath(__file__)) 13 | 14 | sys.path.append(f"{pwd}/..") 15 | sys.path.append(f"{pwd}/../shelvery") 16 | sys.path.append(f"{pwd}/shelvery") 17 | sys.path.append(f"{pwd}/lib") 18 | sys.path.append(f"{pwd}/../lib") 19 | 20 | from shelvery.ebs_backup import ShelveryEBSBackup 21 | from shelvery.engine import ShelveryEngine 22 | from shelvery.engine import S3_DATA_PREFIX 23 | from shelvery.runtime_config import RuntimeConfig 24 | from shelvery.backup_resource import BackupResource 25 | from shelvery.aws_helper import AwsHelper 26 | 27 | 28 | print(f"Python lib path:\n{sys.path}") 29 | 30 | 31 | class ShelveryS3DataTestCase(unittest.TestCase): 32 | """Shelvery S3 data integration tests""" 33 | 34 | def id(self): 35 | return str(self.__class__) 36 | 37 | def setUp(self): 38 | self.volume = None 39 | self.created_snapshots = [] 40 | self.regional_snapshots = { 41 | 'us-west-1': [], 42 | 'us-west-2': [] 43 | } 44 | 45 | print(f"Setting up EBS integration test") 46 | print("Create EBS Volume of 1G") 47 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 48 | os.environ['SHELVERY_MONO_THREAD'] = '1' 49 | ec2client = AwsHelper.boto3_client('ec2') 50 | sts = AwsHelper.boto3_client('sts') 51 | self.id = sts.get_caller_identity() 52 | print(f"Running as user:\n{self.id}\n") 53 | self.volume = ec2client.create_volume(AvailabilityZone='us-east-1a', 54 | Encrypted=False, 55 | Size=1, 56 | VolumeType='gp2', 57 | TagSpecifications=[{ 58 | 'ResourceType': 'volume', 59 | 'Tags': [{ 60 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 61 | 'Value': 'true' 62 | }, {'Key': 
'Name', 'Value': 'shelvery-automated-shelvery_tests'}] 63 | }]) 64 | 65 | # wait until volume is available 66 | interm_volume = ec2client.describe_volumes(VolumeIds=[self.volume['VolumeId']])['Volumes'][0] 67 | while interm_volume['State'] != 'available': 68 | time.sleep(5) 69 | interm_volume = ec2client.describe_volumes(VolumeIds=[self.volume['VolumeId']])['Volumes'][0] 70 | 71 | print(f"Created volume: {self.volume}") 72 | 73 | self.share_with_id = int(self.id['Account']) + 1 74 | os.environ['shelvery_select_entity'] = self.volume['VolumeId'] 75 | 76 | def tearDown(self): 77 | ec2client = AwsHelper.boto3_client('ec2') 78 | ec2client.delete_volume(VolumeId=self.volume['VolumeId']) 79 | print(f"Deleted volume:\n{self.volume['VolumeId']}\n") 80 | 81 | # snapshot deletion surrounded with try/except in order 82 | # for cases when shelvery cleans / does not clean up behind itself 83 | for snapid in self.created_snapshots: 84 | print(f"Deleting snapshot {snapid}") 85 | try: 86 | ec2client.delete_snapshot(SnapshotId=snapid) 87 | except Exception as e: 88 | print(f"Failed to delete {snapid}:{str(e)}") 89 | 90 | for region in self.regional_snapshots: 91 | ec2regional = AwsHelper.boto3_client('ec2', region_name=region) 92 | for snapid in self.regional_snapshots[region]: 93 | try: 94 | ec2regional.delete_snapshot(SnapshotId=snapid) 95 | except Exception as e: 96 | print(f"Failed to delete {snapid}:{str(e)}") 97 | 98 | @pytest.mark.source 99 | def test_CreateBackupData(self): 100 | ebs_backups_engine = ShelveryEBSBackup() 101 | try: 102 | backups = ebs_backups_engine.create_backups() 103 | except Exception as e: 104 | print(e) 105 | print(f"Failed with {e}") 106 | traceback.print_exc(file=sys.stdout) 107 | raise e 108 | ec2client = AwsHelper.boto3_client('ec2') 109 | 110 | valid = False 111 | # validate there is a backup for the created volume 112 | for backup in backups: 113 | if backup.entity_id == self.volume['VolumeId']: 114 | snapshot_id = backup.backup_id 115 | self.created_snapshots.append(snapshot_id) 116 | 117 | # wait for snapshot to become available 118 | ebs_backups_engine.wait_backup_available(backup.region, backup.backup_id, None, None) 119 | 120 | # allow buffer period for engine to write data to s3 121 | time.sleep(20) 122 | 123 | # this is the backup that gets stored in s3 124 | engine_backup = ebs_backups_engine.get_backup_resource(backup.region, backup.backup_id) 125 | # verify the s3 data 126 | account_id = ebs_backups_engine.account_id 127 | s3path = f"{S3_DATA_PREFIX}/{ebs_backups_engine.get_engine_type()}/{engine_backup.name}.yaml" 128 | s3bucket = ebs_backups_engine.get_local_bucket_name() 129 | print(f"Using bucket {s3bucket}") 130 | print(f"Using path {s3path}") 131 | bucket = boto3.resource('s3').Bucket(s3bucket) 132 | object = bucket.Object(s3path) 133 | content = object.get()['Body'].read() 134 | restored_br = yaml.load(content, Loader=yaml.Loader) 135 | self.assertEqual(restored_br.backup_id, engine_backup.backup_id) 136 | self.assertEqual(restored_br.name, engine_backup.name) 137 | self.assertEqual(restored_br.region, engine_backup.region) 138 | print(f"Tags restored: \n{yaml.dump(restored_br.tags)}\n") 139 | print(f"Tags backup: \n{yaml.dump(engine_backup.tags)}\n") 140 | self.assertEqual(restored_br.tags['Name'], engine_backup.tags['Name']) 141 | for tag in ['name','date_created','entity_id','region','retention_type']: 142 | self.assertEqual( 143 | restored_br.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"], 144 | 
engine_backup.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"] 145 | ) 146 | valid = True 147 | 148 | self.assertTrue(valid) 149 | 150 | @pytest.mark.source 151 | def test_CreateSharingInfo(self): 152 | ebs_backups_engine = ShelveryEBSBackup() 153 | try: 154 | os.environ["shelvery_share_aws_account_ids"] = str(self.share_with_id) 155 | backups = ebs_backups_engine.create_backups() 156 | except Exception as e: 157 | print(e) 158 | print(f"Failed with {e}") 159 | traceback.print_exc(file=sys.stdout) 160 | raise e 161 | finally: 162 | del os.environ["shelvery_share_aws_account_ids"] 163 | 164 | valid = False 165 | for backup in backups: 166 | if backup.entity_id == self.volume['VolumeId']: 167 | account_id = ebs_backups_engine.account_id 168 | s3path = f"{S3_DATA_PREFIX}/shared/{self.share_with_id}/{ebs_backups_engine.get_engine_type()}/{backup.name}.yaml" 169 | s3bucket = ebs_backups_engine.get_local_bucket_name() 170 | bucket = boto3.resource('s3').Bucket(s3bucket) 171 | object = bucket.Object(s3path) 172 | content = object.get()['Body'].read() 173 | restored_br = yaml.load(content, Loader=yaml.Loader) 174 | engine_backup = ebs_backups_engine.get_backup_resource(backup.region, backup.backup_id) 175 | self.assertEqual(restored_br.backup_id, engine_backup.backup_id) 176 | self.assertEqual(restored_br.name, engine_backup.name) 177 | self.assertEqual(restored_br.region, engine_backup.region) 178 | print(engine_backup.name) 179 | for tag in ['name','date_created','entity_id','region','retention_type']: 180 | self.assertEqual( 181 | restored_br.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"], 182 | engine_backup.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"] 183 | ) 184 | valid = True 185 | 186 | self.assertTrue(valid) 187 | 188 | @pytest.mark.source 189 | def test_CleanBackupData(self): 190 | ebs_backups_engine = ShelveryEBSBackup() 191 | try: 192 | backups = ebs_backups_engine.create_backups() 193 | except Exception as e: 194 | print(e) 195 | print(f"Failed with {e}") 196 | traceback.print_exc(file=sys.stdout) 197 | raise e 198 | ec2client = AwsHelper.boto3_client('ec2') 199 | 200 | valid = False 201 | # validate the backup is cleaned up once its retention period expires 202 | for backup in backups: 203 | if backup.entity_id == self.volume['VolumeId']: 204 | snapshot_id = backup.backup_id 205 | snapshots = ec2client.describe_snapshots(SnapshotIds=[snapshot_id])['Snapshots'] 206 | self.assertEqual(len(snapshots), 1) 207 | ec2client.create_tags( 208 | Resources=[snapshot_id], 209 | Tags=[{'Key': f"{RuntimeConfig.get_tag_prefix()}:date_created", 210 | 'Value': datetime(2000, 1, 1).strftime(BackupResource.TIMESTAMP_FORMAT) 211 | }] 212 | ) 213 | ebs_backups_engine.clean_backups() 214 | with self.assertRaises(botocore.exceptions.ClientError) as context: 215 | ec2client.describe_snapshots(SnapshotIds=[snapshot_id])['Snapshots'] 216 | 217 | self.assertTrue('does not exist' in context.exception.response['Error']['Message']) 218 | self.assertEqual('InvalidSnapshot.NotFound', context.exception.response['Error']['Code']) 219 | 220 | account_id = ebs_backups_engine.account_id 221 | s3path = f"{S3_DATA_PREFIX}/{ebs_backups_engine.get_engine_type()}/removed/{backup.name}.yaml" 222 | s3bucket = ebs_backups_engine.get_local_bucket_name() 223 | bucket = boto3.resource('s3').Bucket(s3bucket) 224 | object = bucket.Object(s3path) 225 | content = object.get()['Body'].read() 226 | restored_br = yaml.load(content, Loader=yaml.Loader) 227 | self.assertEqual(restored_br.backup_id, backup.backup_id) 228 | self.assertEqual(restored_br.name, backup.name) 229 | 
self.assertEqual(restored_br.region, backup.region) 230 | self.assertIsNotNone(restored_br.date_deleted) 231 | self.assertEqual(restored_br.date_created, datetime(2000, 1, 1)) 232 | valid = True 233 | 234 | self.assertTrue(valid) 235 | 236 | 237 | if __name__ == '__main__': 238 | unittest.main() 239 | -------------------------------------------------------------------------------- /shelvery_tests/test_functions.py: -------------------------------------------------------------------------------- 1 | import os 2 | import boto3 3 | import time 4 | import yaml 5 | from shelvery_tests.conftest import destination_account, source_account 6 | from shelvery.runtime_config import RuntimeConfig 7 | from shelvery.aws_helper import AwsHelper 8 | 9 | def setup_source(self): 10 | print(f"Setting up integration test") 11 | self.share_with_id = destination_account 12 | os.environ["shelvery_share_aws_account_ids"] = destination_account 13 | os.environ['AWS_DEFAULT_REGION'] = 'ap-southeast-2' 14 | os.environ['SHELVERY_MONO_THREAD'] = '1' 15 | os.environ['shelvery_custom_retention_types'] = 'shortLived:1' 16 | os.environ['shelvery_current_retention_type'] = 'shortLived' 17 | 18 | sts = AwsHelper.boto3_client('sts') 19 | self.id = sts.get_caller_identity() 20 | print(f"Running as user:\n{self.id}\n") 21 | 22 | def setup_destination(self): 23 | print(f"Setting up integration test") 24 | 25 | os.environ['AWS_DEFAULT_REGION'] = 'ap-southeast-2' 26 | os.environ['SHELVERY_MONO_THREAD'] = '1' 27 | os.environ['shelvery_custom_retention_types'] = 'shortLived:1' 28 | os.environ['shelvery_current_retention_type'] = 'shortLived' 29 | os.environ["shelvery_source_aws_account_ids"] = source_account 30 | 31 | sts = AwsHelper.boto3_client('sts') 32 | self.id = sts.get_caller_identity() 33 | print(f"Running as user:\n{self.id}\n") 34 | 35 | def compare_backups(self, backup, backup_engine): 36 | print("Inside backup loop " + backup.backup_id) 37 | snapshot_id = backup.backup_id 38 | self.created_snapshots.append(snapshot_id) 39 | print("Snapshot: " + str(snapshot_id)) 40 | 41 | # wait for snapshot to become available 42 | backup_engine.wait_backup_available(backup.region, backup.backup_id, None, None) 43 | 44 | # allow buffer period for engine to write data to s3 45 | time.sleep(20) 46 | 47 | # this is the backup that gets stored in s3 48 | engine_backup = backup_engine.get_backup_resource(backup.region, backup.backup_id) 49 | # verify the s3 data 50 | account_id = backup_engine.account_id 51 | s3path = f"backups/{backup_engine.get_engine_type()}/{engine_backup.name}.yaml" 52 | s3bucket = backup_engine.get_local_bucket_name() 53 | print(f"Using bucket {s3bucket}") 54 | print(f"Using path {s3path}") 55 | bucket = boto3.resource('s3').Bucket(s3bucket) 56 | object = bucket.Object(s3path) 57 | content = object.get()['Body'].read() 58 | restored_br = yaml.load(content, Loader=yaml.Loader) 59 | self.assertEqual(restored_br.backup_id, engine_backup.backup_id) 60 | self.assertEqual(restored_br.name, engine_backup.name) 61 | self.assertEqual(restored_br.region, engine_backup.region) 62 | print(f"Tags restored: \n{yaml.dump(restored_br.tags)}\n") 63 | print(f"Tags backup: \n{yaml.dump(engine_backup.tags)}\n") 64 | self.assertEqual(restored_br.tags['Name'], engine_backup.tags['Name']) 65 | for tag in ['name', 'date_created', 'entity_id', 'region', 'retention_type']: 66 | self.assertEqual( 67 | restored_br.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"], 68 | engine_backup.tags[f"{RuntimeConfig.get_tag_prefix()}:{tag}"] 69 | ) 70 | 71 | return 
True -------------------------------------------------------------------------------- /shelvery_tests/zname_transformation_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import traceback 3 | import unittest 4 | import os 5 | import time 6 | import pytest 7 | 8 | pwd = os.path.dirname(os.path.abspath(__file__)) 9 | 10 | sys.path.append(f"{pwd}/..") 11 | sys.path.append(f"{pwd}/../shelvery") 12 | sys.path.append(f"{pwd}/shelvery") 13 | sys.path.append(f"{pwd}/lib") 14 | sys.path.append(f"{pwd}/../lib") 15 | 16 | from shelvery.ebs_backup import ShelveryEBSBackup 17 | from shelvery.engine import ShelveryEngine 18 | from shelvery.runtime_config import RuntimeConfig 19 | from shelvery.backup_resource import BackupResource 20 | from shelvery.aws_helper import AwsHelper 21 | 22 | print(f"Python lib path:\n{sys.path}") 23 | 24 | NAME_WITH_SPECIAL_CHARACTERS = 'shelvery&#^--_auto_mate_d_tests' 25 | NAME_TRANSFORMED = 'shelvery-auto-mate-d-tests' 26 | 27 | 28 | class ShelveryNameTransformationTestCase(unittest.TestCase): 29 | """Shelvery EBS Backups Integration tests""" 30 | 31 | def id(self): 32 | return str(self.__class__) 33 | 34 | @pytest.mark.source 35 | def setUp(self): 36 | self.volume = None 37 | self.created_snapshots = [] 38 | self.regional_snapshots = { 39 | 'us-west-1': [], 40 | 'us-west-2': [] 41 | } 42 | 43 | print(f"Setting up ebs integration test") 44 | print("Create EBS Volume of 1G") 45 | os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' 46 | os.environ['SHELVERY_MONO_THREAD'] = '1' 47 | ec2client = AwsHelper.boto3_client('ec2') 48 | sts = AwsHelper.boto3_client('sts') 49 | self.id = sts.get_caller_identity() 50 | print(f"Running as user:\n{self.id}\n") 51 | self.volume = ec2client.create_volume(AvailabilityZone='us-east-1a', 52 | Encrypted=False, 53 | Size=1, 54 | VolumeType='gp2', 55 | TagSpecifications=[{ 56 | 'ResourceType': 'volume', 57 | 'Tags': [{ 58 | 'Key': f"{RuntimeConfig.get_tag_prefix()}:{ShelveryEngine.BACKUP_RESOURCE_TAG}", 59 | 'Value': 'true' 60 | }, {'Key': 'Name', 'Value': NAME_WITH_SPECIAL_CHARACTERS}] 61 | }]) 62 | 63 | # wait until volume is available 64 | interm_volume = ec2client.describe_volumes(VolumeIds=[self.volume['VolumeId']])['Volumes'][0] 65 | while interm_volume['State'] != 'available': 66 | time.sleep(5) 67 | interm_volume = ec2client.describe_volumes(VolumeIds=[self.volume['VolumeId']])['Volumes'][0] 68 | 69 | print(f"Created volume: {self.volume}") 70 | # share with a dummy (non-existent) account id to exercise sharing logic 71 | self.share_with_id = int(self.id['Account']) + 1 72 | os.environ['shelvery_select_entity'] = self.volume['VolumeId'] 73 | 74 | @pytest.mark.source 75 | def tearDown(self): 76 | ec2client = AwsHelper.boto3_client('ec2') 77 | ec2client.delete_volume(VolumeId=self.volume['VolumeId']) 78 | print(f"Deleted volume:\n{self.volume['VolumeId']}\n") 79 | 80 | # snapshot deletion surrounded with try/except in order 81 | # for cases when shelvery cleans / does not clean up behind itself 82 | for snapid in self.created_snapshots: 83 | print(f"Deleting snapshot {snapid}") 84 | try: 85 | ec2client.delete_snapshot(SnapshotId=snapid) 86 | except Exception as e: 87 | print(f"Failed to delete {snapid}:{str(e)}") 88 | 89 | for region in self.regional_snapshots: 90 | ec2regional = AwsHelper.boto3_client('ec2', region_name=region) 91 | for snapid in self.regional_snapshots[region]: 92 | try: 93 | ec2regional.delete_snapshot(SnapshotId=snapid) 94 | except Exception as e: 95 | print(f"Failed to delete 
{snapid}:{str(e)}") 96 | 97 | @pytest.mark.source 98 | def test_NameTransformed(self): 99 | ebs_backups_engine = ShelveryEBSBackup() 100 | try: 101 | backups = ebs_backups_engine.create_backups() 102 | except Exception as e: 103 | print(e) 104 | print(f"Failed with {e}") 105 | traceback.print_exc(file=sys.stdout) 106 | raise e 107 | ec2client = AwsHelper.boto3_client('ec2') 108 | 109 | valid = False 110 | # validate a backup exists and its name was transformed 111 | for backup in backups: 112 | if backup.entity_id == self.volume['VolumeId']: 113 | snapshot_id = backup.backup_id 114 | self.created_snapshots.append(snapshot_id) 115 | 116 | # fetch the snapshot and verify it belongs to the test volume 117 | snapshots = ec2client.describe_snapshots(SnapshotIds=[snapshot_id])['Snapshots'] 118 | self.assertTrue(len(snapshots) == 1) 119 | self.assertTrue(snapshots[0]['VolumeId'] == self.volume['VolumeId']) 120 | d_tags = dict(map(lambda x: (x['Key'], x['Value']), snapshots[0]['Tags'])) 121 | 122 | self.assertTrue(d_tags['Name'].startswith(NAME_TRANSFORMED)) 123 | print(f"required: {backup.date_created.strftime(BackupResource.TIMESTAMP_FORMAT)}-{backup.retention_type}") 124 | print(f"actual: {d_tags['Name']}") 125 | self.assertTrue(d_tags['Name'].endswith(f"{backup.date_created.strftime(BackupResource.TIMESTAMP_FORMAT)}-{backup.retention_type}")) 126 | 127 | valid = True 128 | 129 | self.assertTrue(valid) 130 | 131 | 132 | if __name__ == '__main__': 133 | unittest.main() 134 | -------------------------------------------------------------------------------- /template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: Shelvery 4 | Parameters: 5 | CreateBucketsSchedule: 6 | Type: String 7 | Default: "0 0 ? * * *" 8 | CreateBackupSchedule: 9 | Type: String 10 | Default: "0 1 ? * * *" 11 | CleanBackupsSchedule: 12 | Type: String 13 | Default: "0 2 ? * * *" 14 | PullSharedBackupsSchedule: 15 | Type: String 16 | Default: "0 2 ? * * *" 17 | S3BackupSchedule: 18 | Type: String 19 | Default: "0 1 ? 
* * *" 20 | ShelveryKeepDailyBackups: 21 | Type: String 22 | Default: '14' 23 | ShelveryKeepWeeklyBackups: 24 | Type: String 25 | Default: '8' 26 | ShelveryKeepMonthlyBackups: 27 | Type: String 28 | Default: '12' 29 | ShelveryKeepYearlyBackups: 30 | Type: String 31 | Default: '10' 32 | ShelverySourceAwsAccountIds: 33 | Type: String 34 | Default: '' 35 | ShelveryShareAwsAccountIds: 36 | Type: String 37 | Default: '' 38 | ShelveryRdsBackupMode: 39 | Type: String 40 | Default: 'RDS_COPY_AUTOMATED_SNAPSHOT' 41 | AllowedValues: 42 | - RDS_COPY_AUTOMATED_SNAPSHOT 43 | - RDS_CREATE_SNAPSHOT 44 | ShelveryDocDbBackupMode: 45 | Type: String 46 | Default: 'DOCDB_COPY_AUTOMATED_SNAPSHOT' 47 | AllowedValues: 48 | - DOCDB_COPY_AUTOMATED_SNAPSHOT 49 | - DOCDB_CREATE_SNAPSHOT 50 | ShelveryRedShiftBackupMode: 51 | Type: String 52 | Default: 'REDSHIFT_COPY_AUTOMATED_SNAPSHOT' 53 | AllowedValues: 54 | - REDSHIFT_COPY_AUTOMATED_SNAPSHOT 55 | - REDSHIFT_CREATE_SNAPSHOT 56 | ShelveryCustomRetentionTypes: 57 | Type: String 58 | Default: 'quarterHourly:86400,halfHourly:86400,hourly:86400' 59 | ShelveryEncryptCopy: 60 | Description: During the pull backup task, encrypt all unencrypted snapshots with kms 61 | Type: String 62 | AllowedValues: 63 | - true 64 | - false 65 | Default: false 66 | ShelveryCopyKmsKeyId: 67 | Description: Duing the pull backup task, Kms key id to encrypt snapshots with 68 | Type: String 69 | Default: '' 70 | ShelveryReencryptKmsKeyId: 71 | Description: During the share task, Kms key id to encrypt snapshots with 72 | Type: String 73 | Default: '' 74 | 75 | Resources: 76 | 77 | SnsTopic: 78 | Type: AWS::SNS::Topic 79 | Properties: 80 | TopicName: Shelvery-Notifications 81 | 82 | SnsTopicErrors: 83 | Type: AWS::SNS::Topic 84 | Properties: 85 | TopicName: Shelvery-Errors 86 | 87 | ShelveryWaitQueue: 88 | Type: AWS::SQS::Queue 89 | Properties: 90 | QueueName: Shelvery-Wait-Queue 91 | MessageRetentionPeriod: 3600 92 | VisibilityTimeout: 900 # must match function timeout 93 | RedrivePolicy: 94 | deadLetterTargetArn: !GetAtt ShelveryWaitDLQ.Arn 95 | maxReceiveCount: 10 96 | Tags: 97 | - Key: Name 98 | Value: Shelvery-Wait 99 | - Key: CreatedBy 100 | Value: Shelvery 101 | 102 | ShelveryWaitDLQ: 103 | Type: AWS::SQS::Queue 104 | Properties: 105 | QueueName: Shelvery-Wait-dlq 106 | MessageRetentionPeriod: 1209600 107 | Tags: 108 | - Key: Name 109 | Value: Shelvery-Wait-dlq 110 | - Key: CreatedBy 111 | Value: Shelvery 112 | 113 | Shelvery: 114 | Type: AWS::Serverless::Function 115 | Properties: 116 | FunctionName: ShelveryBackups 117 | Handler: shelvery_lambda/lambda_handler.lambda_handler 118 | Runtime: python3.11 119 | CodeUri: ./lib 120 | Timeout: 900 121 | 122 | Tags: 123 | Name: Shelvery 124 | CreatedBy: Shelvery 125 | ShelveryVersion: 0.9.13 126 | 127 | Environment: 128 | Variables: 129 | shelvery_keep_daily_backups: !Ref ShelveryKeepDailyBackups 130 | shelvery_keep_weekly_backups: !Ref ShelveryKeepWeeklyBackups 131 | shelvery_keep_monthly_backups: !Ref ShelveryKeepMonthlyBackups 132 | shelvery_keep_yearly_backups: !Ref ShelveryKeepYearlyBackups 133 | shelvery_source_aws_account_ids: !Ref ShelverySourceAwsAccountIds 134 | shelvery_share_aws_account_ids: !Ref ShelveryShareAwsAccountIds 135 | shelvery_rds_backup_mode: !Ref ShelveryRdsBackupMode 136 | shelvery_docdb_backup_mode: !Ref ShelveryDocDbBackupMode 137 | shelvery_redshift_backup_mode: !Ref ShelveryRedShiftBackupMode 138 | shelvery_sns_topic: !Ref SnsTopic 139 | shelvery_error_sns_topic: !Ref SnsTopicErrors 140 | 
shelvery_custom_retention_types: !Ref ShelveryCustomRetentionTypes 141 | shelvery_sqs_queue_url: !Ref ShelveryWaitQueue 142 | shelvery_sqs_queue_wait_period: 300 143 | shelvery_encrypt_copy: !Ref ShelveryEncryptCopy 144 | shelvery_copy_kms_key_id: !Ref ShelveryCopyKmsKeyId 145 | shelvery_reencrypt_kms_key_id: !Ref ShelveryReencryptKmsKeyId 146 | 147 | Events: 148 | 149 | WaitTrigger: 150 | Type: SQS 151 | Properties: 152 | Queue: !GetAtt ShelveryWaitQueue.Arn 153 | BatchSize: 1 154 | 155 | CreateDataBucket: 156 | Type: Schedule 157 | Properties: 158 | Schedule: !Sub cron(${CreateBucketsSchedule}) 159 | Input: '{"backup_type":"ebs","action":"create_data_buckets"}' 160 | 161 | CreateEbs: 162 | Type: Schedule 163 | Properties: 164 | Schedule: !Sub cron(${CreateBackupSchedule}) 165 | Input: '{"backup_type":"ebs","action":"create_backups"}' 166 | CreateRds: 167 | Type: Schedule 168 | Properties: 169 | Schedule: !Sub cron(${CreateBackupSchedule}) 170 | Input: '{"backup_type":"rds","action":"create_backups"}' 171 | CreateRdsCluster: 172 | Type: Schedule 173 | Properties: 174 | Schedule: !Sub cron(${CreateBackupSchedule}) 175 | Input: '{"backup_type":"rds_cluster","action":"create_backups"}' 176 | CreateAmi: 177 | Type: Schedule 178 | Properties: 179 | Schedule: !Sub cron(${CreateBackupSchedule}) 180 | Input: '{"backup_type":"ec2ami","action":"create_backups"}' 181 | CreateDocDb: 182 | Type: Schedule 183 | Properties: 184 | Schedule: !Sub cron(${CreateBackupSchedule}) 185 | Input: '{"backup_type":"docdb","action":"create_backups"}' 186 | 187 | CleanEbs: 188 | Type: Schedule 189 | Properties: 190 | Schedule: !Sub cron(${CleanBackupsSchedule}) 191 | Input: '{"backup_type":"ebs","action":"clean_backups"}' 192 | CleanRds: 193 | Type: Schedule 194 | Properties: 195 | Schedule: !Sub cron(${CleanBackupsSchedule}) 196 | Input: '{"backup_type":"rds","action":"clean_backups"}' 197 | CleanRdsCluster: 198 | Type: Schedule 199 | Properties: 200 | Schedule: !Sub cron(${CleanBackupsSchedule}) 201 | Input: '{"backup_type":"rds_cluster","action":"clean_backups"}' 202 | CleanEC2: 203 | Type: Schedule 204 | Properties: 205 | Schedule: !Sub cron(${CleanBackupsSchedule}) 206 | Input: '{"backup_type":"ec2ami","action":"clean_backups"}' 207 | CleanDocDb: 208 | Type: Schedule 209 | Properties: 210 | Schedule: !Sub cron(${CleanBackupsSchedule}) 211 | Input: '{"backup_type":"docdb","action":"clean_backups"}' 212 | 213 | PullEbs: 214 | Type: Schedule 215 | Properties: 216 | Schedule: !Sub cron(${PullSharedBackupsSchedule}) 217 | Input: '{"backup_type":"ebs","action":"pull_shared_backups"}' 218 | PullRds: 219 | Type: Schedule 220 | Properties: 221 | Schedule: !Sub cron(${PullSharedBackupsSchedule}) 222 | Input: '{"backup_type":"rds","action":"pull_shared_backups"}' 223 | PullRdsCluster: 224 | Type: Schedule 225 | Properties: 226 | Schedule: !Sub cron(${PullSharedBackupsSchedule}) 227 | Input: '{"backup_type":"rds_cluster","action":"pull_shared_backups"}' 228 | PullEC2: 229 | Type: Schedule 230 | Properties: 231 | Schedule: !Sub cron(${PullSharedBackupsSchedule}) 232 | Input: '{"backup_type":"ec2ami","action":"pull_shared_backups"}' 233 | PullDocdb: 234 | Type: Schedule 235 | Properties: 236 | Schedule: !Sub cron(${PullSharedBackupsSchedule}) 237 | Input: '{"backup_type":"docdb","action":"pull_shared_backups"}' 238 | 239 | Policies: 240 | - Version: "2012-10-17" 241 | Statement: 242 | # kms encryption 243 | - Effect: Allow 244 | Action: 245 | - kms:DescribeKey 246 | - kms:Decrypt 247 | - kms:Encrypt 248 | # add your kms keys 
here if you want to be more restrictive 249 | Resource: '*' 250 | # kms permissions for cross account copying of snapshots 251 | - Effect: Allow 252 | Action: 253 | - kms:RevokeGrant 254 | - kms:CreateGrant 255 | - kms:ListGrants 256 | # add your kms keys here if you want to be more restrictive 257 | Resource: '*' 258 | Condition: 259 | Bool: 260 | kms:GrantIsForAWSResource: 'true' 261 | - Effect: Allow 262 | Action: 263 | - lambda:Invoke* 264 | Resource: '*' 265 | # read only ec2, rds 266 | - Effect: Allow 267 | Action: 268 | - 'ec2:Describe*' 269 | - 'rds:Describe*' 270 | - 'rds:ListTagsForResource' 271 | Resource: '*' 272 | # manage ebs snapshots and tags 273 | - Effect: Allow 274 | Action: 275 | - ec2:CreateSnapshot 276 | - ec2:ModifySnapshotAttribute 277 | - ec2:ResetSnapshotAttribute 278 | - ec2:DeleteSnapshot 279 | - ec2:DescribeTags 280 | - ec2:CreateTags 281 | - ec2:DeleteTags 282 | - ec2:CopySnapshot 283 | Resource: '*' 284 | # manage rds snapshots and tags 285 | - Effect: Allow 286 | Action: 287 | - rds:ModifyDBSnapshotAttribute 288 | - rds:ModifyDBClusterSnapshotAttribute 289 | - rds:CopyDBSnapshot 290 | - rds:CopyDBClusterSnapshot 291 | - rds:DeleteDBSnapshot 292 | - rds:DeleteDBClusterSnapshot 293 | - rds:CreateDBSnapshot 294 | - rds:CreateDBClusterSnapshot 295 | - rds:AddTagsToResource 296 | - rds:RemoveTagsFromResource 297 | Resource: '*' 298 | - Effect: Allow 299 | Action: 300 | - 'lambda:Invoke*' 301 | Resource: 302 | - !Sub "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:shelvery" 303 | # manage AMIs (limited) 304 | - Effect: Allow 305 | Action: 306 | - ec2:CopyImage 307 | - ec2:CreateImage 308 | - ec2:DeregisterImage 309 | - ec2:DescribeImageAttribute 310 | - ec2:DescribeImages 311 | - ec2:DescribeInstances 312 | - ec2:ModifyImageAttribute 313 | - ec2:DeleteSnapshot 314 | Resource: '*' 315 | - Effect: Allow 316 | Action: 317 | - s3:Get* 318 | - s3:List* 319 | - s3:CreateBucket 320 | - s3:PutBucketPolicy 321 | - s3:PutBucketAcl 322 | - s3:PutObject 323 | - s3:PutObjectAcl 324 | - s3:DeleteObject 325 | - s3:PutPublicAccessBlock 326 | - s3:GetBucketPolicyStatus 327 | - s3:GetBucketPublicAccessBlock 328 | - s3:PutBucketPublicAccessBlock 329 | - s3:GetAccountPublicAccessBlock 330 | - s3:PutAccountPublicAccessBlock 331 | Resource: '*' 332 | - Effect: Allow 333 | Action: 334 | - sns:Publish* 335 | Resource: 336 | - !Ref SnsTopic 337 | - Effect: Allow 338 | Action: 339 | - sqs:ChangeMessageVisibility 340 | - sqs:DeleteMessage 341 | - sqs:GetQueueAttributes 342 | - sqs:ReceiveMessage 343 | - sqs:SendMessage 344 | Resource: 345 | - !GetAtt ShelveryWaitQueue.Arn 346 | 347 | ShelveryErrors: 348 | Type: AWS::CloudWatch::Alarm 349 | Properties: 350 | AlarmDescription: If Shelvery errors are greater than 0 for 5 minutes 351 | Namespace: AWS/Lambda 352 | MetricName: Errors 353 | AlarmActions: 354 | - !Ref SnsTopicErrors 355 | OKActions: 356 | - !Ref SnsTopicErrors 357 | ComparisonOperator: GreaterThanThreshold 358 | EvaluationPeriods: 1 359 | Period: 300 360 | Statistic: Maximum 361 | Threshold: 0 362 | TreatMissingData: ignore 363 | Dimensions: 364 | - Name: FunctionName 365 | Value: ShelveryBackups 366 | --------------------------------------------------------------------------------
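Note: each scheduled event in template.yaml above invokes the ShelveryBackups function with a small JSON payload naming a backup type and an action. The snippet below is a minimal sketch of triggering the same action manually with boto3; it is illustrative and not part of the repository, and assumes the stack above is deployed in the account and region your credentials point at.

import json
import boto3

# payload shape matches the Events section of template.yaml above
payload = {"backup_type": "ebs", "action": "create_backups"}

lambda_client = boto3.client('lambda')
response = lambda_client.invoke(
    FunctionName='ShelveryBackups',   # FunctionName defined in template.yaml
    InvocationType='Event',           # asynchronous, like the scheduled triggers
    Payload=json.dumps(payload).encode('utf-8')
)
print(response['StatusCode'])  # 202 means the async invocation was accepted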