├── .autoenv ├── .autoenv.leave ├── .bumpversion.cfg ├── .gitignore ├── .readthedocs.yml ├── AUTHORS.txt ├── CHANGES.md ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.md ├── deploy-complete.bash ├── deployfish ├── __init__.py ├── config │ ├── __init__.py │ ├── config.py │ ├── processors │ │ ├── __init__.py │ │ ├── abstract.py │ │ ├── environment.py │ │ └── terraform.py │ └── test │ │ ├── __init__.py │ │ ├── env_file.env │ │ ├── interpolate.yml │ │ ├── terraform.tfstate │ │ ├── terraform.tfstate.0.12 │ │ ├── terraform.tfstate.prod │ │ ├── terraform.tfstate.qa │ │ ├── terraform_interpolate.yml │ │ ├── test_Config.py │ │ └── test_Terraform.py ├── controllers │ ├── __init__.py │ ├── base.py │ ├── cluster.py │ ├── commands.py │ ├── crud.py │ ├── elb.py │ ├── elbv2.py │ ├── invoked_task.py │ ├── logs.py │ ├── network.py │ ├── rds.py │ ├── secrets.py │ ├── service.py │ ├── task.py │ ├── tunnel.py │ └── utils.py ├── core │ ├── __init__.py │ ├── adapters │ │ ├── __init__.py │ │ ├── abstract.py │ │ └── deployfish │ │ │ ├── __init__.py │ │ │ ├── appscaling.py │ │ │ ├── cloudwatch.py │ │ │ ├── ecs.py │ │ │ ├── events.py │ │ │ ├── mixins.py │ │ │ ├── secrets.py │ │ │ ├── service_discovery.py │ │ │ ├── ssh.py │ │ │ └── test │ │ │ ├── __init__.py │ │ │ ├── test_ServiceHelperTaskAdapter.py │ │ │ ├── test_ServiceHelperTask_new.py │ │ │ ├── test_Service_new.py │ │ │ └── test_StandaloneTaskAdapter.py │ ├── aws.py │ ├── loaders.py │ ├── models │ │ ├── __init__.py │ │ ├── abstract.py │ │ ├── appscaling.py │ │ ├── cloudwatch.py │ │ ├── cloudwatchlogs.py │ │ ├── ec2.py │ │ ├── ecs.py │ │ ├── efs.py │ │ ├── elb.py │ │ ├── elbv2.py │ │ ├── events.py │ │ ├── mixins.py │ │ ├── rds.py │ │ ├── secrets.py │ │ ├── secrets_manager.py │ │ ├── service_discovery.py │ │ ├── ssh.py │ │ └── test │ │ │ ├── __init__.py │ │ │ └── test_TaskDefinitionFARGATEMixin.py │ ├── ssh.py │ ├── utils │ │ ├── __init__.py │ │ ├── mixins.py │ │ └── utils.py │ └── waiters │ │ ├── __init__.py │ │ └── hooks │ │ ├── __init__.py │ │ ├── abstract.py │ │ └── ecs.py ├── exceptions.py ├── ext │ ├── __init__.py │ ├── ext_df_argparse.py │ ├── ext_df_jinja2.py │ └── ext_df_plugin.py ├── main.py ├── plugins │ ├── mysql │ │ ├── __init__.py │ │ ├── adapters │ │ │ ├── __init__.py │ │ │ └── deployfish │ │ │ │ ├── __init__.py │ │ │ │ └── mysql.py │ │ ├── controllers │ │ │ ├── __init__.py │ │ │ └── mysql.py │ │ ├── hooks.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ └── mysql.py │ │ └── templates │ │ │ ├── __init__.py │ │ │ └── detail--mysqldatabase.jinja2 │ ├── slack │ │ ├── __init__.py │ │ └── hooks.py │ └── sqs │ │ ├── __init__.py │ │ └── hooks.py ├── py.typed ├── registry.py ├── renderers │ ├── __init__.py │ ├── abstract.py │ ├── misc.py │ └── table.py ├── templates │ ├── __init__.py │ ├── detail--classicloadbalancer.jinja2 │ ├── detail--cloudwatchloggroup.jinja2 │ ├── detail--cloudwatchlogstream.jinja2 │ ├── detail--cluster.jinja2 │ ├── detail--invokedtask.jinja2 │ ├── detail--loadbalancer.jinja2 │ ├── detail--loadbalancerlistener.jinja2 │ ├── detail--rdsinstance.jinja2 │ ├── detail--secrets--diff.jinja2 │ ├── detail--secrets.jinja2 │ ├── detail--service--short.jinja2 │ ├── detail--service.jinja2 │ ├── detail--servicehelpertask.jinja2 │ ├── detail--sshtunnel.jinja2 │ ├── detail--standalonetask--short.jinja2 │ ├── detail--standalonetask.jinja2 │ ├── detail--targetgroup.jinja2 │ ├── detail.jinja2 │ ├── macros │ │ ├── awslogs.jinja2 │ │ ├── classicloadbalancer.jinja2 │ │ ├── invoked-task.jinja2 │ │ ├── secrets.jinja2 │ │ ├── service.jinja2 │ │ ├── 
target-group.jinja2 │ │ ├── task-definition.jinja2 │ │ ├── task.jinja2 │ │ ├── utils-plan.jinja2 │ │ └── utils.jinja2 │ ├── plan--service.jinja2 │ └── plan--standalonetask.jinja2 └── types.py ├── docs ├── Makefile ├── requirements.txt └── source │ ├── advanced.rst │ ├── api │ ├── adapters │ │ ├── abstract.rst │ │ ├── appscaling.rst │ │ ├── cloudwatch.rst │ │ ├── ecs.rst │ │ ├── events.rst │ │ ├── index.rst │ │ ├── mixins.rst │ │ ├── secrets.rst │ │ ├── service_discovery.rst │ │ └── ssh.rst │ ├── config │ │ ├── config.rst │ │ ├── config_processors.rst │ │ └── index.rst │ ├── controllers │ │ ├── base.rst │ │ ├── cluster.rst │ │ ├── commands.rst │ │ ├── crud.rst │ │ ├── elb.rst │ │ ├── extension.rst │ │ ├── index.rst │ │ ├── invoked_task.rst │ │ ├── logs.rst │ │ ├── network.rst │ │ ├── rds.rst │ │ ├── secrets.rst │ │ ├── service.rst │ │ ├── task.rst │ │ └── tunnel.rst │ ├── loaders.rst │ ├── main.rst │ ├── models │ │ ├── abstract.rst │ │ ├── appscaling.rst │ │ ├── cloudwatch.rst │ │ ├── cloudwatchlogs.rst │ │ ├── ec2.rst │ │ ├── ecs.rst │ │ ├── efs.rst │ │ ├── elb.rst │ │ ├── elbv2.rst │ │ ├── events.rst │ │ ├── index.rst │ │ ├── mixins.rst │ │ ├── rds.rst │ │ ├── secrets.rst │ │ ├── secrets_manager.rst │ │ ├── service_discovery.rst │ │ └── ssh.rst │ └── renderers.rst │ ├── conf.py │ ├── index.rst │ ├── install.rst │ ├── intro.rst │ ├── plugins │ ├── mysql.rst │ ├── plugins.rst │ ├── slack.rst │ └── sqs.rst │ ├── quickintro.rst │ ├── runbook │ ├── adapters.rst │ ├── architecture.rst │ ├── contributing.rst │ ├── extending.rst │ └── testing.rst │ ├── tutorial1.rst │ ├── tutorial2.rst │ ├── tutorial3.rst │ ├── tutorial4.rst │ ├── tutorial5.rst │ ├── tutorial6.rst │ ├── tutorials.rst │ └── yaml.rst ├── examples ├── appscaling.yml ├── asg.yml ├── basic.yml ├── different-aws-profile.yml ├── fargate.yml ├── mulitple-containers.yml ├── no-elb.yml ├── parameter-store.yml ├── run_task.yml ├── terraform-basic.yml ├── tutorial_1.yml ├── tutorial_2.yml └── volumes.yml ├── pyproject.toml ├── requirements.txt ├── setup.cfg ├── setup.py └── tox.ini /.autoenv: -------------------------------------------------------------------------------- 1 | if [ -f .venv/bin/activate ]; then 2 | echo "Activating virtual environment" 3 | source .venv/bin/activate 4 | fi 5 | -------------------------------------------------------------------------------- /.autoenv.leave: -------------------------------------------------------------------------------- 1 | CWD=$(pwd) 2 | if [[ ! 
"$CWD" == *"$VIRTUAL_ENV_PROMPT"* ]]; then 3 | echo "Deactivating virtual environment" 4 | deactivate 5 | fi 6 | 7 | -------------------------------------------------------------------------------- /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 1.15.1 3 | commit = True 4 | tag = True 5 | tag_name = {new_version} 6 | 7 | [bumpversion:file:deployfish/__init__.py] 8 | parse = (?P\d+), (?P\d+), (?P\d+) 9 | serialize = {major}, {minor}, {patch} 10 | 11 | [bumpversion:file:docs/source/conf.py] 12 | 13 | [bumpversion:file:setup.cfg] 14 | 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.sw* 3 | .DS_Store 4 | dist 5 | *.egg-info 6 | .ropeproject 7 | .terraform 8 | .idea 9 | docs/build 10 | #terraform.tfstate 11 | tags 12 | build 13 | .vscode 14 | .venv 15 | /deployfish.yml 16 | real_tests 17 | .python-version 18 | 19 | .mypy_cache 20 | *.code-workspace 21 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | version: 2 4 | 5 | sphinx: 6 | configuration: docs/source/conf.py 7 | 8 | formats: all 9 | 10 | build: 11 | os: ubuntu-22.04 12 | tools: 13 | python: "3.11" 14 | 15 | python: 16 | install: 17 | - requirements: docs/requirements.txt 18 | -------------------------------------------------------------------------------- /AUTHORS.txt: -------------------------------------------------------------------------------- 1 | Chris Malek 2 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | ## 1.0.0 (2021-05-17) 2 | 3 | ENHANCEMENTS: 4 | 5 | * Complete refactoring of the deployfish codebase 6 | * Django like models and managers for all AWS resources 7 | * Jinja2 and python-tabulate for rich output 8 | * Class based views for click commands 9 | * Many other changes 10 | * FEATURE: You can now work with all Services and Tasks in your AWS account, even if they're not listed in your deployfish.yml file 11 | * FEATURE: Service helper tasks can now be scheduled 12 | * FEATURE: Service helper tasks logging now defaults to awslogs when configured for FARGATE 13 | * FEATURE: All standalone tasks (in the top level tasks: section) for a service can now be updated with a single command 14 | * FEATURE: You can now look at task logs in CloudWatch Logs with deployfish 15 | * FEATURE: Describe ALBs, Target Groups 16 | * FEATURE: Describe ELBs 17 | * UPDATE: Service info output now includes details about load balancing setup 18 | 19 | ## 0.30.1 (2019-12-17) 20 | 21 | BUG FIXES: 22 | 23 | * cli: properly handle multiple target groups when describing a service 24 | 25 | ## 0.30.0 (2019-12-17) 26 | 27 | ENHANCEMENTS: 28 | 29 | * service: you can now specify multiple target groups for a service 30 | * service: you can now specify Capacity Provider Strategies for a service 31 | * cli: now helpfully show all services and environments when you give a service/environment name that doesn't exist. 
32 | 33 | ## 0.29.9 (2019-10-14) 34 | 35 | BUG FIXES: 36 | 37 | * task: now actually setting security groups properly when using task scheduling 38 | 39 | ## 0.29.8 (2019-10-14) 40 | 41 | ENHANCEMENTS: 42 | 43 | * service/task-definition: now adding entries from the `config:` section of the task definition to 44 | container secrets. Thus you no longer need to set `deploy entrypoint` as your container's `ENTRYPOINT` 45 | in order to get your `config:` entries out of SSM Parameter Store. 46 | 47 | ## 0.29.7 (2019-04-18) 48 | 49 | BUG FIXES: 50 | 51 | * task/task-definition: now actually setting security groups properly for the task 52 | 53 | ## 0.29.5 (2019-10-07) 54 | 55 | ENHANCEMENTS: 56 | 57 | * config/terraform: you can now use `{environment}`, `{service-name}` and `{cluster-name}` keyword replacements in the 58 | terraform statefile url 59 | * global: now requiring PyYAML >= 5 60 | 61 | ## 0.29.1 (2019-07-14) 62 | 63 | ENHANCEMENTS: 64 | 65 | * config/terraform: now correctly parsing terraform-0.12.x format state files 66 | 67 | ## 0.29.0 (2019-05-30) 68 | 69 | ENHANCEMENTS: 70 | 71 | * cli: Added the `deploy parameters` subcommand, which allows you to manage `:external:` type parameters in AWS SSM 72 | Parameter Store. 73 | 74 | ## 0.28.1 (2019-04-18) 75 | 76 | BUG FIXES: 77 | 78 | * We should no longer be creating invalid `cpu` Cloudwatch Alarms when Application Autoscaling is defined for the 79 | service 80 | 81 | ## 0.28.0 (2019-04-16) 82 | 83 | FEATURES: 84 | 85 | * **New resource**: `tasks:`, Standalone task support, outside of an ECS service 86 | 87 | ## 0.27.0 (2019-01-04) 88 | 89 | ENHANCEMENTS: 90 | 91 | * service/task-definition: Added tmpfs support for ECS container definitions (ChrisLeeTW) 92 | * service: Added support for target tracking to our Application Autoscaling implementation (rv-vmalhotra) 93 | 94 | ## 0.26.0 (2018-11-30) 95 | 96 | ENHANCEMENTS: 97 | 98 | * service, service/task-definition: Added full docker volumes support 99 | * service/task-definition: Added `cap_add` and `cap_drop` to our ECS container definitions 100 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-17 California Institute of Technology. Questions or comments 2 | may be directed to the author, the Academic Development Services group of 3 | Caltech's Information Management Systems and Services department, at 4 | imss-ads-staff@caltech.edu. 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 7 | documentation files (the "Software"), to deal in the Software without restriction, including without limitation the 8 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 9 | persons to whom the Software is furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the 12 | Software. 13 | 14 | Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products 15 | derived from this software without specific prior written permission. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 18 | WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR 19 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 20 | OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include deployfish * 2 | global-exclude *.py[cod] __pycache__ *.so *.sw* 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | RAWVERSION = $(filter-out __version__ = , $(shell grep __version__ deployfish/__init__.py)) 2 | VERSION = $(strip $(shell echo $(RAWVERSION))) 3 | 4 | PACKAGE = deployfish 5 | 6 | clean: 7 | rm -rf *.tar.gz dist build *.egg-info *.rpm 8 | find . -name "*.pyc" | xargs rm -f 9 | find . -name "__pycache__" | xargs rm -rf 10 | 11 | version: 12 | @echo $(VERSION) 13 | 14 | dist: clean 15 | @python setup.py sdist 16 | @python setup.py bdist_wheel --universal 17 | 18 | pypi: dist 19 | @twine upload dist/* 20 | 21 | tox: 22 | # create a tox pyenv virtualenv based on 2.7.x 23 | # install tox and tox-pyenv in that ve 24 | # activate that ve before running this 25 | @tox 26 | -------------------------------------------------------------------------------- /deploy-complete.bash: -------------------------------------------------------------------------------- 1 | _deploy_completion() { 2 | COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ 3 | COMP_CWORD=$COMP_CWORD \ 4 | _DEPLOY_COMPLETE=complete $1 ) ) 5 | return 0 6 | } 7 | 8 | complete -F _deploy_completion -o default deploy; 9 | -------------------------------------------------------------------------------- /deployfish/__init__.py: -------------------------------------------------------------------------------- 1 | from cement.utils.version import get_version as cement_get_version 2 | 3 | VERSION = (1, 15, 1, "final", 0) 4 | 5 | 6 | def get_version(version=VERSION): 7 | return cement_get_version(version) 8 | -------------------------------------------------------------------------------- /deployfish/config/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from cement import App 4 | 5 | from .config import Config 6 | 7 | MAIN_APP: App | None = None 8 | 9 | 10 | def set_app(app: App) -> None: 11 | global MAIN_APP # pylint:disable=global-statement 12 | MAIN_APP = app 13 | 14 | 15 | def get_config() -> Config: 16 | assert MAIN_APP is not None, "get_config() called before set_app()" 17 | return MAIN_APP.deployfish_config 18 | -------------------------------------------------------------------------------- /deployfish/config/processors/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Any, Dict, List, Type 2 | 3 | from deployfish.exceptions import ConfigProcessingFailed, SkipConfigProcessing 4 | 5 | from .abstract import AbstractConfigProcessor 6 | from .environment import EnvironmentConfigProcessor 7 | from .terraform import TerraformStateConfigProcessor 8 | 9 | if TYPE_CHECKING: 10 | from deployfish.config import Config 11 | 12 | 13 | class ConfigProcessor: 14 | 15 | class ProcessingFailed(ConfigProcessingFailed): 16 | pass 17 | 18 | processor_classes: 
list[type[AbstractConfigProcessor]] = [] 19 | 20 | @classmethod 21 | def register(cls, processor_class: type[AbstractConfigProcessor]) -> None: 22 | cls.processor_classes.append(processor_class) 23 | 24 | def __init__(self, config: "Config", context: dict[str, Any]): 25 | self.config = config 26 | self.context = context 27 | 28 | def process(self) -> None: 29 | for processor_class in self.processor_classes: 30 | try: 31 | current_processor = processor_class(self.config, self.context) 32 | except SkipConfigProcessing: 33 | continue 34 | try: 35 | current_processor.process() 36 | except ConfigProcessingFailed as e: 37 | raise self.ProcessingFailed(str(e)) 38 | 39 | 40 | ConfigProcessor.register(TerraformStateConfigProcessor) 41 | ConfigProcessor.register(EnvironmentConfigProcessor) 42 | -------------------------------------------------------------------------------- /deployfish/config/processors/environment.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import os 3 | import os.path 4 | import re 5 | from typing import TYPE_CHECKING, Any 6 | 7 | from .abstract import AbstractConfigProcessor 8 | 9 | if TYPE_CHECKING: 10 | from deployfish.config import Config 11 | 12 | 13 | class EnvironmentConfigProcessor(AbstractConfigProcessor): 14 | 15 | ENVIRONMENT_RE = re.compile(r"\$\{env.(?P<key>[A-Za-z0-9-_]+)\}") 16 | 17 | def __init__(self, config: "Config", context: dict[str, Any]): 18 | super().__init__(config, context) 19 | self.environ: dict[str, str] = {} 20 | self.per_item_environ: dict[str, Any] = {} 21 | if "env_file" in self.context: 22 | self.environ.update(self._load_env_file(self.context["env_file"])) 23 | if self.context.get("import_env"): 24 | self.environ.update(os.environ) 25 | 26 | def _load_env_file(self, filename: str) -> dict[str, str]: 27 | if not filename: 28 | return {} 29 | if not os.path.exists(filename): 30 | if not self.context.get("ignore_missing_environment", False): 31 | raise self.ProcessingFailed(f'Environment file "{filename}" does not exist') 32 | return {} 33 | if not os.path.isfile(filename): 34 | if not self.context.get("ignore_missing_environment", False): 35 | raise self.ProcessingFailed(f'Environment file "{filename}" is not a regular file') 36 | return {} 37 | try: 38 | with open(filename, encoding="utf-8") as f: 39 | raw_lines = f.readlines() 40 | except OSError as e: 41 | if e.errno == errno.EACCES: 42 | if not self.context.get("ignore_missing_environment", False): 43 | raise self.ProcessingFailed(f'Environment file "{filename}" is not readable') 44 | return {} 45 | # Strip the comments and empty lines 46 | lines = [x.strip() for x in raw_lines if x.strip() and not x.strip().startswith("#")] 47 | environment = {} 48 | for line in lines: 49 | # split on the first "=" 50 | parts = str.split(line, "=", 1) 51 | if len(parts) == 2: 52 | key = parts[0] 53 | value = parts[1] 54 | environment[key] = value 55 | return environment 56 | 57 | def load_per_item_environment(self, section_name: str, item_name: str) -> None: 58 | if section_name not in self.per_item_environ or item_name not in self.per_item_environ[section_name]: 59 | filename = self.config.get_section_item(section_name, item_name).get("env_file", None) 60 | if section_name not in self.per_item_environ: 61 | self.per_item_environ[section_name] = {} 62 | if item_name not in self.per_item_environ[section_name]: 63 | self.per_item_environ[section_name][item_name] = {} 64 | self.per_item_environ[section_name][item_name] = self._load_env_file(filename) 65 | 
66 | def replace(self, obj: Any, key: str | int, value: Any, section_name: str, item_name: str) -> None: 67 | self.load_per_item_environment(section_name, item_name) 68 | replacers = self.get_deployfish_replacements(section_name, item_name) 69 | # FIXME: need to deal with multiple matches in the same line 70 | m = self.ENVIRONMENT_RE.search(value) 71 | if m: 72 | envkey = m.group("key") 73 | for replace_str, replace_value in list(replacers.items()): 74 | envkey = envkey.replace(replace_str, replace_value) 75 | envkey = envkey.upper().replace("-", "_") 76 | try: 77 | env_value = self.per_item_environ[section_name][item_name][envkey] 78 | except KeyError: 79 | try: 80 | env_value = self.environ[envkey] 81 | except KeyError: 82 | if not self.context.get("ignore_missing_environment", False): 83 | raise self.ProcessingFailed( 84 | f'Config["{section_name}"]["{item_name}"]: Could not find value for ${{env.{envkey}}}' 85 | ) 86 | env_value = "NOT-IN-ENVIRONMENT" 87 | obj[key] = value.replace(m.group(0), env_value) 88 | -------------------------------------------------------------------------------- /deployfish/config/test/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.1.0" 2 | -------------------------------------------------------------------------------- /deployfish/config/test/env_file.env: -------------------------------------------------------------------------------- 1 | FOOBAR_ENV=hi_mom 2 | FOO_BAR_PREFIX_ENV=oh_no 3 | FOO_BAR_SECRET_ENV=)(#jlk329!!3$3093%%.__) 4 | -------------------------------------------------------------------------------- /deployfish/config/test/interpolate.yml: -------------------------------------------------------------------------------- 1 | terraform: 2 | statefile: s3://asldfjksldfkjsldk 3 | lookups: 4 | cluster_name: '{environment}-cluster-name' 5 | autoscalinggroup_name: '{environment}-autoscalinggroup-name' 6 | elb_id: '{environment}-elb-id' 7 | secrets_bucket_name: 's3-config-store-bucket' 8 | iam_task_role: 'iam-role-{environment}-task' 9 | security_group_list: 'security-group-list' 10 | vpc_configuration: 'vpc-configuration' 11 | rds_address: '{environment}-rds-address' 12 | rds_port: '{environment}-rds-address' 13 | 14 | tunnels: 15 | - name: mysql-prod 16 | service: foobar-prod 17 | host: ${terraform.rds_address} 18 | port: ${terraform.rds_port} 19 | local_port: 8888 20 | 21 | services: 22 | - name: foobar-prod 23 | environment: prod 24 | cluster: ${terraform.cluster_name} 25 | service_role_arn: a_task_role_arn 26 | count: 2 27 | load_balancer: 28 | load_balancer_name: ${terraform.elb_id} 29 | container_name: example 30 | container_port: 443 31 | config: 32 | - FOOBAR=${env.FOOBAR_ENV} 33 | - DB_HOST=my_host 34 | - FOO_BAR_PREFIX=${env.FOO_BAR_PREFIX_ENV}/test 35 | - FOO_BAR_SECRET=${env.FOO_BAR_SECRET_ENV} 36 | family: foobar-prod 37 | network_mode: host 38 | task_role_arn: ${terraform.iam_task_role} 39 | vpc_configuration: 40 | security_groups: ${terraform.security_group_list} 41 | containers: 42 | - name: example 43 | image: example:1.2.3 44 | cpu: 1024 45 | memory: 4000 46 | command: /usr/bin/supervisord 47 | entrypoint: /entrypoint.sh 48 | ports: 49 | - "80:80" 50 | - "443:443" 51 | - "8021:8021:udp" 52 | ulimits: 53 | nproc: 65535 54 | nofile: 55 | soft: 65535 56 | hard: 65535 57 | environment: 58 | - LDAPTLS_REQCERT=never 59 | - ENVIRONMENT=prod 60 | - SECRETS_BUCKET_NAME=${terraform.secrets_bucket_name} 61 | labels: 62 | edu.caltech.imss-ads: "foobar" 63 | - name: output-test 
64 | vpc_configuration: ${terraform.vpc_configuration} 65 | cluster: ${terraform.cluster_name} 66 | 67 | tunnels: 68 | - name: test 69 | service: foobar-prod 70 | host: config.DB_HOST 71 | port: 3306 72 | local_port: 8888 73 | -------------------------------------------------------------------------------- /deployfish/config/test/terraform.tfstate: -------------------------------------------------------------------------------- 1 | { 2 | "version": 3, 3 | "terraform_version": "0.7.13", 4 | "serial": 116, 5 | "lineage": "3a5655c3-1d1b-4494-ad10-79186f28354e", 6 | "remote": { 7 | "type": "s3", 8 | "config": { 9 | "bucket": "state-file-bucket", 10 | "key": "remote-terraform-state", 11 | "region": "us-west-2" 12 | } 13 | }, 14 | "modules": [ 15 | { 16 | "path": [ 17 | "root" 18 | ], 19 | "outputs": { 20 | "iam-role-prod-task": { 21 | "sensitive": false, 22 | "type": "string", 23 | "value": "arn:aws:iam::324958023459:role/prod-task" 24 | }, 25 | "iam-role-qa-task": { 26 | "sensitive": false, 27 | "type": "string", 28 | "value": "arn:aws:iam::324958023459:role/qa-task" 29 | }, 30 | "prod-autoscalinggroup-name": { 31 | "sensitive": false, 32 | "type": "string", 33 | "value": "foobar-asg-prod" 34 | }, 35 | "prod-cluster-name": { 36 | "sensitive": false, 37 | "type": "string", 38 | "value": "foobar-cluster-prod" 39 | }, 40 | "prod-elb-id": { 41 | "sensitive": false, 42 | "type": "string", 43 | "value": "foobar-elb-prod" 44 | }, 45 | "qa-autoscalinggroup-name": { 46 | "sensitive": false, 47 | "type": "string", 48 | "value": "foobar-asg-qa" 49 | }, 50 | "qa-cluster-name": { 51 | "sensitive": false, 52 | "type": "string", 53 | "value": "foobar-cluster-qa" 54 | }, 55 | "qa-elb-id": { 56 | "sensitive": false, 57 | "type": "string", 58 | "value": "foobar-elb-qa" 59 | }, 60 | "s3-config-store-bucket": { 61 | "sensitive": false, 62 | "type": "string", 63 | "value": "my-config-store" 64 | }, 65 | "security-group-list": { 66 | "sensitive": false, 67 | "type": "list", 68 | "value": [ 69 | "sg-1234567", 70 | "sg-2345678", 71 | "sg-3456789" 72 | ] 73 | }, 74 | "vpc-configuration": { 75 | "sensitive": false, 76 | "type": "map", 77 | "value": { 78 | "subnets": ["subnet-1234567"], 79 | "security_groups": ["sg-1234567"], 80 | "public_ip": "DISABLED" 81 | } 82 | } 83 | } 84 | } 85 | ] 86 | } 87 | -------------------------------------------------------------------------------- /deployfish/config/test/terraform.tfstate.0.12: -------------------------------------------------------------------------------- 1 | { 2 | "version": 4, 3 | "terraform_version": "0.12.2", 4 | "serial": 1, 5 | "lineage": "1acc247b-67a5-06ad-b168-9918cb0db95e", 6 | "outputs": { 7 | "alb-target-group-arn-prod": { 8 | "value": "arn:aws:elasticloadbalancing:us-west-2:1234566777881:targetgroup/foobar-prod/6cb8d354ee4dff3e", 9 | "type": "string" 10 | }, 11 | "alb-target-group-arn-test": { 12 | "value": "arn:aws:elasticloadbalancing:us-west-2:1234566777881:targetgroup/foobar-test/718aa450937e606e", 13 | "type": "string" 14 | }, 15 | "ecr_registry_arn": { 16 | "value": "arn:aws:ecr:us-west-2:1234566777881:repository/examplecorp/foobar", 17 | "type": "string" 18 | }, 19 | "ecr_registry_id": { 20 | "value": "1234566777881", 21 | "type": "string" 22 | }, 23 | "ecr_repository_name": { 24 | "value": "examplecorp/foobar", 25 | "type": "string" 26 | }, 27 | "elb-id-prod": { 28 | "value": "arn:aws:elasticloadbalancing:us-west-2:1234566777881:loadbalancer/app/prod-foobar-foo-apps/75f25c6d6bd426aa", 29 | "type": "string" 30 | }, 31 | "elb-id-test": { 32 | 
"value": "arn:aws:elasticloadbalancing:us-west-2:1234566777881:loadbalancer/app/test-foobar-foo-apps/5b06556fb14d2b23", 33 | "type": "string" 34 | }, 35 | "prod-cluster-name": { 36 | "value": "foobar-foo-apps-prod", 37 | "type": "string" 38 | }, 39 | "prod-kms-key-arn": { 40 | "value": "arn:aws:kms:us-west-2:1234566777881:key/7b005881-ec3e-4320-b80a-c07b7b7b9812", 41 | "type": "string" 42 | }, 43 | "prod-rds-address": { 44 | "value": "foo-prod.c970jsizrrcy.us-west-2.rds.amazonaws.com", 45 | "type": "string" 46 | }, 47 | "prod-rds-port": { 48 | "value": 3306, 49 | "type": "number" 50 | }, 51 | "s3-bucket": { 52 | "value": "foobar-files", 53 | "type": "string" 54 | }, 55 | "task-role-arn-prod": { 56 | "value": "arn:aws:iam::1234566777881:role/foobar-prod-task", 57 | "type": "string" 58 | }, 59 | "task-role-arn-test": { 60 | "value": "arn:aws:iam::1234566777881:role/foobar-test-task", 61 | "type": "string" 62 | }, 63 | "test-cluster-name": { 64 | "value": "foobar-foo-apps-test", 65 | "type": "string" 66 | }, 67 | "test-kms-key-arn": { 68 | "value": "arn:aws:kms:us-west-2:1234566777881:key/ea869a78-bb70-46c4-bdf1-ecaacca2b95e", 69 | "type": "string" 70 | }, 71 | "test-rds-address": { 72 | "value": "foo-test.c970jsizrrcy.us-west-2.rds.amazonaws.com", 73 | "type": "string" 74 | }, 75 | "test-rds-port": { 76 | "value": 3306, 77 | "type": "number" 78 | } 79 | }, 80 | "resources": [ 81 | ] 82 | } 83 | 84 | -------------------------------------------------------------------------------- /deployfish/config/test/terraform.tfstate.prod: -------------------------------------------------------------------------------- 1 | { 2 | "version": 3, 3 | "terraform_version": "0.7.13", 4 | "serial": 116, 5 | "lineage": "3a5655c3-1d1b-4494-ad10-79186f28354e", 6 | "remote": { 7 | "type": "s3", 8 | "config": { 9 | "bucket": "state-file-bucket", 10 | "key": "remote-terraform-state", 11 | "region": "us-west-2" 12 | } 13 | }, 14 | "modules": [ 15 | { 16 | "path": [ 17 | "root" 18 | ], 19 | "outputs": { 20 | "elb-id": { 21 | "sensitive": false, 22 | "type": "string", 23 | "value": "foobar-prod-elb" 24 | }, 25 | "iam-role-task": { 26 | "sensitive": false, 27 | "type": "string", 28 | "value": "arn:aws:iam::324958023459:role/foobar-prod-task" 29 | }, 30 | "autoscalinggroup-name": { 31 | "sensitive": false, 32 | "type": "string", 33 | "value": "foobar-asg-prod" 34 | }, 35 | "cluster-name": { 36 | "sensitive": false, 37 | "type": "string", 38 | "value": "foobar-cluster-prod" 39 | }, 40 | "security-group-list": { 41 | "sensitive": false, 42 | "type": "list", 43 | "value": [ 44 | "sg-1234567", 45 | "sg-2345678", 46 | "sg-3456789" 47 | ] 48 | }, 49 | "vpc-configuration": { 50 | "sensitive": false, 51 | "type": "map", 52 | "value": { 53 | "subnets": ["subnet-1234567"], 54 | "security_groups": ["sg-1234567"], 55 | "public_ip": "DISABLED" 56 | } 57 | }, 58 | "rds-address": { 59 | "value": "foo-prod.c970jsizrrcy.us-west-2.rds.amazonaws.com", 60 | "type": "string" 61 | }, 62 | "rds-port": { 63 | "value": 3306, 64 | "type": "number" 65 | } 66 | } 67 | } 68 | ] 69 | } 70 | -------------------------------------------------------------------------------- /deployfish/config/test/terraform.tfstate.qa: -------------------------------------------------------------------------------- 1 | { 2 | "version": 3, 3 | "terraform_version": "0.7.13", 4 | "serial": 116, 5 | "lineage": "3a5655c3-1d1b-4494-ad10-79186f28354e", 6 | "remote": { 7 | "type": "s3", 8 | "config": { 9 | "bucket": "state-file-bucket", 10 | "key": "remote-terraform-state", 11 | 
"region": "us-west-2" 12 | } 13 | }, 14 | "modules": [ 15 | { 16 | "path": [ 17 | "root" 18 | ], 19 | "outputs": { 20 | "elb-id": { 21 | "sensitive": false, 22 | "type": "string", 23 | "value": "foobar-qa-elb" 24 | }, 25 | "iam-role-task": { 26 | "sensitive": false, 27 | "type": "string", 28 | "value": "arn:aws:iam::324958023459:role/foobar-qa-task" 29 | }, 30 | "autoscalinggroup-name": { 31 | "sensitive": false, 32 | "type": "string", 33 | "value": "foobar-asg-qa" 34 | }, 35 | "cluster-name": { 36 | "sensitive": false, 37 | "type": "string", 38 | "value": "foobar-cluster-qa" 39 | }, 40 | "security-group-list": { 41 | "sensitive": false, 42 | "type": "list", 43 | "value": [ 44 | "sg-1234567", 45 | "sg-2345678", 46 | "sg-3456789" 47 | ] 48 | }, 49 | "vpc-configuration": { 50 | "sensitive": false, 51 | "type": "map", 52 | "value": { 53 | "subnets": ["subnet-1234567"], 54 | "security_groups": ["sg-1234567"], 55 | "public_ip": "DISABLED" 56 | } 57 | }, 58 | "rds-address": { 59 | "value": "foo-qa.c970jsizrrcy.us-west-2.rds.amazonaws.com", 60 | "type": "string" 61 | }, 62 | "rds-port": { 63 | "value": 3306, 64 | "type": "number" 65 | } 66 | } 67 | } 68 | ] 69 | } 70 | -------------------------------------------------------------------------------- /deployfish/config/test/terraform_interpolate.yml: -------------------------------------------------------------------------------- 1 | terraform: 2 | statefile: s3://my-{environment}-statefile 3 | lookups: 4 | cluster_name: 'cluster-name' 5 | autoscalinggroup_name: 'autoscalinggroup-name' 6 | elb_id: 'elb-id' 7 | iam_task_role: 'iam-role-task' 8 | security_group_list: 'security-group-list' 9 | vpc_configuration: 'vpc-configuration' 10 | rds_address: 'rds-address' 11 | rds_port: 'rds-port' 12 | 13 | tunnels: 14 | - name: mysql-qa 15 | service: foobar-qa 16 | host: ${terraform.rds_address} 17 | port: ${terraform.rds_port} 18 | local_port: 8888 19 | 20 | - name: mysql-prod 21 | service: foobar-prod 22 | host: ${terraform.rds_address} 23 | port: ${terraform.rds_port} 24 | local_port: 8888 25 | 26 | services: 27 | - name: foobar-qa 28 | environment: qa 29 | cluster: ${terraform.cluster_name} 30 | service_role_arn: a_task_role_arn 31 | count: 2 32 | load_balancer: 33 | load_balancer_name: ${terraform.elb_id} 34 | container_name: example 35 | container_port: 443 36 | family: foobar-qa 37 | network_mode: host 38 | task_role_arn: ${terraform.iam_task_role} 39 | vpc_configuration: 40 | security_groups: ${terraform.security_group_list} 41 | containers: 42 | - name: example 43 | image: example:1.2.3 44 | cpu: 1024 45 | memory: 4000 46 | command: /usr/bin/supervisord 47 | entrypoint: /entrypoint.sh 48 | ports: 49 | - "80:80" 50 | - "443:443" 51 | - "8021:8021:udp" 52 | - name: foobar-prod 53 | environment: prod 54 | cluster: ${terraform.cluster_name} 55 | service_role_arn: a_task_role_arn 56 | count: 2 57 | load_balancer: 58 | load_balancer_name: ${terraform.elb_id} 59 | container_name: example 60 | container_port: 443 61 | family: foobar-prod 62 | network_mode: host 63 | task_role_arn: ${terraform.iam_task_role} 64 | vpc_configuration: 65 | security_groups: ${terraform.security_group_list} 66 | containers: 67 | - name: example 68 | image: example:1.2.3 69 | cpu: 1024 70 | memory: 4000 71 | command: /usr/bin/supervisord 72 | entrypoint: /entrypoint.sh 73 | ports: 74 | - "80:80" 75 | - "443:443" 76 | - "8021:8021:udp" 77 | -------------------------------------------------------------------------------- /deployfish/config/test/test_Terraform.py: 
-------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | from unittest.mock import Mock 5 | 6 | from testfixtures import Replacer, compare 7 | 8 | from deployfish.config.processors.terraform import TerraformS3State 9 | 10 | YAML = { 11 | "statefile": "s3://foobar/baz", 12 | "lookups": { 13 | "lookup1": "{environment}-cluster-name", 14 | "lookup2": "{environment}-elb-id", 15 | "lookup3": "{environment}-autoscalinggroup-name", 16 | "lookup4": "security-group-list" 17 | } 18 | } 19 | 20 | 21 | class TestTerraform_load_yaml(unittest.TestCase): 22 | 23 | def setUp(self): 24 | self.terraform = TerraformS3State(YAML, {}) 25 | 26 | def test_lookups(self): 27 | compare(self.terraform.terraform_config["lookups"], { 28 | "lookup1": "{environment}-cluster-name", 29 | "lookup2": "{environment}-elb-id", 30 | "lookup3": "{environment}-autoscalinggroup-name", 31 | "lookup4": "security-group-list", 32 | }) 33 | 34 | 35 | class TestTerraform_get_terraform_state(unittest.TestCase): 36 | 37 | def setUp(self): 38 | current_dir = os.path.dirname(os.path.abspath(__file__)) 39 | filename = os.path.join(current_dir, "terraform.tfstate") 40 | with open(filename) as f: 41 | self.tfstate = json.loads(f.read()) 42 | self.terraform = TerraformS3State(YAML, {}) 43 | 44 | def test_lookup(self): 45 | with Replacer() as r: 46 | get_mock = r("deployfish.config.processors.terraform.TerraformS3State._get_state_file_from_s3", Mock()) 47 | get_mock.return_value = self.tfstate 48 | self.terraform.load({"environment": "qa"}) 49 | self.assertTrue("qa-cluster-name" in self.terraform.terraform_lookups) 50 | 51 | 52 | class TestTerraform_get_terraform_state_v12(unittest.TestCase): 53 | 54 | def setUp(self): 55 | current_dir = os.path.dirname(os.path.abspath(__file__)) 56 | filename = os.path.join(current_dir, "terraform.tfstate.0.12") 57 | with open(filename) as f: 58 | self.tfstate = json.loads(f.read()) 59 | self.terraform = TerraformS3State(YAML, {}) 60 | 61 | def test_lookup(self): 62 | with Replacer() as r: 63 | get_mock = r("deployfish.config.processors.terraform.TerraformS3State._get_state_file_from_s3", Mock()) 64 | get_mock.return_value = self.tfstate 65 | self.terraform.load({"environment": "qa"}) 66 | self.assertTrue("prod-rds-address" in self.terraform.terraform_lookups) 67 | 68 | 69 | class TestTerraform_lookup(unittest.TestCase): 70 | 71 | def setUp(self): 72 | current_dir = os.path.dirname(os.path.abspath(__file__)) 73 | filename = os.path.join(current_dir, "terraform.tfstate") 74 | with open(filename) as f: 75 | self.tfstate = json.loads(f.read()) 76 | self.terraform = TerraformS3State(YAML, {}) 77 | 78 | def test_lookup(self): 79 | with Replacer() as r: 80 | get_mock = r("deployfish.config.processors.terraform.TerraformS3State._get_state_file_from_s3", Mock()) 81 | get_mock.return_value = self.tfstate 82 | self.terraform.load({"environment": "qa"}) 83 | self.assertEqual(self.terraform.lookup("lookup1", {"{environment}": "qa"}), "foobar-cluster-qa") 84 | self.assertEqual(self.terraform.lookup("lookup1", {"{environment}": "prod"}), "foobar-cluster-prod") 85 | self.assertListEqual(self.terraform.lookup("lookup4", {}), ["sg-1234567", "sg-2345678", "sg-3456789"]) 86 | -------------------------------------------------------------------------------- /deployfish/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import ( # noqa: F401 2 | Base, 3 | BaseService, 4 | 
BaseServiceDockerExec, 5 | BaseServiceSecrets, 6 | BaseServiceSSH, 7 | ) 8 | from .cluster import ( # noqa: F401 9 | ECSCluster, 10 | ECSClusterSSH, 11 | ) 12 | from .commands import ECSServiceCommandLogs, ECSServiceCommands # noqa: F401 13 | from .elb import EC2ClassicLoadBalancer # noqa: F401 14 | from .elbv2 import ( # noqa: F401 15 | EC2LoadBalancer, 16 | EC2LoadBalancerListener, 17 | EC2LoadBalancerTargetGroup, 18 | ) 19 | from .invoked_task import ECSInvokedTask # noqa: F401 20 | from .logs import ( # noqa: F401 21 | Logs, 22 | LogsCloudWatchLogGroup, 23 | LogsCloudWatchLogStream, 24 | ) 25 | from .rds import ( # noqa: F401 26 | RDSRDSInstance, 27 | ) 28 | from .service import ( # noqa: F401 29 | ECSService, 30 | ECSServiceDockerExec, 31 | ECSServiceSecrets, 32 | ECSServiceSSH, 33 | ECSServiceStandaloneTasks, 34 | ECSServiceTunnel, 35 | ) 36 | from .task import ( # noqa: F401 37 | ECSStandaloneTask, 38 | ECSStandaloneTaskLogs, 39 | ECSStandaloneTaskSecrets, 40 | ) 41 | from .tunnel import BaseTunnel, Tunnels # noqa: F401 42 | -------------------------------------------------------------------------------- /deployfish/controllers/elb.py: -------------------------------------------------------------------------------- 1 | 2 | from cement import ex 3 | 4 | from deployfish.core.models import ClassicLoadBalancer, Model 5 | 6 | from .crud import ReadOnlyCrudBase 7 | from .utils import handle_model_exceptions 8 | 9 | 10 | class EC2ClassicLoadBalancer(ReadOnlyCrudBase): 11 | 12 | class Meta: 13 | label = "elbs" 14 | description = "Work with Classic Load Balancer objects" 15 | help = "Work with Classic Load Balancer objects" 16 | stacked_type = "nested" 17 | 18 | model: type[Model] = ClassicLoadBalancer 19 | 20 | help_overrides: dict[str, str] = { 21 | "info": "Show details about an ELB from AWS", 22 | } 23 | 24 | info_template: str = "detail--classicloadbalancer.jinja2" 25 | 26 | list_ordering: str = "Name" 27 | list_result_columns: dict[str, str] = { 28 | "Name": "LoadBalancerName", 29 | "Scheme": "scheme", 30 | "VPC": "VPCId", 31 | "Hostname": "DNSName" 32 | } 33 | 34 | @ex( 35 | help="List Classic Load Balancers in AWS", 36 | arguments=[ 37 | ( 38 | ["--vpc-id"], 39 | { 40 | "help": "Filter by VPC ID", 41 | "action": "store", 42 | "default": None, 43 | "dest": "vpc_id" 44 | } 45 | ), 46 | ( 47 | ["--name"], 48 | { 49 | "help": 'Filter by load balancer name, with globs. 
Ex: "foo*", "*foo"', 50 | "action": "store", 51 | "default": None, 52 | "dest": "name" 53 | } 54 | ), 55 | ( 56 | ["--scheme"], 57 | { 58 | "help": "Filter by load balancer scheme.", 59 | "action": "store", 60 | "default": "any", 61 | "choices": ["any", "internet-facing", "internal"], 62 | "dest": "scheme" 63 | } 64 | ), 65 | ] 66 | ) 67 | @handle_model_exceptions 68 | def list(self): 69 | results = self.model.objects.list( 70 | vpc_id=self.app.pargs.vpc_id, 71 | scheme=self.app.pargs.scheme, 72 | name=self.app.pargs.name 73 | ) 74 | self.render_list(results) 75 | -------------------------------------------------------------------------------- /deployfish/controllers/invoked_task.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from cement import ex 4 | 5 | from deployfish.core.models import InvokedTask, Model 6 | 7 | from .crud import ReadOnlyCrudBase 8 | from .utils import handle_model_exceptions 9 | 10 | 11 | class ECSInvokedTask(ReadOnlyCrudBase): 12 | 13 | class Meta: 14 | label = "invoked-tasks" 15 | description = "Work with Invoked Task objects" 16 | help = "Work with Invoked Task objects" 17 | stacked_type = "nested" 18 | usage = "Invoked tasks are tasks that either are currently running in ECS, or have run and are now stopped." 19 | 20 | model: type[Model] = InvokedTask 21 | 22 | help_overrides: dict[str, str] = { 23 | "info": "Show details about an InvokedTask in AWS", 24 | } 25 | 26 | info_template: str = "detail--invokedtask.jinja2" 27 | 28 | list_ordering: str = "Family" 29 | list_result_columns: dict[str, Any] = { 30 | "Family": "taskDefinition__family_revision", 31 | "Status": "lastStatus", 32 | "pk": "pk", 33 | } 34 | 35 | 36 | @ex( 37 | help="List Invoked Tasks in AWS", 38 | arguments=[ 39 | (["cluster"], {"help": "Name of the cluster to look in for tasks"}), 40 | ( 41 | ["--service-name"], 42 | { 43 | "help": "Filter by service name", 44 | "action": "store", 45 | "default": None, 46 | "dest": "service" 47 | } 48 | ), 49 | ( 50 | ["--family"], 51 | { 52 | "help": "Filter by task family", 53 | "action": "store", 54 | "default": None, 55 | "dest": "family" 56 | } 57 | ), 58 | ( 59 | ["--status"], 60 | { 61 | "help": "Filter by task status.", 62 | "action": "store", 63 | "default": "RUNNING", 64 | "choices": ["RUNNING", "PENDING", "STOPPED"], 65 | "dest": "status" 66 | } 67 | ), 68 | ( 69 | ["--launch-type"], 70 | { 71 | "help": "Filter by launch-type.", 72 | "action": "store", 73 | "default": "any", 74 | "choices": ["any", "EC2", "FARGATE"], 75 | "dest": "launch_type" 76 | } 77 | ), 78 | ] 79 | ) 80 | @handle_model_exceptions 81 | def list(self): 82 | results = self.model.objects.list( 83 | self.app.pargs.cluster, 84 | service=self.app.pargs.service, 85 | family=self.app.pargs.family, 86 | launch_type=self.app.pargs.launch_type, 87 | status=self.app.pargs.status 88 | ) 89 | self.render_list(results) 90 | -------------------------------------------------------------------------------- /deployfish/controllers/rds.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from typing import cast 3 | 4 | import click 5 | from cement import ex 6 | 7 | from deployfish.core.models import Model, RDSInstance 8 | 9 | from .crud import ReadOnlyCrudBase 10 | from .utils import handle_model_exceptions 11 | 12 | 13 | class RDSRDSInstance(ReadOnlyCrudBase): 14 | 15 | class Meta: 16 | label = "rds" 17 | description = "Work with RDS Instances" 18 | help = "Work with RDS 
Instances" 19 | stacked_type = "nested" 20 | 21 | model: type[Model] = RDSInstance 22 | 23 | help_overrides: dict[str, str] = { 24 | "info": "Show details about an RDS Instance from AWS", 25 | "list": "List RDS Instances in AWS", 26 | } 27 | 28 | info_template: str = "detail--rdsinstance.jinja2" 29 | 30 | list_ordering: str = "Name" 31 | list_result_columns: dict[str, str] = { 32 | "Name": "DBInstanceIdentifier", 33 | "VPC": "vpc__name", 34 | "Engine": "Engine", 35 | "Version": "EngineVersion", 36 | "Mult AZ": "multi_az", 37 | "Hostname": "hostname", 38 | "Root User": "root_user" 39 | } 40 | 41 | @ex( 42 | help="Get the root credentials for an RDS Instance.", 43 | arguments=[(["pk"], {"help": "The name of the RDS Instance in AWS"})], 44 | description=""" 45 | Print the username and password for the root user if the RDS instance 46 | identified by {pk} is Secrets Manager enabled. If the instance is 47 | not Secrets Manager enabled, just print the username of the root user. 48 | 49 | The {pk} is the name of the RDS instance. 50 | """, 51 | formatter_class=argparse.RawDescriptionHelpFormatter 52 | ) 53 | @handle_model_exceptions 54 | def credentials(self) -> None: 55 | loader = self.loader(self) 56 | obj = loader.get_object_from_aws(self.app.pargs.pk) 57 | obj = cast("RDSInstance", obj) 58 | if obj.secret_enabled: 59 | self.app.print(f"Username: {obj.root_user}") 60 | self.app.print(f"Password: {obj.root_password}") 61 | else: 62 | self.app.print(f"Username: {obj.root_user}") 63 | self.app.print(click.style("Password is not in AWS Secrets Manager", fg="red")) 64 | -------------------------------------------------------------------------------- /deployfish/controllers/utils.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable 2 | from functools import wraps 3 | 4 | import click 5 | 6 | from deployfish.core.ssh import SSHMixin 7 | from deployfish.exceptions import ( 8 | ConfigProcessingFailed, 9 | MultipleObjectsReturned, 10 | NoSuchConfigSection, 11 | NoSuchConfigSectionItem, 12 | ObjectDoesNotExist, 13 | SchemaException, 14 | ) 15 | 16 | # ======================== 17 | # Decorators 18 | # ======================== 19 | 20 | def handle_model_exceptions(func: Callable) -> Callable: 21 | """ 22 | This decorator cathces all the kinds of execptions we expect to see in normal 23 | operation while letting others display their stack traces normally. 24 | 25 | We use this decorator to wrap cement command methods on 26 | :py:class:`cement.ext.ext_argparse.ArgparseController` subclasses. 
27 | """ 28 | 29 | @wraps(func) 30 | def inner(self, *args, **kwargs): 31 | try: 32 | obj = func(self, *args, **kwargs) 33 | except ( 34 | ObjectDoesNotExist, 35 | MultipleObjectsReturned, 36 | self.model.OperationFailed, 37 | self.model.ReadOnly, 38 | self.loader.DeployfishSectionDoesNotExist, 39 | SchemaException, 40 | ConfigProcessingFailed, 41 | NoSuchConfigSection, 42 | SSHMixin.NoSSHTargetAvailable 43 | ) as e: 44 | self.app.print(click.style(str(e), fg="red")) 45 | except NoSuchConfigSectionItem as e: 46 | lines = [] 47 | lines.append(click.style(f"ERROR: {e!s}", fg="red")) 48 | lines.append( 49 | click.style(f'Available {self.model.__name__}s in the "{e.section}:" section of deployfish.yml:', fg="cyan") 50 | ) 51 | for item in self.app.deployfish_config.get_section(e.section): 52 | lines.append(" {}".format(item["name"])) 53 | environments = [] 54 | for item in self.app.deployfish_config.get_section(e.section): 55 | if "environment" in item: 56 | environments.append(" {}".format(item["environment"])) 57 | if environments: 58 | lines.append(click.style("\nAvailable environments:", fg="cyan")) 59 | lines.extend(environments) 60 | lines.append("") 61 | self.app.print("\n".join(lines)) 62 | else: 63 | return obj 64 | return inner 65 | -------------------------------------------------------------------------------- /deployfish/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/core/__init__.py -------------------------------------------------------------------------------- /deployfish/core/adapters/__init__.py: -------------------------------------------------------------------------------- 1 | from .deployfish import * # noqa: F403 2 | -------------------------------------------------------------------------------- /deployfish/core/adapters/abstract.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable 2 | from typing import Any 3 | 4 | from deployfish.exceptions import SchemaException as BaseSchemaException 5 | 6 | 7 | class Adapter: 8 | """ 9 | Given a dict of data from a data source, convert it appropriate data 10 | structures to be used to initialize a deployfish model. 11 | 12 | Minimally this means translating the source data into the data structure 13 | returned by an apporpriate ``describe_*`` AWS API call. In more complicated 14 | cases, there may be additional data returned also. 15 | """ 16 | 17 | NONE: str = "deployfish:required" 18 | 19 | class SchemaException(BaseSchemaException): 20 | """ 21 | Raise this if data in the config source does not validate properly. 22 | """ 23 | 24 | 25 | def __init__(self, data: dict[str, Any], partial: bool = False, **kwargs) -> None: 26 | """ 27 | ``data`` is the raw data from our source. 28 | """ 29 | self.data: dict[str, Any] = data 30 | self.partial: bool = partial 31 | 32 | def only_one_is_True(self, data: list[bool]) -> bool: 33 | """ 34 | Look through the list ``data``, a list of boolean values, and return True if only one True is in the 35 | list, False otherwise. 
36 | """ 37 | # FIXME: much better ways to do this 38 | true_found = False 39 | for v in data: 40 | if v and not true_found: 41 | true_found = True 42 | elif v and true_found: 43 | return False # "Too Many Trues" 44 | return true_found 45 | 46 | def set( 47 | self, 48 | data: dict[str, Any], 49 | source_key: str, 50 | dest_key: str = None, 51 | default: Any = NONE, 52 | optional: bool = False, 53 | convert: Callable = None 54 | ): 55 | if dest_key is None: 56 | dest_key = source_key 57 | if self.partial or optional: 58 | if source_key in self.data: 59 | data[dest_key] = self.data[source_key] 60 | elif default != self.NONE: 61 | data[dest_key] = self.data.get(source_key, default) 62 | else: 63 | data[dest_key] = self.data[source_key] 64 | if dest_key in data and convert: 65 | data[dest_key] = convert(data[dest_key]) 66 | 67 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 68 | """ 69 | This method is the meat of the adapter -- it is what takes ``self.data`` and returns the 70 | data structures needed to initialize our model. 71 | 72 | The return type varies by what the model needs. 73 | """ 74 | raise NotImplementedError 75 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/__init__.py: -------------------------------------------------------------------------------- 1 | from deployfish.registry import importer_registry as registry 2 | 3 | from .appscaling import ( 4 | ECSServiceScalableTargetAdapter, 5 | ECSServiceScalingPolicyAdapter, 6 | ) 7 | from .cloudwatch import ECSServiceCPUAlarmAdapter 8 | from .ecs import ( 9 | ServiceAdapter, 10 | ServiceHelperTaskAdapter, 11 | StandaloneTaskAdapter, 12 | TaskDefinitionAdapter, 13 | ) 14 | from .events import ( 15 | EventScheduleRuleAdapter, 16 | EventTargetAdapter, 17 | ) 18 | from .secrets import SecretAdapter, parse_secret_string 19 | from .service_discovery import ServiceDiscoveryServiceAdapter 20 | from .ssh import SSHTunnelAdapter 21 | 22 | # ----------------------- 23 | # Adapter registrations 24 | # ----------------------- 25 | 26 | # ecs 27 | registry.register("StandaloneTask", "deployfish", StandaloneTaskAdapter) 28 | registry.register("TaskDefinition", "deployfish", TaskDefinitionAdapter) 29 | registry.register("Service", "deployfish", ServiceAdapter) 30 | registry.register("ServiceHelperTask", "deployfish", ServiceHelperTaskAdapter) 31 | 32 | # events 33 | registry.register("EventTarget", "deployfish", EventTargetAdapter) 34 | registry.register("EventScheduleRule", "deployfish", EventScheduleRuleAdapter) 35 | 36 | # cloudwatch 37 | registry.register("CloudwatchAlarm", "deployfish", ECSServiceCPUAlarmAdapter) 38 | 39 | # appscaling 40 | registry.register("ScalingPolicy", "deployfish", ECSServiceScalingPolicyAdapter) 41 | registry.register("ScalableTarget", "deployfish", ECSServiceScalableTargetAdapter) 42 | 43 | # secrets 44 | registry.register("Secret", "deployfish", SecretAdapter) 45 | 46 | # service_discovery 47 | registry.register("ServiceDiscoveryService", "deployfish", ServiceDiscoveryServiceAdapter) 48 | 49 | # ssh tunnels 50 | registry.register("SSHTunnel", "deployfish", SSHTunnelAdapter) 51 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/appscaling.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from deployfish.core.models import CloudwatchAlarm, ScalingPolicy 4 | 5 | from ..abstract import Adapter 6 | 7 
| # ------------------------ 8 | # Adapters 9 | # ------------------------ 10 | 11 | class ECSServiceScalingPolicyAdapter(Adapter): 12 | """ 13 | .. code-block:: python 14 | 15 | { 16 | 'cpu': '>=60', 17 | 'check_every_seconds': 60, 18 | 'periods': 5, 19 | 'cooldown': 60, 20 | 'scale_by': 1 21 | } 22 | """ 23 | 24 | def __init__(self, data: dict[str, Any], **kwargs) -> None: 25 | self.cluster = kwargs.pop("cluster", None) 26 | self.service = kwargs.pop("service", None) 27 | super().__init__(data, **kwargs) 28 | 29 | def get_PolicyName(self) -> str: 30 | if int(self.data["scale_by"]) < 0: 31 | direction = "scale-down" 32 | else: 33 | direction = "scale-up" 34 | return f"{self.cluster}-{self.service}-{direction}" 35 | 36 | def get_ResourceId(self) -> str: 37 | return f"service/{self.cluster}/{self.service}" 38 | 39 | def get_MetricIntervalLowerBound(self) -> float | None: 40 | if ">" in self.data["cpu"]: 41 | return 0.0 42 | return None 43 | 44 | def get_MetricIntervalUpperBound(self) -> float | None: 45 | if "<" in self.data["cpu"]: 46 | return 0.0 47 | return None 48 | 49 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 50 | data: dict[str, Any] = {} 51 | data["PolicyName"] = self.get_PolicyName() 52 | data["ServiceNamespace"] = "ecs" 53 | data["ResourceId"] = self.get_ResourceId() 54 | data["ScalableDimension"] = "ecs:service:DesiredCount" 55 | data["PolicyType"] = "StepScaling" 56 | adjustment: dict[str, Any] = {"ScalingAdjustment": int(self.data["scale_by"])} 57 | lower_bound = self.get_MetricIntervalLowerBound() 58 | if lower_bound is not None: 59 | adjustment["MetricIntervalLowerBound"] = lower_bound 60 | upper_bound = self.get_MetricIntervalUpperBound() 61 | if upper_bound is not None: 62 | adjustment["MetricIntervalUpperBound"] = upper_bound 63 | data["StepScalingPolicyConfiguration"] = { 64 | "AdjustmentType": "ChangeInCapacity", 65 | "StepAdjustments": [adjustment], 66 | "Cooldown": int(self.data["cooldown"]), 67 | "MetricAggregationType": "Average" 68 | } 69 | kwargs = {} 70 | kwargs["alarm"] = CloudwatchAlarm.new( 71 | self.data, 72 | "deployfish", 73 | cluster=self.cluster, 74 | service=self.service 75 | ) 76 | return data, kwargs 77 | 78 | 79 | class ECSServiceScalableTargetAdapter(Adapter): 80 | """ 81 | .. 
code-block:: python 82 | 83 | { 84 | 'min_capacity': 2, 85 | 'max_capacity': 4, 86 | 'role_arn': 'arn:aws:iam::123445678901:role/ecsServiceRole', 87 | 'scale-up': { 88 | 'cpu': '>=60', 89 | 'check_every_seconds': 60, 90 | 'periods': 5, 91 | 'cooldown': 60, 92 | 'scale_by': 1 93 | }, 94 | 'scale-down': { 95 | 'cpu': '<=30', 96 | 'check_every_seconds': 60, 97 | 'periods': 30, 98 | 'cooldown': 60, 99 | 'scale_by': -1 100 | } 101 | } 102 | """ 103 | 104 | def __init__(self, data: dict[str, Any], **kwargs): 105 | self.cluster = kwargs.pop("cluster", None) 106 | self.service = kwargs.pop("service", None) 107 | super().__init__(data, **kwargs) 108 | 109 | def get_ResourceId(self) -> str: 110 | return f"service/{self.cluster}/{self.service}" 111 | 112 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 113 | data = {} 114 | data["ServiceNamespace"] = "ecs" 115 | data["ResourceId"] = self.get_ResourceId() 116 | data["ScalableDimension"] = "ecs:service:DesiredCount" 117 | data["MinCapacity"] = self.data["min_capacity"] 118 | data["MaxCapacity"] = self.data["max_capacity"] 119 | data["RoleARN"] = self.data["role_arn"] 120 | kwargs = {} 121 | policies = [] 122 | policies.append(ScalingPolicy.new( 123 | self.data["scale-up"], 124 | "deployfish", 125 | cluster=self.cluster, 126 | service=self.service 127 | )) 128 | policies.append(ScalingPolicy.new( 129 | self.data["scale-down"], 130 | "deployfish", 131 | cluster=self.cluster, 132 | service=self.service 133 | )) 134 | kwargs["policies"] = policies 135 | return data, kwargs 136 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/cloudwatch.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Any 3 | 4 | from ..abstract import Adapter 5 | 6 | # ------------------------ 7 | # Adapters 8 | # ------------------------ 9 | 10 | class ECSServiceCPUAlarmAdapter(Adapter): 11 | """ 12 | .. 
code-block:: python 13 | 14 | { 15 | 'cpu': '>=60', 16 | 'check_every_seconds': 60, 17 | 'periods': 5, 18 | 'cooldown': 60, 19 | 'scale_by': 1 20 | } 21 | """ 22 | 23 | def __init__(self, data: dict[str, Any], **kwargs) -> None: 24 | self.cluster = kwargs.pop("cluster", None) 25 | self.service = kwargs.pop("service", None) 26 | super().__init__(data, **kwargs) 27 | 28 | def get_AlarmName(self) -> str: 29 | if "<" in self.data["cpu"]: 30 | direction = "low" 31 | else: 32 | direction = "high" 33 | return f"{self.cluster}-{self.service}-{direction}" 34 | 35 | def get_AlarmDescription(self) -> str: 36 | if ">" in self.data["cpu"]: 37 | direction = "up" 38 | else: 39 | direction = "down" 40 | return "Scale {} ECS service {} in cluster {} if service Average CPU is {} for {} seconds".format( 41 | direction, 42 | self.service, 43 | self.cluster, 44 | self.data["cpu"], 45 | (int(self.data["periods"]) * int(self.data["check_every_seconds"])) 46 | ) 47 | 48 | def get_ComparisonOperator(self) -> str: 49 | operator = "==" 50 | if "<=" in self.data["cpu"]: 51 | operator = "LessThanOrEqualToThreshold" 52 | elif "<" in self.data["cpu"]: 53 | operator = "LessThanThreshold" 54 | elif ">=" in self.data["cpu"]: 55 | operator = "GreaterThanOrEqualToThreshold" 56 | elif ">" in self.data["cpu"]: 57 | operator = "GreaterThanThreshold" 58 | return operator 59 | 60 | def get_Threshold(self) -> float: 61 | return float(re.sub("[<>=]*", "", self.data["cpu"])) 62 | 63 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 64 | data: dict[str, Any] = {} 65 | data["AlarmName"] = self.get_AlarmName() 66 | data["AlarmDescription"] = self.get_AlarmDescription() 67 | data["MetricName"] = "CPUUtilization" 68 | data["Namespace"] = "AWS/ECS" 69 | data["Statistic"] = "Average" 70 | data["Dimensions"] = [ 71 | {"Name": "ClusterName", "Value": self.cluster}, 72 | {"Name": "ServiceName", "Value": self.service} 73 | ] 74 | data["Period"] = int(self.data["check_every_seconds"]) 75 | data["Unit"] = self.data.get("unit", "Percent") 76 | data["EvaluationPeriods"] = int(self.data["periods"]) 77 | data["ComparisonOperator"] = self.get_ComparisonOperator() 78 | data["Threshold"] = self.get_Threshold() 79 | return data, {} 80 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/events.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from deployfish.core.models import Cluster 4 | 5 | from ..abstract import Adapter 6 | 7 | 8 | class EventTargetAdapter(Adapter): 9 | 10 | def get_cluster_arn(self) -> str: 11 | try: 12 | cluster = Cluster.objects.get(self.data["cluster"]) 13 | except Cluster.DoesNotExist as e: 14 | raise self.SchemaException(f"EventTarget: {e!s}") 15 | return cluster.arn 16 | 17 | def get_vpc_configuration(self) -> dict[str, Any]: 18 | # FIXME: use VpcConfigurationMixin for this 19 | data: dict[str, Any] = {} 20 | source = self.data.get("vpc_configuration", None) 21 | if source: 22 | data["Subnets"] = source["subnets"] 23 | if "security_groups" in source: 24 | data["SecurityGroups"] = source["security_groups"] 25 | if "public_ip" in source: 26 | data["AssignPublicIp"] = "ENABLED" if source["public_ip"] else "DISABLED" 27 | return data 28 | 29 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 30 | data = {} 31 | data["Id"] = "deployfish-" + self.data["name"] 32 | data["Arn"] = self.get_cluster_arn() 33 | data["RoleArn"] = self.data["schedule_role"] 34 | ecs = {} 35 | 
ecs["TaskCount"] = self.data.get("count", 1) 36 | ecs["LaunchType"] = self.data.get("launch_type", "EC2") 37 | if ecs["LaunchType"] == "FARGATE": 38 | vpc_configuration = self.get_vpc_configuration() 39 | if vpc_configuration: 40 | ecs["NetworkConfiguration"] = {} 41 | ecs["NetworkConfiguration"]["awsvpcConfiguration"] = vpc_configuration 42 | ecs["PlatformVersion"] = self.data.get("platform_version", "LATEST") 43 | if "grouo" in self.data: 44 | ecs["Group"] = self.data["group"] 45 | # FIXME: Deal with placementConstraints, placementStrategy and capacityProviderStrategy 46 | data["EcsParameters"] = ecs 47 | return data, {} 48 | 49 | 50 | class EventScheduleRuleAdapter(Adapter): 51 | 52 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 53 | data = {} 54 | data["Name"] = "deployfish-" + self.data["name"] 55 | data["ScheduleExpression"] = self.data["schedule"] 56 | data["State"] = "ENABLED" 57 | data["Description"] = "Scheduler for task: {}".format(self.data["name"]) 58 | return data, {} 59 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/mixins.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | 4 | class SSHConfigMixin: 5 | 6 | data: dict[str, Any] 7 | 8 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 9 | data: dict[str, Any] = {} 10 | kwargs: dict[str, Any] = {} 11 | if "ssh" in self.data: 12 | if "proxy" in self.data["ssh"]: 13 | kwargs["ssh_proxy_type"] = self.data["ssh"]["proxy"] 14 | return data, kwargs 15 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/secrets.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from typing import Any, cast 3 | 4 | from deployfish.core.models import ExternalSecret, Secret 5 | 6 | from ..abstract import Adapter 7 | 8 | 9 | def parse_secret_string(secret_string: str) -> tuple[str, dict[str, Any]]: 10 | """ 11 | Parse an identifier from a deployfish.yml parameter definition that looks like one of the following: 12 | 13 | KEY=VALUE 14 | KEY:secure=VALUE 15 | KEY:secure:arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab=VALUE 16 | """ 17 | i = 0 18 | key = "" 19 | is_secure = False 20 | kms_key_id = None 21 | identifier, value = deepcopy(secret_string).split("=", 1) 22 | while identifier is not None: 23 | segments = identifier.split(":", 1) 24 | segment = segments[0] 25 | if i == 0: 26 | key = segment 27 | elif segment == "secure": 28 | is_secure = True 29 | elif segment == "arn": 30 | kms_key_id = f"arn:{segments[1]}" 31 | break 32 | if len(segments) > 1: 33 | identifier = segments[1] 34 | else: 35 | break 36 | i += 1 37 | kwargs: dict[str, Any] = { 38 | "Value": value, 39 | "DataType": "text", 40 | "Tier": "Standard" 41 | } 42 | if is_secure: 43 | kwargs["Type"] = "SecureString" 44 | kwargs["KeyId"] = kms_key_id 45 | else: 46 | kwargs["Type"] = "String" 47 | return key, kwargs 48 | 49 | 50 | # ------------------------ 51 | # Mixins 52 | # ------------------------ 53 | 54 | class SecretsMixin: 55 | 56 | data: dict[str, Any] 57 | 58 | def get_secrets(self, cluster: str, name: str, decrypt: bool = True) -> list[Secret]: 59 | secrets = None 60 | if "config" in self.data: 61 | secrets = [] 62 | for secret in self.data["config"]: 63 | try: 64 | secrets.append(Secret.new({"value": secret}, "deployfish", cluster=cluster, name=name)) 65 | 
except SecretAdapter.ExternalParameterException: 66 | # handle globs 67 | secrets.extend(ExternalSecret.objects.list(secret, decrypt=decrypt)) 68 | return cast("list[Secret]", secrets) 69 | 70 | 71 | # ------------------------ 72 | # Adapters 73 | # ------------------------ 74 | 75 | class SecretAdapter(Adapter): 76 | 77 | class ExternalParameterException(Exception): 78 | pass 79 | 80 | def __init__(self, data: dict[str, Any], **kwargs): 81 | super().__init__(data, **kwargs) 82 | self.cluster: str = kwargs.pop("cluster", None) 83 | self.name: str = kwargs.pop("name", None) 84 | self.prefix: str = "" 85 | if kwargs.get("prefix"): 86 | self.prefix = "{}-".format(kwargs["prefix"]) 87 | 88 | def is_external(self) -> bool: 89 | if ("=" not in self.data["value"] or ":external" in self.data["value"]): 90 | return True 91 | return False 92 | 93 | def split(self) -> tuple[str, str]: 94 | definition: str = deepcopy(self.data["value"]) 95 | key = definition 96 | value = "" 97 | delimiter_loc = definition.find("=") 98 | if delimiter_loc > 0: 99 | key = definition[:delimiter_loc] 100 | if len(definition) > delimiter_loc + 1: 101 | value = definition[delimiter_loc + 1:].strip('"') 102 | return key, value 103 | 104 | def parse(self) -> tuple[str, dict[str, Any]]: 105 | """ 106 | Parse an identifier from a deployfish.yml parameter definition that looks like one of the following: 107 | 108 | KEY=VALUE 109 | KEY:secure=VALUE 110 | KEY:secure:arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab=VALUE 111 | """ 112 | return parse_secret_string(self.data["value"]) 113 | 114 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 115 | if self.is_external(): 116 | raise self.ExternalParameterException( 117 | "This is an external parameter; use ExternalParametersAdapter instead" 118 | ) 119 | key, kwargs = self.parse() 120 | data = {} 121 | if self.cluster and self.name: 122 | data["Name"] = f"{self.prefix}{self.cluster}.{self.name}.{key}" 123 | else: 124 | data["Name"] = f"{self.prefix}{key}" 125 | data.update(kwargs) 126 | 127 | return data, {"name": key} 128 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/service_discovery.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from ..abstract import Adapter 4 | 5 | 6 | class ServiceDiscoveryServiceAdapter(Adapter): 7 | """ 8 | .. 
code-block:: python 9 | 10 | { 11 | 'namespace': 'local', 12 | 'name': 'test', 13 | 'dns_records': [ 14 | { 15 | 'type': 'A', 16 | 'ttl': '60', 17 | } 18 | ], 19 | } 20 | """ 21 | 22 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 23 | data = {} 24 | data["Name"] = self.data["name"] 25 | data["DnsConfig"] = {} 26 | data["DnsConfig"]["RoutingPolicy"] = "MULTIVALUE" 27 | data["DnsConfig"]["DnsRecords"] = [] 28 | for record in self.data["dns_records"]: 29 | data["DnsConfig"]["DnsRecords"].append({ 30 | "Type": record["type"], 31 | "TTL": record["ttl"] 32 | }) 33 | kwargs = {} 34 | kwargs["namespace_name"] = self.data["namespace"] 35 | return data, kwargs 36 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/ssh.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from typing import Any 3 | 4 | from ..abstract import Adapter 5 | 6 | 7 | class SSHTunnelAdapter(Adapter): 8 | 9 | def convert(self) -> tuple[dict[str, Any], dict[str, Any]]: 10 | data = deepcopy(self.data) 11 | kwargs: dict[str, Any] = {} 12 | return data, kwargs 13 | -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/core/adapters/deployfish/test/__init__.py -------------------------------------------------------------------------------- /deployfish/core/adapters/deployfish/test/test_ServiceHelperTask_new.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/core/adapters/deployfish/test/test_ServiceHelperTask_new.py -------------------------------------------------------------------------------- /deployfish/core/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .abstract import * # noqa: F403 2 | from .appscaling import * # noqa: F403 3 | from .cloudwatch import * # noqa: F403 4 | from .cloudwatchlogs import * # noqa: F403 5 | from .ec2 import * # noqa: F403 6 | from .ecs import * # noqa: F403 7 | from .efs import * # noqa: F403 8 | from .elb import * # noqa: F403 9 | from .elbv2 import * # noqa: F403 10 | from .events import * # noqa: F403 11 | from .rds import * # noqa: F403 12 | from .secrets import * # noqa: F403 13 | from .secrets_manager import * # noqa: F403 14 | from .service_discovery import * # noqa: F403 15 | from .ssh import * # noqa: F403 16 | -------------------------------------------------------------------------------- /deployfish/core/models/cloudwatch.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Sequence 2 | from typing import Any 3 | 4 | from .abstract import Manager, Model 5 | 6 | # ---------------------------------------- 7 | # Managers 8 | # ---------------------------------------- 9 | 10 | 11 | class CloudwatchAlarmManager(Manager): 12 | 13 | service = "cloudwatch" 14 | 15 | def get(self, pk: str, **kwargs) -> "CloudwatchAlarm": 16 | response = self.client.describe_alarms(AlarmNames=[pk]) 17 | if response.get("MetricAlarms"): 18 | return CloudwatchAlarm(response["MetricAlarms"][0]) 19 | raise CloudwatchAlarm.DoesNotExist(f'No 
Cloudwatch Alarm with name "{pk}" exists in AWS') 20 | 21 | def list(self, cluster: str, service: str, **kwargs) -> Sequence["CloudwatchAlarm"]: 22 | response = self.client.describe_alarms( 23 | AlarmNamePrefix=[f"{cluster}-{service}"] 24 | ) 25 | if "MetricAlarms" in response: 26 | return [CloudwatchAlarm(d) for d in response["MetricAlarms"]] 27 | return [] 28 | 29 | def save(self, obj: Model, **kwargs) -> None: 30 | self.delete(obj) 31 | self.client.put_metric_alarm(**obj.render_for_create()) 32 | 33 | def delete(self, obj: Model, **kwargs) -> None: 34 | try: 35 | self.client.delete_alarms(AlarmNames=[obj.pk]) 36 | except self.client.exceptions.ResourceNotFound: 37 | pass 38 | 39 | 40 | # ---------------------------------------- 41 | # Models 42 | # ---------------------------------------- 43 | 44 | class CloudwatchAlarm(Model): 45 | 46 | objects = CloudwatchAlarmManager() 47 | 48 | @property 49 | def pk(self) -> str: 50 | return self.data["AlarmName"] 51 | 52 | @property 53 | def name(self) -> str: 54 | return self.data["AlarmName"] 55 | 56 | @property 57 | def arn(self) -> str: 58 | return self.data.get("AlarmArn", None) 59 | 60 | def set_policy_arn(self, arn: str) -> None: 61 | self.data["AlarmActions"] = [arn] 62 | 63 | def render_for_diff(self) -> dict[str, Any]: 64 | data = {} 65 | data["AlarmName"] = self.data["AlarmName"] 66 | data["AlarmDescription"] = self.data["AlarmDescription"] 67 | data["MetricName"] = self.data["MetricName"] 68 | data["Namespace"] = self.data["Namespace"] 69 | data["Statistic"] = self.data["Statistic"] 70 | data["Dimensions"] = self.data["Dimensions"] 71 | data["Period"] = self.data["Period"] 72 | data["Unit"] = self.data["Unit"] 73 | data["EvaluationPeriods"] = self.data["EvaluationPeriods"] 74 | data["ComparisonOperator"] = self.data["ComparisonOperator"] 75 | data["Threshold"] = self.data["Threshold"] 76 | return data 77 | -------------------------------------------------------------------------------- /deployfish/core/models/efs.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Sequence 2 | 3 | import botocore 4 | 5 | from .abstract import Manager, Model 6 | from .mixins import TagsManagerMixin, TagsMixin 7 | 8 | # ---------------------------------------- 9 | # Managers 10 | # ---------------------------------------- 11 | 12 | 13 | class EFSFileSystemManager(TagsManagerMixin, Manager): 14 | 15 | service: str = "efs" 16 | 17 | def get(self, pk: str, **_) -> "EFSFileSystem": 18 | try: 19 | response = self.client.describe_file_systems(FileSystemId=pk) 20 | except botocore.exceptions.ClientError: 21 | # FIXME: can we get ClientError for reasons other than the filesystem does 22 | # not exist? 
23 | raise EFSFileSystem.DoesNotExist( 24 | f'No EFS file system with id "{pk}" exists in AWS' 25 | ) 26 | return EFSFileSystem(response["FileSystems"][0]) 27 | 28 | def list(self) -> Sequence["EFSFileSystem"]: 29 | response = self.client.describe_file_systems() 30 | return [EFSFileSystem(group) for group in response["FileSystems"]] 31 | 32 | 33 | # ---------------------------------------- 34 | # Models 35 | # ---------------------------------------- 36 | 37 | class EFSFileSystem(TagsMixin, Model): 38 | 39 | objects = EFSFileSystemManager() 40 | 41 | @property 42 | def pk(self) -> str: 43 | return self.data["FileSystemId"] 44 | 45 | @property 46 | def name(self) -> str: 47 | return self.data["Name"] 48 | 49 | @property 50 | def arn(self) -> str: 51 | return self.data["FileSystemArn"] 52 | 53 | @property 54 | def size(self) -> int: 55 | return self.data["SizeInBytes"]["Value"] 56 | 57 | @property 58 | def state(self) -> str: 59 | return self.data["LifeCycleState"] 60 | -------------------------------------------------------------------------------- /deployfish/core/models/secrets_manager.py: -------------------------------------------------------------------------------- 1 | import base64 2 | from collections.abc import Sequence 3 | 4 | from .abstract import Manager, Model 5 | from .mixins import TagsManagerMixin, TagsMixin 6 | 7 | # ---------------------------------------- 8 | # Managers 9 | # ---------------------------------------- 10 | 11 | 12 | class SMSecretManager(TagsManagerMixin, Manager): 13 | """ 14 | Manage our Secrets Manager secrets. This differs from 15 | :py:class:`deployfish.core.models.secrets.SecretManager` in that that manager 16 | manages SSM Parameter Store secrets, not Secrets Manager secrets. 17 | """ 18 | 19 | service: str = "secretsmanager" 20 | 21 | def get(self, pk: str, **_) -> "SMSecret": 22 | try: 23 | response = self.client.describe_secret(SecretId=pk) 24 | except self.client.exceptions.ResourceNotFoundException: 25 | raise SMSecret.DoesNotExist( 26 | f'No SMSecret with id "{pk}" exists in AWS' 27 | ) 28 | return SMSecret(response) 29 | 30 | def get_value(self, pk: str) -> str: 31 | try: 32 | response = self.client.get_secret_value(SecretId=pk) 33 | except self.client.exceptions.ResourceNotFoundException: 34 | raise SMSecret.DoesNotExist( 35 | f'No SMSecret with id "{pk}" exists in AWS' 36 | ) 37 | except self.client.exceptions.DecryptionFailure as e: 38 | raise SMSecret.OperationFailed( 39 | f'Could not decrypt SMSecret("{pk}")' 40 | ) from e 41 | 42 | if "SecretBinary" in response: 43 | # SecretBinary is a base64 encoded bytes array. We need to decode 44 | # it back to a utf-8 string.
45 | return base64.b64decode(response["SecretBinary"]).decode("utf-8") 46 | return response["SecretString"] 47 | 48 | def list(self) -> Sequence["SMSecret"]: 49 | secrets: list[SMSecret] = [] 50 | paginator = self.client.get_paginator("list_secrets") 51 | for page in paginator.paginate(): 52 | secrets.extend([SMSecret(secret) for secret in page["SecretList"]]) 53 | return secrets 54 | 55 | 56 | # ---------------------------------------- 57 | # Models 58 | # ---------------------------------------- 59 | 60 | class SMSecret(TagsMixin, Model): 61 | 62 | objects = SMSecretManager() 63 | 64 | @property 65 | def pk(self) -> str: 66 | return self.data["ARN"] 67 | 68 | @property 69 | def name(self) -> str: 70 | return self.data["Name"] 71 | 72 | @property 73 | def arn(self) -> str: 74 | return self.data["ARN"] 75 | 76 | @property 77 | def kms_key_id(self) -> str: 78 | return self.data["KmsKeyId"] 79 | 80 | @property 81 | def description(self) -> str | None: 82 | return self.data.get("Description", None) 83 | 84 | @property 85 | def rotation_enabled(self) -> bool: 86 | return self.data["RotationEnabled"] 87 | 88 | @property 89 | def last_rotated(self) -> bool: 90 | return self.data["LastRotationDate"] 91 | 92 | @property 93 | def value(self) -> str: 94 | if "value" not in self.cache: 95 | self.cache["value"] = self.objects.get_value(self.arn) 96 | return self.cache["value"] 97 | -------------------------------------------------------------------------------- /deployfish/core/models/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/core/models/test/__init__.py -------------------------------------------------------------------------------- /deployfish/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Optional 3 | 4 | 5 | def is_fnmatch_filter(f: str | None) -> bool: 6 | """ 7 | Use this function to determine if a string is a fnmatch filter, which 8 | is to say a glob pattern. We determine this by checking for the presence 9 | of any of the following characters: '[', '?', or '*'. 10 | 11 | Args: 12 | f: The string to check for a glob pattern. 13 | 14 | Returns: 15 | ``True`` if the string is a glob pattern, ``False`` otherwise. 16 | 17 | """ 18 | if f is not None and re.search(r"[\[?*]", f): 19 | return True 20 | return False 21 | -------------------------------------------------------------------------------- /deployfish/core/utils/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def is_fnmatch_filter(f: str | None) -> bool: 5 | """ 6 | Use this function to determine if a string is a fnmatch filter, which 7 | is to say a glob pattern. We determine this by checking for the presence 8 | of any of the following characters: '[', '?', or '*'. 9 | 10 | Args: 11 | f: The string to check for a glob pattern. 12 | 13 | Returns: 14 | ``True`` if the string is a glob pattern, ``False`` otherwise.
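Example (the filter strings below are illustrative, not taken from a real deployfish.yml):

    >>> is_fnmatch_filter("web-*")
    True
    >>> is_fnmatch_filter("web-service")
    False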
15 | 16 | """ 17 | if f is not None and re.search(r"[\[?*]", f): 18 | return True 19 | return False 20 | -------------------------------------------------------------------------------- /deployfish/core/waiters/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .ecs import ECSDeploymentStatusWaiterHook, ECSTaskStatusHook # noqa:F401 2 | -------------------------------------------------------------------------------- /deployfish/core/waiters/hooks/abstract.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | 4 | class AbstractWaiterHook: 5 | 6 | def __init__(self, obj): 7 | self.obj = obj 8 | 9 | def mark(self, status, response, num_attempts, **kwargs): 10 | click.secho("=" * 72, fg="yellow", bold=True) 11 | 12 | def setup(self, status, response, num_attempts, **kwargs): 13 | """ 14 | Do any necessary setup on the waiter iteration before we've done our per-state processing. This will get 15 | called once per iteration. 16 | """ 17 | 18 | def waiting(self, status, response, num_attempts, **kwargs): 19 | """ 20 | Do something when our waiter status is 'waiting'. 21 | """ 22 | 23 | def success(self, status, response, num_attempts, **kwargs): 24 | """ 25 | Do something when our waiter status is 'success'. 26 | """ 27 | 28 | def failure(self, status, response, num_attempts, **kwargs): 29 | """ 30 | Do something when our waiter status is 'failure'. 31 | """ 32 | 33 | def error(self, status, response, num_attempts, **kwargs): 34 | """ 35 | Do something when our waiter status is 'error'. 36 | """ 37 | 38 | def timeout(self, status, response, num_attempts, **kwargs): 39 | """ 40 | Do something when our waiter status is 'timeout'. 41 | """ 42 | 43 | def cleanup(self, status, response, num_attempts, **kwargs): 44 | """ 45 | Do any necessary cleanup after the waiter iteration has completed and we've done our per-state processing. 46 | This will get called once per iteration. 47 | """ 48 | 49 | def __call__(self, status, response, num_attempts, **kwargs): 50 | """ 51 | Args: 52 | * 'state': the current state of the waiter. One of 'waiting', 'success', 'failure', 'error' or 'timeout'. 53 | * 'response': the boto3 response from the last invocation of our waiter's operation 54 | * 'num_attempts': the current iteration number 55 | 56 | kwargs: 57 | 58 | * 'name': the name of the waiter 59 | * 'config': the SingleWaiterConfig object passed to the constructor 60 | * 'WaiterConfig': (optional) not sure 61 | * 'Delay': (optional) the sleep amount in seconds 62 | * 'MaxAttempts': (optional) how many iterations we'll perform before timing out 63 | 64 | Plus other waiter specific kwargs. e.g. Bucket when doing a 'bucket_exists' waiter. 65 | 66 | """ 67 | self.setup(status, response, num_attempts, **kwargs) 68 | if status == "waiting": 69 | self.waiting(status, response, num_attempts, **kwargs) 70 | elif status == "success": 71 | self.success(status, response, num_attempts, **kwargs) 72 | elif status == "failure" or status == "error" or status == "timeout": 73 | self.failure(status, response, num_attempts, **kwargs) 74 | self.cleanup(status, response, num_attempts, **kwargs) 75 | -------------------------------------------------------------------------------- /deployfish/exceptions.py: -------------------------------------------------------------------------------- 1 | class SchemaException(Exception): 2 | """ 3 | There was a schema validation problem in the deployfish.yml file. 
4 | """ 5 | 6 | 7 | 8 | class ObjectDoesNotExist(Exception): 9 | """ 10 | We tried to get a single object but it does not exist in AWS. 11 | """ 12 | 13 | 14 | 15 | class MultipleObjectsReturned(Exception): 16 | """ 17 | We expected to retrieve only one object but got multiple objects. 18 | """ 19 | 20 | 21 | 22 | class ObjectImproperlyConfigured(Exception): 23 | """ 24 | Deployfish, our model's manager Manager or the model itself is not properly configured. 25 | """ 26 | 27 | 28 | 29 | class ObjectReadOnly(Exception): 30 | """ 31 | This is a read only model; no writes to AWS permitted. 32 | """ 33 | 34 | 35 | 36 | class OperationFailed(Exception): 37 | """ 38 | We tried to do something we expected to succeed, but it failed. 39 | """ 40 | 41 | 42 | 43 | class NoSuchConfigSection(Exception): 44 | """ 45 | We looked in our deployfish.yml for a section, but it was not present. 46 | """ 47 | 48 | def __init__(self, section: str): 49 | super().__init__() 50 | self.section = section 51 | 52 | def __str__(self) -> str: 53 | return f"No such deployfish.yml section: {self.section}" 54 | 55 | 56 | class NoSuchConfigSectionItem(Exception): 57 | """ 58 | We looked an existing deployfish.yml section for a named item, but it was not present. 59 | """ 60 | 61 | def __init__(self, section: str, name: str): 62 | super().__init__() 63 | self.section = section 64 | self.name = name 65 | 66 | def __str__(self) -> str: 67 | return f'No item named "{self.name}" deployfish.yml section "{self.section}"' 68 | 69 | 70 | class RenderException(Exception): 71 | """ 72 | This is used for click commands, and gets re-raised when we get other exceptions so we can 73 | have a consistent method for configuring command line error messages instead of needing 74 | to catch every exception separately. 75 | """ 76 | 77 | def __init__(self, msg: str, exit_code: int = 1): 78 | self.msg = msg 79 | self.exit_code = exit_code 80 | 81 | 82 | class DeployfishAppError(Exception): 83 | """Generic errors.""" 84 | 85 | 86 | 87 | class NoSuchTerraformStateFile(Exception): 88 | """ 89 | deployfish.yml references a Terraform state file that doesn't exist. 90 | """ 91 | 92 | 93 | 94 | class ConfigProcessingFailed(Exception): 95 | """ 96 | While performing our variable substitutions in deployfish.yml, we had a problem. 97 | """ 98 | 99 | 100 | 101 | class SkipConfigProcessing(Exception): 102 | """ 103 | This is used to skip processing steps when looping through the variable substitution classes 104 | while processing variable substitutions in deployfish.yml. 105 | """ 106 | 107 | -------------------------------------------------------------------------------- /deployfish/ext/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/ext/__init__.py -------------------------------------------------------------------------------- /deployfish/ext/ext_df_argparse.py: -------------------------------------------------------------------------------- 1 | from cement.ext.ext_argparse import ArgparseController 2 | from cement.utils.misc import minimal_logger 3 | 4 | LOG = minimal_logger(__name__) 5 | 6 | 7 | class DeployfishArgparseController(ArgparseController): 8 | """ 9 | We use this subclass of ArgparseController instead of cement's version so that we can 10 | redefine help strings in subclassess of a base class. 
11 | """ 12 | 13 | def _get_command_parser_options(self, command): 14 | """ 15 | Look on the controller owning a command for a class or instance attribute named 16 | ``help_overrides``, which is a dict whose keys are method names and whose values 17 | are help strings, like so:: 18 | 19 | class BaseCommands(DeployfishArgparseController): 20 | 21 | class Meta: 22 | label = "base-commands" 23 | 24 | @ex( 25 | help='The base help string' 26 | ... 27 | ) 28 | def mycommand(self): 29 | ... 30 | 31 | class SubclassedCommands(BaseCommands): 32 | 33 | class Meta: 34 | label = "subclass-commands" 35 | stacked_type = 'nested' 36 | 37 | help_overrides = { 38 | 'info': 'My subclass info help' 39 | } 40 | 41 | Now the help strings will be:: 42 | 43 | > appname base-commands --help 44 | [...] 45 | 46 | sub-commands: 47 | {info} 48 | info The base help string 49 | 50 | > appname subclass-commands --help 51 | [...] 52 | 53 | sub-commands: 54 | {info} 55 | info My subclass info help 56 | """ 57 | kwargs = super()._get_command_parser_options(command) 58 | if "help" in kwargs: 59 | controller = command["controller"] 60 | if hasattr(controller, "help_overrides"): 61 | if command["func_name"] in controller.help_overrides: 62 | kwargs["help"] = controller.help_overrides[command["func_name"]] 63 | return kwargs 64 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from cement import App 4 | 5 | from . import adapters # noqa:F401 6 | from .controllers.mysql import MysqlController 7 | from .hooks import pre_config_interpolate_add_mysql_section 8 | 9 | __version__ = "1.2.16" 10 | 11 | 12 | def add_template_dir(app: App): 13 | path = os.path.join(os.path.dirname(__file__), "templates") 14 | app.add_template_dir(path) 15 | 16 | 17 | def load(app: App) -> None: 18 | app.handler.register(MysqlController) 19 | app.hook.register("post_setup", add_template_dir) 20 | app.hook.register("pre_config_interpolate", pre_config_interpolate_add_mysql_section) 21 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/adapters/__init__.py: -------------------------------------------------------------------------------- 1 | from .deployfish import * # noqa: F403 2 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/adapters/deployfish/__init__.py: -------------------------------------------------------------------------------- 1 | from deployfish.registry import importer_registry as registry 2 | 3 | from .mysql import MySQLDatabaseAdapter 4 | 5 | # ----------------------- 6 | # Adapter registrations 7 | # ----------------------- 8 | 9 | # mysql 10 | registry.register("MySQLDatabase", "deployfish", MySQLDatabaseAdapter) 11 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/adapters/deployfish/mysql.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | 3 | from deployfish.core.adapters.abstract import Adapter 4 | 5 | 6 | class MySQLDatabaseAdapter(Adapter): 7 | 8 | def convert(self): 9 | data = deepcopy(self.data) 10 | kwargs = {} 11 | return data, kwargs 12 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/controllers/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/plugins/mysql/controllers/__init__.py -------------------------------------------------------------------------------- /deployfish/plugins/mysql/hooks.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING 2 | 3 | from cement import App 4 | 5 | if TYPE_CHECKING: 6 | from deployfish.config import Config 7 | 8 | 9 | def pre_config_interpolate_add_mysql_section(app: App, obj: "type[Config]") -> None: 10 | """ 11 | Add our "mysql" section to the list of sections on which keyword interpolation 12 | will be run 13 | 14 | Args: 15 | app: out cement app 16 | obj: the :py:class:`deployfish.config.Config` class 17 | 18 | """ 19 | obj.add_processable_section("mysql") 20 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .mysql import * # noqa: F403 2 | -------------------------------------------------------------------------------- /deployfish/plugins/mysql/templates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/plugins/mysql/templates/__init__.py -------------------------------------------------------------------------------- /deployfish/plugins/mysql/templates/detail--mysqldatabase.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('MySQL Database', obj.name) }} 3 | pk : {{ obj.pk }} 4 | name : {{ obj.name }} 5 | host : {{ obj.host }} 6 | port : {{ obj.port }} 7 | database : {{ obj.db }} 8 | user : {{ obj.user }} 9 | password : {{ obj.password }} 10 | -------------------------------------------------------------------------------- /deployfish/plugins/slack/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .hooks import process_service_update 3 | 4 | 5 | def load(app): 6 | app.hook.register("post_object_update", process_service_update) 7 | -------------------------------------------------------------------------------- /deployfish/plugins/slack/hooks.py: -------------------------------------------------------------------------------- 1 | import getpass 2 | import logging 3 | import os 4 | import pwd 5 | 6 | from deployfish.core.models.ecs import Service 7 | 8 | # pylint: disable=no-name-in-module 9 | from deployfish.core.utils.mixins import ( 10 | CodeNameVersionMixin, 11 | GitChangelogMixin, 12 | GitMixin, 13 | ) 14 | from slackfin import ( 15 | SlackFormatter, 16 | SlackLabelValueListBlock, 17 | SlackLabelValuePair, 18 | SlackMarkdownType, 19 | SlackMessage, 20 | SlackMessageContext, 21 | SlackMessageDivider, 22 | SlackMessageHeader, 23 | SlackMessageMarkdown, 24 | ) 25 | 26 | logging.basicConfig(level=logging.WARNING) 27 | 28 | 29 | def process_service_update(app, obj, success=True, reason=None): 30 | if not success: 31 | return 32 | if not isinstance(obj, Service): 33 | return 34 | config_file = app.pargs.deployfish_filename 35 | repo_folder = os.path.dirname(config_file) 36 | channel = app.config.get("plugin.slack", "channel") 37 | if 
not channel or channel == "": 38 | channel = f"@{getpass.getuser()}" 39 | _ = ServiceUpdateMessage(app, obj, repo_folder).send(channel=channel) 40 | 41 | 42 | class DeployfishMessage(SlackMessage): 43 | """A message from deployfish.""" 44 | 45 | def __init__(self, app, *args, **kwargs): 46 | token = app.config.get("plugin.slack", "token") 47 | super().__init__( 48 | SlackMessageDivider(), 49 | *args, 50 | token=token, 51 | **kwargs, 52 | ) 53 | 54 | def add_context(self): 55 | self.add_block( 56 | SlackMessageContext( 57 | SlackMarkdownType(SlackFormatter().datetime()), 58 | SlackMarkdownType("Deployfish"), 59 | ) 60 | ) 61 | 62 | 63 | class ServiceUpdateMessage( 64 | GitChangelogMixin, GitMixin, CodeNameVersionMixin, DeployfishMessage 65 | ): 66 | """A message indicating that a service has been updated.""" 67 | 68 | def __init__(self, app, obj, repo_folder): 69 | if repo_folder: 70 | cwd = os.getcwd() 71 | os.chdir(repo_folder) 72 | super().__init__( 73 | app, 74 | SlackMessageHeader(text="Service Update Succeeded"), 75 | text="The service has been updated.", 76 | ) 77 | self.values = {} 78 | self.annotate(self.values) 79 | if repo_folder: 80 | os.chdir(cwd) 81 | 82 | self.add_service_update(obj) 83 | self.add_changelog() 84 | self.add_context() 85 | 86 | def add_service_update(self, obj): 87 | environment = obj.tags["Environment"] 88 | username = getpass.getuser() 89 | full_name = pwd.getpwnam(username).pw_gecos.split(",")[0] 90 | block = SlackLabelValueListBlock() 91 | block.add_entry( 92 | SlackLabelValuePair( 93 | label=self.values["name"], 94 | value="service updated", 95 | label_url=self.url_patterns["repo"], 96 | ) 97 | ) 98 | block.add_entry( 99 | SlackLabelValuePair( 100 | label="Environment", 101 | value=environment, 102 | ) 103 | ) 104 | # block.add_entry( 105 | # SlackLabelValuePair( 106 | # label="Cluster", 107 | # value=obj.cluster.pk, 108 | # ) 109 | # ) 110 | block.add_entry( 111 | SlackLabelValuePair( 112 | label="Committer", 113 | value=self.values["committer"], 114 | ) 115 | ) 116 | block.add_entry( 117 | SlackLabelValuePair( 118 | label="Authors", 119 | value=",".join(self.values["authors"]), 120 | ) 121 | ) 122 | block.add_entry( 123 | SlackLabelValuePair( 124 | label="Deployer", 125 | value=full_name, 126 | ) 127 | ) 128 | self.add_block(block) 129 | 130 | def add_changelog(self): 131 | changelog = self.values.get("changelog", []) 132 | url = "https://ads-utils-icons.s3.us-west-2.amazonaws.com/ads_dev_ops/database-check.png" 133 | text = "*Changelog:*\n" 134 | text += str.join("\n", changelog) 135 | if text: 136 | self.add_block( 137 | SlackMessageMarkdown( 138 | text=text, 139 | image_url=url, 140 | alt_text="Changelog", 141 | ) 142 | ) 143 | -------------------------------------------------------------------------------- /deployfish/plugins/sqs/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .hooks import process_service_update 3 | 4 | 5 | def load(app): 6 | app.hook.register("post_object_update", process_service_update) 7 | -------------------------------------------------------------------------------- /deployfish/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/py.typed -------------------------------------------------------------------------------- /deployfish/registry.py: -------------------------------------------------------------------------------- 1 | 
from typing import TYPE_CHECKING 2 | 3 | if TYPE_CHECKING: 4 | from .core.adapters.abstract import Adapter 5 | 6 | 7 | class AdapterRegistry: 8 | """ 9 | A registry of adapters which consume specific data sources to configure deployfish models. 10 | """ 11 | 12 | def __init__(self) -> None: 13 | self.adapters: dict[str, dict[str, type[Adapter]]] = {} 14 | 15 | def register(self, model_name: str, source: str, adapter_class: type["Adapter"]) -> None: 16 | """ 17 | Register a new Adapter class with a model and a source. 18 | 19 | :param model_name: the name of a deployfish model 20 | :param source: the identifier for the config source 21 | :param adapter_class: the class of the source -> model adapter to use 22 | """ 23 | if model_name not in self.adapters: 24 | self.adapters[model_name] = {} 25 | self.adapters[model_name][source] = adapter_class 26 | 27 | def get(self, model_name: str, source: str) -> type["Adapter"]: 28 | """ 29 | Return the source -> model Adapter class to use for the source ``source`` and 30 | model ``model_name``. 31 | """ 32 | return self.adapters[model_name][source] 33 | 34 | 35 | importer_registry: AdapterRegistry = AdapterRegistry() 36 | click_registry: AdapterRegistry = AdapterRegistry() 37 | -------------------------------------------------------------------------------- /deployfish/renderers/__init__.py: -------------------------------------------------------------------------------- 1 | from .abstract import AbstractRenderer # noqa:F401 2 | from .table import * # noqa: F403 3 | -------------------------------------------------------------------------------- /deployfish/renderers/abstract.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | 4 | class AbstractRenderer: 5 | 6 | def __init__(self, *args, **kwargs): 7 | pass 8 | 9 | def render(self, data: Any, **kwargs: Any) -> str: 10 | raise NotImplementedError 11 | -------------------------------------------------------------------------------- /deployfish/renderers/misc.py: -------------------------------------------------------------------------------- 1 | from deployfish.core.models import TargetGroup 2 | 3 | 4 | def target_group_listener_rules(obj: TargetGroup) -> str: 5 | """ 6 | Given a ``TargetGroup`` iterate through its list of LoadBalancerListenerRule objects and return a human readable 7 | description of those rules. 
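For example (the hostname and path shown here are illustrative), a rule with a host-header condition for ``example.com`` is rendered as ``hostname:example.com``, and a path-pattern condition for ``/api/*`` is rendered as ``path:/api/*``.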
8 | 9 | :param obj TargetGroup: a TargetGroup object 10 | 11 | :rtype: str 12 | """ 13 | rules = obj.rules 14 | conditions = [] 15 | for rule in rules: 16 | if "Conditions" in rule.data: 17 | for condition in rule.data["Conditions"]: 18 | if "HostHeaderConfig" in condition: 19 | for v in condition["HostHeaderConfig"]["Values"]: 20 | conditions.append(f"hostname:{v}") 21 | if "HttpHeaderConfig" in condition: 22 | conditions.append("header:{} -> {}".format( 23 | condition["HttpHeaderConfig"]["HttpHeaderName"], 24 | ",".join(condition["HttpHeaderConfig"]["Values"]) 25 | )) 26 | if "PathPatternConfig" in condition: 27 | for v in condition["PathPatternConfig"]["Values"]: 28 | conditions.append(f"path:{v}") 29 | if "QueryStringConfig" in condition: 30 | for v in condition["QueryStringConfig"]["Values"]: 31 | conditions.append("qs:{}={} -> ".format(v["Key"], v["Value"])) 32 | if "SourceIpConfig" in condition: 33 | for v in condition["SourceIpConfig"]["Values"]: 34 | conditions.append(f"ip:{v} -> ") 35 | if "HttpRequestMethod" in condition: 36 | for v in condition["HttpRequestMethod"]["Values"]: 37 | conditions.append(f"verb:{v} -> ") 38 | if not conditions: 39 | conditions.append(f"forward:{obj.load_balancers[0].lb_type}:{obj.listeners[0].port}:{obj.listeners[0].protocol} -> CONTAINER:{obj.port}:{obj.protocol}") 40 | return "\n".join(sorted(conditions)) 41 | -------------------------------------------------------------------------------- /deployfish/templates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/caltechads/deployfish/988eb8b8eb419533d424452732d3e225a88d3e6d/deployfish/templates/__init__.py -------------------------------------------------------------------------------- /deployfish/templates/detail--classicloadbalancer.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('Classic Load Balancer (ELB)', obj.name) }} 3 | pk : {{ obj.pk }} 4 | name : {{ obj.name }} 5 | scheme : {{ obj.scheme }} 6 | hostname : {{ obj.hostname }} 7 | created : {{ obj.data['CreatedTime'].strftime('%Y-%m-%d %H:%M:%S') }} 8 | {%- if obj.ssl_certificate_arn %} 9 | ssl certificate : {{ obj.ssl_certificate_arn }} 10 | ssl policy : {{ obj.ssl_policy }} 11 | {%- endif %} 12 | 13 | {{ subsection('networking') }} 14 | VPC : {{ obj.data['VPCId'] }} 15 | subnets : {{ obj.data['Subnets']|join(', ') }} 16 | availability zones: {{ obj.data['AvailabilityZones']|join(', ') }} 17 | security groups : {{ obj.data['SecurityGroups']|join(', ') }} 18 | 19 | {{ subsection('health check') }} 20 | target : {{ obj.data['HealthCheck']['Target'] }} 21 | interval : {{ obj.data['HealthCheck']['Interval'] }} 22 | timeout : {{ obj.data['HealthCheck']['Timeout'] }} 23 | unhealthy count : {{ obj.data['HealthCheck']['UnhealthyThreshold'] }} 24 | healthy count : {{ obj.data['HealthCheck']['HealthyThreshold'] }} 25 | 26 | {{ heading('Listeners') }} 27 | 28 | {{ obj.listeners|tabular(LB_Protocol='Protocol', LB_Port='LoadBalancerPort', Instance_Protocol='InstanceProtocol', Instance_Port='InstancePort') }} 29 | 30 | {{ heading('Targets') }} 31 | 32 | {{ obj.targets|tabular(Name='name', State='Instance__State__Name', Health='State', Code='ReasonCode', Description='Description', Instance_Type='Instance__InstanceType', IP_Address='ip_address', ordering='Name') }} 33 | 
-------------------------------------------------------------------------------- /deployfish/templates/detail--cloudwatchloggroup.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/awslogs.jinja2' import log_streams_table -%} 2 | {% filter color(fg='green') %}Cloudwatch Log Group: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 3 | pk : {{ obj.pk }} 4 | arn : {{ obj.arn }} 5 | created : {{ obj.data['creationTime']|fromtimestamp }} 6 | retention days : {{ obj.data['retentionInDays']|default('infinite') }} 7 | size (bytes) : {{ obj.data['storedBytes'] }} 8 | {% if 'kmsKeyId' in obj.data -%} 9 | encrypted : True 10 | kms key id : {{ obj.data['kmsKeyId'] }} 11 | {%- endif %} 12 | 13 | {% filter section_title(fg='cyan', bold=True) %}25 Most Recent Log Streams{% endfilter %} 14 | {{ log_streams_table(obj, maxitems=25) }} 15 | -------------------------------------------------------------------------------- /deployfish/templates/detail--cloudwatchlogstream.jinja2: -------------------------------------------------------------------------------- 1 | {% filter color(fg='green') %}Cloudwatch Log Stream: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 2 | pk : {{ obj.pk }} 3 | arn : {{ obj.arn }} 4 | log group : {{ obj.data['logGroupName'] }} 5 | created : {{ obj.data['creationTime']|fromtimestamp }} 6 | {%- if 'firstEventTimestamp' in obj.data %} 7 | first event : {{ obj.data['firstEventTimestamp']|fromtimestamp }} 8 | {%- endif %} 9 | {%- if 'lastEventTimestamp' in obj.data %} 10 | last event : {{ obj.data['lastEventTimestamp']|fromtimestamp }} 11 | {%- endif %} 12 | {%- if 'lastIngetstionTime' in obj.data %} 13 | last event : {{ obj.data['lastIngestionTime']|fromtimestamp }} 14 | {%- endif %} 15 | 16 | {% filter section_title(fg='cyan', bold=True) %}Events{% endfilter %} 17 | {% set paginator = obj.events(sleep=0) %} 18 | {% for page in paginator -%} 19 | {% for event in page -%} 20 | {{ event['timestamp']|color(fg='cyan') }} {{ event['message'] }} 21 | {% endfor %} 22 | {% endfor %} 23 | -------------------------------------------------------------------------------- /deployfish/templates/detail--cluster.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('Cluster', obj.name) }} 3 | pk : {{ obj.pk }} 4 | name : {{ obj.name }} 5 | arn : {{ obj.arn }} 6 | status : {{ obj.data['status'] }} 7 | cluster_type : {{ obj.cluster_type }} 8 | {% if obj.data['settings'] %} 9 | {{ subsection(' settings')|indent(width=2) -}} 10 | {% for setting in obj.data['settings'] %} 11 | {{ setting['name']|color(fg='yellow') }} : {{ setting['value'] }} 12 | {%- endfor -%} 13 | {% endif %} 14 | 15 | {{ subsection(' task counts')|indent(width=2) }} 16 | {% filter color(fg='green') %}running{% endfilter %} : {{ obj.data['runningTasksCount']|color(fg='green') }} 17 | {% filter color(fg='yellow') %}pending{% endfilter %} : {{ obj.data['pendingTasksCount']|color(fg='yellow') }} 18 | {% if obj.tags %}{{ tags(obj.tags)|indent(width=2) }} 19 | {% endif -%} 20 | 21 | {% if obj.cluster_type == 'EC2' -%} 22 | {{ heading('Container instances') }} 23 | instances : {{ obj.data['registeredContainerInstancesCount'] }} 24 | {% if obj.autoscaling_group %}autoscaling_group : {{ obj.autoscaling_group.name }}{% endif %} 25 | 26 | {{ obj.container_instances|tabular(Name='ec2_instance__name', Instance_Type='ec2_instance__InstanceType', 
IP_Address='ec2_instance__PrivateIpAddress', Free_CPU='free_cpu', Free_Memory='free_memory', Launch_Time='ec2_instance__LaunchTime', ordering='Name') }} 27 | {%- endif %} 28 | 29 | {{ heading('Services') }} 30 | 31 | {{ obj.services|tabular(Name='name', Version='version', Desired='desiredCount', Running='runningCount', Created='createdAt') }} 32 | 33 | -------------------------------------------------------------------------------- /deployfish/templates/detail--invokedtask.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import subobject -%} 2 | {% from 'macros/task-definition.jinja2' import task_definition -%} 3 | {% from 'macros/invoked-task.jinja2' import task_container, status, timestamps -%} 4 | {% filter color(fg='green') %}Invoked Task: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 5 | pk : {{ obj.pk }} 6 | cluster : {{ obj.data['cluster'] }} 7 | availability zone : {{ obj.data['availabilityZone'] }} 8 | connectivity : {{ obj.data['connectivity'] }} 9 | launch type : {{ obj.data['launchType'] }} 10 | {%- if 'group' in obj.data %} 11 | group : {{ obj.data['group'] }} 12 | {%- endif %} 13 | {%- if 'cpu' in obj.data %} 14 | cpu : {{ obj.data['cpu'] }} 15 | {%- endif %} 16 | {%- if 'memory' in obj.data %} 17 | memory : {{ obj.data['memory'] }} 18 | {%- endif %} 19 | {{ status(obj)|indent(width=2) }} 20 | 21 | {% filter color(fg='cyan') %} timestamps{% endfilter %} 22 | {{ timestamps(obj)|indent }} 23 | {%- for container in obj.data['containers'] %} 24 | {{ subobject('Container', container['name'])|indent(width=2) }} 25 | {{ task_container(container)|indent(width=4) }} 26 | {%- endfor %} 27 | 28 | {% filter section_title(fg='cyan', bold=True) %}Task Definition{% endfilter %} 29 | {{ task_definition(obj.task_definition)|indent }} 30 | -------------------------------------------------------------------------------- /deployfish/templates/detail--loadbalancer.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('Application Load Balancer (ALB)', obj.name) }} 3 | pk : {{ obj.pk }} 4 | arn : {{ obj.arn }} 5 | name : {{ obj.name }} 6 | scheme : {{ obj.scheme }} 7 | state : {{ obj.data['State']['Name'] }} 8 | hostname : {{ obj.hostname }} 9 | created : {{ obj.data['CreatedTime'].strftime('%Y-%m-%d %H:%M:%S') }} 10 | {%- if obj.ssl_certificate_arn %} 11 | ssl certificate : {{ obj.ssl_certificate_arn }} 12 | ssl policy : {{ obj.ssl_policy }} 13 | {%- endif %} 14 | 15 | {{ subsection('networking') }} 16 | VPC : {{ obj.data['VpcId'] }} 17 | subnets : {{ obj.data['AvailabilityZones']|map(attribute='SubnetId')|join(', ') }} 18 | availability zones: {{ obj.data['AvailabilityZones']|map(attribute='ZoneName')|join(', ') }} 19 | security groups : {{ obj.data['SecurityGroups']|join(', ') }} 20 | 21 | {{ heading('Listeners') }} 22 | 23 | {{ obj.listeners|lb_listener_table }} 24 | 25 | {{ heading('Target Groups') }} 26 | 27 | {{ obj.target_groups|target_group_table }} 28 | -------------------------------------------------------------------------------- /deployfish/templates/detail--loadbalancerlistener.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('Load Balancer Listener', obj.name) }} 3 | load_balancer : {{ obj.load_balancer.name }} 4 | pk : {{ obj.pk 
}} 5 | arn : {{ obj.arn }} 6 | name : {{ obj.name }} 7 | port : {{ obj.port }} 8 | protocol : {{ obj.protocol }} 9 | {%- if obj.protocol in ['HTTPS', 'TLS'] %} 10 | ssl certificates : {{ obj.ssl_certificates|join(',') }} 11 | ssl policy : {{ obj.ssl_policy }} 12 | {%- endif %} 13 | -------------------------------------------------------------------------------- /deployfish/templates/detail--rdsinstance.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject('RDS instance', obj.name) }} 3 | pk : {{ obj.pk }} 4 | name : {{ obj.name }} 5 | arn : {{ obj.arn }} 6 | engine : {{ obj.engine }} 7 | engine version : {{ obj.version }} 8 | instance type | {{ obj.data['DBInstanceClass'] }} 9 | Multi AZ? : {{ obj.multi_az }} 10 | hostname : {{ obj.hostname }} 11 | port : {{ obj.port }} 12 | root username : {{ obj.root_user }} 13 | {%- if obj.secret_enabled %} 14 | secret ARN : {{ obj.secret_arn }} 15 | {%- endif %} 16 | 17 | {{ subsection('networking') }} 18 | VPC : {{ obj.vpc.pk }} ({{ obj.vpc.name }}) 19 | subnet group : {{ obj.data['DBSubnetGroup']['DBSubnetGroupName']}} 20 | 21 | subnets 22 | {% for subnet in obj.subnets %} {{ subnet.name|color(fg='green') }} [{{ subnet.pk }}] {{ subnet.cidr_block|color(fg='cyan') }} 23 | {% endfor %} 24 | security_groups 25 | {% for sg in obj.security_groups %} {{sg.name|color(fg='green')}} [{{sg.pk}}] 26 | {% endfor -%} -------------------------------------------------------------------------------- /deployfish/templates/detail--secrets--diff.jinja2: -------------------------------------------------------------------------------- 1 | {%- if '$insert' in obj -%} 2 | {% filter color(fg='green', bold=True) %}These secrets would be created in AWS:{% endfilter %} 3 | {% for secret, changes in obj['$insert'].items() %} 4 | {{ secret|color(fg='yellow') }}: {{changes['Value']}} {% if changes['Type'] == 'SecureString' -%}{% filter color(fg='cyan') %}[SECURE:{{changes['KeyId']}}]{% endfilter %}{% endif %} 5 | {%- endfor %} 6 | {%- endif %} 7 | {% if '$delete' in obj %} 8 | {% filter color(fg='red', bold=True) %}These secrets would be removed from AWS:{% endfilter %} 9 | {% for secret in obj['$delete'] %} 10 | {{ secret|color(fg='yellow') }} 11 | {%- endfor %} 12 | {%- endif %} 13 | {% if '$update' in obj %} 14 | {% filter color(fg='cyan', bold=True) %}These secrets would be updated in AWS:{% endfilter %} 15 | {% for secret, changes in obj['$update'].items() %} 16 | {{ secret|color(fg='yellow') }}: 17 | {%- for k, v in changes['$update'].items() %} 18 | {{ k }} -> {{ v }} 19 | {% endfor %} 20 | {% endfor %} 21 | {%- endif %} 22 | -------------------------------------------------------------------------------- /deployfish/templates/detail--secrets.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/secrets.jinja2' import secrets_list -%} 2 | {{ secrets_list(obj) }} 3 | -------------------------------------------------------------------------------- /deployfish/templates/detail--service--short.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/service.jinja2' import load_balancer %} 2 | {% from 'macros/task.jinja2' import vpc_configuration %} 3 | {% filter color(fg='green') %}Service: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 4 | pk : {{ obj.pk }} 5 | arn : {{ obj.arn }} 6 | status : {{ obj.data['status'] }} 7 | last 
updated : {{ obj.last_updated }} 8 | cluster : {{ obj.data['cluster'] }} 9 | ECS Exec enabled : {{ obj.exec_enabled }} 10 | launch type : {{ obj.data['launchType'] }} 11 | {% if obj.data['launchType'] == 'FARGATE' %} platform version : {{ obj.data['platformVersion'] }} 12 | {% endif -%} 13 | {% if 'networkConfiguration' in obj.data %}{% filter color(fg='cyan') %} vpc configuration{% endfilter %}{{ vpc_configuration(obj)|indent(width=6)}} 14 | {% endif -%} 15 | {%- if obj.data['runningCount'] != 'UNKNOWN' -%} 16 | {% filter color(fg='cyan') %} task counts{% endfilter %} 17 | desired : {{ obj.data['desiredCount'] }} 18 | {% filter color(fg='green') %}running{% endfilter %} : {{ obj.data['runningCount']|color(fg='green') }} 19 | {% filter color(fg='yellow') %}pending{% endfilter %} : {{ obj.data['pendingCount']|color(fg='yellow') }} 20 | {%- else -%} 21 | count : {{ obj.data['desiredCount'] }} 22 | {%- endif -%} 23 | {%- if obj.data['loadBalancers'] %} 24 | {%- for lb in obj.load_balancers %}{{ load_balancer(lb)|indent(width=2) }}{% endfor -%} 25 | {%- endif %} 26 | -------------------------------------------------------------------------------- /deployfish/templates/detail--service.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/task-definition.jinja2' import task_definition -%} 2 | {% from 'macros/secrets.jinja2' import secrets_table -%} 3 | {% include 'detail--service--short.jinja2' %} 4 | 5 | {% filter section_title(fg='cyan', bold=True) %}Task Definition{% endfilter %} 6 | {{ task_definition(obj.task_definition)|indent }} 7 | {% if includes is not defined or ('secrets' in includes and obj.secrets) %} 8 | {% filter section_title(fg='cyan', bold=True) %}Secrets{% endfilter %} 9 | {{ secrets_table(obj.secrets.values()) }} 10 | {% endif -%} 11 | {%- if includes is not defined or ('deployments' in includes and obj.deployments) %} 12 | 13 | {% filter section_title(fg='cyan', bold=True) %}Deployments{% endfilter %} 14 | 15 | {{ obj.deployments|tabular(Status='status', Task_Definition='taskDefinition', Desired='desiredCount', Pending='pendingCount', Running='runningCount', ordering='-Status') }} 16 | {% endif -%} 17 | {%- if excludes is defined and 'events' not in excludes and obj.events %} 18 | 19 | {% filter section_title(fg='cyan', bold=True) %}Events{% endfilter %} 20 | 21 | {{ obj.events[:10]|tabular(Timestamp='createdAt', Message='message') }} 22 | {% endif -%} 23 | -------------------------------------------------------------------------------- /deployfish/templates/detail--servicehelpertask.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/task-definition.jinja2' import task_definition -%} 2 | {% from 'macros/secrets.jinja2' import secrets_table -%} 3 | {% from 'macros/task.jinja2' import vpc_configuration -%} 4 | {% filter color(fg='green') %}Command: {% endfilter %}{{ obj.command|color(fg='cyan', bold=True) }} 5 | pk : {{ obj.pk }} 6 | service : {{ obj.data['service'] }} 7 | cluster : {{ obj.data['cluster'] }} 8 | launch type : {{ obj.data['launchType'] }} 9 | {%- if obj.data['launchType'] == 'FARGATE' %} 10 | platform version : {{ obj.data['platformVersion'] }} 11 | {%- endif %} 12 | count : {{ obj.data.get('desiredCount', 1) }} 13 | {% if 'networkConfiguration' in obj.data and obj.task_definition.data.get('networkMode', 'bridge') == 'awsvpc' -%} 14 | {{ vpc_configuration(obj)|indent(width=2) -}} 15 | {% endif -%} 16 | {%- if obj.schedule %} 17 | {% filter 
color(fg='yellow', bold=True) %}schedule{% endfilter %} : {{ obj.schedule.data['ScheduleExpression']|color(fg='yellow', bold=True) }} {% if not obj.enabled %}{% filter color(fg='red', bold=True)%}[DISABLED]{% endfilter %}{% endif %} 18 | {%- endif %} 19 | 20 | {% filter section_title(fg='cyan', bold=True) %}Task Definition{% endfilter %} 21 | {{ task_definition(obj.task_definition)|indent }} 22 | {%- if includes is not defined or ('secrets' in includes and obj.secrets) %} 23 | {% filter section_title(fg='cyan', bold=True) %}Secrets{% endfilter %} 24 | {{ secrets_table(obj.secrets.values()) }} 25 | {% endif -%} 26 | -------------------------------------------------------------------------------- /deployfish/templates/detail--sshtunnel.jinja2: -------------------------------------------------------------------------------- 1 | {% filter color(fg='green') %}SSHTunnel: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 2 | pk : {{ obj.pk }} 3 | name : {{ obj.name }} 4 | service : {{ obj.service.name }} 5 | cluster : {{ obj.cluster.name }} 6 | 7 | local port : {{ obj.data['local_port']|color(fg='yellow') }} 8 | host : {{ obj.data['host']|color(fg='yellow') }} 9 | host port : {{ obj.data['port']|color(fg='yellow') }} 10 | -------------------------------------------------------------------------------- /deployfish/templates/detail--standalonetask--short.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/task-definition.jinja2' import task_definition -%} 2 | {% from 'macros/secrets.jinja2' import secrets_table -%} 3 | {% from 'macros/task.jinja2' import vpc_configuration -%} 4 | {% filter color(fg='green') %}Task: {% endfilter %}{{ obj.name|color(fg='cyan', bold=True) }} 5 | pk : {{ obj.pk }} 6 | {%- if 'service' in obj.data %} 7 | service : {{ obj.data['service'] }} 8 | {%- endif %} 9 | cluster : {{ obj.data['cluster'] }} 10 | launch type : {{ obj.data['launchType'] }} 11 | {%- if obj.data['launchType'] == 'FARGATE' %} 12 | platform version : {{ obj.data.get('platformVersion', 'LATEST') }} 13 | {%- endif %} 14 | count : {{ obj.data.get('count', 1) }} 15 | {%- if obj.schedule %} 16 | {% filter color(fg='yellow', bold=True) %}schedule{% endfilter %} : {{ obj.schedule.data['ScheduleExpression']|color(fg='yellow', bold=True) }} {% if not obj.schedule.enabled %}{% filter color(fg='red', bold=True)%}[DISABLED]{% endfilter %}{% endif %} 17 | {%- endif %} 18 | {% if 'networkConfiguration' in obj.data %}{% filter color(fg='cyan') %} vpc configuration{% endfilter %}{{ vpc_configuration(obj)|indent(width=4)}}{% endif %} 19 | 20 | {% filter section_title(fg='cyan', bold=True) %}Task Definition{% endfilter %} 21 | {{ task_definition(obj.task_definition)|indent }} 22 | -------------------------------------------------------------------------------- /deployfish/templates/detail--standalonetask.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/secrets.jinja2' import secrets_table -%} 2 | {% include 'detail--standalonetask--short.jinja2' %} 3 | {%- if includes is not defined or ('secrets' in includes and obj.secrets) %} 4 | {% filter section_title(fg='cyan', bold=True) %}Secrets{% endfilter %} 5 | {{ secrets_table(obj.secrets.values()) }} 6 | {% endif -%} 7 | -------------------------------------------------------------------------------- /deployfish/templates/detail--targetgroup.jinja2: -------------------------------------------------------------------------------- 1 | {% from 
'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {% from 'macros/target-group.jinja2' import tg_health_check -%} 3 | {{ subobject('Target Group', obj.name) }} 4 | pk : {{ obj.pk }} 5 | name : {{ obj.name }} 6 | port : {{ obj.port }} 7 | protocol : {{ obj.protocol }} 8 | 9 | {{ subsection('health check') }} 10 | {{ tg_health_check(obj)|indent(4)}} 11 | 12 | {{ heading('Targets') }} 13 | 14 | {{ obj.targets|tabular(Id='Id', health='health__State', Name='target__name', IP_Address='target__ip_address', Port='port')}} 15 | -------------------------------------------------------------------------------- /deployfish/templates/detail.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils.jinja2' import heading, subsection, subobject, tags -%} 2 | {{ subobject(obj.__class__.__name__, obj.name) }} 3 | pk : {{ obj.pk }} 4 | name : {{ obj.name }} 5 | arn : {{ obj.arn }} 6 | 7 | -------------------------------------------------------------------------------- /deployfish/templates/macros/awslogs.jinja2: -------------------------------------------------------------------------------- 1 | {% macro log_streams_table(group, maxitems=None) %} 2 | {{ group.log_streams(maxitems=maxitems)|tabular(Name='logStreamName', Created='creationTime', Created_datatype='timestamp', Last_Event='lastEventTimestamp', Last_Event_default='', Last_Event_datatype='timestamp') }} 3 | {% endmacro %} 4 | -------------------------------------------------------------------------------- /deployfish/templates/macros/classicloadbalancer.jinja2: -------------------------------------------------------------------------------- 1 | {% macro elb_health_check(elb) %} 2 | target : {{ elb.data['HealthCheck']['Target'] }} 3 | interval : {{ elb.data['HealthCheck']['Interval'] }} 4 | timeout : {{ elb.data['HealthCheck']['Timeout'] }} 5 | unhealthy count : {{ elb.data['HealthCheck']['UnhealthyThreshold'] }} 6 | healthy count : {{ elb.data['HealthCheck']['HealthyThreshold'] }} 7 | {% endmacro %} 8 | -------------------------------------------------------------------------------- /deployfish/templates/macros/invoked-task.jinja2: -------------------------------------------------------------------------------- 1 | {% macro task_container(container) %} 2 | arn : {{ container['containerArn'] }} 3 | image : {{ container['image'] }} 4 | image digest : {{ container['imageDigest'] }} 5 | runtime id : {{ container['runtimeId'] }} 6 | health : {{ container['healthStatus'] }} 7 | {% if 'cpu' in container -%} 8 | cpu : {{ container['cpu'] }} 9 | {% endif -%} 10 | {% if 'memory' in container -%} 11 | memory : {{ container['memory'] }} 12 | {% endif -%} 13 | {% if 'memoryReservation' in container -%} 14 | memoryReservation : {{ container['memoryReservation'] }} 15 | {% endif -%} 16 | 17 | last status : {{ container['lastStatus'] }} 18 | {% if 'exitCode' in container -%} 19 | exit code : {{ container['exitCode'] }} 20 | {% endif -%} 21 | {% if 'reason' in container -%} 22 | reason : {{ container['reason'] }} 23 | {% endif -%} 24 | {% endmacro %} 25 | 26 | {% macro status(obj) %} 27 | desired status : {{ obj.data['desiredStatus'] }} 28 | last status : {{ obj.data['lastStatus'] }} 29 | {% if 'stopCode' in obj.data -%} 30 | stop code : {{ obj.data['stopCode'] }} 31 | {% endif -%} 32 | {% if 'stoppedReason' in obj.data -%} 33 | reason : {{ obj.data['stoppedReason'] }} 34 | {% endif -%} 35 | {% endmacro %} 36 | 37 | {% macro timestamps(obj) %} 38 | {% if 'createdAt' in obj.data -%} 39 | 
created : {{ obj.data['createdAt'].strftime('%Y-%m-%d %H:%M:%S') }} 40 | {% endif -%} 41 | {% if 'startedAt' in obj.data -%} 42 | started : {{ obj.data['startedAt'].strftime('%Y-%m-%d %H:%M:%S') }} 43 | {% endif -%} 44 | {% if 'connectivityAt' in obj.data -%} 45 | connectivity : {{ obj.data['connectivityAt'].strftime('%Y-%m-%d %H:%M:%S') }} 46 | {% endif -%} 47 | {% if 'pullStartedAt' in obj.data -%} 48 | pull started : {{ obj.data['pullStartedAt'].strftime('%Y-%m-%d %H:%M:%S') }} 49 | {% endif -%} 50 | {% if 'pullStoppedAt' in obj.data -%} 51 | pull stopped : {{ obj.data['pullStoppedAt'].strftime('%Y-%m-%d %H:%M:%S') }} 52 | {% endif -%} 53 | {% if 'executionStoppedAt' in obj.data -%} 54 | execution stopped : {{ obj.data['executionStoppedAt'].strftime('%Y-%m-%d %H:%M:%S') }} 55 | {% endif -%} 56 | {% if 'stoppingAt' in obj.data -%} 57 | stopping at : {{ obj.data['stoppingAt'].strftime('%Y-%m-%d %H:%M:%S') }} 58 | {% endif -%} 59 | {% if 'stoppedAt' in obj.data -%} 60 | stopped : {{ obj.data['stoppedAt'].strftime('%Y-%m-%d %H:%M:%S') }} 61 | {% endif -%} 62 | {% endmacro %} 63 | -------------------------------------------------------------------------------- /deployfish/templates/macros/secrets.jinja2: -------------------------------------------------------------------------------- 1 | {% macro secrets_table(objs) %} 2 | {% set secrets=objs|selectattr("arn") -%} 3 | {% set non_existant_parameters=objs|selectattr("arn", "none") -%} 4 | {{ secrets|tabular(Name='secret_name', Secure='is_secure', Value='value', ordering='Name') }} 5 | {% if non_existant_parameters %} 6 | 7 | These secrets referenced by the task definition do not exist in AWS SSM Parameter Store: 8 | {% for s in non_existant_parameters %} 9 | {{s.pk|color(fg='red')}} 10 | {%- endfor -%} 11 | {% endif -%} 12 | {% endmacro %} 13 | 14 | {% macro secrets_list(obj) %} 15 | {%- for name in obj.keys()|sort %} 16 | {% if obj[name].arn -%} 17 | {{ name|color(fg='yellow') }}: {{obj[name].value }} {% if obj[name].kms_key_id -%}{% filter color(fg='cyan') %}[SECURE:{{obj[name].kms_key_id}}]{% endfilter %}{% endif %} 18 | {%- else -%} 19 | {{ name|color(fg='yellow') }}: {% filter color(fg='red') %}NOT IN AWS{% endfilter %} 20 | {%- endif %} 21 | {%- endfor %} 22 | {% endmacro %} 23 | -------------------------------------------------------------------------------- /deployfish/templates/macros/service.jinja2: -------------------------------------------------------------------------------- 1 | {# 2 | ============================================================================================ 3 | Service related macros 4 | ============================================================================================ 5 | #} 6 | 7 | {% from 'macros/utils.jinja2' import subobject, subsection %} 8 | {% from 'macros/target-group.jinja2' import tg_health_check %} 9 | {% from 'macros/classicloadbalancer.jinja2' import elb_health_check %} 10 | 11 | {% macro load_balancer(lb_data) %} 12 | {%- if 'targetGroupArn' in lb_data %}{% set lb_type=lb_data['TargetGroup'].load_balancers[0].lb_type %}{% else %}{% set lb_type='Classic (ELB)' %}{% endif %} 13 | {{ subobject('Load Balancer', lb_type) }}{% if 'targetGroupArn' in lb_data %} 14 | load balancer : {{ lb_data['TargetGroup'].load_balancers[0].name }} 15 | hostname : {{ lb_data['TargetGroup'].load_balancers[0].hostname }} 16 | target group : {{ lb_data['TargetGroup'].name }} 17 | target group arn: {{ lb_data['TargetGroup'].arn }} 18 | {{ subsection('routing rules') }} 19 | {{ 
lb_data['TargetGroup']|target_group_listener_rules|indent(width=6) }} 20 | {{ subsection('health check') }} 21 | {{- tg_health_check(lb_data['TargetGroup'])|indent|indent(width=2) }} 22 | {%- else %} 23 | name : {{ lb_data['loadBalancerName'] }} 24 | hostname : {{ lb_data['LoadBalancer'].hostname }} 25 | {{ subsection('health check') }} 26 | {{- elb_health_check(lb_data['LoadBalancer'])|indent(width=6) }} 27 | {%- endif %} 28 | container name : {{ lb_data['containerName'] }} 29 | container port : {{ lb_data['containerPort'] -}} 30 | {% endmacro %} 31 | -------------------------------------------------------------------------------- /deployfish/templates/macros/target-group.jinja2: -------------------------------------------------------------------------------- 1 | {% macro tg_health_check(obj) %} 2 | target : {{ obj.data['HealthCheckProtocol']}}:{{obj.data['HealthCheckPort']}}{{obj.data['HealthCheckPath']}} 3 | interval : {{ obj.data['HealthCheckIntervalSeconds'] }} 4 | timeout : {{ obj.data['HealthCheckTimeoutSeconds'] }} 5 | unhealthy count : {{ obj.data['UnhealthyThresholdCount'] }} 6 | healthy count : {{ obj.data['HealthyThresholdCount'] }} 7 | {% endmacro %} 8 | 9 | -------------------------------------------------------------------------------- /deployfish/templates/macros/task.jinja2: -------------------------------------------------------------------------------- 1 | {# 2 | ============================================================================================ 3 | Macros related to rendering Tasks 4 | ============================================================================================ 5 | #} 6 | {% from 'macros/utils.jinja2' import subsection %} 7 | 8 | {# Render the awsvpcConfiguration a task #} 9 | {# ------------------------------------- #} 10 | {% macro vpc_configuration(obj) -%} 11 | {%- if 'subnets' in obj.vpc_configuration %} 12 | vpc : {{ obj.vpc_configuration['vpc'].name}} 13 | subnets 14 | {% for subnet in obj.vpc_configuration['subnets'] %} {{ subnet.name|color(fg='green') }} [{{ subnet.pk }}] {{ subnet.cidr_block|color(fg='cyan') }} 15 | {% endfor -%} 16 | {% endif -%}{% if 'security_groups' in obj.vpc_configuration %}security_groups 17 | {% for sg in obj.vpc_configuration['security_groups'] %} {{sg.name|color(fg='green')}} [{{sg.pk}}] 18 | {% endfor -%} 19 | {% endif -%} 20 | allow public IP : {{ obj.vpc_configuration['allow_public_ip'] }} 21 | {% endmacro -%} 22 | -------------------------------------------------------------------------------- /deployfish/templates/macros/utils.jinja2: -------------------------------------------------------------------------------- 1 | {# 2 | ============================================================================================ 3 | General macros for use in any template 4 | ============================================================================================ 5 | #} 6 | 7 | 8 | {% macro heading(msg) %} 9 | {%- filter section_title(fg='cyan', bold=True) %}{{msg}}{% endfilter -%} 10 | {% endmacro %} 11 | 12 | {% macro subsection(msg) %} 13 | {%- filter color(fg='cyan', bold=True) %}{{ msg }}{% endfilter -%} 14 | {% endmacro %} 15 | 16 | {% macro subobject(label, name) %} 17 | {% filter color(fg='green') %}{{ label }}: {% endfilter %}{{ name|color(fg='cyan', bold=True) -}} 18 | {% endmacro %} 19 | 20 | {% macro tags(tag_dict) %} 21 | {{ subsection('tags') }} 22 | {% for key, value in tag_dict.items() %} {{ key|color(fg='yellow') }}: {{ value }} 23 | {% endfor -%} 24 | {% endmacro %} 25 | 26 | {% macro 
indent(depth) %} 27 | {{- ' ' * depth -}} 28 | {% endmacro %} 29 | -------------------------------------------------------------------------------- /deployfish/templates/plan--service.jinja2: -------------------------------------------------------------------------------- 1 | {% from 'macros/utils-plan.jinja2' import diff_sym, render_diff %} 2 | {# Now call the macro to loop through the changes #} 3 | {% filter color(fg='green') %}Service: {% endfilter %}{{ obj.pk|color(fg='cyan', bold=True) }} 4 | {% if debug %}{% filter color(fg='yellow') %}Debugging...{% endfilter %}{% endif%} 5 | {{ render_diff(aws_json, changes, 'services.' + obj.pk, "", None, debug) }} 6 | 7 | Run update command to apply changes: 8 | 9 | deploy update {{ obj.name }} 10 | -------------------------------------------------------------------------------- /deployfish/templates/plan--standalonetask.jinja2: -------------------------------------------------------------------------------- 1 | {%- from 'macros/utils-plan.jinja2' import diff_sym, render_diff -%} 2 | {# Now call the macro to loop through the changes #} 3 | {% set taskname = obj.cluster.pk ~ ":" ~ obj.pk %} 4 | {% filter color(fg='green') %}StandaloneTask: {% endfilter %}{{ taskname|color(fg='cyan', bold=True) }} 5 | {% if debug %}{% filter color(fg='yellow') %}Debugging...{% endfilter %}{% endif%} 6 | {{ render_diff(aws_json, changes, 'standalonetask.' + obj.pk, "", None, debug) }} 7 | 8 | Run update command to apply changes: 9 | 10 | deploy task update {{ obj.name }} 11 | -------------------------------------------------------------------------------- /deployfish/types.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | from collections.abc import Callable, Sequence 5 | from typing import ( 6 | TYPE_CHECKING, 7 | Any, 8 | Optional, 9 | ) 10 | 11 | if sys.version_info >= (3, 8): 12 | from typing import Protocol 13 | else: 14 | from typing import Protocol 15 | 16 | if TYPE_CHECKING: 17 | from deployfish.core.models import ( 18 | Cluster, 19 | ContainerDefinition, 20 | Instance, 21 | InvokedTask, 22 | Manager, 23 | Model, 24 | Secret, 25 | SSHTunnel, 26 | TaskDefinition, 27 | ) 28 | 29 | class SupportsSSH(Protocol): 30 | 31 | @property 32 | def ssh_targets(self) -> Sequence["Instance"]: 33 | ... 34 | 35 | @property 36 | def ssh_target(self) -> Optional["Instance"]: 37 | ... 38 | 39 | @property 40 | def ssh_proxy_type(self) -> str: 41 | ... 42 | 43 | class SupportsTunnel(Protocol): 44 | 45 | @property 46 | def tunnel_targets(self) -> Sequence["Instance"]: 47 | ... 48 | 49 | @property 50 | def tunnel_target(self) -> Optional["Instance"]: 51 | ... 52 | 53 | @property 54 | def ssh_tunnels(self) -> Sequence["SSHTunnel"]: 55 | ... 56 | 57 | def tunnel(self, tunnel: "SSHTunnel", verbose: bool = False, tunnel_target: "Instance" = None) -> None: 58 | ... 59 | 60 | class SupportsExec(Protocol): 61 | 62 | @property 63 | def exec_enabled(self) -> bool: 64 | ... 65 | 66 | 67 | class SupportsNetworking(SupportsSSH, SupportsTunnel, Protocol): 68 | pass 69 | 70 | 71 | class SupportsCache(Protocol): 72 | 73 | cache: dict[str, Any] 74 | 75 | def get_cached( 76 | self, 77 | key: str, 78 | populator: Callable, 79 | args: list[Any], 80 | kwargs: dict[str, Any] = None 81 | ) -> Any: 82 | ... 83 | 84 | class SupportsSecrets(Protocol): 85 | 86 | @property 87 | def secrets(self) -> dict[str, "Secret"]: 88 | ... 89 | 90 | @property 91 | def secrets_prefix(self) -> str: 92 | ... 
93 | 94 | def reload_secrets(self) -> None: 95 | ... 96 | 97 | def write_secrets(self) -> None: 98 | ... 99 | 100 | def diff_secrets(self, other: Sequence["Secret"], ignore_external: bool = False) -> dict[str, Any]: 101 | ... 102 | 103 | 104 | class SupportsModel(Protocol): 105 | 106 | objects: "Manager" 107 | config_section: str 108 | data: dict[str, Any] 109 | 110 | @property 111 | def pk(self) -> str: 112 | ... 113 | 114 | @property 115 | def name(self) -> str: 116 | ... 117 | 118 | @property 119 | def arn(self) -> str | None: 120 | ... 121 | 122 | class SupportsTaskDefinition(SupportsModel, Protocol): 123 | 124 | containers: list["ContainerDefinition"] 125 | 126 | 127 | class SupportsNetworkedModel(SupportsModel, SupportsNetworking, Protocol): 128 | pass 129 | 130 | class SupportsSSHModel(SupportsModel, SupportsSSH, Protocol): 131 | pass 132 | 133 | class SupportsTunnelModel(SupportsModel, SupportsSSH, SupportsTunnel, Protocol): 134 | pass 135 | 136 | class SupportsExecModel(SupportsModel, SupportsSSH, SupportsExec, Protocol): 137 | pass 138 | 139 | class SupportsModelWithSecrets(SupportsModel, SupportsSecrets, Protocol): 140 | pass 141 | 142 | class SupportsService( 143 | SupportsModel, 144 | SupportsSSH, 145 | SupportsTunnel, 146 | SupportsSecrets, 147 | Protocol 148 | ): 149 | 150 | @property 151 | def exec_enabled(self) -> bool: 152 | ... 153 | 154 | @property 155 | def cluster(self) -> "Cluster": 156 | ... 157 | 158 | @property 159 | def task_definition(self) -> "TaskDefinition": 160 | ... 161 | 162 | @property 163 | def running_tasks(self) -> Sequence["InvokedTask"]: 164 | ... 165 | 166 | 167 | class SupportsModelClass(Protocol): 168 | 169 | model: type["Model"] 170 | 171 | 172 | class SupportsRendering(Protocol): 173 | 174 | datetime_format: str | None 175 | date_format: str | None 176 | float_precision: str | None 177 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = deployfish 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements just to build the docs 2 | # -------------------------------------------------------------------------------------------- 3 | click==6.7 # https://github.com/pallets/click 4 | Sphinx # https://github.com/sphinx-doc/sphinx 5 | sphinx-autobuild # https://github.com/GaretJax/sphinx-autobuild 6 | sphinx-click # https://github.com/click-contrib/sphinx-click 7 | sphinx_rtd_theme # https://github.com/readthedocs/sphinx_rtd_theme 8 | -------------------------------------------------------------------------------- /docs/source/advanced.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Advanced Features 3 | ***************** 4 | 5 | .. contents:: 6 | :local: 7 | 8 | Architectural Assumptions 9 | ========================= 10 | 11 | A few assumptions are made about how your VPCs are structured. It is assumed 12 | that you have a bastion host for each of your VPCs. These bastion hosts are 13 | used to access the individual EC2 instances in your ECS clusters. These 14 | bastion hosts must also have a ``Name`` tag beginning with ``bastion``, 15 | like ``bastion-test``. 16 | 17 | deploy cluster 18 | ============== 19 | 20 | The ``deploy cluster`` commands allow you to interact with the individual EC2 21 | machines that make up your ECS cluster. It provides three subcommands: 22 | ``info``, ``run``, and ``ssh``. Many of the advanced features of 23 | deployfish rely on the architectural assumptions described above and 24 | will not work without them. 25 | 26 | Info 27 | ---- 28 | The ``info`` subcommand allows you to view information about the individual EC2 29 | systems that make up your ECS cluster. For example:: 30 | 31 | deploy cluster info web-test 32 | 33 | Might return the output below:: 34 | 35 | Cluster: web-test 36 | pk : web-test 37 | name : web-test 38 | arn : arn:aws:ecs:us-west-2:123456789012:cluster/web-test 39 | status : ACTIVE 40 | instances : 6 41 | autoscaling_group : web-test 42 | task counts 43 | running : 5 44 | pending : 0 45 | 46 | Container instances 47 | ------------------- 48 | 49 | Name Instance Type IP Address Free CPU Free Memory 50 | ---------------- --------------- ------------ ---------- ------------- 51 | ecs.web-test.b.2 t2.medium 10.0.1.1 768 102 52 | ecs.web-test.b.1 t2.medium 10.0.1.2 1536 182 53 | ecs.web-test.c.2 t2.medium 10.0.2.1 1408 1206 54 | ecs.web-test.c.1 t2.medium 10.0.2.2 1024 614 55 | 56 | Services 57 | -------- 58 | 59 | Name Version Desired Running Created 60 | ------------------------- --------- --------- --------- ------------------- 61 | service1 2.0.8 2 2 2021-04-02 17:29:30 62 | service2 1.4.1 1 1 2021-04-23 11:21:39 63 | service3 2.1.2 2 2 2020-08-19 09:33:12 64 | 65 | 66 | deploy service ssh 67 | ================================= 68 | 69 | The ``deploy service ssh`` command (alias: ``deploy ssh``) will connect you via SSH to a system in your ECS cluster. If 70 | you have any running containers, it will connect to an instance hosting one of them; otherwise it will connect to a random instance.
This is useful 71 | for debugging:: 72 | 73 | # ssh to a container instance for the service identified by environment "test" in deployfish.yml 74 | deploy service ssh test 75 | # ssh to a container instance for the service "service1" in cluster "web-test" 76 | deploy service ssh web-test:service1 77 | 78 | deploy service exec 79 | ================================== 80 | 81 | The ``deploy service exec`` command (alias: ``deploy exec``) will connect you to a running container, similar to 82 | connecting to the host running the container and running:: 83 | 84 | docker exec -it <container_id> /bin/bash 85 | 86 | It will choose a random container. In our case the command would be:: 87 | 88 | # exec into a container for the service identified by environment "test" in deployfish.yml 89 | deploy service exec test 90 | # exec into a container for the service "service1" in cluster "web-test" 91 | deploy service exec web-test:service1 92 | -------------------------------------------------------------------------------- /docs/source/api/adapters/abstract.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__abstract: 2 | 3 | Abstract 4 | ======== 5 | 6 | .. automodule:: deployfish.core.adapters.abstract 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/appscaling.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__appscaling: 2 | 3 | Application Scaling 4 | =================== 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.appscaling 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/cloudwatch.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__cloudwatch: 2 | 3 | CloudWatch 4 | ========== 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.cloudwatch 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/ecs.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__ecs: 2 | 3 | Elastic Container Service 4 | ========================= 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.ecs 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/events.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__events: 2 | 3 | Events 4 | ====== 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.events 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/index.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters: 2 | 3 | Adapters 4 | ======== 5 | 6 | Adapters are the classes responsible for converting the data from ``deployfish.yml`` into the data structures 7 | that are used to build Deployfish models. 8 | 9 | ..
toctree:: 10 | :maxdepth: 2 11 | 12 | abstract 13 | mixins 14 | appscaling 15 | cloudwatch 16 | ecs 17 | events 18 | secrets 19 | service_discovery 20 | ssh 21 | -------------------------------------------------------------------------------- /docs/source/api/adapters/mixins.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__mixins: 2 | 3 | Mixins 4 | ====== 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.mixins 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/secrets.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__secrets: 2 | 3 | AWS SSM Paramter Store 4 | ====================== 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.secrets 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/service_discovery.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__service_discovery: 2 | 3 | Service Discovery 4 | ================= 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.service_discovery 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/adapters/ssh.rst: -------------------------------------------------------------------------------- 1 | .. _api__adapters__ssh: 2 | 3 | SSH 4 | === 5 | 6 | .. automodule:: deployfish.core.adapters.deployfish.ssh 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/config/config.rst: -------------------------------------------------------------------------------- 1 | .. _api__config__config: 2 | 3 | Config 4 | ====== 5 | 6 | .. automodule:: deployfish.config.config 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/config/config_processors.rst: -------------------------------------------------------------------------------- 1 | .. _api__config__config_processors: 2 | 3 | Config Processors 4 | ================= 5 | 6 | .. automodule:: deployfish.config.processors.__init__ 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | .. automodule:: deployfish.config.processors.abstract 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | 16 | .. automodule:: deployfish.config.processors.environment 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | 21 | .. automodule:: deployfish.config.processors.terraform 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | -------------------------------------------------------------------------------- /docs/source/api/config/index.rst: -------------------------------------------------------------------------------- 1 | .. _api__config: 2 | 3 | Config and Config Processors 4 | ============================ 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | config 10 | config_processors 11 | -------------------------------------------------------------------------------- /docs/source/api/controllers/base.rst: -------------------------------------------------------------------------------- 1 | .. 
_api__controllers__base: 2 | 3 | Base 4 | ==== 5 | 6 | .. automodule:: deployfish.controllers.base 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/cluster.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__cluster: 2 | 3 | Cluster 4 | ======= 5 | 6 | .. automodule:: deployfish.controllers.cluster 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/commands.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__commands: 2 | 3 | Commands 4 | ======== 5 | 6 | .. automodule:: deployfish.controllers.commands 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/crud.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__crud: 2 | 3 | Crud 4 | ==== 5 | 6 | .. automodule:: deployfish.controllers.crud 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/elb.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__elb: 2 | 3 | Elastic Load Balancer 4 | ===================== 5 | 6 | .. automodule:: deployfish.controllers.elb 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | .. automodule:: deployfish.controllers.elbv2 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | -------------------------------------------------------------------------------- /docs/source/api/controllers/extension.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__ext: 2 | 3 | Extension 4 | ========= 5 | 6 | .. automodule:: deployfish.ext.ext_df_argparse 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/index.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers: 2 | 3 | Controllers 4 | =========== 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | extension 10 | base 11 | crud 12 | cluster 13 | commands 14 | elb 15 | invoked_task 16 | logs 17 | network 18 | secrets 19 | service 20 | rds 21 | task 22 | tunnel 23 | -------------------------------------------------------------------------------- /docs/source/api/controllers/invoked_task.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__invoked_task: 2 | 3 | Invoked Task 4 | ============ 5 | 6 | .. automodule:: deployfish.controllers.invoked_task 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/logs.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__logs: 2 | 3 | Logs 4 | ==== 5 | 6 | .. 
automodule:: deployfish.controllers.logs 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/network.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__network: 2 | 3 | Network 4 | ======= 5 | 6 | .. automodule:: deployfish.controllers.network 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/rds.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__rds: 2 | 3 | Relational Database Service 4 | =========================== 5 | 6 | .. automodule:: deployfish.controllers.rds 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/secrets.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__secrets: 2 | 3 | Secrets 4 | ======= 5 | 6 | .. automodule:: deployfish.controllers.secrets 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/service.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__service: 2 | 3 | Service 4 | ======= 5 | 6 | .. automodule:: deployfish.controllers.service 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/task.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__task: 2 | 3 | Task 4 | ==== 5 | 6 | .. automodule:: deployfish.controllers.task 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/controllers/tunnel.rst: -------------------------------------------------------------------------------- 1 | .. _api__controllers__tunnel: 2 | 3 | Tunnel 4 | ====== 5 | 6 | .. automodule:: deployfish.controllers.tunnel 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/loaders.rst: -------------------------------------------------------------------------------- 1 | .. _api__loaders: 2 | 3 | Loaders 4 | ======= 5 | 6 | .. automodule:: deployfish.core.loaders 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/main.rst: -------------------------------------------------------------------------------- 1 | .. _api__main: 2 | 3 | Main 4 | ==== 5 | 6 | Application configuration. 7 | 8 | .. automodule:: deployfish.main 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | -------------------------------------------------------------------------------- /docs/source/api/models/abstract.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__abstract: 2 | 3 | Abstract 4 | ======== 5 | 6 | .. 
automodule:: deployfish.core.models.abstract 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/appscaling.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__appscaling: 2 | 3 | Application Scaling 4 | =================== 5 | 6 | .. automodule:: deployfish.core.models.appscaling 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/cloudwatch.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__cloudwatch: 2 | 3 | CloudWatch 4 | ========== 5 | 6 | .. automodule:: deployfish.core.models.cloudwatch 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/cloudwatchlogs.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__cloudwatchlogs: 2 | 3 | CloudWatch Logs 4 | =============== 5 | 6 | .. automodule:: deployfish.core.models.cloudwatchlogs 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/ec2.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__ec2: 2 | 3 | Elastic Compute Cloud 4 | ===================== 5 | 6 | .. automodule:: deployfish.core.models.ec2 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/ecs.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__ecs: 2 | 3 | Elastic Container Service 4 | ========================= 5 | 6 | .. automodule:: deployfish.core.models.ecs 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/efs.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__efs: 2 | 3 | Elastic File System 4 | =================== 5 | 6 | .. automodule:: deployfish.core.models.efs 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/elb.rst: -------------------------------------------------------------------------------- 1 | .. _api__classic_load_balancing: 2 | 3 | Classic Load Balancing 4 | ====================== 5 | 6 | .. automodule:: deployfish.core.models.elb 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/elbv2.rst: -------------------------------------------------------------------------------- 1 | Application/Network Load Balancing 2 | ================================== 3 | 4 | .. automodule:: deployfish.core.models.elbv2 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/api/models/events.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__events: 2 | 3 | Events 4 | ====== 5 | 6 | .. 
automodule:: deployfish.core.models.events 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/index.rst: -------------------------------------------------------------------------------- 1 | .. _api__models: 2 | 3 | Models and Managers 4 | =================== 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | mixins 10 | abstract 11 | appscaling 12 | cloudwatch 13 | cloudwatchlogs 14 | ec2 15 | ecs 16 | efs 17 | elb 18 | elbv2 19 | events 20 | rds 21 | secrets 22 | secrets_manager 23 | service_discovery 24 | ssh 25 | -------------------------------------------------------------------------------- /docs/source/api/models/mixins.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__mixins: 2 | 3 | Mixins 4 | ====== 5 | 6 | .. automodule:: deployfish.core.models.mixins 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/rds.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__rds: 2 | 3 | Relational Database Service 4 | =========================== 5 | 6 | .. automodule:: deployfish.core.models.rds 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/secrets.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__secrets: 2 | 3 | AWS SSM Paramter Store 4 | ====================== 5 | 6 | .. automodule:: deployfish.core.models.secrets 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/secrets_manager.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__secrets_manager: 2 | 3 | Secrets Manager 4 | =============== 5 | 6 | .. automodule:: deployfish.core.models.secrets_manager 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/service_discovery.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__service_discovery: 2 | 3 | Service Discovery 4 | ================= 5 | 6 | .. automodule:: deployfish.core.models.service_discovery 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/models/ssh.rst: -------------------------------------------------------------------------------- 1 | .. _api__models__ssh: 2 | 3 | SSH 4 | ==== 5 | 6 | .. automodule:: deployfish.core.models.ssh 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/source/api/renderers.rst: -------------------------------------------------------------------------------- 1 | .. _api__renderers: 2 | 3 | Renderers 4 | ========= 5 | 6 | .. automodule:: deployfish.renderers.abstract 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | 11 | .. automodule:: deployfish.renderers.table 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | 16 | .. 
automodule:: deployfish.renderers.misc 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # 2 | # deployfish documentation build configuration file, created by 3 | # sphinx-quickstart on Tue Jun 13 16:54:27 2017. 4 | # 5 | # This file is execfile()d with the current directory set to its 6 | # containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | # If extensions (or modules to document with autodoc) are in another directory, 15 | # add these directories to sys.path here. If the directory is relative to the 16 | # documentation root, use os.path.abspath to make it absolute, like shown here. 17 | # 18 | import os 19 | import sys 20 | from typing import Any 21 | 22 | sys.path.insert(0, os.path.abspath("../..")) 23 | 24 | import sphinx_rtd_theme 25 | 26 | # -- General configuration ------------------------------------------------ 27 | 28 | # If your documentation needs a minimal Sphinx version, state it here. 29 | # 30 | # needs_sphinx = '1.0' 31 | 32 | # Add any Sphinx extension module names here, as strings. They can be 33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 | # ones. 35 | extensions: list[str] = [ 36 | "sphinx.ext.autodoc", 37 | "sphinx.ext.napoleon", 38 | "sphinx.ext.viewcode", 39 | "sphinx.ext.intersphinx", 40 | "sphinx_rtd_theme", 41 | ] 42 | 43 | # Add any paths that contain templates here, relative to this directory. 44 | templates_path: list[str] = ["_templates"] 45 | 46 | # The suffix(es) of source filenames. 47 | # You can specify multiple suffix as a list of string: 48 | # 49 | # source_suffix = ['.rst', '.md'] 50 | source_suffix: str = ".rst" 51 | 52 | # The master toctree document. 53 | master_doc: str = "index" 54 | 55 | # General information about the project. 56 | project: str = "Deployfish" 57 | copyright: str = "Caltech IMSS ADS" # pylint: disable=redefined-builtin 58 | author: str = "Chris Malek, Glenn Bach" 59 | 60 | show_authors = False 61 | 62 | version: str = "1.15.1" 63 | release: str = "1.15.1" 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | # This patterns also effect to html_static_path and html_extra_path 68 | exclude_patterns: list[str] = [] 69 | 70 | add_function_parentheses: bool = False 71 | add_module_names: bool = True 72 | 73 | autodoc_member_order: str = "bysource" 74 | autodoc_type_aliases: dict[str, str] = {} 75 | 76 | # the locations and names of other projects that should be linked to this one 77 | intersphinx_mapping: dict[str, tuple[str, str | None]] = { 78 | "python": ("https://docs.python.org/3", None), 79 | "boto3": ( 80 | "https://boto3.amazonaws.com/v1/documentation/api/latest/", 81 | None, 82 | ), 83 | } 84 | 85 | # The name of the Pygments (syntax highlighting) style to use. 86 | pygments_style: str = "sphinx" 87 | 88 | # If true, `todo` and `todoList` produce output, else they produce nothing. 
89 | todo_include_todos: bool = False 90 | 91 | 92 | # -- Options for HTML output ---------------------------------------------- 93 | 94 | html_theme: str = "sphinx_rtd_theme" 95 | html_theme_path: list[str] = [sphinx_rtd_theme.get_html_theme_path()] 96 | html_context: dict[str, Any] = { 97 | "display_github": True, 98 | "github_user": "caltechads", 99 | "github_repo": "deployfish", 100 | "github_version": "master", 101 | "conf_py_path": "/docs/source/", 102 | } 103 | html_theme_options: dict[str, Any] = { 104 | "collapse_navigation": True, 105 | "display_version": True, 106 | "navigation_depth": 3, 107 | } 108 | 109 | # Add any paths that contain custom static files (such as style sheets) here, 110 | # relative to this directory. They are copied after the builtin static files, 111 | # so a file named "default.css" will overwrite the builtin "default.css". 112 | # html_static_path: List[str] = ['_static'] 113 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. deployfish documentation master file, created by 2 | sphinx-quickstart on Tue Jun 13 16:54:27 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ============ 7 | Deployfish 8 | ============ 9 | 10 | .. include:: quickintro.rst 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | :hidden: 15 | :caption: User Guide 16 | 17 | intro 18 | install 19 | tutorials 20 | plugins/plugins 21 | yaml 22 | 23 | .. toctree:: 24 | :hidden: 25 | :caption: Developer Guide 26 | 27 | runbook/contributing 28 | runbook/architecture 29 | runbook/adapters 30 | runbook/extending 31 | runbook/testing 32 | 33 | .. toctree:: 34 | :hidden: 35 | :caption: Reference 36 | 37 | api/main 38 | api/config/index 39 | api/controllers/index 40 | api/loaders 41 | api/adapters/index 42 | api/models/index 43 | api/renderers 44 | 45 | .. 46 | Indices and tables 47 | ================== 48 | 49 | * :ref:`genindex` 50 | * :ref:`modindex` 51 | * :ref:`search` 52 | -------------------------------------------------------------------------------- /docs/source/install.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | Installation 3 | ************ 4 | 5 | deployfish is a pure python package. As such, it can be installed in the 6 | usual python ways. For the following instructions, either install it into your 7 | global python install, or use a python `virtual environment `_ to install it 8 | without polluting your global python environment. 9 | 10 | Install deployfish 11 | ================== 12 | 13 | :: 14 | 15 | pip install deployfish 16 | 17 | 18 | Install AWS CLI v2 19 | ================== 20 | 21 | deployfish requires AWS CLI v2 for some of its functionality, notably EXEC'ing into FARGATE containers. While AWS CLI v1 22 | was installable via `pip`, AWS CLI v2 is not, so we have to do the install manually.
Here's how to set that up on a Mac:: 23 | 24 | # Uninstall any old versions of the cli 25 | pip uninstall awscli 26 | 27 | # Deactivate any pyenv environment so we can be in the system-wide Python interpreter 28 | cd ~ 29 | 30 | # Install the new AWS CLI from brew -- it's no longer pip installable 31 | brew update 32 | brew install awscli 33 | 34 | # Install the Session Manager plugin 35 | curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/mac/sessionmanager-bundle.zip" -o "sessionmanager-bundle.zip" 36 | unzip sessionmanager-bundle.zip 37 | sudo ./sessionmanager-bundle/install -i /usr/local/sessionmanagerplugin -b /usr/local/bin/session-manager-plugin 38 | 39 | 40 | If later on you have issues with EXEC'ing or with the `aws` command in general, check to ensure you're getting your 41 | global v2 version of `aws` instead of an old v1 one from your current virtual environment:: 42 | 43 | aws --version 44 | 45 | If the version string shows version < 2:: 46 | 47 | pip uninstall awscli 48 | -------------------------------------------------------------------------------- /docs/source/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | .. include:: quickintro.rst 5 | 6 | To use ``deployfish``, you 7 | 8 | * Install ``deployfish`` 9 | * Define your tasks and services in ``deployfish.yml`` 10 | * Use ``deploy`` to start managing your tasks and services 11 | 12 | A simple ``deployfish.yml`` looks like this:: 13 | 14 | services: 15 | - name: my-service 16 | environment: prod 17 | cluster: my-cluster 18 | count: 2 19 | load_balancer: 20 | service_role_arn: arn:aws:iam::123142123547:role/ecsServiceRole 21 | load_balancer_name: my-service-elb 22 | container_name: my-service 23 | container_port: 80 24 | family: my-service 25 | network_mode: bridge 26 | task_role_arn: arn:aws:iam::123142123547:role/myTaskRole 27 | containers: 28 | - name: my-service 29 | image: 123142123547.dkr.ecr.us-west-2.amazonaws.com/my-service:0.0.1 30 | cpu: 128 31 | memory: 256 32 | ports: 33 | - "80" 34 | environment: 35 | - ENVIRONMENT=prod 36 | - ANOTHER_ENV_VAR=value 37 | - THIRD_ENV_VAR=value 38 | 39 | See the ``examples/`` folder in this repository for example ``deployfish.yml`` 40 | files. 41 | -------------------------------------------------------------------------------- /docs/source/plugins/mysql.rst: -------------------------------------------------------------------------------- 1 | Mysql Plugin 2 | ============ 3 | 4 | ``deployfish-mysql`` is a plugin that allows you to manage databases in remote MySQL servers in AWS. 5 | 6 | * ``deploy mysql create {name}``: Create a database and user, with the appropriate ``GRANT``. 7 | * ``deploy mysql update {name}``: Update the user's password and ``GRANT``. 8 | * ``deploy mysql validate {name}``: Validate that the username/password combination is valid. 9 | * ``deploy mysql dump {name}``: Dump the remote MySQL database to a local SQL file. 10 | * ``deploy mysql load {name} {filename}``: Load a local SQL file into the remote MySQL database. 11 | * ``deploy mysql show-grants {name}``: Show the ``GRANT``\ s for your user. 12 | 13 | ``{name}`` above refers to the ``name`` of a MySQL connection from the ``mysql:`` section of 14 | your ``deployfish.yml`` file. See below for how the ``mysql:`` connection works.
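For example, with a connection named ``test`` (like the one defined in the ``mysql:`` section shown
in the next section), a typical workflow might look like the sketch below; the ``dump.sql`` filename
is purely illustrative::

    # Create the database and user for the "test" connection, with the appropriate GRANT
    deploy mysql create test
    # Check that the username/password combination actually works
    deploy mysql validate test
    # Dump the remote database to a local SQL file
    deploy mysql dump test
    # Load a local SQL file back into the remote database
    deploy mysql load test dump.sql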
15 | 16 | Configure deployfish-mysql 17 | -------------------------- 18 | 19 | First follow the instructions for installing and configuring deployfish, then 20 | add this stanza to your ``~/.deployfish.yml`` file:: 21 | 22 | plugin.mysql: 23 | enabled: true 24 | 25 | NOTE: ``~/.deployfish.yml`` is the config file for deployfish itself. This is different from 26 | the ``deployfish.yml`` file that defines your services and tasks. 27 | 28 | Instrument your deployfish.yml 29 | ------------------------------ 30 | 31 | ``deployfish-mysql`` looks in your ``deployfish.yml`` file (the one with your services and 32 | task definitions, not the ``~/.deployfish.yml`` config file for deployfish itself) for a 33 | section named ``mysql``, which has definitions of MySQL databases:: 34 | 35 | mysql: 36 | - name: test 37 | service: service-test 38 | host: my-remote-rds-host.amazonaws.com 39 | db: mydb 40 | user: myuser 41 | pass: password 42 | 43 | - name: config-test 44 | service: service-test 45 | host: config.DB_HOST 46 | db: config.DB_NAME 47 | user: config.DB_USER 48 | pass: config.DB_PASSWORD 49 | 50 | services: 51 | - name: dftest-test 52 | cluster: my-cluster 53 | environment: test 54 | config: 55 | - DEBUG=False 56 | - DB_HOST=${terraform.rds_address} 57 | - DB_NAME=dftest 58 | - DB_USER=dftest_u 59 | - DB_PASSWORD:secure:kms_key_arn=${env.DB_PASSWORD} 60 | 61 | Entries in the ``mysql:`` section must minimally define these keys: 62 | 63 | * ``name``: the name of the connection. This is used in all the ``deploy mysql`` commands as the connection name. 64 | * ``service``: the name of a service in the ``services:`` section. This is used to determine which host to SSH to when running the ``deploy mysql`` commands. 65 | * ``host``: the hostname of the remote MySQL server 66 | * ``db``: the name of the database to work with on ``host`` 67 | * ``user``: the username to use when authenticating to ``host`` 68 | * ``pass``: the password to use when authenticating to ``host`` 69 | 70 | These are optional keys that you can add to your connection definition: 71 | 72 | * ``port``: the port to connect to on the remote MySQL server. Default: 3306 73 | * ``character_set``: set the character set of your database to this (used for ``deploy mysql create`` and ``deploy mysql update``). Default: ``utf8``. 74 | * ``collation``: set the collation of your database to this (used for ``deploy mysql create`` and ``deploy mysql update``). Default: ``utf8_unicode_ci``. 75 | 76 | As you can see in the examples above, you can either hard-code ``host``, ``db``, ``user`` and ``pass``, or you can reference ``config`` parameters from the ``config:`` section of your service's definition. For the latter, ``deployfish-mysql`` will retrieve those parameters directly from AWS SSM Parameter Store, so ensure you write the service config to AWS before trying to establish a MySQL connection. 77 | -------------------------------------------------------------------------------- /docs/source/plugins/plugins.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Plugins 3 | ======= 4 | 5 | ..
toctree:: 6 | :maxdepth: 1 7 | :caption: Contents: 8 | 9 | mysql 10 | slack 11 | sqs 12 | -------------------------------------------------------------------------------- /docs/source/plugins/slack.rst: -------------------------------------------------------------------------------- 1 | Slack Plugin 2 | ============ 3 | 4 | ``deployfish-slack`` is a plugin that provides notifications via Slack for service updates. 5 | 6 | Configure deployfish-slack 7 | -------------------------- 8 | 9 | First follow the instructions for installing and configuring deployfish, then 10 | add this stanza to your ``~/.deployfish.yml`` file:: 11 | 12 | plugin.slack: 13 | enabled: true 14 | token: 15 | channel: 16 | 17 | If you specify a channel of ````, the Slack message will be sent to the user who 18 | initiated the deploy. If you specify a channel name, which should be prefixed with a ``#``, 19 | it must be quoted, or it will be interpreted as a comment by the YAML parser. 20 | 21 | NOTE: ``~/.deployfish.yml`` is the config file for deployfish itself. This is different from 22 | the ``deployfish.yml`` file that defines your services and tasks. 23 | 24 | -------------------------------------------------------------------------------- /docs/source/plugins/sqs.rst: -------------------------------------------------------------------------------- 1 | SQS Plugin 2 | ========== 3 | 4 | ``deployfish-sqs`` is a plugin that provides notifications via AWS SQS for service updates. 5 | 6 | Configure deployfish-sqs 7 | -------------------------- 8 | 9 | First follow the instructions for installing and configuring deployfish, then 10 | add this stanza to your ``~/.deployfish.yml`` file:: 11 | 12 | plugin.sqs: 13 | enabled: true 14 | queue: 15 | - name: 16 | type: 17 | profile: 18 | 19 | ``queue`` is a list, so a message will be sent to each queue in the list. ``profile`` can be omitted 20 | if you want your default profile to be used. 21 | 22 | NOTE: ``~/.deployfish.yml`` is the config file for deployfish itself. This is different from 23 | the ``deployfish.yml`` file that defines your services and tasks. 24 | 25 | -------------------------------------------------------------------------------- /docs/source/quickintro.rst: -------------------------------------------------------------------------------- 1 | ``deployfish`` has commands for managing the whole lifecycle of your application: 2 | 3 | * Safely and easily create, update, destroy and restart ECS services 4 | * Safely and easily create, update, run, schedule and unschedule ECS tasks 5 | * Extensive support for ECS related services like load balancing, application 6 | autoscaling and service discovery 7 | * Easily scale the number of containers in your service, optionally scaling its 8 | associated autoscaling group at the same time 9 | * Manage multiple environments for your task or service (test, qa, prod, etc.) in 10 | multiple AWS accounts. 11 | * Uses AWS Parameter Store for secrets for your containers 12 | * View the configuration and status of running ECS services 13 | * Run a one-off command related to your service 14 | * Easily exec through your VPC bastion host into your running containers, or 15 | ssh into an ECS container machine in your cluster. 16 | * Set up SSH tunnels to the private AWS resources in your VPC that your service 17 | uses so that you can connect to them from your work machine. 18 | 19 | * Extensible! Add additional functionality through custom deployfish modules. 20 | * Works great in CodeBuild steps in a CodePipeline based CI/CD system!
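As a quick sketch of day-to-day use (using the command forms shown in the tutorials; ``my-service`` is just a placeholder for one of your own service names)::

    deploy create my-service     # create the ECS service defined in deployfish.yml
    deploy info my-service       # show the service's configuration and current status
    deploy update my-service     # push out the changes after you edit deployfish.yml
    deploy scale my-service 4    # run 4 copies of the service's container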
21 | 22 | Additionally, ``deployfish`` integrates with 23 | `Terraform `_ state files so that you can use the 24 | values of terraform outputs directly in your ``deployfish`` configurations. 25 | -------------------------------------------------------------------------------- /docs/source/runbook/testing.rst: -------------------------------------------------------------------------------- 1 | .. _testing: 2 | 3 | Testing 4 | ======= 5 | 6 | To run the unittests, you'll need to set up a virtualenv and install the requirements. 7 | 8 | If you haven't yet, install: 9 | 10 | * `pyenv`_ 11 | * `pyenv-virtualenv`_ 12 | 13 | Deployfish can support python 3.7 and above. 14 | 15 | .. code-block:: shell 16 | 17 | $ pyenv install 3.11.9 18 | 19 | Set up a virtualenv and install the requirements: 20 | 21 | .. code-block:: shell 22 | 23 | $ pyenv virtualenv 3.11.9 deployfish 24 | $ pyenv local deployfish 25 | $ pip install --upgrade pip wheel 26 | $ pip install -r requirements.txt 27 | 28 | Run all the tests: 29 | 30 | .. code-block:: bash 31 | 32 | $ python -m unittest discover 33 | 34 | For specific tests, checkout your options with: ``python -m unittest --help`` 35 | 36 | 37 | .. _`pyenv`: https://github.com/pyenv/pyenv 38 | .. _`pyenv-virtualenv`: https://github.com/pyenv/pyenv-virtualenv 39 | -------------------------------------------------------------------------------- /docs/source/tutorial1.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | A Basic Service 3 | *************** 4 | 5 | 6 | Problem 7 | ======= 8 | 9 | In this tutorial we will configure the bare essentials. Everything in the configuration is required. Further tutorials will look at some of the optional parameters. 10 | 11 | The configuration below will result in a single container running in an AWS ECS cluster. The container is built from a simple nginx based hello-world image available on http://dockerhub.com, named `tutum/hello-world `_. 12 | 13 | Setup 14 | ===== 15 | 16 | In order to deploy this configuration, you will need an AWS ECS cluster, containing at least one EC2 machine, on which to run the container. You can either create a cluster named *hello-world-cluster* or change the *cluster* parameter in the configuration file to correspond to the name of the cluster that you created. 17 | 18 | Configuration 19 | ============= 20 | 21 | Here's the configuration for this service:: 22 | 23 | services: 24 | - name: hello-world-test 25 | cluster: hello-world-cluster 26 | count: 1 27 | family: hello-world 28 | containers: 29 | - name: hello-world 30 | image: tutum/hello-world 31 | cpu: 128 32 | memory: 256 33 | 34 | AWS ECS is made up of *services*, *tasks*, and *task definitions*. The *task definitions* define the *task* or *service*. A *task* is a container that runs and exits, while a *service* is a container that stays running, like a web server, and will be restarted by ECS if it shuts down unexpectedly. 35 | 36 | The configuration files you will use with *deployfish* are `YAML `_ based. A typical project or application will have a single *deployfish.yml* file, containing all of the project's relevant services. This initial example only defines a single service. 37 | 38 | If you want to define additional services, you simply have to add another name to the *services* array, along with its corresponding parameters:: 39 | 40 | services: 41 | - name: name1 42 | cluster: cluster1 43 | ... 44 | - name: name2 45 | cluster: cluster2 46 | ... 
47 | 48 | Required Service Parameters 49 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 50 | 51 | Each *service* contains at least the following five required parameters: 52 | 53 | *name* 54 | The name of the ECS service. In this case, it is *hello-world-test*. This has to be unique. 55 | 56 | *cluster* 57 | The ECS cluster that will run the resultant container. 58 | 59 | *count* 60 | The number of containers to run, which is 1 in this case. 61 | 62 | *family* 63 | The base name of the *task definition*. Each revision of your image will have its own *task definition* consisting of the base name and the revision number. We are naming this base name *hello-world*. 64 | 65 | *containers* 66 | This parameter defines the containers to be run. 67 | 68 | Required Container Parameters 69 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 70 | 71 | Each *container* in the *service* contains at least the following four required parameters: 72 | 73 | *name* 74 | The name of the container. 75 | 76 | *image* 77 | The Docker image to use. If your image is in AWS ECR, you will use the full format:: 78 | 79 | <account_id>.dkr.ecr.<region>.amazonaws.com/<repository>:<tag> 80 | 81 | Since we're pulling an image from Docker Hub, we just need to supply the image name:: 82 | 83 | tutum/hello-world 84 | 85 | *cpu* 86 | The number of CPU units to reserve for the container. 87 | 88 | *memory* 89 | The hard limit of memory (in MB) available to the container. 90 | 91 | 92 | 93 | Deploy 94 | ====== 95 | 96 | To deploy this service, add your configuration to the *deployfish.yml* file and in the same directory as your configuration file run:: 97 | 98 | deploy create hello-world-test 99 | 100 | If you have named your configuration file something else, you can run:: 101 | 102 | deploy -f myconfigfile.yml create hello-world-test 103 | 104 | Assuming everything ran successfully, you should be able to see the relevant info with:: 105 | 106 | deploy info hello-world-test 107 | 108 | If you make a change and would like to update the service, run:: 109 | 110 | deploy update hello-world-test 111 | 112 | -------------------------------------------------------------------------------- /docs/source/tutorial2.rst: -------------------------------------------------------------------------------- 1 | ****************** 2 | More Functionality 3 | ****************** 4 | 5 | 6 | Problem 7 | ======= 8 | 9 | In :doc:`tutorial1`, we looked at the essentials of a service. We hosted an nginx based hello-world web site. A fundamental flaw with this site, though, is that it isn't accessible from anywhere but the local Docker container, which isn't terribly useful. We need to open the relevant ports to make it available. We're also going to set some environment variables and override the Docker *command*. 10 | 11 | Setup 12 | ===== 13 | 14 | We just need the same basic setup that we had in the first tutorial, namely an ECS cluster of at least one EC2 machine named *hello-world-cluster*. 15 | 16 | Configuration 17 | ============= 18 | 19 | Here's the configuration file for this service:: 20 | 21 | services: 22 | - name: hello-world-test 23 | cluster: hello-world-cluster 24 | count: 1 25 | family: hello-world 26 | containers: 27 | - name: hello-world 28 | image: tutum/hello-world 29 | cpu: 128 30 | memory: 256 31 | ports: 32 | - "80" 33 | command: /usr/bin/supervisord 34 | environment: 35 | - VAR1=test 36 | - VAR2=anothervar 37 | - DEBUG=True 38 | 39 | Here we've added three new parameters - *ports*, *command*, and *environment*: 40 | 41 | *ports* 42 | This is a list of values, so each value begins with a dash.
In our case, we are opening up just one port, so we have the single value, *80*. This will open port 80, hosting it on a random port on the ECS cluster machine that is hosting the container. 43 | 44 | *command* 45 | This is the Docker *command* that will be run when the container is started. 46 | 47 | *environment* 48 | This is a list of values, so each begins with a dash. It is always in the form:: 49 | 50 | - VARIABLE=VALUE 51 | 52 | Anything set here will be available in the environment of the running container. 53 | 54 | Port Options 55 | ^^^^^^^^^^^^ 56 | 57 | If you want to specify the port number on the ECS cluster machine that will correspond to the container's port, you can specify that in the form HOST_PORT:CONTAINER_PORT:: 58 | 59 | ports: 60 | - "8000:80" 61 | 62 | The *hello-world* web site will then be available on port 8000 of the ECS cluster machine that is hosting the container. 63 | 64 | Deploy 65 | ====== 66 | 67 | To deploy this service, run the same command we ran in the last tutorial:: 68 | 69 | deploy create hello-world-test 70 | 71 | -------------------------------------------------------------------------------- /docs/source/tutorial3.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Load Balancing 3 | ************** 4 | 5 | Problem 6 | ======= 7 | 8 | We often want to scale an application to run on more than one container, either for performance or reliability reasons. In this tutorial, we'll add a load balancer to balance the load across two containers. 9 | 10 | Setup 11 | ===== 12 | 13 | In addition to our basic setup from the previous tutorials, you need to create a load balancer. In this example, we're using an AWS Elastic Load Balancer (ELB) and naming it *hello-world-elb*. 14 | 15 | Configuration 16 | ============= 17 | 18 | Here's the configuration file for this load balanced service:: 19 | 20 | services: 21 | - name: hello-world-test 22 | cluster: hello-world-cluster 23 | count: 1 24 | family: hello-world 25 | load_balancer: 26 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 27 | load_balancer_name: hello-world-elb 28 | container_name: hello-world 29 | container_port: 80 30 | containers: 31 | - name: hello-world 32 | image: tutum/hello-world 33 | cpu: 128 34 | memory: 256 35 | ports: 36 | - "80" 37 | command: /usr/bin/supervisord 38 | environment: 39 | - VAR1=test 40 | - VAR2=anothervar 41 | - DEBUG=True 42 | 43 | Here we've added the new parameter, *load_balancer*. This corresponds to your AWS ELB. 44 | 45 | Load Balancer Parameters 46 | ------------------------ 47 | 48 | ELB 49 | ^^^ 50 | 51 | The *load_balancer* parameter requires the following four parameters if you are using a classic AWS ELB: 52 | 53 | *service_role_arn* 54 | The name or full ARN of the IAM role that allows ECS to make calls to your load balancer on your behalf. You will need to use the ARN that corresponds to your account. 55 | 56 | *load_balancer_name* 57 | The name of the ELB. 58 | 59 | *container_name* 60 | The name of the container to associate with the load balancer. 61 | 62 | *container_port* 63 | The port on the container to associate with the load balancer. This port must correspond to a container port on container container_name in your service’s task definition. 64 | 65 | ALB or NLB 66 | ^^^^^^^^^^ 67 | 68 | AWS also offers Application Load Balancers (ALB) and Network Load Balancers (NLB).
If you are using one of those instead 69 | of the ELB, you will still use the *load_balancer* parameter, but it will require *target_group_arn* to be specified, 70 | rather than *load_balancer_name*: 71 | 72 | *target_group_arn* 73 | The full ARN of the target group to use for this service. 74 | 75 | Deploy 76 | ====== 77 | 78 | To deploy this service, run the same command we ran in the last tutorial:: 79 | 80 | deploy create hello-world-test 81 | 82 | To increase the number of running containers behind the load balancer to 2 instances, you can either modify the config, setting the count to 2:: 83 | 84 | services: 85 | - name: hello-world-test 86 | cluster: hello-world-cluster 87 | count: 2 88 | family: hello-world 89 | load_balancer: 90 | ... 91 | 92 | Then run *update*:: 93 | 94 | deploy update hello-world-test 95 | 96 | Or you can scale the service to an arbitrary count with the *scale* command:: 97 | 98 | deploy scale hello-world-test 2 99 | 100 | -------------------------------------------------------------------------------- /docs/source/tutorial6.rst: -------------------------------------------------------------------------------- 1 | ***************** 2 | Fargate Tutorial 3 | ***************** 4 | 5 | 6 | Problem 7 | ======= 8 | 9 | In :doc:`tutorial2`, we looked at an nginx based hello-world web site running on ECS EC2. In this tutorial we will see how to create the same service running on 10 | ECS Fargate. 11 | 12 | Setup 13 | ===== 14 | 15 | We just need the same basic setup that we had in the first tutorial, namely an ECS cluster named *hello-world-cluster*, but we will not need any EC2 instances. 16 | 17 | Configuration 18 | ============= 19 | 20 | Here's the configuration file for this service:: 21 | 22 | services: 23 | - name: hello-world-test 24 | cluster: hello-world-cluster 25 | count: 1 26 | family: hello-world 27 | network_mode: awsvpc 28 | launch_type: FARGATE 29 | execution_role: arn:aws:iam::123142123547:role/my-task-role 30 | cpu: 256 31 | memory: 512 32 | vpc_configuration: 33 | subnets: 34 | - subnet-12345678 35 | - subnet-87654321 36 | security_groups: 37 | - sg-12345678 38 | public_ip: ENABLED 39 | containers: 40 | - name: hello-world 41 | image: tutum/hello-world 42 | cpu: 128 43 | memory: 256 44 | ports: 45 | - "80" 46 | command: /usr/bin/supervisord 47 | environment: 48 | - VAR1=test 49 | - VAR2=anothervar 50 | - DEBUG=True 51 | 52 | You will notice that we have added several new parameters - *launch_type*, *execution_role*, *cpu*, *memory*, 53 | and *vpc_configuration*: 54 | 55 | *launch_type* 56 | This is the parameter that specifies whether the service is an EC2 service or a FARGATE service. The default value is EC2, 57 | so you only need to specify this for a Fargate task. 58 | 59 | *execution_role* 60 | This is the task execution role ARN for an IAM role that allows Fargate to pull container images and publish container logs 61 | to Amazon CloudWatch on your behalf. 62 | 63 | *cpu* 64 | For Fargate tasks you are required to define the CPU at the task level, and there are specific values that are allowed. 65 | 66 | ================== 67 | CPU value 68 | ================== 69 | 256 (.25 vCPU) 70 | 512 (.5 vCPU) 71 | 1024 (1 vCPU) 72 | 2048 (2 vCPU) 73 | 4096 (4 vCPU) 74 | 75 | *memory* 76 | For Fargate tasks you are required to define the memory at the task level, and there are specific values that are allowed.
77 | 78 | ===================================================================================== 79 | Memory value (MiB) 80 | ===================================================================================== 81 | 512 (0.5GB), 1024 (1GB), 2048 (2GB) 82 | 1024 (1GB), 2048 (2GB), 3072 (3GB), 4096 (4GB) 83 | 2048 (2GB), 3072 (3GB), 4096 (4GB), 5120 (5GB), 6144 (6GB), 7168 (7GB), 8192 (8GB) 84 | Between 4096 (4GB) and 16384 (16GB) in increments of 1024 (1GB) 85 | Between 8192 (8GB) and 30720 (30GB) in increments of 1024 (1GB) 86 | 87 | *vpc_configuration* 88 | The VPC configuration for any Fargate task requires the following three parameters: 89 | 90 | *subnets (array)* 91 | The subnets in the VPC that the task scheduler should consider for placement. 92 | Only private subnets are supported at this time. The VPC will be determined by the subnets you 93 | specify, so if you specify multiple subnets they must be in the same VPC. 94 | *security_groups (array)* 95 | The IDs of the security groups to associate with the service. 96 | *public_ip (string)* 97 | Whether to enable or disable a public IP. Valid values are ``ENABLED`` or ``DISABLED``. 98 | 99 | 100 | 101 | Deploy 102 | ====== 103 | 104 | To deploy this service, run the same command we ran in the last tutorial:: 105 | 106 | deploy create hello-world-test 107 | 108 | -------------------------------------------------------------------------------- /docs/source/tutorials.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Tutorials 3 | ========= 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | :caption: Contents: 8 | 9 | tutorial1 10 | tutorial2 11 | tutorial3 12 | tutorial4 13 | tutorial5 14 | tutorial6 15 | advanced 16 | -------------------------------------------------------------------------------- /examples/appscaling.yml: -------------------------------------------------------------------------------- 1 | # appscaling.yml 2 | # 3 | # This file describes a service with an ELB which has application scaling 4 | # configured. 5 | # 6 | 7 | services: 8 | - name: my-service-scaling 9 | cluster: my-cluster 10 | count: 2 11 | load_balancer: 12 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 13 | load_balancer_name: my-elb 14 | container_name: my-service 15 | container_port: 80 16 | application_scaling: 17 | min_capacity: 2 18 | max_capacity: 4 19 | role_arn: arn:aws:iam::123445678901:role/ApplicationAutoscalingECSRole 20 | scale-up: 21 | cpu: ">=60" 22 | check_every_seconds: 60 23 | periods: 5 24 | cooldown: 60 25 | scale_by: 1 26 | scale-down: 27 | cpu: "<=30" 28 | check_every_seconds: 60 29 | periods: 60 30 | cooldown: 60 31 | scale_by: -1 32 | family: my-service 33 | network_mode: bridge 34 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 35 | containers: 36 | - name: my-service 37 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 38 | cpu: 128 39 | memory: 256 40 | ports: 41 | - "80" 42 | environment: 43 | - S3_BUCKET=my-bucket 44 | -------------------------------------------------------------------------------- /examples/asg.yml: -------------------------------------------------------------------------------- 1 | # asg.yml 2 | # 3 | # This defines a service with a load balancer that has a dedicated autoscaling 4 | # group.
5 | # 6 | 7 | services: 8 | - name: my-service 9 | cluster: my-cluster 10 | count: 2 11 | autoscalinggroup_name: my-asg 12 | load_balancer: 13 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 14 | load_balancer_name: my-elb 15 | container_name: my-service 16 | container_port: 80 17 | family: my-service 18 | network_mode: bridge 19 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 20 | containers: 21 | - name: my-service 22 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 23 | cpu: 128 24 | memory: 256 25 | ports: 26 | - "80" 27 | environment: 28 | - S3_BUCKET=my-bucket 29 | -------------------------------------------------------------------------------- /examples/basic.yml: -------------------------------------------------------------------------------- 1 | # basic.yml 2 | # 3 | # This file has two pretty standard services. 4 | # 5 | # * They both use a load balancer 6 | # * They both create 2 tasks in the cluster my-cluster 7 | # * They both use a task role 8 | # * They both use bridge networking 9 | # * Their tasks both have a single container whose image comes from an AWS ECR 10 | # repository and which has a single port open 11 | # 12 | # my-service-elb uses an ELB for its load balancer (requires load_balancer_name) 13 | # my-service-alb uses an ALB for its load balancer (requires target_group_arn) 14 | # 15 | # 16 | 17 | services: 18 | - name: my-service-elb 19 | cluster: my-cluster 20 | count: 2 21 | load_balancer: 22 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 23 | load_balancer_name: my-elb 24 | container_name: my-service 25 | container_port: 80 26 | family: my-service 27 | network_mode: bridge 28 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 29 | containers: 30 | - name: my-service 31 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 32 | cpu: 128 33 | memory: 256 34 | ports: 35 | - "80" 36 | environment: 37 | - S3_BUCKET=my-bucket 38 | - name: my-service-alb 39 | cluster: my-cluster 40 | count: 2 41 | load_balancer: 42 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 43 | target_group_arn: arn:aws:elasticloadbalancing:us-west-2:123445564666:targetgroup/my-service-alb/4bf28a0fa5978cea 44 | container_name: my-service 45 | container_port: 80 46 | family: my-service 47 | network_mode: bridge 48 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 49 | containers: 50 | - name: my-service 51 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 52 | cpu: 128 53 | memory: 256 54 | ports: 55 | - "80" 56 | environment: 57 | - S3_BUCKET=my-bucket 58 | -------------------------------------------------------------------------------- /examples/different-aws-profile.yml: -------------------------------------------------------------------------------- 1 | # basic.yml 2 | # 3 | # This is exactly like basic.yml, except that we're telling deployfish to 4 | # use a different AWS Named Profile to connect to AWS. 
5 | # 6 | aws: 7 | profile: other 8 | allowed_account_ids: 9 | - "12134566" 10 | 11 | services: 12 | - name: my-service-elb 13 | cluster: my-cluster 14 | count: 2 15 | load_balancer: 16 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 17 | load_balancer_name: my-elb 18 | container_name: my-service 19 | container_port: 80 20 | family: my-service 21 | network_mode: bridge 22 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 23 | containers: 24 | - name: my-service 25 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 26 | cpu: 128 27 | memory: 256 28 | ports: 29 | - "80" 30 | environment: 31 | - S3_BUCKET=my-bucket 32 | - name: my-service-alb 33 | cluster: my-cluster 34 | count: 2 35 | load_balancer: 36 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 37 | target_group_arn: arn:aws:elasticloadbalancing:us-west-2:123445564666:targetgroup/my-service-alb/4bf28a0fa5978cea 38 | container_name: my-service 39 | container_port: 80 40 | family: my-service 41 | network_mode: bridge 42 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 43 | containers: 44 | - name: my-service 45 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 46 | cpu: 128 47 | memory: 256 48 | ports: 49 | - "80" 50 | environment: 51 | - S3_BUCKET=my-bucket 52 | -------------------------------------------------------------------------------- /examples/fargate.yml: -------------------------------------------------------------------------------- 1 | # fargate.yml 2 | # 3 | # This file has two pretty standard fargate services. 4 | # 5 | # * They both use a load balancer 6 | # * They both create 2 tasks in the cluster my-cluster 7 | # * They both use a task role 8 | # * They both use a execution role which is required for fargate tasks 9 | # * They both use awsvpc networking which is required for fargate tasks 10 | # * Their tasks both have a single container whose image comes from an AWS ECR 11 | # repository and which has a single port open 12 | # 13 | # my-service-elb uses an ELB for its load balancer (requires load_balancer_name) 14 | # my-service-alb uses an ALB for its load balancer (requires target_group_arn) 15 | # 16 | 17 | services: 18 | - name: my-service-elb 19 | cluster: my-cluster 20 | count: 2 21 | load_balancer: 22 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 23 | load_balancer_name: my-elb 24 | container_name: my-service 25 | container_port: 80 26 | family: my-service 27 | network_mode: awsvpc 28 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 29 | maximum_percent: 200 30 | minimum_healthy_percent: 50 31 | launch_type: FARGATE 32 | cpu: 256 33 | memory: 512 34 | requires_compatibilities: 35 | - FARGATE 36 | vpc_configuration: 37 | subnets: 38 | - subnet-12345678 39 | - subnet-87654321 40 | security_groups: 41 | - sg-12345678 42 | public_ip: ENABLED 43 | containers: 44 | - name: my-service 45 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 46 | cpu: 128 47 | memory: 256 48 | ports: 49 | - "80" 50 | environment: 51 | - S3_BUCKET=my-bucket 52 | - name: my-service-alb 53 | cluster: my-cluster 54 | count: 2 55 | load_balancer: 56 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 57 | target_group_arn: arn:aws:elasticloadbalancing:us-west-2:123445564666:targetgroup/my-service-alb/4bf28a0fa5978cea 58 | container_name: my-service 59 | container_port: 80 60 | family: my-service 61 | network_mode: awsvpc 62 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 
63 | maximum_percent: 200 64 | minimum_healthy_percent: 50 65 | launch_type: FARGATE 66 | cpu: 256 67 | memory: 512 68 | requires_compatibilities: 69 | - FARGATE 70 | vpc_configuration: 71 | subnets: 72 | - subnet-12345678 73 | - subnet-87654321 74 | security_groups: 75 | - sg-12345678 76 | public_ip: ENABLED 77 | containers: 78 | - name: my-service 79 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 80 | cpu: 128 81 | memory: 256 82 | ports: 83 | - "80" 84 | environment: 85 | - S3_BUCKET=my-bucket 86 | -------------------------------------------------------------------------------- /examples/mulitple-containers.yml: -------------------------------------------------------------------------------- 1 | # multiple-containers.yml 2 | # 3 | # This file defines a service with a task definition with three containers: 4 | # a application container, a redis container and a mysql container. 5 | # 6 | # This config uses links: to link the application container to the redis 7 | # and db containers, and the db container uses an alias. 8 | # 9 | # Note also that our application image comes from ECR, while the 10 | # redis and mysql images come from Docker Hub. 11 | 12 | services: 13 | - name: my-service 14 | cluster: my-cluster 15 | count: 2 16 | load_balancer: 17 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 18 | load_balancer_name: my-elb 19 | container_name: my-service 20 | container_port: 80 21 | family: my-service 22 | network_mode: bridge 23 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 24 | containers: 25 | - name: my-service 26 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 27 | cpu: 128 28 | memory: 256 29 | ports: 30 | - "80" 31 | environment: 32 | - S3_BUCKET=my-bucket 33 | links: 34 | - redis 35 | - db:database 36 | - name: redis 37 | image: redis:latest 38 | cpu: 128 39 | memory: 256 40 | - name: db 41 | image: mysql:5.5.57 42 | cpu: 128 43 | memory: 512 44 | environment: 45 | MYSQL_ROOT_PASSWORD: __MYSQL_ROOT_PASSWD__ 46 | -------------------------------------------------------------------------------- /examples/no-elb.yml: -------------------------------------------------------------------------------- 1 | # no-elb.yml 2 | # 3 | # This file defines a basic service with no ELB. 4 | 5 | services: 6 | - name: my-service-elb 7 | cluster: my-cluster 8 | count: 1 9 | family: my-service 10 | network_mode: bridge 11 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 12 | containers: 13 | - name: my-service 14 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 15 | cpu: 128 16 | memory: 256 17 | ports: 18 | - "80" 19 | environment: 20 | - S3_BUCKET=my-bucket 21 | -------------------------------------------------------------------------------- /examples/parameter-store.yml: -------------------------------------------------------------------------------- 1 | # parameter-store.yml 2 | # 3 | # This file has a standard service that uses AWS Parameter Store 4 | # to store its secrets. 
5 | # 6 | # * It has a load balancer 7 | # * It has 2 tasks in the cluster my-cluster 8 | # * It uses a task role 9 | # * It uses bridge networking 10 | # * Its task has a single container whose image comes from an AWS ECR 11 | # repository and which has a single port open 12 | # 13 | # my-service uses an ELB for its load balancer 14 | # 15 | 16 | services: 17 | - name: my-service 18 | cluster: my-cluster 19 | count: 2 20 | load_balancer: 21 | service_role_arn: arn:aws:iam::123445564666:role/ecsServiceRole 22 | load_balancer_name: my-elb 23 | container_name: my-service 24 | container_port: 80 25 | family: my-service 26 | network_mode: bridge 27 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 28 | config: 29 | - 30 | containers: 31 | - name: my-service 32 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 33 | cpu: 128 34 | memory: 256 35 | ports: 36 | - "80" 37 | environment: 38 | - S3_BUCKET=my-bucket 39 | 40 | -------------------------------------------------------------------------------- /examples/run_task.yml: -------------------------------------------------------------------------------- 1 | tasks: 2 | - name: run-task 3 | family: run-task 4 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 5 | network_mode: awsvpc 6 | launch_type: FARGATE 7 | cpu: 256 8 | memory: 512 9 | execution_role_arn: arn:aws:iam::123445564666:role/run-task-test 10 | containers: 11 | - name: hello-world 12 | image: hello-world 13 | cpu: 256 14 | memory: 512 15 | vpc_configuration: 16 | subnets: 17 | - subnet-xxxxxxxx 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /examples/terraform-basic.yml: -------------------------------------------------------------------------------- 1 | # terraform-basic.yml 2 | # 3 | # This file is just like basic.yml, except that we've set it up to pull 4 | # values from a terraform state file. 5 | # 6 | # Terraform lookups are evaluated within the context of each service 7 | # separately. Terraform lookups can be used in any string value in the service 8 | # definition. In the terraform:lookups section, you can use these replacements in the 9 | # lookups definitions: 10 | # 11 | # * {environment}: replace with the value of the 'environment' option for the 12 | # current service 13 | # * {service-name}: replace with the name of the current service 14 | # * {cluster-name}: replace with the name of the cluster for the current service 15 | # 16 | # When the string replacements are done in the service definition, deployfish 17 | # will retrieve your terraform statefile and replace any ${terraform.<key>} 18 | # with the corresponding terraform output, where <key> matches one of the keys 19 | # in terraform:lookups. 20 | # 21 | # The 'statefile' keyword tells deployfish where in S3 the appropriate 22 | # terraform statefile lives.
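# For example, with the lookups below, the my-service-elb service's reference to
# ${terraform.cluster_name} resolves to the terraform output named
# 'my-service-elb-ecs-cluster-name' (because {service-name} is replaced first), and its
# S3_BUCKET=${terraform.storage_bucket} setting resolves to 's3-prod-bucket', since that
# service's 'environment' is prod.
#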
23 | 24 | terraform: 25 | statefile: 's3://terraform-remote-state/my-service-terraform-state' 26 | lookups: 27 | ecs_service_role: 'ecs-service-role' 28 | cluster_name: '{service-name}-ecs-cluster-name' 29 | elb_name: '{service-name}-elb-name' 30 | target_group_arn: '{service-name}-target-group-arn' 31 | storage_bucket: 's3-{environment}-bucket' 32 | task_role_arn: '{service-name}-task-role-arn' 33 | ecr_repo_url: 'ecr-repository-url' 34 | 35 | services: 36 | - name: my-service-elb 37 | cluster: ${terraform.cluster_name} 38 | environment: prod 39 | count: 2 40 | load_balancer: 41 | service_role_arn: ${terraform.ecs_service_role} 42 | load_balancer_name: ${terraform.elb_name} 43 | container_name: my-service 44 | container_port: 80 45 | family: my-service 46 | network_mode: bridge 47 | task_role_arn: ${terraform.task-role-arn} 48 | containers: 49 | - name: my-service 50 | image: ${terraform.ecr_repo_url}:0.1.0 51 | cpu: 128 52 | memory: 256 53 | ports: 54 | - "80" 55 | environment: 56 | - S3_BUCKET=${terraform.storage_bucket} 57 | - name: my-service-alb 58 | cluster: ${terraform.cluster_name} 59 | environment: test 60 | count: 2 61 | load_balancer: 62 | service_role_arn: ${terraform.ecs_service_role} 63 | target_group_arn: ${terraform.target_group_arn} 64 | container_name: my-service 65 | container_port: 80 66 | family: my-service 67 | network_mode: bridge 68 | task_role_arn: ${terraform.task-role-arn} 69 | containers: 70 | - name: my-service 71 | image: ${terraform.ecr_repo_url}:0.1.0 72 | cpu: 128 73 | memory: 256 74 | ports: 75 | - "80" 76 | environment: 77 | - S3_BUCKET=${terraform.storage_bucket} 78 | -------------------------------------------------------------------------------- /examples/tutorial_1.yml: -------------------------------------------------------------------------------- 1 | # tutorial_1.yml 2 | # 3 | # This file defines a minimal service. 
4 | 5 | services: 6 | - name: hello-world-test 7 | cluster: deployfish 8 | count: 1 9 | family: hello-world 10 | containers: 11 | - name: hello-world 12 | image: tutum/hello-world 13 | cpu: 128 14 | memory: 256 15 | -------------------------------------------------------------------------------- /examples/tutorial_2.yml: -------------------------------------------------------------------------------- 1 | # tutorial_2.yml 2 | 3 | services: 4 | - name: hello-world-test 5 | cluster: hello-world-cluster 6 | count: 1 7 | family: hello-world 8 | containers: 9 | - name: hello-world 10 | image: tutum/hello-world 11 | cpu: 128 12 | memory: 256 13 | ports: 14 | - "80" 15 | command: /usr/bin/supervisord 16 | environment: 17 | - VAR1=test 18 | - VAR2=anothervar 19 | - DEBUG=True 20 | -------------------------------------------------------------------------------- /examples/volumes.yml: -------------------------------------------------------------------------------- 1 | # volumes.yml 2 | # 3 | # This file demonstrates two ways to to declare a volume mount for a container 4 | # 5 | 6 | services: 7 | - name: my-simple-volume-service 8 | cluster: my-cluster 9 | count: 2 10 | family: my-service 11 | network_mode: bridge 12 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 13 | containers: 14 | - name: my-service 15 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 16 | cpu: 128 17 | memory: 256 18 | ports: 19 | - "80" 20 | environment: 21 | - S3_BUCKET=my-bucket 22 | volumes: 23 | - /host/path:/container/path 24 | - /host/path-ro:/container/path-ro:ro 25 | 26 | - name: my-full-volume-service 27 | cluster: my-cluster 28 | count: 2 29 | family: my-service 30 | network_mode: bridge 31 | task_role_arn: arn:aws:iam::123445564666:role/ecsTaskRole 32 | containers: 33 | - name: my-service 34 | image: 123445564666.dkr.ecr.us-west-2.amazonaws.com/my-service:0.1.0 35 | cpu: 128 36 | memory: 256 37 | ports: 38 | - "80" 39 | environment: 40 | - S3_BUCKET=my-bucket 41 | volumes: 42 | - storage:/container/path 43 | volumes: 44 | - name: storage 45 | config: 46 | scope: shared 47 | driver: my_vol_driver:latest 48 | driverOpts: 49 | opt1: value1 50 | opt2: value2 51 | labels: 52 | key: value 53 | key: value 54 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools >= 48", 4 | "wheel >= 0.29.0", 5 | "toml == 0.10.2", 6 | ] 7 | build-backend = "setuptools.build_meta" 8 | 9 | ############################################################################### 10 | # Ruff Configuration 11 | ############################################################################### 12 | [tool.ruff] 13 | line-length = 88 14 | indent-width = 4 15 | target-version = "py312" 16 | # Tell ruff that this project's python code all lives inside the {{cookiecutter.project_python_name}} subfolder. 17 | src = ["deployfish"] 18 | # Don't lint the venv, the test code, or the migrations. 19 | exclude = [".venv", "*/tests/*"] 20 | 21 | [tool.ruff.format] 22 | # Like Black, use double quotes for strings. 23 | quote-style = "double" 24 | # Like Black, indent with spaces, rather than tabs. 25 | indent-style = "space" 26 | # Like Black, respect magic trailing commas. 27 | skip-magic-trailing-comma = false 28 | # Like Black, automatically detect the appropriate line ending. 
29 | line-ending = "auto" 30 | # Disable auto-formatting of code examples in docstrings. 31 | # This is currently disabled by default, but it is planned for this 32 | # to be opt-out in the future, so we're opting out now. 33 | docstring-code-format = false 34 | # Set the line length limit used when formatting code snippets in docstrings. 35 | # This only has an effect when the docstring-code-format = true. 36 | docstring-code-line-length = "dynamic" 37 | 38 | [tool.ruff.lint] 39 | select = ["ALL"] 40 | # Allow fix for all enabled rules (when `--fix`) is provided. 41 | fixable = ["ALL"] 42 | unfixable = [] 43 | # Allow unused variables if their names are underscore-prefixed. 44 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" 45 | ignore = [ 46 | #### Ignore entire modules 47 | "ANN", # flake8-annotations 48 | "COM", # flake8-commas 49 | "C90", # mccabe complexity 50 | "TID", # flake8-tidy-imports 51 | 52 | #### Ignore specific rules 53 | "CPY001", # ignore missing copyright notices 54 | "D100", # ignore missing docs 55 | "D101", # ignore Missing docstring in public class 56 | "D102", # ignore Missing docstring in public method 57 | "D103", # ignore Missing docstring in public function 58 | "D104", # ignore Missing docstring in public package 59 | "D105", # ignore Missing docstring in magic method 60 | "D106", # ignore Missing docstring in public nested class 61 | "D107", # ignore Missing docstring in __init__ method 62 | "D200", # One-line docstring should fit on one line 63 | "D203", # 1 blank required before class docstring 64 | "D205", # 1 blank line required between summary line and description 65 | "D211", # No blank lines allowed before class docstring 66 | "D212", # Multi-line docstring summary should start at the first line 67 | "D400", # First line of docstring should end with a period 68 | "D401", # First line of docstring should be in imperative mood 69 | "D415", # First line of docstring should end with a period, question mark, or exclamation point 70 | "DOC201", # Ignore missing "Return" section in docstring 71 | "E402", # Ignore imports that aren't at the top of the file 72 | "FIX002", # Line contains "TODO", consider resolving the issue 73 | "N818", # Stop bugging me about not ending my exceptions with "Error" 74 | "PLC0415", # Ignore imports that aren't at the top level. Sometimes that's needed to avoid circular imports 75 | "PLR6201", # Ignore list literals used in membership tests. We don't care about the performance boost from sets 76 | "RUF012", # Ignore mutable class attrs. Wagtail expects lists for a lot of those, and our code never touches them 77 | "S603", # ignore subprocess calls that do not check return code 78 | "S607", # ignore subprocess programs that are not absolute paths 79 | "SIM102", # Don't try to combine nested ifs 80 | "SLF001", # Ignore access to attributes starting with a single _. Django's Model._meta is used all over the place. 81 | "TD002", # Missing author in TODO; try: # TODO(): ... or # TODO @: 82 | "TD003", # Missing issue link on the line following this TODO 83 | "TRY003", # external messages in exceptions are too verbose 84 | ] 85 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e . 
2 | 3 | click>=6.7 # https://github.com/pallets/click 4 | PyYaml>=5.1 # https://github.com/yaml/pyyaml 5 | requests>=2.18.4 # https://github.com/psf/requests 6 | jsondiff2>=1.2.3 # https://github.com/edoreld/jsondiff 7 | inspect2>=0.1.2 # https://github.com/JelleZijlstra/inspect2 8 | jinja2>=2.11 # https://github.com/pallets/jinja 9 | typer>=0.6.1 # https://github.com/tiangolo/typer 10 | tzlocal>=4.0.1 11 | cement==3.0.10 # https://github.com/datafolklabs/cement 12 | tabulate>=0.8.1 13 | # ---- Purposely unpinned ---- 14 | boto3>=1.22 # https://github.com/boto/boto3 15 | 16 | # Packaging 17 | # ------------------------------------------------------------------------------ 18 | bumpversion==0.5.3 # https://github.com/peritus/bumpversion 19 | twine # https://github.com/pypa/twine/ 20 | tox # https://github.com/tox-dev/to 21 | wheel # https://github.com/pypa/wheel 22 | Sphinx # https://github.com/sphinx-doc/sphinx 23 | sphinx-autobuild # https://github.com/GaretJax/sphinx-autobuild 24 | sphinx-click # https://github.com/click-contrib/sphinx-click 25 | sphinx_rtd_theme # https://github.com/readthedocs/sphinx_rtd_theme 26 | 27 | # Development 28 | # ------------------------------------------------------------------------------ 29 | autopep8 # https://github.com/hhatto/autopep8 30 | flake8 # https://github.com/PyCQA/flake8 31 | pycodestyle # https://github.com/PyCQA/pycodestyle 32 | mypy # https://github.com/python/mypy 33 | debugpy # https://github.com/microsoft/debugpy 34 | # Testing 35 | # ------------------------------------------------------------------------------ 36 | testfixtures>=6.10.0 # https://github.com/Simplistix/testfixtures 37 | mock==3.0.5 # https://github.com/testing-cabal/mock 38 | nose==1.3.7 # https://github.com/nose-devs/nose 39 | 40 | # Other utils 41 | # ------------------------------------------------------------------------------ 42 | ipython>=7.1.0 # https://github.com/ipython/ipython 43 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = deployfish 3 | version = 1.15.1 4 | description = AWS ECS related deployment tools 5 | author = Caltech IMSS ADS 6 | author_email = imss-ads-staff@caltech.edu 7 | url = https://github.com/caltechads/deployfish 8 | long_description = file: README.md 9 | long_description_content_type = text/markdown; charset=UTF-8 10 | keywords = 11 | aws 12 | ecs 13 | docker 14 | devops 15 | classifiers = 16 | Development Status :: 5 - Production/Stable 17 | Environment :: Console 18 | Intended Audience :: System Administrators 19 | Programming Language :: Python :: 3 20 | Programming Language :: Python :: 3.7 21 | Programming Language :: Python :: 3.8 22 | Programming Language :: Python :: 3.9 23 | Programming Language :: Python :: 3.10 24 | Programming Language :: Python :: 3.11 25 | Topic :: System :: Installation/Setup 26 | Topic :: System :: Systems Administration 27 | project_urls = 28 | Documentation = https://deployfish.readthedocs.io/en/latest/ 29 | Source = https://github.com/caltechads/deployfish 30 | Issues = https://github.com/caltechads/deployfish/issues 31 | 32 | [options] 33 | zip_safe = False 34 | platforms = any 35 | packages = find: 36 | include_package_data = True 37 | python_requires = >=3.7 38 | install_requires = 39 | boto3 >= 1.26.36 40 | debugpy 41 | cement==3.0.10 42 | click >= 6.7 43 | colorlog 44 | jinja2 >= 2.11 45 | jsondiff2 >= 1.2.3 46 | pytz 47 | PyYAML >= 5.1 48 | 
requests >= 2.18.4 49 | shellescape >= 3.8.1 50 | tabulate >= 0.8.1 51 | typing_extensions 52 | tzlocal >= 4.0.1 53 | docker >= 7.1.0 54 | slack_sdk >= 3.31.0 55 | slackfin >= 0.2.2 56 | gitpython >= 3.1.43 57 | giturlparse >= 0.12.0 58 | setuptools >= 74.1.2 59 | simplesqs >= 0.4.0 60 | toml >= 0.10.2 61 | 62 | [options.entry_points] 63 | console_scripts = 64 | deploy = deployfish.main:main 65 | dpy = deployfish.main:main 66 | 67 | [options.package_data] 68 | deplofish = 69 | deployfish = py.typed 70 | 71 | [bdist_wheel] 72 | universal = 1 73 | 74 | 75 | # ------- 76 | # Linters 77 | # ------- 78 | 79 | [flake8] 80 | max-line-length: 120 81 | filename: *.py 82 | exclude: *.cfg, *.js, *.json, *.bak, *.md, *.sql, *.sh, *.txt, *.yml, simple_test_db, Makefile, Dockerfile, MANIFEST.in 83 | # E221: multiple spaces before operator 84 | # E241: multiple spaces after : 85 | # E265: block comment should start with '# ' 86 | # E266: too many leading '#' for block comment 87 | # E401: multiple imports on one line 88 | # W503: line break before binary operator 89 | ignore = E221,E241,E265,E266,E401,C0321,W503,C901 90 | 91 | [pylint.FORMAT] 92 | max-line-length=120 93 | 94 | [pylint.MESSAGES CONTROL] 95 | disable= 96 | missing-docstring, 97 | protected-access, 98 | unused-argument, 99 | invalid-name, 100 | too-few-public-methods, 101 | attribute-defined-outside-init, 102 | consider-using-f-string, 103 | too-many-lines, 104 | no-member, 105 | unnecessary-pass, 106 | 107 | [mypy] 108 | python_executable: ~/.pyenv/shims/python 109 | implicit_optional = True 110 | exclude = (^build/.*$|^docs/.*\.py$|test_.*\.py$) 111 | 112 | [mypy-jsondiff] 113 | ignore_missing_imports = True 114 | 115 | [mypy-shellescape] 116 | ignore_missing_imports = True 117 | 118 | [mypy-botocore.docs.docstring] 119 | ignore_missing_imports = True 120 | 121 | [mypy-cement.*] 122 | ignore_missing_imports = True 123 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import setuptools 3 | 4 | if __name__ == "__main__": 5 | setuptools.setup() 6 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py27,py36 3 | [testenv] 4 | deps=-rrequirements.txt 5 | commands=nosetests 6 | --------------------------------------------------------------------------------