├── .circleci ├── config.yml ├── package_release.py ├── py3fixers └── test_examples.py ├── .drp └── trufflehog_config.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.txt ├── LICENSE ├── Makefile ├── README.md ├── cloudify_docker ├── __init__.py ├── __version__.py ├── ansible.py ├── constants.py ├── resources │ └── post-install.sh ├── tasks.py ├── terraform.py └── tests │ ├── __init__.py │ └── test_plugin.py ├── plugin.yaml ├── plugin_1_4.yaml ├── plugin_1_5.yaml ├── requirements-3.6.in ├── requirements-3.6.txt ├── requirements.in ├── requirements.txt ├── setup.py ├── test-requirements.txt ├── tox.ini └── v2_plugin.yaml /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | job-post-steps: &job-post-steps 4 | post-steps: 5 | - slack/notify_failed 6 | 7 | unittest-post-steps: &unittest-post-steps 8 | post-steps: 9 | - store_test_results: 10 | path: /home/circleci/project/nosetests.xml 11 | - store_artifacts: 12 | path: /home/circleci/project/coverage.xml 13 | prefix: tests 14 | - slack/notify_failed 15 | 16 | orbs: 17 | node: cloudify/public-unittest-orb@volatile 18 | wagonorb: cloudify/wagon-bulder-orb@volatile 19 | releaseorb: cloudify/release-orb@volatile 20 | slack: cloudify/notify-slack@2 21 | 22 | checkout: 23 | post: 24 | - > 25 | if [ -n "$CI_PULL_REQUEST" ]; then 26 | PR_ID=${CI_PULL_REQUEST##*/} 27 | git fetch origin +refs/pull/$PR_ID/merge: 28 | git checkout -qf FETCH_HEAD 29 | fi 30 | 31 | executors: 32 | 33 | cloudify-machine-py3: 34 | machine: 35 | image: ubuntu-2004:202201-02 36 | 37 | commands: 38 | 39 | setup_manager: 40 | steps: 41 | - run: | 42 | if [[ -z "${CLOUDIFY_HOST}" ]]; then 43 | exit 1 44 | fi 45 | - run: | 46 | if [[ -z "${CLOUDIFY_TENANT}" ]] && [ -z "${CIRCLE_PROJECT_REPONAME}" ]; then 47 | exit 1 48 | fi 49 | - run: | 50 | if [[ -z "${CLOUDIFY_TOKEN}" ]]; then 51 | exit 1 52 | fi 53 | - run: | 54 | response=$(curl --write-out '%{http_code}' --silent --insecure --header "Tenant: ${CLOUDIFY_TENANT}" --header "Authentication-Token: ${CLOUDIFY_TOKEN}" https://$CLOUDIFY_HOST/api/v3.1/status --output /dev/null) 55 | if [[ $response != 200 ]]; then 56 | echo "Failed to get manager status"; 57 | exit 1 58 | fi 59 | - run: pip3 install urllib3==1.26.15 requests-toolbelt==0.10.1 60 | - run: pip3 install https://github.com/cloudify-incubator/cloudify-ecosystem-test/archive/refs/heads/master.zip 61 | - run: git submodule update --init --recursive --remote 62 | 63 | prepare_test_manager: 64 | steps: 65 | - run: ecosystem-test prepare-remote-test-manager -es gcp_credentials=$gcp_credentials -p $(find ~/project/workspace/build/ -name *manylinux-py311-none-linux_x86_64.wgn*) ~/project/plugin_1_4.yaml 66 | 67 | run_test: 68 | steps: 69 | - run: ecosystem-test remote-blueprint-test -b examples/docker/install-docker-gcp-vm/docker-on-vm.yaml -i agent_user=centos --timeout=3000 --on-failure=uninstall-force --test-id=docker_vm_test 70 | 71 | jobs: 72 | integration_tests: 73 | executor: cloudify-machine-py3 74 | environment: 75 | CLOUDIFY_SSL_TRUST_ALL: true 76 | IAAS: gcp 77 | steps: 78 | - checkout 79 | - attach_workspace: 80 | at: workspace 81 | - setup_manager 82 | - prepare_test_manager 83 | - run_test 84 | 85 | workflows: 86 | version: 2 87 | tests: 88 | jobs: 89 | - node/check_py3_compat_job 90 | - node/unittests_job: 91 | context: 92 | - plugins-inputs 93 | <<: *unittest-post-steps 94 | - wagonorb/wagon: 95 | filters: 96 | branches: 97 | only: /([0-9\.]*\-build|master|dev)/ 98 | - wagonorb/wagon_311: 
99 | filters: 100 | branches: 101 | only: /([0-9\.]*\-build|master|dev)/ 102 | - wagonorb/rhel_wagon: 103 | filters: 104 | branches: 105 | only: /([0-9\.]*\-build|master|dev)/ 106 | - wagonorb/arch64_wagon: 107 | filters: 108 | branches: 109 | only: /([0-9\.]*\-build|master|dev)/ 110 | - integration_tests: 111 | context: 112 | - plugins-inputs 113 | requires: 114 | - wagonorb/wagon 115 | - wagonorb/wagon_311 116 | - wagonorb/arch64_wagon 117 | - wagonorb/rhel_wagon 118 | filters: 119 | branches: 120 | only: /([0-9\.]*\-build|master|dev)/ 121 | - releaseorb/release: 122 | context: 123 | - plugins-inputs 124 | filters: 125 | branches: 126 | only: /master/ 127 | requires: 128 | - integration_tests 129 | nightly: 130 | triggers: 131 | - schedule: 132 | cron: "0 1 * * 0,1,3,5" 133 | filters: 134 | branches: 135 | only: 136 | - master 137 | jobs: 138 | - node/check_py3_compat_job 139 | - node/unittests_job: 140 | context: 141 | - plugins-inputs 142 | <<: *unittest-post-steps 143 | - wagonorb/wagon: 144 | filters: 145 | branches: 146 | only: /([0-9\.]*\-build|master|dev)/ 147 | - wagonorb/wagon_311: 148 | filters: 149 | branches: 150 | only: /([0-9\.]*\-build|master|dev)/ 151 | - wagonorb/rhel_wagon: 152 | filters: 153 | branches: 154 | only: /([0-9\.]*\-build|master|dev)/ 155 | - wagonorb/arch64_wagon: 156 | filters: 157 | branches: 158 | only: /([0-9\.]*\-build|master|dev)/ 159 | - integration_tests: 160 | context: 161 | - plugins-inputs 162 | <<: *job-post-steps 163 | requires: 164 | - wagonorb/wagon 165 | - wagonorb/wagon_311 166 | - wagonorb/arch64_wagon 167 | - wagonorb/rhel_wagon 168 | filters: 169 | branches: 170 | only: /([0-9\.]*\-build|master|dev)/ 171 | 172 | -------------------------------------------------------------------------------- /.circleci/package_release.py: -------------------------------------------------------------------------------- 1 | from os import path, pardir 2 | from ecosystem_cicd_tools.release import ( 3 | plugin_release_with_latest, find_version) 4 | 5 | setup_py = path.join( 6 | path.abspath(path.join(path.dirname(__file__), pardir)), 7 | 'setup.py') 8 | 9 | 10 | if __name__ == '__main__': 11 | plugin_release_with_latest( 12 | 'cloudify-docker-plugin', find_version(setup_py)) 13 | -------------------------------------------------------------------------------- /.circleci/py3fixers: -------------------------------------------------------------------------------- 1 | --stage1 2 | -f lib2to3.fixes.fix_getcwdu 3 | -f lib2to3.fixes.fix_long 4 | -f lib2to3.fixes.fix_nonzero 5 | -f lib2to3.fixes.fix_input 6 | -f lib2to3.fixes.fix_raw_input 7 | -f lib2to3.fixes.fix_itertools 8 | -f lib2to3.fixes.fix_itertools_imports 9 | -f lib2to3.fixes.fix_exec 10 | -f lib2to3.fixes.fix_operator 11 | -f libfuturize.fixes.fix_execfile 12 | -f libpasteurize.fixes.fix_newstyle 13 | -f lib2to3.fixes.fix_filter 14 | # fix_dict is not idempotent 15 | # -f lib2to3.fixes.fix_dict 16 | -f lib2to3.fixes.fix_map 17 | -f lib2to3.fixes.fix_zip 18 | -f lib2to3.fixes.fix_xrange 19 | -f lib2to3.fixes.fix_basestring 20 | -f libfuturize.fixes.fix_cmp 21 | -f libfuturize.fixes.fix_division_safe 22 | -f lib2to3.fixes.fix_metaclass 23 | -f libfuturize.fixes.fix_unicode_keep_u 24 | -------------------------------------------------------------------------------- /.circleci/test_examples.py: -------------------------------------------------------------------------------- 1 | ######## 2 | # Copyright (c) 2014-2019 Cloudify Platform Ltd. 
All rights reserved
 3 | #
 4 | # Licensed under the Apache License, Version 2.0 (the "License");
 5 | # you may not use this file except in compliance with the License.
 6 | # You may obtain a copy of the License at
 7 | #
 8 | #        http://www.apache.org/licenses/LICENSE-2.0
 9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
16 | 
17 | import os
18 | import pytest
19 | 
20 | from ecosystem_tests.dorkl import (
21 |     basic_blueprint_test,
22 |     cleanup_on_failure,
23 |     prepare_test,
24 |     blueprints_upload,
25 |     deployments_create,
26 |     executions_start,
27 |     cloudify_exec
28 | )
29 | 
30 | 
31 | SECRETS_TO_CREATE = {
32 |     'gcp_credentials': True
33 | }
34 | 
35 | prepare_test(secrets=SECRETS_TO_CREATE)
36 | 
37 | blueprint_list = ['examples/docker/docker/general/any-container.yaml']
38 | vm = 'examples/virtual-machine/gcp.yaml'
39 | docker = 'examples/docker/installation/install-docker.yaml'
40 | 
41 | 
42 | @pytest.fixture(scope='function', params=blueprint_list)
43 | def blueprint_examples(request):
44 |     try:
45 |         blueprints_upload(vm, 'vm')
46 |         deployments_create('vm', inputs='agent_user=centos')
47 |         executions_start('install', 'vm', timeout=200)
48 |         vm_caps = cloudify_exec('cfy deployments capabilities vm')
49 |         blueprints_upload(docker, 'docker')
50 |         deployments_create(
51 |             'docker',
52 |             inputs='docker_host={0} -i docker_user=centos'.format(
53 |                 vm_caps['endpoint']['value']))
54 |         executions_start('install', 'docker', timeout=200)
55 |         try:
56 |             dirname_param = os.path.dirname(request.param).split('/')[-1]
57 |             basic_blueprint_test(
58 |                 request.param,
59 |                 dirname_param,
60 |                 inputs='docker_host={0} -i docker_user=centos'.format(
61 |                     vm_caps['endpoint']['value']),
62 |                 timeout=3000)
63 |         except BaseException:
64 |             cleanup_on_failure(request.param)
65 |             raise
66 |     except BaseException:
67 |         cleanup_on_failure('vm')
68 |         raise
69 | 
70 | 
71 | def test_blueprints(blueprint_examples):
72 |     assert blueprint_examples is None
-------------------------------------------------------------------------------- /.drp/trufflehog_config.yml: --------------------------------------------------------------------------------
 1 | ---
 2 | project_exclusion_list:
 3 |   exclusion_folders:
 4 |     - .drp
 5 |     - .github
 6 |     - examples
 7 |     - cloudify_docker/tests/
 8 | #    - test
 9 | #    - jenkins
10 | #    - buildroot
11 | #    - buildroot
12 |   exclusion_file_paths:
13 |     - Makefile
14 | #    - package/tpm2-tools/tpm2-tools.hash
-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------
 1 | Vagrantfile
 2 | 
 3 | # Byte-compiled / optimized / DLL files
 4 | __pycache__/
 5 | *.py[cod]
 6 | 
 7 | # C extensions
 8 | *.so
 9 | 
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | bin/
14 | build/
15 | develop-eggs/
16 | dist/
17 | eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | 
27 | # Installer logs
28 | pip-log.txt
29 | pip-delete-this-directory.txt
30 | 
31 | # Unit test / coverage reports
32 | htmlcov/
33 | .tox/
34 | .coverage
35 | .cache
36 | nosetests.xml
37 | coverage.xml
38 | 
39 | # Translations
40 | *.mo
41 | 
42 | # Mr Developer
43 | .mr.developer.cfg
44 | .project
45 | .pydevproject
46 | 
47 | # Rope
48 | .ropeproject
49 | 
50 | # Django stuff:
51 | *.log
52 | *.pot
53 | 
54 | # Sphinx documentation
55 | docs/_build/
56 | 
57 | *.iml
58 | 
59 | *COMMIT_MSG
60 | 
61 | # QuickBuild
62 | .qbcache/
63 | 
64 | .noseids
65 | 
66 | *.yaml.template
67 | NOTES
68 | .cloudify
69 | local-storage
70 | *.wgn
71 | 
72 | fusion-agent
73 | fusion-common
74 | fusion-manager
75 | cloudify-utilities-plugins-sdk
-------------------------------------------------------------------------------- /.gitmodules: --------------------------------------------------------------------------------
1 | [submodule "examples"]
2 | 	path = examples
3 | 	url = https://github.com/cloudify-community/blueprint-examples.git
-------------------------------------------------------------------------------- /CHANGELOG.txt: --------------------------------------------------------------------------------
 1 | 2.0.0:
 2 |   - New version that uses the docker-py module.
 3 | 2.0.2:
 4 |   - Fix flake8 issue.
 5 | 2.0.3:
 6 |   - Upgrade packages to fix build.
 7 | 2.0.4:
 8 |   - Bump and rerelease for arm wagon.
 9 | 2.0.5:
10 |   - Handle detach true in case of running commands.
11 |   - Fetch container details after starting it.
12 | 2.0.6:
13 |   - Fix Rsync host verification failure.
14 |   - Change how we fetch container execution.
15 |   - V2 Plugin YAML
16 | 2.0.7:
17 |   - Fix Temp Dir permissions.
18 | 2.0.8:
19 |   - Support plugin 1_4 dsl yaml
20 |   - Release redhat8 wagon
21 | 2.0.9:
22 |   - Fix deprecation from SocketIO and basic refactor for code readability.
23 |   - Added support for passing files and handled preparation of the container files accordingly.
24 |   - Added the cleanup logic for the tmp files, same as in the terraform plugin.
25 | 2.0.10:
26 |   - Fix get api due to back-compat issue.
27 | 2.0.11:
28 |   - Add __version__.py file in the cloudify_docker folder.
29 | 2.0.12:
30 |   - Fix install config for docker host.
31 | 2.0.13:
32 |   - Add PullImage support.
33 |   - Allow docker installation without sudo and offline.
34 | 2.0.14:
35 |   - Updated circleci context & added wagon for py 3.11.
36 | 2.0.15: Release with DSL 1.5 plugin YAML.
37 | 2.0.16: Added .drp folder for trufflehog.
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
 1 | Apache License
 2 | Version 2.0, January 2004
 3 | http://www.apache.org/licenses/
 4 | 
 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 6 | 
 7 | 1. Definitions.
 8 | 
 9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
-------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
 1 | download:
 2 | ifeq (,$(wildcard ./fusion-agent))
 3 | 	git clone https://${GH_USER}:${GITHUB_PASSWORD}@eos2git.cec.lab.emc.com/ISG-Edge/fusion-agent.git && cd './fusion-agent' && git checkout rel/magicp1-2.0.0 && cd ..
 4 | endif
 5 | ifeq (,$(wildcard ./fusion-common))
 6 | 	git clone https://${GH_USER}:${GITHUB_PASSWORD}@eos2git.cec.lab.emc.com/ISG-Edge/fusion-common.git && cd './fusion-common' && git checkout rel/magicp1-2.0.0 && cd ..
 7 | endif
 8 | ifeq (,$(wildcard ./fusion-manager))
 9 | 	git clone https://${GH_USER}:${GITHUB_PASSWORD}@eos2git.cec.lab.emc.com/ISG-Edge/fusion-manager.git && cd './fusion-manager' && git checkout rel/magicp1-2.0.0 && cd ..
10 | endif
11 | ifeq (,$(wildcard ./cloudify-utilities-plugins-sdk))
12 | 	git clone https://github.com/cloudify-incubator/cloudify-utilities-plugins-sdk.git && cd './cloudify-utilities-plugins-sdk' && git checkout fusion && cd ..
13 | endif
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
 1 | # Cloudify Docker Plugin
 2 | 
 3 | This plugin provides the following functionality:
 4 | 
 5 | * Installation, configuration and uninstallation of Docker on a machine
 6 |   (this could be the manager as well, but it is better to use a separate node)
 7 | * Representation of Docker modules [Image, Container] as Cloudify nodes
 8 | * Building Docker images
 9 | * Running Docker containers from the images you have built
10 | * Retrieving host details
11 | * Retrieving all images on the system
12 | * Retrieving all containers on the system
13 | * Handling container volume mapping to the Docker host for use inside the container
14 | 
15 | --------
16 | In addition, the plugin can:
17 | 
18 | * Run an Ansible playbook inside a Docker container, given the same node_type
19 |   as in the [cloudify-ansible-plugin](https://github.com/cloudify-cosmo/cloudify-ansible-plugin)
20 | 
21 | **NOTE**
22 | * in addition to those properties, a few more were added in order to specify
23 |   which Docker machine to execute that container on
24 | 
25 | * Run a Terraform module inside a Docker container, given the same node_type
26 |   as in the [cloudify-terraform-plugin](https://github.com/cloudify-incubator/cloudify-terraform-plugin)
27 | 
28 | **NOTE**
29 | 
30 | * in addition to those properties, a few more were added in order to specify
31 |   which Docker machine to execute that container on
32 | 
33 | * there is also the terraform_plugins property, which lists the plugins to install into that container
34 | 
35 | 
36 | Docker-specific properties:
37 | 
38 | - ```
39 |   cloudify.types.docker.DockerMachineConfig:
40 |     properties:
41 |       docker_ip:
42 |         description: Docker Machine IP
43 |         type: string
44 |         default: ''
45 |       docker_user:
46 |         description: Docker Machine User
47 |         type: string
48 |         default: ''
49 |       docker_key:
50 |         description: Docker Machine Private Key
51 |         type: string
52 |         default: ''
53 |       container_volume:
54 |         description: Docker Container volume_mapping
55 |         type: string
56 |         default: ''
57 |   ```
58 | 
59 | Finally, if you want to provision a Docker host from a Cloudify manager,
60 | you can use a blueprint that handles that configuration task;
61 | see [Blueprints](https://github.com/cloudify-community/blueprint-examples/tree/master/docker-machine-example)
62 | 
63 | ## Examples
64 | 
65 | For official blueprint examples using this Cloudify plugin, please see [Cloudify Community Blueprints Examples](https://github.com/cloudify-community/blueprint-examples/).
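66 | 
67 | As a rough illustration, a node template using this plugin might look like
68 | the following sketch. The `client_config` keys match what the plugin code in
69 | `cloudify_docker/tasks.py` reads (`docker_host` plus `docker_rest_port`, or
70 | alternatively `docker_sock_file`), and port 2375 matches the daemon endpoint
71 | opened by `cloudify_docker/resources/post-install.sh`; the node type names,
72 | however, are assumptions for illustration only -- check `plugin.yaml` for the
73 | exact definitions:
74 | 
75 | ```yaml
76 | node_templates:
77 | 
78 |   docker_image:
79 |     # node type name is an assumption -- verify against plugin.yaml
80 |     type: cloudify.nodes.docker.image
81 |     properties:
82 |       client_config:
83 |         docker_host: { get_input: docker_host_ip }
84 |         docker_rest_port: 2375
85 | 
86 |   docker_container:
87 |     # node type name is an assumption -- verify against plugin.yaml
88 |     type: cloudify.nodes.docker.container
89 |     properties:
90 |       client_config:
91 |         docker_host: { get_input: docker_host_ip }
92 |         docker_rest_port: 2375
93 |     relationships:
94 |       - type: cloudify.relationships.depends_on
95 |         target: docker_image
96 | ```
97 | 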
98 | ## Usage
99 | 
100 | See [Docker Plugin](https://docs.cloudify.co/5.0.5/working_with/official_plugins/)
-------------------------------------------------------------------------------- /cloudify_docker/__init__.py: --------------------------------------------------------------------------------
 1 | ########
 2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
 3 | #
 4 | # Licensed under the Apache License, Version 2.0 (the "License");
 5 | # you may not use this file except in compliance with the License.
 6 | # You may obtain a copy of the License at
 7 | #
 8 | #        http://www.apache.org/licenses/LICENSE-2.0
 9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
-------------------------------------------------------------------------------- /cloudify_docker/__version__.py: --------------------------------------------------------------------------------
1 | version = '2.0.16'
2 | 
-------------------------------------------------------------------------------- /cloudify_docker/ansible.py: --------------------------------------------------------------------------------
 1 | ########
 2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
 3 | #
 4 | # Licensed under the Apache License, Version 2.0 (the "License");
 5 | # you may not use this file except in compliance with the License.
 6 | # You may obtain a copy of the License at
 7 | #
 8 | #        http://www.apache.org/licenses/LICENSE-2.0
 9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | import os
16 | import yaml
17 | import json
18 | import errno
19 | import shutil
20 | import getpass
21 | import tempfile
22 | 
23 | from uuid import uuid1
24 | 
25 | from .tasks import get_lan_ip
26 | from .tasks import get_fabric_settings
27 | from .tasks import get_docker_machine_from_ctx
28 | from .tasks import call_sudo
29 | from .tasks import call_put
30 | 
31 | from cloudify.manager import get_rest_client
32 | from cloudify.decorators import operation
33 | from cloudify.exceptions import (NonRecoverableError, HttpException)
34 | 
35 | from cloudify_common_sdk.resource_downloader import unzip_archive
36 | from cloudify_common_sdk.resource_downloader import untar_archive
37 | from cloudify_common_sdk.resource_downloader import get_shared_resource
38 | from cloudify_common_sdk.resource_downloader import TAR_FILE_EXTENSTIONS
39 | from cloudify_common_sdk._compat import text_type
40 | 
41 | from .constants import (HOSTS,
42 |                         WORKSPACE,
43 |                         LIST_TYPES,
44 |                         BP_INCLUDES_PATH,
45 |                         LOCAL_HOST_ADDRESSES)
46 | 
47 | 
48 | @operation
49 | def set_playbook_config(ctx, **kwargs):
50 |     """
51 |     Set all playbook node instance configuration as runtime properties
52 |     :param ctx: the CloudifyContext of this node instance
53 |     :param kwargs: the playbook node configuration to store
54 |     """
55 |     def _get_secure_values(data, sensitive_keys, parent_hide=False):
56 |         """
57 |         :param data: dict to check against sensitive_keys
58 |         :param sensitive_keys: a list of keys we want to hide the values for
59 |         :param parent_hide: boolean flag to pass if the parent key is
60 |                             in sensitive_keys
61 |         """
62 |         for key in data:
63 |             # check if key or its parent {dict value} in sensitive_keys
64 |             hide = parent_hide or (key in sensitive_keys)
65 |             value = data[key]
66 |             # handle dict value in case sensitive_keys was inside another key
67 |             if isinstance(value, dict):
68 |                 # call the _get_secure_values function recursively
69 |                 # to handle the dict value
70 |                 inner_dict = _get_secure_values(value, sensitive_keys, hide)
71 |                 data[key] = inner_dict
72 |             else:
73 |                 data[key] = '*'*len(value) if hide else value
74 |         return data
75 |     if kwargs and isinstance(kwargs, dict):
76 |         kwargs = _get_secure_values(kwargs, kwargs.get("sensitive_keys", {}))
77 |     for key, value in kwargs.items():
78 |         ctx.instance.runtime_properties[key] = value
79 |     ctx.instance.update()
80 | 
81 | 
82 | @operation
83 | def create_ansible_playbook(ctx, **kwargs):
84 | 
85 |     def handle_file_path(file_path, additional_playbook_files, _ctx):
86 |         """Get the path to a file.
87 | 
88 |         I do this for two reasons:
89 |         1. The Download Resource only downloads an individual file.
90 |         Ansible Playbooks are often many files.
91 |         2. I have not figured out how to pass a file as an in
92 |         memory object to the PlaybookExecutor class.
93 | 
94 |         :param file_path: The `site_yaml_path` from `run`.
95 |         :param additional_playbook_files: additional files
96 |         adjacent to the playbook path.
97 |         :param _ctx: The Cloudify Context.
98 |         :return: The absolute path on the manager to the file.
99 | """ 100 | 101 | def _get_deployment_blueprint(deployment_id): 102 | new_blueprint = "" 103 | try: 104 | # get the latest deployment update to get the new blueprint id 105 | client = get_rest_client() 106 | dep_upd = \ 107 | client.deployment_updates.list(deployment_id=deployment_id, 108 | sort='created_at')[-1] 109 | new_blueprint = \ 110 | client.deployment_updates.get(dep_upd.id)[ 111 | "new_blueprint_id"] 112 | except KeyError: 113 | raise NonRecoverableError( 114 | "can't get blueprint for deployment {0}".format( 115 | deployment_id)) 116 | return new_blueprint 117 | 118 | def download_nested_file_to_new_nested_temp_file(file_path, new_root, 119 | _ctx): 120 | """ Download file to a similar folder system with a new 121 | root directory. 122 | 123 | :param file_path: the resource path for download resource source. 124 | :param new_root: Like a temporary directory 125 | :param _ctx: 126 | :return: 127 | """ 128 | 129 | dirname, file_name = os.path.split(file_path) 130 | # Create the new directory path including the new root. 131 | new_dir = os.path.join(new_root, dirname) 132 | new_full_path = os.path.join(new_dir, file_name) 133 | try: 134 | os.makedirs(new_dir) 135 | except OSError as e: 136 | if e.errno == errno.EEXIST and os.path.isdir(new_dir): 137 | pass 138 | else: 139 | raise 140 | return _ctx.download_resource(file_path, new_full_path) 141 | 142 | if not isinstance(file_path, text_type): 143 | raise NonRecoverableError( 144 | 'The variable file_path {0} is a {1},' 145 | 'expected a string.'.format(file_path, type(file_path))) 146 | if not getattr(_ctx, '_local', False): 147 | if additional_playbook_files: 148 | # This section is intended to handle scenario where we want 149 | # to download the resource instead of use absolute path. 150 | # Perhaps this should replace the old way entirely. 151 | # For now, the important thing here is that we are 152 | # enabling downloading the playbook to a remote host. 153 | playbook_file_dir = tempfile.mkdtemp() 154 | new_file_path = download_nested_file_to_new_nested_temp_file( 155 | file_path, 156 | playbook_file_dir, 157 | _ctx 158 | ) 159 | for additional_file in additional_playbook_files: 160 | download_nested_file_to_new_nested_temp_file( 161 | additional_file, 162 | playbook_file_dir, 163 | _ctx 164 | ) 165 | return new_file_path 166 | else: 167 | # handle update deployment different blueprint playbook name 168 | deployment_blueprint = _ctx.blueprint.id 169 | if _ctx.workflow_id == 'update': 170 | deployment_blueprint = \ 171 | _get_deployment_blueprint(_ctx.deployment.id) 172 | file_path = \ 173 | BP_INCLUDES_PATH.format( 174 | tenant=_ctx.tenant_name, 175 | blueprint=deployment_blueprint, 176 | relative_path=file_path) 177 | if os.path.exists(file_path): 178 | return file_path 179 | raise NonRecoverableError( 180 | 'File path {0} does not exist.'.format(file_path)) 181 | 182 | def handle_site_yaml(site_yaml_path, additional_playbook_files, _ctx): 183 | """ Create an absolute local path to the site.yaml. 184 | 185 | :param site_yaml_path: Relative to the blueprint. 186 | :param additional_playbook_files: additional playbook files relative to 187 | the playbook. 188 | :param _ctx: The Cloudify context. 189 | :return: The final absolute path on the system to the site.yaml. 
190 | """ 191 | 192 | site_yaml_real_path = os.path.abspath( 193 | handle_file_path(site_yaml_path, additional_playbook_files, _ctx)) 194 | site_yaml_real_dir = os.path.dirname(site_yaml_real_path) 195 | site_yaml_real_name = os.path.basename(site_yaml_real_path) 196 | site_yaml_new_dir = os.path.join( 197 | _ctx.instance.runtime_properties[WORKSPACE], 'playbook') 198 | shutil.copytree(site_yaml_real_dir, site_yaml_new_dir) 199 | site_yaml_final_path = os.path.join(site_yaml_new_dir, 200 | site_yaml_real_name) 201 | return site_yaml_final_path 202 | 203 | def get_inventory_file(filepath, _ctx, new_inventory_path): 204 | """ 205 | This method will get the location for inventory file. 206 | The file location could be locally with relative to the blueprint 207 | resources or it could be remotely on the remote machine 208 | :return: 209 | :param filepath: File path to do check for 210 | :param _ctx: The Cloudify context. 211 | :param new_inventory_path: New path which holds the file inventory path 212 | when "filepath" is a local resource 213 | :return: File location for inventory file 214 | """ 215 | if os.path.isfile(filepath): 216 | # The file already exists on the system, then return the file url 217 | return filepath 218 | else: 219 | # Check to see if the file does not exit, then try to lookup the 220 | # file from the Cloudify blueprint resources 221 | try: 222 | _ctx.download_resource(filepath, new_inventory_path) 223 | except HttpException: 224 | _ctx.logger.error( 225 | 'Error when trying to download {0}'.format(filepath)) 226 | return None 227 | return new_inventory_path 228 | 229 | def handle_source_from_string(filepath, _ctx, new_inventory_path): 230 | inventory_file = get_inventory_file(filepath, _ctx, new_inventory_path) 231 | if inventory_file: 232 | return inventory_file 233 | else: 234 | with open(new_inventory_path, 'w') as outfile: 235 | _ctx.logger.info( 236 | 'Writing this data to temp file: {0}'.format( 237 | new_inventory_path)) 238 | outfile.write(filepath) 239 | return new_inventory_path 240 | 241 | def handle_key_data(_data, workspace_dir, container_volume): 242 | """Take Key Data from ansible_ssh_private_key_file and 243 | replace with a temp file. 244 | 245 | :param _data: The hosts dict (from YAML). 246 | :param workspace_dir: The temp dir where we are putting everything. 247 | :return: The hosts dict with a path to a temp file. 248 | """ 249 | 250 | def recurse_dictionary(existing_dict, 251 | key='ansible_ssh_private_key_file'): 252 | if key not in existing_dict: 253 | for k, v in existing_dict.items(): 254 | if isinstance(v, dict): 255 | existing_dict[k] = recurse_dictionary(v) 256 | elif key in existing_dict: 257 | # If is_file_path is True, this has already been done. 258 | try: 259 | is_file_path = os.path.exists(existing_dict[key]) 260 | except TypeError: 261 | is_file_path = False 262 | if not is_file_path: 263 | private_key_file = \ 264 | os.path.join(workspace_dir, str(uuid1())) 265 | with open(private_key_file, 'w') as outfile: 266 | outfile.write(existing_dict[key]) 267 | os.chmod(private_key_file, 0o600) 268 | private_key_file = \ 269 | private_key_file.replace(workspace_dir, 270 | container_volume) 271 | existing_dict[key] = private_key_file 272 | return existing_dict 273 | return recurse_dictionary(_data) 274 | 275 | def handle_sources(data, site_yaml_abspath, _ctx, container_volume): 276 | """Allow users to provide a path to a hosts file 277 | or to generate hosts dynamically, 278 | which is more comfortable for Cloudify users. 
279 | 
280 |         :param data: Either a dict (from YAML)
281 |         or a path to a conventional Ansible file.
282 |         :param site_yaml_abspath: This is the path to the site yaml folder.
283 |         :param _ctx: The Cloudify context.
284 |         :return: The final path of the hosts file that
285 |         was either provided or generated.
286 |         """
287 | 
288 |         hosts_abspath = os.path.join(os.path.dirname(site_yaml_abspath), HOSTS)
289 |         if isinstance(data, dict):
290 |             data = handle_key_data(
291 |                 data, os.path.dirname(site_yaml_abspath), container_volume)
292 |             if os.path.exists(hosts_abspath):
293 |                 _ctx.logger.error(
294 |                     'Hosts data was provided but {0} already exists. '
295 |                     'Overwriting existing file.'.format(hosts_abspath))
296 |             with open(hosts_abspath, 'w') as outfile:
297 |                 yaml.safe_dump(data, outfile, default_flow_style=False)
298 |         elif isinstance(data, text_type):
299 |             hosts_abspath = handle_source_from_string(data, _ctx,
300 |                                                       hosts_abspath)
301 |         return hosts_abspath
302 | 
303 |     def prepare_options_config(options_config, run_data, destination, ctx):
304 |         options_list = []
305 |         if 'extra_vars' not in options_config:
306 |             options_config['extra_vars'] = {}
307 |         options_config['extra_vars'].update(run_data)
308 |         for key, value in options_config.items():
309 |             if key == 'extra_vars':
310 |                 f = tempfile.NamedTemporaryFile(delete=False, dir=destination)
311 |                 with open(f.name, 'w') as outfile:
312 |                     json.dump(value, outfile)
313 |                 value = '@{filepath}'.format(filepath=f.name)
314 |             elif key == 'verbosity':
315 |                 ctx.logger.error('No such option verbosity')
316 |                 continue
317 | 
318 |             key = key.replace("_", "-")
319 |             if isinstance(value, text_type):
320 |                 value = value.encode('utf-8')
321 |             elif isinstance(value, dict):
322 |                 value = json.dumps(value)
323 |             elif isinstance(value, list) and key not in LIST_TYPES:
324 |                 value = [i.encode('utf-8') for i in value]
325 |             elif isinstance(value, list):
326 |                 value = ",".join(value).encode('utf-8')
327 |             options_list.append(
328 |                 '--{key}={value}'.format(key=key, value=repr(value)))
329 |         return ' '.join(options_list)
330 | 
331 |     def prepare_playbook_args(ctx):
332 |         playbook_source_path = \
333 |             ctx.instance.runtime_properties.get('playbook_source_path', None)
334 |         playbook_path = \
335 |             ctx.instance.runtime_properties.get('playbook_path', None) \
336 |             or ctx.instance.runtime_properties.get('site_yaml_path', None)
337 |         sources = \
338 |             ctx.instance.runtime_properties.get('sources', {})
339 |         debug_level = \
340 |             ctx.instance.runtime_properties.get('debug_level', 2)
341 |         additional_args = \
342 |             ctx.instance.runtime_properties.get('additional_args', '')
343 |         additional_playbook_files = \
344 |             ctx.instance.runtime_properties.get(
345 |                 'additional_playbook_files', None) or []
346 |         ansible_env_vars = \
347 |             ctx.instance.runtime_properties.get('ansible_env_vars', None) \
348 |             or {'ANSIBLE_HOST_KEY_CHECKING': "False"}
349 |         ctx.instance.runtime_properties[WORKSPACE] = tempfile.mkdtemp()
350 |         # check if source path is provided [full path/URL]
351 |         if playbook_source_path:
352 |             # here we will combine playbook_source_path with playbook_path
353 |             playbook_tmp_path = get_shared_resource(playbook_source_path)
354 |             if playbook_tmp_path == playbook_source_path:
355 |                 # didn't download anything so check the provided path
356 |                 # if file and absolute path
357 |                 if os.path.isfile(playbook_tmp_path) and \
358 |                         os.path.isabs(playbook_tmp_path):
359 |                     # check file type if archived
360 |                     file_name = playbook_tmp_path.rsplit('/', 1)[1]
361 |                     file_type = file_name.rsplit('.', 1)[1]
362 |                     if file_type == 'zip':
363 |                         playbook_tmp_path = \
364 |                             unzip_archive(playbook_tmp_path)
365 |                     elif file_type in TAR_FILE_EXTENSTIONS:
366 |                         playbook_tmp_path = \
367 |                             untar_archive(playbook_tmp_path)
368 |             playbook_path = "{0}/{1}".format(playbook_tmp_path,
369 |                                              playbook_path)
370 |         else:
371 |             # here we will handle the bundled ansible files
372 |             playbook_path = handle_site_yaml(
373 |                 playbook_path, additional_playbook_files, ctx)
374 |         playbook_args = {
375 |             'playbook_path': playbook_path,
376 |             'sources': handle_sources(sources, playbook_path,
377 |                                       ctx,
378 |                                       ctx.node.properties.get(
379 |                                           'docker_machine', {}).get(
380 |                                           'container_volume', "")),
381 |             'verbosity': debug_level,
382 |             'additional_args': additional_args or '',
383 |         }
384 |         options_config = \
385 |             ctx.instance.runtime_properties.get('options_config', {})
386 |         run_data = \
387 |             ctx.instance.runtime_properties.get('run_data', {})
388 |         return playbook_args, ansible_env_vars, options_config, run_data
389 | 
390 |     playbook_args, ansible_env_vars, options_config, run_data = \
391 |         prepare_playbook_args(ctx)
392 |     docker_ip, docker_user, docker_key, container_volume = \
393 |         get_docker_machine_from_ctx(ctx)
394 |     # The decorators will take care of creating the playbook workspace
395 |     # which will package everything in a directory for our usage
396 |     # it will be in the kwargs [playbook_args.playbook_path]
397 |     playbook_path = playbook_args.get("playbook_path", "")
398 |     debug_level = playbook_args.get("verbosity", 2)
399 |     destination = os.path.dirname(playbook_path)
400 |     verbosity = '-v'
401 |     for i in range(1, debug_level):
402 |         verbosity += 'v'
403 |     command_options = \
404 |         prepare_options_config(options_config, run_data, destination, ctx)
405 |     additional_args = playbook_args.get("additional_args", "")
406 |     if not destination:
407 |         raise NonRecoverableError(
408 |             "something is wrong with the playbook provided")
409 | 
410 |     else:
411 |         ctx.logger.info("playbook is ready at {0}".format(destination))
412 |         playbook_path = playbook_path.replace(destination, container_volume)
413 |         command_options = command_options.replace(destination,
414 |                                                   container_volume)
415 |     ctx.instance.runtime_properties['destination'] = destination
416 |     ctx.instance.runtime_properties['docker_host'] = docker_ip
417 |     ctx.instance.runtime_properties['ansible_env_vars'] = ansible_env_vars
418 |     ctx.instance.runtime_properties['ansible_container_command_arg'] = \
419 |         "ansible-playbook {0} -i hosts {1} {2} {3} ".format(
420 |             verbosity,
421 |             command_options,
422 |             additional_args,
423 |             playbook_path)
424 |     # copy these files to docker machine if needed at that destination
425 |     if not docker_ip:
426 |         raise NonRecoverableError("no docker_ip was provided")
427 | 
428 |     if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
429 |         with get_fabric_settings(ctx, docker_ip,
430 |                                  docker_user,
431 |                                  docker_key) as s:
432 |             with s:
433 |                 destination_parent = destination.rsplit('/', 1)[0]
434 |                 if destination_parent != '/tmp':
435 |                     call_sudo('mkdir -p {0}'.format(
436 |                         destination_parent), fab_ctx=s)
437 |                     call_sudo("chown -R {0}:{0} {1}".format(
438 |                         docker_user, destination_parent), fab_ctx=s)
439 |                 call_put(
440 |                     destination,
441 |                     destination_parent,
442 |                     mirror_local_mode=True,
443 |                     fab_ctx=s)
444 | 
445 | 
446 | @operation
447 | def remove_ansible_playbook(ctx, **kwargs):
448 | 
449 |     docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
450 | 
451 |     destination = ctx.instance.runtime_properties.get('destination', "")
452 |     if not destination:
453 |         raise NonRecoverableError("destination was not assigned due to error")
454 | 
455 |     ctx.logger.info("removing file from destination {0}".format(destination))
456 |     if os.path.exists(destination):
457 |         os.system("sudo chown -R {0} {1}".format(getpass.getuser(),
458 |                                                  destination))
459 |         shutil.rmtree(destination)
460 |         ctx.instance.runtime_properties.pop('destination', None)
461 |     if not docker_ip:
462 |         raise NonRecoverableError("no docker_ip was provided")
463 | 
464 |     if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
465 |         with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s:
466 |             with s:
467 |                 call_sudo("rm -rf {0}".format(destination), fab_ctx=s)
-------------------------------------------------------------------------------- /cloudify_docker/constants.py: --------------------------------------------------------------------------------
 1 | HOSTS = 'hosts'
 2 | PLAYBOOK_PATH = "playbook_path"
 3 | REDHAT_OS_VERS = ('centos', 'redhat', 'fedora')
 4 | DEBIAN_OS_VERS = ('ubuntu', 'debian')
 5 | HOSTS_FILE_NAME = 'hosts'
 6 | CONTAINER_VOLUME = "container_volume"
 7 | ANSIBLE_PRIVATE_KEY = 'ansible_ssh_private_key_file'
 8 | LOCAL_HOST_ADDRESSES = ("127.0.0.1", "localhost", "host.docker.internal")
 9 | WORKSPACE = 'workspace'
10 | LIST_TYPES = ['skip-tags', 'tags']
11 | BP_INCLUDES_PATH = '/opt/manager/resources/blueprints/' \
12 |                    '{tenant}/{blueprint}/{relative_path}'
-------------------------------------------------------------------------------- /cloudify_docker/resources/post-install.sh: --------------------------------------------------------------------------------
 1 | #!/bin/bash -e
 2 | set -x
 3 | sleep 60
 4 | if [ -f /etc/redhat-release ]; then
 5 |     sed -i '/ExecStart/s/usr\/bin\/dockerd/usr\/bin\/dockerd --mtu=1450/' /lib/systemd/system/docker.service
 6 |     sed -i '/ExecStart/ s/$/ -H=tcp:\/\/0.0.0.0:2375 --dns 8.8.8.8 --bip 172.99.0.1\/16/' /lib/systemd/system/docker.service
 7 |     systemctl daemon-reload
 8 |     systemctl restart docker.service
 9 | fi
10 | if [ -f /etc/lsb-release ]; then
11 |     docker_unit=$(systemctl list-unit-files | grep -w "docker")
12 |     if [ -n "$docker_unit" ]; then
13 |         sed -i '/ExecStart/s/usr\/bin\/dockerd/usr\/bin\/dockerd --mtu=1450/' /lib/systemd/system/docker.service
14 |         sed -i '/ExecStart/ s/$/ -H=tcp:\/\/0.0.0.0:2375 --dns 8.8.8.8 --bip 172.99.0.1\/16/' /lib/systemd/system/docker.service
15 |         systemctl daemon-reload
16 |         systemctl restart docker.service
17 |     else
18 |         echo "DOCKER_OPTS=\"--mtu=1450 --dns 8.8.8.8 --dns 8.8.4.4 \
19 |         -H=tcp://0.0.0.0:2375 --bip 172.99.0.1/16\"" >> /etc/default/docker
20 |         service docker restart
21 |     fi
22 | fi
-------------------------------------------------------------------------------- /cloudify_docker/tasks.py: --------------------------------------------------------------------------------
 1 | ########
 2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
 3 | #
 4 | # Licensed under the Apache License, Version 2.0 (the "License");
 5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | import io 16 | import os 17 | import time 18 | import json 19 | import yaml 20 | import fcntl 21 | import fabric 22 | import struct 23 | import socket 24 | import shutil 25 | import getpass 26 | import tarfile 27 | import tempfile 28 | import traceback 29 | import subprocess 30 | 31 | import docker 32 | 33 | from uuid import uuid1 34 | import patchwork.transfers 35 | from functools import wraps 36 | from contextlib import contextmanager 37 | 38 | from cloudify import ctx 39 | from cloudify.decorators import operation 40 | from cloudify.exceptions import NonRecoverableError 41 | 42 | from cloudify_common_sdk.resource_downloader import unzip_archive 43 | from cloudify_common_sdk.resource_downloader import untar_archive 44 | from cloudify_common_sdk.resource_downloader import get_shared_resource 45 | from cloudify_common_sdk.resource_downloader import TAR_FILE_EXTENSTIONS 46 | from cloudify_common_sdk._compat import text_type, PY2 47 | from docker.errors import ImageNotFound, NotFound 48 | 49 | try: 50 | if PY2: 51 | from fabric.api import settings, sudo, put, run 52 | FABRIC_VER = 1 53 | else: 54 | from fabric import Connection, Config 55 | FABRIC_VER = 2 56 | except (ImportError, BaseException): 57 | FABRIC_VER = 'unclear' 58 | 59 | from .constants import (HOSTS, 60 | PLAYBOOK_PATH, 61 | REDHAT_OS_VERS, 62 | DEBIAN_OS_VERS, 63 | HOSTS_FILE_NAME, 64 | CONTAINER_VOLUME, 65 | ANSIBLE_PRIVATE_KEY, 66 | LOCAL_HOST_ADDRESSES) 67 | 68 | 69 | def call_sudo(command, fab_ctx=None): 70 | ctx.logger.debug('Executing: {0}'.format(command)) 71 | if FABRIC_VER == 2: 72 | out = fab_ctx.sudo(command) 73 | ctx.logger.debug('Out: {0}'.format(out)) 74 | return out 75 | elif FABRIC_VER == 1: 76 | return sudo(command) 77 | 78 | 79 | def call_command(command, fab_ctx=None): 80 | ctx.logger.debug('Executing without sudo: {0}'.format(command)) 81 | if FABRIC_VER == 2: 82 | out = fab_ctx.run(command) 83 | ctx.logger.debug('Out: {0}'.format(out)) 84 | return out 85 | elif FABRIC_VER == 1: 86 | return run(command) 87 | 88 | 89 | def call_put(destination, 90 | destination_parent, 91 | mirror_local_mode=None, 92 | fab_ctx=None): 93 | ctx.logger.debug('Copying: {0} {1}'.format(destination, 94 | destination_parent)) 95 | if FABRIC_VER == 2: 96 | return patchwork.transfers.rsync( 97 | fab_ctx, destination, destination_parent, exclude='.git', 98 | strict_host_keys=False) 99 | elif FABRIC_VER == 1: 100 | return put(destination, destination_parent, mirror_local_mode) 101 | 102 | 103 | def get_lan_ip(): 104 | 105 | def get_interface_ip(ifname): 106 | if os.name != "nt": 107 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 108 | return socket.inet_ntoa(fcntl.ioctl( 109 | s.fileno(), 110 | 0x8915, # SIOCGIFADDR 111 | struct.pack('256s', bytes(ifname[:15], encoding='utf8')) 112 | )[20:24]) 113 | return "127.0.0.1" 114 | 115 | try: 116 | ip = socket.gethostbyname(socket.gethostname()) 117 | if ip.startswith("127.") and os.name != "nt": 118 | interfaces = ["eth0", "eth1", "eth2", "wlan0", "wlan1", "wifi0", 119 | "ath0", "ath1", "ppp0"] 120 | for ifname in interfaces: 121 | try: 122 
| ip = get_interface_ip(ifname) 123 | break 124 | except IOError: 125 | pass 126 | return ip 127 | except socket.gaierror: 128 | return "127.0.0.1" # considering no IP is configured to begin with 129 | 130 | 131 | def is_remote_docker(docker_ip): 132 | return docker_ip and docker_ip not in LOCAL_HOST_ADDRESSES and \ 133 | not (docker_ip == get_lan_ip()) 134 | 135 | 136 | def get_from_resource_config(*args): 137 | # takes the resource config , and whatever else you want to get from it 138 | # will be returned as a list and you handle it from the calling method 139 | # i.e : source, dst = get_from_resource_config(res_config, 'src', 'dst') 140 | resource_config = args[0] 141 | result = [] 142 | for arg in args[1:]: 143 | item = resource_config.get(arg) 144 | result.append(item) 145 | return result 146 | 147 | 148 | @contextmanager 149 | def get_fabric_settings(ctx, server_ip, server_user, server_private_key): 150 | if FABRIC_VER == 2: 151 | ctx.logger.info( 152 | "Fabric version : {0}".format(fabric.__version__)) 153 | elif FABRIC_VER == 1: 154 | ctx.logger.info( 155 | "Fabric version : {0}".format(fabric.version.get_version())) 156 | try: 157 | is_file_path = os.path.exists(server_private_key) 158 | except TypeError: 159 | is_file_path = False 160 | if not is_file_path: 161 | private_key_file = os.path.join( 162 | tempfile.mkdtemp(), "{0}.pem".format(str(uuid1()))) 163 | with open(private_key_file, 'w') as outfile: 164 | outfile.write(server_private_key) 165 | os.chmod(private_key_file, 0o400) 166 | server_private_key = private_key_file 167 | try: 168 | ctx.logger.debug("ssh connection to {0}@{1}".format(server_user, 169 | server_ip)) 170 | ctx.logger.debug("server_private_key {0} there? {1}".format( 171 | server_private_key, os.path.isfile(server_private_key))) 172 | if FABRIC_VER == 2: 173 | yield Connection( 174 | host=server_ip, 175 | connect_kwargs={ 176 | "key_filename": server_private_key 177 | }, 178 | user=server_user, 179 | config=Config( 180 | overrides={ 181 | "run": { 182 | "warn": True 183 | }})) 184 | elif FABRIC_VER == 1: 185 | yield settings( 186 | connection_attempts=5, 187 | disable_known_hosts=True, 188 | warn_only=True, 189 | host_string=server_ip, 190 | key_filename=server_private_key, 191 | user=server_user) 192 | finally: 193 | ctx.logger.info("Terminating ssh connection to {0}".format(server_ip)) 194 | if not is_file_path: 195 | os.remove(server_private_key) 196 | shutil.rmtree(os.path.dirname(server_private_key)) 197 | 198 | 199 | def get_docker_machine_from_ctx(ctx): 200 | resource_config = ctx.node.properties.get('resource_config', {}) 201 | docker_machine = ctx.node.properties.get('docker_machine', {}) 202 | if docker_machine: # takes precedence 203 | docker_ip = docker_machine.get('docker_ip', "") 204 | docker_user = docker_machine.get('docker_user', "") 205 | docker_key = docker_machine.get('docker_key', "") 206 | container_volume = docker_machine.get('container_volume', "") 207 | elif resource_config: 208 | # taking properties from resource_config 209 | docker_machine = resource_config.get('docker_machine', {}) 210 | docker_ip = docker_machine.get('docker_ip', "") 211 | docker_user = docker_machine.get('docker_user', "") 212 | docker_key = docker_machine.get('docker_key', "") 213 | container_volume = docker_machine.get('container_volume', "") 214 | return docker_ip, docker_user, docker_key, container_volume 215 | 216 | 217 | def handle_docker_exception(func): 218 | @wraps(func) 219 | def f(*args, **kwargs): 220 | try: 221 | return func(*args, **kwargs) 222 | 
        except docker.errors.APIError as ae:
223 |             raise NonRecoverableError(str(ae))
224 |         except docker.errors.DockerException as de:
225 |             raise NonRecoverableError(str(de))
226 |         except Exception:
227 |             tb = traceback.format_exc()
228 |             ctx.logger.error("Exception Happened: {0}".format(tb))
229 |             raise NonRecoverableError(tb)
230 |     return f
231 | 
232 | 
233 | def with_docker(func):
234 |     @wraps(func)
235 |     def f(*args, **kwargs):
236 |         ctx = kwargs['ctx']
237 |         client_config = ctx.node.properties.get('client_config', {})
238 |         base_url = None
239 |         if client_config.get('docker_host', '') \
240 |                 and client_config.get('docker_rest_port', ''):
241 |             base_url = "tcp://{0}:{1}".format(
242 |                 client_config['docker_host'],
243 |                 client_config['docker_rest_port'])
244 |         elif client_config.get('docker_sock_file', ''):
245 |             base_url = "unix:/{0}".format(client_config['docker_sock_file'])
246 |         else:
247 |             # if we are here that means we don't have a valid docker config
248 |             raise NonRecoverableError('Invalid docker client config')
249 |         kwargs['docker_client'] = docker.DockerClient(base_url=base_url,
250 |                                                       tls=False)
251 |         return func(*args, **kwargs)
252 |     return f
253 | 
254 | 
255 | @handle_docker_exception
256 | def follow_container_logs(ctx, docker_client, container, **kwargs):
257 | 
258 |     @handle_docker_exception
259 |     def check_container_exited(docker_client, container):
260 |         result = docker_client.containers.get(container.id)
261 |         if result.status == 'exited':
262 |             ctx.logger.info('Container exit_code {0}'.format(
263 |                 result.attrs['State']['ExitCode']))
264 |             return True
265 |         return False
266 | 
267 |     run_output = ""
268 |     container_logs = container.logs(stream=True)
269 |     ctx.logger.debug("Following container {0} logs".format(container))
270 |     ctx.logger.debug("Attach returned {0}".format(container_logs))
271 |     while True:
272 |         try:
273 |             chunk = next(container_logs)
274 |             if chunk:
275 |                 chunk = chunk.decode('utf-8', 'replace').strip()
276 |                 run_output += "{0}\n".format(chunk)
277 |                 # ctx.logger.debug("{0}".format(chunk))
278 |             elif check_container_exited(docker_client, container):
279 |                 break
280 |         except StopIteration:
281 |             break
282 |     container_logs.close()
283 |     return run_output
284 | 
285 | 
286 | def move_files(source, destination, permissions=None):
287 |     # let's handle folder vs file
288 |     if os.path.isdir(source):
289 |         for filename in os.listdir(source):
290 |             if destination == os.path.join(source, filename):
291 |                 # moving files from parent to child case
292 |                 # so skip
293 |                 continue
294 |             shutil.move(os.path.join(source, filename),
295 |                         os.path.join(destination, filename))
296 |             if permissions:
297 |                 os.chmod(os.path.join(destination, filename), permissions)
298 |     else:
299 |         shutil.move(source, destination)
300 | 
301 | 
302 | @operation
303 | def prepare_container_files(ctx, **kwargs):
304 | 
305 |     docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
306 |     resource_config = ctx.node.properties.get('resource_config', {})
307 |     source, destination, extra_files, ansible_sources, terraform_sources = \
308 |         get_from_resource_config(resource_config,
309 |                                  'source',
310 |                                  'destination',
311 |                                  'extra_files',
312 |                                  'ansible_sources',
313 |                                  'terraform_sources')
314 |     # check source to handle various cases [zip,tar,git]
315 |     source_tmp_path = get_shared_resource(source)
316 |     # check if we actually downloaded something or not
317 |     delete_tmp = False
318 |     if source_tmp_path == source:
319 |         # didn't download anything so check the provided path
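        # get_shared_resource handles URL/zip/tar/git sources; when it
        # returns the input unchanged, the source is a local path or a
        # blueprint-relative resource and is resolved below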
320 |         # if file and absolute path or not
321 |         if not os.path.isabs(source_tmp_path):
322 |             # bundled and needs to be downloaded from the blueprint
323 |             source_tmp_path = ctx.download_resource(source_tmp_path)
324 |             delete_tmp = True
325 |         if os.path.isfile(source_tmp_path):
326 |             file_name = source_tmp_path.rsplit('/', 1)[1]
327 |             file_type = file_name.rsplit('.', 1)[1]
328 |             # check type
329 |             if file_type == 'zip':
330 |                 unzipped_source = unzip_archive(source_tmp_path, False)
331 |                 if delete_tmp:
332 |                     shutil.rmtree(os.path.dirname(source_tmp_path))
333 |                 source_tmp_path = unzipped_source
334 |             elif file_type in TAR_FILE_EXTENSTIONS:
335 |                 unzipped_source = untar_archive(source_tmp_path, False)
336 |                 if delete_tmp:
337 |                     shutil.rmtree(os.path.dirname(source_tmp_path))
338 |                 source_tmp_path = unzipped_source
339 | 
340 |     # Reaching this point we should have got the files into source_tmp_path
341 |     if not destination:
342 |         destination = tempfile.mkdtemp()
343 |         # fix permissions for this temp directory
344 |         os.chmod(destination, 0o755)
345 |     move_files(source_tmp_path, destination)
346 |     if os.path.isdir(source_tmp_path):
347 |         shutil.rmtree(source_tmp_path)
348 |     elif os.path.isfile(source_tmp_path):
349 |         os.remove(source_tmp_path)
350 | 
351 |     # copy extra files to destination
352 |     for file in (extra_files or []):
353 |         try:
354 |             is_file_path = os.path.exists(file)
355 |             if is_file_path:
356 |                 shutil.copy(file, destination)
357 |         except TypeError:
358 |             raise NonRecoverableError("file {0} can't be copied".format(file))
359 | 
360 |     # handle ansible_sources -Special Case-:
361 |     if ansible_sources:
362 |         hosts_file = os.path.join(destination, HOSTS_FILE_NAME)
363 |         # handle the private key logic
364 |         private_key_val = ansible_sources.get(ANSIBLE_PRIVATE_KEY, "")
365 |         if private_key_val:
366 |             try:
367 |                 is_file_path = os.path.exists(private_key_val)
368 |             except TypeError:
369 |                 is_file_path = False
370 |             if not is_file_path:
371 |                 private_key_file = os.path.join(destination, str(uuid1()))
372 |                 with open(private_key_file, 'w') as outfile:
373 |                     outfile.write(private_key_val)
374 |                 os.chmod(private_key_file, 0o600)
375 |                 ansible_sources.update({ANSIBLE_PRIVATE_KEY: private_key_file})
376 |         # check if playbook_path was provided or not
377 |         playbook_path = ansible_sources.get(PLAYBOOK_PATH, "")
378 |         if not playbook_path:
379 |             raise NonRecoverableError(
380 |                 "Check Ansible Sources, No playbook path was provided")
381 |         hosts_dict = {
382 |             "all": {
383 |                 "hosts": {
384 |                     "instance": {}
385 |                 }
386 |             }
387 |         }
388 |         for key in ansible_sources:
389 |             if key in (CONTAINER_VOLUME, PLAYBOOK_PATH):
390 |                 continue
391 |             elif key == ANSIBLE_PRIVATE_KEY:
392 |                 # replace docker mapping to container volume
393 |                 hosts_dict['all'][HOSTS]['instance'][key] = \
394 |                     ansible_sources.get(key).replace(destination,
395 |                                                      ansible_sources.get(
396 |                                                          CONTAINER_VOLUME))
397 |             else:
398 |                 hosts_dict['all'][HOSTS]['instance'][key] = \
399 |                     ansible_sources.get(key)
400 |         with open(hosts_file, 'w') as outfile:
401 |             yaml.safe_dump(hosts_dict, outfile, default_flow_style=False)
402 |         ctx.instance.runtime_properties['ansible_container_command_arg'] = \
403 |             "ansible-playbook -i hosts {0}".format(playbook_path)
404 | 
405 |     # handle terraform_sources -Special Case-:
406 |     if terraform_sources:
407 |         container_volume = terraform_sources.get(CONTAINER_VOLUME, "")
408 |         # handle files
409 |         storage_dir = terraform_sources.get("storage_dir", "")
410 |         if not storage_dir:
411 |             storage_dir = os.path.join(destination, str(uuid1()))
412 |         else:
413 |             storage_dir = os.path.join(destination, storage_dir)
414 |         os.mkdir(storage_dir)
415 |         # move the downloaded files from source to storage_dir
416 |         move_files(destination, storage_dir)
417 |         # store the runtime property relative to container rather than docker
418 |         storage_dir_prop = storage_dir.replace(destination, container_volume)
419 |         ctx.instance.runtime_properties['storage_dir'] = storage_dir_prop
420 | 
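        # every path saved in runtime properties below is rewritten from
        # the path on the docker host (destination) to the path the
        # container will see once the volume is mounted (container_volume)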
421 |         # handle plugins
422 |         plugins_dir = terraform_sources.get("plugins_dir", "")
423 |         if not plugins_dir:
424 |             plugins_dir = os.path.join(destination, str(uuid1()))
425 |         else:
426 |             plugins_dir = os.path.join(destination, plugins_dir)
427 |         plugins = terraform_sources.get("plugins", {})
428 |         os.mkdir(plugins_dir)
429 |         for plugin in plugins:
430 |             downloaded_plugin_path = get_shared_resource(plugin)
431 |             if downloaded_plugin_path == plugin:
432 |                 # it means we didn't download or extract anything
433 |                 raise NonRecoverableError(
434 |                     "Check Plugin {0} URL".format(plugin))
435 |             else:
436 |                 move_files(downloaded_plugin_path, plugins_dir, 0o775)
437 |         os.chmod(plugins_dir, 0o775)
438 |         # store the runtime property relative to container rather than docker
439 |         plugins_dir = plugins_dir.replace(destination, container_volume)
440 |         ctx.instance.runtime_properties['plugins_dir'] = plugins_dir
441 | 
442 |         # handle variables
443 |         terraform_variables = terraform_sources.get("variables", {})
444 |         if terraform_variables:
445 |             variables_file = os.path.join(storage_dir, 'vars.json')
446 |             with open(variables_file, 'w') as outfile:
447 |                 json.dump(terraform_variables, outfile)
448 |             # store the runtime property relative to container
449 |             # rather than docker
450 |             variables_file = \
451 |                 variables_file.replace(destination, container_volume)
452 |             ctx.instance.runtime_properties['variables_file'] = variables_file
453 | 
454 |         # handle backend
455 |         backend_file = ""
456 |         terraform_backend = terraform_sources.get("backend", {})
457 |         if terraform_backend:
458 |             if not terraform_backend.get("name", ""):
459 |                 raise NonRecoverableError(
460 |                     "Check backend {0} name value".format(terraform_backend))
461 |             backend_str = """
462 |                 terraform {{
463 |                     backend "{backend_name}" {{
464 |                         {backend_options}
465 |                     }}
466 |                 }}
467 |             """
468 |             backend_options = ""
469 |             for option_name, option_value in \
470 |                     terraform_backend.get("options", {}).items():
471 |                 if isinstance(option_value, text_type):
472 |                     backend_options += "{0} = \"{1}\"\n".format(option_name,
473 |                                                                 option_value)
474 |                 else:
475 |                     backend_options += "{0} = {1}\n".format(option_name,
476 |                                                             option_value)
477 |             backend_str = backend_str.format(
478 |                 backend_name=terraform_backend.get("name"),
479 |                 backend_options=backend_options)
480 |             backend_file = os.path.join(storage_dir, '{0}.tf'.format(
481 |                 terraform_backend.get("name")))
482 |             with open(backend_file, 'w') as outfile:
483 |                 outfile.write(backend_str)
484 |             # store the runtime property relative to container
485 |             # rather than docker
486 |             backend_file = \
487 |                 backend_file.replace(destination, container_volume)
488 |             ctx.instance.runtime_properties['backend_file'] = backend_file
489 | 
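        # with name "local" and options {"path": "tf.tfstate"} (illustrative
        # values), the rendered backend block is:
        #     terraform {
        #         backend "local" {
        #             path = "tf.tfstate"
        #         }
        #     }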
490 |         # handle terraform scripts inside shell script
491 |         terraform_script_file = os.path.join(storage_dir, '{0}.sh'.format(
492 |             str(uuid1())))
493 |         terraform_script = """#!/bin/bash -e
494 | terraform init -no-color {backend_file} -plugin-dir={plugins_dir} {storage_dir}
495 | terraform plan -no-color {vars_file} {storage_dir}
496 | terraform apply -no-color -auto-approve {vars_file} {storage_dir}
497 | terraform refresh -no-color {vars_file}
498 | terraform state pull
499 | """.format(backend_file="" if not backend_file
500 |            else "-backend-config={0}".format(backend_file),
501 |            plugins_dir=plugins_dir,
502 |            storage_dir=storage_dir_prop,
503 |            vars_file="" if not terraform_variables
504 |            else " -var-file {0}".format(variables_file))
505 |         ctx.logger.info("terraform_script_file content {0}".format(
506 |             terraform_script))
507 |         with open(terraform_script_file, 'w') as outfile:
508 |             outfile.write(terraform_script)
509 |         # store the runtime property relative to container
510 |         # rather than docker machine path
511 |         terraform_script_file = \
512 |             terraform_script_file.replace(destination, container_volume)
513 |         ctx.instance.runtime_properties['terraform_script_file'] = \
514 |             terraform_script_file
515 |         ctx.instance.runtime_properties['terraform_container_command_arg'] = \
516 |             "bash {0}".format(terraform_script_file)
517 | 
518 |     # Reaching this point means we now have everything in this destination
519 |     ctx.instance.runtime_properties['destination'] = destination
520 |     ctx.instance.runtime_properties['docker_host'] = docker_ip
521 |     # copy these files to docker machine if needed at that destination
522 |     if is_remote_docker(docker_ip):
523 |         with get_fabric_settings(ctx, docker_ip,
524 |                                  docker_user,
525 |                                  docker_key) as s:
526 |             with s:
527 |                 destination_parent = destination.rsplit('/', 1)[0]
528 |                 if destination_parent != '/tmp':
529 |                     call_sudo('mkdir -p {0}'.format(
530 |                         destination_parent), fab_ctx=s)
531 |                     call_sudo(
532 |                         "chown -R {0}:{0} {1}".format(
533 |                             docker_user, destination_parent),
534 |                         fab_ctx=s)
535 |                 call_put(
536 |                     destination,
537 |                     destination_parent,
538 |                     mirror_local_mode=True,
539 |                     fab_ctx=s)
540 | 
541 | 
542 | @operation
543 | def remove_container_files(ctx, **kwargs):
544 | 
545 |     docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
546 | 
547 |     destination = ctx.instance.runtime_properties.get('destination', "")
548 |     if not destination:
549 |         raise NonRecoverableError("destination was not assigned due to error")
550 | 
551 |     ctx.logger.info("removing file from destination {0}".format(destination))
552 |     if os.path.exists(destination):
553 |         os.system("sudo chown -R {0} {1}".format(getpass.getuser(),
554 |                                                  destination))
555 |         shutil.rmtree(destination)
556 |     ctx.instance.runtime_properties.pop('destination', None)
557 | 
558 |     if is_remote_docker(docker_ip):
559 |         with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s:
560 |             with s:
561 |                 call_sudo("rm -rf {0}".format(destination), fab_ctx=s)
562 | 
563 | 
564 | @operation
565 | @handle_docker_exception
566 | @with_docker
567 | def list_images(ctx, docker_client, **kwargs):
568 |     ctx.instance.runtime_properties['images'] = \
569 |         docker_client.images.list(all=True)
570 | 
571 | 
572 | @operation
573 | def install_docker(ctx, **kwargs):
574 |     resource_config = ctx.node.properties.get('resource_config', {})
575 |     offline_installation = resource_config.get('offline_installation')
576 |     if not offline_installation:
577 |         _install_docker(ctx=ctx, **kwargs)
578 |     else:
579 |         _install_docker_offline(ctx=ctx, **kwargs)
580 | 
581 | 
582 | @handle_docker_exception
583 | def _install_docker(ctx, **kwargs):
584 |     # fetch the data needed for installation
585 |     docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
586 |     resource_config = ctx.node.properties.get('resource_config', {})
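    # the online flow downloads Docker's convenience install script plus a
    # post-install script, runs both, and adds the docker user to the
    # "docker" group so later operations can reach the daemon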
install_url = resource_config.get('install_url') 588 | post_install_url = resource_config.get('install_script') 589 | install_with_sudo = resource_config.get('install_with_sudo', True) 590 | 591 | if not (install_url and post_install_url): 592 | raise NonRecoverableError("Please validate your install config") 593 | installation_commands = [ 594 | 'curl -fsSL {0} -o /tmp/install.sh'.format(install_url), 595 | 'chmod 0755 /tmp/install.sh', 596 | 'sh /tmp/install.sh', 597 | 'curl -fsSL {0} -o /tmp/postinstall.sh'.format(post_install_url), 598 | 'chmod 0755 /tmp/postinstall.sh', 599 | 'sh /tmp/postinstall.sh', 600 | 'usermod -aG docker {0}'.format(docker_user) 601 | ] 602 | 603 | with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s: 604 | with s: 605 | for _command in installation_commands: 606 | if install_with_sudo: 607 | call_sudo(_command, fab_ctx=s) 608 | else: 609 | call_command(_command, fab_ctx=s) 610 | 611 | 612 | @handle_docker_exception 613 | def _install_docker_offline(ctx, **kwargs): 614 | """ 615 | support only for EDGE OS (ubuntu22.04) 616 | """ 617 | # fetch the data needed for installation 618 | docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx) 619 | resource_config = ctx.node.properties.get('resource_config', {}) 620 | package_tar_path = resource_config.get('package_tar_path') 621 | post_install_path = resource_config.get('post_install_script_path') 622 | installation_dir = resource_config.get('installation_dir') 623 | install_with_sudo = resource_config.get('install_with_sudo', True) 624 | installation_dir = installation_dir if installation_dir.endswith('/')\ 625 | else '{0}/'.format(installation_dir) 626 | if not (package_tar_path and post_install_path): 627 | raise NonRecoverableError("Please validate your install config") 628 | installation_commands = [ 629 | 'tar -xf {0} -C {1}'.format(package_tar_path, installation_dir), 630 | 'dpkg -i {0}*.deb'.format(installation_dir), 631 | 'chmod 0755 {0}'.format(post_install_path), 632 | 'sh {}'.format(post_install_path), 633 | 'usermod -aG docker {0}'.format(docker_user) 634 | ] 635 | 636 | with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s: 637 | with s: 638 | for _command in installation_commands: 639 | if install_with_sudo: 640 | call_sudo(_command, fab_ctx=s) 641 | else: 642 | call_command(_command, fab_ctx=s) 643 | 644 | 645 | @operation 646 | def uninstall_docker(ctx, **kwargs): 647 | resource_config = ctx.node.properties.get('resource_config', {}) 648 | offline_installation = resource_config.get('offline_installation') 649 | if not offline_installation: 650 | _uninstall_docker(ctx=ctx, **kwargs) 651 | else: 652 | _uninstall_docker_offline(ctx=ctx, **kwargs) 653 | 654 | 655 | def _uninstall_docker_offline(ctx, **kwargs): 656 | """ 657 | support only for EDGE OS (ubuntu22.04) 658 | """ 659 | # fetch the data needed for installation 660 | docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx) 661 | resource_config = ctx.node.properties.get('resource_config', {}) 662 | install_with_sudo = resource_config.get('install_with_sudo', True) 663 | installation_dir = resource_config.get('installation_dir') 664 | installation_dir = installation_dir if installation_dir.endswith('/') \ 665 | else '{0}/'.format(installation_dir) 666 | installation_commands = [ 667 | 'dpkg --remove docker-buildx-plugin docker-ce docker-ce-rootless-' 668 | 'extras docker-ce-cli docker-compose-plugin containerd.io', 669 | 'rm -rf {0}'.format(installation_dir) 670 | ] 671 | 672 | 
with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s: 673 | with s: 674 | for _command in installation_commands: 675 | if install_with_sudo: 676 | call_sudo(_command, fab_ctx=s) 677 | else: 678 | call_command(_command, fab_ctx=s) 679 | 680 | 681 | def _uninstall_docker(ctx, **kwargs): 682 | # fetch the data needed for installation 683 | docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx) 684 | resource_config = ctx.node.properties.get('resource_config', {}) 685 | install_with_sudo = resource_config.get('install_with_sudo', True) 686 | if install_with_sudo: 687 | command_obj = call_sudo 688 | else: 689 | command_obj = call_command 690 | 691 | with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s: 692 | with s: 693 | os_type = command_obj("echo $(python -c " 694 | "'import platform; " 695 | "print(platform.linux_distribution(" 696 | "full_distribution_name=False)[0])')", 697 | fab_ctx=s) 698 | if not PY2: 699 | os_type = os_type.stdout 700 | os_type = os_type.splitlines() 701 | value = "" 702 | # sometimes ubuntu print the message when using sudo 703 | for line in os_type: 704 | if "unable to resolve host" in line: 705 | continue 706 | else: 707 | value += line 708 | os_type = value.strip() 709 | yum_command = False 710 | apt_command = False 711 | if not os_type: 712 | ctx.logger.info('OS not detected. Check the commands...') 713 | command_yum = str(command_obj('yum', fab_ctx=s)) 714 | command_apt = str(command_obj('apt-get', fab_ctx=s)) 715 | yum_command = False \ 716 | if 'command not found' in command_yum else True 717 | apt_command = False \ 718 | if 'command not found' in command_apt else True 719 | ctx.logger.info('System: YUM: {0}. APT-GET: {1}'.format( 720 | yum_command, apt_command)) 721 | 722 | ctx.logger.info("os_type {0}".format(os_type)) 723 | result = "" 724 | if os_type.lower() in REDHAT_OS_VERS or yum_command: 725 | result = command_obj("yum remove -y docker*", fab_ctx=s) 726 | elif os_type.lower() in DEBIAN_OS_VERS or apt_command: 727 | result = command_obj("apt-get remove -y docker*", fab_ctx=s) 728 | ctx.logger.info("uninstall result {0}".format(result)) 729 | 730 | 731 | @operation 732 | @handle_docker_exception 733 | @with_docker 734 | def list_host_details(ctx, docker_client, **kwargs): 735 | ctx.instance.runtime_properties['host_details'] = docker_client.info() 736 | 737 | 738 | @operation 739 | @handle_docker_exception 740 | @with_docker 741 | def list_containers(ctx, docker_client, **kwargs): 742 | ctx.instance.runtime_properties['contianers'] = \ 743 | docker_client.containers.list(all=True, trunc=True) 744 | 745 | 746 | @operation 747 | @handle_docker_exception 748 | @with_docker 749 | def build_image(ctx, docker_client, **kwargs): 750 | resource_config = ctx.node.properties.get('resource_config', {}) 751 | image_content, tag = get_from_resource_config(resource_config, 752 | 'image_content', 753 | 'tag') 754 | pull_image = resource_config.get('pull_image', False) 755 | 756 | if image_content: 757 | # check what content we got, URL , path or string 758 | split = image_content.split('://') 759 | schema = split[0] 760 | if schema in ['http', 'https']: 761 | downloaded_image_content = get_shared_resource(image_content) 762 | with open(downloaded_image_content, "r") as f: 763 | image_content = f.read() 764 | elif os.path.isfile(image_content): 765 | if os.path.isabs(image_content): 766 | with open(image_content, "r") as f: 767 | image_content = f.read() 768 | else: 769 | downloaded_image_content = 
ctx.download_resource(image_content)
770 |                 with open(downloaded_image_content, "r") as f:
771 |                     image_content = f.read()
772 |         else:
773 |             ctx.logger.info("Building image with tag {0}".format(tag))
774 |             # replace the new line str with new line char
775 |             image_content = image_content.replace("\\n", '\n')
776 |         ctx.logger.debug("Image Dockerfile:\n{0}".format(image_content))
777 |         build_output = ""
778 |         img_data = io.BytesIO(image_content.encode('ascii'))
779 |         # the result of build will have a tuple (image_id, build_result)
780 |         for chunk in docker_client.images.build(fileobj=img_data, tag=tag)[1]:
781 |             build_output += "{0}\n".format(chunk)
782 |         ctx.instance.runtime_properties['build_result'] = build_output
783 |         ctx.logger.info("Build Output {0}".format(build_output))
784 |         if 'errorDetail' in build_output:
785 |             raise NonRecoverableError("Build Failed check build-result")
786 |         ctx.instance.runtime_properties['image'] = \
787 |             repr(docker_client.images.get(name=tag))
788 |     elif pull_image:
789 |         all_tags = resource_config.get('all_tags', False)
790 |         if not tag:
791 |             return
792 |         repository = tag.split(':')[0]
793 |         try:
794 |             image_tag = tag.split(':')[1]
795 |         except IndexError:
796 |             image_tag = 'latest'
797 |         try:
798 |             docker_client.images.get(tag)
799 |         except ImageNotFound:
800 |             docker_client.images.pull(repository=repository,
801 |                                       tag=image_tag, all_tags=all_tags)
802 |         ctx.instance.runtime_properties['build_result'] = 'Image was pulled'
803 | 
804 | 
805 | @operation
806 | @handle_docker_exception
807 | @with_docker
808 | def remove_image(ctx, docker_client, **kwargs):
809 |     resource_config = ctx.node.properties.get('resource_config', {})
810 |     tag = resource_config.get('tag', "")
811 |     build_res = ctx.instance.runtime_properties.pop('build_result', "")
812 |     if tag:
813 |         if not build_res or 'errorDetail' in build_res:
814 |             ctx.logger.info("build contained errors, nothing to do")
815 |             return
816 |         ctx.logger.debug("Removing image with tag {0}".format(tag))
817 |         remove_res = docker_client.images.remove(tag, force=True)
818 |         ctx.logger.info("Remove result {0}".format(remove_res))
819 | 
820 | 
821 | @operation
822 | @handle_docker_exception
823 | @with_docker
824 | def create_container(ctx, docker_client, **kwargs):
825 |     resource_config = ctx.node.properties.get('resource_config', {})
826 |     image_tag, container_args = get_from_resource_config(resource_config,
827 |                                                          'image_tag',
828 |                                                          'container_args')
829 |     if image_tag:
830 |         ctx.logger.debug(
831 |             "Running container from image tag {0}".format(image_tag))
832 |         host_config = container_args.pop("host_config", {})
833 | 
834 |         # handle volume mapping
835 |         # map each entry to its volume based on index
836 |         volumes = container_args.pop('volumes', None)
837 |         if volumes:
838 |             # logic was added to handle mapping to create_container
839 |             paths_on_host = container_args.pop('volumes_mapping', None)
840 |             binds_list = []
841 |             if paths_on_host:
842 |                 for path, volume in zip(paths_on_host, volumes):
843 |                     binds_list.append('{0}:{1}'.format(path, volume))
844 |             host_config.update({"volumes": binds_list})
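        # e.g. volumes=['/data'] with volumes_mapping=['/tmp/tmpab12cd']
        # (illustrative paths) yields binds of the docker "host:container"
        # form: ['/tmp/tmpab12cd:/data']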
845 |         ctx.logger.debug("host_config : {0}".format(host_config))
846 |         container_args.update(host_config)
847 |         ctx.instance.runtime_properties['container_args'] = container_args
848 |         ctx.logger.debug("container_args : {0}".format(container_args))
849 | 
850 |         # docker create
851 |         container = docker_client.containers.create(image=image_tag,
852 |                                                      **container_args)
853 |         # docker start
854 |         container.start()
855 | 
856 |         # the run method would handle the lifecycle create,
857 |         # start and logs; in case of detach, no logs
858 |         # container = docker_client.containers.run(image=image_tag,
859 |         #                                          **container_args)
860 | 
861 |         # if command in detach mode -since the command will keep running-
862 |         # no need to follow logs [ it will return the Container Object ]
863 |         if container_args.get("detach", False):
864 |             ctx.logger.info("command is running in detach mode True")
865 |             ctx.instance.runtime_properties['container'] = container.id
866 |             container_info = docker_client.containers.get(container.id)
867 |             ctx.instance.runtime_properties['container_info'] = \
868 |                 repr(container_info)
869 |             return
870 |         ctx.logger.info("container was created : {0}".format(container))
871 |         ctx.instance.runtime_properties['container'] = container.id
872 |         container_logs = follow_container_logs(ctx, docker_client, container)
873 |         ctx.logger.info("container logs : {0} ".format(container_logs))
874 |         ctx.instance.runtime_properties['run_result'] = container_logs
875 | 
876 | 
877 | @operation
878 | @handle_docker_exception
879 | @with_docker
880 | def start_container(ctx, docker_client, **kwargs):
881 |     resource_config = ctx.node.properties.get('resource_config', {})
882 |     container_args = resource_config.get('container_args', {})
883 |     container = ctx.instance.runtime_properties.get('container', "")
884 |     if not container:
885 |         ctx.logger.info("container was not created successfully, nothing to do")
886 |         return
887 |     if not container_args.get("command", ""):
888 |         ctx.logger.info("no command sent to container, nothing to do")
889 |         return
890 |     ctx.logger.debug(
891 |         "Running this command on container : {0} ".format(
892 |             container_args.get("command", "")))
893 |     container_obj = docker_client.containers.get(container)
894 |     container_obj.start()
895 |     container_logs = follow_container_logs(ctx, docker_client, container_obj)
896 |     ctx.logger.info("container logs : {0} ".format(container_logs))
897 |     ctx.instance.runtime_properties['run_result'] = container_logs
898 | 
899 | 
900 | def check_if_applicable_command(command):
901 |     EXCEPTION_LIST = ('terraform', 'ansible-playbook', 'ansible')
902 |     # check if the command is available on the platform,
903 |     # TODO : make it more dynamic
904 |     # at least : bash , python , and basic unix commands ...
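    # e.g. check_if_applicable_command('bash') returns True as long as
    # 'which bash' exits with 0 on the machine running this operation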
905 |     # adding exceptions like terraform, ansible-playbook
906 |     # if they are not installed on the host
907 |     # can be extended based on needs
908 |     if command in EXCEPTION_LIST:
909 |         return True
910 |     rc = subprocess.call(['which', command])
911 |     if rc == 0:
912 |         return True
913 |     else:
914 |         return False
915 | 
916 | 
917 | def find_host_script_path(docker_client, container_id,
918 |                           command, container_args):
919 |     # given the original command and the mapping
920 |     # let's return the path we will be overriding the content for
921 |     script = None
922 |     argument_list = command.split(' ', 1)[1].split()
923 |     # weed out flags and stop on first argument after that
924 |     # for example in ansible we would have :
925 |     # ansible-playbook -i {hosts} {actual_file_we_want}
926 |     # flags trick to get the file and validate it is part of mapping
927 |     skip_flag_arg = False
928 |     for argument in argument_list:
929 |         # skip the flags
930 |         if argument.startswith('-'):
931 |             skip_flag_arg = True
932 |             continue
933 |         if skip_flag_arg:
934 |             skip_flag_arg = False
935 |             continue
936 |         script = argument
937 |         break
938 |     ctx.logger.debug("script to override {0}".format(script))
939 |     # Handle the attached volume to override
940 |     # the script with stop_command
941 |     volumes = container_args.get("volumes", None)
942 |     volumes_mapping = container_args.get("volumes_mapping", None)
943 |     if volumes and volumes_mapping:
944 |         # look for the script in the mapped volumes
945 |         mapping_to_use = ""
946 |         for volume, mapping in zip(volumes, volumes_mapping):
947 |             ctx.logger.debug(
948 |                 "check if script {0} contains volume {1}".format(script,
949 |                                                                  volume))
950 |             if volume in script:
951 |                 ctx.logger.debug("replacing {0} with {1}".format(volume,
952 |                                                                  mapping))
953 |                 script = script.replace(volume, mapping)
954 |                 ctx.logger.debug("script to modify is {0}".format(script))
955 |                 mapping_to_use = mapping
956 |                 break
957 | 
958 |         if not mapping_to_use:
959 |             ctx.logger.info("volume mapping is not correct")
960 |             return
961 |     else:
962 |         # let's look for local files inside the container
963 |         containerObj = docker_client.containers.get(container_id)
964 |         bits, stats = containerObj.get_archive(script)
965 |         if stats.get('size', 0) > 0:
966 |             destination = tempfile.mkdtemp()
967 |             f = open(
968 |                 os.path.join(destination, stats.get('name')), 'wb')
969 |             for chunk in bits:
970 |                 f.write(chunk)
971 |             f.close()
972 |             file_obj = tarfile.open(f.name, "r")
973 |             file = file_obj.extractfile(stats.get('name'))
974 |             file_content = file.read()
975 |             file_obj.close()
976 |             os.remove(f.name)
977 |             # return the file name and content so it would be handled
978 |             # via put_archive through the container API
979 |             return script, file_content
980 |         else:
981 |             ctx.logger.info('script not found inside the container '
982 |                             'since no volumes were mapped')
983 |             return
984 |     return script
985 | 
986 | 
987 | def handle_container_timed_out(ctx, docker_client, container_id,
988 |                                container_args, stop_command):
989 |     # check the original command in the properties
990 |     command = container_args.get("command", "")
991 |     if not command:
992 |         ctx.logger.info("no command sent to container, nothing to do")
993 |         return
994 |     # assuming the container was passed : {script_executor} {script} [ARGS]
995 |     if len(command.split(' ', 1)) >= 2:
996 |         script_executor = command.split(' ', 1)[0]
997 |         if not check_if_applicable_command(script_executor):
998 |             ctx.logger.info(
999 |                 "can't run this command {0}".format(script_executor))
1000 |             return
1001 |         # we will get
the docker_host conf from mapped 1002 | # container_files node through relationships 1003 | volumes = container_args.get("volumes", None) 1004 | volumes_mapping = container_args.get("volumes_mapping", None) 1005 | docker_ip = "" 1006 | relationships = list(ctx.instance.relationships) 1007 | if volumes and volumes_mapping: 1008 | for rel in relationships: 1009 | node = rel.target.node 1010 | resource_config = node.properties.get('resource_config', {}) 1011 | docker_machine = resource_config.get('docker_machine', {}) 1012 | ctx.logger.debug("checking for IP in {0}".format(node.name)) 1013 | if node.type == 'cloudify.nodes.docker.container_files': 1014 | docker_ip = docker_machine.get('docker_ip', "") 1015 | docker_user = docker_machine.get('docker_user', "") 1016 | docker_key = docker_machine.get('docker_key', "") 1017 | break 1018 | if node.type == 'cloudify.nodes.docker.terraform_module': 1019 | docker_machine = node.properties.get('docker_machine', {}) 1020 | docker_ip = docker_machine.get('docker_ip', "") 1021 | docker_user = docker_machine.get('docker_user', "") 1022 | docker_key = docker_machine.get('docker_key', "") 1023 | break 1024 | if not docker_ip: 1025 | ctx.logger.info( 1026 | "can't find docker_ip in container_files " 1027 | "node through relationships") 1028 | return 1029 | 1030 | # here we assume the command is OK , and we have arguments to it 1031 | 1032 | script, _ = find_host_script_path(docker_client, container_id, 1033 | command, container_args) 1034 | if not script: 1035 | return 1036 | replace_script = stop_command 1037 | 1038 | is_ansible_custom_case = 'ansible' in script_executor 1039 | if is_ansible_custom_case: 1040 | _, replace_script = find_host_script_path(docker_client, 1041 | container_id, 1042 | stop_command, 1043 | container_args) 1044 | if not replace_script: 1045 | return 1046 | 1047 | # check if we have volume mapping or not 1048 | if volumes and volumes_mapping: 1049 | # let's read from the remote docker if that is the case 1050 | if is_remote_docker(docker_ip): 1051 | with get_fabric_settings(ctx, docker_ip, docker_user, 1052 | docker_key) as s: 1053 | with s: 1054 | replace_script = call_sudo( 1055 | 'cat {0}'.format(replace_script), 1056 | fab_ctx=s).stdout 1057 | else: 1058 | # check from local 1059 | with open(replace_script, 'r') as f: 1060 | replace_script = f.read() 1061 | 1062 | container_obj = docker_client.containers.get(container_id) 1063 | # if we are here , then we found the script 1064 | # in one of the mapped volumes 1065 | ctx.logger.debug("override script {0} content to {1}".format( 1066 | script, replace_script)) 1067 | if volumes and volumes_mapping: 1068 | with open(script, 'w') as outfile: 1069 | outfile.write(replace_script) 1070 | 1071 | if is_remote_docker(docker_ip): 1072 | with get_fabric_settings(ctx, docker_ip, docker_user, 1073 | docker_key) as s: 1074 | with s: 1075 | call_put( 1076 | script, script, mirror_local_mode=True, fab_ctx=s) 1077 | else: 1078 | # if we are here we have the replace content and we need to replace 1079 | # script content inside the container files 1080 | script_dir = os.path.dirname("/{0}".format(script)) 1081 | pw_tarstream = io.BytesIO() 1082 | pw_tar = tarfile.TarFile(fileobj=pw_tarstream, mode='w') 1083 | file_data = replace_script 1084 | tarinfo = tarfile.TarInfo(name=script) 1085 | tarinfo.size = len(file_data) 1086 | tarinfo.mtime = time.time() 1087 | pw_tar.addfile(tarinfo, io.BytesIO(file_data)) 1088 | pw_tar.close() 1089 | pw_tarstream.seek(0) 1090 | container_obj.put_archive(script_dir, 
1091 |                                       pw_tarstream)
1092 | 
1093 |         # now we can restart the container, and it will
1094 |         # run with the overridden script that contains the
1095 |         # stop_command
1096 |         container_obj.restart()
1097 |         container_logs = follow_container_logs(ctx, docker_client,
1098 |                                                container_obj)
1099 |         ctx.logger.info("container logs : {0} ".format(container_logs))
1100 |     else:
1101 |         ctx.logger.info("""can't send this command {0} to container,
1102 | since it is unreachable""".format(stop_command))
1103 |         return
1104 | 
1105 | 
1106 | @operation
1107 | @handle_docker_exception
1108 | @with_docker
1109 | def stop_container(ctx, docker_client, stop_command, **kwargs):
1110 |     container = ctx.instance.runtime_properties.get('container', "")
1111 |     resource_config = ctx.node.properties.get('resource_config', {})
1112 |     image_tag, container_args = get_from_resource_config(resource_config,
1113 |                                                          'image_tag',
1114 |                                                          'container_args')
1115 |     if not stop_command:
1116 |         ctx.logger.info("no stop command, nothing to do")
1117 |         try:
1118 |             container_obj = docker_client.containers.get(container)
1119 |             container_obj.stop()
1120 |             container_obj.wait()
1121 |         except NotFound:
1122 |             pass
1123 |         return
1124 | 
1125 |     script_executor = stop_command.split(' ', 1)[0]
1126 |     if not check_if_applicable_command(script_executor):
1127 |         ctx.logger.info(
1128 |             "can't run this command {0}".format(script_executor))
1129 |         return
1130 | 
1131 |     if container:
1132 |         ctx.logger.info(
1133 |             "Stop Container {0} from tag {1} with command {2}".format(
1134 |                 container, image_tag, stop_command))
1135 |         # attach to container socket and send the stop_command
1136 |         container_obj = docker_client.containers.get(container)
1137 |         socket = container_obj.attach_socket(
1138 |             params={
1139 |                 'stdin': 1,
1140 |                 "stdout": 1,
1141 |                 'stream': 1,
1142 |                 "logs": 1
1143 |             })
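        # attach_socket hands back a raw socket bridged to the container's
        # stdin/stdout; the stop_command is written to stdin and output is
        # read back until EOF or the 20-second timeout below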
1144 |         try:
1145 |             socket._sock.settimeout(20)
1146 |             socket._sock.send(stop_command.encode('utf-8'))
1147 |             buffer = ""
1148 |             while True:
1149 |                 data = socket._sock.recv(4096)
1150 |                 if not data:
1151 |                     break
1152 |                 buffer += data.decode('utf-8')
1153 |             ctx.logger.info("Stop command result {0}".format(buffer))
1154 |         except docker.errors.APIError as ae:
1155 |             ctx.logger.error("APIError {0}".format(str(ae)))
1156 |         except Exception as e:
1157 |             message = e.message if hasattr(e, 'message') else e
1158 |             # response = e.response if hasattr(e, 'response') else e
1159 |             # explanation = e.explanation if hasattr(e, 'explanation') else e
1160 |             # errno = e.errno if hasattr(e, 'errno') else e
1161 |             ctx.logger.error("exception : {0}".format(message))
1162 |             # if timeout happened that means the container exited,
1163 |             # and if we want to do something for the container,
1164 |             # or handle any special case if we want that
1165 |             if "timed out" in repr(message):
1166 |                 ctx.logger.debug('Expected case since it is stopped '
1167 |                                  'and we want a chance to execute '
1168 |                                  'extra command with override to old one')
1169 |                 # Special Handling for terraform -to call cleanup for example-
1170 |                 # we can switch the command with stop_command and restart
1171 |                 handle_container_timed_out(ctx, docker_client, container,
1172 |                                            container_args, stop_command)
1173 | 
1174 |         socket.close()
1175 |         container_obj.stop()
1176 |         container_obj.wait()
1177 | 
1178 | 
1179 | @operation
1180 | @handle_docker_exception
1181 | @with_docker
1182 | def remove_container(ctx, docker_client, **kwargs):
1183 |     container = ctx.instance.runtime_properties.get('container', "")
1184 |     resource_config = ctx.node.properties.get('resource_config', {})
1185 |     image_tag = resource_config.get('image_tag', "")
1186 |     if container:
1187 |         ctx.logger.info(
1188 |             "remove Container {0} from tag {1}".format(container,
1189 |                                                        image_tag))
1190 |         container_obj = docker_client.containers.get(container)
1191 |         remove_res = container_obj.remove()
1192 |         ctx.instance.runtime_properties.pop('container')
1193 |         ctx.logger.info("Remove result {0}".format(remove_res))
1194 | 
--------------------------------------------------------------------------------
/cloudify_docker/terraform.py:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | import os
16 | import json
17 | import shutil
18 | import getpass
19 | import tempfile
20 | 
21 | from uuid import uuid1
22 | 
23 | from .tasks import move_files
24 | from .tasks import get_lan_ip
25 | from .tasks import get_fabric_settings
26 | from .tasks import get_docker_machine_from_ctx
27 | from .tasks import call_sudo
28 | from .tasks import call_put
29 | 
30 | from cloudify.decorators import operation
31 | from cloudify.exceptions import NonRecoverableError
32 | 
33 | from cloudify_common_sdk.resource_downloader import unzip_archive
34 | from cloudify_common_sdk.resource_downloader import untar_archive
35 | from cloudify_common_sdk.resource_downloader import get_shared_resource
36 | from cloudify_common_sdk.resource_downloader import TAR_FILE_EXTENSTIONS
37 | from cloudify_common_sdk._compat import text_type
38 | 
39 | from .constants import LOCAL_HOST_ADDRESSES
40 | 
41 | 
42 | @operation
43 | def prepare_terraform_files(ctx, **kwargs):
44 | 
45 |     docker_ip, docker_user, docker_key, container_volume = \
46 |         get_docker_machine_from_ctx(ctx)
47 | 
48 |     resource_config = ctx.node.properties.get('resource_config', {})
49 |     source = resource_config.get('source', "")
50 |     backend = resource_config.get('backend', {})
51 |     variables = resource_config.get('variables', {})
52 |     environment_variables = resource_config.get('environment_variables', {})
53 |     terraform_plugins = ctx.node.properties.get('terraform_plugins', [])
54 | 
55 |     if not source:
56 |         raise NonRecoverableError("Please check the source value")
57 | 
58 | 
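    # from here on this mirrors the terraform_sources branch of
    # prepare_container_files in tasks.py: stage the module under a storage
    # dir, render backend/vars files, then wrap the run in a shell script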
59 |     destination = tempfile.mkdtemp()
60 | 
61 |     # handle the provided source
62 |     source_tmp_path = get_shared_resource(source)
63 |     if source_tmp_path == source:
64 |         # didn't download anything so check the provided path
65 |         # if file and relative path to download from blueprint
66 |         if os.path.isfile(source_tmp_path) and \
67 |                 not os.path.isabs(source_tmp_path):
68 |             source_tmp_path = ctx.download_resource(source)
69 |     # check file type if archived
70 |     file_name = source_tmp_path.rsplit('/', 1)[1]
71 |     file_type = file_name.rsplit('.', 1)[1]
72 |     if file_type == 'zip':
73 |         source_tmp_path = \
74 |             unzip_archive(source_tmp_path)
75 |     elif file_type in TAR_FILE_EXTENSTIONS:
76 |         source_tmp_path = \
77 |             untar_archive(source_tmp_path)
78 | 
79 |     storage_dir = "{0}/{1}".format(destination, "storage")
80 |     os.mkdir(storage_dir)
81 | 
82 |     move_files(source_tmp_path, storage_dir)
83 |     shutil.rmtree(source_tmp_path)
84 | 
85 |     storage_dir_prop = storage_dir.replace(destination, container_volume)
86 |     ctx.instance.runtime_properties['storage_dir'] = storage_dir_prop
87 | 
88 |     plugins_dir = "{0}/{1}".format(destination, "plugins")
89 |     os.mkdir(plugins_dir)
90 | 
91 |     backend_file = ""
92 |     if backend:
93 |         if not backend.get("name", ""):
94 |             raise NonRecoverableError(
95 |                 "Check backend {0} name value".format(backend))
96 |         backend_str = """
97 |             terraform {{
98 |                 backend "{backend_name}" {{
99 |                     {backend_options}
100 |                 }}
101 |             }}
102 |         """
103 |         backend_options = ""
104 |         for option_name, option_value in \
105 |                 backend.get("options", {}).items():
106 |             if isinstance(option_value, text_type):
107 |                 backend_options += "{0} = \"{1}\"\n".format(option_name,
108 |                                                             option_value)
109 |             else:
110 |                 backend_options += "{0} = {1}\n".format(option_name,
111 |                                                         option_value)
112 |         backend_str = backend_str.format(
113 |             backend_name=backend.get("name"),
114 |             backend_options=backend_options)
115 |         backend_file = os.path.join(storage_dir, '{0}.tf'.format(
116 |             backend.get("name")))
117 |         with open(backend_file, 'w') as outfile:
118 |             outfile.write(backend_str)
119 |         # store the runtime property relative to container
120 |         # rather than docker machine path
121 |         backend_file = \
122 |             backend_file.replace(destination, container_volume)
123 |         ctx.instance.runtime_properties['backend_file'] = backend_file
124 | 
125 |     variables_file = ""
126 |     if variables:
127 |         variables_file = os.path.join(storage_dir, 'vars.json')
128 |         with open(variables_file, 'w') as outfile:
129 |             json.dump(variables, outfile)
130 |         # store the runtime property relative to container
131 |         # rather than docker machine path
132 |         variables_file = \
133 |             variables_file.replace(destination, container_volume)
134 |         ctx.instance.runtime_properties['variables_file'] = variables_file
135 |     ctx.instance.runtime_properties['environment_variables'] = \
136 |         environment_variables
137 |     if terraform_plugins:
138 |         for plugin in terraform_plugins:
139 |             downloaded_plugin_path = get_shared_resource(plugin)
140 |             if downloaded_plugin_path == plugin:
141 |                 # it means we didn't download or extract anything
142 |                 raise NonRecoverableError(
143 |                     "Check Plugin {0} URL".format(plugin))
144 |             else:
145 |                 move_files(downloaded_plugin_path, plugins_dir, 0o775)
146 |         os.chmod(plugins_dir, 0o775)
147 |     plugins_dir = plugins_dir.replace(destination, container_volume)
148 |     ctx.instance.runtime_properties['plugins_dir'] = plugins_dir
149 | 
150 |     # handle terraform scripts inside shell script
151 |     terraform_script_file = os.path.join(storage_dir, '{0}.sh'.format(
152 |         str(uuid1())))
153 |     terraform_script = """#!/bin/bash -e
154 | terraform init -no-color {backend_file} -plugin-dir={plugins_dir} {storage_dir}
155 | terraform plan -no-color {vars_file} {storage_dir}
156 | terraform apply -no-color -auto-approve {vars_file} {storage_dir}
157 | terraform refresh -no-color {vars_file}
158 | terraform state pull
159 | """.format(backend_file="" if not backend_file
160 |            else "-backend-config={0}".format(backend_file),
161 |            plugins_dir=plugins_dir,
162 |            storage_dir=storage_dir_prop,
163 |            vars_file="" if not variables
164 |            else " -var-file {0}".format(variables_file))
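    # with illustrative values (plugins_dir '/terraform_plugins', storage
    # dir '/module/storage', no backend and no vars) the init line renders:
    #     terraform init -no-color  -plugin-dir=/terraform_plugins /module/storage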
165 |     ctx.logger.info("terraform_script_file content {0}".format(
166 |         terraform_script))
167 |     with open(terraform_script_file, 'w') as outfile:
168 |         outfile.write(terraform_script)
169 |     # store the runtime property relative to container
170 |     # rather than docker machine path
171 |     terraform_script_file = \
172 |         terraform_script_file.replace(destination, container_volume)
173 |     ctx.instance.runtime_properties['terraform_script_file'] = \
174 |         terraform_script_file
175 |     ctx.instance.runtime_properties['terraform_container_command_arg'] = \
176 |         "bash {0}".format(terraform_script_file)
177 | 
178 |     # Reaching this point means we now have everything in this destination
179 |     ctx.instance.runtime_properties['destination'] = destination
180 |     ctx.instance.runtime_properties['docker_host'] = docker_ip
181 |     # copy these files to docker machine if needed at that destination
182 |     if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
183 |         with get_fabric_settings(ctx, docker_ip,
184 |                                  docker_user,
185 |                                  docker_key) as s:
186 |             with s:
187 |                 destination_parent = destination.rsplit('/', 1)[0]
188 |                 if destination_parent != '/tmp':
189 |                     call_sudo('mkdir -p {0}'.format(
190 |                         destination_parent), fab_ctx=s)
191 |                     call_sudo("chown -R {0}:{0} {1}".format(
192 |                         docker_user, destination_parent), fab_ctx=s)
193 |                 call_put(
194 |                     destination,
195 |                     destination_parent,
196 |                     mirror_local_mode=True,
197 |                     fab_ctx=s)
198 | 
199 | 
200 | @operation
201 | def remove_terraform_files(ctx, **kwargs):
202 | 
203 |     docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
204 | 
205 |     destination = ctx.instance.runtime_properties.get('destination', "")
206 |     if not destination:
207 |         raise NonRecoverableError("destination was not assigned due to error")
208 | 
209 | 
210 |     ctx.logger.info("removing file from destination {0}".format(destination))
211 |     if os.path.exists(destination):
212 |         os.system("sudo chown -R {0} {1}".format(getpass.getuser(),
213 |                                                  destination))
214 |         shutil.rmtree(destination)
215 |     ctx.instance.runtime_properties.pop('destination', None)
216 |     if not docker_ip:
217 |         raise NonRecoverableError("no docker_ip was provided")
218 | 
219 |     if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
220 |         with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s:
221 |             with s:
222 |                 call_sudo("rm -rf {0}".format(destination), fab_ctx=s)
223 | 
--------------------------------------------------------------------------------
/cloudify_docker/tests/__init__.py:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | 
--------------------------------------------------------------------------------
/cloudify_docker/tests/test_plugin.py:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd.
All rights reserved 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | import mock 16 | import unittest 17 | 18 | from os import (path, mkdir) 19 | from uuid import uuid1 20 | 21 | from cloudify.state import current_ctx 22 | # from cloudify.test_utils import workflow_test 23 | from cloudify.mocks import MockCloudifyContext 24 | 25 | from cloudify_docker.tasks import (build_image, 26 | list_images, 27 | remove_image, 28 | list_containers, 29 | list_host_details, 30 | find_host_script_path, 31 | remove_container_files, 32 | prepare_container_files) 33 | 34 | 35 | class TestPlugin(unittest.TestCase): 36 | 37 | def setUp(self): 38 | super(TestPlugin, self).setUp() 39 | 40 | def get_client_conf_props(self): 41 | return { 42 | "client_config": { 43 | "docker_host": "127.0.0.1", 44 | "docker_rest_port": "2375" 45 | } 46 | } 47 | 48 | def mock_ctx(self, 49 | test_name, 50 | test_properties, 51 | test_runtime_properties=None): 52 | test_node_id = uuid1() 53 | ctx = MockCloudifyContext( 54 | node_id=test_node_id, 55 | properties=test_properties, 56 | runtime_properties=test_runtime_properties, 57 | ) 58 | return ctx 59 | 60 | def test_list_images(self): 61 | ctx = self.mock_ctx('test_list_images', self.get_client_conf_props()) 62 | current_ctx.set(ctx=ctx) 63 | 64 | images = { 65 | "Image1": { 66 | "Created": 1586389397, 67 | "Id": "sha256:ef5bbc24923e" 68 | } 69 | } 70 | 71 | mock_images_list = mock.Mock() 72 | mock_images_list.images.list.return_value = images 73 | mock_client = mock.MagicMock(return_value=mock_images_list) 74 | 75 | with mock.patch('docker.DockerClient', mock_client): 76 | kwargs = { 77 | 'ctx': ctx 78 | } 79 | list_images(**kwargs) 80 | self.assertEqual(ctx.instance.runtime_properties['images'], 81 | images) 82 | 83 | def test_list_host_details(self): 84 | ctx = self.mock_ctx('test_list_host_details', 85 | self.get_client_conf_props()) 86 | current_ctx.set(ctx=ctx) 87 | 88 | details = { 89 | "ID": "PVLH:WS43:SHQI:BBBK:PQKO:N3LP:GKNK:3AHN:DVHD", 90 | "Containers": 101, 91 | "ContainersRunning": 0, 92 | "ContainersPaused": 0, 93 | "ContainersStopped": 101, 94 | "Images": 35 95 | } 96 | 97 | mock_host_details_list = mock.Mock() 98 | mock_host_details_list.info.return_value = details 99 | mock_client = mock.MagicMock(return_value=mock_host_details_list) 100 | 101 | with mock.patch('docker.DockerClient', mock_client): 102 | kwargs = { 103 | 'ctx': ctx 104 | } 105 | list_host_details(**kwargs) 106 | self.assertEqual(ctx.instance.runtime_properties['host_details'], 107 | details) 108 | 109 | def test_list_containers(self): 110 | ctx = self.mock_ctx('test_list_containers', 111 | self.get_client_conf_props()) 112 | current_ctx.set(ctx=ctx) 113 | 114 | containers = { 115 | "Contianer1": { 116 | "Created": 1586389397, 117 | "Id": "sha256:e2231923e" 118 | } 119 | } 120 | 121 | mock_containers_list = mock.Mock() 122 | mock_containers_list.containers.list.return_value = containers 123 | mock_client = mock.MagicMock(return_value=mock_containers_list) 124 
| 125 | with mock.patch('docker.DockerClient', mock_client): 126 | kwargs = { 127 | 'ctx': ctx 128 | } 129 | list_containers(**kwargs) 130 | self.assertEqual(ctx.instance.runtime_properties['contianers'], 131 | containers) 132 | 133 | def test_prepare_container_files(self): 134 | docker_host = "127.0.0.1" 135 | source = "/tmp/source" 136 | if not path.exists(source): 137 | mkdir(source) 138 | dummy_file_name = str(uuid1()) 139 | dummy_file = path.join(source, dummy_file_name) 140 | with open(dummy_file, 'w') as outfile: 141 | outfile.write("dummy stuff") 142 | destination = "/tmp/destination" 143 | if not path.exists(destination): 144 | mkdir(destination) 145 | resource_config_test = { 146 | "resource_config": { 147 | "docker_machine": { 148 | "docker_ip": docker_host, 149 | "docker_user": "centos", 150 | "docker_key": "----RSA----", 151 | }, 152 | "source": source, 153 | "destination": destination, 154 | } 155 | } 156 | 157 | ctx = self.mock_ctx('test_prepare_container_files', 158 | resource_config_test) 159 | current_ctx.set(ctx=ctx) 160 | 161 | prepare_container_files(ctx) 162 | self.assertEqual( 163 | ctx.instance.runtime_properties['destination'], destination) 164 | self.assertEqual( 165 | ctx.instance.runtime_properties['docker_host'], docker_host) 166 | self.assertTrue(path.isfile(path.join(destination, dummy_file_name))) 167 | 168 | def test_remove_container_files(self): 169 | docker_host = "127.0.0.1" 170 | source = "/tmp/source" 171 | destination = "/tmp/destination" 172 | resource_config_test = { 173 | "resource_config": { 174 | "docker_machine": { 175 | "docker_ip": docker_host, 176 | "docker_user": "centos", 177 | "docker_key": "----RSA----", 178 | }, 179 | "source": source, 180 | "destination": destination, 181 | } 182 | } 183 | runtime_properties_test = { 184 | "destination": destination, 185 | } 186 | ctx = self.mock_ctx('test_remove_container_files', 187 | resource_config_test, 188 | runtime_properties_test) 189 | current_ctx.set(ctx=ctx) 190 | 191 | remove_container_files(ctx) 192 | self.assertIsNone( 193 | ctx.instance.runtime_properties.get('destination', None)) 194 | self.assertFalse(path.exists(destination)) 195 | self.assertFalse(path.exists(source)) 196 | 197 | def test_build_image(self): 198 | node_props = self.get_client_conf_props() 199 | node_props.update({ 200 | "resource_config": { 201 | "image_content": "FROM amd64/centos:7", 202 | "tag": "test:1.0" 203 | } 204 | }) 205 | build_result = [ 206 | {"stream": "Step 1/1 : FROM amd64/centos:7"}, 207 | {"stream": "\n"}, 208 | {"stream": " ---\u003e 5e35e350aded\n"} 209 | ] 210 | build_result_prop = "" 211 | for chunk in build_result: 212 | build_result_prop += "{0}\n".format(chunk) 213 | 214 | image_get = [{ 215 | "Created": 1586512602, 216 | "Labels": { 217 | "org.label-schema.name": "CentOS Base Image", 218 | "org.label-schema.schema-version": "1.0", 219 | "org.label-schema.license": "GPLv2", 220 | "org.label-schema.build-date": "20191001", 221 | "org.label-schema.vendor": "CentOS" 222 | }, 223 | "VirtualSize": 320490159, 224 | "SharedSize": -1, 225 | "ParentId": "sha256:683822edc367c7f2d5d5e005fab15e749428efee", 226 | "Size": 320490159, 227 | "RepoDigests": None, 228 | "Id": "sha256:e9abf53b02b1e1fbba06a8ea92c889a2b8de719", 229 | "Containers": -1, 230 | "RepoTags": [ 231 | "test:1.0" 232 | ] 233 | } 234 | ] 235 | ctx = self.mock_ctx('test_build_image', node_props) 236 | current_ctx.set(ctx=ctx) 237 | 238 | mock_images = mock.Mock() 239 | mock_images.images.build.return_value = ("Id", iter(build_result)) 240 | 
mock_images.images.get.return_value = image_get 241 | mock_client = mock.MagicMock(return_value=mock_images) 242 | 243 | with mock.patch('docker.DockerClient', mock_client): 244 | kwargs = { 245 | 'ctx': ctx 246 | } 247 | build_image(**kwargs) 248 | self.assertEqual( 249 | ctx.instance.runtime_properties['build_result'], 250 | build_result_prop) 251 | self.assertEqual( 252 | ctx.instance.runtime_properties['image'], 253 | repr(image_get)) 254 | 255 | def test_remove_image(self): 256 | node_props = self.get_client_conf_props() 257 | node_props.update({ 258 | "resource_config": { 259 | "image_content": "FROM amd64/centos:7", 260 | "tag": "test:1.0" 261 | } 262 | }) 263 | build_result = [{"stream": "Step 1/1 : FROM amd64/centos:7"}, 264 | {"stream": "\n"}, 265 | {"stream": " ---\u003e 5e35e350aded\n"}] 266 | build_result_prop = "" 267 | for chunk in iter(build_result): 268 | build_result_prop += "{0}\n".format(chunk) 269 | runtime_properties_test = { 270 | "build_result": build_result_prop, 271 | } 272 | 273 | ctx = self.mock_ctx('test_remove_image', 274 | node_props, 275 | runtime_properties_test) 276 | current_ctx.set(ctx=ctx) 277 | 278 | mock_images = mock.Mock() 279 | mock_images.images.remove = mock.Mock() 280 | mock_client = mock.MagicMock(return_value=mock_images) 281 | 282 | with mock.patch('docker.DockerClient', 283 | mock_client): 284 | kwargs = { 285 | 'ctx': ctx 286 | } 287 | remove_image(**kwargs) 288 | self.assertIsNone( 289 | ctx.instance.runtime_properties.get('build_result', None)) 290 | 291 | def test_if_volume_mapping_in_script(self): 292 | containers = { 293 | "Contianer1": { 294 | "Created": 1586389397, 295 | "Id": "sha256:e2231923e" 296 | } 297 | } 298 | 299 | mock_containers_get = mock.Mock() 300 | mock_containers_get.containers.get.return_value = \ 301 | containers['Contianer1'] 302 | mock_client = mock.MagicMock(return_value=mock_containers_get) 303 | 304 | command = 'ansible-playbook -i hosts /uninstall-playbooks/delete.yaml' 305 | container_args = { 306 | 'volumes_mapping': ['/tmp/tmpfijhaktv', '/tmp/tmpcz0u65ro'], 307 | 'volumes': ['/install-playbooks', 308 | '/uninstall-playbooks'] 309 | } 310 | self.assertEqual( 311 | find_host_script_path(mock_client, 'sha256:e2231923e', 312 | command, container_args), 313 | '/tmp/tmpcz0u65ro/delete.yaml') 314 | 315 | def test_if_volume_mapping_not_in_script(self): 316 | containers = { 317 | "Contianer1": { 318 | "Created": 1586389397, 319 | "Id": "sha256:e2231923e" 320 | } 321 | } 322 | 323 | mock_containers_get = mock.Mock() 324 | mock_containers_get.containers.get.return_value = \ 325 | containers['Contianer1'] 326 | mock_client = mock.MagicMock(return_value=mock_containers_get) 327 | 328 | command = 'ansible-playbook -i hosts /dummy_location/delete.yaml' 329 | container_args = { 330 | 'volumes_mapping': ['/tmp/tmpfijhaktv', '/tmp/tmpcz0u65ro'], 331 | 'volumes': ['/install-playbooks', 332 | '/uninstall-playbooks'] 333 | } 334 | self.assertIsNone(find_host_script_path(mock_client, 335 | 'sha256:e2231923e', 336 | command, 337 | container_args)) 338 | -------------------------------------------------------------------------------- /plugin.yaml: -------------------------------------------------------------------------------- 1 | plugins: 2 | docker: 3 | executor: central_deployment_agent 4 | package_name: cloudify-docker-plugin 5 | package_version: 2.0.16 6 | dsl_definitions: 7 | client_config: 8 | client_config: &id001 9 | type: cloudify.types.docker.ClientConfig 10 | required: false 11 | docker_machine: 12 | docker_machine: 
&id002 13 | type: cloudify.types.docker.DockerMachineConfig 14 | required: false 15 | playbook_config: 16 | ansible_playbook_executable_path: &id003 17 | type: string 18 | default: ansible-playbook 19 | playbook_source_path: &id004 20 | type: string 21 | default: '' 22 | playbook_path: &id005 23 | type: string 24 | default: '' 25 | site_yaml_path: &id006 26 | type: string 27 | default: '' 28 | additional_playbook_files: &id007 29 | type: list 30 | default: [] 31 | sources: &id008 32 | default: {} 33 | run_data: &id009 34 | default: {} 35 | sensitive_keys: &id010 36 | type: list 37 | default: 38 | - ansible_password 39 | options_config: &id011 40 | default: {} 41 | ansible_env_vars: &id012 42 | default: 43 | ANSIBLE_HOST_KEY_CHECKING: 'False' 44 | ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED: 'False' 45 | debug_level: &id013 46 | type: integer 47 | default: 2 48 | additional_args: &id014 49 | type: string 50 | default: '' 51 | save_playbook: &id015 52 | type: boolean 53 | default: false 54 | remerge_sources: &id016 55 | type: boolean 56 | default: false 57 | ansible_become: &id017 58 | type: boolean 59 | default: false 60 | playbook_inputs: 61 | ansible_playbook_executable_path: &id018 62 | default: { get_property: [SELF, ansible_playbook_executable_path] } 63 | playbook_source_path: &id019 64 | default: { get_property: [SELF, playbook_source_path] } 65 | playbook_path: &id020 66 | default: { get_property: [SELF, playbook_path] } 67 | site_yaml_path: &id021 68 | default: { get_property: [SELF, site_yaml_path] } 69 | save_playbook: &id022 70 | default: { get_property: [SELF, save_playbook] } 71 | remerge_sources: &id023 72 | default: { get_property: [SELF, remerge_sources] } 73 | sources: &id024 74 | default: { get_property: [SELF, sources] } 75 | run_data: &id025 76 | default: { get_property: [SELF, run_data] } 77 | sensitive_keys: &id026 78 | default: { get_property: [SELF, sensitive_keys] } 79 | options_config: &id027 80 | default: { get_property: [SELF, options_config] } 81 | ansible_env_vars: &id028 82 | default: { get_property: [SELF, ansible_env_vars] } 83 | debug_level: &id029 84 | default: { get_property: [SELF, debug_level] } 85 | additional_args: &id030 86 | default: { get_property: [SELF, additional_args] } 87 | data_types: 88 | cloudify.types.docker.DockerMachineConfig: 89 | properties: 90 | docker_ip: 91 | type: string 92 | default: '' 93 | docker_user: 94 | type: string 95 | default: '' 96 | docker_key: 97 | type: string 98 | default: '' 99 | container_volume: 100 | type: string 101 | default: '' 102 | cloudify.types.docker.DockerInstallationConfig: 103 | properties: 104 | install_url: 105 | type: string 106 | default: https://get.docker.com 107 | install_script: 108 | type: string 109 | default: https://raw.githubusercontent.com/cloudify-cosmo/cloudify-docker-plugin/master/cloudify_docker/resources/post-install.sh 110 | install_with_sudo: 111 | type: boolean 112 | default: true 113 | offline_installation: 114 | type: boolean 115 | default: false 116 | package_tar_path: 117 | type: string 118 | default: '' 119 | post_install_script_path: 120 | type: string 121 | default: '' 122 | installation_dir: 123 | type: string 124 | default: '' 125 | cloudify.types.docker.ClientConfig: 126 | properties: 127 | docker_host: 128 | type: string 129 | default: '' 130 | docker_rest_port: 131 | type: string 132 | default: '' 133 | docker_sock_file: 134 | type: string 135 | default: '' 136 | cloudify.types.docker.Image: 137 | properties: 138 | image_content: 139 | type: string 140 | default: '' 141 | 
tag: 142 | type: string 143 | default: '' 144 | pull_image: 145 | type: boolean 146 | default: false 147 | all_tags: 148 | type: boolean 149 | default: false 150 | cloudify.types.docker.Container: 151 | properties: 152 | image_tag: 153 | type: string 154 | default: '' 155 | container_args: 156 | type: dict 157 | default: {} 158 | cloudify.types.docker.ContainerFiles: 159 | properties: 160 | docker_machine: 161 | type: cloudify.types.docker.DockerMachineConfig 162 | default: {} 163 | source: 164 | type: string 165 | default: '' 166 | destination: 167 | type: string 168 | default: '' 169 | extra_files: 170 | type: list 171 | default: [] 172 | ansible_sources: 173 | type: dict 174 | default: {} 175 | terraform_sources: 176 | type: dict 177 | default: {} 178 | cloudify.types.terraform.Backend: 179 | properties: 180 | name: 181 | type: string 182 | required: false 183 | options: 184 | required: false 185 | cloudify.types.terraform.RootModule: 186 | properties: 187 | source: 188 | type: string 189 | required: true 190 | backend: 191 | type: cloudify.types.terraform.Backend 192 | default: {} 193 | variables: 194 | required: false 195 | default: {} 196 | environment_variables: 197 | required: false 198 | default: {} 199 | node_types: 200 | cloudify.nodes.docker.images: 201 | derived_from: cloudify.nodes.Root 202 | properties: 203 | client_config: *id001 204 | interfaces: 205 | cloudify.interfaces.lifecycle: 206 | create: 207 | implementation: docker.cloudify_docker.tasks.list_images 208 | cloudify.nodes.docker.containers: 209 | derived_from: cloudify.nodes.Root 210 | properties: 211 | client_config: *id001 212 | interfaces: 213 | cloudify.interfaces.lifecycle: 214 | create: 215 | implementation: docker.cloudify_docker.tasks.list_containers 216 | cloudify.nodes.docker.host: 217 | derived_from: cloudify.nodes.Root 218 | properties: 219 | docker_machine: *id002 220 | resource_config: 221 | type: cloudify.types.docker.DockerInstallationConfig 222 | required: true 223 | interfaces: 224 | cloudify.interfaces.lifecycle: 225 | create: 226 | implementation: docker.cloudify_docker.tasks.install_docker 227 | delete: 228 | implementation: docker.cloudify_docker.tasks.uninstall_docker 229 | cloudify.nodes.docker.host_details: 230 | derived_from: cloudify.nodes.Root 231 | properties: 232 | client_config: *id001 233 | interfaces: 234 | cloudify.interfaces.lifecycle: 235 | create: 236 | implementation: docker.cloudify_docker.tasks.list_host_details 237 | cloudify.nodes.docker.image: 238 | derived_from: cloudify.nodes.Root 239 | properties: 240 | client_config: *id001 241 | resource_config: 242 | type: cloudify.types.docker.Image 243 | required: true 244 | interfaces: 245 | cloudify.interfaces.lifecycle: 246 | create: 247 | implementation: docker.cloudify_docker.tasks.build_image 248 | delete: 249 | implementation: docker.cloudify_docker.tasks.remove_image 250 | cloudify.nodes.docker.container: 251 | derived_from: cloudify.nodes.Root 252 | properties: 253 | client_config: *id001 254 | resource_config: 255 | type: cloudify.types.docker.Container 256 | required: true 257 | interfaces: 258 | cloudify.interfaces.lifecycle: 259 | create: 260 | implementation: docker.cloudify_docker.tasks.create_container 261 | stop: 262 | implementation: docker.cloudify_docker.tasks.stop_container 263 | inputs: 264 | stop_command: 265 | type: string 266 | default: '' 267 | delete: 268 | implementation: docker.cloudify_docker.tasks.remove_container 269 | cloudify.nodes.docker.container_files: 270 | derived_from: cloudify.nodes.Root 271 
| properties: 272 | resource_config: 273 | type: cloudify.types.docker.ContainerFiles 274 | required: true 275 | interfaces: 276 | cloudify.interfaces.lifecycle: 277 | create: 278 | implementation: docker.cloudify_docker.tasks.prepare_container_files 279 | delete: 280 | implementation: docker.cloudify_docker.tasks.remove_container_files 281 | cloudify.nodes.docker.ansible_playbook: 282 | derived_from: cloudify.nodes.Root 283 | properties: 284 | ansible_playbook_executable_path: *id003 285 | playbook_source_path: *id004 286 | playbook_path: *id005 287 | site_yaml_path: *id006 288 | additional_playbook_files: *id007 289 | sources: *id008 290 | run_data: *id009 291 | sensitive_keys: *id010 292 | options_config: *id011 293 | ansible_env_vars: *id012 294 | debug_level: *id013 295 | additional_args: *id014 296 | save_playbook: *id015 297 | remerge_sources: *id016 298 | ansible_become: *id017 299 | docker_machine: *id002 300 | interfaces: 301 | cloudify.interfaces.lifecycle: 302 | precreate: 303 | implementation: docker.cloudify_docker.ansible.set_playbook_config 304 | inputs: 305 | ansible_playbook_executable_path: *id018 306 | playbook_source_path: *id019 307 | playbook_path: *id020 308 | site_yaml_path: *id021 309 | save_playbook: *id022 310 | remerge_sources: *id023 311 | sources: *id024 312 | run_data: *id025 313 | sensitive_keys: *id026 314 | options_config: *id027 315 | ansible_env_vars: *id028 316 | debug_level: *id029 317 | additional_args: *id030 318 | create: 319 | implementation: docker.cloudify_docker.ansible.create_ansible_playbook 320 | delete: 321 | implementation: docker.cloudify_docker.ansible.remove_ansible_playbook 322 | cloudify.nodes.docker.terraform_module: 323 | derived_from: cloudify.nodes.Root 324 | properties: 325 | docker_machine: *id002 326 | terraform_plugins: 327 | default: [] 328 | required: true 329 | resource_config: 330 | type: cloudify.types.terraform.RootModule 331 | required: true 332 | interfaces: 333 | cloudify.interfaces.lifecycle: 334 | create: 335 | implementation: docker.cloudify_docker.terraform.prepare_terraform_files 336 | delete: 337 | implementation: docker.cloudify_docker.terraform.remove_terraform_files 338 | -------------------------------------------------------------------------------- /plugin_1_4.yaml: -------------------------------------------------------------------------------- 1 | plugins: 2 | docker: 3 | executor: central_deployment_agent 4 | package_name: 'cloudify-docker-plugin' 5 | package_version: '2.0.16' 6 | 7 | dsl_definitions: 8 | 9 | client_config: &client_config 10 | client_config: 11 | type: cloudify.types.docker.ClientConfig 12 | description: Your Docker client configuration. 13 | required: false 14 | 15 | docker_machine: &docker_machine 16 | docker_machine: 17 | type: cloudify.types.docker.DockerMachineConfig 18 | description: Docker Machine IP,User,Private_key 19 | required: false 20 | 21 | playbook_config: &playbook_config 22 | ansible_playbook_executable_path: 23 | type: string 24 | default: "ansible-playbook" 25 | description: > 26 | A full path to your ansible-playbook executable if you don't want 27 | to use the executable bundled with the plugin. 28 | playbook_source_path: 29 | type: string 30 | default: "" 31 | description: > 32 | A full path/URL that contains the playbook specified in playbook_path 33 | or site_yaml_path.
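# For illustration (the values below are hypothetical, not defaults shipped with the plugin):
#   playbook_source_path: https://example.com/playbooks.zip
#   playbook_path: webserver/site.yaml
# would download the archive and execute webserver/site.yaml from inside it.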
34 | playbook_path: 35 | type: string 36 | default: "" 37 | description: > 38 | A path to your `site.yaml` or `main.yaml` in your 39 | Ansible playbook, relative to the blueprint or to playbook_source_path; 40 | if playbook_source_path is a URL to an archive, the path is relative to the archive root. 41 | site_yaml_path: 42 | type: string 43 | default: "" 44 | description: > 45 | DEPRECATED. 46 | A path to your `site.yaml` or `main.yaml` in your 47 | Ansible playbook, relative to the blueprint or to playbook_source_path; 48 | if playbook_source_path is a URL to an archive, the path is relative to the archive root. 49 | additional_playbook_files: 50 | type: list 51 | default: [] 52 | description: > 53 | A list of string paths to blueprint resources that you 54 | would like to download to the playbook directory. 55 | If you use this variable, you must list all of the paths 56 | that you expect to download. 57 | sources: 58 | default: {} 59 | description: > 60 | Your inventory sources, either YAML or a path to a file. 61 | If not provided, the inventory will be taken from the `sources` 62 | runtime property. 63 | run_data: 64 | default: {} 65 | description: > 66 | Variable values. 67 | sensitive_keys: 68 | type: list 69 | description: Keys that you want us to obscure. 70 | default: 71 | - ansible_password 72 | options_config: 73 | default: {} 74 | description: > 75 | Command-line options, such as `tags` or `skip_tags`. 76 | ansible_env_vars: 77 | default: 78 | ANSIBLE_HOST_KEY_CHECKING: "False" 79 | # On Ansible 2.8.x the "INVALID_TASK_ATTRIBUTE_FAILED" default value 80 | # changed to "True", which causes playbook runs to fail 81 | ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED: "False" 82 | description: > 83 | A dictionary of environment variables to set. 84 | debug_level: 85 | type: integer 86 | default: 2 87 | description: > 88 | Debug level. 89 | additional_args: 90 | type: string 91 | description: > 92 | Additional args that you want to use, for example, '-c local'. 93 | default: '' 94 | save_playbook: 95 | type: boolean 96 | description: > 97 | Save the playbook after the action. 98 | default: false 99 | remerge_sources: 100 | type: boolean 101 | description: > 102 | Update sources on the target node. 103 | default: false 104 | ansible_become: 105 | type: boolean 106 | description: > 107 | A boolean value, `true` or `false`, whether 108 | to run the playbook with privilege escalation.
109 | default: false 110 | 111 | playbook_inputs: &playbook_inputs 112 | ansible_playbook_executable_path: 113 | default: { get_property: [SELF, ansible_playbook_executable_path] } 114 | playbook_source_path: 115 | default: { get_property: [SELF, playbook_source_path] } 116 | playbook_path: 117 | default: { get_property: [SELF, playbook_path] } 118 | site_yaml_path: 119 | default: { get_property: [SELF, site_yaml_path] } 120 | save_playbook: 121 | default: { get_property: [SELF, save_playbook] } 122 | remerge_sources: 123 | default: { get_property: [SELF, remerge_sources] } 124 | sources: 125 | default: { get_property: [SELF, sources] } 126 | run_data: 127 | default: { get_property: [SELF, run_data] } 128 | sensitive_keys: 129 | default: { get_property: [SELF, sensitive_keys] } 130 | options_config: 131 | default: { get_property: [SELF, options_config] } 132 | ansible_env_vars: 133 | default: { get_property: [SELF, ansible_env_vars] } 134 | debug_level: 135 | default: { get_property: [SELF, debug_level] } 136 | additional_args: 137 | default: { get_property: [SELF, additional_args] } 138 | 139 | data_types: 140 | 141 | cloudify.types.docker.DockerMachineConfig: 142 | properties: 143 | docker_ip: 144 | description: Docker Machine IP 145 | type: string 146 | default: '' 147 | docker_user: 148 | description: Docker Machine User 149 | type: string 150 | default: '' 151 | docker_key: 152 | description: Docker Machine Private Key 153 | type: string 154 | default: '' 155 | container_volume: 156 | description: Docker Container volume_mapping 157 | type: string 158 | default: '' 159 | 160 | cloudify.types.docker.DockerInstallationConfig: 161 | properties: 162 | install_url: 163 | description: Docker Installation Link 164 | type: string 165 | default: 'https://get.docker.com' 166 | install_script: 167 | description: Docker Installation script 168 | type: string 169 | default: 'https://raw.githubusercontent.com/cloudify-cosmo/cloudify-docker-plugin/master/cloudify_docker/resources/post-install.sh' 170 | install_with_sudo: 171 | type: boolean 172 | description: Use sudo to run the installation 173 | default: true 174 | offline_installation: 175 | type: boolean 176 | description: Install Docker when the VM has no internet access 177 | default: false 178 | package_tar_path: 179 | description: | 180 | Docker Installation Tar path (must be located on the machine where Docker is installed) 181 | Required for offline installation 182 | type: string 183 | default: '' 184 | post_install_script_path: 185 | description: | 186 | Docker Installation post script path 187 | Required for offline installation 188 | type: string 189 | default: '' 190 | installation_dir: 191 | description: | 192 | Docker Installation path 193 | Required for offline installation 194 | type: string 195 | default: '' 196 | 197 | cloudify.types.docker.ClientConfig: 198 | properties: 199 | docker_host: 200 | description: Docker Machine IP to connect to. 201 | type: string 202 | default: '' 203 | docker_rest_port: 204 | description: Docker Machine rest port. 205 | type: string 206 | default: '' 207 | docker_sock_file: 208 | description: > 209 | if docker is local you can leverage the sock file.
210 | the default value would be /var/run/docker.sock 211 | type: string 212 | default: '' 213 | 214 | cloudify.types.docker.Image: 215 | properties: 216 | image_content: 217 | description: Docker image to build 218 | type: string 219 | default: '' 220 | tag: 221 | description: Docker image tag 222 | type: string 223 | default: '' 224 | pull_image: 225 | type: boolean 226 | description: Pull image 227 | default: false 228 | all_tags: 229 | type: boolean 230 | description: Pull all tags (only if pull_image is True) 231 | default: false 232 | 233 | cloudify.types.docker.Container: 234 | properties: 235 | image_tag: 236 | description: Docker image tag to build container 237 | type: string 238 | default: '' 239 | container_args: 240 | description: > 241 | dict of arguments to pass to container when run 242 | check this URL for details: https://tinyurl.com/v8url54 243 | type: dict 244 | default: {} 245 | 246 | cloudify.types.docker.ContainerFiles: 247 | properties: 248 | docker_machine: 249 | description: Docker Machine IP,User,Private_key 250 | type: cloudify.types.docker.DockerMachineConfig 251 | default: {} 252 | source: 253 | description: Files location that will be mapped to container 254 | type: string 255 | default: '' 256 | destination: 257 | description: > 258 | Location to extract the files into, 259 | that will be mapped to container volume later 260 | type: string 261 | default: '' 262 | extra_files: 263 | description: More files to add to source before it goes to destination 264 | type: list 265 | default: [] 266 | ansible_sources: 267 | description: special case for ansible sources 268 | type: dict 269 | default: {} 270 | terraform_sources: 271 | description: special case for terraform sources 272 | type: dict 273 | default: {} 274 | 275 | cloudify.types.terraform.Backend: 276 | properties: 277 | name: 278 | type: string 279 | description: Some name. 280 | required: False 281 | options: 282 | description: Should be a dictionary of key/values. 283 | required: False 284 | 285 | cloudify.types.terraform.RootModule: 286 | properties: 287 | source: 288 | type: string 289 | description: > 290 | Path or URL to the ZIP file containing the Terraform project. 291 | If this is a path, then it must be relative to the blueprint's root. 292 | required: true 293 | backend: 294 | type: cloudify.types.terraform.Backend 295 | description: > 296 | If a backend is not defined in source, 297 | and you want to use a specific backend, define that here. 298 | default: {} 299 | variables: 300 | description: A dictionary of variables. 301 | required: false 302 | default: {} 303 | environment_variables: 304 | description: A dictionary of environment variables. 
305 | required: false 306 | default: {} 307 | 308 | node_types: 309 | 310 | cloudify.nodes.docker.images: 311 | derived_from: cloudify.nodes.Root 312 | properties: 313 | <<: *client_config 314 | interfaces: 315 | cloudify.interfaces.lifecycle: 316 | create: 317 | implementation: docker.cloudify_docker.tasks.list_images 318 | 319 | cloudify.nodes.docker.containers: 320 | derived_from: cloudify.nodes.Root 321 | properties: 322 | <<: *client_config 323 | interfaces: 324 | cloudify.interfaces.lifecycle: 325 | create: 326 | implementation: docker.cloudify_docker.tasks.list_containers 327 | 328 | cloudify.nodes.docker.host: 329 | derived_from: cloudify.nodes.Root 330 | properties: 331 | <<: *docker_machine 332 | resource_config: 333 | type: cloudify.types.docker.DockerInstallationConfig 334 | description: Docker Installation type 335 | required: true 336 | interfaces: 337 | cloudify.interfaces.lifecycle: 338 | create: 339 | implementation: docker.cloudify_docker.tasks.install_docker 340 | delete: 341 | implementation: docker.cloudify_docker.tasks.uninstall_docker 342 | 343 | 344 | cloudify.nodes.docker.host_details: 345 | derived_from: cloudify.nodes.Root 346 | properties: 347 | <<: *client_config 348 | interfaces: 349 | cloudify.interfaces.lifecycle: 350 | create: 351 | implementation: docker.cloudify_docker.tasks.list_host_details 352 | 353 | cloudify.nodes.docker.image: 354 | derived_from: cloudify.nodes.Root 355 | properties: 356 | <<: *client_config 357 | resource_config: 358 | type: cloudify.types.docker.Image 359 | description: Docker Image type 360 | required: true 361 | interfaces: 362 | cloudify.interfaces.lifecycle: 363 | create: 364 | implementation: docker.cloudify_docker.tasks.build_image 365 | delete: 366 | implementation: docker.cloudify_docker.tasks.remove_image 367 | 368 | cloudify.nodes.docker.container: 369 | derived_from: cloudify.nodes.Root 370 | properties: 371 | <<: *client_config 372 | resource_config: 373 | type: cloudify.types.docker.Container 374 | description: Docker Container type 375 | required: true 376 | interfaces: 377 | cloudify.interfaces.lifecycle: 378 | create: 379 | implementation: docker.cloudify_docker.tasks.create_container 380 | stop: 381 | implementation: docker.cloudify_docker.tasks.stop_container 382 | inputs: 383 | stop_command: 384 | type: string 385 | default: "" 386 | delete: 387 | implementation: docker.cloudify_docker.tasks.remove_container 388 | 389 | cloudify.nodes.docker.container_files: 390 | derived_from: cloudify.nodes.Root 391 | properties: 392 | resource_config: 393 | type: cloudify.types.docker.ContainerFiles 394 | description: Docker Container Files type 395 | required: true 396 | interfaces: 397 | cloudify.interfaces.lifecycle: 398 | create: 399 | implementation: docker.cloudify_docker.tasks.prepare_container_files 400 | delete: 401 | implementation: docker.cloudify_docker.tasks.remove_container_files 402 | 403 | cloudify.nodes.docker.ansible_playbook: 404 | derived_from: cloudify.nodes.Root 405 | properties: 406 | <<: *playbook_config 407 | <<: *docker_machine 408 | interfaces: 409 | cloudify.interfaces.lifecycle: 410 | precreate: 411 | implementation: docker.cloudify_docker.ansible.set_playbook_config 412 | inputs: 413 | <<: *playbook_inputs 414 | create: 415 | implementation: docker.cloudify_docker.ansible.create_ansible_playbook 416 | delete: 417 | implementation: docker.cloudify_docker.ansible.remove_ansible_playbook 418 | 419 | cloudify.nodes.docker.terraform_module: 420 | derived_from: cloudify.nodes.Root 421 | properties: 422 
| <<: *docker_machine 423 | terraform_plugins: 424 | description: Terraform Plugins to install 425 | default: [] 426 | required: true 427 | resource_config: 428 | type: cloudify.types.terraform.RootModule 429 | required: true 430 | interfaces: 431 | cloudify.interfaces.lifecycle: 432 | create: 433 | implementation: docker.cloudify_docker.terraform.prepare_terraform_files 434 | delete: 435 | implementation: docker.cloudify_docker.terraform.remove_terraform_files 436 | 437 | blueprint_labels: 438 | obj-type: 439 | values: 440 | - docker 441 | 442 | labels: 443 | obj-type: 444 | values: 445 | - docker 446 | -------------------------------------------------------------------------------- /plugin_1_5.yaml: -------------------------------------------------------------------------------- 1 | plugins: 2 | docker: 3 | executor: central_deployment_agent 4 | package_name: 'cloudify-docker-plugin' 5 | package_version: '2.0.16' 6 | 7 | dsl_definitions: 8 | 9 | client_config: &client_config 10 | client_config: 11 | type: cloudify.types.docker.ClientConfig 12 | description: Your Docker client configuration. 13 | required: false 14 | 15 | docker_machine: &docker_machine 16 | docker_machine: 17 | type: cloudify.types.docker.DockerMachineConfig 18 | description: Docker Machine IP,User,Private_key 19 | required: false 20 | 21 | playbook_config: &playbook_config 22 | ansible_playbook_executable_path: 23 | type: string 24 | default: "ansible-playbook" 25 | description: > 26 | A full path to your ansible-playbook executable if you don't want 27 | to use the executable bundled with the plugin. 28 | playbook_source_path: 29 | type: string 30 | default: "" 31 | description: > 32 | A full path/URL that contains the playbook specified in playbook_path 33 | or site_yaml_path. 34 | playbook_path: 35 | type: string 36 | default: "" 37 | description: > 38 | A path to your `site.yaml` or `main.yaml` in your 39 | Ansible playbook, relative to the blueprint or to playbook_source_path; 40 | if playbook_source_path is a URL to an archive, the path is relative to the archive root. 41 | site_yaml_path: 42 | type: string 43 | default: "" 44 | description: > 45 | DEPRECATED. 46 | A path to your `site.yaml` or `main.yaml` in your 47 | Ansible playbook, relative to the blueprint or to playbook_source_path; 48 | if playbook_source_path is a URL to an archive, the path is relative to the archive root. 49 | additional_playbook_files: 50 | type: list 51 | default: [] 52 | description: > 53 | A list of string paths to blueprint resources that you 54 | would like to download to the playbook directory. 55 | If you use this variable, you must list all of the paths 56 | that you expect to download. 57 | sources: 58 | default: {} 59 | description: > 60 | Your inventory sources, either YAML or a path to a file. 61 | If not provided, the inventory will be taken from the `sources` 62 | runtime property. 63 | run_data: 64 | default: {} 65 | description: > 66 | Variable values. 67 | sensitive_keys: 68 | type: list 69 | description: Keys that you want us to obscure. 70 | default: 71 | - ansible_password 72 | options_config: 73 | default: {} 74 | description: > 75 | Command-line options, such as `tags` or `skip_tags`. 76 | ansible_env_vars: 77 | default: 78 | ANSIBLE_HOST_KEY_CHECKING: "False" 79 | # On Ansible 2.8.x the "INVALID_TASK_ATTRIBUTE_FAILED" default value 80 | # changed to "True", which causes playbook runs to fail 81 | ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED: "False" 82 | description: > 83 | A dictionary of environment variables to set.
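# As an illustration (the callback value is hypothetical, not a plugin default), a blueprint
# could extend these environment variables, e.g.:
#   ansible_env_vars:
#     ANSIBLE_HOST_KEY_CHECKING: "False"
#     ANSIBLE_STDOUT_CALLBACK: "json"
# each key/value pair here is set in the environment of the ansible-playbook process.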
84 | debug_level: 85 | type: integer 86 | default: 2 87 | description: > 88 | Debug level. 89 | additional_args: 90 | type: string 91 | description: > 92 | Additional args that you want to use, for example, '-c local'. 93 | default: '' 94 | save_playbook: 95 | type: boolean 96 | description: > 97 | Save the playbook after the action. 98 | default: false 99 | remerge_sources: 100 | type: boolean 101 | description: > 102 | Update sources on the target node. 103 | default: false 104 | ansible_become: 105 | type: boolean 106 | description: > 107 | A boolean value, `true` or `false`, whether 108 | to run the playbook with privilege escalation. 109 | default: false 110 | 111 | playbook_inputs: &playbook_inputs 112 | ansible_playbook_executable_path: 113 | default: { get_property: [SELF, ansible_playbook_executable_path] } 114 | playbook_source_path: 115 | default: { get_property: [SELF, playbook_source_path] } 116 | playbook_path: 117 | default: { get_property: [SELF, playbook_path] } 118 | site_yaml_path: 119 | default: { get_property: [SELF, site_yaml_path] } 120 | save_playbook: 121 | default: { get_property: [SELF, save_playbook] } 122 | remerge_sources: 123 | default: { get_property: [SELF, remerge_sources] } 124 | sources: 125 | default: { get_property: [SELF, sources] } 126 | run_data: 127 | default: { get_property: [SELF, run_data] } 128 | sensitive_keys: 129 | default: { get_property: [SELF, sensitive_keys] } 130 | options_config: 131 | default: { get_property: [SELF, options_config] } 132 | ansible_env_vars: 133 | default: { get_property: [SELF, ansible_env_vars] } 134 | debug_level: 135 | default: { get_property: [SELF, debug_level] } 136 | additional_args: 137 | default: { get_property: [SELF, additional_args] } 138 | 139 | data_types: 140 | 141 | cloudify.types.docker.DockerMachineConfig: 142 | properties: 143 | docker_ip: 144 | description: Docker Machine IP 145 | type: string 146 | default: '' 147 | docker_user: 148 | description: Docker Machine User 149 | type: string 150 | default: '' 151 | docker_key: 152 | description: Docker Machine Private Key 153 | type: string 154 | default: '' 155 | container_volume: 156 | description: Docker Container volume_mapping 157 | type: string 158 | default: '' 159 | 160 | cloudify.types.docker.DockerInstallationConfig: 161 | properties: 162 | install_url: 163 | description: Docker Installation Link 164 | type: string 165 | default: 'https://get.docker.com' 166 | install_script: 167 | description: Docker Installation script 168 | type: string 169 | default: 'https://raw.githubusercontent.com/cloudify-cosmo/cloudify-docker-plugin/master/cloudify_docker/resources/post-install.sh' 170 | install_with_sudo: 171 | type: boolean 172 | description: Use sudo to run the installation 173 | default: true 174 | offline_installation: 175 | type: boolean 176 | description: Install Docker when the VM has no internet access 177 | default: false 178 | package_tar_path: 179 | description: | 180 | Docker Installation Tar path (must be located on the machine where Docker is installed) 181 | Required for offline installation 182 | type: string 183 | default: '' 184 | post_install_script_path: 185 | description: | 186 | Docker Installation post script path 187 | Required for offline installation 188 | type: string 189 | default: '' 190 | installation_dir: 191 | description: | 192 | Docker Installation path 193 | Required for offline installation 194 | type: string 195 | default: '' 196 | 197 | cloudify.types.docker.ClientConfig: 198 | properties: 199 | docker_host: 200 | description: Docker Machine IP to connect to.
201 | type: string 202 | default: '' 203 | docker_rest_port: 204 | description: Docker Machine rest port. 205 | type: string 206 | default: '' 207 | docker_sock_file: 208 | description: > 209 | if docker is local you can leverage the sock file. 210 | the default value would be /var/run/docker.sock 211 | type: string 212 | default: '' 213 | 214 | cloudify.types.docker.Image: 215 | properties: 216 | image_content: 217 | description: Docker image to build 218 | type: string 219 | default: '' 220 | tag: 221 | description: Docker image tag 222 | type: string 223 | default: '' 224 | pull_image: 225 | type: boolean 226 | description: Pull image 227 | default: false 228 | all_tags: 229 | type: boolean 230 | description: Pull all tags (only if pull_image is True) 231 | default: false 232 | 233 | cloudify.types.docker.Container: 234 | properties: 235 | image_tag: 236 | description: Docker image tag to build container 237 | type: string 238 | default: '' 239 | container_args: 240 | description: > 241 | dict of arguments to pass to container when run 242 | check this URL for details: https://tinyurl.com/v8url54 243 | type: dict 244 | default: {} 245 | 246 | cloudify.types.docker.ContainerFiles: 247 | properties: 248 | docker_machine: 249 | description: Docker Machine IP,User,Private_key 250 | type: cloudify.types.docker.DockerMachineConfig 251 | default: {} 252 | source: 253 | description: Files location that will be mapped to container 254 | type: string 255 | default: '' 256 | destination: 257 | description: > 258 | Location to extract the files into, 259 | that will be mapped to container volume later 260 | type: string 261 | default: '' 262 | extra_files: 263 | description: More files to add to source before it goes to destination 264 | type: list 265 | default: [] 266 | ansible_sources: 267 | description: special case for ansible sources 268 | type: dict 269 | default: {} 270 | terraform_sources: 271 | description: special case for terraform sources 272 | type: dict 273 | default: {} 274 | 275 | cloudify.types.terraform.Backend: 276 | properties: 277 | name: 278 | type: string 279 | description: Some name. 280 | required: False 281 | options: 282 | description: Should be a dictionary of key/values. 283 | required: False 284 | 285 | cloudify.types.terraform.RootModule: 286 | properties: 287 | source: 288 | type: string 289 | description: > 290 | Path or URL to the ZIP file containing the Terraform project. 291 | If this is a path, then it must be relative to the blueprint's root. 292 | required: true 293 | backend: 294 | type: cloudify.types.terraform.Backend 295 | description: > 296 | If a backend is not defined in source, 297 | and you want to use a specific backend, define that here. 298 | default: {} 299 | variables: 300 | description: A dictionary of variables. 301 | required: false 302 | default: {} 303 | environment_variables: 304 | description: A dictionary of environment variables. 
305 | required: false 306 | default: {} 307 | 308 | node_types: 309 | 310 | cloudify.nodes.docker.images: 311 | derived_from: cloudify.nodes.Root 312 | properties: 313 | <<: *client_config 314 | interfaces: 315 | cloudify.interfaces.lifecycle: 316 | create: 317 | implementation: docker.cloudify_docker.tasks.list_images 318 | 319 | cloudify.nodes.docker.containers: 320 | derived_from: cloudify.nodes.Root 321 | properties: 322 | <<: *client_config 323 | interfaces: 324 | cloudify.interfaces.lifecycle: 325 | create: 326 | implementation: docker.cloudify_docker.tasks.list_containers 327 | 328 | cloudify.nodes.docker.host: 329 | derived_from: cloudify.nodes.Root 330 | properties: 331 | <<: *docker_machine 332 | resource_config: 333 | type: cloudify.types.docker.DockerInstallationConfig 334 | description: Docker Installation type 335 | required: true 336 | interfaces: 337 | cloudify.interfaces.lifecycle: 338 | create: 339 | implementation: docker.cloudify_docker.tasks.install_docker 340 | delete: 341 | implementation: docker.cloudify_docker.tasks.uninstall_docker 342 | 343 | 344 | cloudify.nodes.docker.host_details: 345 | derived_from: cloudify.nodes.Root 346 | properties: 347 | <<: *client_config 348 | interfaces: 349 | cloudify.interfaces.lifecycle: 350 | create: 351 | implementation: docker.cloudify_docker.tasks.list_host_details 352 | 353 | cloudify.nodes.docker.image: 354 | derived_from: cloudify.nodes.Root 355 | properties: 356 | <<: *client_config 357 | resource_config: 358 | type: cloudify.types.docker.Image 359 | description: Docker Image type 360 | required: true 361 | interfaces: 362 | cloudify.interfaces.lifecycle: 363 | create: 364 | implementation: docker.cloudify_docker.tasks.build_image 365 | delete: 366 | implementation: docker.cloudify_docker.tasks.remove_image 367 | 368 | cloudify.nodes.docker.container: 369 | derived_from: cloudify.nodes.Root 370 | properties: 371 | <<: *client_config 372 | resource_config: 373 | type: cloudify.types.docker.Container 374 | description: Docker Container type 375 | required: true 376 | interfaces: 377 | cloudify.interfaces.lifecycle: 378 | create: 379 | implementation: docker.cloudify_docker.tasks.create_container 380 | stop: 381 | implementation: docker.cloudify_docker.tasks.stop_container 382 | inputs: 383 | stop_command: 384 | type: string 385 | default: "" 386 | delete: 387 | implementation: docker.cloudify_docker.tasks.remove_container 388 | 389 | cloudify.nodes.docker.container_files: 390 | derived_from: cloudify.nodes.Root 391 | properties: 392 | resource_config: 393 | type: cloudify.types.docker.ContainerFiles 394 | description: Docker Container Files type 395 | required: true 396 | interfaces: 397 | cloudify.interfaces.lifecycle: 398 | create: 399 | implementation: docker.cloudify_docker.tasks.prepare_container_files 400 | delete: 401 | implementation: docker.cloudify_docker.tasks.remove_container_files 402 | 403 | cloudify.nodes.docker.ansible_playbook: 404 | derived_from: cloudify.nodes.Root 405 | properties: 406 | <<: *playbook_config 407 | <<: *docker_machine 408 | interfaces: 409 | cloudify.interfaces.lifecycle: 410 | precreate: 411 | implementation: docker.cloudify_docker.ansible.set_playbook_config 412 | inputs: 413 | <<: *playbook_inputs 414 | create: 415 | implementation: docker.cloudify_docker.ansible.create_ansible_playbook 416 | delete: 417 | implementation: docker.cloudify_docker.ansible.remove_ansible_playbook 418 | 419 | cloudify.nodes.docker.terraform_module: 420 | derived_from: cloudify.nodes.Root 421 | properties: 422 
| <<: *docker_machine 423 | terraform_plugins: 424 | description: Terraform Plugins to install 425 | default: [] 426 | required: true 427 | resource_config: 428 | type: cloudify.types.terraform.RootModule 429 | required: true 430 | interfaces: 431 | cloudify.interfaces.lifecycle: 432 | create: 433 | implementation: docker.cloudify_docker.terraform.prepare_terraform_files 434 | delete: 435 | implementation: docker.cloudify_docker.terraform.remove_terraform_files 436 | 437 | blueprint_labels: 438 | obj-type: 439 | values: 440 | - docker 441 | 442 | labels: 443 | obj-type: 444 | values: 445 | - docker 446 | -------------------------------------------------------------------------------- /requirements-3.6.in: -------------------------------------------------------------------------------- 1 | cryptography==40.0.2 2 | -------------------------------------------------------------------------------- /requirements-3.6.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with python 3.6 3 | # To update, run: 4 | # 5 | # pip-compile --no-emit-index-url --output-file=requirements-3.6.txt requirements-3.6.in setup.py 6 | # 7 | adal==1.2.7 8 | # via msrestazure 9 | aiohttp==3.7.4.post0 10 | # via cloudify-common 11 | async-timeout==3.0.1 12 | # via aiohttp 13 | attrs==22.2.0 14 | # via aiohttp 15 | azure-common==1.1.28 16 | # via azure-mgmt-containerservice 17 | azure-core==1.24.2 18 | # via 19 | # azure-identity 20 | # azure-mgmt-core 21 | # msrest 22 | azure-identity==1.10.0 23 | # via cloudify-utilities-plugins-sdk 24 | azure-mgmt-containerservice==17.0.0 25 | # via cloudify-utilities-plugins-sdk 26 | azure-mgmt-core==1.3.2 27 | # via azure-mgmt-containerservice 28 | bcrypt==4.0.1 29 | # via paramiko 30 | boto3==1.23.10 31 | # via cloudify-utilities-plugins-sdk 32 | botocore==1.26.10 33 | # via 34 | # boto3 35 | # s3transfer 36 | bottle==0.12.19 37 | # via cloudify-common 38 | cachetools==4.2.4 39 | # via google-auth 40 | certifi==2023.11.17 41 | # via 42 | # kubernetes 43 | # msrest 44 | # requests 45 | cffi==1.15.1 46 | # via 47 | # cryptography 48 | # pynacl 49 | chardet==4.0.0 50 | # via aiohttp 51 | charset-normalizer==2.0.12 52 | # via requests 53 | cloudify-common==6.4.2.0 54 | # via 55 | # cloudify-docker-plugin (setup.py) 56 | # cloudify-utilities-plugins-sdk 57 | cloudify-utilities-plugins-sdk==0.0.130 58 | # via cloudify-docker-plugin (setup.py) 59 | cryptography==40.0.2 60 | # via 61 | # -r requirements-3.6.in 62 | # adal 63 | # azure-identity 64 | # msal 65 | # paramiko 66 | # pyjwt 67 | docker==5.0.3 68 | # via cloudify-docker-plugin (setup.py) 69 | fabric==2.7.1 70 | # via 71 | # cloudify-docker-plugin (setup.py) 72 | # patchwork 73 | fasteners==0.17.3 74 | # via cloudify-common 75 | gitdb==4.0.8 76 | # via 77 | # cloudify-utilities-plugins-sdk 78 | # gitpython 79 | gitpython==3.1.18 80 | # via cloudify-utilities-plugins-sdk 81 | google-auth==2.15.0 82 | # via 83 | # cloudify-utilities-plugins-sdk 84 | # kubernetes 85 | idna==3.6 86 | # via 87 | # idna-ssl 88 | # requests 89 | # yarl 90 | idna-ssl==1.1.0 91 | # via aiohttp 92 | invoke==1.7.3 93 | # via fabric 94 | isodate==0.6.1 95 | # via msrest 96 | jinja2==2.11.3 97 | # via 98 | # cloudify-common 99 | # cloudify-utilities-plugins-sdk 100 | jmespath==0.10.0 101 | # via 102 | # boto3 103 | # botocore 104 | kubernetes==v26.1.0 105 | # via cloudify-utilities-plugins-sdk 106 | markupsafe==2.0.1 107 | # via jinja2 108 | msal==1.25.0 109 | # via 110 | # 
azure-identity 111 | # msal-extensions 112 | msal-extensions==1.0.0 113 | # via azure-identity 114 | msrest==0.7.1 115 | # via 116 | # azure-mgmt-containerservice 117 | # msrestazure 118 | msrestazure==0.6.4 119 | # via cloudify-utilities-plugins-sdk 120 | multidict==5.2.0 121 | # via 122 | # aiohttp 123 | # yarl 124 | oauthlib==3.2.2 125 | # via requests-oauthlib 126 | packaging==21.3 127 | # via cloudify-utilities-plugins-sdk 128 | paramiko==3.3.1 129 | # via 130 | # cloudify-utilities-plugins-sdk 131 | # fabric 132 | patchwork==1.0.1 133 | # via cloudify-docker-plugin (setup.py) 134 | pathlib2==2.3.7.post1 135 | # via fabric 136 | pika==1.1.0 137 | # via cloudify-common 138 | pkginfo==1.9.6 139 | # via wagon 140 | portalocker==2.7.0 141 | # via msal-extensions 142 | proxy_tools==0.1.0 143 | # via cloudify-common 144 | psutil==5.9.6 145 | # via cloudify-utilities-plugins-sdk 146 | pyasn1==0.5.1 147 | # via 148 | # pyasn1-modules 149 | # rsa 150 | pyasn1-modules==0.3.0 151 | # via google-auth 152 | pycdlib==1.14.0 153 | # via cloudify-utilities-plugins-sdk 154 | pycparser==2.21 155 | # via cffi 156 | pyjwt[crypto]==2.4.0 157 | # via 158 | # adal 159 | # msal 160 | pynacl==1.5.0 161 | # via paramiko 162 | pyparsing==3.0.7 163 | # via packaging 164 | python-dateutil==2.8.2 165 | # via 166 | # adal 167 | # botocore 168 | # kubernetes 169 | pytz==2021.3 170 | # via cloudify-common 171 | pyyaml==6.0.1 172 | # via 173 | # cloudify-utilities-plugins-sdk 174 | # kubernetes 175 | requests==2.27.1 176 | # via 177 | # adal 178 | # azure-core 179 | # cloudify-common 180 | # cloudify-utilities-plugins-sdk 181 | # docker 182 | # kubernetes 183 | # msal 184 | # msrest 185 | # requests-oauthlib 186 | # requests-toolbelt 187 | requests-oauthlib==1.3.1 188 | # via 189 | # kubernetes 190 | # msrest 191 | requests_toolbelt==0.9.1 192 | # via cloudify-common 193 | retrying==1.3.3 194 | # via cloudify-common 195 | rsa==4.9 196 | # via google-auth 197 | s3transfer==0.5.2 198 | # via boto3 199 | six==1.16.0 200 | # via 201 | # azure-core 202 | # azure-identity 203 | # google-auth 204 | # isodate 205 | # kubernetes 206 | # msrestazure 207 | # pathlib2 208 | # python-dateutil 209 | # retrying 210 | smmap==5.0.0 211 | # via gitdb 212 | typing-extensions==4.1.1 213 | # via 214 | # aiohttp 215 | # azure-core 216 | # gitpython 217 | # yarl 218 | urllib3==1.26.18 219 | # via 220 | # botocore 221 | # kubernetes 222 | # requests 223 | wagon==1.0.1 224 | # via cloudify-common 225 | websocket-client==1.3.1 226 | # via 227 | # docker 228 | # kubernetes 229 | wheel==0.37.1 230 | # via wagon 231 | xmltodict==0.13.0 232 | # via cloudify-utilities-plugins-sdk 233 | yarl==1.7.2 234 | # via aiohttp 235 | 236 | # The following packages are considered to be unsafe in a requirements file: 237 | # setuptools 238 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | -e fusion-common 2 | -e fusion-manager/mgmtworker 3 | -e fusion-agent 4 | -e cloudify-utilities-plugins-sdk 5 | cryptography>=41.0.5 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by the following command: 4 | # 5 | # pip-compile --no-emit-index-url --output-file=requirements.txt requirements.in setup.py 6 | # 7 | -e 
cloudify-utilities-plugins-sdk 8 | # via 9 | # -r requirements.in 10 | # cloudify-docker-plugin (setup.py) 11 | -e fusion-agent 12 | # via 13 | # -r requirements.in 14 | # fusion-mgmtworker 15 | -e fusion-common 16 | # via 17 | # -r requirements.in 18 | # cloudify-docker-plugin (setup.py) 19 | # cloudify-utilities-plugins-sdk 20 | # fusion-agent 21 | # fusion-mgmtworker 22 | -e fusion-manager/mgmtworker 23 | # via -r requirements.in 24 | adal==1.2.7 25 | # via msrestazure 26 | aiohttp==3.9.1 27 | # via fusion-common 28 | aiosignal==1.3.1 29 | # via aiohttp 30 | appdirs==1.4.3 31 | # via fusion-agent 32 | attrs==23.1.0 33 | # via aiohttp 34 | azure-common==1.1.28 35 | # via azure-mgmt-containerservice 36 | azure-core==1.29.5 37 | # via 38 | # azure-identity 39 | # azure-mgmt-core 40 | # msrest 41 | azure-identity==1.15.0 42 | # via cloudify-utilities-plugins-sdk 43 | azure-mgmt-containerservice==17.0.0 44 | # via cloudify-utilities-plugins-sdk 45 | azure-mgmt-core==1.4.0 46 | # via azure-mgmt-containerservice 47 | bcrypt==4.1.1 48 | # via paramiko 49 | boto3==1.33.4 50 | # via cloudify-utilities-plugins-sdk 51 | botocore==1.33.4 52 | # via 53 | # boto3 54 | # s3transfer 55 | bottle==0.12.25 56 | # via fusion-common 57 | cachetools==5.3.2 58 | # via google-auth 59 | certifi==2023.11.17 60 | # via 61 | # fusion-mgmtworker 62 | # kubernetes 63 | # msrest 64 | # requests 65 | cffi==1.16.0 66 | # via 67 | # cryptography 68 | # pynacl 69 | charset-normalizer==3.3.2 70 | # via requests 71 | click==8.1.7 72 | # via fusion-agent 73 | cryptography==41.0.7 74 | # via 75 | # -r requirements.in 76 | # adal 77 | # azure-identity 78 | # fusion-mgmtworker 79 | # msal 80 | # paramiko 81 | # pyjwt 82 | distro==1.8.0 83 | # via fusion-common 84 | docker==6.1.3 85 | # via cloudify-docker-plugin (setup.py) 86 | fabric==2.7.1 87 | # via 88 | # cloudify-docker-plugin (setup.py) 89 | # patchwork 90 | fasteners==0.19 91 | # via fusion-common 92 | frozenlist==1.4.0 93 | # via 94 | # aiohttp 95 | # aiosignal 96 | gitdb==4.0.11 97 | # via 98 | # cloudify-utilities-plugins-sdk 99 | # gitpython 100 | gitpython==3.1.40 101 | # via cloudify-utilities-plugins-sdk 102 | google-auth==2.15.0 103 | # via 104 | # cloudify-utilities-plugins-sdk 105 | # kubernetes 106 | idna==3.6 107 | # via 108 | # requests 109 | # yarl 110 | invoke==1.7.3 111 | # via fabric 112 | isodate==0.6.1 113 | # via msrest 114 | jinja2==3.1.2 115 | # via 116 | # cloudify-utilities-plugins-sdk 117 | # fusion-agent 118 | # fusion-common 119 | jmespath==1.0.1 120 | # via 121 | # boto3 122 | # botocore 123 | kubernetes==26.1.0 124 | # via cloudify-utilities-plugins-sdk 125 | markupsafe==2.1.3 126 | # via jinja2 127 | msal==1.25.0 128 | # via 129 | # azure-identity 130 | # msal-extensions 131 | msal-extensions==1.0.0 132 | # via azure-identity 133 | msrest==0.7.1 134 | # via 135 | # azure-mgmt-containerservice 136 | # msrestazure 137 | msrestazure==0.6.4 138 | # via cloudify-utilities-plugins-sdk 139 | multidict==6.0.4 140 | # via 141 | # aiohttp 142 | # yarl 143 | networkx==2.8.8 144 | # via fusion-common 145 | oauthlib==3.2.2 146 | # via requests-oauthlib 147 | packaging==21.3 148 | # via 149 | # cloudify-utilities-plugins-sdk 150 | # docker 151 | # fusion-agent 152 | # fusion-mgmtworker 153 | paramiko==3.3.1 154 | # via 155 | # cloudify-utilities-plugins-sdk 156 | # fabric 157 | patchwork==1.0.1 158 | # via cloudify-docker-plugin (setup.py) 159 | pathlib2==2.3.7.post1 160 | # via fabric 161 | pika==1.3.2 162 | # via fusion-common 163 | pkginfo==1.9.6 164 | 
# via wagon 165 | portalocker==2.8.2 166 | # via msal-extensions 167 | proxy-tools==0.1.0 168 | # via fusion-common 169 | psutil==5.9.6 170 | # via cloudify-utilities-plugins-sdk 171 | psycopg2==2.9.9 172 | # via fusion-mgmtworker 173 | pyasn1==0.5.1 174 | # via 175 | # pyasn1-modules 176 | # rsa 177 | pyasn1-modules==0.3.0 178 | # via google-auth 179 | pycdlib==1.14.0 180 | # via cloudify-utilities-plugins-sdk 181 | pycparser==2.21 182 | # via cffi 183 | pyjwt[crypto]==2.8.0 184 | # via 185 | # adal 186 | # msal 187 | pynacl==1.5.0 188 | # via paramiko 189 | pyparsing==3.1.1 190 | # via packaging 191 | python-dateutil==2.8.2 192 | # via 193 | # adal 194 | # botocore 195 | # fusion-mgmtworker 196 | # kubernetes 197 | pytz==2023.3.post1 198 | # via 199 | # fusion-common 200 | # fusion-mgmtworker 201 | pyyaml==6.0 202 | # via 203 | # cloudify-utilities-plugins-sdk 204 | # fusion-common 205 | # kubernetes 206 | requests==2.31.0 207 | # via 208 | # adal 209 | # azure-core 210 | # cloudify-utilities-plugins-sdk 211 | # docker 212 | # fusion-agent 213 | # fusion-common 214 | # kubernetes 215 | # msal 216 | # msrest 217 | # requests-oauthlib 218 | # requests-toolbelt 219 | requests-oauthlib==1.3.1 220 | # via 221 | # kubernetes 222 | # msrest 223 | requests-toolbelt==1.0.0 224 | # via 225 | # cloudify-utilities-plugins-sdk 226 | # fusion-common 227 | retrying==1.3.4 228 | # via fusion-mgmtworker 229 | rsa==4.9 230 | # via google-auth 231 | s3transfer==0.8.2 232 | # via boto3 233 | six==1.16.0 234 | # via 235 | # azure-core 236 | # google-auth 237 | # isodate 238 | # kubernetes 239 | # msrestazure 240 | # pathlib2 241 | # python-dateutil 242 | # retrying 243 | smmap==5.0.1 244 | # via gitdb 245 | typing-extensions==4.8.0 246 | # via azure-core 247 | urllib3==2.0.7 248 | # via 249 | # botocore 250 | # docker 251 | # kubernetes 252 | # requests 253 | wagon==1.0.1 254 | # via fusion-common 255 | websocket-client==1.6.4 256 | # via 257 | # docker 258 | # kubernetes 259 | wheel==0.42.0 260 | # via wagon 261 | xmltodict==0.13.0 262 | # via cloudify-utilities-plugins-sdk 263 | yarl==1.9.3 264 | # via aiohttp 265 | 266 | # The following packages are considered to be unsafe in a requirements file: 267 | # setuptools 268 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | ######## 2 | # Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
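# Note (annotation, not part of the original file): install_requires below is selected per
# interpreter version -- on Python 3.6 the plugin pins cloudify-common and the last docker SDK
# line with 3.6 support (5.0.3), while newer interpreters pull fusion-common and an unpinned
# cloudify-utilities-plugins-sdk instead.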
15 | 16 | 17 | import os 18 | import re 19 | import sys 20 | import pathlib 21 | from setuptools import setup, find_packages 22 | 23 | 24 | def get_version(): 25 | current_dir = pathlib.Path(__file__).parent.resolve() 26 | with open(os.path.join(current_dir, 'cloudify_docker/__version__.py'), 27 | 'r') as outfile: 28 | var = outfile.read() 29 | return re.search(r'\d+.\d+.\d+', var).group() 30 | 31 | 32 | install_requires = [ 33 | 'docker>=5.0.3', # Latest with official support for python 3.6 is 5.0.3 34 | 'fabric>=2.5.0', 35 | 'patchwork>=1.0.1' # to copy files to docker machine 36 | ] 37 | 38 | if sys.version_info.major == 3 and sys.version_info.minor == 6: 39 | packages = ['cloudify_docker'] 40 | install_requires += [ 41 | 'cloudify-common>=6.4,<7.0', 42 | 'cloudify-utilities-plugins-sdk>=0.0.61', # Shared Resource Downloader 43 | ] 44 | else: 45 | packages = find_packages() 46 | install_requires += [ 47 | 'fusion-common', 48 | 'cloudify-utilities-plugins-sdk' 49 | ] 50 | 51 | setup( 52 | name='cloudify-docker-plugin', 53 | version=get_version(), 54 | author='Cloudify Platform LTD', 55 | author_email='hello@cloudify.co', 56 | description='Manage Docker nodes/containers by Cloudify.', 57 | packages=packages, 58 | license='LICENSE', 59 | zip_safe=False, 60 | install_requires=install_requires, 61 | ) 62 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | nose>=1.3 2 | nose-cov>=1.3 3 | mock>=1.0 4 | pathlib==1.0.1 5 | flake8==3.7.9 6 | testtools>=2.3.0 7 | requests_mock 8 | coverage 9 | testfixtures 10 | networkx==2.5.1 11 | 12 | # For integration tests 13 | pytest==4.6.3 14 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = unittesting,linting 3 | 4 | minversion = 1.6 5 | skipsdist = True 6 | 7 | [testenv] 8 | setenv = 9 | VIRTUAL_ENV={envdir} 10 | 11 | # NOTE: relative paths were used due to '-w' flag for nosetests util 12 | 13 | usedevelop = True 14 | install_command = pip install -U {opts} {packages} 15 | deps = -r{toxinidir}/requirements-3.6.txt 16 | -r{toxinidir}/test-requirements.txt 17 | whitelist_externals = bash 18 | 19 | [testenv:linting] 20 | commands = 21 | flake8 cloudify_docker 22 | 23 | [testenv:unittesting] 24 | commands = 25 | nosetests -s -v --with-cov --cov-report term-missing --cov cloudify_docker {posargs:cloudify_docker/tests} 26 | -------------------------------------------------------------------------------- /v2_plugin.yaml: -------------------------------------------------------------------------------- 1 | plugins: 2 | docker: 3 | executor: central_deployment_agent 4 | package_name: cloudify-docker-plugin 5 | package_version: 2.0.16 6 | dsl_definitions: 7 | client_config: 8 | client_config: &id001 9 | type: cloudify.types.docker.ClientConfig 10 | required: false 11 | docker_machine: 12 | docker_machine: &id002 13 | type: cloudify.types.docker.DockerMachineConfig 14 | required: false 15 | playbook_config: 16 | ansible_playbook_executable_path: &id003 17 | type: string 18 | default: ansible-playbook 19 | playbook_source_path: &id004 20 | type: string 21 | default: '' 22 | playbook_path: &id005 23 | type: string 24 | default: '' 25 | site_yaml_path: &id006 26 | type: string 27 | default: '' 28 | additional_playbook_files: &id007 29 | type: list 30 | default: [] 31 | sources: &id008 32 
| default: {} 33 | run_data: &id009 34 | default: {} 35 | sensitive_keys: &id010 36 | type: list 37 | default: 38 | - ansible_password 39 | options_config: &id011 40 | default: {} 41 | ansible_env_vars: &id012 42 | default: 43 | ANSIBLE_HOST_KEY_CHECKING: 'False' 44 | ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED: 'False' 45 | debug_level: &id013 46 | type: integer 47 | default: 2 48 | additional_args: &id014 49 | type: string 50 | default: '' 51 | save_playbook: &id015 52 | type: boolean 53 | default: false 54 | remerge_sources: &id016 55 | type: boolean 56 | default: false 57 | ansible_become: &id017 58 | type: boolean 59 | default: false 60 | playbook_inputs: 61 | ansible_playbook_executable_path: &id018 62 | default: { get_property: [SELF, ansible_playbook_executable_path] } 63 | playbook_source_path: &id019 64 | default: { get_property: [SELF, playbook_source_path] } 65 | playbook_path: &id020 66 | default: { get_property: [SELF, playbook_path] } 67 | site_yaml_path: &id021 68 | default: { get_property: [SELF, site_yaml_path] } 69 | save_playbook: &id022 70 | default: { get_property: [SELF, save_playbook] } 71 | remerge_sources: &id023 72 | default: { get_property: [SELF, remerge_sources] } 73 | sources: &id024 74 | default: { get_property: [SELF, sources] } 75 | run_data: &id025 76 | default: { get_property: [SELF, run_data] } 77 | sensitive_keys: &id026 78 | default: { get_property: [SELF, sensitive_keys] } 79 | options_config: &id027 80 | default: { get_property: [SELF, options_config] } 81 | ansible_env_vars: &id028 82 | default: { get_property: [SELF, ansible_env_vars] } 83 | debug_level: &id029 84 | default: { get_property: [SELF, debug_level] } 85 | additional_args: &id030 86 | default: { get_property: [SELF, additional_args] } 87 | data_types: 88 | cloudify.types.docker.DockerMachineConfig: 89 | properties: 90 | docker_ip: 91 | type: string 92 | default: '' 93 | docker_user: 94 | type: string 95 | default: '' 96 | docker_key: 97 | type: string 98 | default: '' 99 | container_volume: 100 | type: string 101 | default: '' 102 | cloudify.types.docker.DockerInstallationConfig: 103 | properties: 104 | install_url: 105 | type: string 106 | default: https://get.docker.com 107 | install_script: 108 | type: string 109 | default: https://raw.githubusercontent.com/cloudify-cosmo/cloudify-docker-plugin/master/cloudify_docker/resources/post-install.sh 110 | install_with_sudo: 111 | type: boolean 112 | default: true 113 | offline_installation: 114 | type: boolean 115 | default: false 116 | package_tar_path: 117 | type: string 118 | default: '' 119 | post_install_script_path: 120 | type: string 121 | default: '' 122 | installation_dir: 123 | type: string 124 | default: '' 125 | cloudify.types.docker.ClientConfig: 126 | properties: 127 | docker_host: 128 | type: string 129 | default: '' 130 | docker_rest_port: 131 | type: string 132 | default: '' 133 | docker_sock_file: 134 | type: string 135 | default: '' 136 | cloudify.types.docker.Image: 137 | properties: 138 | image_content: 139 | type: string 140 | default: '' 141 | tag: 142 | type: string 143 | default: '' 144 | pull_image: 145 | type: boolean 146 | default: false 147 | all_tags: 148 | type: boolean 149 | default: false 150 | cloudify.types.docker.Container: 151 | properties: 152 | image_tag: 153 | type: string 154 | default: '' 155 | container_args: 156 | type: dict 157 | default: {} 158 | cloudify.types.docker.ContainerFiles: 159 | properties: 160 | docker_machine: 161 | type: cloudify.types.docker.DockerMachineConfig 162 | default: {} 163 | 
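# Here, source is the local files location and destination is where the files are extracted
# on the Docker host before being volume-mapped into the container; see the description
# fields for the same properties in plugin_1_4.yaml and plugin_1_5.yaml.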
source: 164 | type: string 165 | default: '' 166 | destination: 167 | type: string 168 | default: '' 169 | extra_files: 170 | type: list 171 | default: [] 172 | ansible_sources: 173 | type: dict 174 | default: {} 175 | terraform_sources: 176 | type: dict 177 | default: {} 178 | cloudify.types.terraform.Backend: 179 | properties: 180 | name: 181 | type: string 182 | required: false 183 | options: 184 | required: false 185 | cloudify.types.terraform.RootModule: 186 | properties: 187 | source: 188 | type: string 189 | required: true 190 | backend: 191 | type: cloudify.types.terraform.Backend 192 | default: {} 193 | variables: 194 | required: false 195 | default: {} 196 | environment_variables: 197 | required: false 198 | default: {} 199 | node_types: 200 | cloudify.nodes.docker.images: 201 | derived_from: cloudify.nodes.Root 202 | properties: 203 | client_config: *id001 204 | interfaces: 205 | cloudify.interfaces.lifecycle: 206 | create: 207 | implementation: docker.cloudify_docker.tasks.list_images 208 | cloudify.nodes.docker.containers: 209 | derived_from: cloudify.nodes.Root 210 | properties: 211 | client_config: *id001 212 | interfaces: 213 | cloudify.interfaces.lifecycle: 214 | create: 215 | implementation: docker.cloudify_docker.tasks.list_containers 216 | cloudify.nodes.docker.host: 217 | derived_from: cloudify.nodes.Root 218 | properties: 219 | docker_machine: *id002 220 | resource_config: 221 | type: cloudify.types.docker.DockerInstallationConfig 222 | required: true 223 | interfaces: 224 | cloudify.interfaces.lifecycle: 225 | create: 226 | implementation: docker.cloudify_docker.tasks.install_docker 227 | delete: 228 | implementation: docker.cloudify_docker.tasks.uninstall_docker 229 | cloudify.nodes.docker.host_details: 230 | derived_from: cloudify.nodes.Root 231 | properties: 232 | client_config: *id001 233 | interfaces: 234 | cloudify.interfaces.lifecycle: 235 | create: 236 | implementation: docker.cloudify_docker.tasks.list_host_details 237 | cloudify.nodes.docker.image: 238 | derived_from: cloudify.nodes.Root 239 | properties: 240 | client_config: *id001 241 | resource_config: 242 | type: cloudify.types.docker.Image 243 | required: true 244 | interfaces: 245 | cloudify.interfaces.lifecycle: 246 | create: 247 | implementation: docker.cloudify_docker.tasks.build_image 248 | delete: 249 | implementation: docker.cloudify_docker.tasks.remove_image 250 | cloudify.nodes.docker.container: 251 | derived_from: cloudify.nodes.Root 252 | properties: 253 | client_config: *id001 254 | resource_config: 255 | type: cloudify.types.docker.Container 256 | required: true 257 | interfaces: 258 | cloudify.interfaces.lifecycle: 259 | create: 260 | implementation: docker.cloudify_docker.tasks.create_container 261 | stop: 262 | implementation: docker.cloudify_docker.tasks.stop_container 263 | inputs: 264 | stop_command: 265 | type: string 266 | default: '' 267 | delete: 268 | implementation: docker.cloudify_docker.tasks.remove_container 269 | cloudify.nodes.docker.container_files: 270 | derived_from: cloudify.nodes.Root 271 | properties: 272 | resource_config: 273 | type: cloudify.types.docker.ContainerFiles 274 | required: true 275 | interfaces: 276 | cloudify.interfaces.lifecycle: 277 | create: 278 | implementation: docker.cloudify_docker.tasks.prepare_container_files 279 | delete: 280 | implementation: docker.cloudify_docker.tasks.remove_container_files 281 | cloudify.nodes.docker.ansible_playbook: 282 | derived_from: cloudify.nodes.Root 283 | properties: 284 | ansible_playbook_executable_path: 
*id003 285 | playbook_source_path: *id004 286 | playbook_path: *id005 287 | site_yaml_path: *id006 288 | additional_playbook_files: *id007 289 | sources: *id008 290 | run_data: *id009 291 | sensitive_keys: *id010 292 | options_config: *id011 293 | ansible_env_vars: *id012 294 | debug_level: *id013 295 | additional_args: *id014 296 | save_playbook: *id015 297 | remerge_sources: *id016 298 | ansible_become: *id017 299 | docker_machine: *id002 300 | interfaces: 301 | cloudify.interfaces.lifecycle: 302 | precreate: 303 | implementation: docker.cloudify_docker.ansible.set_playbook_config 304 | inputs: 305 | ansible_playbook_executable_path: *id018 306 | playbook_source_path: *id019 307 | playbook_path: *id020 308 | site_yaml_path: *id021 309 | save_playbook: *id022 310 | remerge_sources: *id023 311 | sources: *id024 312 | run_data: *id025 313 | sensitive_keys: *id026 314 | options_config: *id027 315 | ansible_env_vars: *id028 316 | debug_level: *id029 317 | additional_args: *id030 318 | create: 319 | implementation: docker.cloudify_docker.ansible.create_ansible_playbook 320 | delete: 321 | implementation: docker.cloudify_docker.ansible.remove_ansible_playbook 322 | cloudify.nodes.docker.terraform_module: 323 | derived_from: cloudify.nodes.Root 324 | properties: 325 | docker_machine: *id002 326 | terraform_plugins: 327 | default: [] 328 | required: true 329 | resource_config: 330 | type: cloudify.types.terraform.RootModule 331 | required: true 332 | interfaces: 333 | cloudify.interfaces.lifecycle: 334 | create: 335 | implementation: docker.cloudify_docker.terraform.prepare_terraform_files 336 | delete: 337 | implementation: docker.cloudify_docker.terraform.remove_terraform_files 338 | blueprint_labels: 339 | obj-type: 340 | values: 341 | - docker 342 | labels: 343 | obj-type: 344 | values: 345 | - docker 346 | --------------------------------------------------------------------------------
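For reference, a minimal blueprint wiring these node types together could look like the sketch below. It is illustrative only: the input name `docker_host_ip`, the port value, and the image tag are placeholders, while the node types and property names come from the plugin YAMLs above.

node_templates:

  my_image:
    type: cloudify.nodes.docker.image
    properties:
      client_config:
        docker_host: { get_input: docker_host_ip }
        docker_rest_port: '2375'
      resource_config:
        image_content: 'FROM amd64/centos:7'
        tag: 'test:1.0'

  my_container:
    type: cloudify.nodes.docker.container
    properties:
      client_config:
        docker_host: { get_input: docker_host_ip }
        docker_rest_port: '2375'
      resource_config:
        image_tag: 'test:1.0'
    relationships:
      - type: cloudify.relationships.depends_on
        target: my_image

The depends_on relationship makes the container node wait for the image node's create operation (build_image) before create_container runs, matching the lifecycle mappings defined in the node_types sections.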