├── .github ├── FUNDING.yml └── workflows │ ├── release.yml │ └── test.yml ├── .gitignore ├── .scalafmt.conf ├── CHANGELOG.md ├── LICENSE.txt ├── README.md ├── build.gradle ├── example ├── .gitignore ├── digdag.properties ├── ecs_task.embulk │ ├── csv │ │ ├── data.01.csv │ │ └── data.02.csv │ ├── example.dig │ └── template.yml ├── ecs_task.py │ ├── echo.py │ └── example.dig ├── ecs_task.rb │ ├── echo.rb │ └── example.dig ├── ecs_task.run │ └── example.dig ├── ecs_task.sh │ └── example.dig ├── example.dig └── run.sh ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── settings.gradle └── src └── main ├── resources ├── META-INF │ └── services │ │ └── io.digdag.spi.Plugin └── pro │ └── civitaspo │ └── digdag │ └── plugin │ └── ecs_task │ ├── embulk │ └── run.sh │ ├── py │ ├── run.sh │ └── runner.py │ ├── rb │ ├── run.sh │ └── runner.rb │ └── sh │ └── run.sh └── scala └── pro └── civitaspo └── digdag └── plugin └── ecs_task ├── AbstractEcsTaskOperator.scala ├── EcsTaskPlugin.scala ├── aws ├── AmazonS3UriWrapper.scala ├── Aws.scala └── AwsConf.scala ├── command ├── AbstractEcsTaskCommandOperator.scala ├── EcsTaskCallInternalOperator.scala ├── EcsTaskCommandResultInternalOperator.scala ├── EcsTaskCommandRunner.scala ├── S3TmpStorage.scala └── TmpStorage.scala ├── embulk └── EcsTaskEmbulkOperator.scala ├── exception └── package.scala ├── package.scala ├── py └── EcsTaskPyOperator.scala ├── rb └── EcsTaskRbOperator.scala ├── register └── EcsTaskRegisterOperator.scala ├── result └── EcsTaskResultOperator.scala ├── run ├── EcsTaskRunInternalOperator.scala └── EcsTaskRunOperator.scala ├── sh └── EcsTaskShOperatar.scala ├── util └── WorkspaceWithTempDir.scala └── wait ├── EcsTaskWaitOperator.scala ├── EcsTaskWaiter.scala └── ExponentialBackoffDelayStrategy.scala /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: civitaspo 2 | 
-------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release CI 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | release: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v1 15 | - name: Set up JDK 1.8 16 | uses: actions/setup-java@v1 17 | with: 18 | java-version: 1.8 19 | - name: scalafmt with Gradle 20 | run: ./gradlew spotlessCheck 21 | - name: Test with Gradle 22 | run: ./gradlew test 23 | - name: Release Packages into Github Packages 24 | run: ./gradlew publish 25 | env: 26 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 27 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test CI 2 | 3 | on: 4 | - pull_request 5 | 6 | jobs: 7 | test: 8 | 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v1 13 | - name: Set up JDK 1.8 14 | uses: actions/setup-java@v1 15 | with: 16 | java-version: 1.8 17 | - name: scalafmt with Gradle 18 | run: ./gradlew spotlessCheck 19 | - name: Test with Gradle 20 | run: ./gradlew test 21 | 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | /.gradle 3 | /build 4 | .java-version 5 | .digdag 6 | 7 | # git 8 | !.gitkeep 9 | .git 10 | 11 | # intellij idea 12 | *.iml 13 | *.ipr 14 | *.iws 15 | *.idea 16 | 17 | # eclipse 18 | .classpath 19 | .project 20 | .settings 21 | /bin/ 22 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | # https://scalameta.org/scalafmt/#Configuration 2 | 3 | version = "2.3.2" 4 | maxColumn = 160 
5 | newlines.alwaysBeforeElseAfterCurlyIf = true 6 | newlines.alwaysBeforeTopLevelStatements = true 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 0.1.6 (2022-08-03) 2 | ================== 3 | * [New Feature] [#88](https://github.com/civitaspo/digdag-operator-ecs_task/pull/88) Support runtime_platform for the task definition. @kimurashuhei++ 4 | 5 | 0.1.5 (2022-07-13) 6 | ================== 7 | * [New Feature] [#85](https://github.com/civitaspo/digdag-operator-ecs_task/pull/85) Add pollingStrategy for RunOperator. @tangbamiinh++ 8 | 9 | 0.1.4 (2021-07-27) 10 | ================== 11 | * [New Feature] [#79](https://github.com/civitaspo/digdag-operator-ecs_task/pull/79) Support dependsOn. @shoot16625++ 12 | * [New Feature] [#80](https://github.com/civitaspo/digdag-operator-ecs_task/pull/80) Support ephemeralStorage @shoot16625++ 13 | * [Fix] [#81](https://github.com/civitaspo/digdag-operator-ecs_task/pull/81) Install from maven 14 | 15 | 0.1.3 (2021-01-25) 16 | ================== 17 | * [New Feature] [#77](https://github.com/civitaspo/digdag-operator-ecs_task/pull/77) Add support for EFS volume configuration. 18 | 19 | 0.1.2 (2020-06-25) 20 | ================== 21 | * [New Feature] [#75](https://github.com/civitaspo/digdag-operator-ecs_task/pull/75) Supports CapacityProvider. 22 | 23 | 0.1.1 (2020-05-22) 24 | ================== 25 | * [New Feature] [#71](https://github.com/civitaspo/digdag-operator-ecs_task/pull/71) Support a new log driver: `awsfirelens`. 
26 | 27 | 0.1.0 (2019-12-08) 28 | ================== 29 | * [Enhancement] Update dependencies (digdag 0.9.31 -> 0.9.41, scala 2.12.6 -> 2.13.1, aws-sdk 1.11.451 -> 1.11.751) 30 | * [Enhancement] Add test dependencies 31 | * [Enhancement] Apply Scala 2.13 style 32 | * [Enhancement] Use `scala.jdk.CollectionConverters._` instead of `scala.collection.JavaConverters._` 33 | * [Enhancement] Fix deprecations by Scala 2.13 34 | * [Enhancement] Use Using.resource instead of TryWithResource 35 | * [New Feature] Use Github Actions instead of CircleCI 36 | * [New Feature] Release to Github Packages when tagging 37 | * [Enhancement] Update spotless 3.13.0 -> 3.27.1 38 | * [Enhancement] Update scalafmt 1.5.1 -> 2.3.2 39 | 40 | 0.0.14 (2019-11-24) 41 | =================== 42 | * [Enhancement] Add `shell` option to change the shell command that the operator uses internally. 43 | 44 | 0.0.13 (2019-07-30) 45 | =================== 46 | * [Fix] Fix environments bug: no environments in scripting operators 47 | * [Fix] Catch any initialization exception and re-throw as `ConfigException` 48 | 49 | 0.0.12 (2019-05-23) 50 | =================== 51 | * [Enhancement] Follow latest python runner script used by `ecs_task.py>`. The changes resolve the same issues that the below PRs resolve. 52 | * [Support type hints for Python3 on py> operator by chezou · Pull Request \#905 · treasure\-data/digdag](https://github.com/treasure-data/digdag/pull/905) 53 | * [Fix default argument check on py> operator by chezou · Pull Request \#913 · treasure\-data/digdag](https://github.com/treasure-data/digdag/pull/913) 54 | * [Fix digdag\.env\.add\_subtask for python3 by sonots · Pull Request \#972 · treasure\-data/digdag](https://github.com/treasure-data/digdag/pull/972) 55 | 56 | 0.0.11 (2019-01-24) 57 | =================== 58 | * [Enhancement] `ecs_task.wait>` operator supports changeable interval and exponential backoff strategy. 
@Mulyu++ 59 | 60 | 0.0.10 (2018-12-26) 61 | =================== 62 | * [Enhancement] Shorten the family name with MurmurHash3 if auto-generated family name exceeds 255 letters. 63 | 64 | 0.0.9 (2018-12-23) 65 | ================== 66 | * [Enhancement] Stop tasks on waiting failure. 67 | 68 | 0.0.8 (2018-12-14) 69 | ================== 70 | * [Enhancement] Retry scripting operator when the container exits without exit code. 71 | * [Enhancement] Strip the line end of command for redirecting operation in `run.sh`. 72 | 73 | 0.0.7 (2018-11-19) 74 | ================== 75 | 76 | * [Enhancement] Add examples for scripting operators and update `ecs_task.run>` example. 77 | * [Enhancement] Always normalize ECS Task family name. 78 | * [Enhancement] Update aws-java-sdk 1.11.433 -> 1.11.451 79 | * [Enhancement] Add new options (`secrets`, `tags`) that follow ECS new release. `ipc_mode` and `pid_mode` are not supported yet because aws-java-sdk does not support them. 80 | 81 | 0.0.6 (2018-11-13) 82 | ================== 83 | 84 | * [Enhancement] Enable to use params as env for `ecs_task.sh` 85 | 86 | 0.0.5 (2018-11-13) 87 | ================== 88 | 89 | * [Experimental] Implement `ecs_task.rb>` operator. 90 | * [Experimental] Implement `ecs_task.sh>` operator. 91 | * [Enhancement] Add interface for another storage except S3 used by scripting operators. 92 | * [Enhancement] Add abstract class for scripting operators. 93 | * [Enhancement] Request ECS TaskRun with some retry. 94 | * [Fix] Prevent the influence of prior task registration. 95 | * [Enhancement] Add Logging for registered TaskDefinition arn. 96 | * [Enhancement] Define VERSION var as package object val. 97 | 98 | 0.0.4 (2018-11-06) 99 | ================== 100 | 101 | * [Experimental] Implement `ecs_task.embulk>` operator. 102 | * [Enhancement] Write README for scripting operators. 103 | * [Enhancement] Make family name more configurable for scripting operators. 
104 | 105 | 0.0.3 (2018-10-30) 106 | ================== 107 | 108 | * [Breaking Change] Do not use enum parameter directory because the enums require upper camel case ( `ecs_task.{py,register,run}>` operator) 109 | * [Enhancement] Rename the configuration key: `additional_containers` to `sidecars` ( `ecs_task.py>` operator) 110 | * [Breaking Change/Enhancement] Rename the configuration key: `environment` to `environments` ( `ecs_task.{py,register,run}>` operator) 111 | * [Enhancement] Rename the output key: `last_ecs_task_py` to `last_ecs_task_command` ( `ecs_task.py>` operator) 112 | * [Fix] Fix example indents 113 | * [Fix] Avoid java.nio.charset.MalformedInputException: Input length = 1 114 | * [Fix] Avoid com.amazonaws.services.ecs.model.ClientException: Family contains invalid characters when the default value is used. 115 | * [Enhancement] Enable to parse json text in configuration 116 | * [Enhancement] Get s3 content more simply 117 | * [Fix] Use unique s3 workspace path 118 | * [Fix] print error in runner.py 119 | 120 | 0.0.2 (2018-10-29) 121 | ================== 122 | 123 | * [Experimental] Implement `ecs_task.py>` operator. (No document yet) 124 | * [Fix] Stop correctly after task run to shutdown TransferManager after processing. 125 | 126 | 0.0.1 (2018-10-23) 127 | ================== 128 | 129 | * First Release 130 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # digdag-operator-ecs_task 2 | [![Jitpack](https://jitpack.io/v/pro.civitaspo/digdag-operator-ecs_task.svg)](https://jitpack.io/#pro.civitaspo/digdag-operator-ecs_task) ![Release CI Status Badge](https://github.com/civitaspo/digdag-operator-ecs_task/workflows/Release%20CI/badge.svg) ![Test CI Status Badge](https://github.com/civitaspo/digdag-operator-ecs_task/workflows/Test%20CI/badge.svg) [![Digdag](https://img.shields.io/badge/digdag-v0.9.31-brightgreen.svg)](https://github.com/treasure-data/digdag/releases/tag/v0.9.31) 3 | 4 | digdag plugin for AWS ECS Task. 5 | 6 | # Overview 7 | 8 | - Plugin type: operator 9 | 10 | # Usage 11 | 12 | ```yaml 13 | _export: 14 | plugin: 15 | repositories: 16 | - https://jitpack.io 17 | dependencies: 18 | - pro.civitaspo:digdag-operator-ecs_task:0.1.6 19 | ecs_task: 20 | auth_method: profile 21 | tmp_storage: 22 | type: s3 23 | uri: ${output} 24 | family_prefix: hello- 25 | cluster: ${cluster} 26 | network_mode: host 27 | memory: 1 GB 28 | task_role_arn: ${task_role_arn} 29 | 30 | +ecs_task.run: 31 | +step1: 32 | ecs_task.run>: 33 | def: 34 | network_mode: host 35 | container_definitions: 36 | - name: step1 37 | image: civitaspo/python-awscli:latest 38 | command: [echo, step1] 39 | essential: true 40 | memory: 500 41 | cpu: 1 42 | family: step1 43 | count: 1 44 | +step2: 45 | ecs_task.run>: 46 | def: 47 | network_mode: host 48 | container_definitions: 49 | - name: step2 50 | image: civitaspo/python-awscli:latest 51 | command: [echo, step2] 52 | essential: true 53 | memory: 500 54 | cpu: 1 55 | family: step2 56 | count: 1 57 | +step3: 58 | ecs_task.run>: 59 | def: 60 | network_mode: host 61 | container_definitions: 62 | - name: step3 63 | image: civitaspo/python-awscli:latest 64 | command: 65 | - sh 66 | - -c 67 
| - echo '{"store_params":{"civi":"taspo"}}' | aws s3 cp - ${output}/${session_uuid}.json 68 | essential: true 69 | memory: 500 70 | cpu: 1 71 | task_role_arn: ${task_role_arn} 72 | family: step3 73 | count: 1 74 | result_s3_uri: ${output}/${session_uuid}.json 75 | 76 | +step4: 77 | echo>: ${civi} 78 | 79 | +ecs_task.sh: 80 | +step0: 81 | ecs_task.sh>: env 82 | image: civitaspo/digdag-awscli:latest 83 | _export: 84 | message: 85 | message: 'hello ecs_task.rb' 86 | created_by: civitaspo 87 | 88 | +ecs_task.rb: 89 | +step0: 90 | ecs_task.rb>: echo 91 | require: echo 92 | gem_install: [awesome_print] 93 | image: civitaspo/ruby-awscli:latest 94 | _export: 95 | message: 96 | message: 'hello ecs_task.rb' 97 | created_by: civitaspo 98 | 99 | +ecs_task.py: 100 | +step0: 101 | ecs_task.py>: echo.echo 102 | pip_install: [PyYaml] 103 | image: civitaspo/python-awscli:latest 104 | _export: 105 | message: 106 | message: 'hello ecs_task.py' 107 | created_by: civitaspo 108 | 109 | +ecs_task.embulk: 110 | _export: 111 | path_prefix: ./csv/ 112 | +dig: 113 | ecs_task.embulk>: 114 | in: 115 | type: file 116 | path_prefix: ${path_prefix} 117 | parser: 118 | charset: UTF-8 119 | newline: CRLF 120 | type: csv 121 | delimiter: ',' 122 | quote: '"' 123 | escape: '"' 124 | null_string: 'NULL' 125 | skip_header_lines: 0 126 | columns: 127 | - {name: id, type: long} 128 | - {name: comment, type: string} 129 | out: 130 | type: stdout 131 | image: civitaspo/embulk-awscli:latest 132 | 133 | +file: 134 | ecs_task.embulk>: template.yml 135 | image: civitaspo/embulk-awscli:latest 136 | 137 | ``` 138 | 139 | See [example](./example). 140 | 141 | # Configuration 142 | 143 | ## Remarks 144 | 145 | - type `DurationParam` is strings matched `\s*(?:(?\d+)\s*d)?\s*(?:(?\d+)\s*h)?\s*(?:(?\d+)\s*m)?\s*(?:(?\d+)\s*s)?\s*`. 146 | - The strings is used as `java.time.Duration`. 
147 | 148 | ## Common Configuration 149 | 150 | ### System Options 151 | 152 | Define the below options on properties (which is indicated by `-c`, `--config`). 153 | 154 | - **ecs_task.allow_auth_method_env**: Indicates whether users can use **auth_method** `"env"` (boolean, default: `false`) 155 | - **ecs_task.allow_auth_method_instance**: Indicates whether users can use **auth_method** `"instance"` (boolean, default: `false`) 156 | - **ecs_task.allow_auth_method_profile**: Indicates whether users can use **auth_method** `"profile"` (boolean, default: `false`) 157 | - **ecs_task.allow_auth_method_properties**: Indicates whether users can use **auth_method** `"properties"` (boolean, default: `false`) 158 | - **ecs_task.allow_auth_method_web_identity_token**: Indicates whether users can use **auth_method** `"web_identity_token"` (boolean, default: `false`) 159 | - **ecs_task.assume_role_timeout_duration**: Maximum duration which server administer allows when users assume **role_arn**. (`DurationParam`, default: `1h`) 160 | - **ecs_task.default_web_identity_token_file**: Path to a web identity token file. (string, optional) 161 | - **ecs_task.default_web_identity_role_arn**: AWS Role when using a web identity token. (string, optional) 162 | 163 | ### Secrets 164 | 165 | - **ecs_task.access_key_id**: The AWS Access Key ID (optional) 166 | - **ecs_task.secret_access_key**: The AWS Secret Access Key (optional) 167 | - **ecs_task.session_token**: The AWS session token. This is used only **auth_method** is `"session"` (optional) 168 | - **ecs_task.role_arn**: The AWS Role to assume. (optional) 169 | - **ecs_task.role_session_name**: The AWS Role Session Name when assuming the role. 
(default: `digdag-ecs_task-${session_uuid}`) 170 | - **ecs_task.http_proxy.host**: proxy host (required if **use_http_proxy** is `true`) 171 | - **ecs_task.http_proxy.port**: proxy port (optional) 172 | - **ecs_task.http_proxy.scheme**: `"https"` or `"http"` (default: `"https"`) 173 | - **ecs_task.http_proxy.user**: proxy user (optional) 174 | - **ecs_task.http_proxy.password**: http proxy password (optional) 175 | 176 | ### Options 177 | 178 | - **auth_method**: name of mechanism to authenticate requests (`"basic"`, `"env"`, `"instance"`, `"profile"`, `"properties"`, `"anonymous"`, or `"session"`. default: `"basic"`) 179 | - `"basic"`: uses access_key_id and secret_access_key to authenticate. 180 | - `"env"`: uses AWS_ACCESS_KEY_ID (or AWS_ACCESS_KEY) and AWS_SECRET_KEY (or AWS_SECRET_ACCESS_KEY) environment variables. 181 | - `"instance"`: uses EC2 instance profile. 182 | - `"profile"`: uses credentials written in a file. Format of the file is as follows, where `[...]` is a name of profile. 183 | - **profile_file**: path to a profiles file. (string, default: given by `AWS_CREDENTIAL_PROFILES_FILE` environment variable, or ~/.aws/credentials). 184 | - **profile_name**: name of a profile. (string, default: `"default"`) 185 | - `"properties"`: uses aws.accessKeyId and aws.secretKey Java system properties. 186 | - `"anonymous"`: uses anonymous access. This auth method can access only public files. 187 | - `"session"`: uses temporary-generated access_key_id, secret_access_key and session_token. 188 | - `"web_identity_token"`: uses web identity token. 189 | - **web_identity_token_file**: path to a web identity token file. (string, default: given by **ecs_task.default_web_identity_token_file**) 190 | - **web_identity_role_arn**: aws role arn when using a web identity token. (string, default: given by **ecs_task.default_web_identity_role_arn**) 191 | - **use_http_proxy**: Indicates whether to use an http proxy when accessing AWS. 
(boolean, default: `false`) 192 | - **region**: The AWS region. (string, optional) 193 | - **endpoint**: The AWS Service endpoint. (string, optional) 194 | 195 | ## Configuration for `ecs_task.register>` operator 196 | 197 | - **ecs_task.register>**: The configuration is the same as the snake-cased [RegisterTaskDefinition API](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html) (map, required) 198 | 199 | ## Configuration for `ecs_task.run>` operator 200 | 201 | The configuration is the same as the snake-cased [RunTask API](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html). 202 | 203 | In addition, the below configurations exist. 204 | 205 | - **def**: The definition for the task. The configuration is the same as `ecs_task.register>`'s one. (map, optional) 206 | - **NOTE**: **task_definition** is required on the [RunTask API Doc](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html), but it is not required if the **def** is defined. 207 | - **result_s3_uri**: The S3 uri for the task result. (string, optional) 208 | - **NOTE**: This configuration is used by `ecs_task.result>` operator, so the result content must follow the rule. 209 | - **timeout**: Timeout duration for the task. (`DurationParam`, default: `15m`) 210 | - **polling_strategy**: The polling strategy settings of wait. The configuration is the same as `ecs_task.wait>`'s one. (map, optional) 211 | 212 | ## Configuration for `ecs_task.wait>` operator 213 | 214 | - **cluster**: The short name or full ARN of the cluster that hosts the tasks. (string, required) 215 | - **tasks**: A list of up to 100 task IDs or full ARN entries. (array of string, required) 216 | - **timeout**: Timeout duration for the tasks. (`DurationParam`, default: `15m`) 217 | - **condition**: The condition of tasks to wait. Available values are `"all"` or `"any"`. (string, default: `"all"`) 218 | - **status**: The status of tasks to wait. 
Available values are `"PENDING"`, `"RUNNING"`, or `"STOPPED"` (string, default: `"STOPPED"`) 219 | - **ignore_failure**: Ignore even if any tasks exit with any status. This option is true, then the behaviour includes one of when **ignore_exit_code** is `true`. (boolean, default: `false`) 220 | - **ignore_exit_code**: Ignore even if any tasks exit with any exit code. When the containers of the task include one that does not have exit code, it is not ignored even if this option is `true`. (boolean, default: `false`) 221 | - **polling_strategy**: The polling strategy settings of wait. 222 | - **interval_type**: The interval type of wait. Available values are `"constant"` or `"exponential"`. (string, default: `"constant"`) 223 | - **limit**: Max number of polling try. (integer, optional) 224 | - **interval**: Delay interval of wait. The time unit is seconds. (integer, default: `1`) 225 | 226 | ## Configuration for `ecs_task.result>` operator 227 | 228 | - **ecs_task.result>**: S3 URI that the result is stored. (string, required) 229 | - **NOTE**: The result content must follow the below rule. 230 | - the format is json. 231 | - the keys are `"subtask_config"`, `"export_params"`, `"store_params"`. 232 | - the values are string to object map. 233 | - the usage follows [Digdag Python API](https://docs.digdag.io/python_api.html), [Digdag Ruby API](https://docs.digdag.io/ruby_api.html). 234 | 235 | # (Experimental) Scripting Operators 236 | 237 | [digdag-operator-ecs_task](https://github.com/civitaspo/digdag-operator-ecs_task) supports some [scripting operators](https://docs.digdag.io/operators/scripting.html) such as `ecs_task.py`, `ecs_task.rb`. Originally I wanted to provide `ecs_task` as one of the implementations of `CommandExecutor` provided by digdag, but users cannot extend the current CommandExecutor as written in this issue: [\[feature-request\] Use Custom CommandExecutors](https://github.com/treasure-data/digdag/issues/901). 
Therefore, this plugin implements Scripting Operator on its own. Of course, the usage is the same as the Scripting Operator provided by digdag. When the issue is resolved, I will reimplement it using the `CommandExecutor` of digdag. 238 | 239 | ## Scripting Operators Common Configurations 240 | 241 | - **max_retry**: Max number of retry when scripting container has no exit code. (integer, default: `3`) 242 | - **sidecars**: A list of container definitions except the container for scripting operator. (array of map, optional) 243 | - The configuration map is the same as the snake-cased [API_ContainerDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) 244 | - **cpu**: The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example `1024`, or as a string using vCPUs, for example `1 vCPU` or `1 vcpu`, in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered. (string, optional) 245 | - See the docs for more info: [ECS-RegisterTaskDefinition-request-cpu](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html#ECS-RegisterTaskDefinition-request-cpu) 246 | - **ephemeral_storage**: The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. For more information, see [AWS::ECS::TaskDefinition EphemeralStorage](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-ephemeralstorage.html). (map, optional) 247 | - The configuration map is the same as the snake-cased [API_EphemeralStorage](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_EphemeralStorage.html). 
248 | - **execution_role_arn**: The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. (string, optional) 249 | - **family_prefix**: The family name prefix for a task definition. This is used if **family** is not defined. (string, default: `""`) 250 | - **family_infix**: The family name infix for a task definition. This is used if **family** is not defined. (string, default: `"${task_name}"`) 251 | - **family_suffix**: The family name suffix for a task definition. This is used if **family** is not defined. (string, default: `""`) 252 | - **family**: You must specify a `family` for a task definition, which allows you to track multiple versions of the same task definition. The `family` is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. If invalid characters are found, these are replaced with `"_"`. (string, default: `"${family_prefix}${family_infix}${family_suffix}"`) 253 | - **memory**: The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example `1024`, or as a string using GB, for example `1GB` or `1 GB`, in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered. (string, optional) 254 | - See the docs for more info: [ECS-RegisterTaskDefinition-request-memory](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html#ECS-RegisterTaskDefinition-request-memory) 255 | - **network_mode**: The Docker networking mode to use for the containers in the task. The valid values are `none`, `bridge`, `awsvpc`, and `host`. The default Docker network mode is `bridge`. If using the Fargate launch type, the `awsvpc` network mode is required. If using the EC2 launch type, any network mode can be used.
If the network mode is set to `none`, you can't specify port mappings in your container definitions, and the task's containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode. With the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings. If the network mode is `awsvpc`, the task is allocated an Elastic Network Interface, and you must specify the **network_configuration** option when you create a service or run a task with the task definition. For more information, see [Task Networking](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the Amazon Elastic Container Service Developer Guide. If the network mode is `host`, you can't run multiple instantiations of the same task on a single container instance when port mappings are used. Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. (string, optional) 256 | - **requires_compatibilities**: The launch type required by the task. If no value is specified, it defaults to `EC2`. (string, optional) 257 | - **runtime_platform**: The platform definition for the task. 
258 | - The configuration map is the same as the snake-cased [API_RuntimePlatform](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RuntimePlatform.html) 259 | - **cpu_architecture**: default `X86_64` (the valid values are `X86_64`, `ARM64`) 260 | - **operating_system_family**: default `LINUX` 261 | - **task_role_arn**: The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. (string, optional) 262 | - **volumes**: A list of volume definitions. (array of map, optional) 263 | - The configuration map is the same as the snake-cased [API_Volume](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Volume.html). 264 | - **depends_on**: The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed. For more information, see [ECS-Type-ContainerDefinition-dependsOn](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-dependsOn). (array of map, optional) 265 | - The configuration map is the same as the snake-cased [API_ContainerDependency](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDependency.html). 266 | - **disable_networking**: When this parameter is `true`, networking is disabled within the container. (boolean, optional) 267 | - **dns_search_domains**: A list of DNS search domains that are presented to the container. (array of string, optional) 268 | - **dns_servers**: A list of DNS servers that are presented to the container. (array of string, optional) 269 | - **docker_labels**: A key/value map of labels to add to the container.
(string to string map, optional) 270 | - **docker_security_options**: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the `Fargate` launch type. For more information, see [ECS-Type-ContainerDefinition-dockerSecurityOptions](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-dockerSecurityOptions). (array of string, optional) 271 | - **entry_point**: The entry point that is passed to the container. (array of string, optional) 272 | - **environments**: The environment variables to pass to a container. (string to string map, optional) 273 | - **extra_hosts**: A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter is not supported for Windows containers or tasks that use the `awsvpc` network mode. (string to string map, optional) 274 | - **health_check**: The health check command and associated configuration parameters for the container. The configuration map is the same as the snake-cased [APIReference/API_HealthCheck](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html). (map, optional) 275 | - **hostname**: The hostname to use for your container. (string, optional) 276 | - **image**: The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest`. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. For more information, see [ECS-Type-ContainerDefinition-image](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-image). 
(string, optional) 277 | - **interactive**: When this parameter is true, this allows you to deploy containerized applications that require `stdin` or a `tty` to be allocated. (boolean, optional) 278 | - **links**: The `link` parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to `bridge`. The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. (array of string, optional) 279 | - **linux_parameters**: Linux-specific modifications that are applied to the container, such as Linux [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html). The configuration map is the same as the snake-cased [API_LinuxParameters](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LinuxParameters.html). (map, optional) 280 | - **log_configuration**: The log configuration specification for the container. For more information, see [ECS-Type-ContainerDefinition-logConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-logConfiguration). The configuration map is the same as the snake-cased [API_LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html). (map, optional) 281 | - **firelens_configuration**: The fireLens configuration for the container. For more information, see [ECS-Type-ContainerDefinition-firelensConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-firelensConfiguration). 282 | - **mount_points**: The mount points for data volumes in your container. 
(array of map, optional) 283 | - The configuration map is the same as the snake-cased [API_MountPoint](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_MountPoint.html). 284 | - **container_name**: The name of a container. (string, default: the same as **family**) 285 | - **port_mappings**: The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic. For more information, see [ECS-Type-ContainerDefinition-portMappings](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-portMappings). (array of map, optional) 286 | - The configuration map is the same as the snake-cased [API_PortMapping](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PortMapping.html). 287 | - **privileged**: When this parameter is `true`, the container is given elevated privileges on the host container instance (similar to the `root` user). (boolean, optional) 288 | - **pseudo_terminal**: When this parameter is `true`, a TTY is allocated. (boolean, optional) 289 | - **readonly_root_filesystem**: When this parameter is `true`, the container is given read-only access to its root file system. (boolean, optional) 290 | - **repository_credentials**: The private repository authentication credentials to use. The configuration map is the same as the snake-cased [API_RepositoryCredentials](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RepositoryCredentials.html). (map, optional) 291 | - **secrets**: The secrets to pass to the container. (array of map, optional) 292 | - The configuration map is the same as the snake-cased [API_Secret](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html). 293 | - **system_controls**: A list of namespaced kernel parameters to set in the container.
For more information, see [ECS-Type-ContainerDefinition-systemControls](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html#ECS-Type-ContainerDefinition-systemControls). (array of map, optional) 294 | - The configuration map is the same as the snake-cased [API_SystemControl](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_SystemControl.html). 295 | - **ulimits**: A list of ulimits to set in the container. (array of map, optional) 296 | - The configuration map is the same as the snake-cased [API_Ulimit](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html). 297 | - **user**: The user name to use inside the container. (string, optional) 298 | - **volumes_from**: Data volumes to mount from another container. (array of map, optional) 299 | - The configuration map is the same as the snake-cased [API_VolumeFrom](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_VolumeFrom.html) 300 | - **working_directory**: The working directory in which to run commands inside the container. (string, optional) 301 | - **capacity_provider_strategy**: An array of capacity provider strategy items to control capacity providers. (array of map, optional) 302 | - The configuration map is the same as the snake-cased [API CapacityProviderStrategyItem](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CapacityProviderStrategyItem.html) 303 | - **cluster**: The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. (string, required) 304 | - **count**: The number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks per call. (integer, optional) 305 | - **group**: The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name). (string, optional) 306 | - **launch_type**: The launch type on which to run your task. 
Valid values are `EC2`, `FARGATE`. (string, optional) 307 | - **network_configuration**: The network configuration for the task. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. For more information, see [Task Networking](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the Amazon Elastic Container Service Developer Guide. The configuration map is the same as the snake-cased [API_NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html). (map, optional) 308 | - **overrides**: A list of container overrides that specify the name of a container in the specified task definition and the overrides it should receive. The configuration map is the same as the snake-cased [API_TaskOverride](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskOverride.html). (map, optional) 309 | - **placement_constraints**: An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task. (array of map, optional) 310 | - The configuration map is the same as the snake-cased [API_PlacementConstraint](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementConstraint.html). 311 | - **placement_strategy**: The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. (array of map, optional) 312 | - The configuration map is the same as the snake-cased [API_PlacementStrategy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html). 313 | - **platform_version**: The platform version on which to run your task. If one is not specified, the latest version is used by default. (string, optional) 314 | - **started_by**: An optional tag specified when a task is started. 
(string, optional) 315 | - **tags**: The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. (string to string map, optional) 316 | - Using this option requires [migrating your Amazon ECS deployment to the new ARN and resource ID format](https://aws.amazon.com/blogs/compute/migrating-your-amazon-ecs-deployment-to-the-new-arn-and-resource-id-format-2/). 317 | - **shell**: The shell command in your container. (string, default: `"sh"`) 318 | - You can set the absolute path of the command. 319 | - This option does not support `"dash"`. 320 | - **workspace_s3_uri_prefix**: S3 uri prefix for using as workspace. (string, required) 321 | - Currently, input params, output params, stdout, stderr, and internal scripts are put on S3, and then they are not removed. So it is insecure unless access to S3 is strictly controlled. 322 | - This option is **deprecated**. Please use **tmp_storage** option instead. 323 | - **tmp_storage**: Temporary storage for the data and files scripting operator uses. (map, required) 324 | - **type**: storage type. Currently, only `"s3"` is valid. (string, required) 325 | - **uri**: storage uri. (string, required) 326 | 327 | ## Configuration for `ecs_task.py>` operator 328 | 329 | - **ecs_task.py>**: Name of a method to run. The format is `[PACKAGE.CLASS.]METHOD`. (string, required) 330 | - **pip_install**: packages to install before task running. (array of string, optional) 331 | 332 | ## Configuration for `ecs_task.rb>` operator 333 | 334 | - **ecs_task.rb>**: Name of a method to run. The format is `[MODULE::CLASS.]METHOD`. (string, required) 335 | - **gem_install**: packages to install before task running. (array of string, optional) 336 | - **require**: Name of a file to require. e.g.
`require: task/my_workflow` (string, required) 337 | 338 | ## Configuration for `ecs_task.sh>` operator 339 | 340 | - **ecs_task.sh>**: command to run on shell. (string, required) 341 | 342 | ## Configuration for `ecs_task.embulk>` operator 343 | 344 | - **ecs_task.embulk>**: Embulk config yaml or file. You can use digdag's template engine like `${...}` in the config yaml or file. (string or map, required) 345 | - For more information, see [Embulk Docs](http://www.embulk.org/docs/index.html). 346 | - **embulk_plugins**: packages to install before task running. (array of string, optional) 347 | - You can see the plugins in [Embulk Plugins](http://www.embulk.org/plugins/). 348 | 349 | # Development 350 | 351 | ## Run an Example 352 | 353 | ### 1) build 354 | 355 | ```sh 356 | ./gradlew publish 357 | ``` 358 | 359 | Artifacts are build on local repos: `./build/repo`. 360 | 361 | ### 2) get your aws profile 362 | 363 | ```sh 364 | aws configure 365 | ``` 366 | 367 | ### 3) run an example 368 | 369 | ```sh 370 | ./example/run.sh ${ECS Cluster Name} ${S3 URI Prefix for tmp storage} ${ECS Task Role ARN} 371 | ``` 372 | 373 | ## (TODO) Run Tests 374 | 375 | ```sh 376 | ./gradlew test 377 | ``` 378 | 379 | # ChangeLog 380 | 381 | [CHANGELOG.md](./CHANGELOG.md) 382 | 383 | # License 384 | 385 | [Apache License 2.0](./LICENSE.txt) 386 | 387 | # Author 388 | 389 | @civitaspo 390 | 391 | -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id 'scala' 3 | id 'maven-publish' 4 | id 'com.github.johnrengelman.shadow' version '2.0.2' 5 | id "com.diffplug.gradle.spotless" version "3.27.1" 6 | } 7 | 8 | group = 'pro.civitaspo' 9 | version = '0.1.6' 10 | 11 | def digdagVersion = '0.9.42' 12 | def scalaSemanticVersion = "2.13.1" 13 | def depScalaVersion = "2.13" 14 | 15 | repositories { 16 | mavenCentral() 17 | } 18 | 19 | dependencies { 20 | 
compile group: 'io.digdag', name: 'digdag-spi', version: digdagVersion 21 | compile group: 'io.digdag', name: 'digdag-plugin-utils', version: digdagVersion 22 | 23 | // https://mvnrepository.com/artifact/org.scala-lang/scala-library 24 | compile group: 'org.scala-lang', name: 'scala-library', version: scalaSemanticVersion 25 | 26 | ['ecs', 's3', 'sts'].each { svc -> 27 | // https://mvnrepository.com/artifact/com.amazonaws/ 28 | compile group: 'com.amazonaws', name: "aws-java-sdk-${svc}", version: '1.12.117' 29 | } 30 | 31 | testCompile group: 'org.scalatest', name: "scalatest_$depScalaVersion", version: '3.0.8' 32 | testCompile group: 'junit', name: 'junit', version: '4.12' 33 | testCompile group: 'io.digdag', name: 'digdag-cli', version: digdagVersion 34 | } 35 | 36 | shadowJar { 37 | classifier = null 38 | dependencies { 39 | exclude(dependency('io.digdag:.*')) 40 | exclude(dependency('.*:jackson.*:.*')) 41 | } 42 | } 43 | 44 | publishing { 45 | publications { 46 | shadow(MavenPublication) { publication -> 47 | project.shadow.component(publication) 48 | } 49 | } 50 | repositories { 51 | maven { 52 | if (System.getenv("GITHUB_TOKEN")) { 53 | name = "GitHubPackages" 54 | url = uri("https://maven.pkg.github.com/civitaspo/digdag-operator-ecs_task") 55 | credentials { 56 | username = "civitaspo" 57 | password = System.getenv("GITHUB_TOKEN") 58 | } 59 | } 60 | else { 61 | url "$buildDir/repo" 62 | } 63 | } 64 | } 65 | } 66 | 67 | spotless { 68 | scala { 69 | scalafmt('2.3.2').configFile('.scalafmt.conf') 70 | } 71 | } 72 | 73 | 74 | sourceCompatibility = 1.8 75 | targetCompatibility = 1.8 76 | 77 | compileScala.options.encoding = 'UTF-8' 78 | compileTestScala.options.encoding = 'UTF-8' 79 | compileScala.options.compilerArgs << "-Xlint:unchecked" << "-Xlint:deprecation" 80 | 81 | -------------------------------------------------------------------------------- /example/.gitignore: -------------------------------------------------------------------------------- 1 | 
/.digdag-wrapper 2 | .digdag 3 | *.pyc 4 | example.out 5 | 6 | -------------------------------------------------------------------------------- /example/digdag.properties: -------------------------------------------------------------------------------- 1 | ecs_task.allow_auth_method_env=true 2 | ecs_task.allow_auth_method_instance=true 3 | ecs_task.allow_auth_method_profile=true 4 | ecs_task.allow_auth_method_properties=true 5 | -------------------------------------------------------------------------------- /example/ecs_task.embulk/csv/data.01.csv: -------------------------------------------------------------------------------- 1 | 1,aaa 2 | 2,bbb 3 | 3,ccc 4 | -------------------------------------------------------------------------------- /example/ecs_task.embulk/csv/data.02.csv: -------------------------------------------------------------------------------- 1 | 1,aaa 2 | 2,bbb 3 | 3,ccc 4 | -------------------------------------------------------------------------------- /example/ecs_task.embulk/example.dig: -------------------------------------------------------------------------------- 1 | 2 | +ecs_task.embulk: 3 | _export: 4 | path_prefix: ./csv/ 5 | +dig: 6 | ecs_task.embulk>: 7 | in: 8 | type: file 9 | path_prefix: ${path_prefix} 10 | parser: 11 | charset: UTF-8 12 | newline: CRLF 13 | type: csv 14 | delimiter: ',' 15 | quote: '"' 16 | escape: '"' 17 | null_string: 'NULL' 18 | skip_header_lines: 0 19 | columns: 20 | - {name: id, type: long} 21 | - {name: comment, type: string} 22 | out: 23 | type: stdout 24 | image: civitaspo/embulk-awscli:latest 25 | 26 | +file: 27 | ecs_task.embulk>: template.yml 28 | image: civitaspo/embulk-awscli:latest 29 | 30 | -------------------------------------------------------------------------------- /example/ecs_task.embulk/template.yml: -------------------------------------------------------------------------------- 1 | in: 2 | type: file 3 | path_prefix: ${path_prefix} 4 | parser: 5 | charset: UTF-8 6 | newline: CRLF 7 | 
type: csv 8 | delimiter: ',' 9 | quote: '"' 10 | escape: '"' 11 | null_string: 'NULL' 12 | skip_header_lines: 0 13 | columns: 14 | - {name: id, type: long} 15 | - {name: comment, type: string} 16 | out: 17 | type: stdout 18 | 19 | -------------------------------------------------------------------------------- /example/ecs_task.py/echo.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | 4 | def echo(message: str): 5 | print(yaml.dump(message)) 6 | 7 | -------------------------------------------------------------------------------- /example/ecs_task.py/example.dig: -------------------------------------------------------------------------------- 1 | 2 | +ecs_task.py: 3 | +step0: 4 | ecs_task.py>: echo.echo 5 | pip_install: [PyYaml] 6 | image: civitaspo/python-awscli:latest 7 | _export: 8 | message: 9 | message: 'hello ecs_task.py' 10 | created_by: civitaspo 11 | 12 | -------------------------------------------------------------------------------- /example/ecs_task.rb/echo.rb: -------------------------------------------------------------------------------- 1 | require 'awesome_print' 2 | 3 | def echo(message) 4 | ap message 5 | end 6 | -------------------------------------------------------------------------------- /example/ecs_task.rb/example.dig: -------------------------------------------------------------------------------- 1 | 2 | +ecs_task.rb: 3 | +step0: 4 | ecs_task.rb>: echo 5 | require: echo 6 | gem_install: [awesome_print] 7 | image: civitaspo/ruby-awscli:latest 8 | _export: 9 | message: 10 | message: 'hello ecs_task.rb' 11 | created_by: civitaspo 12 | 13 | -------------------------------------------------------------------------------- /example/ecs_task.run/example.dig: -------------------------------------------------------------------------------- 1 | 2 | +ecs_task.run: 3 | +step1: 4 | ecs_task.run>: 5 | def: 6 | network_mode: host 7 | container_definitions: 8 | - name: step1 9 | image: 
civitaspo/python-awscli:latest 10 | command: [echo, step1] 11 | essential: true 12 | memory: 500 13 | cpu: 1 14 | family: step1 15 | count: 1 16 | +step2: 17 | ecs_task.run>: 18 | def: 19 | network_mode: host 20 | container_definitions: 21 | - name: step2 22 | image: civitaspo/python-awscli:latest 23 | command: [echo, step2] 24 | essential: true 25 | memory: 500 26 | cpu: 1 27 | family: step2 28 | count: 1 29 | +step3: 30 | ecs_task.run>: 31 | def: 32 | network_mode: host 33 | container_definitions: 34 | - name: step3 35 | image: civitaspo/python-awscli:latest 36 | command: 37 | - sh 38 | - -c 39 | - echo '{"store_params":{"civi":"taspo"}}' | aws s3 cp - ${output}/${session_uuid}.json 40 | essential: true 41 | memory: 500 42 | cpu: 1 43 | task_role_arn: ${task_role_arn} 44 | family: step3 45 | count: 1 46 | result_s3_uri: ${output}/${session_uuid}.json 47 | 48 | +step4: 49 | echo>: ${civi} 50 | -------------------------------------------------------------------------------- /example/ecs_task.sh/example.dig: -------------------------------------------------------------------------------- 1 | 2 | +ecs_task.sh: 3 | +step0: 4 | ecs_task.sh>: env 5 | image: civitaspo/digdag-awscli:latest 6 | environments: 7 | hoge: fuga 8 | fugo: hogo 9 | _export: 10 | message: 11 | message: 'hello ecs_task.sh' 12 | created_by: civitaspo 13 | 14 | +step1: 15 | +exceeds-255-letters: 16 | +dummy-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17 | +dummy-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18 | +dummy-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19 | +dummy-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 20 | +dummy-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 21 | ecs_task.sh>: env 22 | image: civitaspo/digdag-awscli:latest 23 | _export: 24 | message: 25 | message: 'hello ecs_task.sh' 26 | created_by: civitaspo 27 | +step2: 28 | ecs_task.sh>: sleep 15 29 | image: civitaspo/digdag-awscli:latest 30 | _export: 31 | ecs_task: 32 | wait: 33 | 
polling_strategy: 34 | interval_type: exponential 35 | limit: 4 36 | interval: 2 37 | 38 | +step3: 39 | ecs_task.sh>: echo 40 | image: civitaspo/digdag-awscli:latest 41 | shell: ash 42 | -------------------------------------------------------------------------------- /example/example.dig: -------------------------------------------------------------------------------- 1 | _export: 2 | plugin: 3 | repositories: 4 | - file://${repos} 5 | # - https://jitpack.io 6 | dependencies: 7 | - pro.civitaspo:digdag-operator-ecs_task:0.1.6 8 | ecs_task: 9 | auth_method: profile 10 | tmp_storage: 11 | type: s3 12 | uri: ${output} 13 | family_prefix: hello- 14 | cluster: ${cluster} 15 | network_mode: host 16 | memory: 1 GB 17 | # NOTE: Using this option requires the ECS Cluster migration. See https://aws.amazon.com/jp/blogs/compute/migrating-your-amazon-ecs-deployment-to-the-new-arn-and-resource-id-format-2/ 18 | # tags: 19 | # environment: development 20 | # created_by: digdag-operator-ecs_task 21 | # digdag.session_uuid: ${session_uuid} 22 | task_role_arn: ${task_role_arn} 23 | 24 | +ecs_task.sh: 25 | call>: ecs_task.sh/example 26 | 27 | +ecs_task.rb: 28 | call>: ecs_task.rb/example 29 | 30 | +ecs_task.py: 31 | call>: ecs_task.py/example 32 | 33 | +ecs_task.embulk: 34 | call>: ecs_task.embulk/example 35 | 36 | +ecs_task.run: 37 | call>: ecs_task.run/example 38 | -------------------------------------------------------------------------------- /example/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT=$(cd $(dirname $0)/..; pwd) 4 | EXAMPLE_ROOT=$ROOT/example 5 | LOCAL_MAVEN_REPO=$ROOT/build/repo 6 | 7 | CLUSTER="$1" 8 | OUTPUT="$2" 9 | TASK_ROLE_ARN="$3" 10 | 11 | if [ -z "$CLUSTER" ]; then 12 | echo "[ERROR] Set cluster as the first argument." 13 | exit 1 14 | fi 15 | if [ -z "$OUTPUT" ]; then 16 | echo "[ERROR] Set output s3 URI as the second argument."
17 | exit 1 18 | fi 19 | if [ -z "$TASK_ROLE_ARN" ]; then 20 | echo "[ERROR] Set task role arn as the third argument." 21 | exit 1 22 | fi 23 | 24 | ( 25 | cd $EXAMPLE_ROOT 26 | 27 | ## to remove cache 28 | rm -rfv .digdag 29 | 30 | ## run 31 | digdag run example.dig \ 32 | -c digdag.properties \ 33 | -p repos=${LOCAL_MAVEN_REPO} \ 34 | -p output=${OUTPUT} \ 35 | -p cluster=${CLUSTER} \ 36 | -p task_role_arn=${TASK_ROLE_ARN} \ 37 | --no-save 38 | ) 39 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/civitaspo/digdag-operator-ecs_task/048a3ce467dae5191cb53c18828dea787ce55897/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Attempt to set APP_HOME 10 | # Resolve links: $0 may be a link 11 | PRG="$0" 12 | # Need this for relative symlinks. 
13 | while [ -h "$PRG" ] ; do 14 | ls=`ls -ld "$PRG"` 15 | link=`expr "$ls" : '.*-> \(.*\)$'` 16 | if expr "$link" : '/.*' > /dev/null; then 17 | PRG="$link" 18 | else 19 | PRG=`dirname "$PRG"`"/$link" 20 | fi 21 | done 22 | SAVED="`pwd`" 23 | cd "`dirname \"$PRG\"`/" >/dev/null 24 | APP_HOME="`pwd -P`" 25 | cd "$SAVED" >/dev/null 26 | 27 | APP_NAME="Gradle" 28 | APP_BASE_NAME=`basename "$0"` 29 | 30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 31 | DEFAULT_JVM_OPTS="" 32 | 33 | # Use the maximum available, or set MAX_FD != -1 to use that value. 34 | MAX_FD="maximum" 35 | 36 | warn () { 37 | echo "$*" 38 | } 39 | 40 | die () { 41 | echo 42 | echo "$*" 43 | echo 44 | exit 1 45 | } 46 | 47 | # OS specific support (must be 'true' or 'false'). 48 | cygwin=false 49 | msys=false 50 | darwin=false 51 | nonstop=false 52 | case "`uname`" in 53 | CYGWIN* ) 54 | cygwin=true 55 | ;; 56 | Darwin* ) 57 | darwin=true 58 | ;; 59 | MINGW* ) 60 | msys=true 61 | ;; 62 | NONSTOP* ) 63 | nonstop=true 64 | ;; 65 | esac 66 | 67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 68 | 69 | # Determine the Java command to use to start the JVM. 70 | if [ -n "$JAVA_HOME" ] ; then 71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 72 | # IBM's JDK on AIX uses strange locations for the executables 73 | JAVACMD="$JAVA_HOME/jre/sh/java" 74 | else 75 | JAVACMD="$JAVA_HOME/bin/java" 76 | fi 77 | if [ ! -x "$JAVACMD" ] ; then 78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 79 | 80 | Please set the JAVA_HOME variable in your environment to match the 81 | location of your Java installation." 82 | fi 83 | else 84 | JAVACMD="java" 85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 86 | 87 | Please set the JAVA_HOME variable in your environment to match the 88 | location of your Java installation." 
89 | fi 90 | 91 | # Increase the maximum file descriptors if we can. 92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 93 | MAX_FD_LIMIT=`ulimit -H -n` 94 | if [ $? -eq 0 ] ; then 95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 96 | MAX_FD="$MAX_FD_LIMIT" 97 | fi 98 | ulimit -n $MAX_FD 99 | if [ $? -ne 0 ] ; then 100 | warn "Could not set maximum file descriptor limit: $MAX_FD" 101 | fi 102 | else 103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 104 | fi 105 | fi 106 | 107 | # For Darwin, add options to specify how the application appears in the dock 108 | if $darwin; then 109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 110 | fi 111 | 112 | # For Cygwin, switch paths to Windows format before running java 113 | if $cygwin ; then 114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 116 | JAVACMD=`cygpath --unix "$JAVACMD"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) 
set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Escape application args 158 | save () { 159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 160 | echo " " 161 | } 162 | APP_ARGS=$(save "$@") 163 | 164 | # Collect all arguments for the java command, following the shell quoting and substitution rules 165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 166 | 167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong 168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then 169 | cd "$(dirname "$0")" 170 | fi 171 | 172 | exec "$JAVACMD" "$@" 173 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @if "%DEBUG%" == "" @echo off 2 | @rem ########################################################################## 3 | @rem 4 | @rem Gradle startup script for Windows 5 | @rem 6 | @rem ########################################################################## 7 | 8 | @rem Set local scope for the variables with windows NT shell 9 | if "%OS%"=="Windows_NT" setlocal 10 | 11 | set DIRNAME=%~dp0 12 | if "%DIRNAME%" == "" set DIRNAME=. 
13 | set APP_BASE_NAME=%~n0 14 | set APP_HOME=%DIRNAME% 15 | 16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 17 | set DEFAULT_JVM_OPTS= 18 | 19 | @rem Find java.exe 20 | if defined JAVA_HOME goto findJavaFromJavaHome 21 | 22 | set JAVA_EXE=java.exe 23 | %JAVA_EXE% -version >NUL 2>&1 24 | if "%ERRORLEVEL%" == "0" goto init 25 | 26 | echo. 27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 28 | echo. 29 | echo Please set the JAVA_HOME variable in your environment to match the 30 | echo location of your Java installation. 31 | 32 | goto fail 33 | 34 | :findJavaFromJavaHome 35 | set JAVA_HOME=%JAVA_HOME:"=% 36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 37 | 38 | if exist "%JAVA_EXE%" goto init 39 | 40 | echo. 41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 42 | echo. 43 | echo Please set the JAVA_HOME variable in your environment to match the 44 | echo location of your Java installation. 45 | 46 | goto fail 47 | 48 | :init 49 | @rem Get command-line arguments, handling Windows variants 50 | 51 | if not "%OS%" == "Windows_NT" goto win9xME_args 52 | 53 | :win9xME_args 54 | @rem Slurp the command line arguments. 55 | set CMD_LINE_ARGS= 56 | set _SKIP=2 57 | 58 | :win9xME_args_slurp 59 | if "x%~1" == "x" goto execute 60 | 61 | set CMD_LINE_ARGS=%* 62 | 63 | :execute 64 | @rem Setup the command line 65 | 66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 67 | 68 | @rem Execute Gradle 69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 70 | 71 | :end 72 | @rem End local scope for the variables with windows NT shell 73 | if "%ERRORLEVEL%"=="0" goto mainEnd 74 | 75 | :fail 76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 77 | rem the _cmd.exe /c_ return code! 
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 79 | exit /b 1 80 | 81 | :mainEnd 82 | if "%OS%"=="Windows_NT" endlocal 83 | 84 | :omega 85 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | rootProject.name = 'digdag-operator-ecs_task' 2 | 3 | -------------------------------------------------------------------------------- /src/main/resources/META-INF/services/io.digdag.spi.Plugin: -------------------------------------------------------------------------------- 1 | pro.civitaspo.digdag.plugin.ecs_task.EcsTaskPlugin 2 | 3 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/embulk/run.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | set -o pipefail 3 | 4 | mkdir -p ./digdag-operator-ecs_task 5 | cd digdag-operator-ecs_task 6 | 7 | # Create output files 8 | touch out.json stdout.log stderr.log 9 | 10 | # Download requirements 11 | aws s3 cp s3://${ECS_TASK_EMBULK_BUCKET}/${ECS_TASK_EMBULK_PREFIX}/ ./ --recursive 12 | 13 | # Move workspace 14 | cd workspace 15 | 16 | # Unset e option for returning embulk results to digdag 17 | set +e 18 | 19 | # Run setup command 20 | ${ECS_TASK_EMBULK_SETUP_COMMAND} \ 21 | 2>> ../stderr.log \ 22 | | tee -a ../stdout.log 23 | 24 | # Run 25 | embulk run ../config.yml \ 26 | 2>> ../stderr.log \ 27 | | tee -a ../stdout.log 28 | 29 | # Capture exit code 30 | EXIT_CODE=$? 31 | 32 | # Set e option 33 | set -e 34 | 35 | # Move out workspace 36 | cd .. 
37 | 38 | # For logging driver 39 | cat stderr.log 1>&2 40 | 41 | # Write out.json 42 | cat <<EOF > out.json 43 | { 44 | "subtask_config": {}, 45 | "export_params": {}, 46 | "store_params": {}, 47 | "status_params": { 48 | "exit_code": $EXIT_CODE 49 | } 50 | } 51 | EOF 52 | 53 | # Upload results 54 | aws s3 cp ./out.json s3://${ECS_TASK_EMBULK_BUCKET}/${ECS_TASK_EMBULK_PREFIX}/ 55 | aws s3 cp ./stdout.log s3://${ECS_TASK_EMBULK_BUCKET}/${ECS_TASK_EMBULK_PREFIX}/ 56 | aws s3 cp ./stderr.log s3://${ECS_TASK_EMBULK_BUCKET}/${ECS_TASK_EMBULK_PREFIX}/ 57 | 58 | # Exit with the embulk exit code 59 | exit $EXIT_CODE 60 | 61 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/py/run.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | set -o pipefail 3 | 4 | mkdir -p ./digdag-operator-ecs_task 5 | cd digdag-operator-ecs_task 6 | 7 | # Create output files 8 | touch out.json stdout.log stderr.log 9 | 10 | # Download requirements 11 | aws s3 cp s3://${ECS_TASK_PY_BUCKET}/${ECS_TASK_PY_PREFIX}/ ./ --recursive 12 | 13 | # Move workspace 14 | cd workspace 15 | 16 | # Unset e option for returning python results to digdag 17 | set +e 18 | 19 | # Run setup command 20 | ${ECS_TASK_PY_SETUP_COMMAND} \ 21 | 2>> ../stderr.log \ 22 | | tee -a ../stdout.log 23 | 24 | # Run 25 | cat ../runner.py \ 26 | | python - "${ECS_TASK_PY_COMMAND}" \ 27 | ../in.json \ 28 | ../out.json \ 29 | 2>> ../stderr.log \ 30 | | tee -a ../stdout.log 31 | 32 | # Capture exit code 33 | EXIT_CODE=$? 34 | 35 | # Set e option 36 | set -e 37 | 38 | # Move out workspace 39 | cd .. 
40 | 41 | # For logging driver 42 | cat stderr.log 1>&2 43 | 44 | # Upload results 45 | aws s3 cp ./out.json s3://${ECS_TASK_PY_BUCKET}/${ECS_TASK_PY_PREFIX}/ 46 | aws s3 cp ./stdout.log s3://${ECS_TASK_PY_BUCKET}/${ECS_TASK_PY_PREFIX}/ 47 | aws s3 cp ./stderr.log s3://${ECS_TASK_PY_BUCKET}/${ECS_TASK_PY_PREFIX}/ 48 | 49 | # Exit with the python exit code 50 | exit $EXIT_CODE 51 | 52 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/py/runner.py: -------------------------------------------------------------------------------- 1 | ######### 2 | # Copy from https://raw.githubusercontent.com/treasure-data/digdag/6c81976334b78b3b776e357c7e9244f6bbe2711a/digdag-standards/src/main/resources/digdag/standards/py/runner.py 3 | # Then, customize a bit about error handling 4 | ######### 5 | 6 | import sys 7 | import os 8 | import json 9 | import imp 10 | import inspect 11 | import collections 12 | import traceback 13 | 14 | command = sys.argv[1] 15 | in_file = sys.argv[2] 16 | out_file = sys.argv[3] 17 | 18 | with open(in_file) as f: 19 | in_data = json.load(f) 20 | params = in_data['params'] 21 | 22 | # fake digdag_env module already imported 23 | digdag_env_mod = sys.modules['digdag_env'] = imp.new_module('digdag_env') 24 | digdag_env_mod.params = params 25 | digdag_env_mod.subtask_config = collections.OrderedDict() 26 | digdag_env_mod.export_params = {} 27 | digdag_env_mod.store_params = {} 28 | digdag_env_mod.state_params = {} 29 | import digdag_env 30 | 31 | # fake digdag module already imported 32 | digdag_mod = sys.modules['digdag'] = imp.new_module('digdag') 33 | 34 | class Env(object): 35 | def __init__(self, digdag_env_mod): 36 | self.params = digdag_env_mod.params 37 | self.subtask_config = digdag_env_mod.subtask_config 38 | self.export_params = digdag_env_mod.export_params 39 | self.store_params = digdag_env_mod.store_params 40 | self.state_params = 
digdag_env_mod.state_params 41 | self.subtask_index = 0 42 | 43 | def set_state(self, params={}, **kwds): 44 | self.state_params.update(params) 45 | self.state_params.update(kwds) 46 | 47 | def export(self, params={}, **kwds): 48 | self.export_params.update(params) 49 | self.export_params.update(kwds) 50 | 51 | def store(self, params={}, **kwds): 52 | self.store_params.update(params) 53 | self.store_params.update(kwds) 54 | 55 | def add_subtask(self, function=None, **params): 56 | if function is not None and not isinstance(function, dict): 57 | if hasattr(function, "im_class"): 58 | # Python 2 59 | command = ".".join([function.im_class.__module__, function.im_class.__name__, function.__name__]) 60 | else: 61 | # Python 3 62 | command = ".".join([function.__module__, function.__qualname__]) 63 | config = params 64 | config["py>"] = command 65 | else: 66 | if isinstance(function, dict): 67 | config = function.copy() 68 | config.update(params) 69 | else: 70 | config = params 71 | try: 72 | json.dumps(config) 73 | except Exception as error: 74 | raise TypeError("Parameters must be serializable using JSON: %s" % str(error)) 75 | self.subtask_config["+subtask" + str(self.subtask_index)] = config 76 | self.subtask_index += 1 77 | 78 | digdag_mod.env = Env(digdag_env_mod) 79 | import digdag 80 | 81 | # add the archive path to import path 82 | sys.path.append(os.path.abspath(os.getcwd())) 83 | 84 | def digdag_inspect_command(command): 85 | # package.name.Class.method 86 | fragments = command.split(".") 87 | method_name = fragments.pop() 88 | class_type = None 89 | callable_type = None 90 | try: 91 | mod = __import__(".".join(fragments), fromlist=[method_name]) 92 | try: 93 | callable_type = getattr(mod, method_name) 94 | except AttributeError as error: 95 | raise AttributeError("Module '%s' has no attribute '%s'" % (".".join(fragments), method_name)) 96 | except ImportError as error: 97 | class_name = fragments.pop() 98 | mod = __import__(".".join(fragments), 
fromlist=[class_name]) 99 | try: 100 | class_type = getattr(mod, class_name) 101 | except AttributeError as error: 102 | raise AttributeError("Module '%s' has no attribute '%s'" % (".".join(fragments), method_name)) 103 | 104 | if type(callable_type) == type: 105 | class_type = callable_type 106 | method_name = "run" 107 | 108 | if class_type is not None: 109 | return (class_type, method_name) 110 | else: 111 | return (callable_type, None) 112 | 113 | def digdag_inspect_arguments(callable_type, exclude_self, params): 114 | if callable_type == object.__init__: 115 | # object.__init__ accepts *varargs and **keywords but it throws exception 116 | return {} 117 | if hasattr(inspect, 'getfullargspec'): # Python3 118 | spec = inspect.getfullargspec(callable_type) 119 | keywords_ = spec.varkw 120 | else: # Python 2 121 | spec = inspect.getargspec(callable_type) 122 | keywords_ = spec.keywords 123 | 124 | args = {} 125 | for idx, key in enumerate(spec.args): 126 | if exclude_self and idx == 0: 127 | continue 128 | if key in params: 129 | args[key] = params[key] 130 | else: 131 | if spec.defaults is None or idx < len(spec.args) - len(spec.defaults): 132 | # this keyword is required but not in params. raising an error. 
133 | if hasattr(callable_type, '__qualname__'): 134 | # Python 3 135 | name = callable_type.__qualname__ 136 | elif hasattr(callable_type, 'im_class'): 137 | # Python 2 138 | name = "%s.%s" % (callable_type.im_class.__name__, callable_type.__name__) 139 | else: 140 | name = callable_type.__name__ 141 | raise TypeError("Method '%s' requires parameter '%s' but not set" % (name, key)) 142 | if keywords_: 143 | # above code was only for validation 144 | return params 145 | else: 146 | return args 147 | 148 | ##### begin: Custom Error Handling Code ##### 149 | status_params = {} 150 | def with_error_handler(func, **func_args): 151 | try: 152 | results = func(**func_args) 153 | status_params['exit_code'] = 0 154 | return results 155 | except Exception as e: 156 | status_params['exit_code'] = 1 157 | status_params['error_message'] = str(e) 158 | status_params['error_stacktrace'] = traceback.format_exc() 159 | print('message: {}, stacktrace: {}'.format(str(e), traceback.format_exc())) 160 | ##### end: Custom Error Handling Code ##### 161 | 162 | callable_type, method_name = digdag_inspect_command(command) 163 | 164 | if method_name: 165 | init_args = digdag_inspect_arguments(callable_type.__init__, True, params) 166 | instance = callable_type(**init_args) 167 | 168 | method = getattr(instance, method_name) 169 | method_args = digdag_inspect_arguments(method, True, params) 170 | # Replace the below code to customize error hadling 171 | # result = method(**method_args) 172 | result = with_error_handler(method, **method_args) 173 | 174 | else: 175 | args = digdag_inspect_arguments(callable_type, False, params) 176 | # Replace the below code to customize error hadling 177 | # result = callable_type(**args) 178 | result = with_error_handler(callable_type, **args) 179 | 180 | out = { 181 | 'subtask_config': digdag_env.subtask_config, 182 | 'export_params': digdag_env.export_params, 183 | 'store_params': digdag_env.store_params, 184 | #'state_params': digdag_env.state_params, # 
only for retrying 185 | 'status_params': status_params, # only for ecs_task.command_result_internal 186 | } 187 | 188 | with open(out_file, 'w') as f: 189 | json.dump(out, f) 190 | 191 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/rb/run.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | set -o pipefail 3 | 4 | mkdir -p ./digdag-operator-ecs_task 5 | cd digdag-operator-ecs_task 6 | 7 | # Create output files 8 | touch out.json stdout.log stderr.log 9 | 10 | # Download requirements 11 | aws s3 cp s3://${ECS_TASK_RB_BUCKET}/${ECS_TASK_RB_PREFIX}/ ./ --recursive 12 | 13 | # Move workspace 14 | cd workspace 15 | 16 | # Unset e option for returning ruby results to digdag 17 | set +e 18 | 19 | # Run setup command 20 | ${ECS_TASK_RB_SETUP_COMMAND} \ 21 | 2>> ../stderr.log \ 22 | | tee -a ../stdout.log 23 | 24 | # Run 25 | cat ../runner.rb \ 26 | | ruby \ 27 | -I . \ 28 | -r ${ECS_TASK_RB_REQUIRE} \ 29 | -- - \ 30 | "${ECS_TASK_RB_COMMAND}" \ 31 | ../in.json \ 32 | ../out.json \ 33 | 2>> ../stderr.log \ 34 | | tee -a ../stdout.log 35 | 36 | # Capture exit code 37 | EXIT_CODE=$? 38 | 39 | # Set e option 40 | set -e 41 | 42 | # Move out workspace 43 | cd .. 
44 | 45 | # For logging driver 46 | cat stderr.log 1>&2 47 | 48 | # Upload results 49 | aws s3 cp ./out.json s3://${ECS_TASK_RB_BUCKET}/${ECS_TASK_RB_PREFIX}/ 50 | aws s3 cp ./stdout.log s3://${ECS_TASK_RB_BUCKET}/${ECS_TASK_RB_PREFIX}/ 51 | aws s3 cp ./stderr.log s3://${ECS_TASK_RB_BUCKET}/${ECS_TASK_RB_PREFIX}/ 52 | 53 | # Exit with the ruby exit code 54 | exit $EXIT_CODE 55 | 56 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/rb/runner.rb: -------------------------------------------------------------------------------- 1 | ######### 2 | # Copy from https://raw.githubusercontent.com/treasure-data/digdag/390663b/digdag-standards/src/main/resources/digdag/standards/rb/runner.rb 3 | # Then, customize a bit about error handling 4 | ######### 5 | 6 | require 'json' 7 | 8 | command = ARGV[0] 9 | out_file = ARGV[2] 10 | 11 | module DigdagEnv 12 | in_file = ARGV[1] 13 | 14 | in_data = JSON.parse(File.read(in_file)) 15 | params = in_data['params'] 16 | 17 | # TODO include indifferent access like Embulk::DataSource 18 | PARAMS = params 19 | SUBTASK_CONFIG = {} 20 | EXPORT_PARAMS = {} 21 | STORE_PARAMS = {} 22 | STATE_PARAMS = {} 23 | end 24 | 25 | # should this be a digdag.gem so that users can unit-test a command without running digdag? 
26 | module Digdag 27 | class Env 28 | def initialize 29 | @params = DigdagEnv::PARAMS 30 | @subtask_config = DigdagEnv::SUBTASK_CONFIG 31 | @export_params = DigdagEnv::EXPORT_PARAMS 32 | @store_params = DigdagEnv::STORE_PARAMS 33 | @state_params = DigdagEnv::STATE_PARAMS 34 | @subtask_index = 0 35 | end 36 | 37 | attr_reader :params 38 | 39 | attr_reader :subtask_config 40 | 41 | attr_reader :export_params 42 | 43 | attr_reader :store_params 44 | 45 | attr_reader :state_params 46 | 47 | def set_state(**params) 48 | @state_params.merge!(params) 49 | end 50 | 51 | def export(**params) 52 | @export_params.merge!(params) 53 | end 54 | 55 | def store(**params) 56 | @store_params.merge!(params) 57 | end 58 | 59 | # add_subtask(params) 60 | # add_subtask(singleton_method_name, params={}) 61 | # add_subtask(klass, instance_method_name, params={}) 62 | def add_subtask(*args) 63 | if args.length == 1 && args[0].is_a?(Hash) 64 | # add_subtask(params) 65 | config = args[0] 66 | 67 | elsif args.length == 1 || (args.length == 2 && args[1].is_a?(Hash)) 68 | # add_subtask(singleton_method_name, params={}) 69 | method_name = args[0] 70 | params = Hash(args[1]) 71 | 72 | begin 73 | method_name = method_name.to_sym 74 | rescue NameError, ArgumentError 75 | raise ArgumentError, "Second argument must be a Symbol but got #{method_name.inspect}" 76 | end 77 | 78 | if method_name.to_s.include?(".") 79 | raise ArgumentError, "Method name can't include '.'" 80 | end 81 | 82 | config = params.dup 83 | config["rb>"] = method_name.to_s 84 | 85 | elsif args.length == 2 || (args.length == 3 && args[2].is_a?(Hash)) 86 | # add_subtask(klass, instance_method_name, params={}) 87 | klass = args[0] 88 | method_name = args[1] 89 | params = Hash(args[2]) 90 | 91 | begin 92 | method_name = method_name.to_sym 93 | rescue NameError, ArgumentError 94 | raise ArgumentError, "Second argument must be a Symbol but got #{method_name.inspect}" 95 | end 96 | 97 | if method_name.to_s.include?(".") 98 | raise 
ArgumentError, "Method name can't include '.'" 99 | end 100 | 101 | if klass.is_a?(Class) 102 | class_name = klass.name 103 | 104 | else 105 | begin 106 | class_name = klass.to_sym.to_s 107 | rescue NameError, ArgumentError 108 | raise ArgumentError, "First argument must be a Class or Symbol but got #{klass.inspect}" 109 | end 110 | end 111 | 112 | # validation 113 | begin 114 | klass = Kernel.const_get(class_name) # const_get with String (not Symbol) searches nested constants 115 | rescue NameError 116 | raise ArgumentError, "Could not find class named #{class_name}" 117 | end 118 | 119 | unless klass.respond_to?(method_name) || klass.public_instance_methods.include?(method_name) 120 | raise ArgumentError, "Class #{klass} does not have method #{method_name.inspect}" 121 | end 122 | 123 | config = params.dup 124 | config["rb>"] = "::#{klass}.#{method_name}" 125 | 126 | else 127 | raise ArgumentError, "wrong number of arguments (#{args.length} for 1..3 with the last argument is a Hash)" 128 | end 129 | 130 | begin 131 | JSON.dump(config) 132 | rescue => e 133 | raise ArgumentError, "Parameters must be serializable using JSON: #{e}" 134 | end 135 | 136 | @subtask_config["+subtask#{@subtask_index}"] = config 137 | @subtask_index += 1 138 | 139 | nil 140 | end 141 | end 142 | 143 | DIGDAG_ENV = Env.new 144 | private_constant :DIGDAG_ENV 145 | 146 | def self.env 147 | DIGDAG_ENV 148 | end 149 | end 150 | 151 | # add the archive path to LOAD_PATH 152 | $LOAD_PATH << File.expand_path(Dir.pwd) 153 | 154 | def digdag_inspect_command(command) 155 | fragments = command.split(".") 156 | method_name = fragments.pop.to_sym 157 | if fragments.empty? 
158 | # method 159 | return nil, method_name, false 160 | else 161 | # Name::Space::Class.method 162 | class_name = fragments.join(".") 163 | klass = Kernel.const_get(class_name) 164 | is_instance_method = klass.public_instance_methods.include?(method_name) 165 | return klass, method_name, is_instance_method 166 | end 167 | end 168 | 169 | def digdag_inspect_arguments(receiver, method_name, params) 170 | if receiver 171 | parameters = receiver.method(method_name).parameters 172 | if method_name == :new && parameters == [[:rest]] 173 | # This is Object.new that forwards all arguments to #initialize 174 | begin 175 | parameters = receiver.instance_method(:initialize).parameters 176 | rescue NameError => e 177 | end 178 | end 179 | else 180 | parameters = method(method_name).parameters 181 | end 182 | 183 | args = [] 184 | keywords = nil 185 | parameters.each do |kind, name| 186 | key = name.to_s 187 | case kind 188 | when :req 189 | # required argument like a 190 | unless params.has_key?(key) 191 | if receiver.is_a?(Class) 192 | raise ArgumentError, "Method '#{receiver}.#{method_name}' requires parameter '#{key}' but not set" 193 | else 194 | raise ArgumentError, "Method '#{receiver.class}##{method_name}' requires parameter '#{key}' but not set" 195 | end 196 | end 197 | args << params[key] 198 | 199 | when :opt 200 | # optional argument like a=nil 201 | if params.has_key?(key) 202 | args << params[key] 203 | else 204 | # use the default value. 205 | end 206 | 207 | when :rest 208 | # variable-length arguments like *a 209 | # there're really we can do here to keep consistency with :opt. 210 | # should this be an error? 
211 | 212 | when :keyreq 213 | # required keyword argument like a: 214 | unless params.has_key?(key) 215 | if receiver.is_a?(Class) 216 | raise ArgumentError, "Method '#{receiver}.#{method_name}' requires parameter '#{key}' but not set" 217 | else 218 | raise ArgumentError, "Method '#{receiver.class}##{method_name}' requires parameter '#{key}' but not set" 219 | end 220 | end 221 | if keywords.nil? 222 | keywords = {} 223 | args << keywords 224 | end 225 | keywords[name] = params[key] 226 | 227 | when :key 228 | # optional keyword argument like a: nil 229 | if params.has_key?(key) 230 | if keywords.nil? 231 | keywords = {} 232 | args << keywords 233 | end 234 | keywords[name] = params[key] 235 | else 236 | # use the default value. 237 | end 238 | 239 | when :keyrest 240 | # rest-of-keywords argument like **a 241 | # symbolize keys otherwise method call causes error: 242 | # "TypeError: wrong argument type String (expected Symbol)" 243 | if keywords.nil? 244 | keywords = {} 245 | args << keywords 246 | end 247 | keywords.merge!(digdag_symbolize_keys(params)) 248 | end 249 | end 250 | 251 | return args 252 | end 253 | 254 | def digdag_symbolize_keys(hash) 255 | built = {} 256 | hash.each_pair do |k, v| 257 | if v.is_a?(Hash) 258 | v = digdag_symbolize_keys(v) 259 | end 260 | built[k.to_s.to_sym] = v 261 | end 262 | return built 263 | end 264 | 265 | def with_error_handler(receiver, method_name, *method_args) 266 | status_params = {} 267 | begin 268 | results = 269 | if receiver 270 | receiver.send(method_name, *method_args) 271 | else 272 | send(method_name, *method_args) 273 | end 274 | status_params['exit_code'] = 0 275 | return results, status_params 276 | rescue => e 277 | status_params['exit_code'] = 1 278 | status_params['error_message'] = e.message 279 | status_params['error_stacktrace'] = e.backtrace.join("\n") 280 | puts("message: #{e}, stacktrace: #{e.backtrace.join("\n")}") 281 | return nil, status_params 282 | end 283 | end 284 | 285 | klass, method_name, 
is_instance_method = digdag_inspect_command(command) 286 | 287 | if klass.nil? 288 | method_args = digdag_inspect_arguments(nil, method_name, DigdagEnv::PARAMS) 289 | # result = send(method_name, *method_args) 290 | result, status_params = with_error_handler(nil, method_name, *method_args) 291 | 292 | elsif is_instance_method 293 | new_args = digdag_inspect_arguments(klass, :new, DigdagEnv::PARAMS) 294 | instance = klass.new(*new_args) 295 | 296 | method_args = digdag_inspect_arguments(instance, method_name, DigdagEnv::PARAMS) 297 | # result = instance.send(method_name, *method_args) 298 | result, status_params = with_error_handler(instance, method_name, *method_args) 299 | 300 | else 301 | method_args = digdag_inspect_arguments(klass, method_name, DigdagEnv::PARAMS) 302 | # result = klass.send(method_name, *method_args) 303 | result, status_params = with_error_handler(klass, method_name, *method_args) 304 | end 305 | 306 | out = { 307 | 'subtask_config' => DigdagEnv::SUBTASK_CONFIG, 308 | 'export_params' => DigdagEnv::EXPORT_PARAMS, 309 | 'store_params' => DigdagEnv::STORE_PARAMS, 310 | 'status_params' => status_params, # only for ecs_task.command_result_internal 311 | #'state_params' => DigdagEnv::STATE_PARAMS, # only for retrying 312 | } 313 | 314 | File.open(out_file, "w") {|f| f.write out.to_json} 315 | 316 | -------------------------------------------------------------------------------- /src/main/resources/pro/civitaspo/digdag/plugin/ecs_task/sh/run.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | set -o pipefail 3 | 4 | mkdir -p ./digdag-operator-ecs_task 5 | cd digdag-operator-ecs_task 6 | 7 | # Create output files 8 | touch out.json stdout.log stderr.log 9 | 10 | # Download requirements 11 | aws s3 cp s3://${ECS_TASK_SH_BUCKET}/${ECS_TASK_SH_PREFIX}/ ./ --recursive 12 | 13 | # Move workspace 14 | cd workspace 15 | 16 | # Unset e option for returning embulk results to digdag 17 | set +e 18 | 19 | # envs 
20 | export ${ECS_TASK_SH_EXPORT_ENV} 21 | 22 | # Run 23 | ${ECS_TASK_SH_COMMAND} \ 24 | 2>> ../stderr.log \ 25 | | tee -a ../stdout.log 26 | 27 | # Capture exit code 28 | EXIT_CODE=$? 29 | 30 | # Set e option 31 | set -e 32 | 33 | # Move out workspace 34 | cd .. 35 | 36 | # For logging driver 37 | cat stderr.log 1>&2 38 | 39 | # Write out.json 40 | cat <<EOF > out.json 41 | { 42 | "subtask_config": {}, 43 | "export_params": {}, 44 | "store_params": {}, 45 | "status_params": { 46 | "exit_code": $EXIT_CODE 47 | } 48 | } 49 | EOF 50 | 51 | # Upload results 52 | aws s3 cp ./out.json s3://${ECS_TASK_SH_BUCKET}/${ECS_TASK_SH_PREFIX}/ 53 | aws s3 cp ./stdout.log s3://${ECS_TASK_SH_BUCKET}/${ECS_TASK_SH_PREFIX}/ 54 | aws s3 cp ./stderr.log s3://${ECS_TASK_SH_BUCKET}/${ECS_TASK_SH_PREFIX}/ 55 | 56 | # Exit with the command exit code 57 | exit $EXIT_CODE 58 | 59 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/AbstractEcsTaskOperator.scala: -------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task 2 | 3 | import io.digdag.client.config.{Config, ConfigFactory} 4 | import io.digdag.spi.{OperatorContext, SecretProvider, TemplateEngine} 5 | import io.digdag.util.{BaseOperator, DurationParam} 6 | import org.slf4j.{Logger, LoggerFactory} 7 | import pro.civitaspo.digdag.plugin.ecs_task.aws.{Aws, AwsConf} 8 | 9 | abstract class AbstractEcsTaskOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine) 10 | extends BaseOperator(context) { 11 | 12 | protected val logger: Logger = LoggerFactory.getLogger(operatorName) 13 | protected val cf: ConfigFactory = request.getConfig.getFactory 14 | 15 | protected val params: Config = { 16 | val elems: Seq[String] = operatorName.split("\\.").toSeq 17 | elems.indices.foldLeft(request.getConfig) { (p: Config, idx: Int) => 18 | p.mergeDefault((0 to 
idx).foldLeft(request.getConfig) { (nestedParam: Config, keyIdx: Int) => 19 | nestedParam.getNestedOrGetEmpty(elems(keyIdx)) 20 | }) 21 | } 22 | } 23 | protected val secrets: SecretProvider = context.getSecrets.getSecrets("ecs_task") 24 | protected val sessionUuid: String = params.get("session_uuid", classOf[String]) 25 | 26 | protected val aws: Aws = Aws( 27 | AwsConf( 28 | isAllowedAuthMethodEnv = systemConfig.get("ecs_task.allow_auth_method_env", classOf[Boolean], false), 29 | isAllowedAuthMethodInstance = systemConfig.get("ecs_task.allow_auth_method_instance", classOf[Boolean], false), 30 | isAllowedAuthMethodProfile = systemConfig.get("ecs_task.allow_auth_method_profile", classOf[Boolean], false), 31 | isAllowedAuthMethodProperties = systemConfig.get("ecs_task.allow_auth_method_properties", classOf[Boolean], false), 32 | isAllowedAuthMethodWebIdentityToken = systemConfig.get("ecs_task.allow_auth_method_web_identity_token", classOf[Boolean], false), 33 | assumeRoleTimeoutDuration = systemConfig.get("ecs_task.assume_role_timeout_duration", classOf[DurationParam], DurationParam.parse("1h")), 34 | accessKeyId = secrets.getSecretOptional("access_key_id"), 35 | secretAccessKey = secrets.getSecretOptional("secret_access_key"), 36 | sessionToken = secrets.getSecretOptional("session_token"), 37 | roleArn = secrets.getSecretOptional("role_arn"), 38 | roleSessionName = secrets.getSecretOptional("role_session_name").or(s"digdag-ecs_task-$sessionUuid"), 39 | defaultWebIdentityTokenFile = systemConfig.getOptional("athena.default_web_identity_token_file", classOf[String]), 40 | webIdentityTokenFile = params.getOptional("web_identity_token_file", classOf[String]), 41 | defaultWebIdentityRoleArn = systemConfig.getOptional("athena.default_web_identity_role_arn", classOf[String]), 42 | webIdentityRoleArn = params.getOptional("web_identity_role_arn", classOf[String]), 43 | httpProxy = secrets.getSecrets("http_proxy"), 44 | authMethod = params.get("auth_method", classOf[String], 
"basic"), 45 | profileName = params.get("profile_name", classOf[String], "default"), 46 | profileFile = params.getOptional("profile_file", classOf[String]), 47 | useHttpProxy = params.get("use_http_proxy", classOf[Boolean], false), 48 | region = params.getOptional("region", classOf[String]), 49 | endpoint = params.getOptional("endpoint", classOf[String]) 50 | ) 51 | ) 52 | 53 | } 54 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/EcsTaskPlugin.scala: -------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task 2 | 3 | import java.lang.reflect.Constructor 4 | import java.util.{Arrays => JArrays, List => JList} 5 | 6 | import io.digdag.client.config.{Config, ConfigException} 7 | import io.digdag.spi.{Operator, OperatorContext, OperatorFactory, OperatorProvider, Plugin, TemplateEngine} 8 | import javax.inject.Inject 9 | import pro.civitaspo.digdag.plugin.ecs_task.command.{EcsTaskCallInternalOperator, EcsTaskCommandResultInternalOperator} 10 | import pro.civitaspo.digdag.plugin.ecs_task.embulk.EcsTaskEmbulkOperator 11 | import pro.civitaspo.digdag.plugin.ecs_task.py.EcsTaskPyOperator 12 | import pro.civitaspo.digdag.plugin.ecs_task.rb.EcsTaskRbOperator 13 | import pro.civitaspo.digdag.plugin.ecs_task.register.EcsTaskRegisterOperator 14 | import pro.civitaspo.digdag.plugin.ecs_task.result.EcsTaskResultOperator 15 | import pro.civitaspo.digdag.plugin.ecs_task.run.{EcsTaskRunInternalOperator, EcsTaskRunOperator} 16 | import pro.civitaspo.digdag.plugin.ecs_task.sh.EcsTaskShOperatar 17 | import pro.civitaspo.digdag.plugin.ecs_task.wait.EcsTaskWaitOperator 18 | 19 | object EcsTaskPlugin { 20 | 21 | class EcsTaskOperatorProvider extends OperatorProvider { 22 | 23 | @Inject protected var systemConfig: Config = null 24 | @Inject protected var templateEngine: TemplateEngine = null 25 | 26 | override def get(): 
JList[OperatorFactory] = { 27 | JArrays.asList( 28 | operatorFactory("ecs_task.embulk", classOf[EcsTaskEmbulkOperator]), 29 | operatorFactory("ecs_task.py", classOf[EcsTaskPyOperator]), 30 | operatorFactory("ecs_task.rb", classOf[EcsTaskRbOperator]), 31 | operatorFactory("ecs_task.sh", classOf[EcsTaskShOperatar]), 32 | operatorFactory("ecs_task.call_internal", classOf[EcsTaskCallInternalOperator]), 33 | operatorFactory("ecs_task.command_result_internal", classOf[EcsTaskCommandResultInternalOperator]), 34 | operatorFactory("ecs_task.register", classOf[EcsTaskRegisterOperator]), 35 | operatorFactory("ecs_task.result", classOf[EcsTaskResultOperator]), 36 | operatorFactory("ecs_task.run", classOf[EcsTaskRunOperator]), 37 | operatorFactory("ecs_task.run_internal", classOf[EcsTaskRunInternalOperator]), 38 | operatorFactory("ecs_task.wait", classOf[EcsTaskWaitOperator]) 39 | ) 40 | } 41 | 42 | private def operatorFactory[T <: AbstractEcsTaskOperator](operatorName: String, klass: Class[T]): OperatorFactory = { 43 | new OperatorFactory { 44 | override def getType: String = operatorName 45 | override def newOperator(context: OperatorContext): Operator = { 46 | val constructor: Constructor[T] = klass.getConstructor(classOf[String], classOf[OperatorContext], classOf[Config], classOf[TemplateEngine]) 47 | try { 48 | constructor.newInstance(operatorName, context, systemConfig, templateEngine) 49 | } 50 | catch { 51 | case e: Throwable => throw new ConfigException(e) 52 | } 53 | } 54 | } 55 | } 56 | } 57 | } 58 | 59 | class EcsTaskPlugin extends Plugin { 60 | 61 | override def getServiceProvider[T](`type`: Class[T]): Class[_ <: T] = { 62 | if (`type` ne classOf[OperatorProvider]) return null 63 | classOf[EcsTaskPlugin.EcsTaskOperatorProvider].asSubclass(`type`) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/aws/AmazonS3UriWrapper.scala: 
--------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.aws
2 |
3 | import com.amazonaws.services.s3.AmazonS3URI
4 |
5 | object AmazonS3UriWrapper {
// NOTE(review): second arg `false` is AmazonS3URI's urlEncode flag — presumably to keep
// keys byte-identical rather than percent-encoded; confirm against the SDK javadoc.
6 | def apply(path: String): AmazonS3URI = new AmazonS3URI(path, false)
7 | }
8 |
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/aws/Aws.scala: --------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.aws
2 |
3 | import com.amazonaws.{ClientConfiguration, Protocol}
4 | import com.amazonaws.auth.{
5 | AnonymousAWSCredentials,
6 | AWSCredentials,
7 | AWSCredentialsProvider,
8 | AWSStaticCredentialsProvider,
9 | BasicAWSCredentials,
10 | BasicSessionCredentials,
11 | EC2ContainerCredentialsProviderWrapper,
12 | EnvironmentVariableCredentialsProvider,
13 | SystemPropertiesCredentialsProvider,
14 | WebIdentityTokenCredentialsProvider
15 | }
16 | import com.amazonaws.auth.profile.{ProfileCredentialsProvider, ProfilesConfigFile}
17 | import com.amazonaws.client.builder.AwsClientBuilder
18 | import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
19 | import com.amazonaws.regions.{DefaultAwsRegionProviderChain, Regions}
20 | import com.amazonaws.services.ecs.{AmazonECS, AmazonECSClientBuilder}
21 | import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
22 | import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
23 | import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder
24 | import com.amazonaws.services.securitytoken.model.AssumeRoleRequest
25 | import com.google.common.base.Optional
26 | import io.digdag.client.config.ConfigException
27 |
28 | import scala.util.Try
29 |
// Loan-pattern facade over the AWS SDK clients used by this plugin (S3, ECS, STS).
// Each with* method builds a client from `conf`, runs `f`, and always shuts the client down.
30 | case class Aws(conf: AwsConf) {
31 |
32 | def withS3[R](f: AmazonS3 => R): R = {
33 | val s3: AmazonS3 = buildService(AmazonS3ClientBuilder.standard())
34 | try f(s3)
35 | finally s3.shutdown()
36 | }
37 |
38 | def withTransferManager[R](f: TransferManager => R): R = {
39 | withS3 { s3 =>
40 | val xfer: TransferManager = TransferManagerBuilder.standard().withS3Client(s3).build()
41 | try f(xfer)
// shutdownNow(false): do not shut down the wrapped S3 client here — withS3 owns it.
42 | finally xfer.shutdownNow(false)
43 | }
44 | }
45 |
46 | def withEcs[R](f: AmazonECS => R): R = {
47 | val ecs: AmazonECS = buildService(AmazonECSClientBuilder.standard())
48 | try f(ecs)
49 | finally ecs.shutdown()
50 | }
51 |
52 | private def buildService[S <: AwsClientBuilder[S, T], T](builder: AwsClientBuilder[S, T]): T = {
53 | configureBuilderEndpointConfiguration(builder)
54 | .withClientConfiguration(clientConfiguration)
55 | .withCredentials(credentialsProvider)
56 | .build()
57 | }
58 |
// Region/endpoint resolution: explicit endpoint+region > region only > endpoint with a
// region discovered from the default provider chain (falling back to the SDK default region).
59 | private def configureBuilderEndpointConfiguration[S <: AwsClientBuilder[S, T], T](builder: AwsClientBuilder[S, T]): AwsClientBuilder[S, T] = {
60 | if (conf.region.isPresent && conf.endpoint.isPresent) {
61 | val ec = new EndpointConfiguration(conf.endpoint.get(), conf.region.get())
62 | builder.setEndpointConfiguration(ec)
63 | }
64 | else if (conf.region.isPresent && !conf.endpoint.isPresent) {
65 | builder.setRegion(conf.region.get())
66 | }
67 | else if (!conf.region.isPresent && conf.endpoint.isPresent) {
68 | val r = Try(new DefaultAwsRegionProviderChain().getRegion).getOrElse(Regions.DEFAULT_REGION.getName)
69 | val ec = new EndpointConfiguration(conf.endpoint.get(), r)
70 | builder.setEndpointConfiguration(ec)
71 | }
72 | builder
73 | }
74 |
75 | private def credentialsProvider: AWSCredentialsProvider = {
76 | if (!conf.roleArn.isPresent) return standardCredentialsProvider
77 | assumeRoleCredentialsProvider(standardCredentialsProvider)
78 | }
79 |
80 | private def standardCredentialsProvider: AWSCredentialsProvider = {
81 | conf.authMethod match {
82 | case "basic" => basicAuthMethodAWSCredentialsProvider
83 | case "env" => envAuthMethodAWSCredentialsProvider
84 | case "instance" => instanceAuthMethodAWSCredentialsProvider
85 | case "profile" => profileAuthMethodAWSCredentialsProvider
86 | case "properties" => propertiesAuthMethodAWSCredentialsProvider
87 | case "anonymous" => anonymousAuthMethodAWSCredentialsProvider
88 | case "session" => sessionAuthMethodAWSCredentialsProvider
89 | case "web_identity_token" => webIdentityTokenAuthMethodAWSCredentialsProvider
90 | case _ =>
// FIX: was `"$conf.authMethod"`, which interpolates conf.toString plus the literal text
// ".authMethod"; member access in the s-interpolator needs `${...}` (the web_identity_token
// branch below already uses the correct form).
91 | throw new ConfigException(
92 | s"""auth_method: "${conf.authMethod}" is not supported. available `auth_method`s are "basic", "env", "instance", "profile", "properties", "anonymous", "session", or "web_identity_token"."""
93 | )
94 | }
95 | }
96 |
97 | private def assumeRoleCredentialsProvider(credentialsProviderToAssumeRole: AWSCredentialsProvider): AWSCredentialsProvider = {
98 | // TODO: require EndpointConfiguration so on ?
99 | val sts = AWSSecurityTokenServiceClientBuilder
100 | .standard()
101 | .withClientConfiguration(clientConfiguration)
102 | .withCredentials(credentialsProviderToAssumeRole)
103 | .build()
104 |
105 | val role = sts.assumeRole(
106 | new AssumeRoleRequest()
107 | .withRoleArn(conf.roleArn.get())
108 | .withDurationSeconds(conf.assumeRoleTimeoutDuration.getDuration.getSeconds.toInt)
109 | .withRoleSessionName(conf.roleSessionName)
110 | )
111 | val credentials =
112 | new BasicSessionCredentials(role.getCredentials.getAccessKeyId, role.getCredentials.getSecretAccessKey, role.getCredentials.getSessionToken)
113 | new AWSStaticCredentialsProvider(credentials)
114 | }
115 |
// FIX (here and in the env/instance/profile/properties/session providers below):
// `"$conf.authMethod"` -> `"${conf.authMethod}"` so the error messages show the actual value.
116 | private def basicAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
117 | if (!conf.accessKeyId.isPresent) throw new ConfigException(s"""`access_key_id` must be set when `auth_method` is "${conf.authMethod}".""")
118 | if (!conf.secretAccessKey.isPresent) throw new ConfigException(s"""`secret_access_key` must be set when `auth_method` is "${conf.authMethod}".""")
119 | val credentials: AWSCredentials = new BasicAWSCredentials(conf.accessKeyId.get(), conf.secretAccessKey.get())
120 | new AWSStaticCredentialsProvider(credentials)
121 | }
122 |
123 | private def envAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
124 | if (!conf.isAllowedAuthMethodEnv) throw new ConfigException(s"""auth_method: "${conf.authMethod}" is not allowed.""")
125 | new EnvironmentVariableCredentialsProvider
126 | }
127 |
128 | private def instanceAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
129 | if (!conf.isAllowedAuthMethodInstance) throw new ConfigException(s"""auth_method: "${conf.authMethod}" is not allowed.""")
130 | // NOTE: combination of InstanceProfileCredentialsProvider and ContainerCredentialsProvider
131 | new EC2ContainerCredentialsProviderWrapper
132 | }
133 |
134 | private def profileAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
135 | if (!conf.isAllowedAuthMethodProfile) throw new ConfigException(s"""auth_method: "${conf.authMethod}" is not allowed.""")
136 | if (!conf.profileFile.isPresent) return new ProfileCredentialsProvider(conf.profileName)
137 | val pf: ProfilesConfigFile = new ProfilesConfigFile(conf.profileFile.get())
138 | new ProfileCredentialsProvider(pf, conf.profileName)
139 | }
140 |
141 | private def propertiesAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
142 | if (!conf.isAllowedAuthMethodProperties) throw new ConfigException(s"""auth_method: "${conf.authMethod}" is not allowed.""")
143 | new SystemPropertiesCredentialsProvider()
144 | }
145 |
146 | private def anonymousAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
147 | val credentials: AWSCredentials = new AnonymousAWSCredentials
148 | new AWSStaticCredentialsProvider(credentials)
149 | }
150 |
151 | private def sessionAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
152 | if (!conf.accessKeyId.isPresent) throw new ConfigException(s"""`access_key_id` must be set when `auth_method` is "${conf.authMethod}".""")
153 | if (!conf.secretAccessKey.isPresent) throw new ConfigException(s"""`secret_access_key` must be set when `auth_method` is "${conf.authMethod}".""")
154 | if (!conf.sessionToken.isPresent) throw new ConfigException(s"""`session_token` must be set when `auth_method` is "${conf.authMethod}".""")
155 | val credentials: AWSCredentials = new BasicSessionCredentials(conf.accessKeyId.get(), conf.secretAccessKey.get(), conf.sessionToken.get())
156 | new AWSStaticCredentialsProvider(credentials)
157 | }
158 |
159 | private def webIdentityTokenAuthMethodAWSCredentialsProvider: AWSCredentialsProvider = {
160 | if (!conf.isAllowedAuthMethodWebIdentityToken) throw new ConfigException(s"""auth_method: "${conf.authMethod}" is not allowed.""")
// NOTE(review): the messages below cite `ecs_task.allow_auth_method_web_identity_token` /
// `..._role_arn`, but the fallbacks actually read the `default_web_identity_token_file` /
// `default_web_identity_role_arn` system keys — the cited key names look wrong; confirm
// the intended system keys before rewording these user-facing messages.
161 | if (!conf.webIdentityTokenFile.or(conf.defaultWebIdentityTokenFile).isPresent)
162 | throw new ConfigException(
163 | s"""`web_identity_token_file` or `ecs_task.allow_auth_method_web_identity_token` (system) must be set when `auth_method` is "${conf.authMethod}"."""
164 | )
165 | if (!conf.webIdentityRoleArn.or(conf.defaultWebIdentityRoleArn).isPresent)
166 | throw new ConfigException(
167 | s"""`web_identity_role_arn` or `ecs_task.allow_auth_method_web_identity_role_arn` (system) must be set when `auth_method` is "${conf.authMethod}"."""
168 | )
169 | WebIdentityTokenCredentialsProvider
170 | .builder()
171 | .webIdentityTokenFile(conf.webIdentityTokenFile.or(conf.defaultWebIdentityTokenFile).get())
172 | .roleArn(conf.webIdentityRoleArn.or(conf.defaultWebIdentityRoleArn).get())
173 | .roleSessionName(conf.roleSessionName)
174 | .build()
175 | }
176 |
177 | private def clientConfiguration: ClientConfiguration = {
178 | if (!conf.useHttpProxy) return new ClientConfiguration()
179 |
180 | val host: String = conf.httpProxy.getSecret("host")
181 | val port: Optional[String] = conf.httpProxy.getSecretOptional("port")
182 | val protocol: Protocol = conf.httpProxy.getSecretOptional("scheme").or("https") match {
183 | case "http" => Protocol.HTTP
184 | case "https" => Protocol.HTTPS
185 | case _ => throw new ConfigException(s"""`ecs_task.http_proxy.scheme` must be "http" or "https".""")
186 | }
187 | val user: Optional[String] = conf.httpProxy.getSecretOptional("user")
188 | val password: Optional[String] = conf.httpProxy.getSecretOptional("password")
189 |
190 | val cc = new ClientConfiguration()
191 | .withProxyHost(host)
192 | .withProtocol(protocol)
193 |
194 | if (port.isPresent) cc.setProxyPort(port.get().toInt)
195 | if (user.isPresent) cc.setProxyUsername(user.get())
196 | if (password.isPresent) cc.setProxyPassword(password.get())
197 |
198 | cc
199 | }
200 |
201 | }
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/aws/AwsConf.scala: --------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.aws
2 |
3 | import com.google.common.base.Optional
4 | import io.digdag.spi.SecretProvider
5 | import io.digdag.util.DurationParam
6 |
// Immutable bundle of every AWS-related setting consumed by Aws (auth method, credentials,
// assume-role, web-identity, proxy, region/endpoint). Populated in AbstractEcsTaskOperator.
7 | case class AwsConf(
8 | isAllowedAuthMethodEnv: Boolean,
9 | isAllowedAuthMethodInstance: Boolean,
10 | isAllowedAuthMethodProfile: Boolean,
11 | isAllowedAuthMethodProperties: Boolean,
12 | isAllowedAuthMethodWebIdentityToken: Boolean,
13 | assumeRoleTimeoutDuration: DurationParam,
14 | accessKeyId: Optional[String],
15 | secretAccessKey: Optional[String],
16 | sessionToken: Optional[String],
17 | roleArn: Optional[String],
18 | roleSessionName: String,
19 | httpProxy: SecretProvider,
20 | authMethod: String,
21 | profileName: String,
22 | defaultWebIdentityTokenFile: Optional[String],
23 | webIdentityTokenFile: Optional[String],
24 | defaultWebIdentityRoleArn: Optional[String],
25 | webIdentityRoleArn: Optional[String],
26 | profileFile: Optional[String],
27 | useHttpProxy: Boolean,
28 | region: Optional[String],
29 | endpoint: Optional[String]
30 | )
31 |
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/AbstractEcsTaskCommandOperator.scala: --------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.command
2 |
3 | import com.amazonaws.services.s3.AmazonS3URI
4 | import io.digdag.client.config.Config
5 | import io.digdag.spi.{OperatorContext, PrivilegedVariables, TaskResult, TemplateEngine}
6 | import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator
7 | import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper
8 |
9 | import scala.jdk.CollectionConverters._
10 | import scala.util.{Random, Using}
11 | import scala.util.matching.Regex
12 |
// Base for command-style operators (sh/py/rb/embulk): provisions per-run tmp storage,
// collects env vars from privileged variables, and delegates execution to EcsTaskCommandRunner.
13 | abstract class AbstractEcsTaskCommandOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
14 | extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {
15 |
// POSIX-style env var name: letter/underscore first, then letters/digits/underscores.
16 | protected val validEnvKeyRegex: Regex = "[a-zA-Z_][a-zA-Z_0-9]*".r
17 |
// Name of the entry-point script each concrete operator uploads (e.g. its run.sh).
18 | protected val mainScriptName: String
19 |
// Prefer `tmp_storage`; fall back to the deprecated `workspace_s3_uri_prefix` only when
// `tmp_storage` is absent. If both are set, `tmp_storage` wins and an info line is logged.
20 | private lazy val tmpStorageConfig: Config = {
21 | if (params.has("workspace_s3_uri_prefix") && !params.has("tmp_storage")) {
22 | logger.warn("[Deprecated] Use `tmp_storage` instead of `workspace_s3_uri_prefix`")
23 | buildTmpStorageConfigFromWorkspaceS3UriPrefix()
24 | }
25 | else {
26 | if (params.has("workspace_s3_uri_prefix")) logger.info("Use `tmp_storage`, not `workspace_s3_uri_prefix`")
27 | params.getNested("tmp_storage")
28 | }
29 | }
30 |
31 | @deprecated
// Synthesizes an s3-type tmp_storage config from the legacy `workspace_s3_uri_prefix` param.
32 | private def buildTmpStorageConfigFromWorkspaceS3UriPrefix(): Config = {
33 | cf.create()
34 | .set("type", "s3")
35 | .set("uri", params.get("workspace_s3_uri_prefix", classOf[String]))
36 | }
37 |
38 | private def buildTmpStorage(): TmpStorage = {
39 | val storageType: String = tmpStorageConfig.get("type", classOf[String])
40 | storageType match {
41 | case "s3" => buildS3TmpStorage()
42 | case _ => throw new UnsupportedOperationException("tmp_storage.type supports only s3")
43 | }
44 | }
45 |
// Builds an S3 location unique per operator run: <uri>/<operator>.<session_uuid>.<10 random chars>.
46 | private def buildS3TmpStorage(): S3TmpStorage = {
47 | val shellCommand: String = params.get("shell", classOf[String], "sh")
48 | val uriString: String = tmpStorageConfig.get("uri", classOf[String])
49 | val random: String = Random.alphanumeric.take(10).mkString
50 | val uri: AmazonS3URI =
51 | if (uriString.endsWith("/")) AmazonS3UriWrapper(s"$uriString$operatorName.$sessionUuid.$random")
52 | else AmazonS3UriWrapper(s"$uriString/$operatorName.$sessionUuid.$random")
53 |
54 | S3TmpStorage(shellCommand = shellCommand, location = uri, aws = aws, workspace = workspace, logger = logger)
55 | }
56 |
// Copies privileged variables into the task environment, silently dropping (with an info log)
// any key that is not a valid env var name.
57 | protected def collectEnvironments(): Map[String, String] = {
58 | val vars: PrivilegedVariables = context.getPrivilegedVariables
59 | vars.getKeys.asScala.foldLeft(Map.empty[String, String]) { (env, key) =>
60 | if (isValidEnvKey(key)) {
61 | env ++ Map(key -> vars.get(key))
62 | }
63 | else {
64 | logger.info(s"$key is invalid env key.")
65 | env
66 | }
67 | }
68 | }
69 |
70 | protected def isValidEnvKey(key: String): Boolean = {
71 | key match {
// Regex used as an extractor: matches only when the WHOLE key conforms.
72 | case validEnvKeyRegex() => true
73 | case _ => false
74 | }
75 | }
76 |
77 | protected def createCommandRunner(tmpStorage: TmpStorage): EcsTaskCommandRunner = {
78 | EcsTaskCommandRunner(
79 | tmpStorage = tmpStorage,
80 | mainScript = mainScriptName,
81 | params = params,
82 | environments = collectEnvironments(),
83 | awsConf = aws.conf,
84 | logger = logger
85 | )
86 | }
87 |
// Hook for concrete operators to stage their scripts/inputs into tmpStorage before the run.
88 | protected def prepare(tmpStorage: TmpStorage): Unit
89 |
// Using.resource guarantees tmpStorage.close() even when prepare/run throws.
90 | def runTask(): TaskResult = {
91 | Using.resource(buildTmpStorage()) { tmpStorage: TmpStorage =>
92 | prepare(tmpStorage)
93 | createCommandRunner(tmpStorage).run()
94 | }
95 | }
96 |
97 | }
98 | --------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/EcsTaskCallInternalOperator.scala: --------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.command
2 |
3 | import io.digdag.client.config.Config
4 | import io.digdag.spi.{OperatorContext, TaskResult, TemplateEngine}
5 | import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator
6 |
// Internal operator: simply expands the `_do` block it was given as a subtask.
7 | class EcsTaskCallInternalOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
8 | extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {
9 |
10 | protected val doConfig: Config = params.getNested("_do")
11 |
12 | override def runTask(): TaskResult = {
13 | TaskResult.defaultBuilder(cf).subtaskConfig(doConfig).build()
14 | }
15 |
16 | }
17 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/EcsTaskCommandResultInternalOperator.scala: --------------------------------------------------------------------------------
1 | package pro.civitaspo.digdag.plugin.ecs_task.command
2 |
3 | import com.amazonaws.services.s3.AmazonS3URI
4 | import io.digdag.client.config.Config
5 | import io.digdag.spi.{OperatorContext, TaskResult, TemplateEngine}
6 | import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator
7 | import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper
8 |
9 | import scala.util.{Failure, Try}
10 |
// Internal operator: reads out.json / stdout.log / stderr.log produced by the in-container
// run.sh from the S3 prefix passed via `_command`, fails the task on non-zero exit_code, and
// propagates subtask/export/store params from out.json.
11 | class EcsTaskCommandResultInternalOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
12 | extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {
13 |
14 | protected val locationPrefix: AmazonS3URI = AmazonS3UriWrapper(params.get("_command", classOf[String]))
15 |
16 | override def runTask(): TaskResult = {
17 | logStdoutStderr()
18 |
19 | val out: Config = loadOutJsonContent()
20 | val statusParams: Config = out.getNested("status_params")
21 | val exitCode: Int = statusParams.get("exit_code", classOf[Int])
22 |
23 | if (exitCode != 0) {
// error_message/error_stacktrace are optional (the sh runner writes neither); logs are
// best-effort — a failed S3 read degrades to an empty string rather than masking the failure.
24 | val errorMessage: String = statusParams.get("error_message", classOf[String], "")
25 | val errorStackTrace: String = statusParams.get("error_stacktrace", classOf[String], "")
26 | val stdout: String = Try(loadStdoutLogContent()).getOrElse("")
27 | val stderr: String = Try(loadStderrLogContent()).getOrElse("")
28 | throw new RuntimeException(s"message: '$errorMessage',\nstacktrace: '$errorStackTrace',\nstdout: '$stdout'\nstderr: '$stderr'")
29 | }
30 |
31 | TaskResult
32 | .defaultBuilder(cf)
33 | .subtaskConfig(out.getNestedOrGetEmpty("subtask_config"))
34 | .exportParams(out.getNestedOrGetEmpty("export_params"))
35 | .storeParams(
36 | out
37 | .getNestedOrGetEmpty("store_params")
38 | .setNested("last_ecs_task_command", statusParams)
39 | )
40 | .build()
41 | }
42 |
43 | protected def loadOutJsonContent(): Config = {
44 | val targetUri: AmazonS3URI = AmazonS3UriWrapper(s"$locationPrefix/out.json")
45 | val content: String = loadS3ObjectContent(targetUri)
46 | cf.fromJsonString(content)
47 | }
48 |
// Best-effort logging of the container's stdout/stderr; errors are logged and swallowed
// so a missing log object never fails the task by itself.
49 | protected def logStdoutStderr(): Unit = {
50 | val t: Try[Unit] = Try { // do nothing if failed
51 | logger.info(s"stdout: ${loadStdoutLogContent()}")
52 | logger.info(s"stderr: ${loadStderrLogContent()}")
53 | }
54 | t match {
55 | case Failure(exception) => logger.error(exception.getMessage, exception)
56 | case _ => // do nothing
57 | }
58 | }
59 |
60 | protected def loadStdoutLogContent(): String = {
61 | val targetUri: AmazonS3URI = AmazonS3UriWrapper(s"$locationPrefix/stdout.log")
62 | loadS3ObjectContent(targetUri)
63 | }
64 |
65 | protected def loadStderrLogContent(): String = {
66 | val targetUri: AmazonS3URI = AmazonS3UriWrapper(s"$locationPrefix/stderr.log")
67 | loadS3ObjectContent(targetUri)
68 | }
69 |
70 | protected def loadS3ObjectContent(uri: AmazonS3URI): String
= { 71 | logger.info(s"Load content from: $uri") 72 | aws.withS3(_.getObjectAsString(uri.getBucket, uri.getKey)) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/EcsTaskCommandRunner.scala: -------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task.command 2 | 3 | import java.nio.charset.StandardCharsets 4 | 5 | import com.google.common.base.Optional 6 | import io.digdag.client.config.{Config, ConfigException, ConfigFactory} 7 | import io.digdag.spi.TaskResult 8 | import io.digdag.util.DurationParam 9 | import org.slf4j.Logger 10 | import pro.civitaspo.digdag.plugin.ecs_task.VERSION 11 | import pro.civitaspo.digdag.plugin.ecs_task.aws.AwsConf 12 | 13 | import scala.jdk.CollectionConverters._ 14 | import scala.util.hashing.MurmurHash3 15 | import scala.util.matching.Regex 16 | 17 | case class EcsTaskCommandRunner( 18 | tmpStorage: TmpStorage, 19 | mainScript: String, 20 | params: Config, 21 | environments: Map[String, String], 22 | awsConf: AwsConf, 23 | logger: Logger 24 | ) { 25 | 26 | val cf: ConfigFactory = params.getFactory 27 | 28 | // For the task group of ecs_task.internal_run and ecs_task.wait 29 | val maxRetry: Int = params.get("max_retry", classOf[Int], 3) 30 | 31 | // For ecs_task.register> operator (TaskDefinition) 32 | // NOTE: Use only 1 container 33 | // val containerDefinitions: Seq[ContainerDefinition] = params.parseList("container_definitions", classOf[Config]).asScala.map(configureContainerDefinition).map(_.get) 34 | val sidecars: Seq[Config] = params.parseListOrGetEmpty("sidecars", classOf[Config]).asScala.toSeq 35 | val cpu: Optional[String] = params.getOptional("cpu", classOf[String]) 36 | val ephemeralStorage: Optional[Config] = params.getOptionalNested("ephemeral_storage") 37 | val executionRoleArn: Optional[String] = params.getOptional("execution_role_arn", 
classOf[String]) 38 | 39 | val taskName: String = params.get("task_name", classOf[String]) 40 | val familyPrefix: String = params.get("family_prefix", classOf[String], "") 41 | val familySuffix: String = params.get("family_suffix", classOf[String], "") 42 | val familyInfix: String = params.get("family_infix", classOf[String], taskName) 43 | 44 | val family: String = 45 | params 46 | .get( 47 | "family", 48 | classOf[String], { 49 | val defaultFamilyName: String = s"$familyPrefix$familyInfix$familySuffix" 50 | // NOTE: Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. 51 | // ref. https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html#ECS-RegisterTaskDefinition-request-family 52 | if (defaultFamilyName.length <= 255) defaultFamilyName 53 | else { 54 | val workflowName: String = taskName.split("\\+").filter(_.nonEmpty).head 55 | val taskNameHash: String = { 56 | val seed: Int = 65432 // NOTE: For reproducibility 57 | MurmurHash3.bytesHash(taskName.getBytes(StandardCharsets.UTF_8), seed).abs.toString 58 | } 59 | val defaultFamilyNameWithHashing: String = s"$familyPrefix$workflowName-$taskNameHash$familySuffix" 60 | if (defaultFamilyNameWithHashing.length > 255) throw new ConfigException(s"Cannot shorten the family name: $defaultFamilyName") 61 | else logger.warn(s"Shorten with MurmurHash3: $defaultFamilyName -> $defaultFamilyNameWithHashing") 62 | 63 | defaultFamilyNameWithHashing 64 | } 65 | } 66 | ) 67 | val ipcMode: Optional[String] = params.getOptional("ipc_mode", classOf[String]) 68 | val memory: Optional[String] = params.getOptional("memory", classOf[String]) 69 | val networkMode: Optional[String] = params.getOptional("network_mode", classOf[String]) 70 | val pidMode: Optional[String] = params.getOptional("pid_mode", classOf[String]) 71 | 72 | // NOTE: Use `ecs_task.run>`'s one. 
73 | // val placementConstraints: Seq[TaskDefinitionPlacementConstraint] = params.parseListOrGetEmpty("placement_constraints", classOf[Config]).asScala.map(configureTaskDefinitionPlacementConstraint).map(_.get) 74 | val requiresCompatibilities 75 | : Seq[String] = params.parseListOrGetEmpty("requires_compatibilities", classOf[String]).asScala.toSeq // Valid Values: EC2 | FARGATE 76 | val runtimePlatform: Optional[Config] = params.getOptionalNested("runtime_platform") 77 | // NOTE: Use the same var as `ecs_task.run>`'s one. 78 | // val tags: Optional[Config] = params.getOptionalNested("tags") 79 | val taskRoleArn: Optional[String] = params.getOptional("task_role_arn", classOf[String]) 80 | val volumes: Seq[Config] = params.parseListOrGetEmpty("volumes", classOf[Config]).asScala.toSeq 81 | 82 | // For `ecs_task.register>` operator (ContainerDefinition) 83 | // NOTE: Set by this plugin 84 | // val command: Seq[String] = params.parseListOrGetEmpty("command", classOf[String]).asScala 85 | // NOTE: Set in `ecs_task.register>` TaskDefinition Context. If you set it by container level, use the `overrides` option. 
// (EcsTaskCommandRunner, continued: container-level and run/wait parameters)
  // val cpu: Optional[Int] = params.getOptional("cpu", classOf[Int])
  val dependsOn: Seq[Config] = params.parseListOrGetEmpty("depends_on", classOf[Config]).asScala.toSeq
  val disableNetworking: Optional[Boolean] = params.getOptional("disable_networking", classOf[Boolean])
  val dnsSearchDomains: Seq[String] = params.parseListOrGetEmpty("dns_search_domains", classOf[String]).asScala.toSeq
  val dnsServers: Seq[String] = params.parseListOrGetEmpty("dns_servers", classOf[String]).asScala.toSeq
  // NOTE: the plugin appends its own labels to the user-supplied ones later.
  val dockerLabels: Map[String, String] = params.getMapOrEmpty("docker_labels", classOf[String], classOf[String]).asScala.toMap
  val dockerSecurityOptions: Seq[String] = params.parseListOrGetEmpty("docker_security_options", classOf[String]).asScala.toSeq
  val entryPoint: Seq[String] = params.parseListOrGetEmpty("entry_point", classOf[String]).asScala.toSeq
  // NOTE: the plugin appends its own environment variables to the user-supplied ones later.
  val configEnvironment: Map[String, String] = params.getMapOrEmpty("environments", classOf[String], classOf[String]).asScala.toMap
  // NOTE: this plugin runs exactly one main container, so `essential` is always true.
  // val essential: Optional[Boolean] = params.getOptional("essential", classOf[Boolean])
  val extraHosts: Map[String, String] = params.getMapOrEmpty("extra_hosts", classOf[String], classOf[String]).asScala.toMap
  val healthCheck: Optional[Config] = params.getOptionalNested("health_check")
  val hostname: Optional[String] = params.getOptional("hostname", classOf[String])
  val image: Optional[String] = params.getOptional("image", classOf[String])
  val interactive: Optional[Boolean] = params.getOptional("interactive", classOf[Boolean])
  val links: Seq[String] = params.parseListOrGetEmpty("links", classOf[String]).asScala.toSeq
  val linuxParameters: Optional[Config] = params.getOptionalNested("linux_parameters")
  val logConfiguration: Optional[Config] = params.getOptionalNested("log_configuration")
  val firelensConfiguration: Optional[Config] = params.getOptionalNested("firelens_configuration")
  // NOTE: `memory` belongs to the `ecs_task.register>` TaskDefinition context;
  //       use the `overrides` option for container-level values.
  // val memory: Optional[Int] = params.getOptional("memory", classOf[Int])
  // NOTE: use the `overrides` option for container-level memory reservation.
  // val memoryReservation: Optional[Int] = params.getOptional("memory_reservation", classOf[Int])
  val mountPoints: Seq[Config] = params.parseListOrGetEmpty("mount_points", classOf[Config]).asScala.toSeq
  val containerName: Optional[String] = params.getOptional("container_name", classOf[String])
  val portMappings: Seq[Config] = params.parseListOrGetEmpty("port_mappings", classOf[Config]).asScala.toSeq
  val privileged: Optional[Boolean] = params.getOptional("privileged", classOf[Boolean])
  val pseudoTerminal: Optional[Boolean] = params.getOptional("pseudo_terminal", classOf[Boolean])
  val readonlyRootFilesystem: Optional[Boolean] = params.getOptional("readonly_root_filesystem", classOf[Boolean])
  val repositoryCredentials: Optional[Config] = params.getOptionalNested("repository_credentials")
  val secrets: Seq[Config] = params.parseListOrGetEmpty("secrets", classOf[Config]).asScala.toSeq
  val systemControls: Seq[Config] = params.parseListOrGetEmpty("system_controls", classOf[Config]).asScala.toSeq
  val ulimits: Seq[Config] = params.parseListOrGetEmpty("ulimits", classOf[Config]).asScala.toSeq
  val user: Optional[String] = params.getOptional("user", classOf[String])
  val volumesFrom: Seq[Config] = params.parseListOrGetEmpty("volumes_from", classOf[Config]).asScala.toSeq
  val workingDirectory: Optional[String] = params.getOptional("working_directory", classOf[String])

  // Parameters consumed by the `ecs_task.run` step.
  val capacityProviderStrategy: Seq[Config] = params.parseListOrGetEmpty("capacity_provider_strategy", classOf[Config]).asScala.toSeq
  val cluster: String = params.get("cluster", classOf[String])
  val count: Optional[Int] = params.getOptional("count", classOf[Int])
  val group: Optional[String] = params.getOptional("group", classOf[String])
  val launchType: Optional[String] = params.getOptional("launch_type", classOf[String])
  val networkConfiguration: Optional[Config] = params.getOptionalNested("network_configuration")
  val overrides: Optional[Config] = params.getOptionalNested("overrides")
  val placementConstraints: Seq[Config] = params.parseListOrGetEmpty("placement_constraints", classOf[Config]).asScala.toSeq
  val placementStrategy: Seq[Config] = params.parseListOrGetEmpty("placement_strategy", classOf[Config]).asScala.toSeq
  val platformVersion: Optional[String] = params.getOptional("platform_version", classOf[String])
  val startedBy: Optional[String] = params.getOptional("started_by", classOf[String])
  val tags: Optional[Config] = params.getOptionalNested("tags")
  // NOTE: `task_definition` is produced by the `ecs_task.register` sub task, not read here.
  // val taskDefinition: String = params.get("task_definition", classOf[String])

  // Parameter consumed by the `ecs_task.wait` step.
  val timeout: DurationParam = params.get("timeout", classOf[DurationParam], DurationParam.parse("15m"))

  // `family` restricted to the character set ECS accepts; computed once on first use.
  lazy val normalizedFamily: String = normalizeFamily(family)

  /**
   * Entry point: emits three sub tasks — register the task definition,
   * run-and-wait (wrapped for group retry), then collect the result.
   */
  def run(): TaskResult = {
    val subTasks: Config = cf.create()
    subTasks.setNested("+register", ecsTaskRegisterSubTask())
    subTasks.setNested("+with-retry", runAndWaitWithRetryTaskGroup())
    subTasks.setNested("+result", ecsTaskResultSubTask())

    val builder = TaskResult.defaultBuilder(cf)
    builder.subtaskConfig(subTasks)
    builder.build()
  }

  /**
   * Wraps the run/wait pair inside an `ecs_task.call_internal` group so that
   * `_retry` applies to the pair as a whole.
   */
  def runAndWaitWithRetryTaskGroup(): Config = {
    val runAndWait: Config = cf.create()
    runAndWait.setNested("+run", ecsTaskRunInternalSubTask())
    runAndWait.setNested("+wait", ecsTaskWaitSubTask())

    // Local renamed from `group` to avoid shadowing the `group` parameter field above.
    val retryGroup: Config = cf.create()
    retryGroup.set("_retry", maxRetry)
    // NOTE: workaround for "Group retry does not work in call> operator"
    //       https://github.com/treasure-data/digdag/issues/849
    retryGroup.set("_type", "ecs_task.call_internal")
    retryGroup.setNested("_do", runAndWait)
    retryGroup
  }

// (EcsTaskCommandRunner, continued: sub task builders)
  /** Builds the `ecs_task.register` sub task that registers the generated task definition. */
  protected def ecsTaskRegisterSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.register")
      subTask.set("_command", taskDefinitionConfig())
    }
  }

  /** Builds the `ecs_task.run_internal` sub task that starts the ECS task. */
  protected def ecsTaskRunInternalSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.run_internal")
      // FIX(review): was `subTask.set("capacityProviderStrategy", ...)`. Every sibling key here
      // is snake_case and the option is parsed as "capacity_provider_strategy" above, so the
      // camelCase key would be silently ignored by the receiving operator.
      subTask.set("capacity_provider_strategy", capacityProviderStrategy.asJava)
      subTask.set("cluster", cluster)
      subTask.setOptional("count", count)
      subTask.setOptional("group", group)
      subTask.setOptional("launch_type", launchType)
      subTask.setOptional("network_configuration", networkConfiguration)
      subTask.setOptional("overrides", overrides)
      subTask.set("placement_constraints", placementConstraints.asJava)
      subTask.set("placement_strategy", placementStrategy.asJava)
      subTask.setOptional("platform_version", platformVersion)
      subTask.setOptional("started_by", startedBy)
      subTask.setOptional("tags", tags)
      // The ARN is exported by the preceding `ecs_task.register` sub task.
      subTask.set("task_definition", "${last_ecs_task_register.task_definition_arn}")
    }
  }

  /** Builds the `ecs_task.wait` sub task that blocks until the started tasks stop. */
  protected def ecsTaskWaitSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.wait")
      subTask.set("cluster", cluster)
      subTask.set("tasks", "${last_ecs_task_run.task_arns}")
      subTask.set("timeout", timeout.toString)
      // Exit codes are handled by the result sub task, not by the waiter.
      subTask.set("ignore_exit_code", true)
    }
  }

  /** Builds the sub task that fetches the command result back from tmp storage. */
  protected def ecsTaskResultSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.command_result_internal")
      subTask.set("_command", tmpStorage.getLocation)
    }
  }

  /**
   * Creates a sub task config pre-populated with the shared AWS settings
   * (auth method, profile, proxy, region, endpoint) and applies `f` to it.
   *
   * @param f mutator applied to the freshly created config (its return value is ignored)
   * @return the mutated config
   */
  protected def withDefaultSubTask(f: Config => Config): Config = {
    val subTask: Config = cf.create()

    subTask.set("auth_method", awsConf.authMethod)
    subTask.set("profile_name", awsConf.profileName)
    if (awsConf.profileFile.isPresent) subTask.set("profile_file", awsConf.profileFile.get())
    subTask.set("use_http_proxy", awsConf.useHttpProxy)
    if (awsConf.region.isPresent) subTask.set("region", awsConf.region.get())
    if (awsConf.endpoint.isPresent) subTask.set("endpoint", awsConf.endpoint.get())

    f(subTask)
    subTask
  }

  /** Assembles the TaskDefinition-level config passed as `_command` to `ecs_task.register`. */
  protected def taskDefinitionConfig(): Config = {
    val c: Config = cf.create()

    c.set("container_definitions", (Seq(containerDefinitionConfig()) ++ sidecars).asJava)
    c.setOptional("cpu", cpu)
    c.setOptional("ephemeral_storage", ephemeralStorage)
    c.setOptional("execution_role_arn", executionRoleArn)
    c.set("family", normalizedFamily)
    c.setOptional("ipc_mode", ipcMode)
    c.setOptional("memory", memory)
    c.setOptional("network_mode", networkMode)
    c.setOptional("pid_mode", pidMode)
    c.set("requires_compatibilities", requiresCompatibilities.asJava)
    c.setOptional("runtime_platform", runtimePlatform)
    c.setOptional("tags", tags)
    c.setOptional("task_role_arn", taskRoleArn)
    c.set("volumes", volumes.asJava)

    c
  }

  /**
   * Assembles the main container's definition: the generated bootstrap command,
   * plugin metadata (version label), merged environment variables, and all
   * user-supplied container-level options.
   */
  protected def containerDefinitionConfig(): Config = {
    val c: Config = cf.create()

    val command: Seq[String] = tmpStorage.buildTaskCommand(mainScript)
    logger.info(s"Run in the container: ${command.mkString(" ")}")
    c.set("command", command.asJava)
    c.set("depends_on", dependsOn.asJava)
    c.setOptional("disable_networking", disableNetworking)
    c.set("dns_search_domains", dnsSearchDomains.asJava)
    c.set("dns_servers", dnsServers.asJava)
    val additionalLabels: Map[String, String] = Map("pro.civitaspo.digdag.plugin.ecs_task.version" -> VERSION)
    c.set("docker_labels", (dockerLabels ++ additionalLabels).asJava)
    // FIX(review): `docker_security_options` and `hostname` were parsed from params but never
    // copied into the container definition, so user-supplied values were silently dropped.
    // Forward them here — TODO confirm the register operator consumes these keys.
    c.set("docker_security_options", dockerSecurityOptions.asJava)
    c.set("entry_point", entryPoint.asJava)
    c.set("environments", (configEnvironment ++ environments).asJava)
    // This plugin always runs exactly one main container, so it is always essential.
    c.set("essential", true)
    c.set("extra_hosts", extraHosts.asJava)
    c.setOptional("health_check", healthCheck)
    c.setOptional("hostname", hostname)
    c.setOptional("image", image)
    c.setOptional("interactive", interactive)
    c.set("links", links.asJava)
    c.setOptional("linux_parameters", linuxParameters)
    c.setOptional("log_configuration", logConfiguration)
    c.setOptional("firelens_configuration", firelensConfiguration)
    c.set("mount_points", mountPoints.asJava)
    c.set("name", containerName.or(normalizedFamily))
    c.set("port_mappings", portMappings.asJava)
    c.setOptional("privileged", privileged)
    c.setOptional("pseudo_terminal", pseudoTerminal)
    c.setOptional("readonly_root_filesystem", readonlyRootFilesystem)
    c.setOptional("repository_credentials", repositoryCredentials)
    c.set("secrets", secrets.asJava)
    c.set("system_controls", systemControls.asJava)
    c.set("ulimits", ulimits.asJava)
    c.setOptional("user", user)
    c.set("volumes_from", volumesFrom.asJava)
    c.setOptional("working_directory", workingDirectory)

    c
  }

  /**
   * Replaces every character not allowed in an ECS task definition family with `_`.
   * ref. https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html#ECS-RegisterTaskDefinition-request-family
   *
   * NOTE(review): rewritten from a per-char regex match whose branches mixed Char and
   * String (widening the map result to Any); the explicit ASCII check below is
   * behavior-identical for the pattern [a-zA-Z0-9_-].
   */
  protected def normalizeFamily(family: String): String = {
    def isValidFamilyChar(ch: Char): Boolean =
      (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '_' || ch == '-'

    val after: String = family.map(ch => if (isValidFamilyChar(ch)) ch else '_')
    if (!family.contentEquals(after)) logger.warn(s"Normalized family: $family -> $after")
    after
  }

}
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/S3TmpStorage.scala: --------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.command

import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Path}

import com.amazonaws.services.s3.AmazonS3URI
import io.digdag.util.Workspace
import org.apache.commons.io.FileUtils
import org.slf4j.Logger
import pro.civitaspo.digdag.plugin.ecs_task.aws.Aws

import scala.jdk.CollectionConverters._
import scala.util.{Random, Using}

/**
 * [[TmpStorage]] backed by S3: files are staged into a local temp directory
 * under the project's `.digdag/tmp` and uploaded recursively to `location`.
 */
case class S3TmpStorage(shellCommand: String, location: AmazonS3URI, aws: Aws, workspace: Workspace, logger: Logger) extends TmpStorage {

  // Local staging directory; created lazily on the first stage* call and removed by close().
  private lazy val tmpDir: Path = createTmpDir()

  private def createTmpDir(): Path = {
    val dir = workspace.getProjectPath.resolve(".digdag/tmp")
    Files.createDirectories(dir)
    val random: String = Random.alphanumeric.take(10).mkString
    Files.createTempDirectory(dir, random)
  }

  private def writeFile(file: Path, content: String): Unit = {
    logger.info(s"Write into ${file.toString}")
    Using.resource(workspace.newBufferedWriter(file.toString, UTF_8)) { writer =>
      writer.write(content)
    }
  }

  override def getLocation: String = location.toString

  override def stageFile(fileName: String, content: String): Unit = {
    val file = Files.createFile(tmpDir.resolve(fileName))
    writeFile(file, content)
  }

  override def stageWorkspace(): Unit = {
    // FIX(review): Files.list returns a Stream backed by an open directory handle which the
    // original code never closed. Materialize the listing inside Using.resource so the
    // handle is released deterministically.
    val targets: Seq[Path] = Using.resource(Files.list(workspace.getPath)) { stream =>
      stream.iterator().asScala.filterNot(_.endsWith(".digdag")).toSeq
    }
    val workspacePath: Path = Files.createDirectory(tmpDir.resolve("workspace"))
    targets.foreach { path =>
      logger.info(s"Copy: $path -> $workspacePath")
      if (Files.isDirectory(path)) FileUtils.copyDirectoryToDirectory(path.toFile, workspacePath.toFile)
      else FileUtils.copyFileToDirectory(path.toFile, workspacePath.toFile)
    }
  }

  override def buildTaskCommand(mainScript: String): Seq[String] = {
    // The container downloads only the entry script; the script fetches whatever else it needs.
    Seq(shellCommand, "-c", s"aws s3 cp ${location.toString}/$mainScript ./ && $shellCommand $mainScript")
  }

  override def storeStagedFiles(): Unit = {
    logger.info(s"Recursive Upload: $tmpDir -> ${location.getURI}")
    aws.withTransferManager { xfer =>
      val upload = xfer.uploadDirectory(
        location.getBucket,
        location.getKey,
        tmpDir.toFile,
        true // includeSubdirectories
      )
      upload.waitForCompletion()
    }
  }

  override def close(): Unit = {
    logger.info(s"Remove: $tmpDir")
    FileUtils.deleteDirectory(tmpDir.toFile)
  }
}
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/command/TmpStorage.scala: --------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.command

import io.digdag.util.Workspace

/**
 * Staging area for files shipped to the ECS task container.
 * Implementations stage files locally, upload them to a remote location,
 * and clean up local state on close.
 */
trait TmpStorage extends AutoCloseable {

  // Shell interpreter used when building the in-container bootstrap command.
  val shellCommand: String

  val workspace: Workspace

  /** Remote location (e.g. an S3 URI) where staged files are stored. */
  def getLocation: String

  /** Stages a single file with the given content. */
  def stageFile(fileName: String, content: String): Unit

  /** Stages the digdag workspace directory contents. */
  def stageWorkspace(): Unit

  /** Builds the command line executed inside the container to fetch and run `mainScript`. */
  def buildTaskCommand(mainScript: String): Seq[String]

  /** Uploads everything staged so far to [[getLocation]]. */
  def storeStagedFiles(): Unit

}

-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/embulk/EcsTaskEmbulkOperator.scala: --------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.embulk

import java.io.File
import java.nio.charset.StandardCharsets.UTF_8

import io.digdag.client.config.Config
import io.digdag.spi.{OperatorContext, TemplateEngine}
import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper
import pro.civitaspo.digdag.plugin.ecs_task.command.{AbstractEcsTaskCommandOperator, TmpStorage}

import scala.jdk.CollectionConverters._
import scala.io.Source
import scala.util.{Try, Using}

/**
 * `ecs_task.embulk>` operator: stages an embulk config plus a generated
 * `run.sh` into tmp storage so embulk runs inside the ECS task container.
 */
class EcsTaskEmbulkOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
    extends AbstractEcsTaskCommandOperator(operatorName, context, systemConfig, templateEngine) {

  private val runShResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/embulk/run.sh"
  override protected val mainScriptName: String = "run.sh"

  // `_command` is either a path to an embulk config file in the workspace or an inline config;
  // the file path is tried first and the inline form is used as the fallback.
  protected val embulkConfig: String =
    Try {
      val configPath: String = params.get("_command", classOf[String])
      val configFile: File = workspace.getFile(configPath)
      workspace.templateFile(templateEngine, configFile.getPath, UTF_8, params)
    }.getOrElse {
      val inlineConfig: Config = params.getNested("_command")
      templateEngine.template(inlineConfig.toString, params)
    }

  protected val embulkPlugins: Seq[String] = params.getListOrEmpty("embulk_plugins", classOf[String]).asScala.toSeq

  override def prepare(tmpStorage: TmpStorage): Unit = {
    tmpStorage.stageFile("config.yml", embulkConfig)
    tmpStorage.stageFile(mainScriptName, createRunShContent(tmpStorage))
    tmpStorage.stageWorkspace()
    tmpStorage.storeStagedFiles()
  }

  /** Renders `run.sh` from the bundled template with the S3 location and setup command filled in. */
  protected def createRunShContent(tmpStorage: TmpStorage): String = {
    val s3Location = AmazonS3UriWrapper(tmpStorage.getLocation)
    val templateParams: Config = params.deepCopy()
    templateParams.set("ECS_TASK_EMBULK_BUCKET", s3Location.getBucket)
    templateParams.set("ECS_TASK_EMBULK_PREFIX", s3Location.getKey)

    val setupCommand: String =
      if (embulkPlugins.isEmpty) "echo 'no setup command'"
      else {
        logger.warn("`embulk_plugins` option is experimental, so please be careful in the plugin update.")
        (Seq("embulk", "gem", "install") ++ embulkPlugins).mkString(" ")
      }
    templateParams.set("ECS_TASK_EMBULK_SETUP_COMMAND", setupCommand)

    Using.resource(classOf[EcsTaskEmbulkOperator].getResourceAsStream(runShResourcePath)) { is =>
      templateEngine.template(Source.fromInputStream(is).mkString, templateParams)
    }
  }
}
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/exception/package.scala: --------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task

package object exception {

  /** Signals that an operation kept failing until its retry deadline passed. */
  class RetryTimeoutException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause)
}
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/package.scala: --------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin

package object ecs_task {

  // Plugin version; embedded into the main container's docker labels.
  val VERSION: String = "0.1.6"

}
-------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/py/EcsTaskPyOperator.scala:
-------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task.py 2 | 3 | import io.digdag.client.config.Config 4 | import io.digdag.spi.{OperatorContext, TemplateEngine} 5 | import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper 6 | import pro.civitaspo.digdag.plugin.ecs_task.command.{AbstractEcsTaskCommandOperator, TmpStorage} 7 | 8 | import scala.jdk.CollectionConverters._ 9 | import scala.io.Source 10 | import scala.util.Using 11 | 12 | class EcsTaskPyOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine) 13 | extends AbstractEcsTaskCommandOperator(operatorName, context, systemConfig, templateEngine) { 14 | 15 | private val runnerPyResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/py/runner.py" 16 | private val runShResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/py/run.sh" 17 | override protected val mainScriptName: String = "run.sh" 18 | 19 | protected val command: String = params.get("_command", classOf[String]) 20 | protected val pipInstall: Seq[String] = params.getListOrEmpty("pip_install", classOf[String]).asScala.toSeq 21 | 22 | override def prepare(tmpStorage: TmpStorage): Unit = { 23 | tmpStorage.stageFile("in.json", createInJsonContent()) 24 | tmpStorage.stageFile("runner.py", createRunnerPyContent()) 25 | tmpStorage.stageFile(mainScriptName, createRunShContent(tmpStorage)) 26 | tmpStorage.stageWorkspace() 27 | tmpStorage.storeStagedFiles() 28 | } 29 | 30 | protected def createInJsonContent(): String = { 31 | templateEngine.template(cf.create.set("params", params).toString, params) 32 | } 33 | 34 | protected def createRunnerPyContent(): String = { 35 | Using.resource(classOf[EcsTaskPyOperator].getResourceAsStream(runnerPyResourcePath)) { is => 36 | Source.fromInputStream(is).mkString 37 | } 38 | } 39 | 40 | protected def createRunShContent(tmpStorage: TmpStorage): String = { 41 | val dup: 
Config = params.deepCopy() 42 | dup.set("ECS_TASK_PY_BUCKET", AmazonS3UriWrapper(tmpStorage.getLocation).getBucket) 43 | dup.set("ECS_TASK_PY_PREFIX", AmazonS3UriWrapper(tmpStorage.getLocation).getKey) 44 | dup.set("ECS_TASK_PY_COMMAND", command) 45 | 46 | dup.set("ECS_TASK_PY_SETUP_COMMAND", "echo 'no setup command'") // set a default value 47 | if (pipInstall.nonEmpty) { 48 | logger.warn("`pip_install` option is experimental, so please be careful in the plugin update.") 49 | val cmd: String = (Seq("pip", "install") ++ pipInstall).mkString(" ") 50 | dup.set("ECS_TASK_PY_SETUP_COMMAND", cmd) 51 | } 52 | 53 | Using.resource(classOf[EcsTaskPyOperator].getResourceAsStream(runShResourcePath)) { is => 54 | val runShContentTemplate: String = Source.fromInputStream(is).mkString 55 | templateEngine.template(runShContentTemplate, dup) 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/rb/EcsTaskRbOperator.scala: -------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task.rb 2 | 3 | import io.digdag.client.config.Config 4 | import io.digdag.spi.{OperatorContext, TemplateEngine} 5 | import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper 6 | import pro.civitaspo.digdag.plugin.ecs_task.command.{AbstractEcsTaskCommandOperator, TmpStorage} 7 | 8 | import scala.jdk.CollectionConverters._ 9 | import scala.io.Source 10 | import scala.util.Using 11 | 12 | class EcsTaskRbOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine) 13 | extends AbstractEcsTaskCommandOperator(operatorName, context, systemConfig, templateEngine) { 14 | 15 | private val runnerRbResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/rb/runner.rb" 16 | private val runShResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/rb/run.sh" 17 | override protected 
val mainScriptName: String = "run.sh" 18 | 19 | protected val command: String = params.get("_command", classOf[String]) 20 | protected val gemInstall: Seq[String] = params.getListOrEmpty("gem_install", classOf[String]).asScala.toSeq 21 | protected val requirePath: String = params.get("require", classOf[String]) 22 | 23 | override def prepare(tmpStorage: TmpStorage): Unit = { 24 | tmpStorage.stageFile("in.json", createInJsonContent()) 25 | tmpStorage.stageFile("runner.rb", createRunnerRbContent()) 26 | tmpStorage.stageFile(mainScriptName, createRunShContent(tmpStorage)) 27 | tmpStorage.stageWorkspace() 28 | tmpStorage.storeStagedFiles() 29 | } 30 | 31 | protected def createInJsonContent(): String = { 32 | templateEngine.template(cf.create.set("params", params).toString, params) 33 | } 34 | 35 | protected def createRunnerRbContent(): String = { 36 | Using.resource(classOf[EcsTaskRbOperator].getResourceAsStream(runnerRbResourcePath)) { is => 37 | Source.fromInputStream(is).mkString 38 | } 39 | } 40 | 41 | protected def createRunShContent(tmpStorage: TmpStorage): String = { 42 | val dup: Config = params.deepCopy() 43 | dup.set("ECS_TASK_RB_BUCKET", AmazonS3UriWrapper(tmpStorage.getLocation).getBucket) 44 | dup.set("ECS_TASK_RB_PREFIX", AmazonS3UriWrapper(tmpStorage.getLocation).getKey) 45 | dup.set("ECS_TASK_RB_REQUIRE", requirePath) 46 | dup.set("ECS_TASK_RB_COMMAND", command) 47 | 48 | dup.set("ECS_TASK_RB_SETUP_COMMAND", "echo 'no setup command'") // set a default value 49 | if (gemInstall.nonEmpty) { 50 | logger.warn("`gem_install` option is experimental, so please be careful in the plugin update.") 51 | val cmd: String = (Seq("gem", "install") ++ gemInstall).mkString(" ") 52 | dup.set("ECS_TASK_RB_SETUP_COMMAND", cmd) 53 | } 54 | 55 | Using.resource(classOf[EcsTaskRbOperator].getResourceAsStream(runShResourcePath)) { is => 56 | val runShContentTemplate: String = Source.fromInputStream(is).mkString 57 | templateEngine.template(runShContentTemplate, dup) 58 | } 59 | 
} 60 | } 61 | -------------------------------------------------------------------------------- /src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/register/EcsTaskRegisterOperator.scala: -------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task.register 2 | 3 | import com.amazonaws.services.ecs.model.{ 4 | ContainerDefinition, 5 | ContainerDependency, 6 | Device, 7 | DockerVolumeConfiguration, 8 | EFSAuthorizationConfig, 9 | EFSVolumeConfiguration, 10 | EphemeralStorage, 11 | FirelensConfiguration, 12 | HealthCheck, 13 | HostEntry, 14 | HostVolumeProperties, 15 | KernelCapabilities, 16 | KeyValuePair, 17 | LinuxParameters, 18 | LogConfiguration, 19 | MountPoint, 20 | PortMapping, 21 | RegisterTaskDefinitionRequest, 22 | RegisterTaskDefinitionResult, 23 | RepositoryCredentials, 24 | RuntimePlatform, 25 | Secret, 26 | SystemControl, 27 | Tag, 28 | TaskDefinitionPlacementConstraint, 29 | Tmpfs, 30 | Ulimit, 31 | Volume, 32 | VolumeFrom 33 | } 34 | import com.google.common.base.Optional 35 | import com.google.common.collect.ImmutableList 36 | import io.digdag.client.config.{Config, ConfigKey} 37 | import io.digdag.spi.{ImmutableTaskResult, OperatorContext, TaskResult, TemplateEngine} 38 | import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator 39 | 40 | import scala.jdk.CollectionConverters._ 41 | 42 | class EcsTaskRegisterOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine) 43 | extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) { 44 | 45 | protected val config: Config = params.getNested("_command") 46 | 47 | protected def buildRegisterTaskDefinitionRequest(c: Config): RegisterTaskDefinitionRequest = { 48 | val req: RegisterTaskDefinitionRequest = new RegisterTaskDefinitionRequest() 49 | 50 | val containerDefinitions: Seq[ContainerDefinition] = 51 | c.parseList("container_definitions", 
classOf[Config]).asScala.map(configureContainerDefinition).map(_.get).toSeq 52 | val cpu: Optional[String] = c.getOptional("cpu", classOf[String]) 53 | val ephemeralStorage: Optional[EphemeralStorage] = configureEphemeralStorage(c.parseNestedOrGetEmpty("ephemeral_storage")) 54 | val executionRoleArn: Optional[String] = c.getOptional("execution_role_arn", classOf[String]) 55 | val family: String = c.get("family", classOf[String]) 56 | val ipcMode: Optional[String] = c.getOptional("ipc_mode", classOf[String]) 57 | val memory: Optional[String] = c.getOptional("memory", classOf[String]) 58 | val networkMode: Optional[String] = c.getOptional("network_mode", classOf[String]) 59 | val pidMode: Optional[String] = c.getOptional("pid_mode", classOf[String]) 60 | 61 | val placementConstraints: Seq[TaskDefinitionPlacementConstraint] = 62 | c.parseListOrGetEmpty("placement_constraints", classOf[Config]).asScala.map(configureTaskDefinitionPlacementConstraint).map(_.get).toSeq 63 | val requiresCompatibilities: Seq[String] = c.parseListOrGetEmpty("requires_compatibilities", classOf[String]).asScala.toSeq // Valid Values: EC2 | FARGATE 64 | val runtimePlatform: Optional[RuntimePlatform] = configureRuntimePlatform(c.parseNestedOrGetEmpty("runtime_platform")) 65 | val tags: Seq[Tag] = 66 | c.getMapOrEmpty("tags", classOf[String], classOf[String]).asScala.map((t: (String, String)) => new Tag().withKey(t._1).withValue(t._2)).toSeq 67 | val taskRoleArn: Optional[String] = c.getOptional("task_role_arn", classOf[String]) 68 | val volumes: Seq[Volume] = c.parseListOrGetEmpty("volumes", classOf[Config]).asScala.map(configureVolume).map(_.get).toSeq 69 | 70 | req.setContainerDefinitions(containerDefinitions.asJava) 71 | if (cpu.isPresent) req.setCpu(cpu.get) 72 | if (ephemeralStorage.isPresent) req.setEphemeralStorage(ephemeralStorage.get) 73 | if (executionRoleArn.isPresent) req.setExecutionRoleArn(executionRoleArn.get) 74 | req.setFamily(family) 75 | if (ipcMode.isPresent) throw new 
UnsupportedOperationException("Currently aws-java-sdk does not support ipc_mode.") 76 | if (memory.isPresent) req.setMemory(memory.get) 77 | if (networkMode.isPresent) req.setNetworkMode(networkMode.get) 78 | if (pidMode.isPresent) throw new UnsupportedOperationException("Currently aws-java-sdk does not support pid_mode.") 79 | if (placementConstraints.nonEmpty) req.setPlacementConstraints(placementConstraints.asJava) 80 | if (requiresCompatibilities.nonEmpty) req.setRequiresCompatibilities(requiresCompatibilities.asJava) 81 | if (runtimePlatform.isPresent) req.setRuntimePlatform(runtimePlatform.get) 82 | if (tags.nonEmpty) req.setTags(tags.asJava) 83 | if (taskRoleArn.isPresent) req.setTaskRoleArn(taskRoleArn.get) 84 | if (volumes.nonEmpty) req.setVolumes(volumes.asJava) 85 | 86 | req 87 | } 88 | 89 | protected def configureContainerDefinition(c: Config): Optional[ContainerDefinition] = { 90 | if (c.isEmpty) return Optional.absent() 91 | 92 | val command: Seq[String] = c.parseListOrGetEmpty("command", classOf[String]).asScala.toSeq 93 | val cpu: Optional[Int] = c.getOptional("cpu", classOf[Int]) 94 | val dependsOn: Seq[ContainerDependency] = c.parseListOrGetEmpty("depends_on", classOf[Config]).asScala.map(configureDependsOn).map(_.get).toSeq 95 | val disableNetworking: Optional[Boolean] = c.getOptional("disable_networking", classOf[Boolean]) 96 | val dnsSearchDomains: Seq[String] = c.parseListOrGetEmpty("dns_search_domains", classOf[String]).asScala.toSeq 97 | val dnsServers: Seq[String] = c.parseListOrGetEmpty("dns_servers", classOf[String]).asScala.toSeq 98 | val dockerLabels: Map[String, String] = c.getMapOrEmpty("docker_labels", classOf[String], classOf[String]).asScala.toMap 99 | val dockerSecurityOptions: Seq[String] = c.parseListOrGetEmpty("docker_security_options", classOf[String]).asScala.toSeq 100 | val entryPoint: Seq[String] = c.parseListOrGetEmpty("entry_point", classOf[String]).asScala.toSeq 101 | val environments: Seq[KeyValuePair] = c 102 | 
.getMapOrEmpty("environments", classOf[String], classOf[String]) 103 | .asScala 104 | .map((t: (String, String)) => new KeyValuePair().withName(t._1).withValue(t._2)) 105 | .toSeq // TODO: doc 106 | val essential: Optional[Boolean] = c.getOptional("essential", classOf[Boolean]) 107 | val extraHosts: Seq[HostEntry] = c 108 | .getMapOrEmpty("extra_hosts", classOf[String], classOf[String]) 109 | .asScala 110 | .map((t: (String, String)) => new HostEntry().withHostname(t._1).withIpAddress(t._2)) 111 | .toSeq // TODO: doc 112 | val healthCheck: Optional[HealthCheck] = configureHealthCheck(c.parseNestedOrGetEmpty("health_check")) 113 | val hostname: Optional[String] = c.getOptional("hostname", classOf[String]) 114 | val image: Optional[String] = c.getOptional("image", classOf[String]) 115 | val interactive: Optional[Boolean] = c.getOptional("interactive", classOf[Boolean]) 116 | val links: Seq[String] = c.parseListOrGetEmpty("links", classOf[String]).asScala.toSeq 117 | val linuxParameters: Optional[LinuxParameters] = configureLinuxParameters(c.parseNestedOrGetEmpty("linux_parameters")) 118 | val logConfiguration: Optional[LogConfiguration] = configureLogConfiguration(c.parseNestedOrGetEmpty("log_configuration")) 119 | val firelensConfiguration: Optional[FirelensConfiguration] = configureFirelensConfiguration(c.parseNestedOrGetEmpty("firelens_configuration")) 120 | val memory: Optional[Int] = c.getOptional("memory", classOf[Int]) 121 | val memoryReservation: Optional[Int] = c.getOptional("memory_reservation", classOf[Int]) 122 | val mountPoints: Seq[MountPoint] = c.parseListOrGetEmpty("mount_points", classOf[Config]).asScala.map(configureMountPoint).map(_.get).toSeq 123 | val name: Optional[String] = c.getOptional("name", classOf[String]) 124 | val portMappings: Seq[PortMapping] = c.parseListOrGetEmpty("port_mappings", classOf[Config]).asScala.map(configurePortMapping).map(_.get).toSeq 125 | val privileged: Optional[Boolean] = c.getOptional("privileged", 
classOf[Boolean]) 126 | val pseudoTerminal: Optional[Boolean] = c.getOptional("pseudo_terminal", classOf[Boolean]) 127 | val readonlyRootFilesystem: Optional[Boolean] = c.getOptional("readonly_root_filesystem", classOf[Boolean]) 128 | val repositoryCredentials: Optional[RepositoryCredentials] = configureRepositoryCredentials(c.parseNestedOrGetEmpty("repository_credentials")) 129 | val secrets: Seq[Secret] = c.parseListOrGetEmpty("secrets", classOf[Config]).asScala.map(configureSecrets).map(_.get).toSeq 130 | val systemControls: Seq[SystemControl] = c.parseListOrGetEmpty("system_controls", classOf[Config]).asScala.map(configureSystemControl).map(_.get).toSeq 131 | val ulimits: Seq[Ulimit] = c.parseListOrGetEmpty("ulimits", classOf[Config]).asScala.map(configureUlimit).map(_.get).toSeq 132 | val user: Optional[String] = c.getOptional("user", classOf[String]) 133 | val volumesFrom: Seq[VolumeFrom] = c.parseListOrGetEmpty("volumes_from", classOf[Config]).asScala.map(configureVolumeFrom).map(_.get).toSeq 134 | val workingDirectory: Optional[String] = c.getOptional("working_directory", classOf[String]) 135 | 136 | val cd: ContainerDefinition = new ContainerDefinition() 137 | cd.setCommand(command.asJava) 138 | if (cpu.isPresent) cd.setCpu(cpu.get) 139 | if (dependsOn.nonEmpty) cd.setDependsOn(dependsOn.asJava) 140 | if (disableNetworking.isPresent) cd.setDisableNetworking(disableNetworking.get) 141 | if (dnsSearchDomains.nonEmpty) cd.setDnsSearchDomains(dnsSearchDomains.asJava) 142 | if (dnsServers.nonEmpty) cd.setDnsServers(dnsServers.asJava) 143 | if (dockerLabels.nonEmpty) cd.setDockerLabels(dockerLabels.asJava) 144 | if (dockerSecurityOptions.nonEmpty) cd.setDockerSecurityOptions(dockerSecurityOptions.asJava) 145 | if (entryPoint.nonEmpty) cd.setEntryPoint(entryPoint.asJava) 146 | if (environments.nonEmpty) cd.setEnvironment(environments.asJava) // TODO: merge params? 
147 | if (essential.isPresent) cd.setEssential(essential.get) 148 | if (extraHosts.nonEmpty) cd.setExtraHosts(extraHosts.asJava) 149 | if (healthCheck.isPresent) cd.setHealthCheck(healthCheck.get) 150 | if (hostname.isPresent) cd.setHostname(hostname.get) 151 | if (image.isPresent) cd.setImage(image.get) 152 | if (interactive.isPresent) cd.setInteractive(interactive.get) 153 | if (links.nonEmpty) cd.setLinks(links.asJava) 154 | if (linuxParameters.isPresent) cd.setLinuxParameters(linuxParameters.get) 155 | if (logConfiguration.isPresent) cd.setLogConfiguration(logConfiguration.get) 156 | if (firelensConfiguration.isPresent) cd.setFirelensConfiguration(firelensConfiguration.get) 157 | if (memory.isPresent) cd.setMemory(memory.get) 158 | if (memoryReservation.isPresent) cd.setMemoryReservation(memoryReservation.get) 159 | if (mountPoints.nonEmpty) cd.setMountPoints(mountPoints.asJava) 160 | if (name.isPresent) cd.setName(name.get) 161 | if (portMappings.nonEmpty) cd.setPortMappings(portMappings.asJava) 162 | if (privileged.isPresent) cd.setPrivileged(privileged.get) 163 | if (pseudoTerminal.isPresent) cd.setPseudoTerminal(pseudoTerminal.get) 164 | if (readonlyRootFilesystem.isPresent) cd.setReadonlyRootFilesystem(readonlyRootFilesystem.get) 165 | if (repositoryCredentials.isPresent) cd.setRepositoryCredentials(repositoryCredentials.get) 166 | if (secrets.nonEmpty) cd.setSecrets(secrets.asJava) 167 | if (systemControls.nonEmpty) cd.setSystemControls(systemControls.asJava) 168 | if (ulimits.nonEmpty) cd.setUlimits(ulimits.asJava) 169 | if (user.isPresent) cd.setUser(user.get) 170 | if (volumesFrom.nonEmpty) cd.setVolumesFrom(volumesFrom.asJava) 171 | if (workingDirectory.isPresent) cd.setWorkingDirectory(workingDirectory.get) 172 | 173 | Optional.of(cd) 174 | } 175 | 176 | protected def configureDependsOn(c: Config): Optional[ContainerDependency] = { 177 | if (c.isEmpty) return Optional.absent() 178 | 179 | val containerName: Optional[String] = 
/** Builds an ECS [[HealthCheck]] from the nested "health_check" config.
  *
  * @param c the nested config parsed from the "health_check" key; when empty, no health check is configured
  * @return the configured health check, or absent when `c` is empty
  *
  * FIX: these values were previously read from the operator-level `params`
  * instead of the nested config `c`, so any settings under "health_check"
  * were silently ignored (and top-level keys like "command" were picked up
  * by mistake). Every sibling configure* method reads from `c`; do the same.
  */
protected def configureHealthCheck(c: Config): Optional[HealthCheck] = {
  if (c.isEmpty) return Optional.absent()

  // "command" is required by the ECS API, so parse it strictly.
  val command: Seq[String] = c.parseList("command", classOf[String]).asScala.toSeq
  val interval: Optional[Int] = c.getOptional("interval", classOf[Int])
  val retries: Optional[Int] = c.getOptional("retries", classOf[Int])
  val startPeriod: Optional[Int] = c.getOptional("start_period", classOf[Int])
  val timeout: Optional[Int] = c.getOptional("timeout", classOf[Int])

  val hc: HealthCheck = new HealthCheck()
  hc.setCommand(command.asJava)
  if (interval.isPresent) hc.setInterval(interval.get)
  if (retries.isPresent) hc.setRetries(retries.get)
  if (startPeriod.isPresent) hc.setStartPeriod(startPeriod.get)
  if (timeout.isPresent) hc.setTimeout(timeout.get)

  Optional.of(hc)
}
/** Builds ECS [[KernelCapabilities]] (Linux capabilities to add/drop)
  * from the nested "capabilities" config, or absent when the config is empty.
  */
protected def configureKernelCapabilities(c: Config): Optional[KernelCapabilities] = {
  if (c.isEmpty) return Optional.absent()

  val capabilitiesToAdd: Seq[String] = c.parseListOrGetEmpty("add", classOf[String]).asScala.toSeq
  val capabilitiesToDrop: Seq[String] = c.parseListOrGetEmpty("drop", classOf[String]).asScala.toSeq

  val capabilities = new KernelCapabilities()
  // Only set fields that were actually provided, so the API defaults apply otherwise.
  if (capabilitiesToAdd.nonEmpty) capabilities.setAdd(capabilitiesToAdd.asJava)
  if (capabilitiesToDrop.nonEmpty) capabilities.setDrop(capabilitiesToDrop.asJava)

  Optional.of(capabilities)
}
} 265 | 266 | protected def configureTmpfs(c: Config): Optional[Tmpfs] = { 267 | if (c.isEmpty) return Optional.absent() 268 | 269 | val containerPath: String = c.get("container_path", classOf[String]) 270 | val mountOptions: Seq[String] = c.parseListOrGetEmpty("mount_options", classOf[String]).asScala.toSeq 271 | val size: Int = c.get("size", classOf[Int]) 272 | 273 | val tmpfs: Tmpfs = new Tmpfs() 274 | tmpfs.setContainerPath(containerPath) 275 | if (mountOptions.nonEmpty) tmpfs.setMountOptions(mountOptions.asJava) 276 | tmpfs.setSize(size) 277 | 278 | Optional.of(tmpfs) 279 | } 280 | 281 | protected def configureLogConfiguration(c: Config): Optional[LogConfiguration] = { 282 | if (c.isEmpty) return Optional.absent() 283 | 284 | val logDriver 285 | : String = c.get("log_driver", classOf[String]) // Valid Values: json-file | syslog | journald | gelf | fluentd | awslogs | splunk | awsfirelens 286 | val options: Map[String, String] = c.getMapOrEmpty("options", classOf[String], classOf[String]).asScala.toMap 287 | 288 | val lc: LogConfiguration = new LogConfiguration() 289 | lc.setLogDriver(logDriver) 290 | if (options.nonEmpty) lc.setOptions(options.asJava) 291 | 292 | Optional.of(lc) 293 | } 294 | 295 | protected def configureFirelensConfiguration(c: Config): Optional[FirelensConfiguration] = { 296 | if (c.isEmpty) return Optional.absent() 297 | 298 | val firelensType: String = c.get("type", classOf[String]) // Valid Values: fluentd | fluentbit 299 | val options: Map[String, String] = c.getMapOrEmpty("options", classOf[String], classOf[String]).asScala.toMap 300 | 301 | val fc: FirelensConfiguration = new FirelensConfiguration() 302 | fc.setType(firelensType) 303 | if (options.nonEmpty) fc.setOptions(options.asJava) 304 | 305 | Optional.of(fc) 306 | } 307 | 308 | protected def configureMountPoint(c: Config): Optional[MountPoint] = { 309 | if (c.isEmpty) return Optional.absent() 310 | 311 | val containerPath: Optional[String] = c.getOptional("container_path", 
/** Builds an ECS [[PortMapping]] from one entry of the "port_mappings" list,
  * or absent when the entry config is empty.
  */
protected def configurePortMapping(c: Config): Optional[PortMapping] = {
  if (c.isEmpty) return Optional.absent()

  val portOnContainer: Optional[Int] = c.getOptional("container_port", classOf[Int])
  val portOnHost: Optional[Int] = c.getOptional("host_port", classOf[Int])
  val transportProtocol: Optional[String] = c.getOptional("protocol", classOf[String])

  val mapping = new PortMapping()
  if (portOnContainer.isPresent) mapping.setContainerPort(portOnContainer.get)
  if (portOnHost.isPresent) mapping.setHostPort(portOnHost.get)
  if (transportProtocol.isPresent) mapping.setProtocol(transportProtocol.get)

  Optional.of(mapping)
}
/** Builds an ECS [[Ulimit]] from one entry of the "ulimits" list, or absent
  * when the entry config is empty. All three fields are required by the API,
  * so they are read strictly (a missing key raises a config error).
  */
protected def configureUlimit(c: Config): Optional[Ulimit] = {
  if (c.isEmpty) return Optional.absent()

  // Keep the original read order so a missing-key error surfaces identically.
  val hardLimitValue: Int = c.get("hard_limit", classOf[Int])
  val limitName: String = c.get("name", classOf[String])
  val softLimitValue: Int = c.get("soft_limit", classOf[Int])

  val ulimit = new Ulimit()
  ulimit.setHardLimit(hardLimitValue)
  ulimit.setName(limitName)
  ulimit.setSoftLimit(softLimitValue)

  Optional.of(ulimit)
}
/** Builds an ECS [[TaskDefinitionPlacementConstraint]] from one entry of the
  * "placement_constraints" list, or absent when the entry config is empty.
  */
protected def configureTaskDefinitionPlacementConstraint(c: Config): Optional[TaskDefinitionPlacementConstraint] = {
  if (c.isEmpty) return Optional.absent()

  val constraintExpression: Optional[String] = c.getOptional("expression", classOf[String])
  val constraintType: Optional[String] = c.getOptional("type", classOf[String])

  val constraint = new TaskDefinitionPlacementConstraint()
  if (constraintExpression.isPresent) constraint.setExpression(constraintExpression.get)
  if (constraintType.isPresent) constraint.setType(constraintType.get)

  Optional.of(constraint)
}
/** Builds an ECS [[EFSAuthorizationConfig]] from the nested
  * "authorization_config" config, or absent when the config is empty.
  */
protected def configureEFSAuthorizationConfig(c: Config): Optional[EFSAuthorizationConfig] = {
  if (c.isEmpty) return Optional.absent()

  val efsAccessPointId: Optional[String] = c.getOptional("access_point_id", classOf[String])
  val iamSetting: Optional[String] = c.getOptional("iam", classOf[String])

  val authConfig = new EFSAuthorizationConfig()
  if (efsAccessPointId.isPresent) authConfig.setAccessPointId(efsAccessPointId.get)
  if (iamSetting.isPresent) authConfig.setIam(iamSetting.get)

  Optional.of(authConfig)
}
/** Builds ECS [[HostVolumeProperties]] (bind-mount source path on the host)
  * from the nested "host" config, or absent when the config is empty.
  */
protected def configureHostVolumeProperties(c: Config): Optional[HostVolumeProperties] = {
  if (c.isEmpty) return Optional.absent()

  val hostSourcePath: Optional[String] = c.getOptional("source_path", classOf[String])

  val hostProperties = new HostVolumeProperties()
  if (hostSourcePath.isPresent) hostProperties.setSourcePath(hostSourcePath.get)

  Optional.of(hostProperties)
}
/** Operator that downloads a JSON result object from S3 (produced by an ECS
  * task run via the command operators) and reflects it into the digdag task
  * result: "subtask_config", "export_params" and "store_params".
  *
  * The S3 URI comes from the operator's `_command` value.
  */
class EcsTaskResultOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
    extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {

  // URI of the JSON result object on S3, given as `_command`.
  val s3Uri: AmazonS3URI = AmazonS3UriWrapper(params.get("_command", classOf[String]))

  override def runTask(): TaskResult = {
    // Download the result object into a workspace temp file.
    val f: String = workspace.createTempFile("ecs_task.result", ".json")
    aws.withTransferManager { xfer =>
      val download: Download = xfer.download(s3Uri.getBucket, s3Uri.getKey, new File(f))
      download.waitForCompletion()
    }
    // FIX: close the Source after reading; the previous implementation never
    // closed it and leaked the underlying file handle.
    val source = Source.fromFile(f)
    val content: String =
      try source.getLines().mkString
      finally source.close()
    val data: Config = cf.fromJsonString(content)

    TaskResult
      .defaultBuilder(cf)
      .subtaskConfig(data.getNestedOrGetEmpty("subtask_config"))
      .exportParams(data.getNestedOrGetEmpty("export_params"))
      .storeParams(data.getNestedOrGetEmpty("store_params"))
      .build
  }
}
-------------------------------------------------------------------------------- 1 | package pro.civitaspo.digdag.plugin.ecs_task.run 2 | 3 | import com.amazonaws.services.ecs.model.{ 4 | AwsVpcConfiguration, 5 | CapacityProviderStrategyItem, 6 | ContainerOverride, 7 | Failure, 8 | KeyValuePair, 9 | NetworkConfiguration, 10 | PlacementConstraint, 11 | PlacementStrategy, 12 | RunTaskRequest, 13 | RunTaskResult, 14 | Tag, 15 | TaskOverride 16 | } 17 | import com.google.common.base.Optional 18 | import com.google.common.collect.ImmutableList 19 | import io.digdag.client.config.{Config, ConfigKey} 20 | import io.digdag.spi.{ImmutableTaskResult, OperatorContext, TaskResult, TemplateEngine} 21 | import io.digdag.util.DurationParam 22 | import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator 23 | import pro.civitaspo.digdag.plugin.ecs_task.exception.RetryTimeoutException 24 | 25 | import scala.jdk.CollectionConverters._ 26 | 27 | class EcsTaskRunInternalOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine) 28 | extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) { 29 | 30 | val capacityProviderStrategy: Seq[CapacityProviderStrategyItem] = 31 | params.parseListOrGetEmpty("capacity_provider_strategy", classOf[Config]).asScala.map(configureCapacityProviderStrategy).map(_.get).toSeq 32 | val cluster: String = params.get("cluster", classOf[String]) 33 | val count: Optional[Int] = params.getOptional("count", classOf[Int]) 34 | val group: Optional[String] = params.getOptional("group", classOf[String]) 35 | val launchType: Optional[String] = params.getOptional("launch_type", classOf[String]) 36 | val networkConfiguration: Optional[NetworkConfiguration] = configureNetworkConfiguration(params.parseNestedOrGetEmpty("network_configuration")) 37 | val overrides: Optional[TaskOverride] = configureTaskOverride(params.parseNestedOrGetEmpty("overrides")) 38 | 39 | val 
placementConstraints: Seq[PlacementConstraint] = 40 | params.parseListOrGetEmpty("placement_constraints", classOf[Config]).asScala.map(configurePlacementConstraint).map(_.get).toSeq 41 | 42 | val placementStrategy: Seq[PlacementStrategy] = 43 | params.parseListOrGetEmpty("placement_strategy", classOf[Config]).asScala.map(configurePlacementStrategy).map(_.get).toSeq 44 | val platformVersion: Optional[String] = params.getOptional("platform_version", classOf[String]) 45 | val startedBy: Optional[String] = params.getOptional("started_by", classOf[String]) 46 | 47 | val tags: Seq[Tag] = 48 | params.getMapOrEmpty("tags", classOf[String], classOf[String]).asScala.map((t: (String, String)) => new Tag().withKey(t._1).withValue(t._2)).toSeq 49 | val taskDefinition: String = params.get("task_definition", classOf[String]) // generated by ecs_task.register> operator if not set. 50 | 51 | val runRequestRetryInterval: DurationParam = params.get("run_request_retry_interval", classOf[DurationParam], DurationParam.parse("5s")) 52 | val runRequestRetryTimeout: DurationParam = params.get("run_request_retry_timeout", classOf[DurationParam], DurationParam.parse("5m")) 53 | 54 | protected def buildRunTaskRequest(): RunTaskRequest = { 55 | val req: RunTaskRequest = new RunTaskRequest() 56 | 57 | if (capacityProviderStrategy.nonEmpty) req.setCapacityProviderStrategy(capacityProviderStrategy.asJava) 58 | req.setCluster(cluster) 59 | if (count.isPresent) req.setCount(count.get) 60 | if (group.isPresent) req.setGroup(group.get) 61 | if (launchType.isPresent) req.setLaunchType(launchType.get) 62 | if (networkConfiguration.isPresent) req.setNetworkConfiguration(networkConfiguration.get) 63 | if (overrides.isPresent) req.setOverrides(overrides.get) 64 | if (placementConstraints.nonEmpty) req.setPlacementConstraints(placementConstraints.asJava) 65 | if (placementStrategy.nonEmpty) req.setPlacementStrategy(placementStrategy.asJava) 66 | if (platformVersion.isPresent) 
/** Builds an ECS [[CapacityProviderStrategyItem]] from one entry of the
  * "capacity_provider_strategy" list, or absent when the entry config is empty.
  */
protected def configureCapacityProviderStrategy(c: Config): Optional[CapacityProviderStrategyItem] = {
  if (c.isEmpty) return Optional.absent()

  val baseTaskCount: Optional[Int] = c.getOptional("base", classOf[Int])
  val providerName: Optional[String] = c.getOptional("capacity_provider", classOf[String])
  val relativeWeight: Optional[Int] = c.getOptional("weight", classOf[Int])

  val strategyItem = new CapacityProviderStrategyItem()
  if (baseTaskCount.isPresent) strategyItem.setBase(baseTaskCount.get)
  if (providerName.isPresent) strategyItem.setCapacityProvider(providerName.get)
  if (relativeWeight.isPresent) strategyItem.setWeight(relativeWeight.get)

  Optional.of(strategyItem)
}
/** Builds an ECS [[TaskOverride]] from the nested "overrides" config
  * (container overrides plus execution/task role ARNs), or absent when
  * the config is empty.
  */
protected def configureTaskOverride(c: Config): Optional[TaskOverride] = {
  if (c.isEmpty) return Optional.absent()

  val perContainerOverrides: Seq[ContainerOverride] =
    c.parseListOrGetEmpty("container_overrides", classOf[Config]).asScala.map(configureContainerOverride).map(_.get).toSeq
  val executionRole: Optional[String] = c.getOptional("execution_role_arn", classOf[String])
  val taskRole: Optional[String] = c.getOptional("task_role_arn", classOf[String])

  val taskOverride = new TaskOverride()
  if (perContainerOverrides.nonEmpty) taskOverride.setContainerOverrides(perContainerOverrides.asJava)
  if (executionRole.isPresent) taskOverride.setExecutionRoleArn(executionRole.get)
  if (taskRole.isPresent) taskOverride.setTaskRoleArn(taskRole.get)

  Optional.of(taskOverride)
}
/** Builds an ECS [[PlacementStrategy]] from one entry of the
  * "placement_strategy" list, or absent when the entry config is empty.
  */
protected def configurePlacementStrategy(c: Config): Optional[PlacementStrategy] = {
  if (c.isEmpty) return Optional.absent()

  val strategyField: Optional[String] = c.getOptional("field", classOf[String])
  val strategyType: Optional[String] = c.getOptional("type", classOf[String])

  val strategy = new PlacementStrategy()
  if (strategyField.isPresent) strategy.setField(strategyField.get)
  if (strategyType.isPresent) strategy.setType(strategyType.get)

  Optional.of(strategy)
}
builder.storeParams(paramsToStore)
    builder.build()
  }

  // Calls ECS RunTask and retries while RunTask reports placement failures
  // (e.g. no container instance with enough capacity), until
  // `runRequestRetryTimeout` elapses. Returns the first failure-free result.
  protected def requestTaskRun(req: RunTaskRequest): RunTaskResult = {
    val startAtMillis: Long = System.currentTimeMillis()
    def timeSpentSec(): Long = (System.currentTimeMillis() - startAtMillis) / 1000

    // Tail-recursive retry loop; annotated so the compiler guarantees constant stack.
    @scala.annotation.tailrec
    def runUntilSuccess(numRetry: Int = 0): RunTaskResult = {
      val lastResult: RunTaskResult = aws.withEcs(_.runTask(req))
      val failures: Seq[Failure] = lastResult.getFailures.asScala.toSeq
      if (failures.isEmpty) return lastResult

      failures.foreach { f =>
        logger.warn(s"RunTask(${req.getTaskDefinition}, Retry: $numRetry, TimeSpent: ${timeSpentSec()}s) Failed -- Arn: ${f.getArn}, Reason: ${f.getReason}")
      }
      // Give up once the retry budget is exhausted.
      if (runRequestRetryTimeout.getDuration.minusSeconds(timeSpentSec()).isNegative)
        throw new RetryTimeoutException(
          s"RunTask(${req.getTaskDefinition}, Retry: $numRetry, TimeSpent: ${timeSpentSec()}s) Failed -- [${failures.map(_.toString).mkString(",")}]"
        )

      logger.info(s"Sleep $runRequestRetryInterval. (Retry: $numRetry, TimeSpent: ${timeSpentSec()}s)")
      Thread.sleep(runRequestRetryInterval.getDuration.toMillis)
      logger.info(s"Wake up. (Retry: $numRetry, TimeSpent: ${timeSpentSec()}s)")

      runUntilSuccess(numRetry = numRetry + 1)
    }
    runUntilSuccess()
  }
}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/run/EcsTaskRunOperator.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.run

import com.google.common.base.Optional
import io.digdag.client.config.Config
import io.digdag.spi.{OperatorContext, TaskResult, TemplateEngine}
import io.digdag.util.DurationParam
import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator

// Operator for `ecs_task.run>`. It performs no ECS work itself; instead it expands
// into a chain of internal sub-tasks: +register (only when an inline `def` is given)
// -> +run -> +wait -> +result (only when `result_s3_uri` is given).
class EcsTaskRunOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
    extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {

  val cluster: String = params.get("cluster", classOf[String])
  // Optional inline task definition registered via `ecs_task.register` before running.
  val taskDef: Optional[Config] = params.getOptionalNested("def")
  // Optional S3 location that `ecs_task.result` reads the task result from.
  val resultS3Uri: Optional[String] = params.getOptional("result_s3_uri", classOf[String])
  val timeout: DurationParam = params.get("timeout", classOf[DurationParam], DurationParam.parse("15m"))
  val pollingStrategy: Optional[Config] = params.getOptionalNested("polling_strategy")

  override def runTask(): TaskResult = {
    val subTasks: Config = cf.create()
    if (taskDef.isPresent) subTasks.setNested("+register", ecsTaskRegisterSubTask())
    subTasks.setNested("+run", ecsTaskRunInternalSubTask())
    subTasks.setNested("+wait", ecsTaskWaitSubTask())
    if (resultS3Uri.isPresent) subTasks.setNested("+result", ecsTaskResultSubTask())

    val builder = TaskResult.defaultBuilder(cf)
    builder.subtaskConfig(subTasks)
    builder.build()
  }

  // Sub-task registering the inline task definition. Only generated when `def` is present.
  protected def ecsTaskRegisterSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.register")
      subTask.set("_command", taskDef.get) // guarded by taskDef.isPresent at the call site
    }
  }

  // Sub-task that actually calls ECS RunTask. All operator params are exported to it
  // except the ones this operator consumes at its own level.
  protected def ecsTaskRunInternalSubTask(): Config = {
    val config: Config = params.deepCopy()
    // FIX: the original list removed only the stale key "result_s3_uri_prefix", but the
    // param this operator actually declares is "result_s3_uri", so it leaked into the
    // sub-task's _export. Remove both (the stale key is kept for safety).
    Seq("def", "result_s3_uri", "result_s3_uri_prefix", "timeout").foreach(config.remove)
    if (taskDef.isPresent) {
      // The arn only exists after the +register sub-task runs, so it is referenced
      // through a template expression instead of a literal value.
      if (config.has("last_ecs_task_register")) config.remove("last_ecs_task_register")
      config.set("task_definition", "${last_ecs_task_register.task_definition_arn}")
    }
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.run_internal")
      subTask.set("_export", config)
    }
  }

  // Sub-task waiting for the started tasks; task arns are produced by +run at runtime.
  protected def ecsTaskWaitSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.wait")
      subTask.set("cluster", cluster)
      subTask.set("tasks", "${last_ecs_task_run.task_arns}")
      subTask.set("timeout", timeout.toString)
      if (pollingStrategy.isPresent) subTask.set("polling_strategy", pollingStrategy.get())
    }
  }

  // Sub-task fetching the stored result from S3. Only generated when `result_s3_uri` is present.
  protected def ecsTaskResultSubTask(): Config = {
    withDefaultSubTask { subTask =>
      subTask.set("_type", "ecs_task.result")
      subTask.set("_command", resultS3Uri.get)
    }
  }

  // Creates a sub-task config pre-populated with this operator's AWS auth/region
  // settings so every generated sub-task targets the same account/endpoint.
  protected def withDefaultSubTask(f: Config => Unit): Config = {
    val subTask: Config = cf.create()

    subTask.set("auth_method", aws.conf.authMethod)
    subTask.set("profile_name", aws.conf.profileName)
    if (aws.conf.profileFile.isPresent) subTask.set("profile_file", aws.conf.profileFile.get())
    subTask.set("use_http_proxy", aws.conf.useHttpProxy)
    if (aws.conf.region.isPresent) subTask.set("region", aws.conf.region.get())
    if (aws.conf.endpoint.isPresent) subTask.set("endpoint", aws.conf.endpoint.get())

    f(subTask)
    subTask
  }
}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/sh/EcsTaskShOperatar.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.sh

import com.fasterxml.jackson.databind.JsonNode
import io.digdag.client.config.Config
import io.digdag.spi.{OperatorContext, TemplateEngine}
import pro.civitaspo.digdag.plugin.ecs_task.aws.AmazonS3UriWrapper
import pro.civitaspo.digdag.plugin.ecs_task.command.{AbstractEcsTaskCommandOperator, TmpStorage}

import scala.jdk.CollectionConverters._
import scala.io.Source
import scala.util.Using

// Operator for `ecs_task.sh>`: renders a run.sh from a bundled template and stages it
// (plus the workspace) into the tmp storage for execution inside the ECS container.
// NOTE: the class keeps the historical "Operatar" spelling — it is the registered
// public name, so renaming it would break callers.
class EcsTaskShOperatar(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
    extends AbstractEcsTaskCommandOperator(operatorName, context, systemConfig, templateEngine) {

  // Classpath location of the run.sh template shipped with the plugin.
  private val runShResourcePath: String = "/pro/civitaspo/digdag/plugin/ecs_task/sh/run.sh"
  override protected val mainScriptName: String = "run.sh"

  // The shell command line to execute inside the container.
  protected val command: String = params.get("_command", classOf[String])

  override def prepare(tmpStorage: TmpStorage): Unit = {
    tmpStorage.stageFile(mainScriptName, createRunShContent(tmpStorage))
    tmpStorage.stageWorkspace()
    tmpStorage.storeStagedFiles()
  }

  // Renders the run.sh template with the S3 location, exported env vars and the command.
  protected def createRunShContent(tmpStorage: TmpStorage): String = {
    val location = AmazonS3UriWrapper(tmpStorage.getLocation) // parse the S3 URI once instead of twice
    val dup: Config = params.deepCopy()
    dup.set("ECS_TASK_SH_BUCKET", location.getBucket)
    dup.set("ECS_TASK_SH_PREFIX", location.getKey)
    dup.set("ECS_TASK_SH_EXPORT_ENV", convertParamsAsEnv().map { case (k: String, v: String) => s"$k=$v" }.mkString(" "))
    dup.set("ECS_TASK_SH_COMMAND", command.stripLineEnd)

    Using.resource(classOf[EcsTaskShOperatar].getResourceAsStream(runShResourcePath)) { is =>
      val runShContentTemplate: String = Source.fromInputStream(is).mkString
      templateEngine.template(runShContentTemplate, dup)
    }
  }

  // Converts params into KEY=VALUE pairs to be exported as shell environment variables.
  // Keys rejected by `isValidEnvKey` (inherited — presumably filters names that are not
  // valid shell identifiers; TODO confirm against AbstractEcsTaskCommandOperator) are
  // skipped with a log line.
  protected def convertParamsAsEnv(params: Config = params): Map[String, String] = {
    val keys: Seq[String] = params.getKeys.asScala.toSeq
    keys.foldLeft(Map.empty[String, String]) { (env, key) =>
      if (isValidEnvKey(key)) {
        val jn: JsonNode = params.getInternalObjectNode.get(key)
        // Textual values are double-quoted (inner quotes escaped) so they survive shell
        // word-splitting; everything else is emitted as raw JSON.
        val v: String =
          if (jn.isTextual) s""""${jn.textValue().replace("\"", "\\\"")}""""
          else jn.toString
        env + (key -> v) // idiom fix: was `env ++ Map(key -> v)`, which allocated a one-entry Map per step
      }
      else {
        logger.info(s"$key is invalid env key.")
        env
      }
    }
  }
}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/util/WorkspaceWithTempDir.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.util

import java.nio.file.{Files, Path}

import io.digdag.util.Workspace
import org.apache.commons.io.FileUtils

import scala.util.Random

// ref. 
// https://github.com/muga/digdag/blob/aff3dfab0b91aa6787d7921ce34d5b3b21947c20/digdag-plugin-utils/src/main/java/io/digdag/util/Workspace.java#L84-L95
// Loan-pattern helper: creates a scratch directory under <project>/.digdag/tmp,
// hands it to `f`, and always deletes it recursively afterwards — even when `f` throws.
object WorkspaceWithTempDir {

  def apply[T](workspace: Workspace)(f: Path => T): T = {
    val baseDir: Path = workspace.getProjectPath.resolve(".digdag/tmp")
    Files.createDirectories(baseDir)
    // A random prefix keeps concurrently running tasks from colliding on a name;
    // createTempDirectory adds its own unique suffix on top of it.
    val prefix: String = Random.alphanumeric.take(10).mkString
    val scratch: Path = Files.createTempDirectory(baseDir, prefix)
    try f(scratch)
    finally FileUtils.deleteDirectory(scratch.toFile)
  }

}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/wait/EcsTaskWaitOperator.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.wait

import com.amazonaws.services.ecs.model.{DescribeTasksRequest, DescribeTasksResult, Failure, StopTaskRequest}
import com.google.common.base.Throwables
import io.digdag.client.config.Config
import io.digdag.spi.{OperatorContext, TaskResult, TemplateEngine}
import io.digdag.util.DurationParam
import pro.civitaspo.digdag.plugin.ecs_task.AbstractEcsTaskOperator

import scala.jdk.CollectionConverters._

class EcsTaskWaitOperator(operatorName: String, context: OperatorContext, systemConfig: Config, templateEngine: TemplateEngine)
    extends AbstractEcsTaskOperator(operatorName, context, systemConfig, templateEngine) {

  val cluster: String = params.get("cluster", classOf[String])
  val tasks: Seq[String] = params.parseList("tasks", classOf[String]).asScala.toSeq
  val timeout: DurationParam = params.get("timeout", classOf[DurationParam], DurationParam.parse("15m"))
  val condition: String = params.get("condition", classOf[String], "all")
  val status: String = params.get("status", classOf[String], "STOPPED")
  val ignoreFailure: Boolean = 
params.get("ignore_failure", classOf[Boolean], false)
  val ignoreExitCode: Boolean = params.get("ignore_exit_code", classOf[Boolean], false)
  val pollingStrategy: Config = params.getNestedOrGetEmpty("polling_strategy")

  // Waits until `condition` ("all"/"any") of the given tasks reach `status`, then
  // inspects failures and container exit codes. On wait failure (e.g. timeout) the
  // tasks are stopped best-effort before the error is rethrown.
  override def runTask(): TaskResult = {
    val req: DescribeTasksRequest = new DescribeTasksRequest()
      .withCluster(cluster)
      .withTasks(tasks: _*)

    aws.withEcs { ecs =>
      val waiter: EcsTaskWaiter =
        EcsTaskWaiter(logger = logger, ecs = ecs, timeout = timeout, condition = condition, status = status, pollingStrategy = pollingStrategy)
      try {
        waiter.wait(req)
      }
      catch {
        case e: Throwable =>
          // Best-effort cleanup: stop every task so none keeps running after the wait
          // gave up, then rethrow the original error.
          logger.warn(s"Stop tasks: tasks=[${tasks.mkString(",")}] reason=${e.getMessage}")
          tasks.foreach { t =>
            try ecs.stopTask(new StopTaskRequest().withCluster(cluster).withTask(t).withReason(e.getMessage))
            catch {
              // fix: renamed so the inner exception no longer shadows the outer `e`
              case stopError: Throwable => logger.warn(s"Failed to stop task: task=${t}, reason=${stopError.getMessage}")
            }
          }
          // NOTE(review): Throwables.propagate is deprecated in recent Guava
          // (throwIfUnchecked + wrap is the replacement); kept as-is since the
          // bundled Guava version is not visible here.
          throw Throwables.propagate(e)
      }
      finally {
        waiter.shutdown()
      }
    }
    // Re-describe after a successful wait to evaluate failures and exit codes.
    val result: DescribeTasksResult = aws.withEcs(_.describeTasks(req))
    val failures: Seq[Failure] = result.getFailures.asScala.toSeq
    if (failures.nonEmpty) {
      val failureMessages: String = failures.map(_.toString).mkString(", ")
      if (!ignoreFailure) throw new IllegalStateException(s"Some tasks are failed: [$failureMessages]")
      else logger.warn(s"Some tasks are failed but ignore them: $failureMessages")
    }

    val failedMessages = Seq.newBuilder[String]
    result.getTasks.asScala.foreach { task =>
      task.getContainers.asScala.foreach { container =>
        Option(container.getExitCode) match {
          case Some(code) =>
            val message = s"[${task.getTaskArn}] ${container.getName} has stopped with exit_code=$code"
            logger.info(message)
            if (!code.equals(0)) {
              if (!ignoreExitCode) failedMessages += message
              else logger.warn(s"Ignore failures because of ignore_exit_code=true: $message")
            }
          case None =>
            // No exit code means the container never ran to completion; always a failure.
            val message =
              s"[${task.getTaskArn}] ${container.getName} has stopped without exit_code: reason=${container.getReason}, task_stopped_reason=${task.getStoppedReason}"
            logger.info(message)
            failedMessages += message
        }
      }
    }
    // fix: build the result Seq once instead of calling .result() twice on the builder
    val collectedFailures: Seq[String] = failedMessages.result()
    if (collectedFailures.nonEmpty) {
      val message: String = collectedFailures.mkString(", ")
      if (!ignoreFailure) throw new IllegalStateException(s"Failure messages: $message")
      else logger.warn(s"Some tasks are failed but ignore them: $message")
    }

    TaskResult.empty(cf)
  }
}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/wait/EcsTaskWaiter.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.wait

import java.util.concurrent.{ExecutorService, Executors}

import com.amazonaws.services.ecs.AmazonECS
import com.amazonaws.services.ecs.model.{DescribeTasksRequest, DescribeTasksResult}
import com.amazonaws.services.ecs.waiters.DescribeTasksFunction
import com.amazonaws.waiters.PollingStrategy.DelayStrategy
import com.amazonaws.waiters._
import io.digdag.client.config.{Config, ConfigException}
import io.digdag.util.DurationParam
import org.slf4j.Logger

import scala.jdk.CollectionConverters._

// Wraps an AWS SDK waiter that polls DescribeTasks until the desired task status is
// reached, with a configurable polling strategy (constant or exponential intervals).
case class EcsTaskWaiter(
    logger: Logger,
    ecs: AmazonECS,
    executorService: ExecutorService = Executors.newFixedThreadPool(50),
    timeout: DurationParam,
    condition: String,
    status: String,
    pollingStrategy: Config
) {

  sealed trait IntervalType {
    def value: String = toString
  }

  object IntervalType {
    case object constant extends IntervalType
    case object exponential extends 
IntervalType
    private val values = Seq(constant, exponential)

    def from(value: String): IntervalType =
      values.find(_.value == value).getOrElse {
        // FIX: inside a triple-quoted interpolation `\"` is NOT an escape sequence, so
        // the original message printed literal backslashes. Plain quotes need no
        // escaping in a triple-quoted string.
        val message: String = s"""interval_type: "$value" is not supported. Available `interval_type`s are "constant", "exponential"."""
        throw new ConfigException(message)
      }
  }

  // Polling configuration; defaults: unlimited attempts, 1s interval, constant delay.
  val limit: Int = pollingStrategy.get("limit", classOf[Int], Int.MaxValue)
  val interval: Int = pollingStrategy.get("interval", classOf[Int], 1)
  val intervalType: String = pollingStrategy.get("interval_type", classOf[String], IntervalType.constant.value)

  private def delayStrategy: DelayStrategy =
    IntervalType.from(intervalType) match {
      case IntervalType.constant =>
        new FixedDelayStrategy(interval)
      case IntervalType.exponential =>
        new ExponentialBackoffDelayStrategy(interval)
    }

  // NOTE: intentionally overloads AnyRef#wait with a different signature.
  def wait(req: DescribeTasksRequest): Unit = {
    newWaiter().run(new WaiterParameters[DescribeTasksRequest]().withRequest(req))
  }

  def shutdown(): Unit = {
    executorService.shutdown()
  }

  private def newWaiter(): Waiter[DescribeTasksRequest] = {
    new WaiterBuilder[DescribeTasksRequest, DescribeTasksResult]
      .withSdkFunction(new DescribeTasksFunction(ecs))
      .withAcceptors(newAcceptor())
      .withDefaultPollingStrategy(newPollingStrategy())
      .withExecutorService(executorService)
      .build()
  }

  // Acceptor that succeeds when `condition` ("all"/"any") of the described tasks have
  // reached `status`, and enforces the overall timeout by throwing from matches().
  private def newAcceptor(): WaiterAcceptor[DescribeTasksResult] = {
    val startAt: Long = System.currentTimeMillis()

    new WaiterAcceptor[DescribeTasksResult] {
      override def matches(output: DescribeTasksResult): Boolean = {
        val waitingMillis: Long = System.currentTimeMillis() - startAt
        logger.info(
          s"Waiting ${waitingMillis}ms for that $condition tasks [${output.getTasks.asScala.map(t => s"${t.getTaskArn}:${t.getLastStatus}").mkString(",")}] become $status."
        )
        if (waitingMillis > timeout.getDuration.toMillis) {
          throw new WaiterTimedOutException(s"Reached timeout ${timeout.getDuration.toMillis}ms without transitioning to the desired state '$status'.")
        }

        condition match {
          case "all" => output.getTasks.asScala.forall(t => t.getLastStatus.equals(status))
          case "any" => output.getTasks.asScala.exists(t => t.getLastStatus.equals(status))
          case _     => throw new ConfigException(s"condition: $condition is unsupported.")
        }
      }
      override def getState: WaiterState = WaiterState.SUCCESS
    }
  }

  private def newPollingStrategy(): PollingStrategy = {
    new PollingStrategy(new MaxAttemptsRetryStrategy(limit), delayStrategy)
  }

}
--------------------------------------------------------------------------------
/src/main/scala/pro/civitaspo/digdag/plugin/ecs_task/wait/ExponentialBackoffDelayStrategy.scala:
--------------------------------------------------------------------------------
package pro.civitaspo.digdag.plugin.ecs_task.wait

import com.amazonaws.waiters.PollingStrategy.DelayStrategy
import com.amazonaws.waiters.PollingStrategyContext

// Sleeps interval * 2^retriesAttempted seconds before the next poll.
class ExponentialBackoffDelayStrategy(interval: Int) extends DelayStrategy {

  override def delayBeforeNextRetry(pollingStrategyContext: PollingStrategyContext): Unit = {
    val nextDurationSec =
      interval * Math.pow(2, pollingStrategyContext.getRetriesAttempted)

    // FIX: was `nextDurationSec.toLong * 1000`, which overflows to a negative Long
    // once the Double saturates at Long.MaxValue, making Thread.sleep throw
    // IllegalArgumentException. Multiplying before truncating keeps the Double->Long
    // conversion saturating safely at Long.MaxValue; identical for all normal values.
    Thread.sleep((nextDurationSec * 1000).toLong)
  }
}
--------------------------------------------------------------------------------