├── .github └── workflows │ ├── codeql.yml │ ├── create-ami.yml │ ├── deploy.yml │ ├── test.yml │ └── tfsec.yml ├── .gitignore ├── .secrets.baseline ├── LICENSE ├── Makefile ├── README.md ├── client ├── .eslintrc.json ├── .nvmrc ├── README.md ├── action.yml ├── dist │ ├── index.js │ ├── index.js.map │ └── sourcemap-register.js ├── index.js ├── package-lock.json ├── package.json └── wait.js ├── docs └── development.md ├── garo_terraform ├── .terraform-version ├── aws.tf ├── lambda.tf ├── lb.tf ├── security-groups.tf ├── terraform.tf ├── variables.tf └── vpc.tf ├── github.py ├── http_helper.py ├── lambda_handler.py ├── requirements-dev.txt ├── requirements.txt ├── scripts ├── amazon_linux_ec2_ami_build.sh ├── amazon_linux_ec2_template.sh ├── create_ami.sh ├── install_headless_chrome.sh └── instance_watcher.sh ├── terraform_module ├── aws.tf ├── ec2_role.tf ├── iam.tf ├── outputs.tf └── variables.tf ├── tests └── fixtures │ └── example.json ├── utils.py ├── wrangling_ec2.py └── wrestling_sts.py /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "Code scanning - action" 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 09 * * 3' 7 | 8 | jobs: 9 | CodeQL-Build: 10 | 11 | # CodeQL runs on ubuntu-latest and windows-latest 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout repository 16 | uses: actions/checkout@v2 17 | with: 18 | # We must fetch at least the immediate parents so that if this is 19 | # a pull request then we can checkout the head. 20 | fetch-depth: 2 21 | 22 | # If this run was triggered by a pull request event, then checkout 23 | # the head of the pull request instead of the merge commit. 24 | - run: git checkout HEAD^2 25 | if: ${{ github.event_name == 'pull_request' }} 26 | 27 | # Initializes the CodeQL tools for scanning. 28 | - name: Initialize CodeQL 29 | uses: github/codeql-action/init@v1 30 | # Override language selection by uncommenting this and choosing your languages 31 | # with: 32 | # languages: go, javascript, csharp, python, cpp, java 33 | 34 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 35 | # If this step fails, then you should remove it and run the build manually (see below) 36 | - name: Autobuild 37 | uses: github/codeql-action/autobuild@v1 38 | 39 | # ℹ️ Command-line programs to run using the OS shell. 40 | # 📚 https://git.io/JvXDl 41 | 42 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 43 | # and modify them (or add more) to build your code if your project 44 | # uses a compiled language 45 | 46 | #- run: | 47 | # make bootstrap 48 | # make release 49 | 50 | - name: Perform CodeQL Analysis 51 | uses: github/codeql-action/analyze@v1 52 | -------------------------------------------------------------------------------- /.github/workflows/create-ami.yml: -------------------------------------------------------------------------------- 1 | name: Create AMI 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 09 * * 5' 7 | 8 | jobs: 9 | get-spot-runner: 10 | environment: aws_dev 11 | runs-on: ubuntu-latest 12 | name: Get a spot runner using prod API 13 | outputs: 14 | name: ${{ steps.garoclient-spot.outputs.name }} 15 | runnerstate: ${{ steps.garoclient-spot.outputs.runnerstate }} 16 | uniqueid: ${{ steps.garoclient-spot.outputs.uniqueid }} 17 | steps: 18 | - name: Get runner 19 | uses: alphagov/github-actions-runner-orchestration/client@main 20 | id: garoclient-spot 21 | with: 22 | ACTION: 'start' 23 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 24 | RUNNER_TYPE: 'spot' 25 | REPO: '${{ github.repository }}' 26 | GITHUB_COMMIT: '${{ github.sha }}' 27 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 28 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 29 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 30 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 31 | GARO_URL: 'https://prod.co-cdio-garo.digital' 32 | RUNNER_TIMEOUT: '3600' 33 | RUNNER_LABEL: 'prod' 34 | 35 | - name: Output runner details 36 | run: | 37 | echo "Name: ${{ steps.garoclient-spot.outputs.name }}" 38 | echo "State: ${{ steps.garoclient-spot.outputs.runnerstate }}" 39 | echo "UniqueID: ${{ steps.garoclient-spot.outputs.uniqueid }}" 40 | 41 | create-ami: 42 | environment: aws_dev 43 | name: Deploy an instance with the build only 44 | needs: get-spot-runner 45 | runs-on: [self-hosted, prod, "${{ needs.get-spot-runner.outputs.uniqueid }}"] 46 | defaults: 47 | run: 48 | shell: bash 49 | steps: 50 | - name: Checkout 51 | uses: actions/checkout@v2 52 | with: 53 | path: main 54 | - run: | 55 | source ~/.bash_profile && source ~/.bashrc 56 | 57 | echo "Runner Unique ID: ${{ needs.get-spot-runner.outputs.uniqueid }}" 58 | cd main/ 59 | export SUBNETID=${{ secrets.RUNNER_SUBNET }} \ 60 | && export SECURITYG=${{ secrets.RUNNER_SG }} \ 61 | && scripts/create_ami.sh 62 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Test and deploy to staging and production 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | get-main-deploy-runner: 8 | environment: aws_dev 9 | runs-on: ubuntu-latest 10 | name: Get the main deploy runner using prod API 11 | outputs: 12 | name: ${{ steps.garoclient-main.outputs.name }} 13 | runnerstate: ${{ steps.garoclient-main.outputs.runnerstate }} 14 | uniqueid: ${{ steps.garoclient-main.outputs.uniqueid }} 15 | steps: 16 | - name: Get runner 17 | uses: alphagov/github-actions-runner-orchestration/client@main 18 | id: garoclient-main 19 | with: 20 | ACTION: 'start' 21 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 22 | RUNNER_TYPE: 'ondemand' 23 | REPO: '${{ github.repository }}' 24 | GITHUB_COMMIT: '${{ github.sha }}' 25 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 26 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 27 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 28 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 29 | GARO_URL: 'https://prod.co-cdio-garo.digital' 30 | RUNNER_TIMEOUT: '3600' 31 | RUNNER_LABEL: 'prod' 32 | 33 | - name: Output runner details 34 | run: | 35 | echo "Name: ${{ steps.garoclient-main.outputs.name }}" 36 | echo "State: ${{ steps.garoclient-main.outputs.runnerstate }}" 37 | echo "UniqueID: ${{ steps.garoclient-main.outputs.uniqueid }}" 38 | 39 | deploy-to-staging: 40 | name: Deploy to staging 41 | needs: get-main-deploy-runner 42 | runs-on: [self-hosted, prod, "${{ needs.get-main-deploy-runner.outputs.uniqueid }}"] 43 | defaults: 44 | run: 45 | shell: bash 46 | steps: 47 | - name: Checkout 48 | uses: actions/checkout@v2 49 | with: 50 | path: main 51 | - run: | 52 | source ~/.bash_profile 53 | 54 | nvm install 12.21.0 55 | 56 | cd main/ 57 | make test-full 58 | make build-full 59 | 60 | cd garo_terraform/ 61 | tfenv install 62 | tfenv use 63 | terraform init | awsredact 64 | terraform validate | awsredact 65 | terraform workspace list 66 | terraform workspace select staging 67 | terraform apply -auto-approve | awsredact 68 | 69 | sleep 5 70 | 71 | get-test-staging-runner: 72 | name: Start a runner using the staging API 73 | needs: deploy-to-staging 74 | environment: aws_dev 75 | runs-on: ubuntu-latest 76 | outputs: 77 | name: ${{ steps.garoclient-staging-test.outputs.name }} 78 | runnerstate: ${{ steps.garoclient-staging-test.outputs.runnerstate }} 79 | uniqueid: ${{ steps.garoclient-staging-test.outputs.uniqueid }} 80 | steps: 81 | - name: Get runner 82 | uses: alphagov/github-actions-runner-orchestration/client@main 83 | id: garoclient-staging-test 84 | with: 85 | ACTION: 'start' 86 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 87 | RUNNER_TYPE: 'spot' 88 | REPO: '${{ github.repository }}' 89 | GITHUB_COMMIT: '${{ github.sha }}' 90 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 91 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 92 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 93 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 94 | GARO_URL: 'https://staging.co-cdio-garo.digital' 95 | RUNNER_TIMEOUT: '900' 96 | RUNNER_LABEL: 'stagingtest' 97 | 98 | - name: Output runner details 99 | run: | 100 | echo "Name: ${{ steps.garoclient-staging-test.outputs.name }}" 101 | echo "State: ${{ steps.garoclient-staging-test.outputs.runnerstate }}" 102 | echo "UniqueID: ${{ steps.garoclient-staging-test.outputs.uniqueid }}" 103 | 104 | use-staging-runner: 105 | name: Test the staging runner 106 | needs: get-test-staging-runner 107 | runs-on: [self-hosted, stagingtest, "${{ needs.get-test-staging-runner.outputs.uniqueid }}"] 108 | defaults: 109 | run: 110 | shell: bash 111 | steps: 112 | - name: Checkout 113 | uses: actions/checkout@v2 114 | with: 115 | path: main 116 | - run: | 117 | source ~/.bash_profile 118 | ls -lah main 119 | aws sts get-caller-identity | awsredact 120 | docker run hello-world 121 | 122 | deploy-to-prod: 123 | name: Deploy to production 124 | needs: [get-main-deploy-runner, use-staging-runner] 125 | runs-on: [self-hosted, prod, "${{ needs.get-main-deploy-runner.outputs.uniqueid }}"] 126 | defaults: 127 | run: 128 | shell: bash 129 | steps: 130 | - name: Checkout 131 | uses: actions/checkout@v2 132 | with: 133 | path: main 134 | - run: | 135 | source ~/.bash_profile 136 | 137 | nvm install 12.21.0 138 | 139 | cd main/ 140 | make test-full 141 | make build-full 142 | 143 | cd garo_terraform/ 144 | tfenv install 145 | tfenv use 146 | terraform init | awsredact 147 | terraform validate | awsredact 148 | terraform workspace list 149 | terraform workspace select prod 150 | terraform apply -auto-approve | awsredact 151 | 152 | sleep 5 153 | 154 | get-test-prod-runner: 155 | environment: aws_dev 156 | needs: deploy-to-prod 157 | runs-on: ubuntu-latest 158 | name: Start a runner using the prod API 159 | outputs: 160 | name: ${{ steps.garoclient-prod-test.outputs.name }} 161 | runnerstate: ${{ steps.garoclient-prod-test.outputs.runnerstate }} 162 | uniqueid: ${{ steps.garoclient-prod-test.outputs.uniqueid }} 163 | steps: 164 | - name: Get runner 165 | uses: alphagov/github-actions-runner-orchestration/client@main 166 | id: garoclient-prod-test 167 | with: 168 | ACTION: 'start' 169 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 170 | RUNNER_TYPE: 'spot' 171 | REPO: '${{ github.repository }}' 172 | GITHUB_COMMIT: '${{ github.sha }}' 173 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 174 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 175 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 176 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 177 | GARO_URL: 'https://prod.co-cdio-garo.digital' 178 | RUNNER_TIMEOUT: '900' 179 | RUNNER_LABEL: 'prodtest' 180 | 181 | - name: Output runner details 182 | run: | 183 | echo "Name: ${{ steps.garoclient-prod-test.outputs.name }}" 184 | echo "State: ${{ steps.garoclient-prod-test.outputs.runnerstate }}" 185 | echo "UniqueID: ${{ steps.garoclient-prod-test.outputs.uniqueid }}" 186 | 187 | use-prod-runner: 188 | name: Test the production runner 189 | needs: get-test-prod-runner 190 | runs-on: [self-hosted, prodtest, "${{ needs.get-test-prod-runner.outputs.uniqueid }}"] 191 | defaults: 192 | run: 193 | shell: bash 194 | steps: 195 | - name: Checkout 196 | uses: actions/checkout@v2 197 | with: 198 | path: main 199 | - run: | 200 | source ~/.bash_profile 201 | ls -lah main 202 | aws sts get-caller-identity | awsredact 203 | docker run hello-world 204 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test self-hosted runner 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | get-runners: 8 | environment: aws_dev 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | include: 14 | - label: test1 15 | type: spot 16 | - label: test2 17 | type: spot 18 | - label: test3 19 | type: ondemand 20 | steps: 21 | - name: Get Runner 22 | uses: alphagov/github-actions-runner-orchestration/client@main 23 | id: garoclient 24 | with: 25 | ACTION: 'start' 26 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 27 | RUNNER_TYPE: ${{ matrix.type }} 28 | RUNNER_LABEL: ${{ matrix.label }} 29 | REPO: '${{ github.repository }}' 30 | GITHUB_COMMIT: '${{ github.sha }}' 31 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 32 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 33 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 34 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 35 | GARO_URL: 'https://dev.co-cdio-garo.digital' 36 | RUNNER_TIMEOUT: '900' 37 | 38 | use-test1-runner: 39 | environment: aws_dev # for the redaction 40 | runs-on: [self-hosted, linux, spot, test1] 41 | needs: get-runners 42 | defaults: 43 | run: 44 | shell: bash 45 | steps: 46 | - run: | 47 | source ~/.bash_profile 48 | echo "Hello world from test1!" 49 | echo "" 50 | aws sts get-caller-identity | redact "${{ secrets.RUNNER_ACID }}" 51 | echo "" 52 | 53 | use-test2-runner: 54 | runs-on: [self-hosted, linux, spot, test2] 55 | needs: get-runners 56 | defaults: 57 | run: 58 | shell: bash 59 | steps: 60 | - run: | 61 | source ~/.bash_profile 62 | echo "Hello world from test2!" 63 | echo "" 64 | aws sts get-caller-identity | awsredact 65 | echo "" 66 | 67 | use-test3-runner: 68 | runs-on: [self-hosted, linux, ondemand, test3] 69 | needs: get-runners 70 | defaults: 71 | run: 72 | shell: bash 73 | steps: 74 | - run: | 75 | source ~/.bash_profile 76 | echo "Hello world from test3 (ondemand)!" 77 | echo "" 78 | aws sts get-caller-identity | redact '[[:xdigit:]]+{4,999}' 79 | echo "" 80 | -------------------------------------------------------------------------------- /.github/workflows/tfsec.yml: -------------------------------------------------------------------------------- 1 | name: Validate and Test Terraform 2 | # this action runs terraform init, validate and tests 3 | # for common security vulnerabilities 4 | 5 | on: 6 | workflow_dispatch: 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - '**.tf' 12 | # pull_request: 13 | # paths: 14 | # - '**.tf' 15 | 16 | jobs: 17 | tfsec: 18 | name: Validate and Test Terraform 19 | runs-on: ubuntu-latest 20 | 21 | steps: 22 | - name: Clone repo 23 | uses: actions/checkout@main 24 | 25 | - name: Empty zip 26 | run: mkdir .build && touch .build/lambda.zip 27 | 28 | - name: Validate Terraform 29 | run: | 30 | # install tfenv 31 | if ! command -v "tfenv" > /dev/null; then 32 | git clone https://github.com/tfutils/tfenv.git ~/.tfenv 33 | sudo rm /usr/local/bin/tfenv || echo "No tfenv installed" 34 | sudo rm /usr/local/bin/terraform || echo "No terraform installed" 35 | sudo ln -s ~/.tfenv/bin/* /usr/local/bin > /dev/null 36 | fi 37 | 38 | tfenv install "$(cat .terraform-version)" > /dev/null 39 | tfenv use "$(cat .terraform-version)" > /dev/null 40 | mkdir -p ~/.terraform.d/plugin-cache 41 | echo "plugin_cache_dir = \"$HOME/.terraform.d/plugin-cache\"" > \ 42 | ~/.terraformrc 43 | 44 | for folder in $(find * -type f -name '*.tf' \ 45 | | grep -Eo "(.*\/)" | sort | uniq) 46 | do 47 | echo "=================" 48 | echo "Checking $folder deployment." 49 | echo "-----------------" 50 | workdir=$(pwd) 51 | cd "$folder" || exit 1 52 | rm -rf .terraform || echo "No .terraform" 53 | rm -rf .terraform.lock.hcl || echo "No .terraform.lock.hcl" 54 | terraform init -backend=false 55 | terraform validate 56 | cd "$workdir" || exit 1 57 | echo "-----------------" 58 | echo "$folder is valid!" 59 | done 60 | 61 | # test Terraform for security vulnerabilities 62 | - name: tfsec 63 | uses: tfsec/tfsec-sarif-action@2ec44316ed27c50d48c931c3c628adc4c8bb1d2b 64 | with: 65 | sarif_file: tfsec.sarif 66 | # secret generated per workflow run 67 | github_token: ${{ secrets.GITHUB_TOKEN }} 68 | 69 | # GitHub security outputs 70 | - name: Upload SARIF file 71 | uses: github/codeql-action/upload-sarif@v1 72 | with: 73 | # Path to SARIF file relative to the root of the repository 74 | # created above in last step 75 | sarif_file: tfsec.sarif 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .build 2 | .target 3 | */.terraform/ 4 | */.terraform.* 5 | */terraform.tfs* 6 | */*.tfvars 7 | 8 | client/node_modules/ 9 | 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | ./dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | pip-wheel-metadata/ 33 | share/python-wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | MANIFEST 38 | 39 | # PyInstaller 40 | # Usually these files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .nox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *.cover 59 | *.py,cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | .python-version 95 | 96 | # pipenv 97 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 98 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 99 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 100 | # install all needed dependencies. 101 | #Pipfile.lock 102 | 103 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 104 | __pypackages__/ 105 | 106 | # Celery stuff 107 | celerybeat-schedule 108 | celerybeat.pid 109 | 110 | # SageMath parsed files 111 | *.sage.py 112 | 113 | # Environments 114 | .env 115 | .venv 116 | env/ 117 | venv/ 118 | ENV/ 119 | env.bak/ 120 | venv.bak/ 121 | 122 | # Spyder project settings 123 | .spyderproject 124 | .spyproject 125 | 126 | # Rope project settings 127 | .ropeproject 128 | 129 | # mkdocs documentation 130 | /site 131 | 132 | # mypy 133 | .mypy_cache/ 134 | .dmypy.json 135 | dmypy.json 136 | 137 | # Pyre type checker 138 | .pyre/ 139 | -------------------------------------------------------------------------------- /.secrets.baseline: -------------------------------------------------------------------------------- 1 | { 2 | "exclude": { 3 | "files": null, 4 | "lines": null 5 | }, 6 | "generated_at": "2021-07-01T12:09:53Z", 7 | "plugins_used": [ 8 | { 9 | "name": "AWSKeyDetector" 10 | }, 11 | { 12 | "name": "ArtifactoryDetector" 13 | }, 14 | { 15 | "base64_limit": 4.5, 16 | "name": "Base64HighEntropyString" 17 | }, 18 | { 19 | "name": "BasicAuthDetector" 20 | }, 21 | { 22 | "name": "CloudantDetector" 23 | }, 24 | { 25 | "hex_limit": 3, 26 | "name": "HexHighEntropyString" 27 | }, 28 | { 29 | "name": "IbmCloudIamDetector" 30 | }, 31 | { 32 | "name": "IbmCosHmacDetector" 33 | }, 34 | { 35 | "name": "JwtTokenDetector" 36 | }, 37 | { 38 | "keyword_exclude": null, 39 | "name": "KeywordDetector" 40 | }, 41 | { 42 | "name": "MailchimpDetector" 43 | }, 44 | { 45 | "name": "PrivateKeyDetector" 46 | }, 47 | { 48 | "name": "SlackDetector" 49 | }, 50 | { 51 | "name": "SoftlayerDetector" 52 | }, 53 | { 54 | "name": "StripeDetector" 55 | }, 56 | { 57 | "name": "TwilioKeyDetector" 58 | } 59 | ], 60 | "results": { 61 | "client/dist/index.js.map": [ 62 | { 63 | "hashed_secret": "64ed58817dcd313822856067265516645a6bbef5", 64 | "is_verified": false, 65 | "line_number": 1, 66 | "type": "Secret Keyword" 67 | } 68 | ], 69 | "client/package-lock.json": [ 70 | { 71 | "hashed_secret": "7c5b3cc9f11581ce2db7b4d82b0cc2bcee6af470", 72 | "is_verified": false, 73 | "line_number": 10, 74 | "type": "Base64 High Entropy String" 75 | }, 76 | { 77 | "hashed_secret": "8ea5c81c835c73d62e4994c918677cc0d154377b", 78 | "is_verified": false, 79 | "line_number": 15, 80 | "type": "Base64 High Entropy String" 81 | }, 82 | { 83 | "hashed_secret": "da7d002805dc3b953ed8e104cbcc61f00c5da25a", 84 | "is_verified": false, 85 | "line_number": 24, 86 | "type": "Base64 High Entropy String" 87 | }, 88 | { 89 | "hashed_secret": "a679a30c09cbbab3b2081ab0f3f150996f3b7f05", 90 | "is_verified": false, 91 | "line_number": 30, 92 | "type": "Base64 High Entropy String" 93 | }, 94 | { 95 | "hashed_secret": "bb05672f29d196996d896664a1b8ab1d5b96fe4d", 96 | "is_verified": false, 97 | "line_number": 41, 98 | "type": "Base64 High Entropy String" 99 | }, 100 | { 101 | "hashed_secret": "f7c253e2ec8b16ee1315678b6a85281bff4ede60", 102 | "is_verified": false, 103 | "line_number": 54, 104 | "type": "Base64 High Entropy String" 105 | }, 106 | { 107 | "hashed_secret": "3b29c3ee1315c5e719c70ce4293e355f397c9c7d", 108 | "is_verified": false, 109 | "line_number": 71, 110 | "type": "Base64 High Entropy String" 111 | }, 112 | { 113 | "hashed_secret": "2c10b352b04c7e3ac3eb280f079fad0d31affca3", 114 | "is_verified": false, 115 | "line_number": 76, 116 | "type": "Base64 High Entropy String" 117 | }, 118 | { 119 | "hashed_secret": "e36f107ecc3917fc60c89d6b8f9bbee042b0e990", 120 | "is_verified": false, 121 | "line_number": 82, 122 | "type": "Base64 High Entropy String" 123 | }, 124 | { 125 | "hashed_secret": "0c7f66893fe508baed443a4aad6003315428df3b", 126 | "is_verified": false, 127 | "line_number": 88, 128 | "type": "Base64 High Entropy String" 129 | }, 130 | { 131 | "hashed_secret": "845aaf24f7f1104268d267c929cdebb56ca61a40", 132 | "is_verified": false, 133 | "line_number": 100, 134 | "type": "Base64 High Entropy String" 135 | }, 136 | { 137 | "hashed_secret": "1c1c16799957f77d54bac4782677b9b1e1c2a0f3", 138 | "is_verified": false, 139 | "line_number": 106, 140 | "type": "Base64 High Entropy String" 141 | }, 142 | { 143 | "hashed_secret": "ed5695eb040e5c940801b8d72eb96490e4b2f11e", 144 | "is_verified": false, 145 | "line_number": 112, 146 | "type": "Base64 High Entropy String" 147 | }, 148 | { 149 | "hashed_secret": "d2eb4e9361fd3c06052590c09d8b407f1753b2d4", 150 | "is_verified": false, 151 | "line_number": 121, 152 | "type": "Base64 High Entropy String" 153 | }, 154 | { 155 | "hashed_secret": "da3c2503d0bd44fe1cf4f3722696fc3e6f28cf38", 156 | "is_verified": false, 157 | "line_number": 130, 158 | "type": "Base64 High Entropy String" 159 | }, 160 | { 161 | "hashed_secret": "456d02ad0d4d84a2516934ac4cd212b702a35971", 162 | "is_verified": false, 163 | "line_number": 136, 164 | "type": "Base64 High Entropy String" 165 | }, 166 | { 167 | "hashed_secret": "3aafb400e42871c4a994eba8013e4ff1034c2724", 168 | "is_verified": false, 169 | "line_number": 142, 170 | "type": "Base64 High Entropy String" 171 | }, 172 | { 173 | "hashed_secret": "7e0c8e7bebd3b26c7a99961ec3498c0448dcb08d", 174 | "is_verified": false, 175 | "line_number": 152, 176 | "type": "Base64 High Entropy String" 177 | }, 178 | { 179 | "hashed_secret": "dcf32b4254051ffec36827bb5f20b51a69e313be", 180 | "is_verified": false, 181 | "line_number": 158, 182 | "type": "Base64 High Entropy String" 183 | }, 184 | { 185 | "hashed_secret": "086b47ff4b09d5daf48fc11a12842df114051a64", 186 | "is_verified": false, 187 | "line_number": 192, 188 | "type": "Base64 High Entropy String" 189 | }, 190 | { 191 | "hashed_secret": "69e4418b2ffb487c86b50465bb7a8dd539577c3f", 192 | "is_verified": false, 193 | "line_number": 198, 194 | "type": "Base64 High Entropy String" 195 | }, 196 | { 197 | "hashed_secret": "3da27985279d7d55ced598655b77d5a3aef28e39", 198 | "is_verified": false, 199 | "line_number": 209, 200 | "type": "Base64 High Entropy String" 201 | }, 202 | { 203 | "hashed_secret": "26f76deebe03d118284ce3a2b105ef284a3af65b", 204 | "is_verified": false, 205 | "line_number": 224, 206 | "type": "Base64 High Entropy String" 207 | }, 208 | { 209 | "hashed_secret": "1dbfc5c9bc6298802d5fe70bc5def20aa112c66c", 210 | "is_verified": false, 211 | "line_number": 230, 212 | "type": "Base64 High Entropy String" 213 | }, 214 | { 215 | "hashed_secret": "b1739b5f43d946a0b8a9b1d58f304433370834bb", 216 | "is_verified": false, 217 | "line_number": 241, 218 | "type": "Base64 High Entropy String" 219 | }, 220 | { 221 | "hashed_secret": "7d8af756c6ff9c64e7fa50cfc0f0f19c374e5ec4", 222 | "is_verified": false, 223 | "line_number": 256, 224 | "type": "Base64 High Entropy String" 225 | }, 226 | { 227 | "hashed_secret": "fc7b2f07f919c3f8e20aa78a1756d2bcca252fb9", 228 | "is_verified": false, 229 | "line_number": 265, 230 | "type": "Base64 High Entropy String" 231 | }, 232 | { 233 | "hashed_secret": "7e6e5982fc713007d1d7cb8b093118f1f94dc970", 234 | "is_verified": false, 235 | "line_number": 271, 236 | "type": "Base64 High Entropy String" 237 | }, 238 | { 239 | "hashed_secret": "afca0b380a66cc359f66a3e2b1c00103a9b5807d", 240 | "is_verified": false, 241 | "line_number": 280, 242 | "type": "Base64 High Entropy String" 243 | }, 244 | { 245 | "hashed_secret": "a253d63d3b9acc42033ef6a0c4c88441947e50bf", 246 | "is_verified": false, 247 | "line_number": 286, 248 | "type": "Base64 High Entropy String" 249 | }, 250 | { 251 | "hashed_secret": "c3d1f151e80ba4c24fd7ae58458b1b78d9b4faa6", 252 | "is_verified": false, 253 | "line_number": 331, 254 | "type": "Base64 High Entropy String" 255 | }, 256 | { 257 | "hashed_secret": "40c0d7cff7f41b94edece73bd1b66ba7a7653d9d", 258 | "is_verified": false, 259 | "line_number": 341, 260 | "type": "Base64 High Entropy String" 261 | }, 262 | { 263 | "hashed_secret": "d3d182b77d4b044a546ee1231307afa9cf604d52", 264 | "is_verified": false, 265 | "line_number": 358, 266 | "type": "Base64 High Entropy String" 267 | }, 268 | { 269 | "hashed_secret": "eacb7cf99e07afa59ed9b5c692adfadbee4a57fb", 270 | "is_verified": false, 271 | "line_number": 364, 272 | "type": "Base64 High Entropy String" 273 | }, 274 | { 275 | "hashed_secret": "552c5087eda818037986f5b04db4a3932caead2b", 276 | "is_verified": false, 277 | "line_number": 375, 278 | "type": "Base64 High Entropy String" 279 | }, 280 | { 281 | "hashed_secret": "d8863c346b87ab9e420048134928a830cdb44e4f", 282 | "is_verified": false, 283 | "line_number": 383, 284 | "type": "Base64 High Entropy String" 285 | }, 286 | { 287 | "hashed_secret": "a08d4841cd17ac8cd3a1f5ab5b7ef92e56582a3e", 288 | "is_verified": false, 289 | "line_number": 389, 290 | "type": "Base64 High Entropy String" 291 | }, 292 | { 293 | "hashed_secret": "51e9263c74493cd77c2d25adb255b15647d84512", 294 | "is_verified": false, 295 | "line_number": 406, 296 | "type": "Base64 High Entropy String" 297 | }, 298 | { 299 | "hashed_secret": "10ad0247b7201e9fd720608145c78b8a2bda0ab5", 300 | "is_verified": false, 301 | "line_number": 415, 302 | "type": "Base64 High Entropy String" 303 | }, 304 | { 305 | "hashed_secret": "8391286cc1bda9f5510e294af9545020bed4e1df", 306 | "is_verified": false, 307 | "line_number": 423, 308 | "type": "Base64 High Entropy String" 309 | }, 310 | { 311 | "hashed_secret": "2fcbf434b04ba0a49ae29f1d9ea0edc30881350b", 312 | "is_verified": false, 313 | "line_number": 429, 314 | "type": "Base64 High Entropy String" 315 | }, 316 | { 317 | "hashed_secret": "c0f64a532897bbd5497996fdfec7cfcd087540ce", 318 | "is_verified": false, 319 | "line_number": 435, 320 | "type": "Base64 High Entropy String" 321 | }, 322 | { 323 | "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120", 324 | "is_verified": false, 325 | "line_number": 441, 326 | "type": "Base64 High Entropy String" 327 | }, 328 | { 329 | "hashed_secret": "774eca9b42e13c9b6fce631a97afd84e037b17b1", 330 | "is_verified": false, 331 | "line_number": 447, 332 | "type": "Base64 High Entropy String" 333 | }, 334 | { 335 | "hashed_secret": "2d45c3e13e24253c33144b90e9e7fd5ed3a77bd2", 336 | "is_verified": false, 337 | "line_number": 453, 338 | "type": "Base64 High Entropy String" 339 | }, 340 | { 341 | "hashed_secret": "1495d374905ac90b921e0d285840a7b813c9b4f9", 342 | "is_verified": false, 343 | "line_number": 462, 344 | "type": "Base64 High Entropy String" 345 | }, 346 | { 347 | "hashed_secret": "bd601e2e754aa35115f7394207da536d6ca8d2db", 348 | "is_verified": false, 349 | "line_number": 472, 350 | "type": "Base64 High Entropy String" 351 | }, 352 | { 353 | "hashed_secret": "12ffe31fe2cdc41991054eec09b2a8e1105b126e", 354 | "is_verified": false, 355 | "line_number": 478, 356 | "type": "Base64 High Entropy String" 357 | }, 358 | { 359 | "hashed_secret": "53d8a2faec0eb354e1c5ab249d552f5eb0755c4f", 360 | "is_verified": false, 361 | "line_number": 484, 362 | "type": "Base64 High Entropy String" 363 | }, 364 | { 365 | "hashed_secret": "8f337304c5ed78319bc8d7e60dd0f3753fdd0407", 366 | "is_verified": false, 367 | "line_number": 490, 368 | "type": "Base64 High Entropy String" 369 | }, 370 | { 371 | "hashed_secret": "371ca38e7acde2cb9e7c1a8bea0355722ff047cf", 372 | "is_verified": false, 373 | "line_number": 504, 374 | "type": "Base64 High Entropy String" 375 | }, 376 | { 377 | "hashed_secret": "274f942c9029551130cdb1aba79543d89962fbe7", 378 | "is_verified": false, 379 | "line_number": 513, 380 | "type": "Base64 High Entropy String" 381 | }, 382 | { 383 | "hashed_secret": "3de084e244b8cee5e85a65912415cc90ea37b393", 384 | "is_verified": false, 385 | "line_number": 522, 386 | "type": "Base64 High Entropy String" 387 | }, 388 | { 389 | "hashed_secret": "36f523e990aecdfe159dcaec524b2732b2197e07", 390 | "is_verified": false, 391 | "line_number": 528, 392 | "type": "Base64 High Entropy String" 393 | }, 394 | { 395 | "hashed_secret": "100e2d5dddf453d710f276f116d5e200cbde9a74", 396 | "is_verified": false, 397 | "line_number": 534, 398 | "type": "Base64 High Entropy String" 399 | }, 400 | { 401 | "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5", 402 | "is_verified": false, 403 | "line_number": 560, 404 | "type": "Base64 High Entropy String" 405 | }, 406 | { 407 | "hashed_secret": "b7f8937f7821d03786e13293e0d0482c51adbf7a", 408 | "is_verified": false, 409 | "line_number": 566, 410 | "type": "Base64 High Entropy String" 411 | }, 412 | { 413 | "hashed_secret": "d5ec021cbb92c3486e38fac176f4f59a6c1d92e8", 414 | "is_verified": false, 415 | "line_number": 572, 416 | "type": "Base64 High Entropy String" 417 | }, 418 | { 419 | "hashed_secret": "36772b0a4de160ad3eec470df23d92655268e759", 420 | "is_verified": false, 421 | "line_number": 578, 422 | "type": "Base64 High Entropy String" 423 | }, 424 | { 425 | "hashed_secret": "f997f480531d137b87109bf8879332b9a7f27b12", 426 | "is_verified": false, 427 | "line_number": 587, 428 | "type": "Base64 High Entropy String" 429 | }, 430 | { 431 | "hashed_secret": "5856bd4cb3a23981807d6e537408344c13ffaada", 432 | "is_verified": false, 433 | "line_number": 593, 434 | "type": "Base64 High Entropy String" 435 | }, 436 | { 437 | "hashed_secret": "fcb18ac405c4e422fe4b38c25a8f936a08da7223", 438 | "is_verified": false, 439 | "line_number": 599, 440 | "type": "Base64 High Entropy String" 441 | }, 442 | { 443 | "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612", 444 | "is_verified": false, 445 | "line_number": 609, 446 | "type": "Base64 High Entropy String" 447 | }, 448 | { 449 | "hashed_secret": "f218e51689a8e848585ec2f6a63fed545e42cdb4", 450 | "is_verified": false, 451 | "line_number": 615, 452 | "type": "Base64 High Entropy String" 453 | }, 454 | { 455 | "hashed_secret": "0ec89f07004a01dda54b29e48a86c84f8098fdb6", 456 | "is_verified": false, 457 | "line_number": 621, 458 | "type": "Base64 High Entropy String" 459 | }, 460 | { 461 | "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", 462 | "is_verified": false, 463 | "line_number": 631, 464 | "type": "Base64 High Entropy String" 465 | }, 466 | { 467 | "hashed_secret": "d7cdb9c755d36ef6bba01ff385c215194d07c523", 468 | "is_verified": false, 469 | "line_number": 637, 470 | "type": "Base64 High Entropy String" 471 | }, 472 | { 473 | "hashed_secret": "67619a42987e368dd3fe59582e4e13c12e7a563d", 474 | "is_verified": false, 475 | "line_number": 646, 476 | "type": "Base64 High Entropy String" 477 | }, 478 | { 479 | "hashed_secret": "ea7a290ccb099ca955bf01c7b1203e4930a1f79a", 480 | "is_verified": false, 481 | "line_number": 655, 482 | "type": "Base64 High Entropy String" 483 | }, 484 | { 485 | "hashed_secret": "0ecaac82c63ffbfd4c14b0820094472d2e523401", 486 | "is_verified": false, 487 | "line_number": 661, 488 | "type": "Base64 High Entropy String" 489 | }, 490 | { 491 | "hashed_secret": "acb6c89dc11795624fc92ec5d981d3a280fe6fce", 492 | "is_verified": false, 493 | "line_number": 676, 494 | "type": "Base64 High Entropy String" 495 | }, 496 | { 497 | "hashed_secret": "139323d71483de0860981200fb39673681ab2ef2", 498 | "is_verified": false, 499 | "line_number": 690, 500 | "type": "Base64 High Entropy String" 501 | }, 502 | { 503 | "hashed_secret": "891e23c02f7af11c912b47e84dba9ebc2a810571", 504 | "is_verified": false, 505 | "line_number": 699, 506 | "type": "Base64 High Entropy String" 507 | }, 508 | { 509 | "hashed_secret": "61ff5c8a702d8ffe28063c06487a3a5a0db2ed70", 510 | "is_verified": false, 511 | "line_number": 705, 512 | "type": "Base64 High Entropy String" 513 | }, 514 | { 515 | "hashed_secret": "740c72e48345f65f0deefba812581017bd57c614", 516 | "is_verified": false, 517 | "line_number": 711, 518 | "type": "Base64 High Entropy String" 519 | }, 520 | { 521 | "hashed_secret": "ec3990a473a2628b0b34fc88835f0a225667b960", 522 | "is_verified": false, 523 | "line_number": 717, 524 | "type": "Base64 High Entropy String" 525 | }, 526 | { 527 | "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102", 528 | "is_verified": false, 529 | "line_number": 723, 530 | "type": "Base64 High Entropy String" 531 | }, 532 | { 533 | "hashed_secret": "5740dc5f7e5232e8df92b8861c8426d96d88611c", 534 | "is_verified": false, 535 | "line_number": 729, 536 | "type": "Base64 High Entropy String" 537 | }, 538 | { 539 | "hashed_secret": "5a485a4915f94231f9dc5b355ddda367442ba450", 540 | "is_verified": false, 541 | "line_number": 735, 542 | "type": "Base64 High Entropy String" 543 | }, 544 | { 545 | "hashed_secret": "41135cd24fce09855f67710faed09efce38cd9fb", 546 | "is_verified": false, 547 | "line_number": 741, 548 | "type": "Base64 High Entropy String" 549 | }, 550 | { 551 | "hashed_secret": "a138c5aefc47562990ad8f9024051d842662e91d", 552 | "is_verified": false, 553 | "line_number": 747, 554 | "type": "Base64 High Entropy String" 555 | }, 556 | { 557 | "hashed_secret": "549c6a72a5d6d8822b1175c0205f7800f5a75e1d", 558 | "is_verified": false, 559 | "line_number": 756, 560 | "type": "Base64 High Entropy String" 561 | }, 562 | { 563 | "hashed_secret": "3b7990b0f82bc9bc6c4ec02744f9c02e76aac827", 564 | "is_verified": false, 565 | "line_number": 765, 566 | "type": "Base64 High Entropy String" 567 | }, 568 | { 569 | "hashed_secret": "9072ebcbad57a1ae4256c5ae37c1e7c7ae5325de", 570 | "is_verified": false, 571 | "line_number": 774, 572 | "type": "Base64 High Entropy String" 573 | }, 574 | { 575 | "hashed_secret": "2017c9b1f1c38285db67283950ac55e5efb09858", 576 | "is_verified": false, 577 | "line_number": 780, 578 | "type": "Base64 High Entropy String" 579 | }, 580 | { 581 | "hashed_secret": "d15b82853a984c45aab87399878c98082bac85ba", 582 | "is_verified": false, 583 | "line_number": 791, 584 | "type": "Base64 High Entropy String" 585 | }, 586 | { 587 | "hashed_secret": "4ed49acfbd180533265d2099f3043963c4a24443", 588 | "is_verified": false, 589 | "line_number": 800, 590 | "type": "Base64 High Entropy String" 591 | }, 592 | { 593 | "hashed_secret": "428e72f71455a785963f1df5f85c26c6d6ae519b", 594 | "is_verified": false, 595 | "line_number": 809, 596 | "type": "Base64 High Entropy String" 597 | }, 598 | { 599 | "hashed_secret": "4e6c84d648b89cf86ee5343472e2399ccf3b0446", 600 | "is_verified": false, 601 | "line_number": 817, 602 | "type": "Base64 High Entropy String" 603 | }, 604 | { 605 | "hashed_secret": "22f9d6d0cf5e403b68a49fc985b3f35076560ea7", 606 | "is_verified": false, 607 | "line_number": 823, 608 | "type": "Base64 High Entropy String" 609 | }, 610 | { 611 | "hashed_secret": "622a3612bd67f8e5a4483d7ddb99a3eb02668ef5", 612 | "is_verified": false, 613 | "line_number": 834, 614 | "type": "Base64 High Entropy String" 615 | }, 616 | { 617 | "hashed_secret": "3e536139add3f70bf34712d57cf490dc9c631553", 618 | "is_verified": false, 619 | "line_number": 843, 620 | "type": "Base64 High Entropy String" 621 | }, 622 | { 623 | "hashed_secret": "8368db68b0ace9e898989e5e645c5e1eddf99b11", 624 | "is_verified": false, 625 | "line_number": 849, 626 | "type": "Base64 High Entropy String" 627 | }, 628 | { 629 | "hashed_secret": "ae3b82557a7574cb2945d06c4b88bca535f5c5ff", 630 | "is_verified": false, 631 | "line_number": 858, 632 | "type": "Base64 High Entropy String" 633 | }, 634 | { 635 | "hashed_secret": "81ae62f2223b9fe40c0d0e74875892ab6cd5e83c", 636 | "is_verified": false, 637 | "line_number": 870, 638 | "type": "Base64 High Entropy String" 639 | }, 640 | { 641 | "hashed_secret": "e7c4ee17ab711302f98d915dd0b266cd2d1063e5", 642 | "is_verified": false, 643 | "line_number": 882, 644 | "type": "Base64 High Entropy String" 645 | }, 646 | { 647 | "hashed_secret": "fd98c7e5160ef8d495751b2876d32a99661265d5", 648 | "is_verified": false, 649 | "line_number": 890, 650 | "type": "Base64 High Entropy String" 651 | }, 652 | { 653 | "hashed_secret": "a09ac4be578a7b177bc5f823f8624e32f10751fb", 654 | "is_verified": false, 655 | "line_number": 896, 656 | "type": "Base64 High Entropy String" 657 | }, 658 | { 659 | "hashed_secret": "96b299789c89faf2ca7467ca7e034c969bed15f6", 660 | "is_verified": false, 661 | "line_number": 905, 662 | "type": "Base64 High Entropy String" 663 | }, 664 | { 665 | "hashed_secret": "390113d1af6b5ece9a90ea6db402a0008b2a9d08", 666 | "is_verified": false, 667 | "line_number": 911, 668 | "type": "Base64 High Entropy String" 669 | }, 670 | { 671 | "hashed_secret": "5e5c5d3d017fdf63a15b2e6abd71fee9250ed40d", 672 | "is_verified": false, 673 | "line_number": 920, 674 | "type": "Base64 High Entropy String" 675 | }, 676 | { 677 | "hashed_secret": "f0257d02d23f53e799476af74cf0386375d4b6c2", 678 | "is_verified": false, 679 | "line_number": 926, 680 | "type": "Base64 High Entropy String" 681 | }, 682 | { 683 | "hashed_secret": "6a06fcab75226b02d9c0b37db785b301712291b2", 684 | "is_verified": false, 685 | "line_number": 935, 686 | "type": "Base64 High Entropy String" 687 | }, 688 | { 689 | "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918", 690 | "is_verified": false, 691 | "line_number": 941, 692 | "type": "Base64 High Entropy String" 693 | }, 694 | { 695 | "hashed_secret": "03edbe2e35e4244d03ec1f59ae4b9e86cb180365", 696 | "is_verified": false, 697 | "line_number": 947, 698 | "type": "Base64 High Entropy String" 699 | } 700 | ] 701 | }, 702 | "version": "0.13.1", 703 | "word_list": { 704 | "file": null, 705 | "hash": null 706 | } 707 | } 708 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Government Digital Service 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /usr/bin/env bash 2 | DEFAULT_GOAL := test 3 | PHONY = clean 4 | ONESHELL: 5 | 6 | build-lambda: 7 | mkdir -p .build/ 8 | mkdir -p .target/scripts/ 9 | cp ./*.py .target/ 10 | cp ./scripts/*.sh .target/scripts/ 11 | cd .target/ && zip -FSqr ../.build/lambda.zip . 12 | 13 | build-dependencies: 14 | python3.8 -m pip install -r requirements.txt -t .target/ --upgrade 15 | 16 | build-full: build-dependencies build build-client 17 | 18 | clean: 19 | rm -rf .build 20 | rm -rf .target 21 | 22 | venv: 23 | python3.8 -m venv env; 24 | chmod +x ./env/bin/activate; 25 | 26 | install-dev-dependencies: 27 | source ./env/bin/activate && python3.8 -m pip install -r requirements.txt -r requirements-dev.txt --upgrade 28 | 29 | test-python-full: venv install-dev-dependencies test-python 30 | 31 | test-python: venv 32 | source ./env/bin/activate && python3.8 -m doctest -f *.py; 33 | 34 | test-scripts: 35 | shellcheck scripts/*.sh 36 | 37 | build-client: 38 | cd ./client && npm run all 39 | 40 | test-client: 41 | cd ./client && npm run test 42 | 43 | test-client-full: 44 | cd ./client && npm install && npm run test 45 | 46 | test: test-python test-scripts test-client 47 | 48 | test-full: test-python-full test-scripts test-client-full 49 | 50 | build: build-lambda build-client 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # github-actions-runner-orchestration (GARO) 2 | ![Experimental](https://img.shields.io/badge/Status-Experimental-orange.svg) [![Test and deploy to staging and production](https://github.com/alphagov/github-actions-runner-orchestration/actions/workflows/deploy.yml/badge.svg?branch=main)](https://github.com/alphagov/github-actions-runner-orchestration/actions/workflows/deploy.yml) [![Create AMI](https://github.com/alphagov/github-actions-runner-orchestration/actions/workflows/create-ami.yml/badge.svg?branch=main)](https://github.com/alphagov/github-actions-runner-orchestration/actions/workflows/create-ami.yml) 3 | 4 | ## What is it? 5 | GARO is an experimental serverless (AWS Lambda) API for running GitHub Action 6 | runners in self-hosted, ephemeral EC2 instances. 7 | 8 | ## How to use? 9 | 10 | The [garo client] will start up an instance and wait for it to be active. 11 | See the [client README](client/README.md) for details and an example. 12 | 13 | There are also the [workflows here] which use this tool. 14 | 15 | ## Requirements for [garo client] (or direct API use) 16 | - subnets with external internet access (recommend via a NAT gateway) 17 | - security group for the runner instances 18 | - 2x IAM roles (see [terraform_module](terraform_module/) for these) 19 | 1. role for assuming from API (with random [external ID] added as a condition) 20 | 2. role for instances to use (allow assume from the first role) 21 | - PAT in SSM (`/github/runner/pat`) with repo write access (for adding runners to a repo) 22 | - Params in GitHub secrets / environment variables (recommend using [GitHub Environments](https://docs.github.com/en/actions/reference/environments) with branch protections) 23 | 24 | ## Development 25 | See the [development documentation]. 26 | 27 | [development documentation]: docs/development.md 28 | [workflows here]: .github/workflows/ 29 | [external ID]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html 30 | [garo client]: client/ 31 | -------------------------------------------------------------------------------- /client/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "commonjs": true, 4 | "es6": true, 5 | "jest": false, 6 | "node": true 7 | }, 8 | "extends": "eslint:recommended", 9 | "globals": { 10 | "Atomics": "readonly", 11 | "SharedArrayBuffer": "readonly" 12 | }, 13 | "parserOptions": { 14 | "ecmaVersion": 2018 15 | }, 16 | "rules": { 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /client/.nvmrc: -------------------------------------------------------------------------------- 1 | 12 2 | -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | # GARO Action 2 | 3 | This action spins up and ephemerial runner in your own AWS environment. 4 | 5 | ## Example usage 6 | 7 | ``` yml 8 | steps: 9 | - name: Get runner 10 | uses: alphagov/github-actions-runner-orchestration/client@main 11 | id: garoclient 12 | with: 13 | ACTION: 'start' 14 | GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' 15 | RUNNER_TYPE: 'ondemand' 16 | REPO: '${{ github.repository }}' 17 | GITHUB_COMMIT: '${{ github.sha }}' 18 | RUNNER_SUBNET: '${{ secrets.RUNNER_SUBNET }}' 19 | RUNNER_SG: '${{ secrets.RUNNER_SG }}' 20 | RUNNER_ACID: '${{ secrets.RUNNER_ACID }}' 21 | RUNNER_EXID: '${{ secrets.RUNNER_EXID }}' 22 | ``` 23 | 24 | ## Inputs 25 | 26 | #### -- Required -- 27 | 28 | ### `GITHUB_TOKEN` 29 | 30 | **Required** The token from the workflow run. 31 | 32 | For most cases, set to: `'${{ secrets.GITHUB_TOKEN }}'` 33 | 34 | ### `REPO` 35 | 36 | **Required** The repo name (including organisation), for example `alphagov/github-actions-runner-orchestration`. 37 | 38 | For most cases, set to `'${{ github.repository }}'` 39 | 40 | ### `GITHUB_COMMIT` 41 | 42 | **Required** The commit SHA from the workflow run. 43 | 44 | For most cases, set to: `'${{ github.sha }}'` 45 | 46 | ### `RUNNER_ACID` 47 | 48 | **Required** The AWS account ID where to assume the `GitHubRunnerAssumeRole` role. 49 | 50 | ### `RUNNER_EXID` 51 | 52 | **Required** The AWS external ID that's set as a condition in the `GitHubRunnerAssumeRole` role. 53 | 54 | See here [for more information about external IDs](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html). 55 | 56 | #### -- Optionals -- 57 | 58 | ### `RUNNER_SUBNET` 59 | 60 | _optional_ The AWS subnet to start the runner in - must be set with `ACTION: 'start'`. 61 | 62 | ### `RUNNER_SG` 63 | 64 | _optional_ The AWS security group to assign to the runner - must be set with `ACTION: 'start'`. 65 | 66 | ### `RUNNER_TYPE` 67 | 68 | _optional_ The type of instance for runner `spot | ondemand`. 69 | 70 | ### `RUNNER_LABEL` 71 | 72 | _optional_ CSV of additional labels for the runner, for example `firstjob,123` 73 | 74 | Can be useful for specifying particular classes of runner. 75 | 76 | ### `RUNNER_NAME` 77 | 78 | _optional_ An existing runners name - must only be set with `ACTION: 'state'`. 79 | 80 | ### `RUNNER_TIMEOUT` 81 | 82 | _optional_ How long the runner idles for in seconds. 83 | 84 | Default: `3600` (1 hour) 85 | 86 | ### `RUNNER_REGION` 87 | 88 | _optional_ The AWS region name, for example `eu-west-2`. 89 | 90 | Default: `eu-west-2` 91 | 92 | ### `GARO_URL` 93 | 94 | _optional_ The API url. 95 | 96 | Default: https://prod.co-cdio-garo.digital 97 | 98 | ### `ACTION` 99 | 100 | _optional_ `start | state`. 101 | 102 | Default: `start` 103 | 104 | ### `WAIT_FOR_START` 105 | 106 | _optional_ Whether to wait for the runner to start `yes | no`. 107 | 108 | Default: `yes` 109 | 110 | 111 | ## Outputs 112 | 113 | ### `name` 114 | 115 | The full runner name. 116 | 117 | ### `runnerstate` 118 | 119 | The runner's state. 120 | 121 | ### `uniqueid` 122 | 123 | The runner's unique ID that's randomly generated when created. 124 | -------------------------------------------------------------------------------- /client/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Get Runner' 2 | description: 'Get the GitHub Action runner' 3 | inputs: 4 | GITHUB_TOKEN: 5 | description: 'GitHub Token (do not set in settings)' 6 | required: true 7 | REPO: 8 | description: 'In format: org/repo' 9 | required: true 10 | GITHUB_COMMIT: 11 | description: 'Hash of the latest commit' 12 | required: true 13 | RUNNER_ACID: 14 | description: 'AWS account ID where to deploy (requires roles to be in place)' 15 | required: true 16 | RUNNER_EXID: 17 | description: 'External ID on the AWS IAM role' 18 | required: true 19 | 20 | GARO_URL: 21 | description: 'URL for the GARO API' 22 | required: false 23 | default: 'https://prod.co-cdio-garo.digital' 24 | ACTION: 25 | description: 'Action: start | state' 26 | required: false 27 | default: 'start' 28 | WAIT_FOR_START: 29 | description: 'Whether to wait: yes | no' 30 | required: false 31 | default: 'yes' 32 | RUNNER_SUBNET: 33 | description: 'Subnet where to start the runner' 34 | required: false 35 | RUNNER_SG: 36 | description: 'Security group to give the runner' 37 | required: false 38 | RUNNER_TYPE: 39 | description: 'Type of runner: spot | ondemand' 40 | required: false 41 | default: 'spot' 42 | RUNNER_REGION: 43 | description: 'AWS region name for the runner' 44 | required: false 45 | default: 'eu-west-2' 46 | RUNNER_TIMEOUT: 47 | description: 'Timeout of the runner' 48 | required: false 49 | default: '3600' 50 | RUNNER_NAME: 51 | description: 'Name of the runner' 52 | required: false 53 | default: '' 54 | RUNNER_LABEL: 55 | description: 'Additional label for the runner' 56 | required: false 57 | default: '' 58 | outputs: 59 | name: 60 | description: 'Runner name' 61 | runnerstate: 62 | description: 'Runner state' 63 | uniqueid: 64 | description: 'Runner unique ID' 65 | runs: 66 | using: 'node12' 67 | main: 'dist/index.js' 68 | -------------------------------------------------------------------------------- /client/dist/index.js: -------------------------------------------------------------------------------- 1 | require('./sourcemap-register.js');module.exports = 2 | /******/ (() => { // webpackBootstrap 3 | /******/ var __webpack_modules__ = ({ 4 | 5 | /***/ 932: 6 | /***/ ((__unused_webpack_module, __unused_webpack_exports, __nccwpck_require__) => { 7 | 8 | const core = __nccwpck_require__(186); 9 | const wait = __nccwpck_require__(258); 10 | const crypto = __nccwpck_require__(417) 11 | 12 | 13 | const getItem = (itemName, defaultStr = null) => { 14 | const res = core.getInput(itemName); 15 | if (res.length == 0) { 16 | if (defaultStr == null) { 17 | throw `${itemName} not set`; 18 | } 19 | return defaultStr; 20 | } 21 | return res; 22 | } 23 | 24 | 25 | function make_api_request(action, garo_url, github_token, github_commit, postObj, dryrun=false) { 26 | const https = __nccwpck_require__(211); 27 | const api_uri = new URL(garo_url); 28 | 29 | const current_time = Math.floor(new Date().getTime() / 1000).toString(); 30 | postObj.time = current_time; 31 | 32 | postObj.dryrun = dryrun; 33 | 34 | if (action == "start") 35 | { 36 | console.log("Sending start action to API"); 37 | } 38 | else if (action == "state") 39 | { 40 | if ("name" in postObj) { 41 | console.log("Sending state action to API"); 42 | } else { 43 | throw "name missing"; 44 | } 45 | } 46 | else 47 | { 48 | return false; 49 | } 50 | 51 | const data = JSON.stringify(postObj); 52 | const signature = crypto.createHmac('sha512', github_token).update(data).digest('hex'); 53 | const options = { 54 | hostname: api_uri.hostname, 55 | port: 443, 56 | path: `/${action}`, 57 | method: 'POST', 58 | headers: { 59 | 'X-GitHub-Token': github_token, 60 | 'X-GitHub-Signature': signature, 61 | 'X-GitHub-CommitSHA': github_commit, 62 | 'Content-Type': 'application/json', 63 | 'Content-Length': data.length 64 | } 65 | } 66 | 67 | return new Promise((resolve, reject) => { 68 | const req = https.request(options, res => { 69 | console.log(`statusCode: ${res.statusCode}`) 70 | if (res.statusCode != 200) { 71 | resolve({"runnerstate": "Non-200"}); 72 | } 73 | 74 | res.on('data', d => { 75 | const data_resp = d.toString() 76 | if (data_resp != "error") { 77 | resolve(JSON.parse(data_resp)); 78 | } else { 79 | reject("error response"); 80 | } 81 | }) 82 | }) 83 | 84 | req.on('error', error => { 85 | reject(error); 86 | }); 87 | 88 | req.write(data); 89 | req.end(); 90 | }); 91 | } 92 | 93 | 94 | async function run() { 95 | try { 96 | const wait_for_start = getItem('WAIT_FOR_START', "true"); 97 | const action = getItem('ACTION', "start"); 98 | const garo_url = getItem('GARO_URL'); 99 | const github_token = getItem('GITHUB_TOKEN'); 100 | const github_commit = getItem('GITHUB_COMMIT'); 101 | const dryrun = (getItem('DRYRUN', 'false') == 'true'); 102 | 103 | let postObj = { 104 | repo: getItem('REPO'), 105 | account_id: getItem('RUNNER_ACID'), 106 | external_id: getItem('RUNNER_EXID'), 107 | type: getItem('RUNNER_TYPE', "spot"), 108 | region: getItem('RUNNER_REGION', "eu-west-2"), 109 | timeout: getItem('RUNNER_TIMEOUT', "3600"), 110 | } 111 | 112 | const rLabel = getItem('RUNNER_LABEL', ""); 113 | if (rLabel != "") { 114 | postObj["label"] = rLabel; 115 | } 116 | 117 | const rName = getItem('RUNNER_NAME', ""); 118 | if (rName != "") { 119 | postObj["name"] = rName; 120 | } 121 | 122 | const rSub = getItem('RUNNER_SUBNET', ""); 123 | if (rSub != "") { 124 | postObj["subnet"] = rSub; 125 | } 126 | 127 | const rSg = getItem('RUNNER_SG', ""); 128 | if (rSg != "") { 129 | postObj["sg"] = rSg; 130 | } 131 | 132 | if (action == "start") { 133 | const result = await make_api_request( 134 | "start", 135 | garo_url, 136 | github_token, 137 | github_commit, 138 | postObj, 139 | dryrun 140 | ) 141 | 142 | if (result["runnerstate"] == "Non-200") { 143 | throw 'Could not start the runner'; 144 | } 145 | 146 | console.log("wait_for_start:", wait_for_start); 147 | 148 | if (result["runnerstate"] == "started") { 149 | console.log("Runner already started:", result); 150 | 151 | core.setOutput("name", result["name"]); 152 | core.setOutput("runnerstate", result["runnerstate"]); 153 | core.setOutput("uniqueid", result["uniqueid"]); 154 | } 155 | 156 | if (result["runnerstate"] == "starting" && wait_for_start) { 157 | console.log("Runner starting:", result); 158 | postObj["name"] = result["name"]; 159 | 160 | 161 | var state_result = {}; 162 | let i = 0; 163 | while (i < 20) { 164 | i++; 165 | console.log(`Starting wait: ${i}`) 166 | await wait(15000); 167 | 168 | state_result = await make_api_request( 169 | "state", 170 | garo_url, 171 | github_token, 172 | github_commit, 173 | postObj, 174 | dryrun 175 | ); 176 | 177 | console.log(state_result); 178 | 179 | if (state_result["runnerstate"] == "started") { 180 | core.setOutput("name", result["name"]); 181 | core.setOutput("runnerstate", result["runnerstate"]); 182 | core.setOutput("uniqueid", result["uniqueid"]); 183 | break; 184 | } 185 | } 186 | 187 | if (state_result["runnerstate"] != "started") { 188 | throw 'Runner not started in time'; 189 | } 190 | } 191 | } 192 | 193 | } 194 | catch (error) 195 | { 196 | if (typeof(error) == "object" && "message" in error) 197 | { 198 | core.setFailed(error.message); 199 | } 200 | else 201 | { 202 | core.setFailed(error); 203 | } 204 | } 205 | } 206 | 207 | run(); 208 | 209 | 210 | /***/ }), 211 | 212 | /***/ 351: 213 | /***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { 214 | 215 | "use strict"; 216 | 217 | var __importStar = (this && this.__importStar) || function (mod) { 218 | if (mod && mod.__esModule) return mod; 219 | var result = {}; 220 | if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; 221 | result["default"] = mod; 222 | return result; 223 | }; 224 | Object.defineProperty(exports, "__esModule", ({ value: true })); 225 | const os = __importStar(__nccwpck_require__(87)); 226 | const utils_1 = __nccwpck_require__(278); 227 | /** 228 | * Commands 229 | * 230 | * Command Format: 231 | * ::name key=value,key=value::message 232 | * 233 | * Examples: 234 | * ::warning::This is the message 235 | * ::set-env name=MY_VAR::some value 236 | */ 237 | function issueCommand(command, properties, message) { 238 | const cmd = new Command(command, properties, message); 239 | process.stdout.write(cmd.toString() + os.EOL); 240 | } 241 | exports.issueCommand = issueCommand; 242 | function issue(name, message = '') { 243 | issueCommand(name, {}, message); 244 | } 245 | exports.issue = issue; 246 | const CMD_STRING = '::'; 247 | class Command { 248 | constructor(command, properties, message) { 249 | if (!command) { 250 | command = 'missing.command'; 251 | } 252 | this.command = command; 253 | this.properties = properties; 254 | this.message = message; 255 | } 256 | toString() { 257 | let cmdStr = CMD_STRING + this.command; 258 | if (this.properties && Object.keys(this.properties).length > 0) { 259 | cmdStr += ' '; 260 | let first = true; 261 | for (const key in this.properties) { 262 | if (this.properties.hasOwnProperty(key)) { 263 | const val = this.properties[key]; 264 | if (val) { 265 | if (first) { 266 | first = false; 267 | } 268 | else { 269 | cmdStr += ','; 270 | } 271 | cmdStr += `${key}=${escapeProperty(val)}`; 272 | } 273 | } 274 | } 275 | } 276 | cmdStr += `${CMD_STRING}${escapeData(this.message)}`; 277 | return cmdStr; 278 | } 279 | } 280 | function escapeData(s) { 281 | return utils_1.toCommandValue(s) 282 | .replace(/%/g, '%25') 283 | .replace(/\r/g, '%0D') 284 | .replace(/\n/g, '%0A'); 285 | } 286 | function escapeProperty(s) { 287 | return utils_1.toCommandValue(s) 288 | .replace(/%/g, '%25') 289 | .replace(/\r/g, '%0D') 290 | .replace(/\n/g, '%0A') 291 | .replace(/:/g, '%3A') 292 | .replace(/,/g, '%2C'); 293 | } 294 | //# sourceMappingURL=command.js.map 295 | 296 | /***/ }), 297 | 298 | /***/ 186: 299 | /***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { 300 | 301 | "use strict"; 302 | 303 | var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { 304 | function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } 305 | return new (P || (P = Promise))(function (resolve, reject) { 306 | function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } 307 | function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } 308 | function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } 309 | step((generator = generator.apply(thisArg, _arguments || [])).next()); 310 | }); 311 | }; 312 | var __importStar = (this && this.__importStar) || function (mod) { 313 | if (mod && mod.__esModule) return mod; 314 | var result = {}; 315 | if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; 316 | result["default"] = mod; 317 | return result; 318 | }; 319 | Object.defineProperty(exports, "__esModule", ({ value: true })); 320 | const command_1 = __nccwpck_require__(351); 321 | const file_command_1 = __nccwpck_require__(717); 322 | const utils_1 = __nccwpck_require__(278); 323 | const os = __importStar(__nccwpck_require__(87)); 324 | const path = __importStar(__nccwpck_require__(622)); 325 | /** 326 | * The code to exit an action 327 | */ 328 | var ExitCode; 329 | (function (ExitCode) { 330 | /** 331 | * A code indicating that the action was successful 332 | */ 333 | ExitCode[ExitCode["Success"] = 0] = "Success"; 334 | /** 335 | * A code indicating that the action was a failure 336 | */ 337 | ExitCode[ExitCode["Failure"] = 1] = "Failure"; 338 | })(ExitCode = exports.ExitCode || (exports.ExitCode = {})); 339 | //----------------------------------------------------------------------- 340 | // Variables 341 | //----------------------------------------------------------------------- 342 | /** 343 | * Sets env variable for this action and future actions in the job 344 | * @param name the name of the variable to set 345 | * @param val the value of the variable. Non-string values will be converted to a string via JSON.stringify 346 | */ 347 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 348 | function exportVariable(name, val) { 349 | const convertedVal = utils_1.toCommandValue(val); 350 | process.env[name] = convertedVal; 351 | const filePath = process.env['GITHUB_ENV'] || ''; 352 | if (filePath) { 353 | const delimiter = '_GitHubActionsFileCommandDelimeter_'; 354 | const commandValue = `${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}`; 355 | file_command_1.issueCommand('ENV', commandValue); 356 | } 357 | else { 358 | command_1.issueCommand('set-env', { name }, convertedVal); 359 | } 360 | } 361 | exports.exportVariable = exportVariable; 362 | /** 363 | * Registers a secret which will get masked from logs 364 | * @param secret value of the secret 365 | */ 366 | function setSecret(secret) { 367 | command_1.issueCommand('add-mask', {}, secret); 368 | } 369 | exports.setSecret = setSecret; 370 | /** 371 | * Prepends inputPath to the PATH (for this action and future actions) 372 | * @param inputPath 373 | */ 374 | function addPath(inputPath) { 375 | const filePath = process.env['GITHUB_PATH'] || ''; 376 | if (filePath) { 377 | file_command_1.issueCommand('PATH', inputPath); 378 | } 379 | else { 380 | command_1.issueCommand('add-path', {}, inputPath); 381 | } 382 | process.env['PATH'] = `${inputPath}${path.delimiter}${process.env['PATH']}`; 383 | } 384 | exports.addPath = addPath; 385 | /** 386 | * Gets the value of an input. The value is also trimmed. 387 | * 388 | * @param name name of the input to get 389 | * @param options optional. See InputOptions. 390 | * @returns string 391 | */ 392 | function getInput(name, options) { 393 | const val = process.env[`INPUT_${name.replace(/ /g, '_').toUpperCase()}`] || ''; 394 | if (options && options.required && !val) { 395 | throw new Error(`Input required and not supplied: ${name}`); 396 | } 397 | return val.trim(); 398 | } 399 | exports.getInput = getInput; 400 | /** 401 | * Sets the value of an output. 402 | * 403 | * @param name name of the output to set 404 | * @param value value to store. Non-string values will be converted to a string via JSON.stringify 405 | */ 406 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 407 | function setOutput(name, value) { 408 | command_1.issueCommand('set-output', { name }, value); 409 | } 410 | exports.setOutput = setOutput; 411 | /** 412 | * Enables or disables the echoing of commands into stdout for the rest of the step. 413 | * Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set. 414 | * 415 | */ 416 | function setCommandEcho(enabled) { 417 | command_1.issue('echo', enabled ? 'on' : 'off'); 418 | } 419 | exports.setCommandEcho = setCommandEcho; 420 | //----------------------------------------------------------------------- 421 | // Results 422 | //----------------------------------------------------------------------- 423 | /** 424 | * Sets the action status to failed. 425 | * When the action exits it will be with an exit code of 1 426 | * @param message add error issue message 427 | */ 428 | function setFailed(message) { 429 | process.exitCode = ExitCode.Failure; 430 | error(message); 431 | } 432 | exports.setFailed = setFailed; 433 | //----------------------------------------------------------------------- 434 | // Logging Commands 435 | //----------------------------------------------------------------------- 436 | /** 437 | * Gets whether Actions Step Debug is on or not 438 | */ 439 | function isDebug() { 440 | return process.env['RUNNER_DEBUG'] === '1'; 441 | } 442 | exports.isDebug = isDebug; 443 | /** 444 | * Writes debug message to user log 445 | * @param message debug message 446 | */ 447 | function debug(message) { 448 | command_1.issueCommand('debug', {}, message); 449 | } 450 | exports.debug = debug; 451 | /** 452 | * Adds an error issue 453 | * @param message error issue message. Errors will be converted to string via toString() 454 | */ 455 | function error(message) { 456 | command_1.issue('error', message instanceof Error ? message.toString() : message); 457 | } 458 | exports.error = error; 459 | /** 460 | * Adds an warning issue 461 | * @param message warning issue message. Errors will be converted to string via toString() 462 | */ 463 | function warning(message) { 464 | command_1.issue('warning', message instanceof Error ? message.toString() : message); 465 | } 466 | exports.warning = warning; 467 | /** 468 | * Writes info to log with console.log. 469 | * @param message info message 470 | */ 471 | function info(message) { 472 | process.stdout.write(message + os.EOL); 473 | } 474 | exports.info = info; 475 | /** 476 | * Begin an output group. 477 | * 478 | * Output until the next `groupEnd` will be foldable in this group 479 | * 480 | * @param name The name of the output group 481 | */ 482 | function startGroup(name) { 483 | command_1.issue('group', name); 484 | } 485 | exports.startGroup = startGroup; 486 | /** 487 | * End an output group. 488 | */ 489 | function endGroup() { 490 | command_1.issue('endgroup'); 491 | } 492 | exports.endGroup = endGroup; 493 | /** 494 | * Wrap an asynchronous function call in a group. 495 | * 496 | * Returns the same type as the function itself. 497 | * 498 | * @param name The name of the group 499 | * @param fn The function to wrap in the group 500 | */ 501 | function group(name, fn) { 502 | return __awaiter(this, void 0, void 0, function* () { 503 | startGroup(name); 504 | let result; 505 | try { 506 | result = yield fn(); 507 | } 508 | finally { 509 | endGroup(); 510 | } 511 | return result; 512 | }); 513 | } 514 | exports.group = group; 515 | //----------------------------------------------------------------------- 516 | // Wrapper action state 517 | //----------------------------------------------------------------------- 518 | /** 519 | * Saves state for current action, the state can only be retrieved by this action's post job execution. 520 | * 521 | * @param name name of the state to store 522 | * @param value value to store. Non-string values will be converted to a string via JSON.stringify 523 | */ 524 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 525 | function saveState(name, value) { 526 | command_1.issueCommand('save-state', { name }, value); 527 | } 528 | exports.saveState = saveState; 529 | /** 530 | * Gets the value of an state set by this action's main execution. 531 | * 532 | * @param name name of the state to get 533 | * @returns string 534 | */ 535 | function getState(name) { 536 | return process.env[`STATE_${name}`] || ''; 537 | } 538 | exports.getState = getState; 539 | //# sourceMappingURL=core.js.map 540 | 541 | /***/ }), 542 | 543 | /***/ 717: 544 | /***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { 545 | 546 | "use strict"; 547 | 548 | // For internal use, subject to change. 549 | var __importStar = (this && this.__importStar) || function (mod) { 550 | if (mod && mod.__esModule) return mod; 551 | var result = {}; 552 | if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; 553 | result["default"] = mod; 554 | return result; 555 | }; 556 | Object.defineProperty(exports, "__esModule", ({ value: true })); 557 | // We use any as a valid input type 558 | /* eslint-disable @typescript-eslint/no-explicit-any */ 559 | const fs = __importStar(__nccwpck_require__(747)); 560 | const os = __importStar(__nccwpck_require__(87)); 561 | const utils_1 = __nccwpck_require__(278); 562 | function issueCommand(command, message) { 563 | const filePath = process.env[`GITHUB_${command}`]; 564 | if (!filePath) { 565 | throw new Error(`Unable to find environment variable for file command ${command}`); 566 | } 567 | if (!fs.existsSync(filePath)) { 568 | throw new Error(`Missing file at path: ${filePath}`); 569 | } 570 | fs.appendFileSync(filePath, `${utils_1.toCommandValue(message)}${os.EOL}`, { 571 | encoding: 'utf8' 572 | }); 573 | } 574 | exports.issueCommand = issueCommand; 575 | //# sourceMappingURL=file-command.js.map 576 | 577 | /***/ }), 578 | 579 | /***/ 278: 580 | /***/ ((__unused_webpack_module, exports) => { 581 | 582 | "use strict"; 583 | 584 | // We use any as a valid input type 585 | /* eslint-disable @typescript-eslint/no-explicit-any */ 586 | Object.defineProperty(exports, "__esModule", ({ value: true })); 587 | /** 588 | * Sanitizes an input into a string so it can be passed into issueCommand safely 589 | * @param input input to sanitize into a string 590 | */ 591 | function toCommandValue(input) { 592 | if (input === null || input === undefined) { 593 | return ''; 594 | } 595 | else if (typeof input === 'string' || input instanceof String) { 596 | return input; 597 | } 598 | return JSON.stringify(input); 599 | } 600 | exports.toCommandValue = toCommandValue; 601 | //# sourceMappingURL=utils.js.map 602 | 603 | /***/ }), 604 | 605 | /***/ 258: 606 | /***/ ((module) => { 607 | 608 | let wait = function (milliseconds) { 609 | return new Promise((resolve) => { 610 | if (typeof milliseconds !== 'number') { 611 | throw new Error('milliseconds not a number'); 612 | } 613 | setTimeout(() => resolve("done!"), milliseconds) 614 | }); 615 | }; 616 | 617 | module.exports = wait; 618 | 619 | 620 | /***/ }), 621 | 622 | /***/ 417: 623 | /***/ ((module) => { 624 | 625 | "use strict"; 626 | module.exports = require("crypto");; 627 | 628 | /***/ }), 629 | 630 | /***/ 747: 631 | /***/ ((module) => { 632 | 633 | "use strict"; 634 | module.exports = require("fs");; 635 | 636 | /***/ }), 637 | 638 | /***/ 211: 639 | /***/ ((module) => { 640 | 641 | "use strict"; 642 | module.exports = require("https");; 643 | 644 | /***/ }), 645 | 646 | /***/ 87: 647 | /***/ ((module) => { 648 | 649 | "use strict"; 650 | module.exports = require("os");; 651 | 652 | /***/ }), 653 | 654 | /***/ 622: 655 | /***/ ((module) => { 656 | 657 | "use strict"; 658 | module.exports = require("path");; 659 | 660 | /***/ }) 661 | 662 | /******/ }); 663 | /************************************************************************/ 664 | /******/ // The module cache 665 | /******/ var __webpack_module_cache__ = {}; 666 | /******/ 667 | /******/ // The require function 668 | /******/ function __nccwpck_require__(moduleId) { 669 | /******/ // Check if module is in cache 670 | /******/ if(__webpack_module_cache__[moduleId]) { 671 | /******/ return __webpack_module_cache__[moduleId].exports; 672 | /******/ } 673 | /******/ // Create a new module (and put it into the cache) 674 | /******/ var module = __webpack_module_cache__[moduleId] = { 675 | /******/ // no module.id needed 676 | /******/ // no module.loaded needed 677 | /******/ exports: {} 678 | /******/ }; 679 | /******/ 680 | /******/ // Execute the module function 681 | /******/ var threw = true; 682 | /******/ try { 683 | /******/ __webpack_modules__[moduleId].call(module.exports, module, module.exports, __nccwpck_require__); 684 | /******/ threw = false; 685 | /******/ } finally { 686 | /******/ if(threw) delete __webpack_module_cache__[moduleId]; 687 | /******/ } 688 | /******/ 689 | /******/ // Return the exports of the module 690 | /******/ return module.exports; 691 | /******/ } 692 | /******/ 693 | /************************************************************************/ 694 | /******/ /* webpack/runtime/compat */ 695 | /******/ 696 | /******/ __nccwpck_require__.ab = __dirname + "/";/************************************************************************/ 697 | /******/ // module exports must be returned from runtime so entry inlining is disabled 698 | /******/ // startup 699 | /******/ // Load entry module and return exports 700 | /******/ return __nccwpck_require__(932); 701 | /******/ })() 702 | ; 703 | //# sourceMappingURL=index.js.map -------------------------------------------------------------------------------- /client/dist/index.js.map: -------------------------------------------------------------------------------- 1 | {"version":3,"file":"index.js","sources":["../webpack://garo-client/./index.js","../webpack://garo-client/./node_modules/@actions/core/lib/command.js","../webpack://garo-client/./node_modules/@actions/core/lib/core.js","../webpack://garo-client/./node_modules/@actions/core/lib/file-command.js","../webpack://garo-client/./node_modules/@actions/core/lib/utils.js","../webpack://garo-client/./wait.js","../webpack://garo-client/external \"crypto\"","../webpack://garo-client/external \"fs\"","../webpack://garo-client/external \"https\"","../webpack://garo-client/external \"os\"","../webpack://garo-client/external \"path\"","../webpack://garo-client/webpack/bootstrap","../webpack://garo-client/webpack/runtime/compat","../webpack://garo-client/webpack/startup"],"sourcesContent":["const core = require('@actions/core');\r\nconst wait = require('./wait');\r\nconst crypto = require('crypto')\r\n\r\n\r\nconst getItem = (itemName, defaultStr = null) => {\r\n const res = core.getInput(itemName);\r\n if (res.length == 0) {\r\n if (defaultStr == null) {\r\n throw `${itemName} not set`;\r\n }\r\n return defaultStr;\r\n }\r\n return res;\r\n}\r\n\r\n\r\nfunction make_api_request(action, garo_url, github_token, github_commit, postObj, dryrun=false) {\r\n const https = require('https');\r\n const api_uri = new URL(garo_url);\r\n\r\n const current_time = Math.floor(new Date().getTime() / 1000).toString();\r\n postObj.time = current_time;\r\n\r\n postObj.dryrun = dryrun;\r\n\r\n if (action == \"start\")\r\n {\r\n console.log(\"Sending start action to API\");\r\n }\r\n else if (action == \"state\")\r\n {\r\n if (\"name\" in postObj) {\r\n console.log(\"Sending state action to API\");\r\n } else {\r\n throw \"name missing\";\r\n }\r\n }\r\n else\r\n {\r\n return false;\r\n }\r\n\r\n const data = JSON.stringify(postObj);\r\n const signature = crypto.createHmac('sha512', github_token).update(data).digest('hex');\r\n const options = {\r\n hostname: api_uri.hostname,\r\n port: 443,\r\n path: `/${action}`,\r\n method: 'POST',\r\n headers: {\r\n 'X-GitHub-Token': github_token,\r\n 'X-GitHub-Signature': signature,\r\n 'X-GitHub-CommitSHA': github_commit,\r\n 'Content-Type': 'application/json',\r\n 'Content-Length': data.length\r\n }\r\n }\r\n\r\n return new Promise((resolve, reject) => {\r\n const req = https.request(options, res => {\r\n console.log(`statusCode: ${res.statusCode}`)\r\n if (res.statusCode != 200) {\r\n resolve({\"runnerstate\": \"Non-200\"});\r\n }\r\n\r\n res.on('data', d => {\r\n const data_resp = d.toString()\r\n if (data_resp != \"error\") {\r\n resolve(JSON.parse(data_resp));\r\n } else {\r\n reject(\"error response\");\r\n }\r\n })\r\n })\r\n\r\n req.on('error', error => {\r\n reject(error);\r\n });\r\n\r\n req.write(data);\r\n req.end();\r\n });\r\n}\r\n\r\n\r\nasync function run() {\r\n try {\r\n const wait_for_start = getItem('WAIT_FOR_START', \"true\");\r\n const action = getItem('ACTION', \"start\");\r\n const garo_url = getItem('GARO_URL');\r\n const github_token = getItem('GITHUB_TOKEN');\r\n const github_commit = getItem('GITHUB_COMMIT');\r\n const dryrun = (getItem('DRYRUN', 'false') == 'true');\r\n\r\n let postObj = {\r\n repo: getItem('REPO'),\r\n account_id: getItem('RUNNER_ACID'),\r\n external_id: getItem('RUNNER_EXID'),\r\n type: getItem('RUNNER_TYPE', \"spot\"),\r\n region: getItem('RUNNER_REGION', \"eu-west-2\"),\r\n timeout: getItem('RUNNER_TIMEOUT', \"3600\"),\r\n }\r\n\r\n const rLabel = getItem('RUNNER_LABEL', \"\");\r\n if (rLabel != \"\") {\r\n postObj[\"label\"] = rLabel;\r\n }\r\n\r\n const rName = getItem('RUNNER_NAME', \"\");\r\n if (rName != \"\") {\r\n postObj[\"name\"] = rName;\r\n }\r\n\r\n const rSub = getItem('RUNNER_SUBNET', \"\");\r\n if (rSub != \"\") {\r\n postObj[\"subnet\"] = rSub;\r\n }\r\n\r\n const rSg = getItem('RUNNER_SG', \"\");\r\n if (rSg != \"\") {\r\n postObj[\"sg\"] = rSg;\r\n }\r\n\r\n if (action == \"start\") {\r\n const result = await make_api_request(\r\n \"start\",\r\n garo_url,\r\n github_token,\r\n github_commit,\r\n postObj,\r\n dryrun\r\n )\r\n\r\n if (result[\"runnerstate\"] == \"Non-200\") {\r\n throw 'Could not start the runner';\r\n }\r\n\r\n console.log(\"wait_for_start:\", wait_for_start);\r\n\r\n if (result[\"runnerstate\"] == \"started\") {\r\n console.log(\"Runner already started:\", result);\r\n\r\n core.setOutput(\"name\", result[\"name\"]);\r\n core.setOutput(\"runnerstate\", result[\"runnerstate\"]);\r\n core.setOutput(\"uniqueid\", result[\"uniqueid\"]);\r\n }\r\n\r\n if (result[\"runnerstate\"] == \"starting\" && wait_for_start) {\r\n console.log(\"Runner starting:\", result);\r\n postObj[\"name\"] = result[\"name\"];\r\n\r\n\r\n var state_result = {};\r\n let i = 0;\r\n while (i < 20) {\r\n i++;\r\n console.log(`Starting wait: ${i}`)\r\n await wait(15000);\r\n\r\n state_result = await make_api_request(\r\n \"state\",\r\n garo_url,\r\n github_token,\r\n github_commit,\r\n postObj,\r\n dryrun\r\n );\r\n\r\n console.log(state_result);\r\n\r\n if (state_result[\"runnerstate\"] == \"started\") {\r\n core.setOutput(\"name\", result[\"name\"]);\r\n core.setOutput(\"runnerstate\", result[\"runnerstate\"]);\r\n core.setOutput(\"uniqueid\", result[\"uniqueid\"]);\r\n break;\r\n }\r\n }\r\n\r\n if (state_result[\"runnerstate\"] != \"started\") {\r\n throw 'Runner not started in time';\r\n }\r\n }\r\n }\r\n\r\n }\r\n catch (error)\r\n {\r\n if (typeof(error) == \"object\" && \"message\" in error)\r\n {\r\n core.setFailed(error.message);\r\n }\r\n else\r\n {\r\n core.setFailed(error);\r\n }\r\n }\r\n}\r\n\r\nrun();\r\n","\"use strict\";\nvar __importStar = (this && this.__importStar) || function (mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];\n result[\"default\"] = mod;\n return result;\n};\nObject.defineProperty(exports, \"__esModule\", { value: true });\nconst os = __importStar(require(\"os\"));\nconst utils_1 = require(\"./utils\");\n/**\n * Commands\n *\n * Command Format:\n * ::name key=value,key=value::message\n *\n * Examples:\n * ::warning::This is the message\n * ::set-env name=MY_VAR::some value\n */\nfunction issueCommand(command, properties, message) {\n const cmd = new Command(command, properties, message);\n process.stdout.write(cmd.toString() + os.EOL);\n}\nexports.issueCommand = issueCommand;\nfunction issue(name, message = '') {\n issueCommand(name, {}, message);\n}\nexports.issue = issue;\nconst CMD_STRING = '::';\nclass Command {\n constructor(command, properties, message) {\n if (!command) {\n command = 'missing.command';\n }\n this.command = command;\n this.properties = properties;\n this.message = message;\n }\n toString() {\n let cmdStr = CMD_STRING + this.command;\n if (this.properties && Object.keys(this.properties).length > 0) {\n cmdStr += ' ';\n let first = true;\n for (const key in this.properties) {\n if (this.properties.hasOwnProperty(key)) {\n const val = this.properties[key];\n if (val) {\n if (first) {\n first = false;\n }\n else {\n cmdStr += ',';\n }\n cmdStr += `${key}=${escapeProperty(val)}`;\n }\n }\n }\n }\n cmdStr += `${CMD_STRING}${escapeData(this.message)}`;\n return cmdStr;\n }\n}\nfunction escapeData(s) {\n return utils_1.toCommandValue(s)\n .replace(/%/g, '%25')\n .replace(/\\r/g, '%0D')\n .replace(/\\n/g, '%0A');\n}\nfunction escapeProperty(s) {\n return utils_1.toCommandValue(s)\n .replace(/%/g, '%25')\n .replace(/\\r/g, '%0D')\n .replace(/\\n/g, '%0A')\n .replace(/:/g, '%3A')\n .replace(/,/g, '%2C');\n}\n//# sourceMappingURL=command.js.map","\"use strict\";\nvar __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n};\nvar __importStar = (this && this.__importStar) || function (mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];\n result[\"default\"] = mod;\n return result;\n};\nObject.defineProperty(exports, \"__esModule\", { value: true });\nconst command_1 = require(\"./command\");\nconst file_command_1 = require(\"./file-command\");\nconst utils_1 = require(\"./utils\");\nconst os = __importStar(require(\"os\"));\nconst path = __importStar(require(\"path\"));\n/**\n * The code to exit an action\n */\nvar ExitCode;\n(function (ExitCode) {\n /**\n * A code indicating that the action was successful\n */\n ExitCode[ExitCode[\"Success\"] = 0] = \"Success\";\n /**\n * A code indicating that the action was a failure\n */\n ExitCode[ExitCode[\"Failure\"] = 1] = \"Failure\";\n})(ExitCode = exports.ExitCode || (exports.ExitCode = {}));\n//-----------------------------------------------------------------------\n// Variables\n//-----------------------------------------------------------------------\n/**\n * Sets env variable for this action and future actions in the job\n * @param name the name of the variable to set\n * @param val the value of the variable. Non-string values will be converted to a string via JSON.stringify\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nfunction exportVariable(name, val) {\n const convertedVal = utils_1.toCommandValue(val);\n process.env[name] = convertedVal;\n const filePath = process.env['GITHUB_ENV'] || '';\n if (filePath) {\n const delimiter = '_GitHubActionsFileCommandDelimeter_';\n const commandValue = `${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}`;\n file_command_1.issueCommand('ENV', commandValue);\n }\n else {\n command_1.issueCommand('set-env', { name }, convertedVal);\n }\n}\nexports.exportVariable = exportVariable;\n/**\n * Registers a secret which will get masked from logs\n * @param secret value of the secret\n */\nfunction setSecret(secret) {\n command_1.issueCommand('add-mask', {}, secret);\n}\nexports.setSecret = setSecret;\n/**\n * Prepends inputPath to the PATH (for this action and future actions)\n * @param inputPath\n */\nfunction addPath(inputPath) {\n const filePath = process.env['GITHUB_PATH'] || '';\n if (filePath) {\n file_command_1.issueCommand('PATH', inputPath);\n }\n else {\n command_1.issueCommand('add-path', {}, inputPath);\n }\n process.env['PATH'] = `${inputPath}${path.delimiter}${process.env['PATH']}`;\n}\nexports.addPath = addPath;\n/**\n * Gets the value of an input. The value is also trimmed.\n *\n * @param name name of the input to get\n * @param options optional. See InputOptions.\n * @returns string\n */\nfunction getInput(name, options) {\n const val = process.env[`INPUT_${name.replace(/ /g, '_').toUpperCase()}`] || '';\n if (options && options.required && !val) {\n throw new Error(`Input required and not supplied: ${name}`);\n }\n return val.trim();\n}\nexports.getInput = getInput;\n/**\n * Sets the value of an output.\n *\n * @param name name of the output to set\n * @param value value to store. Non-string values will be converted to a string via JSON.stringify\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nfunction setOutput(name, value) {\n command_1.issueCommand('set-output', { name }, value);\n}\nexports.setOutput = setOutput;\n/**\n * Enables or disables the echoing of commands into stdout for the rest of the step.\n * Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set.\n *\n */\nfunction setCommandEcho(enabled) {\n command_1.issue('echo', enabled ? 'on' : 'off');\n}\nexports.setCommandEcho = setCommandEcho;\n//-----------------------------------------------------------------------\n// Results\n//-----------------------------------------------------------------------\n/**\n * Sets the action status to failed.\n * When the action exits it will be with an exit code of 1\n * @param message add error issue message\n */\nfunction setFailed(message) {\n process.exitCode = ExitCode.Failure;\n error(message);\n}\nexports.setFailed = setFailed;\n//-----------------------------------------------------------------------\n// Logging Commands\n//-----------------------------------------------------------------------\n/**\n * Gets whether Actions Step Debug is on or not\n */\nfunction isDebug() {\n return process.env['RUNNER_DEBUG'] === '1';\n}\nexports.isDebug = isDebug;\n/**\n * Writes debug message to user log\n * @param message debug message\n */\nfunction debug(message) {\n command_1.issueCommand('debug', {}, message);\n}\nexports.debug = debug;\n/**\n * Adds an error issue\n * @param message error issue message. Errors will be converted to string via toString()\n */\nfunction error(message) {\n command_1.issue('error', message instanceof Error ? message.toString() : message);\n}\nexports.error = error;\n/**\n * Adds an warning issue\n * @param message warning issue message. Errors will be converted to string via toString()\n */\nfunction warning(message) {\n command_1.issue('warning', message instanceof Error ? message.toString() : message);\n}\nexports.warning = warning;\n/**\n * Writes info to log with console.log.\n * @param message info message\n */\nfunction info(message) {\n process.stdout.write(message + os.EOL);\n}\nexports.info = info;\n/**\n * Begin an output group.\n *\n * Output until the next `groupEnd` will be foldable in this group\n *\n * @param name The name of the output group\n */\nfunction startGroup(name) {\n command_1.issue('group', name);\n}\nexports.startGroup = startGroup;\n/**\n * End an output group.\n */\nfunction endGroup() {\n command_1.issue('endgroup');\n}\nexports.endGroup = endGroup;\n/**\n * Wrap an asynchronous function call in a group.\n *\n * Returns the same type as the function itself.\n *\n * @param name The name of the group\n * @param fn The function to wrap in the group\n */\nfunction group(name, fn) {\n return __awaiter(this, void 0, void 0, function* () {\n startGroup(name);\n let result;\n try {\n result = yield fn();\n }\n finally {\n endGroup();\n }\n return result;\n });\n}\nexports.group = group;\n//-----------------------------------------------------------------------\n// Wrapper action state\n//-----------------------------------------------------------------------\n/**\n * Saves state for current action, the state can only be retrieved by this action's post job execution.\n *\n * @param name name of the state to store\n * @param value value to store. Non-string values will be converted to a string via JSON.stringify\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nfunction saveState(name, value) {\n command_1.issueCommand('save-state', { name }, value);\n}\nexports.saveState = saveState;\n/**\n * Gets the value of an state set by this action's main execution.\n *\n * @param name name of the state to get\n * @returns string\n */\nfunction getState(name) {\n return process.env[`STATE_${name}`] || '';\n}\nexports.getState = getState;\n//# sourceMappingURL=core.js.map","\"use strict\";\n// For internal use, subject to change.\nvar __importStar = (this && this.__importStar) || function (mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];\n result[\"default\"] = mod;\n return result;\n};\nObject.defineProperty(exports, \"__esModule\", { value: true });\n// We use any as a valid input type\n/* eslint-disable @typescript-eslint/no-explicit-any */\nconst fs = __importStar(require(\"fs\"));\nconst os = __importStar(require(\"os\"));\nconst utils_1 = require(\"./utils\");\nfunction issueCommand(command, message) {\n const filePath = process.env[`GITHUB_${command}`];\n if (!filePath) {\n throw new Error(`Unable to find environment variable for file command ${command}`);\n }\n if (!fs.existsSync(filePath)) {\n throw new Error(`Missing file at path: ${filePath}`);\n }\n fs.appendFileSync(filePath, `${utils_1.toCommandValue(message)}${os.EOL}`, {\n encoding: 'utf8'\n });\n}\nexports.issueCommand = issueCommand;\n//# sourceMappingURL=file-command.js.map","\"use strict\";\n// We use any as a valid input type\n/* eslint-disable @typescript-eslint/no-explicit-any */\nObject.defineProperty(exports, \"__esModule\", { value: true });\n/**\n * Sanitizes an input into a string so it can be passed into issueCommand safely\n * @param input input to sanitize into a string\n */\nfunction toCommandValue(input) {\n if (input === null || input === undefined) {\n return '';\n }\n else if (typeof input === 'string' || input instanceof String) {\n return input;\n }\n return JSON.stringify(input);\n}\nexports.toCommandValue = toCommandValue;\n//# sourceMappingURL=utils.js.map","let wait = function (milliseconds) {\r\n return new Promise((resolve) => {\r\n if (typeof milliseconds !== 'number') {\r\n throw new Error('milliseconds not a number');\r\n }\r\n setTimeout(() => resolve(\"done!\"), milliseconds)\r\n });\r\n};\r\n\r\nmodule.exports = wait;\r\n","module.exports = require(\"crypto\");;","module.exports = require(\"fs\");;","module.exports = require(\"https\");;","module.exports = require(\"os\");;","module.exports = require(\"path\");;","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tif(__webpack_module_cache__[moduleId]) {\n\t\treturn __webpack_module_cache__[moduleId].exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\tvar threw = true;\n\ttry {\n\t\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\t\tthrew = false;\n\t} finally {\n\t\tif(threw) delete __webpack_module_cache__[moduleId];\n\t}\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","\n__webpack_require__.ab = __dirname + \"/\";","// module exports must be returned from runtime so entry inlining is disabled\n// startup\n// Load entry module and return exports\nreturn __webpack_require__(932);\n"],"mappings":";;;;;;;AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;;ACzMA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;;AC/EA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;;AC9OA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;;AC7BA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;ACnBA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;A;;;;;;ACXA;AACA;A;;;;;;ACDA;AACA;A;;;;;;ACDA;AACA;A;;;;;;ACDA;AACA;A;;;;;;ACDA;AACA;A;;;;ACDA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;AC5BA;AACA;ACDA;AACA;AACA;AACA;;A","sourceRoot":""} -------------------------------------------------------------------------------- /client/dist/sourcemap-register.js: -------------------------------------------------------------------------------- 1 | module.exports=(()=>{var e={650:e=>{var r=Object.prototype.toString;var n=typeof Buffer.alloc==="function"&&typeof Buffer.allocUnsafe==="function"&&typeof Buffer.from==="function";function isArrayBuffer(e){return r.call(e).slice(8,-1)==="ArrayBuffer"}function fromArrayBuffer(e,r,t){r>>>=0;var o=e.byteLength-r;if(o<0){throw new RangeError("'offset' is out of bounds")}if(t===undefined){t=o}else{t>>>=0;if(t>o){throw new RangeError("'length' is out of bounds")}}return n?Buffer.from(e.slice(r,r+t)):new Buffer(new Uint8Array(e.slice(r,r+t)))}function fromString(e,r){if(typeof r!=="string"||r===""){r="utf8"}if(!Buffer.isEncoding(r)){throw new TypeError('"encoding" must be a valid string encoding')}return n?Buffer.from(e,r):new Buffer(e,r)}function bufferFrom(e,r,t){if(typeof e==="number"){throw new TypeError('"value" argument must not be a number')}if(isArrayBuffer(e)){return fromArrayBuffer(e,r,t)}if(typeof e==="string"){return fromString(e,r)}return n?Buffer.from(e):new Buffer(e)}e.exports=bufferFrom},645:(e,r,n)=>{n(284).install()},284:(e,r,n)=>{var t=n(596).SourceMapConsumer;var o=n(622);var i;try{i=n(747);if(!i.existsSync||!i.readFileSync){i=null}}catch(e){}var u=n(650);var s=false;var a=false;var l=false;var c="auto";var f={};var p={};var g=/^data:application\/json[^,]+base64,/;var h=[];var d=[];function isInBrowser(){if(c==="browser")return true;if(c==="node")return false;return typeof window!=="undefined"&&typeof XMLHttpRequest==="function"&&!(window.require&&window.module&&window.process&&window.process.type==="renderer")}function hasGlobalProcessEventEmitter(){return typeof process==="object"&&process!==null&&typeof process.on==="function"}function handlerExec(e){return function(r){for(var n=0;n"}var n=this.getLineNumber();if(n!=null){r+=":"+n;var t=this.getColumnNumber();if(t){r+=":"+t}}}var o="";var i=this.getFunctionName();var u=true;var s=this.isConstructor();var a=!(this.isToplevel()||s);if(a){var l=this.getTypeName();if(l==="[object Object]"){l="null"}var c=this.getMethodName();if(i){if(l&&i.indexOf(l)!=0){o+=l+"."}o+=i;if(c&&i.indexOf("."+c)!=i.length-c.length-1){o+=" [as "+c+"]"}}else{o+=l+"."+(c||"")}}else if(s){o+="new "+(i||"")}else if(i){o+=i}else{o+=r;u=false}if(u){o+=" ("+r+")"}return o}function cloneCallSite(e){var r={};Object.getOwnPropertyNames(Object.getPrototypeOf(e)).forEach(function(n){r[n]=/^(?:is|get)/.test(n)?function(){return e[n].call(e)}:e[n]});r.toString=CallSiteToString;return r}function wrapCallSite(e){if(e.isNative()){return e}var r=e.getFileName()||e.getScriptNameOrSourceURL();if(r){var n=e.getLineNumber();var t=e.getColumnNumber()-1;var o=62;if(n===1&&t>o&&!isInBrowser()&&!e.isEval()){t-=o}var i=mapSourcePosition({source:r,line:n,column:t});e=cloneCallSite(e);var u=e.getFunctionName;e.getFunctionName=function(){return i.name||u()};e.getFileName=function(){return i.source};e.getLineNumber=function(){return i.line};e.getColumnNumber=function(){return i.column+1};e.getScriptNameOrSourceURL=function(){return i.source};return e}var s=e.isEval()&&e.getEvalOrigin();if(s){s=mapEvalOrigin(s);e=cloneCallSite(e);e.getEvalOrigin=function(){return s};return e}return e}function prepareStackTrace(e,r){if(l){f={};p={}}return e+r.map(function(e){return"\n at "+wrapCallSite(e)}).join("")}function getErrorSource(e){var r=/\n at [^(]+ \((.*):(\d+):(\d+)\)/.exec(e.stack);if(r){var n=r[1];var t=+r[2];var o=+r[3];var u=f[n];if(!u&&i&&i.existsSync(n)){try{u=i.readFileSync(n,"utf8")}catch(e){u=""}}if(u){var s=u.split(/(?:\r\n|\r|\n)/)[t-1];if(s){return n+":"+t+"\n"+s+"\n"+new Array(o).join(" ")+"^"}}}return null}function printErrorAndExit(e){var r=getErrorSource(e);if(process.stderr._handle&&process.stderr._handle.setBlocking){process.stderr._handle.setBlocking(true)}if(r){console.error();console.error(r)}console.error(e.stack);process.exit(1)}function shimEmitUncaughtException(){var e=process.emit;process.emit=function(r){if(r==="uncaughtException"){var n=arguments[1]&&arguments[1].stack;var t=this.listeners(r).length>0;if(n&&!t){return printErrorAndExit(arguments[1])}}return e.apply(this,arguments)}}var S=h.slice(0);var m=d.slice(0);r.wrapCallSite=wrapCallSite;r.getErrorSource=getErrorSource;r.mapSourcePosition=mapSourcePosition;r.retrieveSourceMap=_;r.install=function(e){e=e||{};if(e.environment){c=e.environment;if(["node","browser","auto"].indexOf(c)===-1){throw new Error("environment "+c+" was unknown. Available options are {auto, browser, node}")}}if(e.retrieveFile){if(e.overrideRetrieveFile){h.length=0}h.unshift(e.retrieveFile)}if(e.retrieveSourceMap){if(e.overrideRetrieveSourceMap){d.length=0}d.unshift(e.retrieveSourceMap)}if(e.hookRequire&&!isInBrowser()){var r;try{r=n(282)}catch(e){}var t=r.prototype._compile;if(!t.__sourceMapSupport){r.prototype._compile=function(e,r){f[r]=e;p[r]=undefined;return t.call(this,e,r)};r.prototype._compile.__sourceMapSupport=true}}if(!l){l="emptyCacheBetweenOperations"in e?e.emptyCacheBetweenOperations:false}if(!s){s=true;Error.prepareStackTrace=prepareStackTrace}if(!a){var o="handleUncaughtExceptions"in e?e.handleUncaughtExceptions:true;if(o&&hasGlobalProcessEventEmitter()){a=true;shimEmitUncaughtException()}}};r.resetRetrieveHandlers=function(){h.length=0;d.length=0;h=S.slice(0);d=m.slice(0)}},837:(e,r,n)=>{var t=n(983);var o=Object.prototype.hasOwnProperty;var i=typeof Map!=="undefined";function ArraySet(){this._array=[];this._set=i?new Map:Object.create(null)}ArraySet.fromArray=function ArraySet_fromArray(e,r){var n=new ArraySet;for(var t=0,o=e.length;t=0){return r}}else{var n=t.toSetString(e);if(o.call(this._set,n)){return this._set[n]}}throw new Error('"'+e+'" is not in the set.')};ArraySet.prototype.at=function ArraySet_at(e){if(e>=0&&e{var t=n(537);var o=5;var i=1<>1;return r?-n:n}r.encode=function base64VLQ_encode(e){var r="";var n;var i=toVLQSigned(e);do{n=i&u;i>>>=o;if(i>0){n|=s}r+=t.encode(n)}while(i>0);return r};r.decode=function base64VLQ_decode(e,r,n){var i=e.length;var a=0;var l=0;var c,f;do{if(r>=i){throw new Error("Expected more digits in base 64 VLQ value.")}f=t.decode(e.charCodeAt(r++));if(f===-1){throw new Error("Invalid base64 digit: "+e.charAt(r-1))}c=!!(f&s);f&=u;a=a+(f<{var n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".split("");r.encode=function(e){if(0<=e&&e{r.GREATEST_LOWER_BOUND=1;r.LEAST_UPPER_BOUND=2;function recursiveSearch(e,n,t,o,i,u){var s=Math.floor((n-e)/2)+e;var a=i(t,o[s],true);if(a===0){return s}else if(a>0){if(n-s>1){return recursiveSearch(s,n,t,o,i,u)}if(u==r.LEAST_UPPER_BOUND){return n1){return recursiveSearch(e,s,t,o,i,u)}if(u==r.LEAST_UPPER_BOUND){return s}else{return e<0?-1:e}}}r.search=function search(e,n,t,o){if(n.length===0){return-1}var i=recursiveSearch(-1,n.length,e,n,t,o||r.GREATEST_LOWER_BOUND);if(i<0){return-1}while(i-1>=0){if(t(n[i],n[i-1],true)!==0){break}--i}return i}},740:(e,r,n)=>{var t=n(983);function generatedPositionAfter(e,r){var n=e.generatedLine;var o=r.generatedLine;var i=e.generatedColumn;var u=r.generatedColumn;return o>n||o==n&&u>=i||t.compareByGeneratedPositionsInflated(e,r)<=0}function MappingList(){this._array=[];this._sorted=true;this._last={generatedLine:-1,generatedColumn:0}}MappingList.prototype.unsortedForEach=function MappingList_forEach(e,r){this._array.forEach(e,r)};MappingList.prototype.add=function MappingList_add(e){if(generatedPositionAfter(this._last,e)){this._last=e;this._array.push(e)}else{this._sorted=false;this._array.push(e)}};MappingList.prototype.toArray=function MappingList_toArray(){if(!this._sorted){this._array.sort(t.compareByGeneratedPositionsInflated);this._sorted=true}return this._array};r.H=MappingList},226:(e,r)=>{function swap(e,r,n){var t=e[r];e[r]=e[n];e[n]=t}function randomIntInRange(e,r){return Math.round(e+Math.random()*(r-e))}function doQuickSort(e,r,n,t){if(n{var t;var o=n(983);var i=n(164);var u=n(837).I;var s=n(215);var a=n(226).U;function SourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}return n.sections!=null?new IndexedSourceMapConsumer(n,r):new BasicSourceMapConsumer(n,r)}SourceMapConsumer.fromSourceMap=function(e,r){return BasicSourceMapConsumer.fromSourceMap(e,r)};SourceMapConsumer.prototype._version=3;SourceMapConsumer.prototype.__generatedMappings=null;Object.defineProperty(SourceMapConsumer.prototype,"_generatedMappings",{configurable:true,enumerable:true,get:function(){if(!this.__generatedMappings){this._parseMappings(this._mappings,this.sourceRoot)}return this.__generatedMappings}});SourceMapConsumer.prototype.__originalMappings=null;Object.defineProperty(SourceMapConsumer.prototype,"_originalMappings",{configurable:true,enumerable:true,get:function(){if(!this.__originalMappings){this._parseMappings(this._mappings,this.sourceRoot)}return this.__originalMappings}});SourceMapConsumer.prototype._charIsMappingSeparator=function SourceMapConsumer_charIsMappingSeparator(e,r){var n=e.charAt(r);return n===";"||n===","};SourceMapConsumer.prototype._parseMappings=function SourceMapConsumer_parseMappings(e,r){throw new Error("Subclasses must implement _parseMappings")};SourceMapConsumer.GENERATED_ORDER=1;SourceMapConsumer.ORIGINAL_ORDER=2;SourceMapConsumer.GREATEST_LOWER_BOUND=1;SourceMapConsumer.LEAST_UPPER_BOUND=2;SourceMapConsumer.prototype.eachMapping=function SourceMapConsumer_eachMapping(e,r,n){var t=r||null;var i=n||SourceMapConsumer.GENERATED_ORDER;var u;switch(i){case SourceMapConsumer.GENERATED_ORDER:u=this._generatedMappings;break;case SourceMapConsumer.ORIGINAL_ORDER:u=this._originalMappings;break;default:throw new Error("Unknown order of iteration.")}var s=this.sourceRoot;u.map(function(e){var r=e.source===null?null:this._sources.at(e.source);r=o.computeSourceURL(s,r,this._sourceMapURL);return{source:r,generatedLine:e.generatedLine,generatedColumn:e.generatedColumn,originalLine:e.originalLine,originalColumn:e.originalColumn,name:e.name===null?null:this._names.at(e.name)}},this).forEach(e,t)};SourceMapConsumer.prototype.allGeneratedPositionsFor=function SourceMapConsumer_allGeneratedPositionsFor(e){var r=o.getArg(e,"line");var n={source:o.getArg(e,"source"),originalLine:r,originalColumn:o.getArg(e,"column",0)};n.source=this._findSourceIndex(n.source);if(n.source<0){return[]}var t=[];var u=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",o.compareByOriginalPositions,i.LEAST_UPPER_BOUND);if(u>=0){var s=this._originalMappings[u];if(e.column===undefined){var a=s.originalLine;while(s&&s.originalLine===a){t.push({line:o.getArg(s,"generatedLine",null),column:o.getArg(s,"generatedColumn",null),lastColumn:o.getArg(s,"lastGeneratedColumn",null)});s=this._originalMappings[++u]}}else{var l=s.originalColumn;while(s&&s.originalLine===r&&s.originalColumn==l){t.push({line:o.getArg(s,"generatedLine",null),column:o.getArg(s,"generatedColumn",null),lastColumn:o.getArg(s,"lastGeneratedColumn",null)});s=this._originalMappings[++u]}}}return t};r.SourceMapConsumer=SourceMapConsumer;function BasicSourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}var t=o.getArg(n,"version");var i=o.getArg(n,"sources");var s=o.getArg(n,"names",[]);var a=o.getArg(n,"sourceRoot",null);var l=o.getArg(n,"sourcesContent",null);var c=o.getArg(n,"mappings");var f=o.getArg(n,"file",null);if(t!=this._version){throw new Error("Unsupported version: "+t)}if(a){a=o.normalize(a)}i=i.map(String).map(o.normalize).map(function(e){return a&&o.isAbsolute(a)&&o.isAbsolute(e)?o.relative(a,e):e});this._names=u.fromArray(s.map(String),true);this._sources=u.fromArray(i,true);this._absoluteSources=this._sources.toArray().map(function(e){return o.computeSourceURL(a,e,r)});this.sourceRoot=a;this.sourcesContent=l;this._mappings=c;this._sourceMapURL=r;this.file=f}BasicSourceMapConsumer.prototype=Object.create(SourceMapConsumer.prototype);BasicSourceMapConsumer.prototype.consumer=SourceMapConsumer;BasicSourceMapConsumer.prototype._findSourceIndex=function(e){var r=e;if(this.sourceRoot!=null){r=o.relative(this.sourceRoot,r)}if(this._sources.has(r)){return this._sources.indexOf(r)}var n;for(n=0;n1){_.source=l+m[1];l+=m[1];_.originalLine=i+m[2];i=_.originalLine;_.originalLine+=1;_.originalColumn=u+m[3];u=_.originalColumn;if(m.length>4){_.name=c+m[4];c+=m[4]}}v.push(_);if(typeof _.originalLine==="number"){d.push(_)}}}a(v,o.compareByGeneratedPositionsDeflated);this.__generatedMappings=v;a(d,o.compareByOriginalPositions);this.__originalMappings=d};BasicSourceMapConsumer.prototype._findMapping=function SourceMapConsumer_findMapping(e,r,n,t,o,u){if(e[n]<=0){throw new TypeError("Line must be greater than or equal to 1, got "+e[n])}if(e[t]<0){throw new TypeError("Column must be greater than or equal to 0, got "+e[t])}return i.search(e,r,o,u)};BasicSourceMapConsumer.prototype.computeColumnSpans=function SourceMapConsumer_computeColumnSpans(){for(var e=0;e=0){var t=this._generatedMappings[n];if(t.generatedLine===r.generatedLine){var i=o.getArg(t,"source",null);if(i!==null){i=this._sources.at(i);i=o.computeSourceURL(this.sourceRoot,i,this._sourceMapURL)}var u=o.getArg(t,"name",null);if(u!==null){u=this._names.at(u)}return{source:i,line:o.getArg(t,"originalLine",null),column:o.getArg(t,"originalColumn",null),name:u}}}return{source:null,line:null,column:null,name:null}};BasicSourceMapConsumer.prototype.hasContentsOfAllSources=function BasicSourceMapConsumer_hasContentsOfAllSources(){if(!this.sourcesContent){return false}return this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some(function(e){return e==null})};BasicSourceMapConsumer.prototype.sourceContentFor=function SourceMapConsumer_sourceContentFor(e,r){if(!this.sourcesContent){return null}var n=this._findSourceIndex(e);if(n>=0){return this.sourcesContent[n]}var t=e;if(this.sourceRoot!=null){t=o.relative(this.sourceRoot,t)}var i;if(this.sourceRoot!=null&&(i=o.urlParse(this.sourceRoot))){var u=t.replace(/^file:\/\//,"");if(i.scheme=="file"&&this._sources.has(u)){return this.sourcesContent[this._sources.indexOf(u)]}if((!i.path||i.path=="/")&&this._sources.has("/"+t)){return this.sourcesContent[this._sources.indexOf("/"+t)]}}if(r){return null}else{throw new Error('"'+t+'" is not in the SourceMap.')}};BasicSourceMapConsumer.prototype.generatedPositionFor=function SourceMapConsumer_generatedPositionFor(e){var r=o.getArg(e,"source");r=this._findSourceIndex(r);if(r<0){return{line:null,column:null,lastColumn:null}}var n={source:r,originalLine:o.getArg(e,"line"),originalColumn:o.getArg(e,"column")};var t=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",o.compareByOriginalPositions,o.getArg(e,"bias",SourceMapConsumer.GREATEST_LOWER_BOUND));if(t>=0){var i=this._originalMappings[t];if(i.source===n.source){return{line:o.getArg(i,"generatedLine",null),column:o.getArg(i,"generatedColumn",null),lastColumn:o.getArg(i,"lastGeneratedColumn",null)}}}return{line:null,column:null,lastColumn:null}};t=BasicSourceMapConsumer;function IndexedSourceMapConsumer(e,r){var n=e;if(typeof e==="string"){n=o.parseSourceMapInput(e)}var t=o.getArg(n,"version");var i=o.getArg(n,"sections");if(t!=this._version){throw new Error("Unsupported version: "+t)}this._sources=new u;this._names=new u;var s={line:-1,column:0};this._sections=i.map(function(e){if(e.url){throw new Error("Support for url field in sections not implemented.")}var n=o.getArg(e,"offset");var t=o.getArg(n,"line");var i=o.getArg(n,"column");if(t{var t=n(215);var o=n(983);var i=n(837).I;var u=n(740).H;function SourceMapGenerator(e){if(!e){e={}}this._file=o.getArg(e,"file",null);this._sourceRoot=o.getArg(e,"sourceRoot",null);this._skipValidation=o.getArg(e,"skipValidation",false);this._sources=new i;this._names=new i;this._mappings=new u;this._sourcesContents=null}SourceMapGenerator.prototype._version=3;SourceMapGenerator.fromSourceMap=function SourceMapGenerator_fromSourceMap(e){var r=e.sourceRoot;var n=new SourceMapGenerator({file:e.file,sourceRoot:r});e.eachMapping(function(e){var t={generated:{line:e.generatedLine,column:e.generatedColumn}};if(e.source!=null){t.source=e.source;if(r!=null){t.source=o.relative(r,t.source)}t.original={line:e.originalLine,column:e.originalColumn};if(e.name!=null){t.name=e.name}}n.addMapping(t)});e.sources.forEach(function(t){var i=t;if(r!==null){i=o.relative(r,t)}if(!n._sources.has(i)){n._sources.add(i)}var u=e.sourceContentFor(t);if(u!=null){n.setSourceContent(t,u)}});return n};SourceMapGenerator.prototype.addMapping=function SourceMapGenerator_addMapping(e){var r=o.getArg(e,"generated");var n=o.getArg(e,"original",null);var t=o.getArg(e,"source",null);var i=o.getArg(e,"name",null);if(!this._skipValidation){this._validateMapping(r,n,t,i)}if(t!=null){t=String(t);if(!this._sources.has(t)){this._sources.add(t)}}if(i!=null){i=String(i);if(!this._names.has(i)){this._names.add(i)}}this._mappings.add({generatedLine:r.line,generatedColumn:r.column,originalLine:n!=null&&n.line,originalColumn:n!=null&&n.column,source:t,name:i})};SourceMapGenerator.prototype.setSourceContent=function SourceMapGenerator_setSourceContent(e,r){var n=e;if(this._sourceRoot!=null){n=o.relative(this._sourceRoot,n)}if(r!=null){if(!this._sourcesContents){this._sourcesContents=Object.create(null)}this._sourcesContents[o.toSetString(n)]=r}else if(this._sourcesContents){delete this._sourcesContents[o.toSetString(n)];if(Object.keys(this._sourcesContents).length===0){this._sourcesContents=null}}};SourceMapGenerator.prototype.applySourceMap=function SourceMapGenerator_applySourceMap(e,r,n){var t=r;if(r==null){if(e.file==null){throw new Error("SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, "+'or the source map\'s "file" property. Both were omitted.')}t=e.file}var u=this._sourceRoot;if(u!=null){t=o.relative(u,t)}var s=new i;var a=new i;this._mappings.unsortedForEach(function(r){if(r.source===t&&r.originalLine!=null){var i=e.originalPositionFor({line:r.originalLine,column:r.originalColumn});if(i.source!=null){r.source=i.source;if(n!=null){r.source=o.join(n,r.source)}if(u!=null){r.source=o.relative(u,r.source)}r.originalLine=i.line;r.originalColumn=i.column;if(i.name!=null){r.name=i.name}}}var l=r.source;if(l!=null&&!s.has(l)){s.add(l)}var c=r.name;if(c!=null&&!a.has(c)){a.add(c)}},this);this._sources=s;this._names=a;e.sources.forEach(function(r){var t=e.sourceContentFor(r);if(t!=null){if(n!=null){r=o.join(n,r)}if(u!=null){r=o.relative(u,r)}this.setSourceContent(r,t)}},this)};SourceMapGenerator.prototype._validateMapping=function SourceMapGenerator_validateMapping(e,r,n,t){if(r&&typeof r.line!=="number"&&typeof r.column!=="number"){throw new Error("original.line and original.column are not numbers -- you probably meant to omit "+"the original mapping entirely and only map the generated position. If so, pass "+"null for the original mapping instead of an object with empty or null values.")}if(e&&"line"in e&&"column"in e&&e.line>0&&e.column>=0&&!r&&!n&&!t){return}else if(e&&"line"in e&&"column"in e&&r&&"line"in r&&"column"in r&&e.line>0&&e.column>=0&&r.line>0&&r.column>=0&&n){return}else{throw new Error("Invalid mapping: "+JSON.stringify({generated:e,source:n,original:r,name:t}))}};SourceMapGenerator.prototype._serializeMappings=function SourceMapGenerator_serializeMappings(){var e=0;var r=1;var n=0;var i=0;var u=0;var s=0;var a="";var l;var c;var f;var p;var g=this._mappings.toArray();for(var h=0,d=g.length;h0){if(!o.compareByGeneratedPositionsInflated(c,g[h-1])){continue}l+=","}}l+=t.encode(c.generatedColumn-e);e=c.generatedColumn;if(c.source!=null){p=this._sources.indexOf(c.source);l+=t.encode(p-s);s=p;l+=t.encode(c.originalLine-1-i);i=c.originalLine-1;l+=t.encode(c.originalColumn-n);n=c.originalColumn;if(c.name!=null){f=this._names.indexOf(c.name);l+=t.encode(f-u);u=f}}a+=l}return a};SourceMapGenerator.prototype._generateSourcesContent=function SourceMapGenerator_generateSourcesContent(e,r){return e.map(function(e){if(!this._sourcesContents){return null}if(r!=null){e=o.relative(r,e)}var n=o.toSetString(e);return Object.prototype.hasOwnProperty.call(this._sourcesContents,n)?this._sourcesContents[n]:null},this)};SourceMapGenerator.prototype.toJSON=function SourceMapGenerator_toJSON(){var e={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};if(this._file!=null){e.file=this._file}if(this._sourceRoot!=null){e.sourceRoot=this._sourceRoot}if(this._sourcesContents){e.sourcesContent=this._generateSourcesContent(e.sources,e.sourceRoot)}return e};SourceMapGenerator.prototype.toString=function SourceMapGenerator_toString(){return JSON.stringify(this.toJSON())};r.h=SourceMapGenerator},990:(e,r,n)=>{var t;var o=n(341).h;var i=n(983);var u=/(\r?\n)/;var s=10;var a="$$$isSourceNode$$$";function SourceNode(e,r,n,t,o){this.children=[];this.sourceContents={};this.line=e==null?null:e;this.column=r==null?null:r;this.source=n==null?null:n;this.name=o==null?null:o;this[a]=true;if(t!=null)this.add(t)}SourceNode.fromStringWithSourceMap=function SourceNode_fromStringWithSourceMap(e,r,n){var t=new SourceNode;var o=e.split(u);var s=0;var a=function(){var e=getNextLine();var r=getNextLine()||"";return e+r;function getNextLine(){return s=0;r--){this.prepend(e[r])}}else if(e[a]||typeof e==="string"){this.children.unshift(e)}else{throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e)}return this};SourceNode.prototype.walk=function SourceNode_walk(e){var r;for(var n=0,t=this.children.length;n0){r=[];for(n=0;n{function getArg(e,r,n){if(r in e){return e[r]}else if(arguments.length===3){return n}else{throw new Error('"'+r+'" is a required argument.')}}r.getArg=getArg;var n=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/;var t=/^data:.+\,.+$/;function urlParse(e){var r=e.match(n);if(!r){return null}return{scheme:r[1],auth:r[2],host:r[3],port:r[4],path:r[5]}}r.urlParse=urlParse;function urlGenerate(e){var r="";if(e.scheme){r+=e.scheme+":"}r+="//";if(e.auth){r+=e.auth+"@"}if(e.host){r+=e.host}if(e.port){r+=":"+e.port}if(e.path){r+=e.path}return r}r.urlGenerate=urlGenerate;function normalize(e){var n=e;var t=urlParse(e);if(t){if(!t.path){return e}n=t.path}var o=r.isAbsolute(n);var i=n.split(/\/+/);for(var u,s=0,a=i.length-1;a>=0;a--){u=i[a];if(u==="."){i.splice(a,1)}else if(u===".."){s++}else if(s>0){if(u===""){i.splice(a+1,s);s=0}else{i.splice(a,2);s--}}}n=i.join("/");if(n===""){n=o?"/":"."}if(t){t.path=n;return urlGenerate(t)}return n}r.normalize=normalize;function join(e,r){if(e===""){e="."}if(r===""){r="."}var n=urlParse(r);var o=urlParse(e);if(o){e=o.path||"/"}if(n&&!n.scheme){if(o){n.scheme=o.scheme}return urlGenerate(n)}if(n||r.match(t)){return r}if(o&&!o.host&&!o.path){o.host=r;return urlGenerate(o)}var i=r.charAt(0)==="/"?r:normalize(e.replace(/\/+$/,"")+"/"+r);if(o){o.path=i;return urlGenerate(o)}return i}r.join=join;r.isAbsolute=function(e){return e.charAt(0)==="/"||n.test(e)};function relative(e,r){if(e===""){e="."}e=e.replace(/\/$/,"");var n=0;while(r.indexOf(e+"/")!==0){var t=e.lastIndexOf("/");if(t<0){return r}e=e.slice(0,t);if(e.match(/^([^\/]+:\/)?\/*$/)){return r}++n}return Array(n+1).join("../")+r.substr(e.length+1)}r.relative=relative;var o=function(){var e=Object.create(null);return!("__proto__"in e)}();function identity(e){return e}function toSetString(e){if(isProtoString(e)){return"$"+e}return e}r.toSetString=o?identity:toSetString;function fromSetString(e){if(isProtoString(e)){return e.slice(1)}return e}r.fromSetString=o?identity:fromSetString;function isProtoString(e){if(!e){return false}var r=e.length;if(r<9){return false}if(e.charCodeAt(r-1)!==95||e.charCodeAt(r-2)!==95||e.charCodeAt(r-3)!==111||e.charCodeAt(r-4)!==116||e.charCodeAt(r-5)!==111||e.charCodeAt(r-6)!==114||e.charCodeAt(r-7)!==112||e.charCodeAt(r-8)!==95||e.charCodeAt(r-9)!==95){return false}for(var n=r-10;n>=0;n--){if(e.charCodeAt(n)!==36){return false}}return true}function compareByOriginalPositions(e,r,n){var t=strcmp(e.source,r.source);if(t!==0){return t}t=e.originalLine-r.originalLine;if(t!==0){return t}t=e.originalColumn-r.originalColumn;if(t!==0||n){return t}t=e.generatedColumn-r.generatedColumn;if(t!==0){return t}t=e.generatedLine-r.generatedLine;if(t!==0){return t}return strcmp(e.name,r.name)}r.compareByOriginalPositions=compareByOriginalPositions;function compareByGeneratedPositionsDeflated(e,r,n){var t=e.generatedLine-r.generatedLine;if(t!==0){return t}t=e.generatedColumn-r.generatedColumn;if(t!==0||n){return t}t=strcmp(e.source,r.source);if(t!==0){return t}t=e.originalLine-r.originalLine;if(t!==0){return t}t=e.originalColumn-r.originalColumn;if(t!==0){return t}return strcmp(e.name,r.name)}r.compareByGeneratedPositionsDeflated=compareByGeneratedPositionsDeflated;function strcmp(e,r){if(e===r){return 0}if(e===null){return 1}if(r===null){return-1}if(e>r){return 1}return-1}function compareByGeneratedPositionsInflated(e,r){var n=e.generatedLine-r.generatedLine;if(n!==0){return n}n=e.generatedColumn-r.generatedColumn;if(n!==0){return n}n=strcmp(e.source,r.source);if(n!==0){return n}n=e.originalLine-r.originalLine;if(n!==0){return n}n=e.originalColumn-r.originalColumn;if(n!==0){return n}return strcmp(e.name,r.name)}r.compareByGeneratedPositionsInflated=compareByGeneratedPositionsInflated;function parseSourceMapInput(e){return JSON.parse(e.replace(/^\)]}'[^\n]*\n/,""))}r.parseSourceMapInput=parseSourceMapInput;function computeSourceURL(e,r,n){r=r||"";if(e){if(e[e.length-1]!=="/"&&r[0]!=="/"){e+="/"}r=e+r}if(n){var t=urlParse(n);if(!t){throw new Error("sourceMapURL could not be parsed")}if(t.path){var o=t.path.lastIndexOf("/");if(o>=0){t.path=t.path.substring(0,o+1)}}r=join(urlGenerate(t),r)}return normalize(r)}r.computeSourceURL=computeSourceURL},596:(e,r,n)=>{n(341).h;r.SourceMapConsumer=n(327).SourceMapConsumer;n(990)},747:e=>{"use strict";e.exports=require("fs")},282:e=>{"use strict";e.exports=require("module")},622:e=>{"use strict";e.exports=require("path")}};var r={};function __webpack_require__(n){if(r[n]){return r[n].exports}var t=r[n]={exports:{}};var o=true;try{e[n](t,t.exports,__webpack_require__);o=false}finally{if(o)delete r[n]}return t.exports}__webpack_require__.ab=__dirname+"/";return __webpack_require__(645)})(); -------------------------------------------------------------------------------- /client/index.js: -------------------------------------------------------------------------------- 1 | const core = require('@actions/core'); 2 | const wait = require('./wait'); 3 | const crypto = require('crypto') 4 | 5 | 6 | const getItem = (itemName, defaultStr = null) => { 7 | const res = core.getInput(itemName); 8 | if (res.length == 0) { 9 | if (defaultStr == null) { 10 | throw `${itemName} not set`; 11 | } 12 | return defaultStr; 13 | } 14 | return res; 15 | } 16 | 17 | 18 | function make_api_request(action, garo_url, github_token, github_commit, postObj, dryrun=false) { 19 | const https = require('https'); 20 | const api_uri = new URL(garo_url); 21 | 22 | const current_time = Math.floor(new Date().getTime() / 1000).toString(); 23 | postObj.time = current_time; 24 | 25 | postObj.dryrun = dryrun; 26 | 27 | if (action == "start") 28 | { 29 | console.log("Sending start action to API"); 30 | } 31 | else if (action == "state") 32 | { 33 | if ("name" in postObj) { 34 | console.log("Sending state action to API"); 35 | } else { 36 | throw "name missing"; 37 | } 38 | } 39 | else 40 | { 41 | return false; 42 | } 43 | 44 | const data = JSON.stringify(postObj); 45 | const signature = crypto.createHmac('sha512', github_token).update(data).digest('hex'); 46 | const options = { 47 | hostname: api_uri.hostname, 48 | port: 443, 49 | path: `/${action}`, 50 | method: 'POST', 51 | headers: { 52 | 'X-GitHub-Token': github_token, 53 | 'X-GitHub-Signature': signature, 54 | 'X-GitHub-CommitSHA': github_commit, 55 | 'Content-Type': 'application/json', 56 | 'Content-Length': data.length 57 | } 58 | } 59 | 60 | return new Promise((resolve, reject) => { 61 | const req = https.request(options, res => { 62 | console.log(`statusCode: ${res.statusCode}`) 63 | if (res.statusCode != 200) { 64 | resolve({"runnerstate": "Non-200"}); 65 | } 66 | 67 | res.on('data', d => { 68 | const data_resp = d.toString() 69 | if (data_resp != "error") { 70 | resolve(JSON.parse(data_resp)); 71 | } else { 72 | reject("error response"); 73 | } 74 | }) 75 | }) 76 | 77 | req.on('error', error => { 78 | reject(error); 79 | }); 80 | 81 | req.write(data); 82 | req.end(); 83 | }); 84 | } 85 | 86 | 87 | async function run() { 88 | try { 89 | const wait_for_start = getItem('WAIT_FOR_START', "true"); 90 | const action = getItem('ACTION', "start"); 91 | const garo_url = getItem('GARO_URL'); 92 | const github_token = getItem('GITHUB_TOKEN'); 93 | const github_commit = getItem('GITHUB_COMMIT'); 94 | const dryrun = (getItem('DRYRUN', 'false') == 'true'); 95 | 96 | let postObj = { 97 | repo: getItem('REPO'), 98 | account_id: getItem('RUNNER_ACID'), 99 | external_id: getItem('RUNNER_EXID'), 100 | type: getItem('RUNNER_TYPE', "spot"), 101 | region: getItem('RUNNER_REGION', "eu-west-2"), 102 | timeout: getItem('RUNNER_TIMEOUT', "3600"), 103 | } 104 | 105 | const rLabel = getItem('RUNNER_LABEL', ""); 106 | if (rLabel != "") { 107 | postObj["label"] = rLabel; 108 | } 109 | 110 | const rName = getItem('RUNNER_NAME', ""); 111 | if (rName != "") { 112 | postObj["name"] = rName; 113 | } 114 | 115 | const rSub = getItem('RUNNER_SUBNET', ""); 116 | if (rSub != "") { 117 | postObj["subnet"] = rSub; 118 | } 119 | 120 | const rSg = getItem('RUNNER_SG', ""); 121 | if (rSg != "") { 122 | postObj["sg"] = rSg; 123 | } 124 | 125 | if (action == "start") { 126 | const result = await make_api_request( 127 | "start", 128 | garo_url, 129 | github_token, 130 | github_commit, 131 | postObj, 132 | dryrun 133 | ) 134 | 135 | if (result["runnerstate"] == "Non-200") { 136 | throw 'Could not start the runner'; 137 | } 138 | 139 | console.log("wait_for_start:", wait_for_start); 140 | 141 | if (result["runnerstate"] == "started") { 142 | console.log("Runner already started:", result); 143 | 144 | core.setOutput("name", result["name"]); 145 | core.setOutput("runnerstate", result["runnerstate"]); 146 | core.setOutput("uniqueid", result["uniqueid"]); 147 | } 148 | 149 | if (result["runnerstate"] == "starting" && wait_for_start) { 150 | console.log("Runner starting:", result); 151 | postObj["name"] = result["name"]; 152 | 153 | 154 | var state_result = {}; 155 | let i = 0; 156 | while (i < 20) { 157 | i++; 158 | console.log(`Starting wait: ${i}`) 159 | await wait(15000); 160 | 161 | state_result = await make_api_request( 162 | "state", 163 | garo_url, 164 | github_token, 165 | github_commit, 166 | postObj, 167 | dryrun 168 | ); 169 | 170 | console.log(state_result); 171 | 172 | if (state_result["runnerstate"] == "started") { 173 | core.setOutput("name", result["name"]); 174 | core.setOutput("runnerstate", result["runnerstate"]); 175 | core.setOutput("uniqueid", result["uniqueid"]); 176 | break; 177 | } 178 | } 179 | 180 | if (state_result["runnerstate"] != "started") { 181 | throw 'Runner not started in time'; 182 | } 183 | } 184 | } 185 | 186 | } 187 | catch (error) 188 | { 189 | if (typeof(error) == "object" && "message" in error) 190 | { 191 | core.setFailed(error.message); 192 | } 193 | else 194 | { 195 | core.setFailed(error); 196 | } 197 | } 198 | } 199 | 200 | run(); 201 | -------------------------------------------------------------------------------- /client/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "garo-client", 3 | "version": "1.0.0", 4 | "description": "GitHub Actions Runner Orchestration Client", 5 | "main": "index.js", 6 | "scripts": { 7 | "lint": "eslint index.js", 8 | "prepare": "ncc build index.js -o dist --source-map", 9 | "test": "npm run lint", 10 | "all": "npm run test && npm run prepare" 11 | }, 12 | "author": "alphagov", 13 | "license": "MIT", 14 | "dependencies": { 15 | "@actions/core": "^1.2.6", 16 | "@vercel/ncc": "^0.27.0" 17 | }, 18 | "devDependencies": { 19 | "eslint": "^7.21.0" 20 | }, 21 | "repository": { 22 | "type": "git", 23 | "url": "git+https://github.com/alphagov/github-actions-runner-orchestration.git" 24 | }, 25 | "bugs": { 26 | "url": "https://github.com/alphagov/github-actions-runner-orchestration/issues" 27 | }, 28 | "homepage": "https://github.com/alphagov/github-actions-runner-orchestration#readme" 29 | } 30 | -------------------------------------------------------------------------------- /client/wait.js: -------------------------------------------------------------------------------- 1 | let wait = function (milliseconds) { 2 | return new Promise((resolve) => { 3 | if (typeof milliseconds !== 'number') { 4 | throw new Error('milliseconds not a number'); 5 | } 6 | setTimeout(() => resolve("done!"), milliseconds) 7 | }); 8 | }; 9 | 10 | module.exports = wait; 11 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | ## Development and Deployment 2 | 3 | ### Client Development 4 | 5 | To test and build the `dist` (see reference in [action.yml]), run: 6 | `npm run all` 7 | 8 | ### API Development 9 | 10 | The API runs on Lambda and uses Python 3.8 11 | 12 | - `black` is used for linting 13 | - `doctest` is used for some testing 14 | - `venv` is used for the virtual environment 15 | 16 | The first time, run `make test-python-full` to install `venv`, dependencies and 17 | test the python code. 18 | 19 | Subsequent runs can use `make test-python` to skip installing dependencies. 20 | 21 | There is a _dev_ workspace and API that can be used, see deployment below. 22 | 23 | ### API Deployment 24 | 25 | The API uses Terraform, the code for that is in [garo_terraform]. 26 | 27 | Terraform uses 0.14.7 (specified in the [.terraform-version] file). 28 | 29 | In [garo_terraform] run `tfenv install` and `tfenv use` to automatically pick up 30 | the version file and install the right version. 31 | 32 | [Terraform workspaces] is used for `dev`, `staging` and `prod` environments. 33 | 34 | To deploy dev, do the follow: 35 | 1. in GARO root, run `make build-full` to build the Lambda ZIP 36 | 1. assume the `co-github-action-runner-admin` role: 37 | - `eval $(gds aws co-github-action-runner-admin -e)` 38 | 1. in [garo_terraform] run: 39 | - `terraform init` 40 | - `terraform workspace select dev` 41 | - `terraform apply` 42 | 43 | Deployment of staging and production is done by this workflow: 44 | [../.github/workflows/deploy.yml](../.github/workflows/deploy.yml) 45 | Which does the following: 46 | 1. Gets an `ondemand` runner using the current production API 47 | 1. Builds and deploys the main branch to staging 48 | 1. Gets a `spot` runner using the new staging API 49 | 1. Tests the staging runner 50 | 1. Deploys to production using the same runner from step 1. 51 | 1. Gets a `spot` runner using the new production API 52 | 1. Tests the new production runner 53 | 54 | 55 | [action.yml]: ../client/action.yml#L67 56 | [Terraform workspaces]: https://www.terraform.io/docs/cloud/workspaces/index.html 57 | [garo_terraform]: ../garo_terraform/ 58 | [.terraform-version]: ../garo_terraform/.terraform-version 59 | -------------------------------------------------------------------------------- /garo_terraform/.terraform-version: -------------------------------------------------------------------------------- 1 | 0.14.7 2 | -------------------------------------------------------------------------------- /garo_terraform/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "eu-west-2" 3 | } 4 | -------------------------------------------------------------------------------- /garo_terraform/lambda.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "iam_for_lambda" { 2 | name = "GARO-Role-${terraform.workspace}" 3 | 4 | tags = merge( 5 | var.common_tags, 6 | map( 7 | "Name", "GARO-Role-${terraform.workspace}" 8 | ) 9 | ) 10 | 11 | inline_policy { 12 | name = "GARO-RolePolicy-${terraform.workspace}" 13 | 14 | policy = jsonencode({ 15 | Version = "2012-10-17" 16 | Statement = [ 17 | { 18 | Action = [ 19 | "sts:AssumeRole" 20 | ] 21 | Effect = "Allow" 22 | Resource = "arn:aws:iam::*:role/GitHubRunnerAssumeRole" 23 | }, 24 | { 25 | Action = [ 26 | "iam:PassRole", 27 | "logs:CreateLogGroup", 28 | "logs:CreateLogStream", 29 | "logs:PutLogEvents", 30 | ] 31 | Effect = "Allow" 32 | Resource = "*" 33 | } 34 | ] 35 | }) 36 | } 37 | 38 | assume_role_policy = < bool: 9 | """ 10 | 11 | Checks the commit is accessible from the token 12 | 13 | TODO 14 | 15 | """ 16 | 17 | if not validRepo(repo): 18 | return False 19 | 20 | repo_regex = r"[A-Za-z0-9_. -]+/[A-Za-z0-9_. -]+" 21 | if not re.search(repo_regex, repo): 22 | return False 23 | 24 | alphanumeric_regex = r"[A-Za-z0-9]+" 25 | if not re.search(alphanumeric_regex, token): 26 | return False 27 | 28 | if not re.search(alphanumeric_regex, commit_sha): 29 | return False 30 | 31 | gh_uri = f"repos/{repo}/commits/{commit_sha}" 32 | url = f"https://api.github.com/{gh_uri}" 33 | 34 | headers = { 35 | "Accept": "application/vnd.github.v3+json", 36 | "Authorization": f"token {token}", 37 | } 38 | 39 | r = requests.get(url, headers=headers) 40 | 41 | if r.status_code == 200: 42 | j = r.json() 43 | if "sha" in j and j["sha"] == commit_sha: 44 | return True 45 | 46 | return False 47 | 48 | 49 | def validRepo(repo: str) -> bool: 50 | """ 51 | 52 | Checks if the repo is allowed to start runners 53 | 54 | >>> validRepo("") 55 | False 56 | 57 | >>> validRepo("OllieJC/test") 58 | False 59 | 60 | >>> validRepo("alphagov/test") 61 | True 62 | 63 | >>> validRepo("cabinetoffice/test") 64 | True 65 | 66 | >>> validRepo("trick_cabinetoffice/test") 67 | False 68 | 69 | >>> validRepo("cabinetofficetrick/test") 70 | False 71 | 72 | TODO 73 | 74 | """ 75 | if repo: 76 | org = repo.split("/")[0] 77 | if org in VALID_ORGS: 78 | return True 79 | return False 80 | -------------------------------------------------------------------------------- /http_helper.py: -------------------------------------------------------------------------------- 1 | import hmac 2 | import hashlib 3 | import base64 4 | import json 5 | import re 6 | import time 7 | 8 | 9 | def extractAndValidateBody( 10 | body: str, 11 | key: str = "", 12 | signature: str = "", 13 | isBase64: bool = False, 14 | with_validate: bool = True, 15 | ) -> dict: 16 | """ 17 | Basic parsing of the body, including optional validation of a HMAC, to a dict 18 | 19 | >>> t = int(time.time()) 20 | >>> valid_body = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}' 21 | >>> valid_b64b = base64.b64encode(valid_body.encode("utf-8")).decode("utf-8") 22 | 23 | >>> test1 = extractAndValidateBody(valid_b64b, isBase64=True, with_validate=False) 24 | >>> test1.pop("time") != "0" 25 | True 26 | >>> test1 27 | {'subnet': '123', 'sg': '456', 'repo': '789'} 28 | 29 | >>> test2 = extractAndValidateBody(valid_body, with_validate=False) 30 | >>> test2.pop("time") != "0" 31 | True 32 | >>> test2 33 | {'subnet': '123', 'sg': '456', 'repo': '789'} 34 | 35 | >>> kinda_valid = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}' 36 | >>> test3 = extractAndValidateBody(kinda_valid, with_validate=False) 37 | >>> test3.pop("time") != "0" 38 | True 39 | >>> test3 40 | {'subnet': '123', 'sg': '456', 'repo': '789'} 41 | 42 | >>> with open('tests/fixtures/example.json') as json_file: 43 | ... example = json.load(json_file) 44 | >>> example["body"] = example["body"].replace("111", str(t)) 45 | >>> test4 = extractAndValidateBody(example["body"], with_validate=False) 46 | >>> test4.pop("time") != "0" 47 | True 48 | >>> test4 49 | {'subnet': '123', 'sg': '456', 'repo': '789'} 50 | 51 | >>> key = "abcdefg" 52 | >>> h = hmac.new(key.encode("utf-8"), valid_body.encode("utf-8"), hashlib.sha512) 53 | >>> test5 = extractAndValidateBody(valid_body, key=key, signature=h.hexdigest()) 54 | >>> test5.pop("time") != "0" 55 | True 56 | >>> test5 57 | {'subnet': '123', 'sg': '456', 'repo': '789'} 58 | 59 | >>> try: 60 | ... extractAndValidateBody(key="12345", body="{}") 61 | ... except Exception as e: 62 | ... print(e) 63 | key or signature missing 64 | 65 | >>> try: 66 | ... extractAndValidateBody('{"subnet": "123", "sg": "456", "repo": "789", "time": 1015213801}', with_validate=False) 67 | ... except Exception as e: 68 | ... print(e) 69 | request expired 70 | 71 | """ 72 | 73 | if with_validate and (not key or not signature): 74 | raise Exception("key or signature missing") 75 | 76 | if isBase64: 77 | dec_body = base64.b64decode(body.encode("utf-8")) 78 | body = dec_body.decode("utf-8") 79 | 80 | body_qs = json.loads(body) 81 | 82 | if not all(x in body_qs for x in ["time"]): 83 | raise Exception("missing required body item") 84 | 85 | requestTime = int(body_qs["time"]) 86 | # less than 30 seconds old 87 | if (int(time.time()) - requestTime) >= 30: 88 | raise Exception(f"request expired") 89 | 90 | if with_validate: 91 | 92 | key_bytes = None 93 | 94 | if not key: 95 | raise Exception("Key not valid") 96 | else: 97 | key_bytes = key.encode("utf-8") 98 | 99 | h = hmac.new(key_bytes, body.encode("utf-8"), hashlib.sha512) 100 | res = h.hexdigest() 101 | 102 | if res == signature: 103 | return body_qs 104 | else: 105 | raise Exception("Bad signature") 106 | 107 | return body_qs 108 | -------------------------------------------------------------------------------- /lambda_handler.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import traceback 3 | import time 4 | import json 5 | import sys 6 | import datetime 7 | 8 | from wrangling_ec2 import * 9 | from wrestling_sts import * 10 | from github import * 11 | from http_helper import * 12 | from utils import envVar 13 | 14 | 15 | github_url = "https://github.com/alphagov/github-actions-runner-orchestration" 16 | 17 | 18 | def lambda_handler(event, context): 19 | """ 20 | 21 | >>> t = lambda_handler({}, {}) 22 | >>> "Error" not in t 23 | True 24 | 25 | """ 26 | 27 | actresp = actual_handler(event, context) 28 | 29 | error = None 30 | if "Error" in actresp: 31 | error = actresp.pop("Error") 32 | 33 | with_redaction = True 34 | debug = envVar("DEBUG") 35 | if debug and debug == "1": 36 | with_redaction = False 37 | 38 | logEvent(event, actresp, error, with_redaction=with_redaction) 39 | return actresp 40 | 41 | 42 | def actual_handler(event, context): 43 | """ 44 | 45 | >>> t = actual_handler({}, {}) 46 | >>> "Error" in t 47 | True 48 | >>> t["Error"].startswith("httpMethod not set") 49 | True 50 | 51 | >>> t = actual_handler({"path": "/", "httpMethod": "GET"}, {}) 52 | >>> "Error" not in t 53 | True 54 | >>> 302 == t["statusCode"] 55 | True 56 | >>> github_url in t["body"] 57 | True 58 | >>> github_url in t["headers"]["Location"] 59 | True 60 | 61 | >>> t = actual_handler({"path": "/status", "httpMethod": "GET"}, {}) 62 | >>> "Error" not in t 63 | True 64 | >>> 200 == t["statusCode"] 65 | True 66 | >>> "ok" in t["body"] 67 | True 68 | 69 | >>> t = actual_handler({"path": "/", "httpMethod": "POST"}, {}) 70 | >>> 405 == t["statusCode"] 71 | True 72 | 73 | >>> import hmac 74 | >>> import hashlib 75 | >>> key = "abcdefg" 76 | >>> with open('tests/fixtures/example.json') as json_file: 77 | ... example = json.load(json_file) 78 | ... example["body"] = example["body"].replace("111", str(int(time.time()))) 79 | ... h = hmac.new(key.encode("utf-8"), example["body"].encode("utf-8"), hashlib.sha512) 80 | ... example["headers"]["x-github-signature"] = h.hexdigest() 81 | >>> "httpMethod" in example 82 | True 83 | >>> t = actual_handler(example, {}) 84 | >>> t["Error"].startswith("Failed the token check for: 789") 85 | True 86 | 87 | """ 88 | 89 | try: 90 | response = {} 91 | 92 | method = None 93 | if "httpMethod" in event: 94 | method = event["httpMethod"] 95 | else: 96 | raise Exception("httpMethod not set") 97 | 98 | path = "" 99 | if "path" in event: 100 | path = event["path"] 101 | if path not in ("/start", "/status", "/stop", "/state"): 102 | # unknown path, reset: 103 | path = "" 104 | 105 | if path == "/status" and method == "GET": 106 | return { 107 | "statusCode": 200, 108 | "isBase64Encoded": False, 109 | "headers": {"Content-Type": "application/json"}, 110 | "body": '{"status": "ok"}', 111 | } 112 | 113 | # if path is not set, handle here: 114 | if not path: 115 | if method == "GET": 116 | return { 117 | "statusCode": 302, 118 | "isBase64Encoded": False, 119 | "headers": {"Location": github_url, "Content-Type": "text/html"}, 120 | "body": f'{github_url}', 121 | } 122 | else: 123 | return { 124 | "statusCode": 405, 125 | "isBase64Encoded": False, 126 | "headers": {"Content-Type": "text/html"}, 127 | "body": "Method Not Allowed", 128 | } 129 | 130 | if "x-github-token" not in event["headers"]: 131 | raise Exception("Missing X-GitHub-Token") 132 | 133 | if "x-github-commitsha" not in event["headers"]: 134 | raise Exception("Missing X-GitHub-CommitSHA") 135 | 136 | if "x-github-signature" not in event["headers"]: 137 | raise Exception("Missing X-GitHub-Signature") 138 | 139 | body_qs = extractAndValidateBody( 140 | event["body"], 141 | key=event["headers"]["x-github-token"], 142 | signature=event["headers"]["x-github-signature"], 143 | isBase64=event["isBase64Encoded"], 144 | ) 145 | 146 | token_check = checkGitHubToken( 147 | body_qs["repo"], 148 | event["headers"]["x-github-token"], 149 | event["headers"]["x-github-commitsha"], 150 | ) 151 | 152 | if not token_check: 153 | raise Exception(f"Failed the token check for: {body_qs['repo']}") 154 | 155 | # do authenticated AWS actions from here on 156 | 157 | if "dryrun" in body_qs and body_qs["dryrun"]: 158 | return { 159 | "statusCode": 200, 160 | "isBase64Encoded": False, 161 | "headers": {"Content-Type": "application/json"}, 162 | "body": json.dumps( 163 | {"runnerstate": "started", "name": "dryrun"}, default=str 164 | ), 165 | } 166 | 167 | credentials = assumeRole(body_qs) 168 | if "AccessKeyId" not in credentials: 169 | raise Exception("bad credentials") 170 | 171 | if path == "/start" and method == "POST": 172 | ec2 = startRunnerFromBody(body_qs, credentials) 173 | 174 | if not ec2: 175 | raise Exception(f"Failed to start an instance for: {body_qs['repo']}") 176 | 177 | return { 178 | "statusCode": 200, 179 | "isBase64Encoded": False, 180 | "headers": {"Content-Type": "application/json"}, 181 | "body": json.dumps(ec2, default=str), 182 | } 183 | 184 | if path == "/state" and method == "POST": 185 | running_ec2 = currentRunnerExistsByBody(body_qs, credentials) 186 | 187 | if not running_ec2: 188 | raise Exception(f"Failed to details for: {body_qs['name']}") 189 | 190 | return { 191 | "statusCode": 200, 192 | "isBase64Encoded": False, 193 | "headers": {"Content-Type": "application/json"}, 194 | "body": json.dumps(running_ec2, default=str), 195 | } 196 | 197 | except Exception as e: 198 | return { 199 | "statusCode": 500, 200 | "body": "error", 201 | "Error": f"{e}\n\n{traceback.format_exc()}", 202 | } 203 | 204 | 205 | def logEvent(request: dict, response: dict, error: str, with_redaction=True) -> str: 206 | """ 207 | This logs request from ALB and response back to a JSONL for CloudWatch ingestion 208 | 209 | >>> res = logEvent({}, {}, "") 210 | >>> exp = { \ 211 | "req": {}, \ 212 | "res": {} \ 213 | } 214 | >>> jr = json.loads(res) 215 | >>> jr.pop("time") > 0 216 | True 217 | >>> jr == exp 218 | True 219 | 220 | >>> res = logEvent( \ 221 | {"httpMethod": "POST", "headers": {"x-github-token": "123"}}, \ 222 | {"statusCode": 200, "body": ""}, \ 223 | "abc", with_redaction=True \ 224 | ) 225 | >>> jr = json.loads(res) 226 | >>> t = jr.pop("time") 227 | >>> type(t) 228 | 229 | >>> t > 0 230 | True 231 | >>> print(jr) 232 | {'req': {'httpMethod': 'POST', 'headers': {'x-github-token': 'REDACTED'}}, 'res': {'statusCode': 200, 'body': ''}, 'error': 'abc'} 233 | 234 | >>> res = logEvent( \ 235 | {"httpMethod": "POST", "body": "123"}, \ 236 | {"statusCode": 500, "body": "bad"}, \ 237 | "abc" \ 238 | ) 239 | >>> jr = json.loads(res) 240 | >>> t = jr.pop("time") 241 | >>> type(t) 242 | 243 | >>> t > 0 244 | True 245 | >>> print(jr) 246 | {'req': {'httpMethod': 'POST', 'body': '123'}, 'res': {'statusCode': 500, 'body': 'bad'}, 'error': 'abc'} 247 | 248 | """ 249 | 250 | lg = { 251 | "time": int(time.time()), 252 | "req": request, 253 | "res": response, 254 | } 255 | if error: 256 | lg["error"] = error 257 | 258 | res = json.dumps(lg, default=str) 259 | 260 | if with_redaction: 261 | redacted = "REDACTED" 262 | 263 | if "headers" in request: 264 | if "x-github-token" in request["headers"]: 265 | res = res.replace(request["headers"]["x-github-token"], redacted) 266 | 267 | # if "body" in request and request["body"]: 268 | # res = res.replace(request["body"], redacted) 269 | 270 | # if "body" in response and response["body"]: 271 | # res = res.replace(response["body"], redacted) 272 | 273 | print(res, file=sys.stderr) 274 | return res 275 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/github-actions-runner-orchestration/1fc88fa5606f7142794e228fd1d1115d88415aeb/requirements-dev.txt -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | boto3 3 | -------------------------------------------------------------------------------- /scripts/amazon_linux_ec2_ami_build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "Starting user data" 3 | 4 | # WARNING: everything in here will become available in the public AMI! 5 | 6 | yum upgrade -y 7 | rpm -Uvh https://packages.microsoft.com/config/centos/7/packages-microsoft-prod.rpm 8 | yum install -y deltarpm 9 | yum update -y 10 | yum install -y jq git amazon-linux-extras tar gzip util-linux dotnet-sdk-5.0 \ 11 | unzip sudo yum-utils xz zip openssl-devel libyaml-devel libffi-devel \ 12 | readline-devel gdbm-devel ncurses-devel ruby-devel which procps nano \ 13 | systemd 14 | yum groupinstall -y "Development Tools" 15 | 16 | echo "--------------" 17 | 18 | EC2_TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" \ 19 | -H "X-aws-ec2-metadata-token-ttl-seconds: 60") 20 | 21 | curl -H "X-aws-ec2-metadata-token: $EC2_TOKEN" \ 22 | "http://169.254.169.254/latest/meta-data/instance-id" > instance_id.txt 23 | 24 | curl -H "X-aws-ec2-metadata-token: $EC2_TOKEN" \ 25 | "http://169.254.169.254/latest/meta-data/placement/availability-zone" > az.txt 26 | 27 | INSTANCE_ID=$(tr -cd '[:print:]' < instance_id.txt) 28 | export INSTANCE_ID=$INSTANCE_ID 29 | 30 | REGION=$(tr -cd '[:print:]' < az.txt | grep -oP "^(.+?\d(?=[a-z]))") 31 | export REGION=$REGION 32 | 33 | echo "Instance ID: $INSTANCE_ID, Region: $REGION" 34 | 35 | echo "--------------" 36 | 37 | curl -s "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "acv2.zip" 38 | unzip -o acv2.zip 39 | rm acv2.zip 40 | sudo ./aws/install 41 | 42 | echo "--------------" 43 | 44 | aws ec2 create-tags --region "$REGION" \ 45 | --resources "$INSTANCE_ID" --tags "Key=AMIBuildStatus,Value=starting" 46 | 47 | 48 | GRD="/opt/github/runner" 49 | RAWGITHUB="https://raw.githubusercontent.com" 50 | GARO="alphagov/github-actions-runner-orchestration" 51 | 52 | mkdir -p "$GRD" 53 | cd "$GRD" || exit 1 54 | 55 | echo "Adding github user" 56 | useradd github 57 | echo 'github ALL=(ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers # pragma: allowlist secret 58 | 59 | echo "Adding github user to docker group" 60 | echo " * Docker is present in ECS AMI" 61 | usermod -aG docker github 62 | 63 | echo "Install rvm" 64 | # Import key 65 | runuser -l github -c "curl -sSL https://rvm.io/mpapis.asc | gpg2 --import - \ 66 | && curl -sSL https://rvm.io/pkuczynski.asc | gpg2 --import -" 67 | # Install RVM 68 | runuser -l github -c 'curl -sSL https://get.rvm.io | bash -s stable --ruby --with-default-gems="rails"' 69 | 70 | echo "Installing nvm" 71 | NVMV="0.37.2" 72 | runuser -l github -c "curl -so- 'https://raw.githubusercontent.com/nvm-sh/nvm/v$NVMV/install.sh' | bash" 73 | 74 | echo "Installing rust" 75 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > /opt/rust.sh 76 | runuser -l github -c "sh /opt/rust.sh -y" 77 | rm /opt/rust.sh 78 | 79 | echo 'Install golang' 80 | curl -sLO "https://golang.org/dl/go1.16.1.linux-amd64.tar.gz" 81 | tar -C /usr/local -xzf ./go*.tar.gz 82 | rm ./*.tar.gz 83 | 84 | echo "Install GARO scripts" 85 | GHLC="${RAWGITHUB}/${GARO}/main/scripts/install_headless_chrome.sh" 86 | curl -sLO "$GHLC" 87 | GURL="${RAWGITHUB}/${GARO}/main/scripts/instance_watcher.sh" 88 | curl -sLO "$GURL" 89 | chmod +x ./*.sh 90 | 91 | echo "Installing docker-compose" 92 | DCV="1.22.0" 93 | curl -L "https://github.com/docker/compose/releases/download/$DCV/docker-compose-$(uname -s)-$(uname -m)" \ 94 | -o "/usr/local/bin/docker-compose" 95 | sudo chmod +x "/usr/local/bin/docker-compose" 96 | 97 | echo "Installing ShellCheck" 98 | scversion="stable" 99 | curl -sLO "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" 100 | tar -xvf ./*.tar.xz 101 | cp "shellcheck-${scversion}/shellcheck" /usr/local/bin 102 | rm -rf ./shellcheck* 103 | 104 | echo "Installing python" 105 | amazon-linux-extras enable python3.8 106 | yum -y install python3.8 107 | 108 | echo "Installing poetry" 109 | POETRY_SHA="cc195f1dd086d1c4d12a3acc8d6766981ba431ac" # pragma: allowlist secret 110 | runuser -l github -c "curl -sSL 'https://raw.githubusercontent.com/python-poetry/poetry/$POETRY_SHA/get-poetry.py' | python -" 111 | 112 | echo "Installing tfenv tools" 113 | git clone https://github.com/tfutils/tfenv.git "/opt/tfenv" 114 | rm /usr/local/bin/tfenv || echo "No tfenv installed" 115 | rm /usr/local/bin/terraform || echo "No terraform installed" 116 | ln -s /opt/tfenv/bin/tfenv /usr/local/bin > /dev/null 117 | ln -s /opt/tfenv/bin/terraform /usr/local/bin > /dev/null 118 | chown github:github -R /opt/tfenv 119 | 120 | echo "Downloading latest runner" 121 | CURRENT_SHA="d4cdb633db046f3ec5da048feba940c3a8d606a6" # pragma: allowlist secret 122 | CURRENT_URL="https://raw.githubusercontent.com/actions/runner/$CURRENT_SHA/scripts/" 123 | 124 | curl -sLO "$CURRENT_URL/create-latest-svc.sh" 125 | curl -sLO "$CURRENT_URL/delete.sh" 126 | curl -sLO "$CURRENT_URL/remove-svc.sh" 127 | chmod +x ./*.sh 128 | chown github:github -R "$GRD" 129 | 130 | echo "Adding environment variables" 131 | # shellcheck disable=SC2016 132 | ( 133 | echo 'PATH="/home/github/.cargo/bin:/usr/local/go/bin:/usr/local/bin:/opt/github/runner:$PATH"' 134 | echo 'GOPATH=$HOME/go && export GOPATH' 135 | echo 'GO111MODULE="auto" && export GO111MODULE' 136 | echo 'export PATH' 137 | echo "redact () { 138 | if (( \$# > 0)); then 139 | MATCH=\$(echo \"\$@\" | sed 's/ /\|/') && \ 140 | sed -E \"s/\$MATCH/REDACTED/g\"; 141 | else read tmpRdt && echo \$tmpRdt; 142 | fi 143 | }" 144 | echo "awsredact () { redact '[[:xdigit:]]{9,999}'; }" 145 | echo "hexredact () { redact '[[:xdigit:]]{4,999}'; }" 146 | ) | tee -a /home/github/.bash_profile >> /home/github/.bashrc 147 | 148 | chown github:github -R /home/github 149 | 150 | echo "Cleaning up" 151 | sudo yum clean all 152 | sudo rm -rf /var/cache/yum 153 | 154 | echo "ready" > /home/github/ami_state.txt 155 | 156 | aws ec2 create-tags --region "$REGION" \ 157 | --resources "$INSTANCE_ID" --tags "Key=AMIBuildStatus,Value=done" 158 | 159 | echo "-------- Finished common ---------" 160 | -------------------------------------------------------------------------------- /scripts/amazon_linux_ec2_template.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "Starting user data" 3 | 4 | yum update -y 5 | yum install -y jq 6 | 7 | GRD="/opt/github/runner" 8 | cd "$GRD" || exit 1 9 | 10 | if [ "$(cat /home/github/ami_state.txt)" != "ready" ]; then 11 | sudo shutdown -h now || exit 1 12 | fi 13 | 14 | echo "--------------" 15 | 16 | EC2_TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" \ 17 | -H "X-aws-ec2-metadata-token-ttl-seconds: 60") 18 | 19 | curl -H "X-aws-ec2-metadata-token: $EC2_TOKEN" \ 20 | "http://169.254.169.254/latest/meta-data/instance-id" > instance_id.txt 21 | 22 | curl -H "X-aws-ec2-metadata-token: $EC2_TOKEN" \ 23 | "http://169.254.169.254/latest/meta-data/placement/availability-zone" > az.txt 24 | 25 | INSTANCE_ID=$(tr -cd '[:print:]' < instance_id.txt) 26 | export INSTANCE_ID=$INSTANCE_ID 27 | 28 | grep -oP "^(.+?\d(?=[a-z]))" az.txt > region.txt 29 | RUNNER_REGION=$(tr -cd '[:print:]' < region.txt) 30 | export RUNNER_REGION=$RUNNER_REGION 31 | 32 | echo "Instance ID: $INSTANCE_ID, Region: $RUNNER_REGION" 33 | 34 | if [ -z "$INSTANCE_ID" ] 35 | then 36 | sudo shutdown -h now 37 | fi 38 | 39 | echo "--------------" 40 | 41 | echo -n 'github-runner-{type}-{uniqueid}' > name.txt 42 | echo -n '{repo}' > repo.txt 43 | 44 | RUNNER_NAME=$(tr -cd '[:print:]' < name.txt) 45 | RUNNER_REPO=$(tr -cd '[:print:]' < repo.txt) 46 | 47 | export RUNNER_REPO=$RUNNER_REPO 48 | export RUNNER_NAME=$RUNNER_NAME 49 | 50 | aws ec2 create-tags --region "$RUNNER_REGION" \ 51 | --resources "$INSTANCE_ID" --tags "Key=RunnerState,Value=installing" 52 | 53 | echo "------- Start the watcher -------" 54 | 55 | ./instance_watcher.sh & 56 | 57 | echo "Getting PAT from SSM '/github/runner/pat'" 58 | RAWPAT=$(aws ssm get-parameter --name "/github/runner/pat" \ 59 | --region "$RUNNER_REGION" \ 60 | --with-decryption | jq -r ".[].Value" | tr -cd '[:print:]') 61 | export RUNNER_CFG_PAT=$RAWPAT 62 | 63 | if [ -z "$RUNNER_CFG_PAT" ] 64 | then 65 | aws ec2 create-tags --region "$RUNNER_REGION" \ 66 | --resources "$INSTANCE_ID" --tags "Key=RunnerState,Value=bad-ssm-access" 67 | sudo shutdown -h now 68 | fi 69 | 70 | echo "-----------------" 71 | 72 | timeout 60 ./create-latest-svc.sh "$RUNNER_REPO" '' "$RUNNER_NAME" \ 73 | 'github' '{type},{uniqueid}{additional}' 74 | 75 | aws ec2 create-tags --region "$RUNNER_REGION" \ 76 | --resources "$INSTANCE_ID" --tags "Key=RunnerState,Value=started" 77 | -------------------------------------------------------------------------------- /scripts/create_ami.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Starting: $(date +%s)" 4 | 5 | if [ -z "$SUBNETID" ]; then 6 | echo "SUBNETID not set" 7 | exit 1 8 | fi 9 | 10 | if [ -z "$SECURITYG" ]; then 11 | echo "SECURITYG not set" 12 | exit 1 13 | fi 14 | 15 | REGION="eu-west-2" 16 | 17 | echo "Getting lastest Amazon Linux 2 ECS AMI" 18 | python3.8 -m pip install --user boto3 19 | IMAGEID=$(python3.8 -c "from wrangling_ec2 import getLatestAmzn2Image; \ 20 | import json; \ 21 | ecs_ami = getLatestAmzn2Image('$REGION', {}); \ 22 | print(json.dumps(ecs_ami, default=str))" | jq -r '.ImageId') 23 | 24 | if [ -z "$IMAGEID" ] || [ "$IMAGEID" == "null" ]; then 25 | echo "Failed to get image ID" 26 | exit 1 27 | fi 28 | 29 | echo "Creating instance" 30 | CREATE_EC2=$(aws ec2 run-instances \ 31 | --region "$REGION" \ 32 | --image-id "$IMAGEID" \ 33 | --instance-type "t3a.xlarge" \ 34 | --count 1 \ 35 | --no-associate-public-ip-address \ 36 | --subnet-id "$SUBNETID" \ 37 | --security-group-ids "$SECURITYG" \ 38 | --monitoring Enabled=true \ 39 | --iam-instance-profile Name="GitHubRunnerInstanceRole" \ 40 | --user-data "file://scripts/amazon_linux_ec2_ami_build.sh") 41 | 42 | NEW_INSTANCE_ID=$(echo "$CREATE_EC2" | jq -r '.Instances[0].InstanceId') 43 | 44 | echo "Created instance" 45 | sleep 10 46 | 47 | READY="false" 48 | 49 | echo "Describing instance in while loop to check if ready" 50 | i="0" 51 | # 20 minutes 52 | while [ $i -lt 40 ]; do 53 | AMIBUILDSTATUS=$(aws ec2 describe-tags \ 54 | --filters "Name=resource-id,Values=$NEW_INSTANCE_ID" \ 55 | --region "$REGION" \ 56 | | jq -r '.Tags | .[] | select(.Key == "AMIBuildStatus").Value') 57 | 58 | if [ "$AMIBUILDSTATUS" == "done" ]; then 59 | READY="true" 60 | break 61 | else 62 | echo "ec2:run-instances - not ready: ${AMIBUILDSTATUS}" 63 | sleep 30 64 | fi 65 | 66 | i=$(( i + 1 )) 67 | done 68 | 69 | function terminateEC2 { 70 | aws ec2 terminate-instances \ 71 | --region "$1" \ 72 | --instance-ids "$2" 73 | } 74 | 75 | sleep 30 76 | 77 | if [ "$READY" != "true" ]; then 78 | echo "EC2 wasn't ready in time" 79 | exit 1 80 | fi 81 | 82 | echo "EC2 instance ready, taking an image..." 83 | 84 | AMI_ID=$(aws ec2 create-image \ 85 | --region "$REGION" \ 86 | --instance-id "$NEW_INSTANCE_ID" \ 87 | --name "custom-ami-$(date +%s)" | jq -r '.ImageId') 88 | 89 | if [ -z "$AMI_ID" ] || [ "$AMI_ID" == "null" ]; then 90 | echo "Failed to start creating the image" 91 | terminateEC2 "$REGION" "$NEW_INSTANCE_ID" 92 | exit 1 93 | fi 94 | 95 | echo "Created instance: $(date +%s)" 96 | 97 | AMI_READY="false" 98 | j="0" 99 | # 10 minutes 100 | while [ $j -lt 20 ]; do 101 | IMAGE_STATUS_JSON=$(aws ec2 describe-images \ 102 | --region "$REGION" \ 103 | --image-ids "$AMI_ID") 104 | 105 | AMI_STATUS=$(echo "$IMAGE_STATUS_JSON" | jq -r '.Images[0].State') 106 | 107 | if [ "$AMI_STATUS" == "available" ]; then 108 | AMI_READY="true" 109 | break 110 | else 111 | echo "ec2:create-image - not ready: ${AMI_STATUS}" 112 | sleep 30 113 | fi 114 | 115 | j=$(( j + 1 )) 116 | done 117 | 118 | if [ "$AMI_READY" != "true" ]; then 119 | echo "AMI wasn't ready in time" 120 | terminateEC2 "$REGION" "$NEW_INSTANCE_ID" 121 | exit 1 122 | fi 123 | 124 | sleep 5 125 | 126 | echo "Making AMI public" 127 | aws ec2 modify-image-attribute \ 128 | --image-id "$AMI_ID" \ 129 | --region "$REGION" \ 130 | --launch-permission "Add=[{Group=all}]" 131 | 132 | sleep 1 133 | 134 | echo "Terminating original EC2" 135 | terminateEC2 "$REGION" "$NEW_INSTANCE_ID" 136 | 137 | echo "-- FINISHED! --" 138 | -------------------------------------------------------------------------------- /scripts/install_headless_chrome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | CHROME_DRIVER_VERSION=86.0.4240.22 3 | HEADLESS_CHROME_VERSION=v1.0.0-57 4 | 5 | mkdir -p /opt 6 | cd /opt || exit 1 7 | curl -SL "https://chromedriver.storage.googleapis.com/${CHROME_DRIVER_VERSION}/chromedriver_linux64.zip" > chromedriver.zip 8 | unzip chromedriver.zip 9 | rm chromedriver.zip 10 | 11 | # download chrome binary 12 | curl -SL "https://github.com/adieuadieu/serverless-chrome/releases/download/${HEADLESS_CHROME_VERSION}/stable-headless-chromium-amazonlinux-2.zip" > headless-chromium.zip 13 | unzip headless-chromium.zip 14 | rm headless-chromium.zip 15 | ln -fs /opt/headless-chromium /opt/chrome 16 | -------------------------------------------------------------------------------- /scripts/instance_watcher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | while true; do 4 | sleep 30 5 | 6 | cd "/opt/github/runner" || exit 1 7 | 8 | INSTANCE_ID=$(tr -cd '[:print:]' < instance_id.txt) 9 | RUNNER_REGION=$(tr -cd '[:print:]' < region.txt) 10 | RUNNER_REPO=$(tr -cd '[:print:]' < repo.txt) 11 | RUNNER_NAME=$(tr -cd '[:print:]' < name.txt) 12 | 13 | export INSTANCE_ID=$INSTANCE_ID 14 | export RUNNER_REGION=$RUNNER_REGION 15 | export RUNNER_REPO=$RUNNER_REPO 16 | export RUNNER_NAME=$RUNNER_NAME 17 | 18 | EXPIRY=$(aws ec2 describe-tags \ 19 | --filters "Name=resource-id,Values=$INSTANCE_ID" \ 20 | --region "$RUNNER_REGION" \ 21 | | jq -r '.Tags | .[] | select(.Key == "GitHubRunnerTimeout").Value') 22 | 23 | if (( EXPIRY < $(date +%s) )); then 24 | echo "------------------" 25 | echo "Shutting down, expiry was: $EXPIRY" 26 | echo "------------------" 27 | 28 | # schedule a shutdown before doing anything else: 29 | sleep 60 && sudo shutdown -h now & 30 | 31 | aws ec2 create-tags --region "$RUNNER_REGION" \ 32 | --resources "$INSTANCE_ID" --tags "Key=RunnerState,Value=removing" 33 | 34 | RAWPAT=$(aws ssm get-parameter --name "/github/runner/pat" \ 35 | --region "$RUNNER_REGION" --with-decryption \ 36 | | jq -r ".[].Value" | tr -cd '[:print:]') 37 | 38 | RUNNER_CFG_PAT=$RAWPAT 39 | export RUNNER_CFG_PAT=$RAWPAT 40 | 41 | # the following is adapted from: 42 | # https://github.com/actions/runner/blob/main/scripts/remove-svc.sh 43 | 44 | TOKEN_ENDPOINT="https://api.github.com/repos/${RUNNER_REPO}/actions/runners/remove-token" 45 | 46 | REMOVE_TOKEN=$(curl -s -X POST "$TOKEN_ENDPOINT" \ 47 | -H "accept: application/vnd.github.everest-preview+json" \ 48 | -H "authorization: token ${RUNNER_CFG_PAT}" | jq -r '.token') 49 | export REMOVE_TOKEN=$REMOVE_TOKEN 50 | 51 | if [ -z "$REMOVE_TOKEN" ]; then echo "Failed to get a token" && exit 1; fi 52 | 53 | echo 54 | echo "Removing the runner..." 55 | 56 | GITHUB_RUNNER="/opt/github/runner/runner" 57 | SERVICE_FILE="${GITHUB_RUNNER}/.service" 58 | CONFIG_SH="${GITHUB_RUNNER}/config.sh" 59 | RUNNER_SERVICE=$(tr -cd '[:print:]' < "$SERVICE_FILE") 60 | 61 | if [ -z "$RUNNER_SERVICE" ]; then echo "No service file" && exit 1; fi 62 | 63 | UNITD="/etc/systemd/system/${RUNNER_SERVICE}" 64 | 65 | sudo systemctl stop "$RUNNER_SERVICE" 66 | sudo systemctl disable "$RUNNER_SERVICE" 67 | sudo rm "$UNITD" || echo "Failed to delete: $UNITD" 68 | sudo rm "$SERVICE_FILE" 69 | sudo systemctl daemon-reload 70 | 71 | sudo runuser -l github -c "RUNNER_CFG_PAT=$RAWPAT $CONFIG_SH remove --token $REMOVE_TOKEN" 72 | fi 73 | done 74 | -------------------------------------------------------------------------------- /terraform_module/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | -------------------------------------------------------------------------------- /terraform_module/ec2_role.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_instance_profile" "profile_for_instances" { 2 | name = "GitHubRunnerInstanceRole" 3 | role = aws_iam_role.role_for_instances.name 4 | } 5 | 6 | resource "aws_iam_role_policy_attachment" "role-policy-attachment" { 7 | role = aws_iam_role.role_for_instances.name 8 | count = length(var.role_for_instances_policy_arns) 9 | policy_arn = var.role_for_instances_policy_arns[count.index] 10 | } 11 | 12 | resource "aws_iam_role" "role_for_instances" { 13 | name = "GitHubRunnerInstanceRole" 14 | 15 | tags = merge( 16 | var.common_tags, 17 | map( 18 | "Name", "GitHubRunnerInstanceRole" 19 | ) 20 | ) 21 | 22 | inline_policy { 23 | name = "GitHubRunnerInstanceRolePolicy" 24 | 25 | policy = jsonencode({ 26 | Version = "2012-10-17" 27 | Statement = [ 28 | { 29 | "Effect": "Allow", 30 | "Action": [ 31 | "ssm:DescribeParameters" 32 | ], 33 | "Resource": "*" 34 | }, 35 | { 36 | "Effect": "Allow", 37 | "Action": [ 38 | "ssm:GetParameter", 39 | "ssm:GetParameters", 40 | "ssm:GetParametersByPath", 41 | ], 42 | "Resource": "arn:aws:ssm:${var.region}:*:parameter/github/runner/pat" 43 | }, 44 | { 45 | Action = [ 46 | "ec2:CreateTags", 47 | "ec2:DescribeTags", 48 | ] 49 | Effect = "Allow" 50 | Resource = "*" 51 | }, 52 | { 53 | Action = [ 54 | "logs:CreateLogGroup", 55 | "logs:CreateLogStream", 56 | "logs:PutLogEvents" 57 | ] 58 | Effect = "Allow" 59 | Resource = "*" 60 | } 61 | ] 62 | }) 63 | } 64 | 65 | assume_role_policy = jsonencode( 66 | { 67 | "Version": "2012-10-17", 68 | "Statement": [ 69 | { 70 | "Effect": "Allow", 71 | "Principal": { "Service": "ec2.amazonaws.com"}, 72 | "Action": "sts:AssumeRole" 73 | } 74 | ] 75 | } 76 | ) 77 | } 78 | -------------------------------------------------------------------------------- /terraform_module/iam.tf: -------------------------------------------------------------------------------- 1 | resource "random_integer" "garo_external_id" { 2 | min = 10000000000000 3 | max = 100000000000000 4 | } 5 | 6 | resource "aws_iam_role" "iam_for_ec2" { 7 | name = "GitHubRunnerAssumeRole" 8 | 9 | tags = merge( 10 | var.common_tags, 11 | map( 12 | "Name", "GitHubRunnerAssumeRole" 13 | ) 14 | ) 15 | 16 | inline_policy { 17 | name = "GitHubRunnerAssumeRolePolicy" 18 | 19 | policy = jsonencode({ 20 | Version = "2012-10-17" 21 | Statement = [ 22 | { 23 | Action = [ 24 | "ec2:CreateTags", 25 | "ec2:DescribeTags", 26 | "ec2:DescribeImages", 27 | "ec2:DescribeInstances", 28 | "ec2:RequestSpotInstances", 29 | "ec2:DescribeSpotInstanceRequests", 30 | "ec2:RunInstances", 31 | "iam:PassRole" 32 | ] 33 | Effect = "Allow" 34 | Resource = "*" 35 | }, 36 | { 37 | Action = [ 38 | "logs:CreateLogGroup", 39 | "logs:CreateLogStream", 40 | "logs:PutLogEvents" 41 | ] 42 | Effect = "Allow" 43 | Resource = "*" 44 | } 45 | ] 46 | }) 47 | } 48 | 49 | assume_role_policy = jsonencode( 50 | { 51 | "Version": "2012-10-17", 52 | "Statement": [ 53 | { 54 | "Action": "sts:AssumeRole", 55 | "Principal": { 56 | "AWS": var.garo_lambda_arn 57 | }, 58 | "Condition": { 59 | "StringEquals": { 60 | "sts:ExternalId": random_integer.garo_external_id.result 61 | } 62 | }, 63 | "Effect": "Allow", 64 | "Sid": "" 65 | } 66 | ] 67 | } 68 | ) 69 | } 70 | -------------------------------------------------------------------------------- /terraform_module/outputs.tf: -------------------------------------------------------------------------------- 1 | output "garo_external_id" { 2 | value = random_integer.garo_external_id.result 3 | description = "The ExternalId to set in the GitHub repo secret 'RUNNER_EXID'" 4 | } 5 | -------------------------------------------------------------------------------- /terraform_module/variables.tf: -------------------------------------------------------------------------------- 1 | variable "garo_lambda_arn" { 2 | description = "ARN for the central Lambda" 3 | type = list(string) 4 | default = [ 5 | "arn:aws:iam::982247885130:role/GARO-Role-prod" 6 | ] 7 | } 8 | 9 | variable "role_for_instances_policy_arns" { 10 | description = "Policy ARNs to attach to the instance role" 11 | type = list(string) 12 | default = [ ] 13 | } 14 | 15 | variable "region" { 16 | description = "AWS region" 17 | type = string 18 | default = "eu-west-2" 19 | } 20 | 21 | variable "common_tags" { 22 | default = {} 23 | description = "Command resource tags" 24 | type = map(string) 25 | } 26 | -------------------------------------------------------------------------------- /tests/fixtures/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "requestContext": { 3 | "elb": { 4 | "targetGroupArn": "xxx" 5 | } 6 | }, 7 | "httpMethod": "POST", 8 | "path": "/start", 9 | "queryStringParameters": {}, 10 | "headers": { 11 | "accept": "text/html", 12 | "accept-encoding": "gzip, deflate, br", 13 | "accept-language": "en-GB,en;q=0.9", 14 | "cache-control": "no-cache", 15 | "dnt": "1", 16 | "host": "", 17 | "pragma": "no-cache", 18 | "referer": "", 19 | "x-github-token": "abcdefg", 20 | "x-github-commitsha": "657678", 21 | "sec-fetch-dest": "document", 22 | "sec-fetch-mode": "navigate", 23 | "sec-fetch-site": "same-origin", 24 | "sec-fetch-user": "?1", 25 | "upgrade-insecure-requests": "1", 26 | "user-agent": "Mozilla/5.0", 27 | "x-amzn-trace-id": "Root=1-abc", 28 | "x-forwarded-for": "192.168.0.1", 29 | "x-forwarded-port": "443", 30 | "x-forwarded-proto": "https" 31 | }, 32 | "body": "{\"subnet\": \"123\", \"sg\": \"456\", \"repo\": \"789\", \"time\": 111}", 33 | "isBase64Encoded": false 34 | } 35 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import os 3 | import random 4 | import string 5 | import re 6 | 7 | from base64 import b64decode 8 | 9 | 10 | def envVar(env: str) -> str: 11 | """ 12 | 13 | Gets a environment variable (or encrypted blob) 14 | 15 | >>> len(envVar("PATH")) > 1 16 | True 17 | 18 | >>> None is envVar("NONEXISTENTVARIABLEPROBABLY") 19 | True 20 | 21 | """ 22 | if env and env in os.environ: 23 | return os.environ[env] 24 | return None 25 | 26 | 27 | def decryptEnvVar(env: str) -> str: 28 | return ( 29 | boto3.client("kms") 30 | .decrypt( 31 | CiphertextBlob=b64decode(envVar(env)), 32 | EncryptionContext={ 33 | "LambdaFunctionName": envVar("AWS_LAMBDA_FUNCTION_NAME") 34 | }, 35 | )["Plaintext"] 36 | .decode("utf-8") 37 | ) 38 | 39 | 40 | def random_string(length: int = 32) -> str: 41 | """ 42 | 43 | Gets a random string 44 | 45 | >>> t1 = random_string() 46 | >>> t1.isalnum() 47 | True 48 | >>> len(t1) == 32 49 | True 50 | 51 | >>> t1 = random_string(10) 52 | >>> t1.isalnum() 53 | True 54 | >>> len(t1) == 10 55 | True 56 | 57 | """ 58 | ASCII = string.ascii_letters 59 | return "".join(random.choice(ASCII) for i in range(length)) 60 | -------------------------------------------------------------------------------- /wrangling_ec2.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import datetime 3 | import time 4 | import base64 5 | import json 6 | import re 7 | import botocore 8 | 9 | from utils import random_string 10 | 11 | EC2_DEFAULT_TYPE: str = "spot" 12 | EC2_DEFAULT_INSTANCEROLEARN: str = "GitHubRunnerInstanceRole" 13 | EC2_DEFAULT_INSTANCETYPE: str = "t3a.micro" 14 | EC2_DEFAULT_REGION: str = "eu-west-2" 15 | EC2_DEFAULT_TIMEOUT: int = 3600 16 | 17 | 18 | def buildRunnerUserData2( 19 | repo: str, 20 | type: str, 21 | uniqueid: str, 22 | label: str = "", 23 | region: str = EC2_DEFAULT_REGION, 24 | ): 25 | """ 26 | Builds the runner specific user data as base64 27 | 28 | >>> b64 = buildRunnerUserData2("abc", "123", "678", "def") 29 | >>> len(b64) > 0 30 | True 31 | >>> txt = base64.b64decode(b64).decode("utf-8") 32 | >>> "'github' '123,678,def'" in txt 33 | True 34 | 35 | >>> b64 = buildRunnerUserData2("cba", "123", "678") 36 | >>> len(b64) > 0 37 | True 38 | >>> txt = base64.b64decode(b64).decode("utf-8") 39 | >>> "'github' '123,678'" in txt 40 | True 41 | >>> "export RUNNER_CFG_PAT=$RAWPAT" in txt 42 | True 43 | 44 | """ 45 | 46 | additional = "" 47 | if label: 48 | if label.isalnum(): 49 | additional = f",{label}" 50 | 51 | # TODO: generate a custom AMI with packages downloaded and use that instead 52 | 53 | runner = None 54 | 55 | with open("scripts/amazon_linux_ec2_template.sh", "r") as file: 56 | runner = f"{file.read()}".format(**locals()) 57 | 58 | if not runner: 59 | return None 60 | 61 | enc = "utf-8" 62 | return base64.b64encode(runner.replace("\n\n", "\n").encode(enc)).decode(enc) 63 | 64 | 65 | def getLatestAmzn2Image(region: str, credentials: dict) -> dict: 66 | return getLatestImage(region, credentials) 67 | 68 | 69 | def getLatestCustomImage(region: str, credentials: dict) -> dict: 70 | return getLatestImage(region, credentials, amazon_ecs_ami=False) 71 | 72 | 73 | def getLatestImage(region: str, credentials: dict, amazon_ecs_ami: bool = True) -> dict: 74 | 75 | client = getEc2Client(credentials, region) 76 | 77 | if amazon_ecs_ami: 78 | owner = "amazon" 79 | name = "amzn2-ami-ecs-hvm-*x86_64-ebs" 80 | else: 81 | owner = "982247885130" 82 | name = "custom-ami-*" 83 | 84 | response = client.describe_images( 85 | Owners=[owner], 86 | Filters=[ 87 | {"Name": "name", "Values": [name]}, 88 | {"Name": "state", "Values": ["available"]}, 89 | ], 90 | ) 91 | 92 | if "Images" in response and len(response["Images"]) > 0: 93 | res_image = {} 94 | current_datetime = datetime.datetime(1970, 1, 1) 95 | 96 | for image in response["Images"]: 97 | 98 | dt = datetime.datetime.strptime( 99 | image["CreationDate"], "%Y-%m-%dT%H:%M:%S.%fZ" 100 | ) 101 | 102 | if dt > current_datetime: 103 | current_datetime = dt 104 | res_image = image 105 | 106 | return res_image 107 | 108 | raise Exception("Error getting latest image") 109 | 110 | 111 | def updateTimeoutTag( 112 | instanceid: str, timeout: int, region: str, credentials: dict 113 | ) -> str: 114 | if not region: 115 | region = EC2_DEFAULT_REGION 116 | 117 | grt = _timeoutTagValue(timeout) 118 | 119 | client = getEc2Client(credentials, region) 120 | 121 | response = client.create_tags( 122 | Resources=[instanceid], Tags=[{"Key": "GitHubRunnerTimeout", "Value": grt}] 123 | ) 124 | 125 | if "ResponseMetadata" in response: 126 | return grt 127 | 128 | return None 129 | 130 | 131 | def currentRunnerExistsByBody(body_qs: dict, credentials: dict) -> str: 132 | name = body_qs["name"] 133 | 134 | if "region" in body_qs: 135 | region = body_qs["region"] 136 | else: 137 | region = EC2_DEFAULT_REGION 138 | 139 | filters = [{"Name": "tag:Name", "Values": [name]}] 140 | 141 | return _currentRunnerExists(filters, region, credentials) 142 | 143 | 144 | def currentRunnerExistsByType( 145 | type: str, additional_label: str, region: str, credentials: dict 146 | ) -> str: 147 | filters = [{"Name": "tag:Name", "Values": [f"github-runner-{type}-*"]}] 148 | 149 | if additional_label: 150 | filters.append({"Name": "tag:Label", "Values": [additional_label]}) 151 | 152 | return _currentRunnerExists(filters, region, credentials) 153 | 154 | 155 | def _currentRunnerExists(filters: list, region: str, credentials: dict) -> dict: 156 | client = getEc2Client(credentials, region) 157 | 158 | filters.append({"Name": "tag:RunnerState", "Values": ["star*"]}) 159 | filters.append({"Name": "instance-state-name", "Values": ["pending", "running"]}) 160 | 161 | response = client.describe_instances(Filters=filters, MaxResults=30) 162 | 163 | res = {} 164 | 165 | if "Reservations" in response: 166 | if len(response["Reservations"]) >= 1: 167 | if "Instances" in response["Reservations"][0]: 168 | if len(response["Reservations"][0]["Instances"]) >= 1: 169 | 170 | res.update( 171 | { 172 | "instanceid": response["Reservations"][0]["Instances"][0][ 173 | "InstanceId" 174 | ] 175 | } 176 | ) 177 | 178 | tags = response["Reservations"][0]["Instances"][0]["Tags"] 179 | 180 | for tag in tags: 181 | if tag["Key"] == "Name": 182 | res.update({"name": tag["Value"]}) 183 | 184 | if tag["Key"] == "GitHubRunnerTimeout": 185 | res.update({"updated_expiry_time": tag["Value"]}) 186 | 187 | if tag["Key"] == "RunnerState": 188 | res.update({"runnerstate": tag["Value"]}) 189 | 190 | if tag["Key"] == "UniqueID": 191 | res.update({"uniqueid": tag["Value"]}) 192 | 193 | return res 194 | 195 | 196 | def startRunnerFromBody(body_items: dict, credentials: dict) -> bool: 197 | repo = body_items["repo"] 198 | sg = body_items["sg"] 199 | subnet = body_items["subnet"] 200 | 201 | if "type" in body_items: 202 | type = body_items["type"] 203 | else: 204 | type = EC2_DEFAULT_TYPE 205 | 206 | if "label" in body_items: 207 | additional_label = body_items["label"] 208 | else: 209 | additional_label = "" 210 | 211 | if "region" in body_items: 212 | region = body_items["region"] 213 | else: 214 | region = EC2_DEFAULT_REGION 215 | 216 | if "timeout" in body_items: 217 | timeout = int(body_items["timeout"]) 218 | else: 219 | timeout = EC2_DEFAULT_TIMEOUT 220 | 221 | cre = currentRunnerExistsByType(type, additional_label, region, credentials) 222 | if cre: 223 | if "updated_expiry_time" in cre: 224 | # if the timeout has more than 45 seconds left: 225 | if int(cre["updated_expiry_time"]) >= int(time.time()) + 45: 226 | utt = updateTimeoutTag(cre["instanceid"], timeout, region, credentials) 227 | if utt: 228 | cre.update( 229 | { 230 | "additional_label": additional_label, 231 | "type": type, 232 | "updated_expiry_time": utt, 233 | } 234 | ) 235 | return cre 236 | 237 | if "instanceRoleArn" in body_items: 238 | instanceRoleArn = body_items["instanceRoleArn"] 239 | else: 240 | instanceRoleArn = EC2_DEFAULT_INSTANCEROLEARN 241 | 242 | if "instanceType" in body_items: 243 | instanceType = body_items["instanceType"] 244 | else: 245 | instanceType = EC2_DEFAULT_INSTANCETYPE 246 | 247 | if "imageid" in body_items: 248 | imageid = body_items["imageid"] 249 | else: 250 | imageRes = getLatestCustomImage(region, credentials) 251 | imageid = imageRes["ImageId"] 252 | 253 | uniqueid = random_string(10) 254 | 255 | userDataB64 = buildRunnerUserData2( 256 | repo=repo, type=type, uniqueid=uniqueid, label=additional_label 257 | ) 258 | 259 | name = f"github-runner-{type}-{uniqueid}" 260 | 261 | result = startRunner( 262 | name=name, 263 | userdata=userDataB64, 264 | imageid=imageid, 265 | sg=sg, 266 | subnet=subnet, 267 | uniqueid=uniqueid, 268 | additional_label=additional_label, 269 | type=type, 270 | instanceRoleArn=instanceRoleArn, 271 | instanceType=instanceType, 272 | region=region, 273 | timeout=timeout, 274 | credentials=credentials, 275 | ) 276 | return { 277 | "runnerstate": "starting", 278 | "name": name, 279 | "additional_label": additional_label, 280 | "type": type, 281 | "uniqueid": uniqueid, 282 | } 283 | 284 | 285 | def _timeoutTagValue(timeout: int = EC2_DEFAULT_TIMEOUT): 286 | timeout_str = str(int(time.time()) + timeout) 287 | print("timeout_str: ", timeout_str) 288 | return timeout_str 289 | 290 | 291 | def startRunner( 292 | name: str, 293 | userdata: str, 294 | imageid: str, 295 | sg: str, 296 | subnet: str, 297 | uniqueid: str, 298 | additional_label: str = "", 299 | type: str = EC2_DEFAULT_TYPE, 300 | instanceRoleArn: str = EC2_DEFAULT_INSTANCEROLEARN, 301 | instanceType: str = EC2_DEFAULT_INSTANCETYPE, 302 | region: str = EC2_DEFAULT_REGION, 303 | timeout: int = EC2_DEFAULT_TIMEOUT, 304 | credentials: dict = {}, 305 | ) -> bool: 306 | 307 | expiry_time = _timeoutTagValue(timeout) 308 | 309 | if type == "spot": 310 | return _startSpotRunner( 311 | name, 312 | userdata, 313 | imageid, 314 | sg, 315 | subnet, 316 | uniqueid, 317 | additional_label, 318 | instanceRoleArn, 319 | instanceType, 320 | region, 321 | expiry_time, 322 | credentials, 323 | ) 324 | 325 | if type == "ondemand": 326 | return _startOndemandRunner( 327 | name, 328 | userdata, 329 | imageid, 330 | sg, 331 | subnet, 332 | uniqueid, 333 | additional_label, 334 | instanceRoleArn, 335 | instanceType, 336 | region, 337 | expiry_time, 338 | credentials, 339 | ) 340 | 341 | raise Exception("Type not recognised") 342 | 343 | 344 | def _startOndemandRunner( 345 | name: str, 346 | userdata: str, 347 | imageid: str, 348 | sg: str, 349 | subnet: str, 350 | uniqueid: str, 351 | additional_label: str, 352 | instanceRoleArn: str, 353 | instanceType: str, 354 | region: str, 355 | expiry_time: int, 356 | credentials: dict, 357 | ) -> bool: 358 | 359 | client = getEc2Client(credentials, region) 360 | 361 | tags = [ 362 | {"Key": "Name", "Value": name}, 363 | {"Key": "UniqueID", "Value": uniqueid}, 364 | {"Key": "GitHubRunnerTimeout", "Value": expiry_time}, 365 | {"Key": "RunnerState", "Value": "pending"}, 366 | ] 367 | if additional_label: 368 | tags.append({"Key": "Label", "Value": additional_label}) 369 | 370 | response = client.run_instances( 371 | DryRun=False, 372 | ClientToken=uniqueid, 373 | MinCount=1, 374 | MaxCount=1, 375 | InstanceType=instanceType, 376 | TagSpecifications=[{"ResourceType": "instance", "Tags": tags}], 377 | UserData=userdata, 378 | ImageId=imageid, 379 | IamInstanceProfile={"Name": instanceRoleArn}, 380 | InstanceInitiatedShutdownBehavior="terminate", 381 | NetworkInterfaces=[{"DeviceIndex": 0, "Groups": [sg], "SubnetId": subnet}], 382 | HibernationOptions={"Configured": False}, 383 | ) 384 | 385 | instance_created = False 386 | 387 | if "Instances" in response: 388 | if len(response["Instances"]) == 1: 389 | instance_created = True 390 | 391 | return instance_created 392 | 393 | 394 | def _startSpotRunner( 395 | name: str, 396 | userdata: str, 397 | imageid: str, 398 | sg: str, 399 | subnet: str, 400 | uniqueid: str, 401 | additional_label: str, 402 | instanceRoleArn: str, 403 | instanceType: str, 404 | region: str, 405 | expiry_time: str, 406 | credentials: dict, 407 | ) -> bool: 408 | 409 | client = getEc2Client(credentials, region) 410 | 411 | response = client.request_spot_instances( 412 | DryRun=False, 413 | ClientToken=uniqueid, 414 | AvailabilityZoneGroup=region, 415 | InstanceCount=1, 416 | Type="one-time", 417 | LaunchSpecification={ 418 | "IamInstanceProfile": {"Name": instanceRoleArn}, 419 | "UserData": userdata, 420 | "ImageId": imageid, 421 | "InstanceType": instanceType, 422 | "Monitoring": {"Enabled": True}, 423 | "NetworkInterfaces": [ 424 | {"Groups": [sg], "SubnetId": subnet, "DeviceIndex": 0} 425 | ], 426 | }, 427 | TagSpecifications=[ 428 | { 429 | "ResourceType": "spot-instances-request", 430 | "Tags": [{"Key": "Name", "Value": name}], 431 | } 432 | ], 433 | ) 434 | 435 | spot_instance_created = False 436 | 437 | if "SpotInstanceRequests" in response: 438 | if len(response["SpotInstanceRequests"]) == 1: 439 | res = "first" 440 | counter = 1 441 | 442 | tags = [ 443 | {"Key": "Name", "Value": name}, 444 | {"Key": "UniqueID", "Value": uniqueid}, 445 | {"Key": "GitHubRunnerTimeout", "Value": expiry_time}, 446 | {"Key": "RunnerState", "Value": "pending"}, 447 | ] 448 | 449 | if additional_label: 450 | tags.append({"Key": "Label", "Value": additional_label}) 451 | 452 | # try up to six times 453 | while counter >= 6 or res != "done": 454 | res = _setSpotTagsFromRequest( 455 | response["SpotInstanceRequests"][0]["SpotInstanceRequestId"], 456 | tags, 457 | region=region, 458 | credentials=credentials, 459 | ) 460 | 461 | print(f"setTags attempt {counter}: {res}") 462 | 463 | if res == "done": 464 | spot_instance_created = True 465 | break 466 | else: 467 | time.sleep(2) 468 | counter += 1 469 | 470 | return spot_instance_created 471 | 472 | 473 | def getEc2Client(credentials: dict = {}, region: str = EC2_DEFAULT_REGION): 474 | """ 475 | 476 | Return the EC2 boto3 client, using shell credentials or temp credentials as a dict 477 | 478 | >>> try: 479 | ... c = getEc2Client({"AccessKeyId": "123", "SecretAccessKey": "456", "SessionToken": "789"}) 480 | ... c.describe_images() 481 | ... except botocore.exceptions.ClientError as e: 482 | ... "AuthFailure" in str(e) 483 | True 484 | 485 | """ 486 | 487 | if credentials: 488 | client = boto3.client( 489 | "ec2", 490 | region_name=region, 491 | aws_access_key_id=credentials["AccessKeyId"], 492 | aws_secret_access_key=credentials["SecretAccessKey"], 493 | aws_session_token=credentials["SessionToken"], 494 | ) 495 | else: 496 | client = boto3.client("ec2", region_name=region) 497 | 498 | return client 499 | 500 | 501 | def _setSpotTagsFromRequest( 502 | instanceRequestId: str, tags: list, region: str, credentials: dict 503 | ) -> str: 504 | 505 | client = getEc2Client(credentials, region) 506 | 507 | response_desc = client.describe_spot_instance_requests( 508 | SpotInstanceRequestIds=[instanceRequestId] 509 | ) 510 | 511 | if "SpotInstanceRequests" in response_desc: 512 | if len(response_desc["SpotInstanceRequests"]) == 1: 513 | if "InstanceId" in response_desc["SpotInstanceRequests"][0]: 514 | response_tags = client.create_tags( 515 | Resources=[response_desc["SpotInstanceRequests"][0]["InstanceId"]], 516 | Tags=tags, 517 | ) 518 | return "done" 519 | else: 520 | return "tryagain" 521 | return "error" 522 | -------------------------------------------------------------------------------- /wrestling_sts.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import time 3 | import botocore 4 | 5 | 6 | ROLE_DEFAULT = "" 7 | STS_DEFAULT_REGION: str = "eu-west-2" 8 | STS_DEFAULT_TIMEOUT: int = 3600 9 | 10 | 11 | def _getStsClient(region: str): 12 | """ 13 | 14 | Return the STS boto3 client, using shell/Lambda credentials 15 | 16 | """ 17 | client = boto3.client("sts", region_name=region) 18 | return client 19 | 20 | 21 | def currentCaller(region: str = STS_DEFAULT_REGION) -> dict: 22 | """ 23 | 24 | Return the IAM boto3 client, using shell/Lambda credentials 25 | 26 | >>> try: 27 | ... c = currentCaller() 28 | ... res = "UserId" in c 29 | ... except botocore.exceptions.NoCredentialsError as e: 30 | ... res = True 31 | >>> res 32 | True 33 | 34 | """ 35 | sts = _getStsClient(region) 36 | return sts.get_caller_identity() 37 | 38 | 39 | def assumeRole(body_qs: dict) -> dict: 40 | 41 | if "account_id" in body_qs: 42 | account_id = body_qs["account_id"] 43 | else: 44 | raise Exception("account_id not set") 45 | 46 | if "external_id" in body_qs: 47 | external_id = body_qs["external_id"] 48 | else: 49 | raise Exception("external_id not set") 50 | 51 | if "region" in body_qs: 52 | region = body_qs["region"] 53 | else: 54 | region = STS_DEFAULT_REGION 55 | 56 | if "timeout" in body_qs: 57 | timeout = int(body_qs["timeout"]) 58 | else: 59 | timeout = STS_DEFAULT_TIMEOUT 60 | 61 | caller = currentCaller(region) 62 | if "Account" not in caller: 63 | raise Exception("get_caller_identity account failure") 64 | 65 | sts = _getStsClient(region) 66 | 67 | if timeout < 900: 68 | timeout = 900 69 | 70 | if timeout > 43200: 71 | timeout = 43200 72 | 73 | response = sts.assume_role( 74 | RoleArn=f"arn:aws:iam::{account_id}:role/GitHubRunnerAssumeRole", 75 | RoleSessionName=f'{caller["Account"]}-{int(time.time())}', 76 | DurationSeconds=timeout, 77 | ExternalId=external_id, 78 | ) 79 | 80 | if "Credentials" in response: 81 | return response["Credentials"] 82 | 83 | raise Exception("failed to assume role") 84 | --------------------------------------------------------------------------------