├── .dockerignore ├── .env-gdc ├── .gitattributes ├── .gitignore ├── Dockerfile ├── LICENSE.txt ├── SECURITY.md ├── auth0_mock ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── ext_pk │ ├── auth0_jwk.json │ ├── auth0_jwks.json │ └── auth0_rsa ├── index.ts ├── jest.config.ts ├── modules │ ├── authentication.ts │ ├── helpers.ts │ ├── jwk-wrapper.ts │ ├── middleware.ts │ └── user.ts ├── package.json ├── public │ └── images │ │ ├── img.svg │ │ └── ufo-2.svg ├── routes │ ├── api.ts │ ├── authentication.ts │ └── index.ts ├── templates │ └── login_page.ejs ├── tests │ ├── integration │ │ ├── api_route.test.ts │ │ ├── authentication_route.test.ts │ │ └── index_route.test.ts │ ├── unit │ │ ├── access_token_claims.test.ts │ │ ├── authentication.test.ts │ │ ├── default_token_claims.test.ts │ │ ├── helpers.test.ts │ │ ├── id_token_claims.test.ts │ │ ├── jwk_wrapper.test.ts │ │ ├── middleware.test.ts │ │ └── user.test.ts │ └── utils.ts ├── token-claims │ ├── access.ts │ ├── id.ts │ └── token_defaults.ts ├── tsconfig.json ├── tslint.json ├── types.ts └── users.json ├── dc-auth0-host.yml ├── dc-auth0-local-users.yml ├── dc-auth0.yml ├── dc-dns.yml ├── dc-host-custom-mount.yml ├── dc-host-home-dir-rw.yml ├── dc-host-home-dir.yml ├── dc-host-workspace-dir.yml ├── dc-ls-host-dns.yml ├── dc-ls-host.yml ├── dc-ls-persist.yml ├── dc-ls-pro.yml ├── dc-ls-shared.yml ├── dc-ls-static-ip.yml ├── dc-ls.yml ├── dc-proxy-dump.yml ├── dc-proxy-host.yml ├── dc-proxy-web-host.yml ├── dc-proxy-web.yml ├── dc-proxy.yml ├── dc-ssh-agent.yml ├── dc-ssh.yml ├── docker-compose.yml ├── docker-config.json ├── docs ├── auth0 │ └── readme.md ├── aws_ident │ ├── images │ │ ├── access_keys.png │ │ ├── add_mfa.png │ │ ├── change_pw.png │ │ ├── close_button.png │ │ ├── login.png │ │ ├── manage_mfa.png │ │ ├── mfa_account_name.png │ │ ├── mfa_app_screen.png │ │ ├── mfa_codes.png │ │ ├── my_sec_creds.png │ │ ├── new_access_key.png │ │ ├── select_iam.png │ │ └── show_qr.png │ └── readme.md ├── aws_sso │ ├── images │ │ ├── aws_login.png │ │ ├── aws_mfa_done.png │ │ ├── aws_mfa_name.png │ │ ├── aws_mfa_qr.png │ │ ├── aws_new_mfa.png │ │ ├── aws_new_user.png │ │ ├── invite-email-body.png │ │ └── invite-email-subject.png │ └── readme.md ├── bitwarden │ ├── images │ │ └── pop-out.png │ └── readme.md ├── container_dev │ └── images │ │ ├── GenericDevContainer.png │ │ ├── compose_v2.png │ │ ├── ddesktop-container.png │ │ ├── ddesktop-memory.png │ │ └── ddesktop-wsl2.png ├── contributing.md ├── cypress │ ├── images │ │ ├── cypress-error-1.png │ │ └── xquartz-settings.png │ └── readme.md └── debugging │ └── readme.md ├── etc ├── bash_completion.d │ └── gdcex.sh ├── dpkg │ └── dpkg.conf.d │ │ └── 01_nodoc ├── locale.gen ├── profile.d │ └── 02-locale-set.sh ├── skel │ ├── .bashrc │ └── .terraformrc ├── ssh │ ├── ssh_config │ └── sshd_config └── term_colors.sh ├── init.sh ├── k8s ├── .dockerignore ├── .env-gdc ├── Dockerfile ├── awsls │ ├── config.template │ └── credentials ├── build-pod-image.sh ├── dev-deployment.yaml ├── docker-compose.yml ├── docker-config.json ├── etc │ ├── bash_completion.d │ │ └── gdcex.sh │ ├── dpkg │ │ └── dpkg.conf.d │ │ │ └── 01_nodoc │ ├── locale.gen │ ├── profile.d │ │ └── 02-locale-set.sh │ ├── skel │ │ └── .bashrc │ ├── ssh │ │ ├── ssh_config │ │ └── sshd_config │ └── term_colors.sh ├── init.sh ├── postStartCommand.sh └── root │ └── bin │ ├── aws │ ├── assume-role.sh │ ├── aws-remote.py │ ├── aws_assume_remaining.sh │ ├── check-ecs-exec.sh │ ├── export-aws-session.sh │ ├── setup-aws.sh │ ├── 
ssm-jump-tunnel-old.sh │ ├── ssm-jump-tunnel.sh │ ├── ssm-scp.sh │ ├── ssm-send-command.sh │ └── ssm-ssh.sh │ ├── remote-client.sh │ └── requirements.txt ├── noop ├── postStartCommand.sh ├── readme.md ├── root └── bin │ ├── auth0 │ ├── get-auth-token.sh │ ├── start-auth0.sh │ └── stop-auth0.sh │ ├── aws │ ├── assume-role.sh │ ├── aws-remote.py │ ├── aws_assume_remaining.sh │ ├── check-ecs-exec.sh │ ├── export-aws-session.sh │ ├── setup-aws.sh │ ├── ssm-jump-tunnel-old.sh │ ├── ssm-jump-tunnel.sh │ ├── ssm-scp.sh │ ├── ssm-send-command.sh │ └── ssm-ssh.sh │ ├── check-gdc-update.sh │ ├── docker │ ├── docker-logs.sh │ ├── docker-shell.sh │ ├── docker-stats.sh │ └── docker-stop.sh │ ├── gdc-pipeline-exec.sh │ ├── gdcex.sh │ ├── ls │ ├── start-ls.sh │ └── stop-ls.sh │ ├── remote-client.sh │ ├── requirements.txt │ └── run-gdc.sh └── run-dev-container.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | docker-compose.yml 2 | **/temp 3 | test 4 | **/*.ps1 5 | **/.idea 6 | **/.vscode 7 | data 8 | attachments 9 | **/venv* 10 | **/vendor 11 | **/node_modules 12 | **/__pycache__ 13 | **/*.zip 14 | **/test_data 15 | **/.cache 16 | **/.pulumi 17 | docs 18 | .run 19 | DEADJOE 20 | -------------------------------------------------------------------------------- /.env-gdc: -------------------------------------------------------------------------------- 1 | export ROOT_PW=${ROOT_PW:=ContainersRule} # sets root password in container 2 | 3 | export USE_WORKSPACE=${USE_WORKSPACE:=yes} 4 | 5 | export USE_AWS_HOME=${USE_AWS_HOME:=yes} # copy .aws folder from host home if exists and enables USE_HOST_HOME=yes 6 | export USE_HOME_BIN=${USE_HOME_BIN:=no} # copy bin folder from host home directory if it exists and enables USE_HOST_HOME=yes 7 | 8 | if [ -z ${SHOW_VERSIONS_ON_LOGIN+x} ]; then 9 | export SHOW_VERSIONS_ON_LOGIN=yes # show installed versions on each login 10 | fi 11 | 12 | if [ -z ${TERRAFORM_VERSION+x} ]; then 13 | # export TERRAFORM_VERSION=latest # install this version of terraform by default 14 | export TERRAFORM_VERSION='' 15 | fi 16 | if [ -z ${PULUMI_VERSION+x} ]; then 17 | # export PULUMI_VERSION=latest # install this version of pulumi by default 18 | export PULUMI_VERSION='' # dont install pulumi by default 19 | fi 20 | if [ -z ${PHP_VERSION+x} ]; then 21 | export PHP_VERSION='' # available PHP versions 5.6, 7.0, 7.1, 7.2, 7.3, 7.4, 8.0, 8.1, 8.2 22 | fi 23 | 24 | export USE_BITWARDEN=${USE_BITWARDEN:=yes} # enable bitwarden workflow helpers. requires node install 25 | export PERSIST_BITWARDEN_SESSION=${PERSIST_BITWARDEN_SESSION:=no} # persist unlocked vault creds between container sessions 26 | 27 | 28 | export RUST_VERSION 29 | export CARGO_EXTRA 30 | #if [ -z ${CARGO_EXTRA+x} ]; then 31 | # export CARGO_EXTRA="cargo-edit cargo-outdated cargo-audit cargo-info bacon" # extra cargo packages to install 32 | #fi 33 | export LS_VERSION # starts requested localstack container version 34 | export USE_LOCALSTACK=${USE_LOCALSTACK:=yes} # does not install or start localstack. Only sets up some helpers 35 | export USE_LOCALSTACK_PRO=${USE_LOCALSTACK_PRO:=yes} # does not install or start localstack. Only sets up pro version api key / tokens 36 | export USE_LOCALSTACK_PERSISTENCE=${USE_LOCALSTACK_PERSISTENCE:=no} # toggle persistent storage for LS defaults to persistence disabled. 37 | export USE_LOCALSTACK_HOST=${USE_LOCALSTACK_HOST:=yes} # does not install or start localstack. 
Only sets up some helpers and port forwards 38 | if [ -z ${LOCALSTACK_HOST_DNS_PORT+x} ]; then 39 | LOCALSTACK_HOST_DNS_PORT=53 # forward this port from host to localstack for DNS 40 | fi 41 | export USE_LOCALSTACK_SHARED=${USE_LOCALSTACK_SHARED:=no} # mount shared volume in LS container under /shared 42 | export LOCALSTACK_HOST_DNS_PORT 43 | export LOCALSTACK_GATEWAY_LISTEN 44 | export USE_LOCALSTACK_DNS=${USE_LOCALSTACK_DNS:=no} # set to yes to assign static ip to LS container and use it as primary DNS 45 | export LS_DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM=${LS_DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM:=""} # comma separated list of patterns to resolve upstream 46 | export LS_LOG 47 | export USE_AUTH0 # starts up auth0 mock container in container only mode 48 | export USE_AUTH0_HOST # starts up auth0 mock container and forwards port from host. Use AUTH0_HOST_PORT to change default of 3001 49 | export AUTH0_HOST_PORT=${AUTH0_HOST_PORT:=3001} # default port for AUTH0 mock if enabled 50 | export AUTH0_LOCAL_USERS_FILE # used to specify location in container for auth0 to mount user override file 51 | export AUTH0_DEFAULT_USER=${AUTH0_DEFAULT_USER:="user1"} # used to auto-populate auth0 mock login page 52 | export AUTH0_DEFAULT_PASSWORD=${AUTH0_DEFAULT_USER:="user1"} # used to auto-populate auth0 mock login page 53 | 54 | export USE_JAVA # install ubuntu:latest openjdk packages 55 | export USE_DOT_NET # install ubuntu:latest dotnet core packages 56 | export USE_AZURE # install latest Azure cli 57 | export USE_POWERSHELL # install powershell 7.5.0 58 | 59 | export EDITOR=${EDITOR:=vi} # sets default editor in container. usually set to same as VISUAL 60 | export VISUAL=${VISUAL:=vi} # sets default editor in container. usually set to same as EDITOR 61 | if [ -z ${SSH_KEYSCAN_HOSTS+x} ]; then 62 | export SSH_KEYSCAN_HOSTS="gitlab.com github.com bitbucket.org" # copy ssh keys from these hosts to prevent unknown key prompts 63 | fi 64 | 65 | # default secondary dns to google secondary dns if not specified 66 | if [ -z "$GDC_DNS_SEC_IP" ]; then 67 | GDC_DNS_SEC_IP=8.8.4.4 68 | fi 69 | 70 | # these will only be used by the container if GDC_DNS_PRI_IP is defined 71 | export GDC_DNS_PRI_IP 72 | export GDC_DNS_SEC_IP 73 | 74 | export DEVNET_GATEWAY 75 | 76 | export USE_COLOR_PROMPT=${USE_COLOR_PROMPT:=yes} # enable colored bash prompt 77 | 78 | export CHECK_UPDATES=${CHECK_UPDATES:=yes} # check for updates on each login 79 | export SHARED_VOLUMES # specify volume names to create and share across all GDC's 80 | export GDC_RUN_MODE=${GDC_RUN_MODE:="start"} # options are start, stop, daemon 81 | 82 | export DEV_CONTAINER_NAME=${DEV_CONTAINER_NAME:="dev-1"} # dev container name 83 | 84 | export COPY_CMD_TO_CLIPBOARD=${COPY_CMD_TO_CLIPBOARD:=yes} # COPY GDC shell launch command to clipboard 85 | 86 | export USE_PROXY=${USE_PROXY:=no} # no, proxy, dump, web 87 | export USE_PROXY_CA=${USE_PROXY_CA:=yes} # if yes and USE_PROXY!=no then install proxy CA cert into GDC 88 | export PROXY_VERSION=${PROXY_VERSION:=latest} # container image version tag 89 | export PROXY_CONTAINER_NAME=${PROXY_CONTAINER_NAME:=proxy} # name of the container 90 | export PROXY_AUTO_EXPORT_ENV=${PROXY_AUTO_EXPORT_ENV:=no} # auto export HTTP_PROXY and HTTPS_PROXY 91 | export USE_PROXY_HOST=${USE_PROXY_HOST:=no} # no, yes 92 | export PROXY_HOST_PORT=${PROXY_HOST_PORT:=8080} # port to expose to host 93 | export PROXY_WEB_HOST_PORT=${PROXY_WEB_HOST_PORT:=8081} # if running in web mode expose this port 94 | 95 | # if not set default $HOME/.aws 
symlink to false 96 | if [ -z ${USE_AWS_SYMLINK+x} ]; then 97 | export USE_AWS_SYMLINK=no 98 | fi 99 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Autodetect text files 2 | * text=auto 3 | 4 | # ...Unless the name matches the following 5 | # overriding patterns 6 | 7 | # Definitively text files 8 | *.txt text 9 | *.json text 10 | *.js text 11 | *.ts text 12 | .env text 13 | *.sh text 14 | *.sql text 15 | *.yml text 16 | *.py text 17 | *.js text 18 | *.ts text 19 | *.ini text 20 | Dockerfile text 21 | 22 | # Ensure those won't be messed up with 23 | *.jpg binary 24 | *.gif binary 25 | *.png binary 26 | 27 | # force line endings to be lf so db container does not blow up 28 | **/*.sh text eol=lf 29 | **/*.sql text eol=lf 30 | **/.env text eol=lf 31 | **/.env* text eol=lf 32 | **/Dockerfile text eol=lf 33 | **/*.py text eol=lf 34 | **/*.js text eol=lf 35 | **/*.ts text eol=lf 36 | **/*.json text eol=lf 37 | **/*.yml text eol=lf 38 | **/Makefile text eol=lf 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/dist/ 2 | **/node_modules/ 3 | **/.idea/ 4 | **/.vscode 5 | **/venv/ 6 | **/.parcel-cache/ 7 | **/__pycache__/ 8 | **/*.js.map 9 | **/tmp 10 | **/temp 11 | *.env 12 | **/DEADJOE 13 | 14 | .DS_Store 15 | /.env-gdc-local 16 | k8s/.env-gdc-local -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [2022] [Paul Robello] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | | Version | Supported | 6 | | ------- | ------------------ | 7 | | 1.x.x | :white_check_mark: | 8 | | < 1.0 | :x: | 9 | 10 | ## Reporting a Vulnerability 11 | 12 | Please report any issue security or otherwise in the Issues 13 | -------------------------------------------------------------------------------- /auth0_mock/.dockerignore: -------------------------------------------------------------------------------- 1 | docker-compose.yml 2 | **/temp 3 | **/tmp 4 | test 5 | **/*.ps1 6 | **/.idea 7 | **/.vscode 8 | data 9 | attachments 10 | **/venv* 11 | **/vendor 12 | **/node_modules 13 | **/__pycache__ 14 | **/*.zip 15 | **/test_data 16 | **/.cache 17 | **/.pulumi 18 | docs 19 | .run 20 | DEADJOE 21 | -------------------------------------------------------------------------------- /auth0_mock/.gitignore: -------------------------------------------------------------------------------- 1 | coverage/* 2 | keys.json 3 | *.log 4 | *.lock 5 | /package-lock.json 6 | -------------------------------------------------------------------------------- /auth0_mock/Dockerfile: -------------------------------------------------------------------------------- 1 | #FROM node:current-alpine 2 | FROM node:18-alpine 3 | 4 | RUN mkdir /usr/local/app 5 | WORKDIR /usr/local/app 6 | 7 | COPY . ./ 8 | -------------------------------------------------------------------------------- /auth0_mock/README.md: -------------------------------------------------------------------------------- 1 | # auth0-mock 2 | 3 | > running auth0 locally in docker form. Build contained within docker compose yml 4 | 5 | ## Getting Started 6 | 7 | ### Prerequisites 8 | 9 | * Docker / Docker Compose 10 | * Node 11 | * yarn 12 | * mkcert -> discussed further in [Self Signed SSL Cert section](./certs/README.md) 13 | 14 | ## ENV Config opts (set in docker compose) 15 | * **APP_PORT** - port that auth0 mock is running on | defaults to 3001 16 | * **AUTH0_HOST** - host that auth0 mock is running on | defaults to localhost:APP_PORT 17 | * **FRONTEND_PORT** - port that frontend is running on | defaults to 3030 18 | * **FRONTEND_PROTOCOL** - protocol that frontend uses | defaults to http 19 | * **FRONTEND_DOMAIN** - domain that frontend is running on | defaults to localhost:FRONTEND_PORT 20 | 21 | ## Running the app 22 | 23 | * cd into directory with docker compose yml (`fos-data-portal/auto_tests`) 24 | * run `docker compose up` 25 | * startup script is contained within `auth0_mock/package.json -> 'start' script` 26 | * [Setup self signed cert](./certs/README.md) 27 | 28 | ## API Documentation 29 | 30 | ### ROUTES 31 | 32 | #### `GET` / 33 | 34 | returns list of routes and what they do 35 | 36 | #### `GET` /authorize 37 | *official auth0 service uses this route (most frontend frameworks will use it too)* 38 | 39 | renders a login page which makes a POST request to login route 40 | 41 | #### `GET` /login 42 | *required params - username & pw*
> 43 | 44 | logs a user in. [User information can be found here](./users.json) 45 | 46 | #### `POST` /login 47 | *official auth0 service uses this route (most frontend frameworks will use it too)*<br
48 | *required params - username & pw & redirect & state*
> 49 | 50 | logs a user in. [User information can be found here](./users.json) 51 | 52 | #### `GET` /logout 53 | 54 | logout user 55 | 56 | #### `GET` /v2/logout 57 | *official auth0 service uses this route (most frontend frameworks will use it too)*<br
58 | 59 | logout user 60 | 61 | #### `GET` /.well-known/jwks.json 62 | *official auth0 service uses this route (most frontend frameworks will use it too)*
> 63 | *no login required - this route is public*<br
> 64 | 65 | Returns the JSON Web Key Set (JWKS) 66 | 67 | #### `GET` /access_token 68 | *user must be logged in to access*<br
69 | 70 | Returns access_token for user. [access_token props can be found here](./token-claims/access.ts) 71 | 72 | #### `GET` /id_token 73 | *user must be logged in to access*
74 | 75 | returns id_token for user. [id_token props can be found here](./token-claims/id.ts) 76 | 77 | #### `POST` /oauth/token 78 | *user must be logged in to access*
79 | *official auth0 service uses this route (most frontend frameworks will use it too)*
80 | *required body params - client_id*
81 | 82 | returns JSON containing access token, id token, expires_in value, scope, and token type 83 | 84 | #### `GET` /verify_token_test 85 | *user must be logged in to access*
86 | 87 | verifies token for debug purposes - outputs to container logs 88 | 89 | #### `GET` /userinfo 90 | *user must be logged in to access*
91 | *official auth0 service uses this route (most frontend frameworks will use it too)*
92 | 93 | returns [id claims](./token-claims/id.ts) 94 | 95 | ### Modify user and/or properties 96 | 97 | To modify a user go into [user.json](./users.json). From here you can add/remove users and properties. 98 | 99 | *User json key must be same as username* 100 | 101 | *Every user must have a username and a pw* 102 | 103 | *If you are adding users & wish to use refresh tokens make sure to add offline_access to scope property* 104 | 105 | ### Modify token claims 106 | 107 | Claims may be modified from within the *Token_Claims* directory. Claim values can be static or pulled from user file ( 108 | user.json) or [token defaults file](./token-claims/token_defaults.ts) 109 | 110 | ### Self Signed SSL Cert 111 | [instructions contained here!](./certs/README.md) 112 | -------------------------------------------------------------------------------- /auth0_mock/ext_pk/auth0_jwk.json: -------------------------------------------------------------------------------- 1 | { 2 | "alg": "RS256", 3 | "kty": "RSA", 4 | "n": "wHkz_b53rjnY9n9-LGN8czUt2L0mOYTWu_799I8nYg9_TfRBNYv6-u4kt15AhgBn10vnlpMuuzrB7vt_NGFjOdpY4BvBBhrfvJZkLguEmaBGNqruQPy6vIiLXeSzEBezVRc5NrxIiV9KWfhASILKzfSxC-r6DUqbqfj-GMkKT2egSQaRoGiqDBG2JOelVCKubncwec7se3AOnaRnKrrdve1PLwQmEzkGpPorcKCCQVlsJX016u4XDXkqCaSpKv5pyBi-H4x0RZa-SoydBzDjXrrqkh9Kv8w0vyejPnykS31GI9h7vUS-akYCYRUwVFa2N387Pi1zrocyjGuVE138Zw", 5 | "e": "AQAB", 6 | "d": "NAgQh9yHWBUnxpd5huX1_51G9h5kccT6gDdyXpIIjfzcFSbd2r9ptqRx58Rb5WDYYLTK6h0uJ86-OiFSNbgxqOZ1QBQXAv7zq3SuGGFEbGWt7urXAMHV27JFFG7yU99o2x1sFYI45Q7McGnPgphw3Hru8QQGo7OAQX3ZjcACgsZayoRCgRA-cRGP6xiBIvUyT-75qT6g36BEu5bP4pRqAItUOoS8CL6IA5k39oNTsL-yR5Pn37IR42WxYUE7mtPblMypETSB1uiF7wL3-rmzT3KHb_kaz9gbZEyBc12W0T32ri5rNFNIQv2todcpIJGdlhObtp-IuyMYWdGGqZAB", 7 | "p": "231MkG5VF2Hi24wF2ZpBPnfhzRsiWZpz4OswAqcygG6Z5rdPdAethEvUz-Rk6VqbLrYxvSXkVS8by0iOxd628zwY3BosEgc7_ym1KIkstirWx4SnuAMvSD33QF7hyZVJury18zkud1xVQUNXn21HzK1OUSQ9Dv4UYrtDQuNSb7E", 8 | "q": "4H11liCmk1zVEdlZ3DZG5T9b8_wFIY6FjAQLGlpHL-ac2w9ijH8Q4vD7-9MsK8t9UoP2Sl-lQfTm_YIBLcgpc4MyYx2yaxZkjjVk94RH38vSgD1qPYhA2imrhs8XCyHjRnNpuVZBQY49VJ3SWZssjdDaFryWXOItlDBlxIPpi5c", 9 | "dp": "Lsv_SAgOImcfbDnlgWivIneC8C0p3LrenATo_pfRX6q1K4jH6vA8IandXNnQXiSQU5xK7I4oqbTakzQMJMoAbcnRbxQxc0KRmyy0UEk2_DwUAQQaklQzf46eqd3Q_B7VUngrvwjhDFfmYXzPMNGm7k_BE_HLBuhLRmWwyJEZIXE", 10 | "dq": "qMJHyiMjdjZcSr29SslWxHG7-4-if9Z3WImVmyrwxvazRg6rw_ilxiTpGSdn1kh0HrrrRH_gaNPlbf_0SOlnF9ox38bsYIqF703-Z__-VCQSS6tfmYA7WIXo10AJD6pbA5Qxj01jYxe9zUWTYx8_ACFYQa1lz8-L-hHj_zY3NGM", 11 | "qi": "lX0zzdQc-tYxLzsyE88GmMwe12lEEeTOhtZjvac4GDWBoEwq1B0j1TZE48IGbehI5XnCAMtgw_-5vlwO3H2PcqsEcR8sCywFWDAR0bYFzapTK5uqAVROeaeZTQCTz7VV8gh0uv2daqa2VoAzIlR4OSiRi45wIdRYFrRKOfNJw1k", 12 | "kid": "oauth-key" 13 | } -------------------------------------------------------------------------------- /auth0_mock/ext_pk/auth0_jwks.json: -------------------------------------------------------------------------------- 1 | {"keys": [{"alg": "RS256", "kty": "RSA", "n": "wHkz_b53rjnY9n9-LGN8czUt2L0mOYTWu_799I8nYg9_TfRBNYv6-u4kt15AhgBn10vnlpMuuzrB7vt_NGFjOdpY4BvBBhrfvJZkLguEmaBGNqruQPy6vIiLXeSzEBezVRc5NrxIiV9KWfhASILKzfSxC-r6DUqbqfj-GMkKT2egSQaRoGiqDBG2JOelVCKubncwec7se3AOnaRnKrrdve1PLwQmEzkGpPorcKCCQVlsJX016u4XDXkqCaSpKv5pyBi-H4x0RZa-SoydBzDjXrrqkh9Kv8w0vyejPnykS31GI9h7vUS-akYCYRUwVFa2N387Pi1zrocyjGuVE138Zw", "e": "AQAB", "kid": "oauth-key"}]} -------------------------------------------------------------------------------- /auth0_mock/ext_pk/auth0_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | 
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDAeTP9vneuOdj2 3 | f34sY3xzNS3YvSY5hNa7/v30jydiD39N9EE1i/r67iS3XkCGAGfXS+eWky67OsHu 4 | +380YWM52ljgG8EGGt+8lmQuC4SZoEY2qu5A/Lq8iItd5LMQF7NVFzk2vEiJX0pZ 5 | +EBIgsrN9LEL6voNSpup+P4YyQpPZ6BJBpGgaKoMEbYk56VUIq5udzB5zux7cA6d 6 | pGcqut297U8vBCYTOQak+itwoIJBWWwlfTXq7hcNeSoJpKkq/mnIGL4fjHRFlr5K 7 | jJ0HMONeuuqSH0q/zDS/J6M+fKRLfUYj2Hu9RL5qRgJhFTBUVrY3fzs+LXOuhzKM 8 | a5UTXfxnAgMBAAECgf80CBCH3IdYFSfGl3mG5fX/nUb2HmRxxPqAN3JekgiN/NwV 9 | Jt3av2m2pHHnxFvlYNhgtMrqHS4nzr46IVI1uDGo5nVAFBcC/vOrdK4YYURsZa3u 10 | 6tcAwdXbskUUbvJT32jbHWwVgjjlDsxwac+CmHDceu7xBAajs4BBfdmNwAKCxlrK 11 | hEKBED5xEY/rGIEi9TJP7vmpPqDfoES7ls/ilGoAi1Q6hLwIvogDmTf2g1Owv7JH 12 | k+ffshHjZbFhQTua09uUzKkRNIHW6IXvAvf6ubNPcodv+RrP2BtkTIFzXZbRPfau 13 | Lms0U0hC/a2h1ykgkZ2WE5u2n4i7IxhZ0YapkAECgYEA231MkG5VF2Hi24wF2ZpB 14 | PnfhzRsiWZpz4OswAqcygG6Z5rdPdAethEvUz+Rk6VqbLrYxvSXkVS8by0iOxd62 15 | 8zwY3BosEgc7/ym1KIkstirWx4SnuAMvSD33QF7hyZVJury18zkud1xVQUNXn21H 16 | zK1OUSQ9Dv4UYrtDQuNSb7ECgYEA4H11liCmk1zVEdlZ3DZG5T9b8/wFIY6FjAQL 17 | GlpHL+ac2w9ijH8Q4vD7+9MsK8t9UoP2Sl+lQfTm/YIBLcgpc4MyYx2yaxZkjjVk 18 | 94RH38vSgD1qPYhA2imrhs8XCyHjRnNpuVZBQY49VJ3SWZssjdDaFryWXOItlDBl 19 | xIPpi5cCgYAuy/9ICA4iZx9sOeWBaK8id4LwLSncut6cBOj+l9FfqrUriMfq8Dwh 20 | qd1c2dBeJJBTnErsjiiptNqTNAwkygBtydFvFDFzQpGbLLRQSTb8PBQBBBqSVDN/ 21 | jp6p3dD8HtVSeCu/COEMV+ZhfM8w0abuT8ET8csG6EtGZbDIkRkhcQKBgQCowkfK 22 | IyN2NlxKvb1KyVbEcbv7j6J/1ndYiZWbKvDG9rNGDqvD+KXGJOkZJ2fWSHQeuutE 23 | f+Bo0+Vt//RI6WcX2jHfxuxgioXvTf5n//5UJBJLq1+ZgDtYhejXQAkPqlsDlDGP 24 | TWNjF73NRZNjHz8AIVhBrWXPz4v6EeP/Njc0YwKBgQCVfTPN1Bz61jEvOzITzwaY 25 | zB7XaUQR5M6G1mO9pzgYNYGgTCrUHSPVNkTjwgZt6EjlecIAy2DD/7m+XA7cfY9y 26 | qwRxHywLLAVYMBHRtgXNqlMrm6oBVE55p5lNAJPPtVXyCHS6/Z1qprZWgDMiVHg5 27 | KJGLjnAh1FgWtEo580nDWQ== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /auth0_mock/index.ts: -------------------------------------------------------------------------------- 1 | import cors from "cors" 2 | import express, {Application} from "express"; 3 | import {json, urlencoded} from "body-parser"; 4 | import {port} from "./modules/helpers"; 5 | import {rawReqLogger} from "./modules/middleware"; 6 | import {routerApi} from "./routes/api"; 7 | import {routerIndex} from "./routes"; 8 | import {routerAuth} from "./routes/authentication"; 9 | 10 | const app: Application = express(); 11 | 12 | app 13 | .set('view engine', 'ejs') 14 | .use(json()) 15 | .use(urlencoded({extended: true})) 16 | .use(cors()) 17 | .options('*', cors()) 18 | .use(express.static('public')) 19 | .use(rawReqLogger) 20 | .use([routerIndex, routerAuth, routerApi]); 21 | 22 | // Jest automatically defines as test 23 | if (process.env.NODE_ENV !== 'test') { 24 | app.listen(port, '0.0.0.0', () => 25 | console.log('http connected to localhost port ', port) 26 | ); 27 | } 28 | 29 | export default app -------------------------------------------------------------------------------- /auth0_mock/jest.config.ts: -------------------------------------------------------------------------------- 1 | import type {Config} from 'jest'; 2 | 3 | const config: Config = { 4 | preset: 'ts-jest', 5 | testEnvironment: 'node', 6 | // testMatch: ["tests/"], 7 | verbose: true, 8 | }; 9 | 10 | export default config; 11 | -------------------------------------------------------------------------------- /auth0_mock/modules/authentication.ts: -------------------------------------------------------------------------------- 1 | import {IUsers, UsersDefaults} from "../types"; 
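// Descriptive note: the class below holds the mock's login state in memory and is exported
// as a single shared instance (Auth), so one successful login applies to all later requests.
// login() succeeds when the supplied user object has a "pw" field that matches the given
// password (compared case-insensitively) and records that user as currentUser; logout()
// resets loggedIn and currentUser back to their defaults.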
2 | 3 | class Authentication { 4 | public loggedIn: boolean; 5 | public currentUser: IUsers; 6 | 7 | constructor() { 8 | this.loggedIn = false; 9 | this.currentUser = UsersDefaults; 10 | } 11 | 12 | // log a user in 13 | // if userObj is passed in & not empty then username was correct & only pw needs to be checked 14 | login(userObj: IUsers, pw: string): boolean { 15 | if ( 16 | userObj && 17 | "pw" in userObj && 18 | userObj.pw.toLowerCase() === pw.toLowerCase() 19 | ) { 20 | this.loggedIn = true; 21 | this.currentUser = userObj; 22 | return true; 23 | } 24 | return false; 25 | } 26 | 27 | // log a user out 28 | logout(): void { 29 | this.loggedIn = false; 30 | this.currentUser = UsersDefaults; 31 | console.log('logged out'); 32 | } 33 | 34 | } 35 | 36 | export const Auth = new Authentication(); -------------------------------------------------------------------------------- /auth0_mock/modules/helpers.ts: -------------------------------------------------------------------------------- 1 | import {IIdTokenClaims} from "../types"; 2 | 3 | export const removeNonceIfEmpty = (obj: IIdTokenClaims): IIdTokenClaims => { 4 | if ('nonce' in obj && obj.nonce === '') { 5 | delete obj.nonce; 6 | } 7 | return obj; 8 | }; 9 | 10 | export const removeTrailingSlash = (str: string): string => str.endsWith('/') ? str.slice(0, -1) : str; 11 | 12 | export const buildUriParams = (vars: Record): string => Object.keys(vars) 13 | .reduce((a, k) => { 14 | a.push(`${k}=${encodeURIComponent(vars[k])}`); 15 | return a; 16 | }, []) 17 | .join('&'); 18 | 19 | 20 | export const port: number = parseInt(process.env.APP_PORT, 10) || 3001; 21 | export const auth0Url: string = process.env.AUTH0_DOMAIN || 'http://localhost:' + port; 22 | -------------------------------------------------------------------------------- /auth0_mock/modules/jwk-wrapper.ts: -------------------------------------------------------------------------------- 1 | import jwt from "jsonwebtoken"; 2 | import {JWK, JWS} from "node-jose"; 3 | import jwkToBuffer from "jwk-to-pem"; 4 | import {existsSync, readFileSync, writeFileSync} from "fs"; 5 | import {IIdTokenClaims, IAccessTokenClaims, IKeyList} from "../types"; 6 | 7 | class JWKWrapper { 8 | private readonly kty: string; 9 | private readonly size: number; 10 | private readonly jwkFileName: string; 11 | public readonly expirationDurationInMinutesAccessToken: number; 12 | public readonly expirationDurationInMinutesIdToken: number; 13 | private nonce: string; 14 | private readonly props: any; 15 | private keyStore: JWK.KeyStore; 16 | 17 | constructor( 18 | kty: string = 'RSA', 19 | size: number = 2048, 20 | props: object = {alg: 'RS256', use: 'sig'}, 21 | jwkFileName: string = 'keys.json' 22 | ) { 23 | this.kty = kty; 24 | this.size = size; 25 | this.props = props; 26 | this.jwkFileName = jwkFileName; 27 | this.keyStore = JWK.createKeyStore(); 28 | // 1440 minutes === 24 hours 29 | this.expirationDurationInMinutesAccessToken = parseInt(process.env.AUTH0_ACCESS_TOKEN_EXP, 10) || 1440; 30 | this.expirationDurationInMinutesIdToken = parseInt(process.env.AUTH0_ID_TOKEN_EXP, 10) || 1440; 31 | this.nonce = ''; 32 | this.createJwks(); 33 | } 34 | 35 | // return nonce 36 | getNonce(): string { 37 | return this.nonce; 38 | } 39 | 40 | setNonce(nonce: string): void { 41 | this.nonce = nonce; 42 | } 43 | 44 | // generate & return iat value 45 | getIat(): number { 46 | return Math.floor(Date.now() / 1000); 47 | } 48 | 49 | // generate & return exp value 50 | getExp(durationInMinutes: number): number { 51 | const res= 
Math.floor( 52 | (Date.now() + durationInMinutes * 60 * 1000) / 1000 53 | ); 54 | console.log(`RES EXP ${res}`); 55 | console.log(`expirationDurationInMinutes ${durationInMinutes}`); 56 | return res 57 | } 58 | 59 | // Create key set and store on local file system 60 | createJwks(): void { 61 | console.log('Creating JWKS store'); 62 | let keyStorePromise: Promise = null; 63 | if (existsSync('./ext_pk/auth0_jwk.json')) { 64 | console.log('Found external JWK file, loading it in store'); 65 | const keyData = readFileSync('./ext_pk/auth0_jwk.json', { 66 | encoding: 'utf8', 67 | flag: 'r' 68 | }); 69 | const keyJson = JSON.parse(keyData); 70 | keyStorePromise = JWK.asKeyStore([keyJson]).then((result: any) => { 71 | // {result} is a jose.JWK.KeyStore 72 | this.keyStore = result; 73 | }); 74 | } else { 75 | console.log('Generate new JWKS store'); 76 | keyStorePromise = this.keyStore.generate(this.kty, this.size, this.props); 77 | } 78 | 79 | keyStorePromise.then(() => 80 | writeFileSync( 81 | this.jwkFileName, 82 | JSON.stringify(this.keyStore.toJSON(true), null, ' ') 83 | ) 84 | ); 85 | } 86 | 87 | // return key set 88 | getKeys(includePrivateKey: boolean = false): IKeyList { 89 | return this.keyStore.toJSON(includePrivateKey) as IKeyList; 90 | } 91 | 92 | // create token with given payload & options 93 | async createToken(payload: IIdTokenClaims | IAccessTokenClaims, opt: object = {}): Promise { 94 | const key: JWK.Key = this.keyStore.all({use: 'sig'})[0]; 95 | 96 | // default options if none passed in 97 | if (Object.keys(opt).length === 0) { 98 | opt = {compact: true, jwk: key, fields: {typ: 'jwt'}}; 99 | } 100 | 101 | return await JWS.createSign(opt, key) 102 | .update(JSON.stringify(payload)) 103 | .final(); 104 | } 105 | 106 | async verify(token: JWS.CreateSignResult): Promise { 107 | try { 108 | console.log('verify token \n'); 109 | console.log(`token \n ${token} \n`); 110 | // Use first sig key 111 | const key: JWK.Key = this.keyStore.all({use: 'sig'})[0]; 112 | // Verify Token with jsonwebtoken 113 | const publicKey: string = jwkToBuffer(key.toJSON() as jwkToBuffer.JWK); 114 | const privateKey: string = jwkToBuffer(key.toJSON(true) as jwkToBuffer.JWK, {private: true}); 115 | console.log(`public key \n ${publicKey} \n`); 116 | console.log(`private key \n ${privateKey} \n`); 117 | const decoded = jwt.verify(token.toString(), publicKey); 118 | console.log('decoded', decoded); 119 | return true; 120 | } catch (e) { 121 | return false 122 | } 123 | } 124 | } 125 | 126 | export const JwkWrapper = new JWKWrapper() -------------------------------------------------------------------------------- /auth0_mock/modules/middleware.ts: -------------------------------------------------------------------------------- 1 | import {Auth} from './authentication' 2 | import {Request, Response, NextFunction} from "express"; 3 | 4 | // checks if user is logged in 5 | export function checkLogin(req: Request, res: Response, next: NextFunction): Response | void { 6 | if (Auth.loggedIn) { 7 | return next(); 8 | } 9 | console.log('Error user not logged in'); 10 | return res.status(401).send('Unauthorized. 
User not logged in'); 11 | } 12 | 13 | // logs raw request props 14 | export function rawReqLogger(req: Request, res: Response, next: NextFunction): void { 15 | // Debug helper | logs props for all requests 16 | console.log('=========================================='); 17 | console.log(new Date().toISOString(), 'raw request logging'); 18 | console.log('=========================================='); 19 | console.log('route ' + req.path + ' hit \n'); 20 | console.log('req headers ' + JSON.stringify(req.headers) + '\n'); 21 | console.log('req body ' + JSON.stringify(req.body) + '\n'); 22 | console.log('req params ' + JSON.stringify(req.params) + '\n'); 23 | console.log('=========================================='); 24 | 25 | return next(); 26 | } 27 | -------------------------------------------------------------------------------- /auth0_mock/modules/user.ts: -------------------------------------------------------------------------------- 1 | import {join} from "path"; 2 | import {readFileSync, existsSync} from "fs"; 3 | import {IUsers, UsersDefaults} from "../types"; 4 | 5 | class Users { 6 | private readonly userList: Record; 7 | 8 | constructor(userFileName: string = "", userFileDir: string = './') { 9 | if (!userFileName) { 10 | if (existsSync('./users-local.json')) { 11 | console.log('using: users-local.json'); 12 | userFileName = 'users-local.json'; 13 | } else { 14 | console.log('using: users.json'); 15 | userFileName = 'users.json'; 16 | } 17 | } 18 | // parse user config file 19 | this.userList = JSON.parse(readFileSync(join(userFileDir, userFileName), 'utf8')); 20 | } 21 | 22 | // get user object for specific username 23 | public getUser(username: string): IUsers { 24 | return this.userList[username] || UsersDefaults; 25 | } 26 | } 27 | 28 | export const User = new Users() 29 | -------------------------------------------------------------------------------- /auth0_mock/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "auth0-docker-mocker", 3 | "version": "1.0.0", 4 | "main": "dist/index.js", 5 | "scripts": { 6 | "prebuild": "tslint -c tslint.json -p tsconfig.json --fix", 7 | "build": "tsc", 8 | "prestart": "npm install && npm run build", 9 | "start": "node .", 10 | "cleanup": "rm -rf node_modules keys.json", 11 | "test": "jest", 12 | "test-dev": "jest --watch", 13 | "test-leaks": "jest --detectOpenHandles", 14 | "test-coverage": "jest --coverage" 15 | }, 16 | "dependencies": { 17 | "body-parser": "^1.20.2", 18 | "cors": "^2.8.5", 19 | "debug": "^4.3.4", 20 | "ejs": "^3.1.9", 21 | "express": "^4.18.2", 22 | "jsonwebtoken": "^9.0.1", 23 | "jwk-to-pem": "^2.0.5", 24 | "node-jose": "^2.2.0" 25 | }, 26 | "devDependencies": { 27 | "@jest/types": "^29.6.3", 28 | "@types/body-parser": "^1.19.2", 29 | "@types/cors": "^2.8.13", 30 | "@types/express": "^4.17.17", 31 | "@types/jest": "^29.5.4", 32 | "@types/jsonwebtoken": "^9.0.2", 33 | "@types/jwk-to-pem": "^2.0.1", 34 | "@types/node": "^20.5.7", 35 | "@types/node-jose": "^1.1.10", 36 | "@types/supertest": "^2.0.12", 37 | "jest": "^29.6.4", 38 | "node-mocks-http": "^1.13.0", 39 | "supertest": "^6.3.3", 40 | "ts-jest": "^29.1.1", 41 | "ts-node": "^10.9.1", 42 | "tslint": "^6.1.3", 43 | "typescript": "^4.9.5" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /auth0_mock/public/images/ufo-2.svg: -------------------------------------------------------------------------------- 1 | 
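The route modules that follow implement the mock's HTTP API. As a quick orientation, here is a minimal, illustrative usage sketch showing one way to exercise the mock end to end. It assumes the defaults described in the README - the server listening on port 3001 and a user1/user1 entry in users.json - plus a Node 18+ runtime with the global fetch API; the client_id value is a placeholder.

// Illustrative sketch only: assumes APP_PORT default 3001 and a user1/user1 user in users.json.
const base = "http://localhost:3001";

async function exerciseMock(): Promise<void> {
  // GET /login stores login state in the in-memory Auth singleton,
  // so later requests are treated as authenticated.
  await fetch(`${base}/login?username=user1&pw=user1`);

  // Signed id token for the logged-in user (compact JWS string).
  const idToken = await (await fetch(`${base}/id_token`)).text();

  // Full token response: access_token, id_token, expires_in, scope, token_type, refresh_token.
  const tokens = await (await fetch(`${base}/oauth/token`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ client_id: "example-client" }), // placeholder client_id
  })).json();

  console.log(idToken, tokens);
}

exerciseMock().catch(console.error);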
-------------------------------------------------------------------------------- /auth0_mock/routes/api.ts: -------------------------------------------------------------------------------- 1 | import jwktopem from "jwk-to-pem"; 2 | import {Router, Request, Response} from "express"; 3 | import {auth0Url, removeNonceIfEmpty} from "../modules/helpers"; 4 | import {checkLogin} from "../modules/middleware"; 5 | import {JwkWrapper} from "../modules/jwk-wrapper"; 6 | import {accessTokenClaims} from "../token-claims/access"; 7 | import {idTokenClaims} from "../token-claims/id"; 8 | import {IAccessTokenClaims} from "../types"; 9 | 10 | export const routerApi: Router = Router(); 11 | 12 | // Returns JWKS (This is public and does not require login) 13 | routerApi.get('/.well-known/jwks.json', (req: Request, res: Response) => { 14 | res.status(200).send(JwkWrapper.getKeys()); 15 | }); 16 | 17 | // Get the private key used to sign 18 | routerApi.get('/jwks', async (req: Request, res: Response) => { 19 | res.status(200).send(jwktopem(JwkWrapper.getKeys(true).keys[0], {private: true})); 20 | }); 21 | 22 | // Returns access token for user 23 | routerApi.get('/access_token', checkLogin, async (req: Request, res: Response) => { 24 | res.status(200).send( 25 | await JwkWrapper.createToken( 26 | accessTokenClaims('', [`${auth0Url}/userinfo`]) 27 | ) 28 | ); 29 | }); 30 | 31 | // Returns id token for user 32 | routerApi.get('/id_token', checkLogin, async (req: Request, res: Response) => { 33 | res.status(200).send( 34 | await JwkWrapper.createToken(removeNonceIfEmpty(idTokenClaims())) 35 | ); 36 | }); 37 | 38 | // Auth0 token route | returns access && id token 39 | routerApi.post('/oauth/token', checkLogin, async (req: Request, res: Response) => { 40 | console.log(JwkWrapper.getKeys(true)); 41 | const {client_id}: { client_id: string } = req.body; 42 | const accessTokenClaim: IAccessTokenClaims = accessTokenClaims(client_id, [ 43 | `${auth0Url}/userinfo` 44 | ]); 45 | console.log({accessTokenClaim}); 46 | res.status(200).send({ 47 | access_token: await JwkWrapper.createToken(accessTokenClaim), 48 | expires_in: (JwkWrapper.expirationDurationInMinutesAccessToken * 60), 49 | id_token: await JwkWrapper.createToken( 50 | removeNonceIfEmpty(idTokenClaims(client_id)) 51 | ), 52 | scope: accessTokenClaim.scope, 53 | token_type: 'Bearer', 54 | refresh_token: "refresh_token" 55 | }); 56 | }); 57 | 58 | // Used to verify token 59 | routerApi.get('/verify_token_test', checkLogin, async (req: Request, res: Response) => { 60 | const resp: boolean = await JwkWrapper.verify( 61 | await JwkWrapper.createToken(removeNonceIfEmpty(idTokenClaims())) 62 | ); 63 | const msg: string = resp ? 
"token verified - ": "token verification failed"; 64 | res.status(200).send(`${msg} - see logs for details`); 65 | }); 66 | 67 | // Used to get userinfo 68 | routerApi.get('/userinfo', checkLogin, (req: Request, res: Response) => { 69 | res.status(200).json(removeNonceIfEmpty(idTokenClaims())); 70 | }); 71 | -------------------------------------------------------------------------------- /auth0_mock/routes/authentication.ts: -------------------------------------------------------------------------------- 1 | import {Router, Request, Response} from "express"; 2 | import {User} from "../modules/user"; 3 | import {Auth} from "../modules/authentication"; 4 | import {idTokenClaims} from "../token-claims/id"; 5 | import {JwkWrapper} from "../modules/jwk-wrapper"; 6 | import {accessTokenClaims} from "../token-claims/access"; 7 | import {buildUriParams, auth0Url, removeNonceIfEmpty} from "../modules/helpers" 8 | import {IAuthorize, AuthorizedDefaults, IAccessTokenClaims, ILogin, LoginDefaults} from "../types"; 9 | 10 | export const routerAuth: Router = Router(); 11 | 12 | // path renders login page | used in conjunction with auth0 frontend libs | makes POST to login route 13 | routerAuth.get('/authorize', async (req: Request, res: Response) => { 14 | const {redirect_uri, prompt, state, client_id, nonce, audience}: IAuthorize = {...AuthorizedDefaults, ...req.query}; 15 | 16 | JwkWrapper.setNonce(nonce); 17 | if (!redirect_uri) { 18 | return res.status(400).send('missing redirect url'); 19 | } 20 | if (prompt === 'none') { 21 | console.log('got silent refresh request'); 22 | if (!Auth.loggedIn) { 23 | console.log('silent refresh user not logged in'); 24 | const varsNoPrompt: Record = { 25 | state, 26 | error: 'login_required', 27 | error_description: 'login_required' 28 | }; 29 | const paramsNoPrompt: string = buildUriParams(varsNoPrompt); 30 | const locationNoPrompt: string = `${redirect_uri}?${paramsNoPrompt}`; 31 | console.log('Redirect to Location', locationNoPrompt); 32 | return res.writeHead(302, {Location: locationNoPrompt}).end(); 33 | } 34 | console.log('silent refresh user logged in, doing refresh'); 35 | const accessTokenC: IAccessTokenClaims = accessTokenClaims(audience, [ 36 | audience, 37 | `${auth0Url}/userinfo` 38 | ]); 39 | const vars:Record = { 40 | state, 41 | code: '1234', 42 | access_token: await JwkWrapper.createToken(accessTokenC), 43 | expires_in: 86400, 44 | id_token: await JwkWrapper.createToken( 45 | removeNonceIfEmpty(idTokenClaims(audience)) 46 | ), 47 | scope: accessTokenC.scope, 48 | token_type: 'Bearer' 49 | }; 50 | console.log('silent refresh vars', vars); 51 | const params: string = buildUriParams(vars); 52 | const location: string = `${redirect_uri}?${params}`; 53 | console.log('Redirect to Location', location); 54 | return res.writeHead(302, {Location: location}).end(); 55 | } 56 | return res.render('../templates/login_page', { 57 | username: process.env.AUTH0_DEFAULT_USER || 'user1', 58 | password: process.env.AUTH0_DEFAULT_PASSWORD || 'user1', 59 | redirect: redirect_uri, 60 | state: encodeURIComponent(state) 61 | }); 62 | }); 63 | 64 | // ====================== 65 | // login routes 66 | // ====================== 67 | 68 | // login route | associated with user in user.json file | post made by authorizer route template 69 | routerAuth.post('/login', (req: Request, res: Response) => { 70 | const {redirect, state, username, pw}: ILogin = {...LoginDefaults, ...req.query, ...req.body} 71 | const logMsg: string = 'username = ' + username + ' && pw = ' + pw; 72 | // if 
logged-in user tries to hit login route twice then just log them out and start over 73 | if (Auth.loggedIn) { 74 | Auth.logout(); 75 | } 76 | // if missing username || password params then error 77 | if (!username || !pw) { 78 | return res.status(400).send('missing username or password'); 79 | } 80 | // if login fails 81 | if (!Auth.login(User.getUser(username), pw)) { 82 | console.error('invalid login - ' + logMsg); 83 | return res.status(401).send('invalid username or password'); 84 | } 85 | // all good in the hood 86 | console.log('Logged in ' + logMsg); 87 | 88 | return res 89 | .writeHead(302, { 90 | Location: `${redirect}?code=1234&state=${encodeURIComponent(state)}` 91 | }) 92 | .end(); 93 | }); 94 | 95 | // login route | alternative to using /authorizer->POST->/login flow 96 | routerAuth.get('/login', (req: Request, res: Response) => { 97 | const {username, pw}: ILogin = {...LoginDefaults, ...req.query} 98 | const logMsg = 'username = ' + username + ' && pw = ' + pw; 99 | 100 | if (Auth.loggedIn) { 101 | Auth.logout(); 102 | } 103 | // if missing username || password params then error 104 | if (!username || !pw) { 105 | return res.status(400).send('missing username or password'); 106 | } 107 | // if login fails 108 | if (!Auth.login(User.getUser(username), pw)) { 109 | console.error('invalid login - ' + logMsg); 110 | return res.status(401).send('invalid username or password'); 111 | } 112 | // all good in the hood 113 | console.log('Logged in ' + logMsg); 114 | 115 | return res 116 | .status(200) 117 | .send( 118 | JSON.stringify(Auth.currentUser) + 119 | '

you may continue with your auth0 needs' 120 | ); 121 | }); 122 | 123 | // ====================== 124 | // logout routes 125 | // ====================== 126 | 127 | routerAuth.get('/logout', (req: Request, res: Response) => { 128 | const currentUser: string = JSON.stringify(Auth.currentUser); 129 | Auth.logout(); 130 | console.log(`logged out ${currentUser}`); 131 | res.status(200).send('logged out'); 132 | }); 133 | 134 | routerAuth.get('/v2/logout', (req: Request, res: Response) => { 135 | const redirect: string = (req.query.returnTo || "").toString(); 136 | const currentUser: string = JSON.stringify(Auth.currentUser); 137 | Auth.logout(); 138 | console.log(`logged out ${currentUser}`); 139 | return res 140 | .writeHead(302, { 141 | Location: `${redirect}` 142 | }) 143 | .end(); 144 | }); 145 | -------------------------------------------------------------------------------- /auth0_mock/routes/index.ts: -------------------------------------------------------------------------------- 1 | import {Router, Request, Response} from "express"; 2 | 3 | export const routerIndex: Router = Router(); 4 | 5 | // lists all the available routes 6 | routerIndex.get('/', (req: Request, res: Response) => { 7 | const routes: Record = { 8 | '/authorize': 9 | 'GET - renders a login page which makes a POST request to login route - official auth0 service uses this route (most frontend frameworks will use it too)', 10 | '/login': 11 | 'POST|GET - login a user | POST used in conjunction with authorize route', 12 | '/logout': 'GET - logs a user out - empties active user obj', 13 | '/v2/logout': 14 | 'GET - official auth0 service uses this route (most frontend frameworks will use it too) -> same function as logout', 15 | '/.well-known/jwks.json': 16 | 'GET - Returns JWKS | official auth0 service uses this route (most frontend frameworks will use it too)', 17 | '/jwks': 18 | 'GET - get private keys used to sign tokens | used for debug purposes', 19 | '/access_token': 'GET - must be logged in - Returns access token for user', 20 | '/id_token': 'GET - must be logged in - Returns id token for user', 21 | '/oauth/token': 22 | 'POST - must be logged in - official auth0 service uses this route (most frontend frameworks will use it too) "token route" - returns object with tokens, expires, scope, and token type', 23 | '/verify_token_test': 24 | 'GET - must be logged in - verifies token for debug purposes - outputs to container logs', 25 | '/userinfo': 26 | 'GET - must be logged in - official auth0 service uses this route (most frontend frameworks will use it too) - returns userinfo aka id claims' 27 | }; 28 | return res 29 | .status(200) 30 | .header('Content-Type', 'application/json') 31 | .send(JSON.stringify(routes, null, 4)); 32 | }); 33 | -------------------------------------------------------------------------------- /auth0_mock/tests/integration/api_route.test.ts: -------------------------------------------------------------------------------- 1 | import {join} from "path"; 2 | import {JWS} from "node-jose"; 3 | import app from "../../index"; 4 | import request from "supertest"; 5 | import {readFileSync} from "fs"; 6 | import {IUsers} from "../../types"; 7 | import {Auth} from "../../modules/authentication"; 8 | import {idTokenClaims} from "../../token-claims/id"; 9 | import {JwkWrapper} from "../../modules/jwk-wrapper"; 10 | 11 | describe("testing api routes", () => { 12 | beforeEach(async () => { 13 | Auth.logout(); 14 | const username: string = "admin1"; 15 | const password: string = "admin1"; 16 | const 
userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 17 | Auth.login(userObject, password); 18 | }); 19 | it("should return keys when /.well-known/jwks.json is hit", async () => { 20 | const res = await request(app).get("/.well-known/jwks.json"); 21 | expect(res.status).toEqual(200); 22 | expect(res.headers["content-type"].includes("application/json")); 23 | expect("keys" in res.body); 24 | }); 25 | it("should return return private key used to sign when /jwks is hit", async () => { 26 | const res = await request(app).get("/jwks"); 27 | expect(res.status).toEqual(200); 28 | expect(res.headers["content-type"].includes("text/html")); 29 | expect(res.text.toLowerCase().includes("private key")); 30 | }); 31 | it("should return access token when /access_token is hit", async () => { 32 | const res = await request(app).get("/access_token"); 33 | expect(res.status).toEqual(200); 34 | expect(res.headers["content-type"].includes("text/html")); 35 | expect(JwkWrapper.verify(res.text as unknown as JWS.CreateSignResult)).toBeTruthy(); 36 | }); 37 | it("should return ID token when /id_token is hit", async () => { 38 | const res = await request(app).get("/id_token"); 39 | expect(res.status).toEqual(200); 40 | expect(res.headers["content-type"].includes("text/html")); 41 | expect(JwkWrapper.verify(res.text as unknown as JWS.CreateSignResult)).toBeTruthy(); 42 | }); 43 | it("should create oauth token object when /oauth/token is hit", async () => { 44 | const res = await request(app).post("/oauth/token").send({client_id: "1234"}); 45 | expect(res.status).toEqual(200); 46 | expect(res.headers["content-type"].includes("application/json")); 47 | const body = res.body; 48 | expect("access_token" in body).toBeTruthy(); 49 | expect("expires_in" in body).toBeTruthy(); 50 | expect("id_token" in body).toBeTruthy(); 51 | expect("scope" in body).toBeTruthy(); 52 | expect("token_type" in body).toBeTruthy(); 53 | expect(JwkWrapper.verify(body.access_token as unknown as JWS.CreateSignResult)).toBeTruthy(); 54 | expect(JwkWrapper.verify(body.id_token as unknown as JWS.CreateSignResult)).toBeTruthy(); 55 | }); 56 | it("should verify token via logs & send msg including done when /verify_token_test is hit", async () => { 57 | const res = await request(app).get("/verify_token_test"); 58 | expect(res.headers["content-type"].includes("text/html")); 59 | expect(res.text.toLowerCase().includes("done")); 60 | }); 61 | it("should return userinfo in json format when /userinfo is hit", async ()=>{ 62 | const res = await request(app).get("/userinfo"); 63 | expect(res.headers["content-type"].includes("application/json")); 64 | // userinfo is ID token claim | make sure props returned are contained in idTokenClaims 65 | expect(Object.keys(res.body).every(key => Object.keys(idTokenClaims()).includes(key))).toBeTruthy(); 66 | }); 67 | }); 68 | -------------------------------------------------------------------------------- /auth0_mock/tests/integration/authentication_route.test.ts: -------------------------------------------------------------------------------- 1 | import {join} from "path"; 2 | import app from "../../index"; 3 | import {readFileSync} from "fs"; 4 | import request from "supertest"; 5 | import {IUsers} from "../../types"; 6 | import {UsersDefaults} from "../../types"; 7 | import {Auth} from "../../modules/authentication"; 8 | import {JwkWrapper} from "../../modules/jwk-wrapper"; 9 | 10 | describe("Authentication route tests", () => { 11 | describe("/authorize route tests", () => { 12 | 
it("should set none to param provided", async () => { 13 | const nonce = "1234-4321"; 14 | await request(app).get(`/authorize?nonce=${nonce}`); 15 | expect(JwkWrapper.getNonce()).toEqual(nonce); 16 | }); 17 | it("should yield 400 with msg if redirect url is not in query string", async () => { 18 | const res = await request(app).get(`/authorize`); 19 | expect(res.headers["content-type"].includes("text/html")); 20 | expect(res.status).toEqual(400); 21 | expect(res.text.toLowerCase().includes("missing redirect url")).toBeTruthy(); 22 | }); 23 | it("should yield html login prompt if prompt is not set to none", async () => { 24 | const res = await request(app).get(`/authorize?redirect_uri=1234`); 25 | expect(res.headers["content-type"].includes("text/html")); 26 | expect(res.text.includes(" { 29 | const redirectUri = "testing_is_cool" 30 | const state = "234"; 31 | const res = await request(app).get(`/authorize?redirect_uri=${redirectUri}&prompt=none&state=${state}`); 32 | expect(res.status).toEqual(302); 33 | const expectedParams = [redirectUri, `state=${state}`, "error=login_required", "error_description=login_required"] 34 | expectedParams.forEach((v) => { 35 | expect(res.headers.location.includes(v)).toBeTruthy(); 36 | }); 37 | }); 38 | it("should yield 302 & redirect to redirect URI with specific params when prompt is set to none & user logged in", async () => { 39 | const username: string = "admin1"; 40 | const password: string = "admin1"; 41 | const userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 42 | Auth.login(userObject, password); 43 | const redirectUri = "testing_is_cool" 44 | const state = "234"; 45 | const res = await request(app).get(`/authorize?redirect_uri=${redirectUri}&prompt=none&state=${state}`); 46 | expect(res.status).toEqual(302); 47 | const expectedParams = [redirectUri, `state=${state}`, "code=1234", "access_token", "expires_in=86400", "id_token", "scope", "token_type=Bearer"] 48 | expectedParams.forEach((v) => { 49 | expect(res.headers.location.includes(v)).toBeTruthy(); 50 | }); 51 | }); 52 | }); 53 | describe("/login route tests", () => { 54 | describe("post alternative", () => { 55 | it("should yield 400 status code & error message is username or password is not provided or empty", async () => { 56 | const res = await request(app).post(`/login`); 57 | expect(res.text.toLowerCase().includes("missing username or password")); 58 | expect(res.headers["content-type"].includes("text/html")); 59 | expect(res.status).toEqual(400); 60 | }); 61 | it("should yield 401 status code & error message user or username is invalid", async () => { 62 | const validUser = "admin1"; 63 | const invalidPassword = "invalid"; 64 | const res = await request(app).post(`/login`).send({username: validUser, pw: invalidPassword}); 65 | expect(res.text.toLowerCase().includes("invalid username or password")); 66 | expect(res.headers["content-type"].includes("text/html")); 67 | expect(res.status).toEqual(401); 68 | }); 69 | it('should yield status code 302 and pass on specified query params when login successful', async () => { 70 | const validUser = "admin1"; 71 | const validPw = "admin1"; 72 | const redirectUri = "testing"; 73 | const state = "9078"; 74 | const res = await request(app).post(`/login`) 75 | .send({ 76 | username: validUser, 77 | pw: validPw, 78 | state, 79 | redirect: redirectUri 80 | }); 81 | expect(res.status).toEqual(302); 82 | const expectedParams = [redirectUri, `state=${state}`, "code=1234"] 83 | expectedParams.forEach((v) => { 84 | 
expect(res.headers.location.includes(v)).toBeTruthy(); 85 | }); 86 | }); 87 | }); 88 | describe("get alternative", () => { 89 | it("should yield 400 status code & error message is username or password is not provided or empty", async () => { 90 | const res = await request(app).get(`/login`); 91 | expect(res.text.toLowerCase().includes("missing username or password")); 92 | expect(res.headers["content-type"].includes("text/html")); 93 | expect(res.status).toEqual(400); 94 | }); 95 | it("should yield 401 status code & error message user or username is invalid", async () => { 96 | const validUser = "admin1"; 97 | const invalidPassword = "invalid"; 98 | const res = await request(app).get(`/login?username=${validUser}&pw=${invalidPassword}`); 99 | expect(res.text.toLowerCase().includes("invalid username or password")); 100 | expect(res.headers["content-type"].includes("text/html")); 101 | expect(res.status).toEqual(401); 102 | }); 103 | it('should yield status code 302 and pass on specified query params when login successful', async () => { 104 | const validUser = "admin1"; 105 | const validPw = "admin1"; 106 | const res = await request(app).get(`/login?username=${validUser}&pw=${validPw}`) 107 | expect(res.status).toEqual(200); 108 | expect(res.headers["content-type"].includes("text/html")); 109 | expect(res.text.includes("username")).toBeTruthy(); 110 | expect(res.text.includes("pw")).toBeTruthy(); 111 | expect(res.text.includes(validPw)); 112 | }); 113 | }); 114 | }); 115 | describe("logout route tests", () => { 116 | beforeEach(async () => { 117 | const username: string = "admin1"; 118 | const password: string = "admin1"; 119 | const userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 120 | Auth.login(userObject, password); 121 | }); 122 | describe("/logout", () => { 123 | it("should log a user out & return 200 statusCode", async () => { 124 | // user is logged in 125 | expect(Auth.currentUser.username).toEqual("admin1"); 126 | const res = await request(app).get(`/logout`); 127 | // user is logged out & props are reset to default 128 | expect(Auth.currentUser).toEqual(UsersDefaults); 129 | expect(res.status).toEqual(200); 130 | expect(res.headers["content-type"].includes("text/html")); 131 | expect(res.text.toLowerCase().includes("logged out")) 132 | }); 133 | }); 134 | describe("/v2/logout", ()=>{ 135 | it("should log a user out & redirect to specified URL", async ()=>{ 136 | // user is logged in 137 | expect(Auth.currentUser.username).toEqual("admin1"); 138 | const redirectUri = "test"; 139 | const res = await request(app).get(`/v2/logout?returnTo=${redirectUri}`); 140 | // user is logged out & props are reset to default 141 | expect(Auth.currentUser).toEqual(UsersDefaults); 142 | expect(res.status).toEqual(302); 143 | expect(res.headers.location).toEqual(redirectUri) 144 | }); 145 | }); 146 | }); 147 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/integration/index_route.test.ts: -------------------------------------------------------------------------------- 1 | import app from "../../index"; 2 | import request from "supertest"; 3 | 4 | describe("testing index router", () => { 5 | it("should return list of all routes", async () => { 6 | const res = await request(app).get("/"); 7 | // if routes are added to index they need to be added here 8 | const routes = [ 9 | "/authorize", 10 | "/login", 11 | "/logout", 12 | "/v2/logout", 13 | "/.well-known/jwks.json", 14 | "/jwks", 15 | "/access_token", 
16 | "/id_token", 17 | "/oauth/token", 18 | "/verify_token_test", 19 | "/userinfo" 20 | ] 21 | expect(res.headers["content-type"].includes("application/json")); 22 | expect(Object.keys(res.body).every(key => routes.includes(key))).toBeTruthy(); 23 | }); 24 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/unit/access_token_claims.test.ts: -------------------------------------------------------------------------------- 1 | import {accessTokenClaims} from "../../token-claims/access"; 2 | import {tokenDefaults} from "../../token-claims/token_defaults"; 3 | 4 | describe("access token claims tests", () => { 5 | it("should return IAccessTokenClaims property", () => { 6 | const defaultProps: string[] = [ 7 | "iss", 8 | "sub", 9 | "aud", 10 | "iat", 11 | "exp", 12 | "azp", 13 | "scope", 14 | "permissions" 15 | ]; 16 | const accessClaims = accessTokenClaims(); 17 | expect(defaultProps.every(key => Object.keys(accessClaims).includes(key))).toBeTruthy(); 18 | }); 19 | it("should set azp to what the user sets", () => { 20 | const azpVal = "yes"; 21 | const accessClaims = accessTokenClaims(azpVal); 22 | expect(accessClaims.azp).toEqual(azpVal) 23 | }); 24 | it("should set aud to what was passed in", () => { 25 | const audVal = ["peter", "bug", "king"]; 26 | const accessClaims = accessTokenClaims("yes", audVal); 27 | expect(accessClaims.aud).toEqual(tokenDefaults.aud.concat(audVal)); 28 | }); 29 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/unit/authentication.test.ts: -------------------------------------------------------------------------------- 1 | import {Auth} from "../../modules/authentication"; 2 | import {IUsers, UsersDefaults} from "../../types"; 3 | import {readFileSync} from "fs"; 4 | import {join} from "path"; 5 | 6 | describe("testing Authentication class instance", () => { 7 | beforeEach(() => { 8 | Auth.logout(); 9 | }); 10 | it("should log a user in when user is valid", () => { 11 | const username: string = "admin1"; 12 | const password: string = "admin1"; 13 | const userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 14 | 15 | // pre-login 16 | expect(Auth.loggedIn).toBeFalsy(); 17 | expect(Auth.currentUser).toEqual(UsersDefaults); 18 | // login should be successful & return true 19 | expect(Auth.login(userObject, password)).toBeTruthy(); 20 | // post login props 21 | expect(Auth.loggedIn).toBeTruthy(); 22 | expect(Auth.currentUser).toEqual(userObject); 23 | }); 24 | 25 | it("should not log a user in when a user is invalid", () => { 26 | const username: string = "not_valid_username"; 27 | const password: string = "not_valid_password"; 28 | const userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 29 | 30 | // pre-login 31 | expect(Auth.loggedIn).toBeFalsy(); 32 | expect(Auth.currentUser).toEqual(UsersDefaults); 33 | // login should be successful & return true 34 | expect(Auth.login(userObject, password)).toBeFalsy(); 35 | // post login props 36 | expect(Auth.loggedIn).toBeFalsy(); 37 | expect(Auth.currentUser).toEqual(UsersDefaults); 38 | }); 39 | 40 | it('should set user properties back to default when logged out', () => { 41 | // pre-login 42 | const defaultLoggedIn: boolean = Auth.loggedIn; 43 | const defaultCurrentUser: IUsers = Auth.currentUser; 44 | const username: string = "admin1"; 45 | const password: string = "admin1"; 46 | const userObject: IUsers = JSON.parse(readFileSync(join("./", 
"users.json"), 'utf8'))[username]; 47 | 48 | // successful login 49 | expect(Auth.login(userObject, password)).toBeTruthy(); 50 | 51 | // post login props 52 | expect(Auth.loggedIn).toBeTruthy(); 53 | expect(Auth.currentUser).toEqual(userObject); 54 | 55 | // logout 56 | Auth.logout(); 57 | 58 | // post logout prop comparison 59 | expect(Auth.loggedIn).toEqual(defaultLoggedIn); 60 | expect(Auth.currentUser).toEqual(defaultCurrentUser); 61 | 62 | }); 63 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/unit/default_token_claims.test.ts: -------------------------------------------------------------------------------- 1 | import {tokenDefaults} from "../../token-claims/token_defaults"; 2 | 3 | describe("testing token Defaults", () => { 4 | it("should return defaults specified properties", () => { 5 | const defaultProps: string[] = [ 6 | "domain", 7 | "sub", 8 | "defaultPermissions", 9 | "defaultScope", 10 | "aud", 11 | "given_name", 12 | "family_name", 13 | "nickname", 14 | "name", 15 | "email", 16 | "picture", 17 | "amr" 18 | ]; 19 | expect(Object.keys(tokenDefaults).every(key => defaultProps.includes(key))).toBeTruthy(); 20 | }); 21 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/unit/helpers.test.ts: -------------------------------------------------------------------------------- 1 | import * as types from "../../types"; 2 | import * as helpers from "../../modules/helpers"; 3 | import {idTokenClaims} from "../../token-claims/id"; 4 | 5 | describe("testing helper functions", () => { 6 | describe("testing removeNonceIfEmpty", () => { 7 | it("should remove nonce if it is empty", () => { 8 | let idTokenC = idTokenClaims() 9 | expect('nonce' in idTokenC).toBeTruthy(); 10 | expect(idTokenC.nonce === "").toBeTruthy(); 11 | idTokenC = helpers.removeNonceIfEmpty(idTokenC) 12 | expect('nonce' in idTokenC).toBeFalsy(); 13 | expect(idTokenC.nonce === "").toBeFalsy(); 14 | }); 15 | 16 | it("should not remove nonce if it is not empty", () => { 17 | let idTokenC: types.IIdTokenClaims = idTokenClaims(); 18 | const nonceVal: string = "123"; 19 | idTokenC.nonce = nonceVal; 20 | expect('nonce' in idTokenC).toBeTruthy(); 21 | expect(idTokenC.nonce === nonceVal).toBeTruthy(); 22 | idTokenC = helpers.removeNonceIfEmpty(idTokenC); 23 | expect('nonce' in idTokenC).toBeTruthy(); 24 | expect(idTokenC.nonce === nonceVal).toBeTruthy(); 25 | }); 26 | }); 27 | 28 | describe("testing removeTrailingSlash", () => { 29 | it("should remove trailing slash when trailing slash exists", () => { 30 | let str: string = "this has a trailing slash /"; 31 | expect(str.endsWith("/")).toBeTruthy(); 32 | str = helpers.removeTrailingSlash(str); 33 | expect(str.endsWith("/")).toBeFalsy(); 34 | }); 35 | it("should not remove trailing slash when trailing slash is not present", () => { 36 | const str: string = "this doesn't have a trailing slash"; 37 | expect(str.endsWith("/")).toBeFalsy(); 38 | const strTwo: string = helpers.removeTrailingSlash(str); 39 | expect(str.endsWith("/")).toBeFalsy(); 40 | expect(str === strTwo).toBeTruthy(); 41 | }); 42 | 43 | }); 44 | 45 | describe("testing buildUriParams", () => { 46 | it("should take in object & return uri formatted string", () => { 47 | let v: any = { 48 | state: "1234", 49 | error: 'login_required', 50 | error_description: 'login_required' 51 | } 52 | expect(typeof v === "string").toBeFalsy(); 53 | v = helpers.buildUriParams(v); 54 | expect(typeof v === "string").toBeTruthy(); 55 | 
}); 56 | }); 57 | 58 | describe("testing port const", () => { 59 | it("should return a number in base 10", () => { 60 | expect(helpers.port === parseInt(helpers.port.toString(), 10)).toBeTruthy(); 61 | }); 62 | }); 63 | 64 | describe("testing auth0Url const", () => { 65 | it("should return url string", () => { 66 | expect(typeof helpers.auth0Url === "string").toBeTruthy(); 67 | expect((helpers.auth0Url).includes("http://")).toBeTruthy(); 68 | expect((helpers.auth0Url).includes(`:${helpers.port}`)).toBeTruthy(); 69 | }); 70 | }); 71 | }); 72 | 73 | -------------------------------------------------------------------------------- /auth0_mock/tests/unit/id_token_claims.test.ts: -------------------------------------------------------------------------------- 1 | import {idTokenClaims} from "../../token-claims/id"; 2 | 3 | describe("id token claims tests", () => { 4 | it("should return IIdTokenClaims property", () => { 5 | const accessClaims = idTokenClaims(); 6 | expect("given_name" in accessClaims) 7 | }); 8 | it("should set aud to what the user sets via param", () => { 9 | const aud = "pking"; 10 | const accessClaims = idTokenClaims(aud); 11 | expect(accessClaims.aud).toEqual([aud]) 12 | }); 13 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/unit/jwk_wrapper.test.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import {JwkWrapper} from "../../modules/jwk-wrapper"; 3 | import {sleep} from "../utils"; 4 | import {idTokenClaims} from "../../token-claims/id"; 5 | 6 | describe("JWKWrapper tests", () => { 7 | it("should return Nonce when getNonce is called", () => { 8 | // set nonce 9 | const nonce = "1234" 10 | JwkWrapper.setNonce(nonce); 11 | expect(JwkWrapper.getNonce()).toEqual(nonce) 12 | }); 13 | it("should set Nonce when setNonce is called", () => { 14 | ["1234", "4321", "9876", "6789"].forEach((value) => { 15 | JwkWrapper.setNonce(value); 16 | expect(JwkWrapper.getNonce()).toEqual(value); 17 | }); 18 | }); 19 | it("should get IAT (a number) when getIat is called", () => { 20 | expect(typeof JwkWrapper.getIat() === "number").toBeTruthy(); 21 | }); 22 | it("should get exp date (a number) when getExp is called", () => { 23 | expect(typeof JwkWrapper.getExp() === "number").toBeTruthy(); 24 | }); 25 | it("should create JWKS file when createJwks is called", async () => { 26 | const filePath = "./keys.json"; 27 | // make sure keys.json is deleted 28 | fs.unlinkSync(filePath); 29 | // run method 30 | JwkWrapper.createJwks(); 31 | // fails without sleep even though usage is synchronous 32 | await sleep(.00000000000000000000000000000001); 33 | console.log(fs.existsSync(filePath)); 34 | // should create file 35 | expect(fs.existsSync(filePath)).toBeTruthy(); 36 | }); 37 | it("should create a token when createToken is called with given claims", async () => { 38 | const token = await JwkWrapper.createToken(idTokenClaims()); 39 | expect(token.toString().split(".").length === 3).toBeTruthy(); 40 | expect(JwkWrapper.verify(token)).toBeTruthy() 41 | }); 42 | it("should return true if no issues with verifying token when verify is called", async () => { 43 | const token = await JwkWrapper.createToken(idTokenClaims()); 44 | const logSpy = jest.spyOn(console, 'log'); 45 | expect(JwkWrapper.verify(token)).toBeTruthy() 46 | expect(logSpy).toHaveBeenCalled(); 47 | 48 | }); 49 | }); -------------------------------------------------------------------------------- 
/auth0_mock/tests/unit/middleware.test.ts: -------------------------------------------------------------------------------- 1 | import * as middleware from "../../modules/middleware"; 2 | import {Request, Response, NextFunction} from "express"; 3 | import httpMocks from "node-mocks-http"; 4 | import {IUsers} from "../../types"; 5 | import {readFileSync} from "fs"; 6 | import {join} from "path"; 7 | import {Auth} from "../../modules/authentication"; 8 | 9 | 10 | describe("middleware tests", () => { 11 | function nextFunc(): NextFunction { 12 | return true as unknown as NextFunction; 13 | } 14 | 15 | describe("checkLogin tests", () => { 16 | beforeEach(() => { 17 | Auth.logout(); 18 | }); 19 | it('should return next if logged in', () => { 20 | const request: Request = httpMocks.createRequest(); 21 | const response: Response = httpMocks.createResponse(); 22 | const username: string = "admin1"; 23 | const password: string = "admin1"; 24 | const userObject: IUsers = JSON.parse(readFileSync(join("./", "users.json"), 'utf8'))[username]; 25 | 26 | // login successfully 27 | expect(Auth.login(userObject, password)).toBeTruthy(); 28 | 29 | // nextFunc returns true so we check against a truthy return 30 | expect(middleware.checkLogin(request, response, nextFunc)).toBeTruthy(); 31 | }); 32 | it("should return a status code of 401 & body containing Unauthorized if not logged in", () => { 33 | const request: Request = httpMocks.createRequest(); 34 | // cant type this with express typing since we're using mock special funcitonality 35 | const response: httpMocks.MockResponse>> = httpMocks.createResponse(); 36 | 37 | // run middleware function 38 | middleware.checkLogin(request, response, nextFunc) 39 | 40 | // check response object 41 | expect(response.statusCode).toEqual(401); 42 | expect(response._getData().toLowerCase().includes("unauthorized")).toBeTruthy(); 43 | 44 | }); 45 | }); 46 | describe("rawReqLogger tests", () => { 47 | it("should console.log things & return nextFunc", async () => { 48 | const request: Request = httpMocks.createRequest(); 49 | const response: Response = httpMocks.createResponse(); 50 | const logSpy = jest.spyOn(console, 'log'); 51 | middleware.rawReqLogger(request, response, nextFunc) 52 | expect(logSpy).toHaveBeenCalledTimes(8); 53 | }); 54 | }); 55 | }); 56 | -------------------------------------------------------------------------------- /auth0_mock/tests/unit/user.test.ts: -------------------------------------------------------------------------------- 1 | import {User} from "../../modules/user"; 2 | import {IUsers, UsersDefaults} from "../../types"; 3 | import {readFileSync} from "fs"; 4 | import {join} from "path"; 5 | 6 | describe("testing user class instance", () => { 7 | it("should return user if user exists in users.json", () => { 8 | const username: string = "admin1"; 9 | const userList: Record = JSON.parse(readFileSync(join("./", "users.json"), 'utf8')); 10 | expect(userList[username].username === "").toBeFalsy(); 11 | expect(User.getUser(username).username === "").toBeFalsy(); 12 | }); 13 | 14 | it("should return user defaults if user doesn't exist", () => { 15 | const username: string = "user_dont_exist"; 16 | expect(User.getUser(username) === UsersDefaults).toBeTruthy(); 17 | }); 18 | }); -------------------------------------------------------------------------------- /auth0_mock/tests/utils.ts: -------------------------------------------------------------------------------- 1 | // import request from "supertest"; 2 | 3 | export const sleep = (ms: number) => 
new Promise((r) => setTimeout(r, ms)); 4 | // export function mkRequest(url) -------------------------------------------------------------------------------- /auth0_mock/token-claims/access.ts: -------------------------------------------------------------------------------- 1 | import {Auth} from "../modules/authentication"; 2 | import {JwkWrapper} from "../modules/jwk-wrapper"; 3 | import {tokenDefaults} from "./token_defaults"; 4 | import {IAccessTokenClaims} from "../types"; 5 | 6 | // https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-token-claims 7 | // 8 | // auth token claims -- claim props should be defined within scope of user aka user.json 9 | // if claim not defined in user.json then uses token default values 10 | export const accessTokenClaims = (azp: string = '', aud: string[] = []): IAccessTokenClaims => { 11 | const email = Auth.currentUser.email || tokenDefaults.email; 12 | return { 13 | iss: tokenDefaults.domain, 14 | sub: tokenDefaults.sub + email, 15 | aud: tokenDefaults.aud.concat(aud), 16 | iat: JwkWrapper.getIat(), 17 | exp: JwkWrapper.getExp(JwkWrapper.expirationDurationInMinutesAccessToken), 18 | azp, 19 | scope: Auth.currentUser.scope || tokenDefaults.defaultScope, 20 | permissions: Auth.currentUser.permissions || tokenDefaults.defaultPermissions 21 | }; 22 | }; 23 | -------------------------------------------------------------------------------- /auth0_mock/token-claims/id.ts: -------------------------------------------------------------------------------- 1 | import {Auth} from "../modules/authentication"; 2 | import {JwkWrapper} from "../modules/jwk-wrapper"; 3 | import {tokenDefaults} from "./token_defaults"; 4 | import {IIdTokenClaims} from "../types"; 5 | 6 | // id token claims -- claim props should be defined within scope of user aka user.json 7 | // if claim not defined in user.json then uses token default values 8 | export const idTokenClaims = (aud: string = ''): IIdTokenClaims => { 9 | const email = Auth.currentUser.email || tokenDefaults.email; 10 | return { 11 | given_name: Auth.currentUser.given_name || tokenDefaults.given_name, 12 | family_name: Auth.currentUser.family_name || tokenDefaults.family_name, 13 | nickname: Auth.currentUser.nickname || tokenDefaults.nickname, 14 | name: Auth.currentUser.name || tokenDefaults.name, 15 | email, 16 | picture: Auth.currentUser.picture || tokenDefaults.picture, 17 | iss: tokenDefaults.domain, 18 | sub: tokenDefaults.sub + email, 19 | aud: [aud] || tokenDefaults.aud, 20 | iat: JwkWrapper.getIat(), 21 | exp: JwkWrapper.getExp(JwkWrapper.expirationDurationInMinutesIdToken), 22 | amr: tokenDefaults.amr, 23 | nonce: JwkWrapper.getNonce() 24 | }; 25 | }; 26 | -------------------------------------------------------------------------------- /auth0_mock/token-claims/token_defaults.ts: -------------------------------------------------------------------------------- 1 | import {auth0Url} from "../modules/helpers" 2 | import {ITokenDefault} from "../types" 3 | 4 | export const tokenDefaults: ITokenDefault = { 5 | domain: auth0Url + '/', 6 | sub: 'samlp|MyAzure|', 7 | defaultPermissions: ['chat.admin', 'chat.user'], 8 | defaultScope: 'openid profile', 9 | aud: [process.env.AUTH0_AUDIENCE || 'app'], 10 | given_name: 'test_user_first_name', 11 | family_name: 'test_user_last_name', 12 | nickname: 'test_user_nickname', 13 | name: 'test_user', 14 | email: 'test_user@myorg.com', 15 | picture: 'https://en.gravatar.com/avatar.png', 16 | amr: ['mfa'] 17 | }; 18 | 
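The defaults above only apply when a claim is missing from the logged-in user's entry in users.json. As a quick illustration of how the pieces shown earlier fit together, here is a minimal, hypothetical sketch (not a file in this repo) that logs a user in, builds access-token claims, and mints and verifies a JWT with JwkWrapper, mirroring what the unit tests exercise; the client id and audience values are placeholders.

```typescript
// Hypothetical usage sketch (not part of auth0_mock) showing how Auth,
// accessTokenClaims, and JwkWrapper combine, as exercised by the unit tests.
import {readFileSync} from "fs";
import {join} from "path";
import {Auth} from "../modules/authentication";
import {JwkWrapper} from "../modules/jwk-wrapper";
import {accessTokenClaims} from "./access";
import {IUsers} from "../types";

const mintAccessToken = async (username: string, password: string): Promise<string> => {
  const users: Record<string, IUsers> = JSON.parse(readFileSync(join("./", "users.json"), "utf8"));
  if (!Auth.login(users[username], password)) {
    throw new Error("invalid username or password");
  }
  // "my-client-id" and "my-api" are placeholder values; every other claim
  // comes from the logged-in user's entry or from token_defaults.ts above.
  const claims = accessTokenClaims("my-client-id", ["my-api"]);
  const token = await JwkWrapper.createToken(claims);
  JwkWrapper.verify(token); // logs the verification result, as the tests show
  return token.toString();  // header.payload.signature
};

mintAccessToken("admin1", "admin1").then((jwt) => console.log(jwt));
```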
-------------------------------------------------------------------------------- /auth0_mock/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "esModuleInterop": true, 5 | "target": "esnext", 6 | "noImplicitAny": true, 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "outDir": "dist", 10 | "baseUrl": ".", 11 | "paths": { 12 | "*": [ 13 | "node_modules/*" 14 | ] 15 | } 16 | }, 17 | "files": ["index.ts"] 18 | } -------------------------------------------------------------------------------- /auth0_mock/tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "defaultSeverity": "error", 3 | "extends": [ 4 | "tslint:recommended" 5 | ], 6 | "jsRules": {}, 7 | "rules": { 8 | "no-console": false, 9 | "trailing-comma": [ 10 | false 11 | ] 12 | }, 13 | "rulesDirectory": [] 14 | } -------------------------------------------------------------------------------- /auth0_mock/types.ts: -------------------------------------------------------------------------------- 1 | import jwkToBuffer from "jwk-to-pem"; 2 | 3 | export interface IAuthorize { 4 | redirect_uri: string 5 | prompt: string 6 | state: string 7 | client_id?: string 8 | nonce: string 9 | audience: string 10 | } 11 | 12 | export interface ILogin { 13 | redirect?: string 14 | state?: string 15 | username: string 16 | pw: string 17 | } 18 | 19 | export interface IUsers { 20 | username: string 21 | pw: string 22 | permissions: string[] 23 | scope: string 24 | given_name: string 25 | family_name: string 26 | nickname: string 27 | name: string 28 | email: string 29 | picture: string 30 | } 31 | 32 | export interface IIdTokenClaims { 33 | given_name: string 34 | family_name: string 35 | nickname: string 36 | name: string 37 | email: string 38 | picture: string 39 | iss: string 40 | sub: string 41 | aud: string[] 42 | iat: number 43 | exp: number 44 | amr: string[] 45 | nonce?: string 46 | } 47 | 48 | export interface IAccessTokenClaims { 49 | iss: string 50 | sub: string 51 | aud: string[] 52 | iat: number 53 | exp: number 54 | azp: string 55 | scope: string 56 | permissions: string[] 57 | } 58 | 59 | export interface ITokenDefault { 60 | domain: string 61 | sub: string 62 | defaultPermissions: string[] 63 | defaultScope: string 64 | aud: string[] 65 | given_name: string 66 | family_name: string 67 | nickname: string 68 | name: string 69 | email: string 70 | picture: string 71 | amr: string[] 72 | } 73 | 74 | export const AuthorizedDefaults: IAuthorize = { 75 | redirect_uri: "", 76 | prompt: "", 77 | state: "", 78 | client_id: "", 79 | nonce: "", 80 | audience: "", 81 | }; 82 | 83 | export const UsersDefaults: IUsers = { 84 | username: "", 85 | pw: "", 86 | permissions: [""], 87 | scope: "", 88 | given_name: "", 89 | family_name: "", 90 | nickname: "", 91 | name: "", 92 | email: "", 93 | picture: "", 94 | }; 95 | 96 | export const LoginDefaults: ILogin = { 97 | redirect: "", 98 | state: "", 99 | username: "", 100 | pw: "", 101 | }; 102 | 103 | export interface IKeyList { 104 | keys: jwkToBuffer.JWK[] 105 | } 106 | -------------------------------------------------------------------------------- /auth0_mock/users.json: -------------------------------------------------------------------------------- 1 | { 2 | "user1": { 3 | "username": "user1", 4 | "pw": "user1", 5 | "permissions": ["user"], 6 | "scope": "openid profile email offline_access", 7 | "given_name": "John", 8 | "family_name": 
"Doe", 9 | "nickname":"JD", 10 | "name":"John Doe", 11 | "email":"john.doe@unknown.com", 12 | "picture":"https://s.gravatar.com/avatar/4cf8395b3f38515aa3144ccef5a49800?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fjd.png" 13 | }, 14 | "admin1":{ 15 | "username":"admin1", 16 | "pw":"admin1", 17 | "permissions":["admin"], 18 | "scope":"openid profile email offline_access", 19 | "given_name":"Bob", 20 | "family_name":"Builder", 21 | "nickname":"Bob", 22 | "name":"Bob Builder", 23 | "email":"bob.builder@build.com", 24 | "picture":"https://s.gravatar.com/avatar/50a7070e1892b9227579b318cd8b677b?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fpr.png" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dc-auth0-host.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | auth0_mock: 4 | container_name: "${AUTH0_CONTAINER_NAME-auth0_mock}" 5 | stdin_open: true 6 | tty: true 7 | build: 8 | context: ./auth0_mock 9 | dockerfile: Dockerfile 10 | networks: 11 | devnet: 12 | ports: 13 | - "${AUTH0_HOST_PORT-3001}:3001" 14 | command: yarn run start 15 | environment: 16 | NODE_ENV: dev 17 | DEBUG: nodejs-docker-express:* 18 | AUTH0_DOMAIN: 19 | AUTH0_AUDIENCE: 20 | AUTH0_DEFAULT_USER: 21 | AUTH0_DEFAULT_PASSWORD: 22 | AUTH0_ACCESS_TOKEN_EXP: 23 | AUTH0_ID_TOKEN_EXP: 24 | 25 | networks: 26 | devnet: 27 | name: ${DEVNET_NAME-devnet} 28 | external: true 29 | -------------------------------------------------------------------------------- /dc-auth0-local-users.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | auth0_mock: 4 | volumes: 5 | - ${HOST_PROJECT_PATH-.}/${AUTH0_LOCAL_USERS_FILE}:/usr/local/app/users-local.json 6 | -------------------------------------------------------------------------------- /dc-auth0.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | auth0_mock: 4 | container_name: "${AUTH0_CONTAINER_NAME-auth0_mock}" 5 | stdin_open: true 6 | tty: true 7 | build: 8 | context: ./auth0_mock 9 | dockerfile: Dockerfile 10 | networks: 11 | devnet: 12 | command: yarn run start 13 | environment: 14 | NODE_ENV: dev 15 | DEBUG: nodejs-docker-express:* 16 | AUTH0_DOMAIN: 17 | AUTH0_AUDIENCE: 18 | AUTH0_DEFAULT_USER: 19 | AUTH0_DEFAULT_PASSWORD: 20 | AUTH0_ACCESS_TOKEN_EXP: 21 | AUTH0_ID_TOKEN_EXP: 22 | 23 | networks: 24 | devnet: 25 | name: ${DEVNET_NAME-devnet} 26 | external: true 27 | -------------------------------------------------------------------------------- /dc-dns.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | dns: 5 | - ${GDC_DNS_PRI_IP} 6 | - ${GDC_DNS_SEC_IP} -------------------------------------------------------------------------------- /dc-host-custom-mount.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | volumes: 5 | - ${HOST_CUSTOM_MOUNT}:/host_custom_mount 6 | -------------------------------------------------------------------------------- /dc-host-home-dir-rw.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | volumes: 5 | - ~:/root/home-host # read write home dir mounted inside container 6 | -------------------------------------------------------------------------------- /dc-host-home-dir.yml: 
-------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | volumes: 5 | - ~:/root/home-host:ro # needed to copy .aws and bin folder from your home if enabled and they exists 6 | -------------------------------------------------------------------------------- /dc-host-workspace-dir.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | volumes: 5 | - ${HOST_PROJECT_PATH-.}:/workspace 6 | -------------------------------------------------------------------------------- /dc-ls-host-dns.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | ports: 5 | - "${LOCALSTACK_HOST_DNS_PORT}:53" # only required for Pro dns proxy 6 | - "${LOCALSTACK_HOST_DNS_PORT}:53/udp" # only required for Pro dns proxy 7 | -------------------------------------------------------------------------------- /dc-ls-host.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | ports: 5 | - "4566:4566" # LocalStack Gateway 6 | - "4510-4560:4510-4560" # external services port range 7 | - "8001:8080" # only required for Pro 8 | - "443:443" # LocalStack HTTPS Gateway (required for Pro) 9 | - "4571:4571" # elasticsearch service 10 | environment: 11 | - LOCALSTACK_HOST=${LOCALSTACK_HOST-localhost.localstack.cloud:4566} 12 | - GATEWAY_LISTEN=${LOCALSTACK_GATEWAY_LISTEN-0.0.0.0:4566,0.0.0.0:443} 13 | -------------------------------------------------------------------------------- /dc-ls-persist.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | environment: 5 | - PERSISTENCE=1 6 | 7 | -------------------------------------------------------------------------------- /dc-ls-pro.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | environment: 5 | - LOCALSTACK_API_KEY=${LOCALSTACK_API_KEY-} 6 | - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN-} 7 | -------------------------------------------------------------------------------- /dc-ls-shared.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | volumes: 5 | - shared:/shared # persisted and shared between stacks / containers 6 | 7 | volumes: 8 | shared: # this volume is persisted and shared between all stacks 9 | name: shared 10 | external: true 11 | -------------------------------------------------------------------------------- /dc-ls-static-ip.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | networks: 5 | devnet: 6 | ipv4_address: ${LOCALSTACK_STATIC_IP} 7 | -------------------------------------------------------------------------------- /dc-ls.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | localstack: 4 | image: "${LS_IMAGE-localstack/localstack}:${LS_VERSION-latest}" 5 | container_name: "${LS_MAIN_CONTAINER_NAME-localstack}" 6 | stdin_open: true 7 | tty: true 8 | networks: 9 | devnet: 10 | environment: 11 | - DOCKER_HOST=unix:///var/run/docker.sock 12 | - MAIN_CONTAINER_NAME=${LS_MAIN_CONTAINER_NAME-localstack} 13 | - EXTRA_CORS_ALLOWED_ORIGINS=* 14 | - DISABLE_CORS_CHECKS=1 15 | - DEBUG=${DEBUG-} 16 | - LS_LOG=${LS_LOG-} 17 | # how long to keep idle lambdas around 18 | - 
LAMBDA_RUNTIME_ENVIRONMENT_TIMEOUT=15 19 | # remove idle lambdas 20 | - LAMBDA_REMOVE_CONTAINERS=1 21 | # Tell Localstack to put Lambda containers on the same shared network 22 | - LAMBDA_DOCKER_NETWORK=${DEVNET_NAME-devnet} 23 | - KINESIS_ERROR_PROBABILITY=${KINESIS_ERROR_PROBABILITY-} 24 | # enable IAM checks. Only takes action if IAM_SOFT_MODE=0 25 | - ENFORCE_IAM=${LS_ENFORCE_IAM-1} 26 | # only check IAM do not enforce it 27 | - IAM_SOFT_MODE=${LS_IAM_SOFT_MODE-1} 28 | - OPENSEARCH_ENDPOINT_STRATEGY=port 29 | - PROVIDER_OVERRIDE_CLOUDWATCH=v2 30 | - DYNAMODB_REMOVE_EXPIRED_ITEMS=1 31 | - DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM=${LS_DNS_NAME_PATTERNS_TO_RESOLVE_UPSTREAM} 32 | volumes: 33 | - "/var/run/docker.sock:/var/run/docker.sock" 34 | - "${LOCALSTACK_VOLUME_DIR:-/tmp/ls_volume}:/var/lib/localstack" 35 | 36 | networks: 37 | devnet: 38 | name: ${DEVNET_NAME-devnet} 39 | external: true 40 | -------------------------------------------------------------------------------- /dc-proxy-dump.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | proxy: 4 | command: mitmdump --set block_global=false --set flow_detail=3 5 | -------------------------------------------------------------------------------- /dc-proxy-host.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | proxy: 4 | ports: 5 | - "${PROXY_HOST_PORT-8080}:8080" 6 | -------------------------------------------------------------------------------- /dc-proxy-web-host.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | proxy: 4 | ports: 5 | - "${PROXY_WEB_HOST_PORT-8081}:8081" 6 | command: mitmweb --web-iface 0.0.0.0 --web-host 0.0.0.0 7 | -------------------------------------------------------------------------------- /dc-proxy-web.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | proxy: 4 | command: mitmweb --web-iface 0.0.0.0 --web-host 0.0.0.0 5 | -------------------------------------------------------------------------------- /dc-proxy.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | proxy: 4 | image: "mitmproxy/mitmproxy:${PROXY_VERSION-latest}" 5 | container_name: "${PROXY_CONTAINER_NAME-proxy}" 6 | stdin_open: true 7 | tty: true 8 | dns: 9 | - ${GDC_DNS_PRI_IP} 10 | - ${GDC_DNS_SEC_IP} 11 | networks: 12 | devnet: 13 | volumes: 14 | - "${PROXY_VOLUME_DIR:-/tmp/mitmproxy}:/home/mitmproxy/.mitmproxy" 15 | 16 | 17 | networks: 18 | devnet: 19 | name: ${DEVNET_NAME-devnet} 20 | external: true 21 | -------------------------------------------------------------------------------- /dc-ssh-agent.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | environment: 5 | - SSH_AUTH_SOCK=/var/run/ssh-agent.sock 6 | volumes: 7 | - ${SSH_AUTH_SOCK}:/var/run/ssh-agent.sock # attach ssh agent socket inside container 8 | -------------------------------------------------------------------------------- /dc-ssh.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | ports: 5 | - "${SSH_SERVER_PORT}:22" 6 | -------------------------------------------------------------------------------- /docker-config.json: -------------------------------------------------------------------------------- 1 | { "credsStore": "ecr-login" } 2 | 
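The dc-auth0*.yml overlays above attach the auth0_mock service to the shared devnet network (and, with dc-auth0-host.yml, publish it on host port 3001). Below is a minimal, hypothetical sketch of how an application container on that network might pull the mock's signing keys; the base URL (the auth0_mock container name on port 3001) and the use of Node 18+'s global fetch are assumptions to adjust for your setup.

```typescript
// Hypothetical client-side sketch (not part of this repo): fetch the signing
// keys from the auth0_mock container over the shared devnet network.
// AUTH0_MOCK_URL is an assumed environment variable; the default below uses
// the container_name from dc-auth0.yml and the port it listens on.
const AUTH0_MOCK_BASE = process.env.AUTH0_MOCK_URL ?? "http://auth0_mock:3001";

async function fetchMockJwks(): Promise<unknown> {
  // /.well-known/jwks.json is one of the routes listed by the index route test
  const res = await fetch(`${AUTH0_MOCK_BASE}/.well-known/jwks.json`);
  if (!res.ok) {
    throw new Error(`JWKS request failed with status ${res.status}`);
  }
  return res.json();
}

fetchMockJwks()
  .then((jwks) => console.log(jwks))
  .catch((err) => console.error(err));
```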
-------------------------------------------------------------------------------- /docs/auth0/readme.md: -------------------------------------------------------------------------------- 1 | # Auth0 Mock 2 | 3 | The Auth0 Mock is a container that emulates an auth0 login prompt and backend. 4 | Users login via its login page and are authenticated against info in one of 2 json files. 5 | if `users-local.json` is present it will be used, otherwise `users.json` will be used. 6 | 7 | `users.json` / `users-local.json` file format: 8 | 9 | ```json 10 | { 11 | "user1": { 12 | "username": "user1", 13 | "pw": "user1", 14 | "permissions": ["user"], 15 | "scope": "openid profile email", 16 | "given_name": "John", 17 | "family_name": "Doe", 18 | "nickname":"JD", 19 | "name":"John Doe", 20 | "email":"john.doe@unknown.com", 21 | "picture":"https://s.gravatar.com/avatar/4cf8395b3f38515aa3144ccef5a49800?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fjd.png" 22 | }, 23 | "admin1":{ 24 | "username":"admin1", 25 | "pw":"admin1", 26 | "permissions":["admin"], 27 | "scope":"openid profile email", 28 | "given_name":"Bob", 29 | "family_name":"Builder", 30 | "nickname":"Bob", 31 | "name":"Bob Builder", 32 | "email":"bob.builder@build.com", 33 | "picture":"https://s.gravatar.com/avatar/50a7070e1892b9227579b318cd8b677b?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fpr.png" 34 | } 35 | } 36 | ``` 37 | 38 | You can go to http://host.docker.internal:3001/ to see a list of all supported routes. 39 | 40 | The login page is located at http://host.docker.internal:3001/authorize?redirect_uri=YOUR_CALLBACK_URL 41 | YOUR_CALLBACK_URL should be the url of your application that is expecting the auth0 response. 42 | 43 | -------------------------------------------------------------------------------- /docs/aws_ident/images/access_keys.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/access_keys.png -------------------------------------------------------------------------------- /docs/aws_ident/images/add_mfa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/add_mfa.png -------------------------------------------------------------------------------- /docs/aws_ident/images/change_pw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/change_pw.png -------------------------------------------------------------------------------- /docs/aws_ident/images/close_button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/close_button.png -------------------------------------------------------------------------------- /docs/aws_ident/images/login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/login.png -------------------------------------------------------------------------------- /docs/aws_ident/images/manage_mfa.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/manage_mfa.png -------------------------------------------------------------------------------- /docs/aws_ident/images/mfa_account_name.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/mfa_account_name.png -------------------------------------------------------------------------------- /docs/aws_ident/images/mfa_app_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/mfa_app_screen.png -------------------------------------------------------------------------------- /docs/aws_ident/images/mfa_codes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/mfa_codes.png -------------------------------------------------------------------------------- /docs/aws_ident/images/my_sec_creds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/my_sec_creds.png -------------------------------------------------------------------------------- /docs/aws_ident/images/new_access_key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/new_access_key.png -------------------------------------------------------------------------------- /docs/aws_ident/images/select_iam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/select_iam.png -------------------------------------------------------------------------------- /docs/aws_ident/images/show_qr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_ident/images/show_qr.png -------------------------------------------------------------------------------- /docs/aws_ident/readme.md: -------------------------------------------------------------------------------- 1 | # Setting up MFA for the Identity Account. 2 | 3 | Open a browser to the following URL: [https://YOURSITE.signin.aws.amazon.com/console](https://YOURSITE.signin.aws.amazon.com/console) 4 | Replacing YOURSITE with your site info. 5 | 6 | Enter the username and password that were provided to you. 7 | 8 | ![login](./images/login.png "login") 9 | 10 | On first login you may be prompted to change your password. (Note this login is separate from your SSO login. You may use same password if you want, just know that your SSO login and identity login are managed separately as well as MFA) 11 | 12 | ![change pw](./images/change_pw.png "change pw") 13 | 14 | Your console will look like the picture below. Please click on the IAM service. 15 | 16 | ![select iam](./images/select_iam.png "select iam") 17 | 18 | Click the "Add MFA" button. 
This will take you to the "My Security Credential" page where you will be able to change 19 | your IAM password, create and delete your AWS access key, and set up your MFA device. 20 | 21 | ![add mfa](./images/add_mfa.png "add mfa") 22 | 23 | Click on the "Assign MFA device" button. This step, along with creating the access key, is very important; otherwise, you won't 24 | be able to establish an AWS session via ssm-ssh. 25 | 26 | ![my sec creds](./images/my_sec_creds.png "my sec creds") 27 | 28 | After you click the "Assign MFA device" button, make sure you select Virtual MFA device and click the continue button as shown below: 29 | 30 | ![manage mfa](./images/manage_mfa.png "manage mfa") 31 | 32 | Click on "Show QR code" so you can scan it with your MFA authenticator app, such as Microsoft Authenticator, DUO Mobile, or Google Authenticator. 33 | 34 | ![show qr](./images/show_qr.png "show qr") 35 | 36 | In this example an iPhone is used to scan the QR code with DUO Mobile. The mfa.usr account name is only an example. 37 | 38 | Your username will appear here. 39 | 40 | ![mfa account name](./images/mfa_account_name.png "mfa account name") 41 | 42 | Once you set up the IAM MFA account on your mobile device, type two consecutive MFA codes to finalize the MFA setup as shown below: 43 | 44 | ![mfa codes](./images/mfa_codes.png "mfa codes") 45 | 46 | Your aws-identity MFA account will look similar to the following picture once you type the two consecutive codes. 47 | 48 | ![mfa app screen](./images/mfa_app_screen.png "mfa app screen") 49 | 50 | Click on the close button. 51 | 52 | ![close button](./images/close_button.png "close button") 53 | 54 | # Creating AWS access keys 55 | 56 | Click on the "Create access key" button. 57 | 58 | ![my sec creds](./images/my_sec_creds.png "my sec creds") 59 | 60 | A new set of AWS access keys will be generated. Click the "Download .csv file" button. **You must download the file before closing the screen. There is no way to retrieve the keys after the page is closed.** 61 | 62 | ![create access key](./images/new_access_key.png "create access key") 63 | 64 | # Setting up local AWS credentials 65 | 66 | Opening the csv file you downloaded in the previous step should show something like the following (note your values will be different): 67 | 68 | ![access keys](./images/access_keys.png "access keys") 69 | 70 | In your home directory create a folder named ".aws" if you don't already have one. 71 | Then create or edit a file named credentials in that folder with the following structure: 72 | Replacing 73 | * mfa.user with your aws username. 74 | * aws_access_key_id with the value from the csv in the "Access key ID" column. 75 | * aws_secret_access_key with the value from the csv in the "Secret access key" column. 76 | 77 | The localstack section should be kept as is. 78 | 79 | ```ini 80 | [mfa.user-identity] 81 | aws_access_key_id=AKIATHSY35ZK3P3YEGHZ 82 | aws_secret_access_key=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 83 | 84 | [localstack] 85 | aws_access_key_id=test 86 | aws_secret_access_key=test 87 | ``` 88 | Next, create or edit a file named config in that folder with the following structure: 89 | Replacing 90 | * mfa.user with your aws username. 91 | 92 | The localstack section should be kept as is.
93 | ```ini 94 | [profile mfa.user-identity] 95 | region=us-west-2 96 | output=json 97 | 98 | [profile localstack] 99 | region=us-east-1 100 | output=text 101 | ``` 102 | 103 | # Managing password, MFA, and access keys 104 | 105 | Use the following link to update your password, MFA, and access keys: 106 | [My security credentials](https://us-east-1.console.aws.amazon.com/iam/home#/security_credentials) 107 | 108 | -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_login.png -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_mfa_done.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_mfa_done.png -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_mfa_name.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_mfa_name.png -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_mfa_qr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_mfa_qr.png -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_new_mfa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_new_mfa.png -------------------------------------------------------------------------------- /docs/aws_sso/images/aws_new_user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/aws_new_user.png -------------------------------------------------------------------------------- /docs/aws_sso/images/invite-email-body.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/invite-email-body.png -------------------------------------------------------------------------------- /docs/aws_sso/images/invite-email-subject.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/aws_sso/images/invite-email-subject.png -------------------------------------------------------------------------------- /docs/aws_sso/readme.md: -------------------------------------------------------------------------------- 1 | # AWS SSO LOG IN Process 2 | 3 | You should receive an AWS invite email in your inbox once your SSO account is created that will look as follows: 4 | 5 | ![email subject](./images/invite-email-subject.png "email subject") 6 | 7 | ![email body](./images/invite-email-body.png "email body") 8 | 9 | Once you click on the accept 
invitation you will be directed to create a new password as shown below: 10 | 11 | ![new user](./images/aws_new_user.png "new user") 12 | 13 | To log into AWS SSO please go to this website: [https://YOURSITE.awsapps.com/start](https://YOURSITE.awsapps.com/start) 14 | Replace YOURSITE with your correct url. 15 | 16 | You will be directed to the logon page where you will be entering your username in the recommended format: firstname.lastname. 17 | 18 | Example: 19 | 20 | ![login](./images/aws_login.png "login") 21 | 22 | In the next page, you will be asked to register your MFA (Multi Factor Authenticator) device to access your AWS account. 23 | 24 | ![new mfa](./images/aws_new_mfa.png "new mfa") 25 | 26 | You can use any MFA authenticator such as DUO Mobile, Microsoft Authenticator, Google Authenticator to add the AWS MFA account with QR Code as shown below: 27 | 28 | ![mfa qr](./images/aws_mfa_qr.png "mfa qr") 29 | 30 | ![mfa name](./images/aws_mfa_name.png "mfa name") 31 | 32 | 33 | This is how it looks in DUO after you type the name of the account: 34 | 35 | ![mfa done](./images/aws_mfa_done.png "mfa done") 36 | 37 | Once you type the MFA code it may ask you to reset the password if this is your first logging into AWS SSO. 38 | -------------------------------------------------------------------------------- /docs/bitwarden/images/pop-out.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/bitwarden/images/pop-out.png -------------------------------------------------------------------------------- /docs/container_dev/images/GenericDevContainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/container_dev/images/GenericDevContainer.png -------------------------------------------------------------------------------- /docs/container_dev/images/compose_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/container_dev/images/compose_v2.png -------------------------------------------------------------------------------- /docs/container_dev/images/ddesktop-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/container_dev/images/ddesktop-container.png -------------------------------------------------------------------------------- /docs/container_dev/images/ddesktop-memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/container_dev/images/ddesktop-memory.png -------------------------------------------------------------------------------- /docs/container_dev/images/ddesktop-wsl2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/container_dev/images/ddesktop-wsl2.png -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Bare Minimum 4 | 1. Create new branch 5 | 2. 
Change files 6 | 3. Update the version for the `DEV_CONTAINER` variable in the [docker-compose.yml](../docker-compose.yml) file to the next version 7 | 4. Do a PR 8 | 5. Merge the PR 9 | 6. Checkout and pull latest main 10 | 7. Create a new tag with the latest version. 11 | The tag version starts with a "v", and the `DEV_CONTAINER` version variable does not. 12 | ```shell 13 | git checkout main 14 | git pull 15 | git tag -a v -m 16 | ``` 17 | 8. Push the tag 18 | ```shell 19 | git push --tags 20 | ``` 21 | 22 | NOTE - We only bump the `DEV_CONTAINER` version and cut a tag 23 | version when the code for the GDC changes. For example, we don't cut new tags for README-only changes. 24 | -------------------------------------------------------------------------------- /docs/cypress/images/cypress-error-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/cypress/images/cypress-error-1.png -------------------------------------------------------------------------------- /docs/cypress/images/xquartz-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/devxpod/GDC/b6234bab2507ae2aa29605e0bf59b50818dd083c/docs/cypress/images/xquartz-settings.png -------------------------------------------------------------------------------- /docs/cypress/readme.md: -------------------------------------------------------------------------------- 1 | # Cypress End-to-End Testing 2 | 3 | ## Table of Contents 4 | 5 | - [Overview](#overview) 6 | - [Getting Started](#getting_started) 7 | - [Interactive Mode](#interactive_mode) 8 | - [Troubleshooting](#troubleshooting) 9 | 10 | ## Overview 11 | Cypress End-to-End Testing is a powerful testing framework that allows you to automate and validate the behavior of your web applications. It provides an intuitive and developer-friendly interface for creating tests, making it easier to catch bugs and ensure the quality of your application. 12 | 13 | With Cypress, you can simulate real user interactions, such as clicking buttons, filling out forms, and navigating between pages. It runs directly in the browser, allowing you to observe and debug your tests in real-time, which significantly speeds up the development and debugging process. 14 | 15 | ## Getting Started 16 | To start using Cypress in your project with the GDC, follow the steps below: 17 | 1. Create a `.env-gdc` file in the root directory of your project repository and add the following environment variables: 18 | ``` 19 | export EXTRA_PACKAGES="libgtk2.0-0 libgtk-3-0 libgbm-dev libnotify-dev libgconf-2-4 libnss3 libxss1 libasound2 libxtst6 xauth xvfb" 20 | 21 | # only needed for interactive mode 22 | export GDC_ENV_DISPLAY="host.docker.internal:0" 23 | ``` 24 | **Note:** The `EXTRA_PACKAGES` environment variable must be set as shown to run headless tests. Additionally, setting the `GDC_ENV_DISPLAY` environment variable configures the GDC for Cypress Interactive Mode. 25 | 26 | 27 | 2. For new projects, the GDC is configured and ready to be used with Cypress. Follow the [Cypress documentation](https://docs.cypress.io/guides/getting-started/installing-cypress) for installing and using Cypress. For existing projects, stop the GDC (if running) and restart it using the command below. 
Doing so will clear the GDC build cache and allow the Cypress dependencies (`EXTRA_PACKAGES`) to be downloaded and installed during the next build process. 28 | ``` 29 | CLEAN=yes run-dev-container.sh 30 | ``` 31 | 32 | ## Interactive Mode 33 | With Cypress installed in the GDC, you may use Interactive Mode with the GDC by following the steps below: 34 | 35 | ### macOS 36 | 1. Install XQuartz on your host machine: 37 | ``` 38 | brew install --cask xquartz 39 | ``` 40 | 2. Run XQuartz on your host machine: 41 | ``` 42 | open -a XQuartz 43 | ``` 44 | 3. From your host machine's Apple menu, select **XQuartz > Settings...** 45 | 46 | 47 | 4. Select the `Security` tab and ensure that **Allow connections from network clients** is enabled. 48 | ![XQuartz](./images/xquartz-settings.png) 49 | 50 | 51 | 5. From within the terminal of your host machine, add the GDC address to X11 server access control list by running the following command: 52 | ``` 53 | /usr/X11/bin/xhost + host.docker.internal 54 | ``` 55 | 6. From the GDC terminal, change into your UI project directory containing the `package.json` file and Cypress installation. 56 | 57 | 58 | 7. Start your development server. 59 | 60 | 61 | 8. Run `npx cypress open`, `yarn run cypress open`, or any command for a custom script listed within your `package.json` file. 62 | ### Windows 63 | 64 | 1. Download and install VcXsrv Windows X Server onto your host machine from [SourceForge.net](https://sourceforge.net/projects/vcxsrv/). 65 | 66 | 67 | 2. Run VcXsrv Windows X Server on your host machine. 68 | 69 | 70 | 3. Add `host.docker.internal` to the server's access control list. 71 | 72 | 73 | 4. From the GDC terminal, change into your UI project directory containing the `package.json` file and Cypress installation. 74 | 75 | 76 | 5. Start your development server. 77 | 78 | 79 | 6. Run `npx cypress open`, `yarn run cypress open`, or any command for a custom script listed within your `package.json` file. 80 | 81 | ## Troubleshooting 82 | 83 | ### Cypress Failed to Start 84 | ![cypress-dependency-error](./images/cypress-error-1.png) 85 | 86 | 1. If you encounter a missing library or dependency error like the one depicted above, try running the following command from the terminal on your host machine, followed by re-attempting to run Interactive Mode from within the GDC terminal: 87 | ``` 88 | xhost + host.docker.internal 89 | ``` 90 | **Note:** If the xhost command is not found, it may not be listed in your path. Try `/usr/X11/bin/xhost`. 91 | 92 | 93 | 2. Ensure that `host.docker.internal` in your hosts file on your local machine is pointing to `127.0.0.1`. For macOS, the hosts file is located at `/etc/hosts`. For Windows, the hosts file is located at `c:\windows\system32\drivers\etc\hosts`. 94 | 95 | **NOTE**: Admin permissions are required to edit the hosts file. Use `sudo` for macOS. For Windows, edit the file as an administrator. 
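Once display forwarding is working, specs run in the GDC exactly as they would locally. For reference, a minimal, hypothetical spec is sketched below; the file path, the baseUrl, and the "Welcome" text it looks for are placeholders for whatever your own application serves.

```typescript
// cypress/e2e/smoke.cy.ts -- an illustrative example spec, not part of the GDC.
// Assumes your dev server is reachable at the baseUrl configured in
// cypress.config.ts (e.g. http://localhost:3000); adjust for your project.
describe("smoke test", () => {
  it("loads the home page", () => {
    cy.visit("/"); // resolved against baseUrl
    cy.contains("Welcome").should("be.visible"); // "Welcome" is a placeholder
  });
});
```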
96 | -------------------------------------------------------------------------------- /docs/debugging/readme.md: -------------------------------------------------------------------------------- 1 | # Debugging 2 | 3 | ## Python 4 | ### Pycharm 5 | In **PyCharm** IDE go to `Run->Edit Configurations` 6 | Click the + in top left and select `Python Debug Server` 7 | Give it a name of `gdc-python-debug` 8 | `IDE host name` = `host.docker.internal` 9 | `Port = 12345` 10 | Put check in `Redirect output to console` 11 | Uncheck `Suspend after connect` 12 | Click the folder icon to the right of `Path mappings` and click the + to add a new one 13 | `Local Path` = 14 | `Remote` = `/workspace` 15 | Click `ok` until your back to main IDE window 16 | 17 | In the container with project `venv` activated run: 18 | ```bash 19 | pip install pydevd-pycharm~=222.3739.56 20 | ``` 21 | 22 | In the IDE create a file to test the debugger any place in project named `testdebug.py` with content of: 23 | ```python 24 | import pydevd_pycharm 25 | 26 | pydevd_pycharm.settrace( 27 | "host.docker.internal", port=12345, stdoutToServer=True, stderrToServer=True 28 | ) 29 | abc = "123" 30 | print(abc) 31 | ``` 32 | 33 | Put a breakpoint on the line `print(abc)` 34 | Click the little debug icon in the top of the ide next to the run config you created "gdc-python-debug" (this will start the debug server in the IDE) 35 | 36 | Now inside the GDC run: 37 | ```bash 38 | python testdebug.py 39 | ``` 40 | It should connect to the IDE debug server and the IDE should pause execution on the breakpoint and allow you to do usual debug stuff 41 | -------------------------------------------------------------------------------- /etc/bash_completion.d/gdcex.sh: -------------------------------------------------------------------------------- 1 | _get_containers() 2 | { 3 | local cur prev opts 4 | COMPREPLY=() 5 | cur="${COMP_WORDS[COMP_CWORD]}" 6 | prev="${COMP_WORDS[COMP_CWORD-1]}" 7 | opts=$(docker network inspect "$DEVNET_NAME" | jq -r '.[0].Containers[].Name' | grep -v 'arn_aws' | grep -v "$GDC_CONTAINER_NAME" | sort) 8 | 9 | #echo cur=$cur 10 | #echo prev=$prev 11 | #echo opts=$opts 12 | 13 | 14 | COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) 15 | return 0 16 | } 17 | complete -F _get_containers gdcex.sh 18 | -------------------------------------------------------------------------------- /etc/dpkg/dpkg.conf.d/01_nodoc: -------------------------------------------------------------------------------- 1 | # /etc/dpkg/dpkg.conf.d/01_nodoc 2 | 3 | # Delete locales 4 | path-exclude=/usr/share/locale/* 5 | 6 | # Delete docs 7 | path-exclude=/usr/share/doc/* 8 | # we need to keep copyright files for legal reasons 9 | path-include=/usr/share/doc/*/copyright 10 | 11 | # remove the man pages 12 | path-exclude /usr/share/man/* 13 | path-exclude /usr/share/groff/* 14 | path-exclude /usr/share/info/* 15 | 16 | # lintian stuff is small, but really unnecessary 17 | path-exclude /usr/share/lintian/* 18 | path-exclude /usr/share/linda/* 19 | -------------------------------------------------------------------------------- /etc/profile.d/02-locale-set.sh: -------------------------------------------------------------------------------- 1 | LC_ALL="en_US.UTF-8" 2 | LC_CTYPE="en_US.UTF-8" 3 | LANGUAGE="en_US.UTF-8" 4 | -------------------------------------------------------------------------------- /etc/skel/.terraformrc: -------------------------------------------------------------------------------- 1 | plugin_cache_dir = "$HOME/.terraform_plugin_cache" 2 
| -------------------------------------------------------------------------------- /etc/ssh/ssh_config: -------------------------------------------------------------------------------- 1 | 2 | # This is the ssh client system-wide configuration file. See 3 | # ssh_config(5) for more information. This file provides defaults for 4 | # users, and the values can be changed in per-user configuration files 5 | # or on the command line. 6 | 7 | # Configuration data is parsed as follows: 8 | # 1. command line options 9 | # 2. user-specific file 10 | # 3. system-wide file 11 | # Any configuration value is only changed the first time it is set. 12 | # Thus, host-specific definitions should be at the beginning of the 13 | # configuration file, and defaults at the end. 14 | 15 | # Site-wide defaults for some commonly used options. For a comprehensive 16 | # list of available options, their meanings and defaults, please see the 17 | # ssh_config(5) man page. 18 | 19 | Include /etc/ssh/ssh_config.d/*.conf 20 | 21 | Host * 22 | ForwardAgent yes 23 | ForwardX11 yes 24 | ForwardX11Trusted yes 25 | # PasswordAuthentication yes 26 | # HostbasedAuthentication no 27 | # GSSAPIAuthentication no 28 | # GSSAPIDelegateCredentials no 29 | # GSSAPIKeyExchange no 30 | # GSSAPITrustDNS no 31 | # BatchMode no 32 | # CheckHostIP yes 33 | # AddressFamily any 34 | # ConnectTimeout 0 35 | # StrictHostKeyChecking ask 36 | # IdentityFile ~/.ssh/id_rsa 37 | # IdentityFile ~/.ssh/id_dsa 38 | # IdentityFile ~/.ssh/id_ecdsa 39 | # IdentityFile ~/.ssh/id_ed25519 40 | # Port 22 41 | # Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-cbc,3des-cbc 42 | # MACs hmac-md5,hmac-sha1,umac-64@openssh.com 43 | # EscapeChar ~ 44 | # Tunnel no 45 | # TunnelDevice any:any 46 | # PermitLocalCommand no 47 | # VisualHostKey no 48 | # ProxyCommand ssh -q -W %h:%p gateway.example.com 49 | # RekeyLimit 1G 1h 50 | SendEnv LANG LC_* 51 | HashKnownHosts yes 52 | GSSAPIAuthentication yes 53 | -------------------------------------------------------------------------------- /etc/ssh/sshd_config: -------------------------------------------------------------------------------- 1 | # $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $ 2 | 3 | # This is the sshd server system-wide configuration file. See 4 | # sshd_config(5) for more information. 5 | 6 | # This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin 7 | 8 | # The strategy used for options in the default sshd_config shipped with 9 | # OpenSSH is to specify options with their default value where 10 | # possible, but leave them commented. Uncommented options override the 11 | # default value. 12 | 13 | Include /etc/ssh/sshd_config.d/*.conf 14 | 15 | #Port 22 16 | #AddressFamily any 17 | #ListenAddress 0.0.0.0 18 | #ListenAddress :: 19 | 20 | #HostKey /etc/ssh/ssh_host_rsa_key 21 | #HostKey /etc/ssh/ssh_host_ecdsa_key 22 | #HostKey /etc/ssh/ssh_host_ed25519_key 23 | 24 | # Ciphers and keying 25 | #RekeyLimit default none 26 | 27 | # Logging 28 | #SyslogFacility AUTH 29 | #LogLevel INFO 30 | 31 | # Authentication: 32 | 33 | #LoginGraceTime 2m 34 | PermitRootLogin yes 35 | #StrictModes yes 36 | #MaxAuthTries 6 37 | #MaxSessions 10 38 | 39 | PubkeyAuthentication yes 40 | 41 | # Expect .ssh/authorized_keys2 to be disregarded by default in future. 
42 | #AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 43 | 44 | #AuthorizedPrincipalsFile none 45 | 46 | #AuthorizedKeysCommand none 47 | #AuthorizedKeysCommandUser nobody 48 | 49 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts 50 | #HostbasedAuthentication no 51 | # Change to yes if you don't trust ~/.ssh/known_hosts for 52 | # HostbasedAuthentication 53 | #IgnoreUserKnownHosts no 54 | # Don't read the user's ~/.rhosts and ~/.shosts files 55 | #IgnoreRhosts yes 56 | 57 | # To disable tunneled clear text passwords, change to no here! 58 | PasswordAuthentication yes 59 | PermitEmptyPasswords no 60 | 61 | # Change to yes to enable challenge-response passwords (beware issues with 62 | # some PAM modules and threads) 63 | ChallengeResponseAuthentication no 64 | 65 | # Kerberos options 66 | #KerberosAuthentication no 67 | #KerberosOrLocalPasswd yes 68 | #KerberosTicketCleanup yes 69 | #KerberosGetAFSToken no 70 | 71 | # GSSAPI options 72 | #GSSAPIAuthentication no 73 | #GSSAPICleanupCredentials yes 74 | #GSSAPIStrictAcceptorCheck yes 75 | #GSSAPIKeyExchange no 76 | 77 | # Set this to 'yes' to enable PAM authentication, account processing, 78 | # and session processing. If this is enabled, PAM authentication will 79 | # be allowed through the ChallengeResponseAuthentication and 80 | # PasswordAuthentication. Depending on your PAM configuration, 81 | # PAM authentication via ChallengeResponseAuthentication may bypass 82 | # the setting of "PermitRootLogin without-password". 83 | # If you just want the PAM account and session checks to run without 84 | # PAM authentication, then enable this but set PasswordAuthentication 85 | # and ChallengeResponseAuthentication to 'no'. 86 | UsePAM yes 87 | 88 | AllowAgentForwarding yes 89 | AllowTcpForwarding yes 90 | #GatewayPorts no 91 | X11Forwarding yes 92 | X11DisplayOffset 10 93 | #X11UseLocalhost yes 94 | PermitTTY yes 95 | PrintMotd no 96 | #PrintLastLog yes 97 | #TCPKeepAlive yes 98 | #PermitUserEnvironment no 99 | #Compression delayed 100 | #ClientAliveInterval 0 101 | #ClientAliveCountMax 3 102 | #UseDNS no 103 | #PidFile /var/run/sshd.pid 104 | #MaxStartups 10:30:100 105 | #PermitTunnel no 106 | #ChrootDirectory none 107 | #VersionAddendum none 108 | 109 | # no default banner path 110 | #Banner none 111 | 112 | # Allow client to pass locale environment variables 113 | AcceptEnv LANG LC_* 114 | 115 | # override default of no subsystems 116 | Subsystem sftp /usr/lib/openssh/sftp-server 117 | 118 | # Example of overriding settings on a per-user basis 119 | #Match User anoncvs 120 | # X11Forwarding no 121 | # AllowTcpForwarding no 122 | # PermitTTY no 123 | # ForceCommand cvs server 124 | -------------------------------------------------------------------------------- /etc/term_colors.sh: -------------------------------------------------------------------------------- 1 | # use these to change the window / tab title. 
echo -e "$title_start THE_TITLE $title_end" 2 | title_start='\e]0;' 3 | title_end='\a' 4 | 5 | txtblk='\e[0;30m' # Black - Regular 6 | txtred='\e[0;31m' # Red 7 | txtgrn='\e[0;32m' # Green 8 | txtylw='\e[0;33m' # Yellow 9 | txtblu='\e[0;34m' # Blue 10 | txtpur='\e[0;35m' # Purple 11 | txtcyn='\e[0;36m' # Cyan 12 | txtwht='\e[0;37m' # White 13 | 14 | bldblk='\e[1;30m' # Black - Bold 15 | bldred='\e[1;31m' # Red 16 | bldgrn='\e[1;32m' # Green 17 | bldylw='\e[1;33m' # Yellow 18 | bldblu='\e[1;34m' # Blue 19 | bldpur='\e[1;35m' # Purple 20 | bldcyn='\e[1;36m' # Cyan 21 | bldwht='\e[1;37m' # White 22 | 23 | unkblk='\e[4;30m' # Black - Underline 24 | undred='\e[4;31m' # Red 25 | undgrn='\e[4;32m' # Green 26 | undylw='\e[4;33m' # Yellow 27 | undblu='\e[4;34m' # Blue 28 | undpur='\e[4;35m' # Purple 29 | undcyn='\e[4;36m' # Cyan 30 | undwht='\e[4;37m' # White 31 | 32 | bakblk='\e[40m' # Black - Background 33 | bakred='\e[41m' # Red 34 | bakgrn='\e[42m' # Green 35 | bakylw='\e[43m' # Yellow 36 | bakblu='\e[44m' # Blue 37 | bakpur='\e[45m' # Purple 38 | bakcyn='\e[46m' # Cyan 39 | bakwht='\e[47m' # White 40 | txtrst='\e[0m' # Text Reset 41 | 42 | # p_* vars are for use in shell prompts. The extra escape brackets are are so the shell knows not to include the code in the length of the line. 43 | p_txtblk="\[$txtblk\]" # Black - Regular 44 | p_txtred="\[$txtred\]" # Red 45 | p_txtgrn="\[$txtgrn\]" # Green 46 | p_txtylw="\[$txtylw\]" # Yellow 47 | p_txtblu="\[$txtblu\]" # Blue 48 | p_txtpur="\[$txtpur\]" # Purple 49 | p_txtcyn="\[$txtcyn\]" # Cyan 50 | p_txtwht="\[$txtwht\]" # White 51 | 52 | p_bldblk="\[$bldblk\]" # Black - Bold 53 | p_bldred="\[$bldred\]" # Red 54 | p_bldgrn="\[$bldgrn\]" # Green 55 | p_bldylw="\[$bldylw\]" # Yellow 56 | p_bldblu="\[$bldblu\]" # Blue 57 | p_bldpur="\[$bldpur\]" # Purple 58 | p_bldcyn="\[$bldcyn\]" # Cyan 59 | p_bldwht="\[$bldwht\]" # White 60 | 61 | p_unkblk="\[$unkblk\]" # Black - Underline 62 | p_undred="\[$undred\]" # Red 63 | p_undgrn="\[$undgrn\]" # Green 64 | p_undylw="\[$undylw\]" # Yellow 65 | p_undblu="\[$undblu\]" # Blue 66 | p_undpur="\[$undpur\]" # Purple 67 | p_undcyn="\[$undcyn\]" # Cyan 68 | p_undwht="\[$undwht\]" # White 69 | 70 | p_bakblk="\[$bakblk\]" # Black - Background 71 | p_bakred="\[$bakred\]" # Red 72 | p_bakgrn="\[$bakgrn\]" # Green 73 | p_bakylw="\[$bakylw\]" # Yellow 74 | p_bakblu="\[$bakblu\]" # Blue 75 | p_bakpur="\[$bakpur\]" # Purple 76 | p_bakcyn="\[$bakcyn\]" # Cyan 77 | p_bakwht="\[$bakwht\]" # White 78 | p_txtrst="\[$txtrst\]" # Text Reset 79 | 80 | -------------------------------------------------------------------------------- /init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source /etc/term_colors.sh 4 | 5 | TITLE="GDC: $COMPOSE_PROJECT_NAME" 6 | echo -n -e "$title_start$TITLE$title_end" 7 | 8 | dos2unix /root/gdc-host/.env-gdc* 9 | 10 | cd /workspace || echo "$bldred Cant cd to /workspace!!!!! $txtrst" 11 | 12 | if [ -x /postStartCommand.sh ]; then 13 | . /postStartCommand.sh 14 | fi 15 | 16 | 17 | 18 | if [ -n "$GDC_ENTRYPOINT" ]; then 19 | $GDC_ENTRYPOINT 20 | EP_EC=$? 
21 | if [ $EP_EC -ne 0 ] ; then 22 | echo "$bldred GDC_ENTRYPOINT returned non-zero exit code: $EP_EC $txtrst" 23 | /root/bin-extra/auth0/stop-auth0.sh 2>/dev/null 24 | /root/bin-extra/ls/stop-ls.sh 2>/dev/null 25 | exit $EP_EC 26 | fi 27 | if [ "$GDC_RUN_MODE" != "daemon" ]; then 28 | /root/bin-extra/auth0/stop-auth0.sh 2>/dev/null 29 | /root/bin-extra/ls/stop-ls.sh 2>/dev/null 30 | exit 0 31 | fi 32 | fi 33 | 34 | 35 | echo -e "$bldwht""================================================================================ $txtrst" 36 | echo -e "$bldgrn""Connect to GDC shell via docker with: docker exec -it $GDC_CONTAINER_NAME bash -l $txtrst $CLIPBOARD_MSG" 37 | 38 | echo -e "user:$bldwht root $txtrst password default unless changed is:$bldwht ContainersRule $txtrst" 39 | if [ -n "$SSH_SERVER_PORT" ]; then 40 | echo -e "Connect to GDC via ssh:$bldwht ssh root@localhost -p $SSH_SERVER_PORT $txtrst" 41 | echo "If you get a REMOTE HOST IDENTIFICATION HAS CHANGED error. " 42 | echo "Use the following command to fix before connecting:" 43 | echo -e "$bldgrn""ssh-keygen -R [localhost]:$SSH_SERVER_PORT $txtrst" 44 | fi 45 | if [ -n "$STARTUP_MSG" ]; then 46 | echo -e "$bldgrn""----- [ $STARTUP_MSG ] ----- $txtrst" 47 | fi 48 | echo -e "$bldwht""================================================================================ $txtrst" 49 | echo "sleeping forever...." 50 | tail -f /dev/null 2>&1 51 | 52 | -------------------------------------------------------------------------------- /k8s/.dockerignore: -------------------------------------------------------------------------------- 1 | docker-compose.yml 2 | **/temp 3 | **/tmp 4 | test 5 | **/*.ps1 6 | **/.idea 7 | **/.vscode 8 | data 9 | attachments 10 | **/venv* 11 | **/vendor 12 | **/node_modules 13 | **/__pycache__ 14 | **/*.zip 15 | **/test_data 16 | **/.cache 17 | **/.pulumi 18 | docs 19 | .run 20 | DEADJOE 21 | -------------------------------------------------------------------------------- /k8s/.env-gdc: -------------------------------------------------------------------------------- 1 | export ROOT_PW=${ROOT_PW:=ContainersRule} # sets root password in container 2 | 3 | export USE_WORKSPACE=${USE_WORKSPACE:=yes} 4 | 5 | export USE_AWS_HOME=${USE_AWS_HOME:=yes} # copy .aws folder from host home if exists and enables USE_HOST_HOME=yes 6 | export USE_HOME_BIN=${USE_HOME_BIN:=no} # copy bin folder from host home directory if it exists and enables USE_HOST_HOME=yes 7 | 8 | if [[ "$USE_HOME_BIN" = "yes" || "$USE_AWS_HOME" = "yes" ]]; then 9 | USE_HOST_HOME=yes 10 | fi 11 | 12 | export USE_HOST_HOME=${USE_HOST_HOME:=yes} # mount home directory from host 13 | 14 | if [ -z ${PULUMI_VERSION+x} ]; then 15 | export PULUMI_VERSION=latest # install this version of pulumi by default 16 | fi 17 | if [ -z ${PHP_VERSION+x} ]; then 18 | export PHP_VERSION='' # available PHP versions 5.6, 7.0, 7.1, 7.2, 7.3, 7.4, 8.0, 8.1, 8.2 19 | fi 20 | if [ -z ${GOLANG_VERSION+x} ]; then 21 | export GOLANG_VERSION='1.23' # install this golang version. blank for none. 
latest is 1.18 22 | fi 23 | 24 | export USE_AWS=${USE_AWS:=yes} # install latest aws cli, ssm plugin, and ecr helper 25 | 26 | if [[ -z "$AWS_VERSION" || "$AWS_VERSION" = "latest" ]]; then 27 | # latest version 28 | export AWS_VERSION=$(curl -s https://raw.githubusercontent.com/aws/aws-cli/v2/awscli/__init__.py | grep __version__ | cut -f3 -d' ' | tr -d "'") 29 | if [[ -z "$AWS_VERSION" ]]; then # if failed to fetch use known good version 30 | export AWS_VERSION=2.15.36 31 | fi 32 | fi 33 | 34 | if [[ -z ${PYTHON_VERSION+x} ]]; then 35 | export PYTHON_VERSION=3.12 # latest aws lambda supported runtime 36 | fi 37 | 38 | export USE_PRECOMMIT=${USE_PRECOMMIT:=no} # use pre-commit hooks in git to format and lint files 39 | # pre-commit requires python and will enable it if needed 40 | if [[ -z ${PYTHON_VERSION+x} && "$USE_PRECOMMIT" = "yes" ]]; then 41 | export PYTHON_VERSION=3.12 # install this python version 42 | fi 43 | 44 | export USE_BITWARDEN=${USE_BITWARDEN:=yes} # enable bitwarden workflow helpers. requires node install 45 | export PERSIST_BITWARDEN_SESSION=${PERSIST_BITWARDEN_SESSION:=no} # persist unlocked vault creds between container sessions 46 | 47 | export USE_CDK=${USE_CDK:=$USE_AWS} # install latest aws cdk, terraform and cdk for terraform. requires node install 48 | 49 | # if bitwarden is enabled, ensure node is also enabled 50 | if [[ "$USE_BITWARDEN" = "yes" && -z "$NODE_VERSION" ]]; then 51 | export NODE_VERSION=22 # install this version of node. 52 | fi 53 | 54 | # if cdk is enabled, ensure node is also enabled 55 | if [[ "$USE_CDK" = "yes" && -z "$NODE_VERSION" ]]; then 56 | export NODE_VERSION=22 # install this version of node. 57 | fi 58 | 59 | 60 | if [ -n "$LOCALSTACK_API_KEY" ] || [ -n "$LOCALSTACK_AUTH_TOKEN" ]; then 61 | export LS_IMAGE=${LS_IMAGE:="localstack/localstack-pro"} # use pro image if API key is provided 62 | else 63 | export LS_IMAGE=${LS_IMAGE:="localstack/localstack"} # can override with custom image location. Still uses LS_VERSION to create final image location. 64 | fi 65 | export LS_VERSION # starts requested localstack container version 66 | export USE_LOCALSTACK=${USE_LOCALSTACK:=yes} # does not install or start localstack. Only sets up some helpers 67 | export USE_LOCALSTACK_PERSISTENCE=${USE_LOCALSTACK_PERSISTENCE:=no} # toggle persistent storage for LS defaults to persistence disabled. 68 | export USE_LOCALSTACK_HOST=${USE_LOCALSTACK_HOST:=yes} # does not install or start localstack. Only sets up some helpers and port forwards 69 | if [ -z ${LOCALSTACK_HOST_DNS_PORT+x} ]; then 70 | LOCALSTACK_HOST_DNS_PORT=53 # forward this port from host to localstack for DNS 71 | fi 72 | export USE_LOCALSTACK_SHARED=${USE_LOCALSTACK_SHARED:=no} # mount shared volume in LS container under /shared 73 | export LOCALSTACK_HOST_DNS_PORT 74 | export LOCALSTACK_GATEWAY_LISTEN 75 | export USE_LOCALSTACK_DNS=${USE_LOCALSTACK_DNS:=no} # set to yes to assign static ip to LS container and use it as primary DNS 76 | 77 | export USE_AUTH0 # starts up auth0 mock container in container only mode 78 | export USE_AUTH0_HOST # starts up auth0 mock container and forwards port from host. 
Use AUTH0_HOST_PORT to change default of 3001 79 | export AUTH0_HOST_PORT=${AUTH0_HOST_PORT:=3001} # default port for AUTH0 mock if enabled 80 | export AUTH0_LOCAL_USERS_FILE # used to specify location in container for auth0 to mount user override file 81 | export AUTH0_DEFAULT_USER=${AUTH0_DEFAULT_USER:="user1"} # used to auto-populate auth0 mock login page 82 | export AUTH0_DEFAULT_PASSWORD=${AUTH0_DEFAULT_PASSWORD:="user1"} # used to auto-populate auth0 mock login page 83 | 84 | export USE_DOT_NET # install ubuntu:latest dotnet core packages 85 | export USE_JAVA # install ubuntu:latest openjdk packages 86 | export USE_POWERSHELL # install latest stable powershell 87 | 88 | export EDITOR=${EDITOR:=vi} # sets default editor in container. usually set to same as VISUAL 89 | export VISUAL=${VISUAL:=vi} # sets default editor in container. usually set to same as EDITOR 90 | if [ -z ${SSH_KEYSCAN_HOSTS+x} ]; then 91 | export SSH_KEYSCAN_HOSTS="gitlab.com github.com bitbucket.org" # copy ssh keys from these hosts to prevent unknown key prompts 92 | fi 93 | 94 | # default secondary dns to google secondary dns if not specified 95 | if [ -z "$GDC_DNS_SEC_IP" ]; then 96 | GDC_DNS_SEC_IP=8.8.4.4 97 | fi 98 | 99 | # these will only be used by the container if GDC_DNS_PRI_IP is defined 100 | export GDC_DNS_PRI_IP 101 | export GDC_DNS_SEC_IP 102 | 103 | export DEVNET_GATEWAY 104 | 105 | export USE_COLOR_PROMPT=${USE_COLOR_PROMPT:=yes} # enable colored bash prompt 106 | 107 | export CHECK_UPDATES=${CHECK_UPDATES:=yes} # check for updates on each login 108 | export SHARED_VOLUMES # specify volume names to create and share across all GDC's 109 | export GDC_RUN_MODE=${GDC_RUN_MODE:="start"} # options are start, stop, daemon 110 | 111 | export DEV_CONTAINER_NAME=${DEV_CONTAINER_NAME:="dev-1"} # dev container name 112 | 113 | export COPY_CMD_TO_CLIPBOARD=${COPY_CMD_TO_CLIPBOARD:=yes} # COPY GDC shell launch command to clipboard 114 | 115 | export USE_PROXY_HOST=${USE_PROXY_HOST:=no} # no, proxy, dump, web 116 | export PROXY_VERSION=${PROXY_VERSION:=latest} # container image version tag 117 | export PROXY_CONTAINER_NAME=${PROXY_CONTAINER_NAME:=proxy} # name of the container 118 | export PROXY_HOST_PORT=${PROXY_HOST_PORT:=8080} # port to expose to host 119 | export PROXY_WEB_HOST_PORT=${PROXY_WEB_HOST_PORT:=8081} # if running in web mode expose this port 120 | export PROXY_VOLUME_DIR=${PROXY_VOLUME_DIR:=/tmp/mitproxy} # volume to persist certs to 121 | export PROXY_AUTO_EXPORT_ENV=${PROXY_AUTO_EXPORT_ENV:=no} # auto export HTTP_PROXY and HTTPS_PROXY 122 | -------------------------------------------------------------------------------- /k8s/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM amd64/ubuntu:latest 2 | # only affects build time and means we don't have to specify it for every apt install 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | ENV TZ=Etc/UTC 5 | 6 | # turn off documentation 7 | COPY /etc/dpkg/dpkg.conf.d/01_nodoc /etc/dpkg/dpkg.conf.d/01_nodoc 8 | 9 | # update system 10 | RUN \ 11 | --mount=type=cache,target=/var/cache/apt \ 12 | apt-get update -y --fix-missing --no-install-recommends && apt-get -y --fix-missing --no-install-recommends upgrade 13 | # install core 14 | RUN \ 15 | --mount=type=cache,target=/var/cache/apt \ 16 | apt-get install -fy --fix-missing --no-install-recommends locales apt-transport-https \ 17 | software-properties-common dselect zip unzip xz-utils procps less dos2unix jq groff file bash-completion \ 18 | inetutils-ping
net-tools dnsutils ssh curl wget telnet-ssl netcat socat ca-certificates gnupg2 git \ 19 | postgresql-client mysql-client 20 | 21 | # install dev 22 | RUN \ 23 | --mount=type=cache,target=/var/cache/apt \ 24 | apt-get install -fy --fix-missing --no-install-recommends build-essential make libffi-dev libreadline-dev libncursesw5-dev libssl-dev \ 25 | libsqlite3-dev libgdbm-dev libc6-dev libbz2-dev zlib1g-dev llvm libncurses5-dev liblzma-dev libpq-dev libcurl4-openssl-dev 26 | 27 | # install editors and any extra packages user has requested 28 | ARG EXTRA_PACKAGES 29 | RUN \ 30 | --mount=type=cache,target=/var/cache/apt \ 31 | apt-get install -fy --fix-missing --no-install-recommends libncurses5 joe nano vim $EXTRA_PACKAGES 32 | 33 | # update default editor 34 | RUN update-alternatives --install /usr/bin/editor editor /usr/bin/vim 80 && \ 35 | update-alternatives --install /usr/bin/editor editor /usr/bin/vi 90 36 | 37 | # add some extra locales to system 38 | COPY /etc/locale.gen /etc/locale.gen 39 | RUN LC_ALL=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 LANG=en_US.UTF-8 locale-gen 40 | RUN mkdir -p /usr/local/share/.cache 41 | 42 | # intstall python if requested 43 | COPY /root/bin/requirements.txt /root/requirements.txt 44 | ARG PYTHON_VERSION 45 | RUN /bin/bash -c 'if [ -n "${PYTHON_VERSION}" ] ; then \ 46 | apt-get install -fy python3-dev python3-openssl && \ 47 | export PYENV_ROOT=/usr/local/pyenv && \ 48 | curl https://pyenv.run | bash && \ 49 | command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" && \ 50 | eval "$(pyenv init -)" && \ 51 | eval "$(pyenv virtualenv-init -)" && \ 52 | pyenv install -v $PYTHON_VERSION && \ 53 | pyenv global $PYTHON_VERSION && \ 54 | python -m pip install --upgrade pip && \ 55 | pip install virtualenv pre-commit && \ 56 | pip install -r /root/requirements.txt; \ 57 | fi; \ 58 | rm /root/requirements.txt; \ 59 | ' 60 | 61 | # install php if requested 62 | ARG PHP_VERSION 63 | RUN /bin/bash -c 'if [ -n "${PHP_VERSION}" ] ; then \ 64 | LC_ALL=C.UTF-8 add-apt-repository ppa:ondrej/php && apt update && \ 65 | apt-get install -fy php${PHP_VERSION}-cli php-pear php${PHP_VERSION}-xml php${PHP_VERSION}-curl php${PHP_VERSION}-dev php${PHP_VERSION}-json php${PHP_VERSION}-mysql php${PHP_VERSION}-pgsql php${PHP_VERSION}-sqlite3; \ 66 | fi' 67 | 68 | ARG USE_JAVA 69 | RUN /bin/bash -c 'if [ "${USE_JAVA}" = "yes" ] ; then \ 70 | apt-get install -fy default-jdk-headless; \ 71 | fi' 72 | 73 | ARG USE_DOT_NET 74 | RUN /bin/bash -c 'if [ "${USE_DOT_NET}" = "yes" ] ; then \ 75 | wget https://dot.net/v1/dotnet-install.sh -O dotnet-install.sh; \ 76 | chmod +x ./dotnet-install.sh; \ 77 | ./dotnet-install.sh; \ 78 | mkdir /usr/local/dotnet; \ 79 | mv /root/.dotnet/* /usr/local/dotnet; \ 80 | fi' 81 | 82 | ARG GOLANG_VERSION 83 | RUN /bin/bash -c 'if [ -n "${GOLANG_VERSION}" ] ; then \ 84 | echo "go x86_64" && \ 85 | curl -fsSL https://golang.org/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -C /usr/local -xzf -; \ 86 | fi' 87 | 88 | RUN mkdir -p /usr/local/data 89 | WORKDIR /usr/local/data 90 | 91 | # Install websocat 92 | RUN /bin/bash -c 'set -ex && \ 93 | echo "websocat x86_64" && \ 94 | curl -L "https://github.com/vi/websocat/releases/download/v1.10.0/websocat.x86_64-unknown-linux-musl" -o /usr/local/bin/websocat;\ 95 | chmod +x /usr/local/bin/websocat;' 96 | 97 | # Install AWS CLI and SSM plugin 98 | ARG USE_AWS 99 | ARG AWS_VERSION 100 | RUN /bin/bash -c 'if [ "${USE_AWS}" = "yes" ] ; then \ 101 | echo "aws x86_64" && \ 102 | curl 
"https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${AWS_VERSION}.zip" -o "awscliv2.zip" && \ 103 | curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_64bit/session-manager-plugin.deb" -o "session-manager-plugin.deb" && \ 104 | curl -o /usr/local/bin/aws-iam-authenticator https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.2/2021-07-05/bin/linux/amd64/aws-iam-authenticator; \ 105 | chmod +x /usr/local/bin/aws-iam-authenticator && \ 106 | unzip -q "awscliv2.zip" && ./aws/install && rm awscliv2.zip && \ 107 | dpkg -i session-manager-plugin.deb && rm ./session-manager-plugin.deb; \ 108 | apt install -fy --fix-missing --no-install-recommends amazon-ecr-credential-helper; \ 109 | fi' 110 | 111 | RUN mkdir -p /root/bash_history 112 | 113 | RUN mkdir /root/.aws 114 | RUN chmod 700 /root/.aws 115 | COPY awsls/config.template /root/.aws 116 | COPY awsls/credentials /root/.aws 117 | 118 | ARG NODE_VERSION 119 | 120 | # if bitwarden is enabled so will node 121 | ENV NVM_DIR /usr/local/nvm 122 | RUN /bin/bash -c 'if [ -n "${NODE_VERSION}" ]; then \ 123 | mkdir -p "$NVM_DIR" && \ 124 | curl https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash && \ 125 | . $NVM_DIR/nvm.sh && \ 126 | nvm install "${NODE_VERSION}" && \ 127 | nvm alias default "${NODE_VERSION}" && \ 128 | nvm use default "${NODE_VERSION}" && \ 129 | npm -g i npm@latest yarn npm-check-updates; \ 130 | fi' 131 | 132 | 133 | ARG USE_CDK 134 | # cdk is installed as global node cli module 135 | RUN /bin/bash -c 'if [ "${USE_CDK}" = "yes" ] ; then \ 136 | wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor > /usr/share/keyrings/hashicorp-archive-keyring.gpg; \ 137 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/hashicorp.list; \ 138 | apt-get update && apt-get install terraform; \ 139 | . 
$NVM_DIR/nvm.sh && \ 140 | npm -g i aws-cdk-local@latest aws-cdk@latest cdktf-cli@latest; \ 141 | fi' 142 | 143 | 144 | # if pulumi version is set then install pulumi 145 | ARG PULUMI_VERSION 146 | RUN /bin/bash -c 'if [ -n "${PULUMI_VERSION}" ]; then \ 147 | curl -fsSL https://get.pulumi.com/ | bash -s -- --version $PULUMI_VERSION && \ 148 | mv ~/.pulumi/bin/* /usr/local/bin; \ 149 | fi' 150 | 151 | COPY /etc/term_colors.sh /etc/term_colors.sh 152 | COPY /etc/profile.d /etc/profile.d/ 153 | COPY /etc/skel /etc/skel/ 154 | COPY /etc/ssh /etc/ssh/ 155 | COPY /etc/bash_completion.d /etc/bash_completion.d 156 | COPY init.sh /init.sh 157 | COPY /root/bin/ /root/bin-extra 158 | COPY postStartCommand.sh / 159 | 160 | 161 | RUN chmod a+rx -R /init.sh /postStartCommand.sh /root/bin-extra 162 | 163 | # fix line endings in case files were copied from windows 164 | RUN dos2unix /postStartCommand.sh /init.sh /etc/profile.d/* /etc/skel/.* /root/bin-extra/aws/* 165 | RUN cp /etc/skel/.bashrc /root/.bashrc 166 | 167 | WORKDIR /root 168 | 169 | # remove temp data folder 170 | RUN rm -rf /usr/local/data 171 | 172 | # set default root password 173 | ARG ROOT_PW=ContainersRule 174 | RUN yes "$ROOT_PW" | passwd root 175 | 176 | # install extras 177 | RUN \ 178 | --mount=type=cache,target=/var/cache/apt \ 179 | apt-get install -fy --fix-missing --no-install-recommends gettext-base 180 | 181 | # host project will be mounted here 182 | RUN mkdir /workspace 183 | WORKDIR /workspace 184 | 185 | # transfer build args to env vars for container 186 | 187 | ENV PHP_VERSION=$PHP_VERSION 188 | ENV USE_JAVA=$USE_JAVA 189 | ENV PYTHON_VERSION=$PYTHON_VERSION 190 | ENV GOLANG_VERSION=$GOLANG_VERSION 191 | ENV USE_DOT_NET=$USE_DOT_NET 192 | ENV USE_AWS=$USE_AWS 193 | ENV NODE_VERSION=$NODE_VERSION 194 | ENV USE_BITWARDEN=$USE_BITWARDEN 195 | ENV PULUMI_VERSION=$PULUMI_VERSION 196 | 197 | ENV PATH="$PATH:/root/bin:/root/bin-extra:/root/gdc-host" 198 | 199 | ENTRYPOINT /init.sh 200 | 201 | -------------------------------------------------------------------------------- /k8s/awsls/config.template: -------------------------------------------------------------------------------- 1 | [profile localstack] 2 | region=us-east-1 3 | output=json 4 | endpoint_url = http://${NS_LOCALSTACK_HOST}:4566 5 | -------------------------------------------------------------------------------- /k8s/awsls/credentials: -------------------------------------------------------------------------------- 1 | [localstack] 2 | aws_access_key_id=test 3 | aws_secret_access_key=test 4 | -------------------------------------------------------------------------------- /k8s/build-pod-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Loading container .env-gdc environment file" 4 | source ".env-gdc-local" 5 | source "../.env-gdc" 6 | 7 | echo "building $1:$2" 8 | 9 | export IMAGE=$1 10 | export TAG=$2 11 | docker compose build -------------------------------------------------------------------------------- /k8s/dev-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.31.2 (HEAD) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: dev 10 | name: dev 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: dev 16 | strategy: {} 17 | template: 18 | metadata: 19 |
annotations: 20 | kompose.cmd: kompose convert 21 | kompose.version: 1.31.2 (HEAD) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/k8s-default: "true" 25 | io.kompose.service: dev 26 | spec: 27 | containers: 28 | - env: 29 | - name: COMPOSE_PROJECT_NAME 30 | value: k8s 31 | - name: COPY_CMD_TO_CLIPBOARD 32 | value: "yes" 33 | - name: DEV_CONTAINER 34 | value: 1.9.8 35 | - name: EDITOR 36 | value: vi 37 | - name: GDC_RUN_MODE 38 | value: start 39 | - name: HOST_OS 40 | - name: PIP_CACHE_DIR 41 | value: /usr/local/share/.cache/pip 42 | - name: PROJECT_NAME 43 | value: k8s 44 | - name: USE_CDK 45 | value: "yes" 46 | - name: USE_COLOR_PROMPT 47 | value: "yes" 48 | - name: USE_PRECOMMIT 49 | value: "no" 50 | - name: VISUAL 51 | value: vi 52 | - name: YARN_CACHE_FOLDER 53 | value: /usr/local/share/.cache/yarn 54 | - name: npm_config_cache 55 | value: /usr/local/share/.cache/npm 56 | image: dev 57 | name: k8s-dev-1 58 | resources: {} 59 | stdin: true 60 | tty: true 61 | restartPolicy: Always 62 | status: {} 63 | -------------------------------------------------------------------------------- /k8s/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | dev: 4 | image: ${IMAGE}:${TAG} 5 | stdin_open: true 6 | tty: true 7 | build: 8 | context: . 9 | dockerfile: Dockerfile 10 | args: 11 | - ROOT_PW # sets root password in container 12 | - PULUMI_VERSION # install this version of pulumi 13 | - PHP_VERSION # 5.6, 7.0, 7.1, 7.2, 7.3, 7.4, 8.0, 8.1 14 | - USE_DOT_NET # install ubuntu:latest dotnet core packages 15 | - GOLANG_VERSION # install this golang version. blank for none. latest is 1.18 16 | - PYTHON_VERSION # install this python version 17 | - USE_AWS # install latest aws cli, ssm plugin, and ecr helper 18 | - NODE_VERSION # install this version of node. blank for none 19 | - USE_BITWARDEN # enable bitwarden workflow helpers. will enable NODE_VERSION=18 if not set 20 | - USE_CDK # install aws cdk, terraform and cdk for terraform 21 | - USE_JAVA # install ubuntu:latest openjdk packages 22 | - USE_DOT_NET # install .NET SDK 9.0 23 | - USE_POWERSHELL # install latest stable powershell 24 | - USE_AZURE # install latest Azure cli 25 | - EXTRA_PACKAGES # any extra Ubuntu packages you want installed into the base container 26 | - AWS_VERSION # install AWS CLI version 27 | environment: 28 | - COMPOSE_PROJECT_NAME 29 | - PROJECT_NAME=${COMPOSE_PROJECT_NAME} 30 | - GDC_CONTAINER_NAME # name of GDC container running in docker 31 | - HOST_OS=${OS:-} # used to remap paths in windows format to linux format 32 | - USE_PRECOMMIT # use pre-commit hooks in git to format and lint files 33 | - EDITOR # sets default editor in container. usually set to same as VISUAL 34 | - VISUAL # sets default editor in container. 
usually set to same as EDITOR 35 | - USE_COLOR_PROMPT # enable colored bash prompt 36 | - USE_CDK # install cdk if set to yes 37 | - YARN_CACHE_FOLDER=/usr/local/share/.cache/yarn # cache yarn installs 38 | - PIP_CACHE_DIR=/usr/local/share/.cache/pip # cache pip installs 39 | - npm_config_cache=/usr/local/share/.cache/npm # case npm installs 40 | - GDC_ENTRYPOINT # run this command and exit unless GDC_RUN_MODE 41 | - GDC_COMPOSE_FILES # contains list of all compose files in use by GDC 42 | - GDC_RUN_MODE # start, stop, daemon, clean 43 | - STARTUP_MSG # message to display after startup 44 | - CLIPBOARD_MSG # used internally to show message about launch command copied to clipboard 45 | - COPY_CMD_TO_CLIPBOARD # defaults to yes to copy gdc shell launch command to clipboard 46 | - DEV_CONTAINER=1.9.8 # used to detect if running inside dev container 47 | -------------------------------------------------------------------------------- /k8s/docker-config.json: -------------------------------------------------------------------------------- 1 | { "credsStore": "ecr-login" } 2 | -------------------------------------------------------------------------------- /k8s/etc/bash_completion.d/gdcex.sh: -------------------------------------------------------------------------------- 1 | _get_containers() 2 | { 3 | local cur prev opts 4 | COMPREPLY=() 5 | cur="${COMP_WORDS[COMP_CWORD]}" 6 | prev="${COMP_WORDS[COMP_CWORD-1]}" 7 | opts=$(docker network inspect "$DEVNET_NAME" | jq -r '.[0].Containers[].Name' | grep -v 'arn_aws' | grep -v "$GDC_CONTAINER_NAME" | sort) 8 | 9 | #echo cur=$cur 10 | #echo prev=$prev 11 | #echo opts=$opts 12 | 13 | 14 | COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) 15 | return 0 16 | } 17 | complete -F _get_containers gdcex.sh 18 | -------------------------------------------------------------------------------- /k8s/etc/dpkg/dpkg.conf.d/01_nodoc: -------------------------------------------------------------------------------- 1 | # /etc/dpkg/dpkg.conf.d/01_nodoc 2 | 3 | # Delete locales 4 | path-exclude=/usr/share/locale/* 5 | 6 | # Delete docs 7 | path-exclude=/usr/share/doc/* 8 | # we need to keep copyright files for legal reasons 9 | path-include=/usr/share/doc/*/copyright 10 | 11 | # remove the man pages 12 | path-exclude /usr/share/man/* 13 | path-exclude /usr/share/groff/* 14 | path-exclude /usr/share/info/* 15 | 16 | # lintian stuff is small, but really unnecessary 17 | path-exclude /usr/share/lintian/* 18 | path-exclude /usr/share/linda/* 19 | -------------------------------------------------------------------------------- /k8s/etc/profile.d/02-locale-set.sh: -------------------------------------------------------------------------------- 1 | LC_ALL="en_US.UTF-8" 2 | LC_CTYPE="en_US.UTF-8" 3 | LANGUAGE="en_US.UTF-8" 4 | -------------------------------------------------------------------------------- /k8s/etc/ssh/ssh_config: -------------------------------------------------------------------------------- 1 | 2 | # This is the ssh client system-wide configuration file. See 3 | # ssh_config(5) for more information. This file provides defaults for 4 | # users, and the values can be changed in per-user configuration files 5 | # or on the command line. 6 | 7 | # Configuration data is parsed as follows: 8 | # 1. command line options 9 | # 2. user-specific file 10 | # 3. system-wide file 11 | # Any configuration value is only changed the first time it is set. 
12 | # Thus, host-specific definitions should be at the beginning of the 13 | # configuration file, and defaults at the end. 14 | 15 | # Site-wide defaults for some commonly used options. For a comprehensive 16 | # list of available options, their meanings and defaults, please see the 17 | # ssh_config(5) man page. 18 | 19 | Include /etc/ssh/ssh_config.d/*.conf 20 | 21 | Host * 22 | ForwardAgent yes 23 | ForwardX11 yes 24 | ForwardX11Trusted yes 25 | # PasswordAuthentication yes 26 | # HostbasedAuthentication no 27 | # GSSAPIAuthentication no 28 | # GSSAPIDelegateCredentials no 29 | # GSSAPIKeyExchange no 30 | # GSSAPITrustDNS no 31 | # BatchMode no 32 | # CheckHostIP yes 33 | # AddressFamily any 34 | # ConnectTimeout 0 35 | # StrictHostKeyChecking ask 36 | # IdentityFile ~/.ssh/id_rsa 37 | # IdentityFile ~/.ssh/id_dsa 38 | # IdentityFile ~/.ssh/id_ecdsa 39 | # IdentityFile ~/.ssh/id_ed25519 40 | # Port 22 41 | # Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-cbc,3des-cbc 42 | # MACs hmac-md5,hmac-sha1,umac-64@openssh.com 43 | # EscapeChar ~ 44 | # Tunnel no 45 | # TunnelDevice any:any 46 | # PermitLocalCommand no 47 | # VisualHostKey no 48 | # ProxyCommand ssh -q -W %h:%p gateway.example.com 49 | # RekeyLimit 1G 1h 50 | SendEnv LANG LC_* 51 | HashKnownHosts yes 52 | GSSAPIAuthentication yes 53 | -------------------------------------------------------------------------------- /k8s/etc/ssh/sshd_config: -------------------------------------------------------------------------------- 1 | # $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $ 2 | 3 | # This is the sshd server system-wide configuration file. See 4 | # sshd_config(5) for more information. 5 | 6 | # This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin 7 | 8 | # The strategy used for options in the default sshd_config shipped with 9 | # OpenSSH is to specify options with their default value where 10 | # possible, but leave them commented. Uncommented options override the 11 | # default value. 12 | 13 | Include /etc/ssh/sshd_config.d/*.conf 14 | 15 | #Port 22 16 | #AddressFamily any 17 | #ListenAddress 0.0.0.0 18 | #ListenAddress :: 19 | 20 | #HostKey /etc/ssh/ssh_host_rsa_key 21 | #HostKey /etc/ssh/ssh_host_ecdsa_key 22 | #HostKey /etc/ssh/ssh_host_ed25519_key 23 | 24 | # Ciphers and keying 25 | #RekeyLimit default none 26 | 27 | # Logging 28 | #SyslogFacility AUTH 29 | #LogLevel INFO 30 | 31 | # Authentication: 32 | 33 | #LoginGraceTime 2m 34 | PermitRootLogin yes 35 | #StrictModes yes 36 | #MaxAuthTries 6 37 | #MaxSessions 10 38 | 39 | PubkeyAuthentication yes 40 | 41 | # Expect .ssh/authorized_keys2 to be disregarded by default in future. 42 | #AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 43 | 44 | #AuthorizedPrincipalsFile none 45 | 46 | #AuthorizedKeysCommand none 47 | #AuthorizedKeysCommandUser nobody 48 | 49 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts 50 | #HostbasedAuthentication no 51 | # Change to yes if you don't trust ~/.ssh/known_hosts for 52 | # HostbasedAuthentication 53 | #IgnoreUserKnownHosts no 54 | # Don't read the user's ~/.rhosts and ~/.shosts files 55 | #IgnoreRhosts yes 56 | 57 | # To disable tunneled clear text passwords, change to no here! 
58 | PasswordAuthentication yes 59 | PermitEmptyPasswords no 60 | 61 | # Change to yes to enable challenge-response passwords (beware issues with 62 | # some PAM modules and threads) 63 | ChallengeResponseAuthentication no 64 | 65 | # Kerberos options 66 | #KerberosAuthentication no 67 | #KerberosOrLocalPasswd yes 68 | #KerberosTicketCleanup yes 69 | #KerberosGetAFSToken no 70 | 71 | # GSSAPI options 72 | #GSSAPIAuthentication no 73 | #GSSAPICleanupCredentials yes 74 | #GSSAPIStrictAcceptorCheck yes 75 | #GSSAPIKeyExchange no 76 | 77 | # Set this to 'yes' to enable PAM authentication, account processing, 78 | # and session processing. If this is enabled, PAM authentication will 79 | # be allowed through the ChallengeResponseAuthentication and 80 | # PasswordAuthentication. Depending on your PAM configuration, 81 | # PAM authentication via ChallengeResponseAuthentication may bypass 82 | # the setting of "PermitRootLogin without-password". 83 | # If you just want the PAM account and session checks to run without 84 | # PAM authentication, then enable this but set PasswordAuthentication 85 | # and ChallengeResponseAuthentication to 'no'. 86 | UsePAM yes 87 | 88 | AllowAgentForwarding yes 89 | AllowTcpForwarding yes 90 | #GatewayPorts no 91 | X11Forwarding yes 92 | X11DisplayOffset 10 93 | #X11UseLocalhost yes 94 | PermitTTY yes 95 | PrintMotd no 96 | #PrintLastLog yes 97 | #TCPKeepAlive yes 98 | #PermitUserEnvironment no 99 | #Compression delayed 100 | #ClientAliveInterval 0 101 | #ClientAliveCountMax 3 102 | #UseDNS no 103 | #PidFile /var/run/sshd.pid 104 | #MaxStartups 10:30:100 105 | #PermitTunnel no 106 | #ChrootDirectory none 107 | #VersionAddendum none 108 | 109 | # no default banner path 110 | #Banner none 111 | 112 | # Allow client to pass locale environment variables 113 | AcceptEnv LANG LC_* 114 | 115 | # override default of no subsystems 116 | Subsystem sftp /usr/lib/openssh/sftp-server 117 | 118 | # Example of overriding settings on a per-user basis 119 | #Match User anoncvs 120 | # X11Forwarding no 121 | # AllowTcpForwarding no 122 | # PermitTTY no 123 | # ForceCommand cvs server 124 | -------------------------------------------------------------------------------- /k8s/etc/term_colors.sh: -------------------------------------------------------------------------------- 1 | # use these to change the window / tab title. 
echo -e "$title_start THE_TITLE $title_end" 2 | title_start='\e]0;' 3 | title_end='\a' 4 | 5 | txtblk='\e[0;30m' # Black - Regular 6 | txtred='\e[0;31m' # Red 7 | txtgrn='\e[0;32m' # Green 8 | txtylw='\e[0;33m' # Yellow 9 | txtblu='\e[0;34m' # Blue 10 | txtpur='\e[0;35m' # Purple 11 | txtcyn='\e[0;36m' # Cyan 12 | txtwht='\e[0;37m' # White 13 | 14 | bldblk='\e[1;30m' # Black - Bold 15 | bldred='\e[1;31m' # Red 16 | bldgrn='\e[1;32m' # Green 17 | bldylw='\e[1;33m' # Yellow 18 | bldblu='\e[1;34m' # Blue 19 | bldpur='\e[1;35m' # Purple 20 | bldcyn='\e[1;36m' # Cyan 21 | bldwht='\e[1;37m' # White 22 | 23 | unkblk='\e[4;30m' # Black - Underline 24 | undred='\e[4;31m' # Red 25 | undgrn='\e[4;32m' # Green 26 | undylw='\e[4;33m' # Yellow 27 | undblu='\e[4;34m' # Blue 28 | undpur='\e[4;35m' # Purple 29 | undcyn='\e[4;36m' # Cyan 30 | undwht='\e[4;37m' # White 31 | 32 | bakblk='\e[40m' # Black - Background 33 | bakred='\e[41m' # Red 34 | bakgrn='\e[42m' # Green 35 | bakylw='\e[43m' # Yellow 36 | bakblu='\e[44m' # Blue 37 | bakpur='\e[45m' # Purple 38 | bakcyn='\e[46m' # Cyan 39 | bakwht='\e[47m' # White 40 | txtrst='\e[0m' # Text Reset 41 | 42 | # p_* vars are for use in shell prompts. The extra escape brackets are are so the shell knows not to include the code in the length of the line. 43 | p_txtblk="\[$txtblk\]" # Black - Regular 44 | p_txtred="\[$txtred\]" # Red 45 | p_txtgrn="\[$txtgrn\]" # Green 46 | p_txtylw="\[$txtylw\]" # Yellow 47 | p_txtblu="\[$txtblu\]" # Blue 48 | p_txtpur="\[$txtpur\]" # Purple 49 | p_txtcyn="\[$txtcyn\]" # Cyan 50 | p_txtwht="\[$txtwht\]" # White 51 | 52 | p_bldblk="\[$bldblk\]" # Black - Bold 53 | p_bldred="\[$bldred\]" # Red 54 | p_bldgrn="\[$bldgrn\]" # Green 55 | p_bldylw="\[$bldylw\]" # Yellow 56 | p_bldblu="\[$bldblu\]" # Blue 57 | p_bldpur="\[$bldpur\]" # Purple 58 | p_bldcyn="\[$bldcyn\]" # Cyan 59 | p_bldwht="\[$bldwht\]" # White 60 | 61 | p_unkblk="\[$unkblk\]" # Black - Underline 62 | p_undred="\[$undred\]" # Red 63 | p_undgrn="\[$undgrn\]" # Green 64 | p_undylw="\[$undylw\]" # Yellow 65 | p_undblu="\[$undblu\]" # Blue 66 | p_undpur="\[$undpur\]" # Purple 67 | p_undcyn="\[$undcyn\]" # Cyan 68 | p_undwht="\[$undwht\]" # White 69 | 70 | p_bakblk="\[$bakblk\]" # Black - Background 71 | p_bakred="\[$bakred\]" # Red 72 | p_bakgrn="\[$bakgrn\]" # Green 73 | p_bakylw="\[$bakylw\]" # Yellow 74 | p_bakblu="\[$bakblu\]" # Blue 75 | p_bakpur="\[$bakpur\]" # Purple 76 | p_bakcyn="\[$bakcyn\]" # Cyan 77 | p_bakwht="\[$bakwht\]" # White 78 | p_txtrst="\[$txtrst\]" # Text Reset 79 | 80 | -------------------------------------------------------------------------------- /k8s/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source /etc/term_colors.sh 4 | 5 | if [ -x /postStartCommand.sh ]; then 6 | . /postStartCommand.sh 7 | fi 8 | 9 | envsubst < /root/.aws/config.template > /root/.aws/config; 10 | 11 | echo "sleeping forever...." 12 | tail -f /dev/null 2>&1 13 | 14 | -------------------------------------------------------------------------------- /k8s/postStartCommand.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -r /usr/local/pyenv ]; then 4 | export PYENV_ROOT=/usr/local/pyenv 5 | command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" 6 | eval "$(pyenv init -)" 7 | # eval "$(pyenv virtualenv-init -)" 8 | 9 | if [ "$USE_PRECOMMIT" = "yes" ]; then 10 | if [[ ! 
-r /workspace/.git/hooks/pre-commit || "$(grep -c "File generated by pre-commit" /workspace/.git/hooks/pre-commit)" = "0" ]]; then 11 | echo "installing pre-commit hooks..." 12 | cd /workspace && pre-commit install --allow-missing-config 13 | else 14 | echo "pre-commit hooks already installed, skipping..." 15 | fi 16 | if [ -r /workspace/.git/hooks/pre-commit.legacy ]; then 17 | rm /workspace/.git/hooks/pre-commit.legacy 18 | fi 19 | fi 20 | elif [ "$USE_PRECOMMIT" = "yes" ]; then 21 | echo "USE_PRECOMMIT=yes but python is not enabled. Please set PYTHON_VERSION environment variable" 22 | fi 23 | 24 | if [ -n "$PULUMI_VERSION" ]; then 25 | pulumi plugin install resource docker 26 | pulumi plugin install resource command 27 | pulumi plugin install resource aws 28 | pulumi plugin install resource postgresql 29 | pulumi plugin install resource mysql 30 | fi 31 | 32 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/aws-remote.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import boto3 3 | import os 4 | import click 5 | import sys 6 | 7 | 8 | class Params(object): 9 | def __init__(self, profile=None, region=None): 10 | self.profile = profile 11 | self.region = region 12 | 13 | 14 | def even_spaces(string, spaces=12): 15 | """ 16 | Even spacing for multi-column output 17 | """ 18 | return string + ' ' * (int(spaces) - len(string)) 19 | 20 | 21 | def find_instance_id(instance_id, aws_profile, aws_region): 22 | """ 23 | Looks at provided instance_id, and if it does not start with "-i", looks up instance_id based on Name tag 24 | """ 25 | if str(instance_id).startswith('i-'): 26 | return instance_id 27 | else: 28 | try: 29 | print(f'Finding instance_id for {instance_id}...') 30 | boto_session = boto3.Session(profile_name=aws_profile, region_name=aws_region) 31 | ec2_client = boto_session.client('ec2') 32 | instance_filter = [{ 33 | 'Name': 'tag:Name', 34 | 'Values': [instance_id] 35 | }] 36 | ec2_instance = ec2_client.describe_instances(Filters=instance_filter) 37 | instance_id = ec2_instance['Reservations'][0]['Instances'][0]['InstanceId'] 38 | print(f'Found {instance_id}') 39 | except Exception as e: 40 | print(f'Could not find instance_id for {instance_id}') 41 | print(str(e)) 42 | sys.exit(1) 43 | return instance_id 44 | 45 | 46 | @click.group() 47 | @click.option('--profile', default=None, help='Specify AWS profile') 48 | @click.option('--region', default=None, help='Specify AWS region') 49 | @click.pass_context 50 | def cli(ctx, profile, region): 51 | """ 52 | AWS Remote is a simple, command line tool to view and interact with AWS instances via SSM. 53 | Requires the AWS CLI and Session Manager Plugin to be installed locally. 
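    Example usage (profile, region, and instance name below are illustrative):
        aws-remote.py --profile dev --region us-east-1 list
        aws-remote.py --profile dev session my-app-server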
54 | """ 55 | ctx.obj = Params(profile, region) 56 | 57 | 58 | @cli.command(name='list') 59 | @click.pass_obj 60 | def list_instances(ctx): 61 | """ 62 | List EC2 instances and SSM management status 63 | """ 64 | try: 65 | boto_session = boto3.Session(profile_name=ctx.profile, region_name=ctx.region) 66 | ec2_client = boto_session.client('ec2') 67 | ssm_client = boto_session.client('ssm') 68 | ec2_instances = ec2_client.describe_instances() 69 | ssm_instances = ssm_client.describe_instance_information()['InstanceInformationList'] 70 | print(even_spaces('ID', spaces=22), even_spaces('AZ'), even_spaces('Type'), 71 | even_spaces('State', spaces=10), even_spaces('SSM', spaces=8), even_spaces('Name')) 72 | for instance in ec2_instances['Reservations']: 73 | instance = instance['Instances'][0] 74 | instance_id = instance['InstanceId'] 75 | instance_type = instance['InstanceType'] 76 | instance_az = instance['Placement']['AvailabilityZone'] 77 | instance_state = instance['State']['Name'] 78 | instance_name = '' 79 | if 'Tags' in instance: 80 | for tag in instance['Tags']: 81 | if tag['Key'] == 'Name': 82 | instance_name = tag['Value'] 83 | instance_managed = str(any(instance_id in ssm_instance['InstanceId'] for ssm_instance in ssm_instances)).lower() 84 | print(even_spaces(instance_id, spaces=22), even_spaces(instance_az), 85 | even_spaces(instance_type), even_spaces(instance_state, spaces=10), 86 | even_spaces(instance_managed, spaces=8), even_spaces(instance_name)) 87 | except Exception as e: 88 | print(str(e)) 89 | 90 | 91 | @click.argument('instance_id') 92 | @cli.command() 93 | @click.pass_obj 94 | def session(ctx, instance_id): 95 | """ 96 | Start SSM session with instance id/name 97 | """ 98 | aws_profile = f' --profile {ctx.profile}' if ctx.profile else '' 99 | aws_region = f' --region {ctx.region}' if ctx.region else '' 100 | instance_id = find_instance_id(instance_id, ctx.profile, ctx.region) 101 | os.system(f"aws{aws_profile}{aws_region} ssm start-session --target {instance_id}") 102 | 103 | 104 | @click.argument('instance_id') 105 | @click.argument('instance_port') 106 | @click.argument('local_port') 107 | @cli.command() 108 | @click.pass_obj 109 | def port_forward(ctx, instance_id, local_port, instance_port): 110 | """ 111 | Start SSM port forward to instance id/name 112 | """ 113 | aws_profile = f' --profile {ctx.profile}' if ctx.profile else '' 114 | aws_region = f' --region {ctx.region}' if ctx.region else '' 115 | instance_id = find_instance_id(instance_id, ctx.profile, ctx.region) 116 | os.system(f'aws{aws_profile}{aws_region} ssm start-session --target {instance_id} ' 117 | f'--document-name AWS-StartPortForwardingSession --parameters "portNumber"=["{instance_port}"],' 118 | f'"localPortNumber"=["{local_port}"]') 119 | 120 | 121 | if __name__ == '__main__': 122 | cli() 123 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/aws_assume_remaining.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ -z "$AWS_SESSION_EXPIRATION" ]; then 5 | REMAIN="" 6 | else 7 | if [[ $OSTYPE =~ msys* ]] # GIT BASH 8 | then 9 | e=$(date -u -d "$AWS_SESSION_EXPIRATION" "+%s") 10 | s=$(date -u "+%s") 11 | fi 12 | if [[ "$OSTYPE" == "linux-gnu" ]] # WINDOWS WSL / linux 13 | then 14 | e=$(date -u -d "$AWS_SESSION_EXPIRATION" "+%s") 15 | s=$(date -u "+%s") 16 | fi 17 | if [[ $OSTYPE =~ darwin* ]] # MAC 18 | then 19 | e=$(date -j -u -f "%FT%T%Z" "$AWS_SESSION_EXPIRATION" "+%s") 20 | 
s=$(date -u "+%s") 21 | fi 22 | diff=$((e - s)) 23 | if [ "$diff" -le "0" ]; then 24 | REMAIN="X" 25 | else 26 | if [[ $OSTYPE =~ msys* ]] # GIT BASH 27 | then 28 | REMAIN=$(date -u -d @$diff "+%T") 29 | fi 30 | if [[ "$OSTYPE" == "linux-gnu" ]] # WINDOWS WSL / linux 31 | then 32 | REMAIN=$(date -u -d @$diff "+%T") 33 | fi 34 | if [[ $OSTYPE =~ darwin* ]] # MAC 35 | then 36 | REMAIN=$(date -j -u -f "%s" $diff "+%T") 37 | fi 38 | fi 39 | fi 40 | echo -n $REMAIN 41 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/export-aws-session.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | file="$1" 4 | 5 | if [ -z "$file" ]; then 6 | file=aws_session 7 | fi 8 | 9 | env | grep AWS_ | sed 's/^AWS_/export AWS_/'> "$file" 10 | env | grep PULUMI_ | sed 's/^PULUMI_/export PULUMI_/'>> "$file" 11 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/setup-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | AWS_DIR=~/shared/.aws 4 | 5 | CONFIG=$AWS_DIR/config 6 | CREDS=$AWS_DIR/credentials 7 | 8 | echo "Setting up local AWS credentials" 9 | 10 | if [ -r $CONFIG ]; then 11 | echo "existing config file detected. aborting..." 12 | exit 13 | fi 14 | 15 | if [ ! -r $AWS_DIR ]; then 16 | mkdir -p $AWS_DIR || exit 1 17 | fi 18 | 19 | echo "Enter aws username in format of first.last" 20 | read -r username 21 | if [ -z "$username" ]; then 22 | echo "Blank username. aborting..." 23 | exit 24 | fi 25 | 26 | cat << EOF > $CONFIG 27 | [profile $username-identity] 28 | region=us-west-2 29 | output=json 30 | 31 | [profile localstack] 32 | region=us-east-1 33 | output=text 34 | EOF 35 | 36 | cat << EOF > $CREDS 37 | [$username-identity] 38 | aws_access_key_id= 39 | aws_secret_access_key= 40 | 41 | [localstack] 42 | aws_access_key_id=test 43 | aws_secret_access_key=test 44 | EOF 45 | 46 | ln -s $AWS_DIR ~/.aws 47 | 48 | echo "Sign in at this URL https://IDENT_ACCOUNT_ALIAS.signin.aws.amazon.com/console then navigate to https://us-east-1.console.aws.amazon.com/iam/home#/security_credentials to generate your AWS keys if you dont already have them." 49 | echo "Edit $CREDS file and add keys to your identity section" 50 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/ssm-jump-tunnel-old.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-jump-tunnel.sh [EC2 Bastion instance id] [availability zone] [local port] [remote host] [remote port] 7 | Script to create an SSH tunnel through a private EC2 instance to another private resource port. 8 | For example: 9 | - your machine 10 | - bastion/jump host in AWS private subnet with access to the resource you want to tunnel to 11 | - resource you want to access such as an RDS endpoint 12 | 13 | Example Usage: ssm-jump-tunnel.sh i-abcd1234 us-west-2a 9191 myrdscluster.cluster-1234oubcj1jy.us-west-2.rds.amazonaws.com 5432 14 | END 15 | ) 16 | 17 | 18 | # error/helper conditions 19 | 20 | if [[ $# -ne 5 ]]; then 21 | echo "$USAGE" >&2 22 | exit 1 23 | fi 24 | 25 | if ! [ -x "$(command -v aws)" ]; then 26 | echo 'Error: aws-cli is not installed.' 
>&2 27 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 28 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 29 | exit 1 30 | fi 31 | 32 | # if we are running in a dev container listen on all interface so port can be forwarded to host if desired 33 | if [ -n "$DEV_CONTAINER" ]; then 34 | interface="0.0.0.0:" 35 | else 36 | interface="" 37 | fi 38 | 39 | instance_id=$1 40 | shift 41 | availability_zone=$1 42 | shift 43 | local_port=$1 44 | shift 45 | remote_host=$1 46 | shift 47 | remote_port=$1 48 | shift 49 | 50 | chars=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 51 | key_name="aws_temp_" 52 | for i in {1..16} ; do 53 | key_name=$key_name"${chars:RANDOM%${#chars}:1}" 54 | done 55 | 56 | echo "Starting SSM tunnel to: $remote_host:$remote_port with local port $local_port" 57 | echo "Generating public key" 58 | echo "ssh-keygen -q -t rsa -f ~/.ssh/$key_name -N '' <<&1" 59 | ssh-keygen -q -t rsa -f ~/.ssh/$key_name -N '' <<&1 60 | ret=$? 61 | if [ $ret -ne 0 ]; then 62 | echo "Failed to generate $key_name rsa key with exit code ($ret). Aborting..." 63 | exit $ret 64 | fi 65 | 66 | echo "copying temp ssh key to instance ${1}" 67 | echo "aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://\"~/.ssh/$key_name.pub 2>&1\"" 68 | aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://"~/.ssh/$key_name.pub" 2>&1 69 | ret=$? 70 | if [ $ret -ne 0 ]; then 71 | echo "Failed to copy $key_name rsa key to instance with exit code ($ret). Aborting..." 72 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 73 | exit $ret 74 | fi 75 | 76 | echo "ssh -i ~/.ssh/$key_name -N -L $interface$local_port:$remote_host:$remote_port ssm-user@$instance_id -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\"" 77 | echo "the connection is not fully established until you see a message containing \"Permanently added '$instance_id' (ECDSA) to the list of known hosts.\"" 78 | echo "press ^C to close port forward and cleanup" 79 | ssh -i ~/.ssh/$key_name -N -L $interface$local_port:$remote_host:$remote_port ssm-user@$instance_id -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -o IdentitiesOnly=yes -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" 2>&1 80 | ret=$? 81 | if [ $ret -ne 0 ]; then 82 | echo "Failed to open tunnel with exit code ($ret)." 
83 | fi 84 | echo "Cleaning up rsa keys" 85 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 86 | 87 | # immediately quit after ending the session 88 | exit $ret 89 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/ssm-jump-tunnel.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | USAGE=$(cat <<-END 4 | ./ssm-jump-tunnel.sh [EC2 Bastion instance id] [region] [local port] [remote host] [remote port] 5 | Script to create an SSH tunnel through a private EC2 instance to another private resource port. 6 | For example: 7 | - your machine 8 | - bastion/jump host in AWS private subnet with access to the resource you want to tunnel to 9 | - resource you want to access such as an RDS endpoint 10 | 11 | Example Usage: ssm-jump-tunnel.sh i-abcd1234 eu-west-1 5432 db-cluster.cluster-abcdefg6reul.eu-west-1.rds.amazonaws.com 5432 12 | END 13 | ) 14 | 15 | if [[ $# -ne 5 ]]; then 16 | echo "$USAGE" >&2 17 | exit 1 18 | fi 19 | 20 | if ! [ -x "$(command -v aws)" ]; then 21 | echo 'Error: aws-cli is not installed.' >&2 22 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 23 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 24 | exit 1 25 | fi 26 | 27 | 28 | instance_id=$1 29 | shift 30 | region=$1 31 | if [[ $region =~ [a-zA-Z]$ ]]; then 32 | region="${region%?}" 33 | fi 34 | shift 35 | local_port=$1 36 | shift 37 | remote_host=$1 38 | shift 39 | remote_port=$1 40 | shift 41 | 42 | echo "Starting SSM tunnel to: $remote_host:$remote_port with local port $local_port" 43 | echo "Press ^C to close port forward." 44 | 45 | if [ -z "$DEV_CONTAINER" ]; then 46 | echo "The connection is not fully established until you see a message containing \"Waiting for connections...\"" 47 | aws ssm start-session \ 48 | --output text \ 49 | --region "$region" \ 50 | --target "$instance_id" \ 51 | --document-name AWS-StartPortForwardingSessionToRemoteHost \ 52 | --parameters host="$remote_host",portNumber="$remote_port",localPortNumber="$local_port" 53 | ret=$? 54 | if [ $ret -ne 0 ]; then 55 | echo "Failed to open tunnel with exit code ($ret)." 56 | fi 57 | exit $ret 58 | else 59 | echo "socat exposing localhost:$local_port to GDC eth0...." 60 | aws ssm start-session \ 61 | --output text \ 62 | --region "$region" \ 63 | --target "$instance_id" \ 64 | --document-name AWS-StartPortForwardingSessionToRemoteHost \ 65 | --parameters host="$remote_host",portNumber="$remote_port",localPortNumber="$local_port" &>/dev/null & 66 | 67 | # if we are running in a dev container listen on all interface so port can be forwarded to host if desired 68 | socat "tcp-l:$local_port",fork,reuseaddr,bind="$(ifconfig | grep inet | head -n1 | cut -dt -f2 | cut -d' ' -f2)" "tcp:127.0.0.1:$local_port" 69 | fi 70 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/ssm-scp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-jump-scp.sh [EC2 Bastion instance id] [full-az] [mode] [local-file] '[remote-file]' 7 | Script to copy a files between localhost host and remote private EC2 instance. 
8 | mode can be either "push" to send file to instance or "pull" to download file from instance. 9 | Example Usage to copy from local to instance: ssm-jump-scp.sh i-abcd1234 us-east-1a push myfile.txt '~/myfile.txt' 10 | Note: Only one file can be copied at a time. The local file will always be first file specified and remote file second. 11 | The remote file or folder should be quoted to prevent local path expansion. 12 | Note2: Do not specify the "user@" portion of the local-file or remote-file. 13 | END 14 | ) 15 | 16 | 17 | # error/helper conditions 18 | 19 | if [[ $# -ne 5 ]]; then 20 | echo "$USAGE" >&2 21 | exit 1 22 | fi 23 | 24 | if ! [ -x "$(command -v aws)" ]; then 25 | echo 'Error: aws-cli is not installed.' >&2 26 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 27 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 28 | exit 1 29 | fi 30 | 31 | 32 | instance_id=$1 33 | shift 34 | availability_zone=$1 35 | shift 36 | direction=$1 37 | shift 38 | local_file=$1 39 | shift 40 | remote_file=$1 41 | shift 42 | 43 | if [[ "$direction" != "push" && "$direction" != "pull" ]]; then 44 | echo "$USAGE" >&2 45 | exit 1 46 | fi 47 | 48 | chars=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 49 | key_name="aws_temp_" 50 | for i in {1..16} ; do 51 | key_name=$key_name"${chars:RANDOM%${#chars}:1}" 52 | done 53 | 54 | 55 | echo "Generating public key" 56 | echo "ssh-keygen -q -t ed25519 -f ~/.ssh/$key_name -N '' <<<y 2>&1" 57 | ssh-keygen -q -t ed25519 -f ~/.ssh/$key_name -N '' <<<y 2>&1 58 | ret=$? 59 | if [ $ret -ne 0 ]; then 60 | echo "Failed to generate $key_name ssh key with exit code ($ret). Aborting..." 61 | exit $ret 62 | fi 63 | 64 | echo "copying temp ssh key to instance $instance_id" 65 | echo "aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://\"~/.ssh/$key_name.pub\"" 66 | aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://"~/.ssh/$key_name.pub" 67 | ret=$? 68 | if [ $ret -ne 0 ]; then 69 | echo "Failed to copy $key_name ssh key to instance with exit code ($ret). Aborting..." 70 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 71 | exit $ret 72 | fi 73 | 74 | echo "copying file" 75 | ret=0 76 | if [[ "$direction" == "push" ]]; then 77 | echo "Pushing $local_file to $remote_file on $instance_id" 78 | echo "scp -i ~/.ssh/$key_name -o \"UserKnownHostsFile=/dev/null\" -o \"StrictHostKeyChecking=no\" -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\" $local_file ssm-user@$direction:$remote_file" 79 | scp -r -i ~/.ssh/$key_name -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" $local_file ssm-user@$instance_id:$remote_file 80 | ret=$? 81 | if [ $ret -ne 0 ]; then 82 | echo "Failed to scp exit code ($ret)."
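# the pull branch below mirrors the push case with the scp source and destination swapped; the temporary key pair is removed either way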
83 | fi 84 | else 85 | echo "Pulling $remote_file from $remote_file on $instance_id" 86 | echo "scp -i ~/.ssh/$key_name -o \"UserKnownHostsFile=/dev/null\" -o \"StrictHostKeyChecking=no\" -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\" ssm-user@$direction:$remote_file $local_file" 87 | scp -r -i ~/.ssh/$key_name -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" ssm-user@$instance_id:$remote_file $local_file 88 | ret=$? 89 | if [ $ret -ne 0 ]; then 90 | echo "Failed to scp exit code ($ret)." 91 | fi 92 | fi 93 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 94 | 95 | # immediately quit after ending the session 96 | #exit $ret 97 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/ssm-send-command.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-send-command.sh 7 | 8 | Example Usage: 9 | export AWS_REGION=us-east-1 10 | source ~/bin/assume-role.sh some.user some-identity arn:aws:iam:::role/BiToolsIdentityAccessRole 11 | source ~/bin/ssm-send-command.sh 12 | 13 | Example send command: 14 | source ~/bin/ssm-send-command.sh i-1nst4nc3ID "sudo rm /opt/app/airflow/output.log && /opt/app/airflow/update-plus.sh prod >> /opt/app/airflow/output.log" 15 | 16 | END 17 | ) 18 | 19 | # error/helper conditions 20 | if [[ $# -lt 2 ]]; then 21 | echo "$USAGE" 22 | exit 0 23 | fi 24 | 25 | if [[ $1 == "-h" ]]; then 26 | echo "$USAGE" 27 | exit 0 28 | fi 29 | 30 | if ! [ -x "$(command -v aws)" ]; then 31 | echo 'Error: aws-cli is not installed.' >&2 32 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 33 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 34 | exit 1 35 | fi 36 | 37 | instance_id=$1 38 | command=$2 39 | wait_for_output=$3 40 | 41 | echo "Starting session to: $instance_id with command $command" 42 | 43 | cmdId=$(aws ssm send-command --instance-ids "$instance_id" --document-name "AWS-RunShellScript" --query "Command.CommandId" --output text --parameters "commands=[${command}]") 44 | [ $? -ne 0 ] && { echo "$USAGE"; exit 1; } 45 | if [ -n "$wait_for_output" ] ; then 46 | while [ "$(aws ssm list-command-invocations --command-id "$cmdId" --query "CommandInvocations[].Status" --output text)" == "InProgress" ]; do sleep 1; done 47 | aws ssm list-command-invocations --command-id "$cmdId" --details --query "CommandInvocations[*].CommandPlugins[*].Output[]" --output text 48 | fi 49 | 50 | -------------------------------------------------------------------------------- /k8s/root/bin/aws/ssm-ssh.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-ssh.sh [EC2 instance id] 7 | 8 | Example Usage (select instance): ./ssm-ssh.sh i-abcdef1234 9 | END 10 | ) 11 | 12 | # error/helper conditions 13 | if [[ $# -ne 1 ]]; then 14 | echo "$USAGE" 15 | exit 0 16 | fi 17 | 18 | if [[ $1 == "-h" ]]; then 19 | echo "$USAGE" 20 | exit 0 21 | fi 22 | 23 | if ! 
[ -x "$(command -v aws)" ]; then 24 | echo 'Error: aws-cli is not installed.' >&2 25 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 26 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 27 | exit 1 28 | fi 29 | 30 | instance_id=$1 31 | shift 32 | 33 | start_session() { 34 | echo "Starting session to: $instance_id" 35 | aws ssm start-session --target $instance_id 36 | 37 | # immediately quit after ending the session 38 | # exit $? 39 | } 40 | 41 | start_session 42 | -------------------------------------------------------------------------------- /k8s/root/bin/remote-client.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export FORCE_INTERACTIVE=yes 3 | source /root/.bashrc 4 | cd /workspace 5 | echo $@ 6 | $@ 7 | -------------------------------------------------------------------------------- /k8s/root/bin/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | click 3 | awscliv2 4 | awscli-local 5 | terraform-local 6 | pipenv 7 | localstack -------------------------------------------------------------------------------- /noop: -------------------------------------------------------------------------------- 1 | This file does nothing. Its purpose is to allow wildcard copy in dockerfile 2 | -------------------------------------------------------------------------------- /postStartCommand.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$USE_HOST_HOME" = "yes" && "$USE_HOME_BIN" = "yes" && -r ~/home-host/bin ]]; then 4 | cp -a ~/home-host/bin ~ 5 | rm ~/bin/*.exe ~/bin/*.bat 2>/dev/null || true 6 | dos2unix ~/bin/* || true 7 | fi 8 | 9 | if [ "$USE_AWS" = "yes" ]; then 10 | if [[ "$USE_HOST_HOME" = "yes" && -r ~/home-host/.aws ]]; then 11 | echo "Using host .aws folder" 12 | if [ "$USE_AWS_SYMLINK" = "yes" ]; then 13 | ln -s ~/home-host/.aws ~/.aws 14 | else 15 | cp -a ~/home-host/.aws ~ 16 | dos2unix ~/.aws/* 17 | fi 18 | elif [ -r ~/shared/.aws ]; then 19 | echo "Using container ~/shared/.aws folder" 20 | ln -s ~/shared/.aws ~/.aws 21 | else 22 | echo "Creating container ~/shared/.aws folder" 23 | mkdir -p ~/shared/.aws 24 | chmod 700 ~/shared/.aws 25 | ln -s ~/shared/.aws ~/.aws 26 | fi 27 | chmod og-rwx ~/.aws -R 28 | fi 29 | 30 | if [[ "$USE_HOST_HOME" = "yes" && -r ~/home-host/.gitconfig ]]; then 31 | echo "Copying .gitconfig from host" 32 | cp -a ~/home-host/.gitconfig ~ 33 | elif [ -n "$GIT_NAME" ]; then 34 | echo "Setting up new ~/.gitconfig" 35 | cat <<EOF >~/.gitconfig 36 | [user] 37 | name = $GIT_NAME 38 | email = $GIT_EMAIL 39 | EOF 40 | else 41 | echo "No .gitconfig setup. please set (GIT_NAME and GIT_EMAIL) or USE_HOST_HOME environment variables" 42 | fi 43 | 44 | if [ -r /usr/local/pyenv ]; then 45 | export PYENV_ROOT=/usr/local/pyenv 46 | command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" 47 | eval "$(pyenv init -)" 48 | # eval "$(pyenv virtualenv-init -)" 49 | 50 | if [ "$USE_PRECOMMIT" = "yes" ]; then 51 | if [[ ! -r /workspace/.git/hooks/pre-commit || "$(grep -c "File generated by pre-commit" /workspace/.git/hooks/pre-commit)" = "0" ]]; then 52 | echo "installing pre-commit hooks..."
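# install runs from /workspace so the hook lands in the project .git/hooks; --allow-missing-config lets the hook pass in repos without a .pre-commit-config.yaml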
53 | cd /workspace && pre-commit install --allow-missing-config 54 | else 55 | echo "pre-commit hooks already installed, skipping..." 56 | fi 57 | if [ -r /workspace/.git/hooks/pre-commit.legacy ]; then 58 | rm /workspace/.git/hooks/pre-commit.legacy 59 | fi 60 | fi 61 | elif [ "$USE_PRECOMMIT" = "yes" ]; then 62 | echo "USE_PRECOMMIT=yes but python is not enabled. Please set PYTHON_VERSION environment variable" 63 | fi 64 | 65 | if [ -n "$PULUMI_VERSION" ]; then 66 | pulumi plugin install resource docker 67 | pulumi plugin install resource command 68 | pulumi plugin install resource aws 69 | pulumi plugin install resource azure-native 70 | pulumi plugin install resource postgresql 71 | pulumi plugin install resource mysql 72 | fi 73 | 74 | if [[ -n "$PROXY_URL" && "$PROXY_AUTO_EXPORT_ENV" = "yes" ]]; then 75 | export HTTP_PROXY=$PROXY_URL 76 | export HTTPS_PROXY=$PROXY_URL_SSL 77 | fi 78 | 79 | if [[ -n "$USE_PROXY" && "$USE_PROXY" != "no" && "$USE_PROXY_CA" = "yes" ]]; then 80 | if [ -r /workspace/proxy_volume/mitmproxy-ca-cert.pem ]; then 81 | echo "Setting up proxy CA..." 82 | cp /workspace/proxy_volume/mitmproxy-ca-cert.pem /usr/local/share/ca-certificates/mitmproxy-ca-cert.crt 83 | update-ca-certificates 84 | cat /usr/local/share/ca-certificates/mitmproxy-ca-cert.crt >> "$(python -m certifi)" 85 | export AWS_CA_BUNDLE=/usr/local/share/ca-certificates/mitmproxy-ca-cert.crt 86 | else 87 | echo "Unable to locate mitmproxy-ca-cert.pem. Please ensure the proxy_volume is mounted" 88 | fi 89 | fi 90 | 91 | 92 | #if [ "$USE_LOCALSTACK_DNS" = "yes" ]; then 93 | # if [ -z "$LS_MAIN_CONTAINER_NAME" ]; then 94 | # echo "LS_MAIN_CONTAINER_NAME is not set! Ignoring USE_LOCALSTACK_DNS..." 95 | # else 96 | # LS_IP=$(host -4 "$LS_MAIN_CONTAINER_NAME" | cut -d' ' -f4) 97 | # if [ -n "$LS_IP" ]; then 98 | # echo "LOCALSTACK IP $LS_IP" 99 | # echo "nameserver $LS_IP" > /tmp/resolv.conf 100 | # cp /etc/resolv.conf /etc/resolv.conf.org 101 | # cat /etc/resolv.conf >> /tmp/resolv.conf 102 | # mv /tmp/resolv.conf /etc/resolv.conf 103 | # else 104 | # echo "LOCALSTACK IP COULD NOT BE FOUND! Ignoring USE_LOCALSTACK_DNS..." 
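# (when enabled, this commented-out block puts the localstack container IP first in /etc/resolv.conf so DNS lookups inside the GDC go through localstack)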
105 | # fi 106 | # fi 107 | #fi 108 | 109 | 110 | mkdir -p ~/.ssh 111 | if [[ "$USE_HOST_HOME" = "yes" && -r ~/home-host/.ssh ]]; then 112 | cp -a ~/home-host/.ssh/* ~/.ssh 113 | dos2unix ~/.ssh/* 114 | fi 115 | chmod 700 ~/.ssh 116 | chmod og-rwx ~/.ssh/* 117 | chown -R root.root ~/.ssh 118 | if [ -n "$SSH_KEYSCAN_HOSTS" ]; then 119 | ssh-keyscan $SSH_KEYSCAN_HOSTS >>~/.ssh/known_hosts 120 | fi 121 | 122 | #echo ~/.ssh/id_ed25519 | ssh-keygen -t ed25519 -N '' 123 | 124 | if [ -n "$SSH_SERVER_PORT" ]; then 125 | echo "Enabling ssh server on host port $SSH_SERVER_PORT" 126 | systemctl enable ssh 127 | # ensure ssh is started 128 | service ssh restart 129 | fi 130 | -------------------------------------------------------------------------------- /root/bin/auth0/get-auth-token.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -n "$1" ]; then 4 | UN="$1" 5 | elif [ -n "$AUTH0_DEFAULT_USER" ]; then 6 | UN="$AUTH0_DEFAULT_USER" 7 | else 8 | UN="user1" 9 | fi 10 | 11 | if [ "$UN" = "-h" ] || [ "$UN" = "--help" ]; then 12 | echo "Usage: get-auth-token [username]" 13 | exit 0 14 | fi 15 | 16 | if [ -n "$AUTH0_LOCAL_USERS_FILE" ]; then 17 | FILE="/workspace/$AUTH0_LOCAL_USERS_FILE" # use override auth0 mock user file 18 | else 19 | FILE="/root/gdc-host/auth0_mock/users.json" # use default auth0 mock user file 20 | fi 21 | 22 | PW=$(jq ".$UN.pw" "$FILE" -r) 23 | 24 | RET=$(curl -s -X POST -H "Content-Type: application/json" -d "{\"username\":\"$UN\", \"pw\":\"$PW\"}" "http://$AUTH0_CONTAINER_NAME:3001/login?redirect=nope") 25 | if [ -n "$RET" ]; then 26 | echo "$RET" 27 | exit 1 28 | fi 29 | 30 | 31 | TOKEN=$(curl -s "http://$AUTH0_CONTAINER_NAME:3001/access_token") 32 | echo "$TOKEN" 33 | -------------------------------------------------------------------------------- /root/bin/auth0/start-auth0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make sure windows git bash does not alter paths 3 | export MSYS_NO_PATHCONV=1 4 | 5 | cd /root/gdc-host || exit 1 6 | 7 | if [ -r ".env-gdc" ]; then 8 | echo "Loading container .env-gdc environment file" 9 | source ".env-gdc" 10 | fi 11 | 12 | if [ -r ".env-gdc-local" ]; then 13 | echo "Loading container .env-gdc-local environment file" 14 | source ".env-gdc-local" 15 | fi 16 | 17 | if [ -r "/workspace/.env-gdc" ]; then 18 | echo "Loading project .env-gdc environment file" 19 | source "/workspace/.env-gdc" 20 | fi 21 | if [ -r "/workspace/.env-gdc-local" ]; then 22 | echo "Loading project .env-gdc-local environment file" 23 | source "/workspace/.env-gdc-local" 24 | fi 25 | 26 | IS_HOST=$(echo "$GDC_COMPOSE_FILES" | grep -sc '\-f dc-auth0-host.yml') 27 | if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then 28 | echo "Usage $0 [host|internal]" 29 | echo "if no parameters are passed, then GDC launch env variables are used to automatically determine mode." 
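# "host" forces dc-auth0-host.yml and "internal" forces dc-auth0.yml below; with no argument the mode detected from GDC_COMPOSE_FILES above is used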
30 | exit 0 31 | elif [ "$1" = "host" ]; then 32 | IS_HOST="1" 33 | elif [ "$1" = "internal" ]; then 34 | IS_HOST="0" 35 | fi 36 | if [ "$IS_HOST" = "0" ]; then 37 | echo "start-auth0.sh using container mode" 38 | COMPOSE_FILES="-f dc-auth0.yml" 39 | else 40 | echo "start-auth0.sh using host mode" 41 | COMPOSE_FILES="-f dc-auth0-host.yml" 42 | fi 43 | 44 | if [ "$AUTH0_LOCAL_USERS_FILE" ]; then 45 | COMPOSE_FILES="$COMPOSE_FILES -f dc-auth0-local-users.yml" 46 | fi 47 | docker-compose $COMPOSE_FILES up -d --build --force-recreate 48 | 49 | sleep 5 50 | -------------------------------------------------------------------------------- /root/bin/auth0/stop-auth0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make sure windows git bash does not alter paths 3 | export MSYS_NO_PATHCONV=1 4 | 5 | if [[ "$1" = "--help" || "$1" = "-h" ]]; then 6 | echo "Used to stop running auth0 container with name $AUTH0_CONTAINER_NAME" 7 | exit 0 8 | fi 9 | 10 | 11 | docker rm -f "$AUTH0_CONTAINER_NAME" 12 | -------------------------------------------------------------------------------- /root/bin/aws/aws-remote.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import boto3 3 | import os 4 | import click 5 | import sys 6 | 7 | 8 | class Params(object): 9 | def __init__(self, profile=None, region=None): 10 | self.profile = profile 11 | self.region = region 12 | 13 | 14 | def even_spaces(string, spaces=12): 15 | """ 16 | Even spacing for multi-column output 17 | """ 18 | return string + ' ' * (int(spaces) - len(string)) 19 | 20 | 21 | def find_instance_id(instance_id, aws_profile, aws_region): 22 | """ 23 | Looks at provided instance_id, and if it does not start with "-i", looks up instance_id based on Name tag 24 | """ 25 | if str(instance_id).startswith('i-'): 26 | return instance_id 27 | else: 28 | try: 29 | print(f'Finding instance_id for {instance_id}...') 30 | boto_session = boto3.Session(profile_name=aws_profile, region_name=aws_region) 31 | ec2_client = boto_session.client('ec2') 32 | instance_filter = [{ 33 | 'Name': 'tag:Name', 34 | 'Values': [instance_id] 35 | }] 36 | ec2_instance = ec2_client.describe_instances(Filters=instance_filter) 37 | instance_id = ec2_instance['Reservations'][0]['Instances'][0]['InstanceId'] 38 | print(f'Found {instance_id}') 39 | except Exception as e: 40 | print(f'Could not find instance_id for {instance_id}') 41 | print(str(e)) 42 | sys.exit(1) 43 | return instance_id 44 | 45 | 46 | @click.group() 47 | @click.option('--profile', default=None, help='Specify AWS profile') 48 | @click.option('--region', default=None, help='Specify AWS region') 49 | @click.pass_context 50 | def cli(ctx, profile, region): 51 | """ 52 | AWS Remote is a simple, command line tool to view and interact with AWS instances via SSM. 53 | Requires the AWS CLI and Session Manager Plugin to be installed locally. 
54 | """ 55 | ctx.obj = Params(profile, region) 56 | 57 | 58 | @cli.command(name='list') 59 | @click.pass_obj 60 | def list_instances(ctx): 61 | """ 62 | List EC2 instances and SSM management status 63 | """ 64 | try: 65 | boto_session = boto3.Session(profile_name=ctx.profile, region_name=ctx.region) 66 | ec2_client = boto_session.client('ec2') 67 | ssm_client = boto_session.client('ssm') 68 | ec2_instances = ec2_client.describe_instances() 69 | ssm_instances = ssm_client.describe_instance_information()['InstanceInformationList'] 70 | print(even_spaces('ID', spaces=22), even_spaces('AZ'), even_spaces('Type'), 71 | even_spaces('State', spaces=10), even_spaces('SSM', spaces=8), even_spaces('Name')) 72 | for instance in ec2_instances['Reservations']: 73 | instance = instance['Instances'][0] 74 | instance_id = instance['InstanceId'] 75 | instance_type = instance['InstanceType'] 76 | instance_az = instance['Placement']['AvailabilityZone'] 77 | instance_state = instance['State']['Name'] 78 | instance_name = '' 79 | if 'Tags' in instance: 80 | for tag in instance['Tags']: 81 | if tag['Key'] == 'Name': 82 | instance_name = tag['Value'] 83 | instance_managed = str(any(instance_id in ssm_instance['InstanceId'] for ssm_instance in ssm_instances)).lower() 84 | print(even_spaces(instance_id, spaces=22), even_spaces(instance_az), 85 | even_spaces(instance_type), even_spaces(instance_state, spaces=10), 86 | even_spaces(instance_managed, spaces=8), even_spaces(instance_name)) 87 | except Exception as e: 88 | print(str(e)) 89 | 90 | 91 | @click.argument('instance_id') 92 | @cli.command() 93 | @click.pass_obj 94 | def session(ctx, instance_id): 95 | """ 96 | Start SSM session with instance id/name 97 | """ 98 | aws_profile = f' --profile {ctx.profile}' if ctx.profile else '' 99 | aws_region = f' --region {ctx.region}' if ctx.region else '' 100 | instance_id = find_instance_id(instance_id, ctx.profile, ctx.region) 101 | os.system(f"aws{aws_profile}{aws_region} ssm start-session --target {instance_id}") 102 | 103 | 104 | @click.argument('instance_id') 105 | @click.argument('instance_port') 106 | @click.argument('local_port') 107 | @cli.command() 108 | @click.pass_obj 109 | def port_forward(ctx, instance_id, local_port, instance_port): 110 | """ 111 | Start SSM port forward to instance id/name 112 | """ 113 | aws_profile = f' --profile {ctx.profile}' if ctx.profile else '' 114 | aws_region = f' --region {ctx.region}' if ctx.region else '' 115 | instance_id = find_instance_id(instance_id, ctx.profile, ctx.region) 116 | os.system(f'aws{aws_profile}{aws_region} ssm start-session --target {instance_id} ' 117 | f'--document-name AWS-StartPortForwardingSession --parameters "portNumber"=["{instance_port}"],' 118 | f'"localPortNumber"=["{local_port}"]') 119 | 120 | 121 | if __name__ == '__main__': 122 | cli() 123 | -------------------------------------------------------------------------------- /root/bin/aws/aws_assume_remaining.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ -z "$AWS_SESSION_EXPIRATION" ]; then 5 | REMAIN="" 6 | else 7 | if [[ $OSTYPE =~ msys* ]] # GIT BASH 8 | then 9 | e=$(date -u -d "$AWS_SESSION_EXPIRATION" "+%s") 10 | s=$(date -u "+%s") 11 | fi 12 | if [[ "$OSTYPE" == "linux-gnu" ]] # WINDOWS WSL / linux 13 | then 14 | e=$(date -u -d "$AWS_SESSION_EXPIRATION" "+%s") 15 | s=$(date -u "+%s") 16 | fi 17 | if [[ $OSTYPE =~ darwin* ]] # MAC 18 | then 19 | e=$(date -j -u -f "%FT%T%Z" "$AWS_SESSION_EXPIRATION" "+%s") 20 | 
s=$(date -u "+%s") 21 | fi 22 | diff=$((e - s)) 23 | if [ "$diff" -le "0" ]; then 24 | REMAIN="X" 25 | else 26 | if [[ $OSTYPE =~ msys* ]] # GIT BASH 27 | then 28 | REMAIN=$(date -u -d @$diff "+%T") 29 | fi 30 | if [[ "$OSTYPE" == "linux-gnu" ]] # WINDOWS WSL / linux 31 | then 32 | REMAIN=$(date -u -d @$diff "+%T") 33 | fi 34 | if [[ $OSTYPE =~ darwin* ]] # MAC 35 | then 36 | REMAIN=$(date -j -u -f "%s" $diff "+%T") 37 | fi 38 | fi 39 | fi 40 | echo -n $REMAIN 41 | -------------------------------------------------------------------------------- /root/bin/aws/export-aws-session.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | file="$1" 4 | 5 | if [ -z "$file" ]; then 6 | file=aws_session 7 | fi 8 | 9 | env | grep AWS_ | sed 's/^AWS_/export AWS_/'> "$file" 10 | env | grep PULUMI_ | sed 's/^PULUMI_/export PULUMI_/'>> "$file" 11 | -------------------------------------------------------------------------------- /root/bin/aws/setup-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | AWS_DIR=~/shared/.aws 4 | 5 | CONFIG=$AWS_DIR/config 6 | CREDS=$AWS_DIR/credentials 7 | 8 | echo "Setting up local AWS credentials" 9 | 10 | if [ -r $CONFIG ]; then 11 | echo "existing config file detected. aborting..." 12 | exit 13 | fi 14 | 15 | if [ ! -r $AWS_DIR ]; then 16 | mkdir -p $AWS_DIR || exit 1 17 | fi 18 | 19 | echo "Enter aws username in format of first.last" 20 | read -r username 21 | if [ -z "$username" ]; then 22 | echo "Blank username. aborting..." 23 | exit 24 | fi 25 | 26 | cat << EOF > $CONFIG 27 | [profile $username-identity] 28 | region=us-west-2 29 | output=json 30 | 31 | [profile localstack] 32 | region=us-east-1 33 | output=text 34 | EOF 35 | 36 | cat << EOF > $CREDS 37 | [$username-identity] 38 | aws_access_key_id= 39 | aws_secret_access_key= 40 | 41 | [localstack] 42 | aws_access_key_id=test 43 | aws_secret_access_key=test 44 | EOF 45 | 46 | ln -s $AWS_DIR ~/.aws 47 | 48 | echo "Sign in at this URL https://IDENT_ACCOUNT_ALIAS.signin.aws.amazon.com/console then navigate to https://us-east-1.console.aws.amazon.com/iam/home#/security_credentials to generate your AWS keys if you dont already have them." 49 | echo "Edit $CREDS file and add keys to your identity section" 50 | -------------------------------------------------------------------------------- /root/bin/aws/ssm-jump-tunnel-old.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-jump-tunnel.sh [EC2 Bastion instance id] [availability zone] [local port] [remote host] [remote port] 7 | Script to create an SSH tunnel through a private EC2 instance to another private resource port. 8 | For example: 9 | - your machine 10 | - bastion/jump host in AWS private subnet with access to the resource you want to tunnel to 11 | - resource you want to access such as an RDS endpoint 12 | 13 | Example Usage: ssm-jump-tunnel.sh i-abcd1234 us-west-2a 9191 myrdscluster.cluster-1234oubcj1jy.us-west-2.rds.amazonaws.com 5432 14 | END 15 | ) 16 | 17 | 18 | # error/helper conditions 19 | 20 | if [[ $# -ne 5 ]]; then 21 | echo "$USAGE" >&2 22 | exit 1 23 | fi 24 | 25 | if ! [ -x "$(command -v aws)" ]; then 26 | echo 'Error: aws-cli is not installed.' 
>&2 27 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 28 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 29 | exit 1 30 | fi 31 | 32 | # if we are running in a dev container listen on all interface so port can be forwarded to host if desired 33 | if [ -n "$DEV_CONTAINER" ]; then 34 | interface="0.0.0.0:" 35 | else 36 | interface="" 37 | fi 38 | 39 | instance_id=$1 40 | shift 41 | availability_zone=$1 42 | shift 43 | local_port=$1 44 | shift 45 | remote_host=$1 46 | shift 47 | remote_port=$1 48 | shift 49 | 50 | chars=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 51 | key_name="aws_temp_" 52 | for i in {1..16} ; do 53 | key_name=$key_name"${chars:RANDOM%${#chars}:1}" 54 | done 55 | 56 | echo "Starting SSM tunnel to: $remote_host:$remote_port with local port $local_port" 57 | echo "Generating public key" 58 | echo "ssh-keygen -q -t rsa -f ~/.ssh/$key_name -N '' <<<y 2>&1" 59 | ssh-keygen -q -t rsa -f ~/.ssh/$key_name -N '' <<<y 2>&1 60 | ret=$? 61 | if [ $ret -ne 0 ]; then 62 | echo "Failed to generate $key_name rsa key with exit code ($ret). Aborting..." 63 | exit $ret 64 | fi 65 | 66 | echo "copying temp ssh key to instance $instance_id" 67 | echo "aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://\"~/.ssh/$key_name.pub\" 2>&1" 68 | aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://"~/.ssh/$key_name.pub" 2>&1 69 | ret=$? 70 | if [ $ret -ne 0 ]; then 71 | echo "Failed to copy $key_name rsa key to instance with exit code ($ret). Aborting..." 72 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 73 | exit $ret 74 | fi 75 | 76 | echo "ssh -i ~/.ssh/$key_name -N -L $interface$local_port:$remote_host:$remote_port ssm-user@$instance_id -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\"" 77 | echo "the connection is not fully established until you see a message containing \"Permanently added '$instance_id' (ECDSA) to the list of known hosts.\"" 78 | echo "press ^C to close port forward and cleanup" 79 | ssh -i ~/.ssh/$key_name -N -L $interface$local_port:$remote_host:$remote_port ssm-user@$instance_id -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ServerAliveInterval=30 -o ServerAliveCountMax=3 -o IdentitiesOnly=yes -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" 2>&1 80 | ret=$? 81 | if [ $ret -ne 0 ]; then 82 | echo "Failed to open tunnel with exit code ($ret)."
83 | fi 84 | echo "Cleaning up rsa keys" 85 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 86 | 87 | # immediately quit after ending the session 88 | exit $ret 89 | -------------------------------------------------------------------------------- /root/bin/aws/ssm-jump-tunnel.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | USAGE=$(cat <<-END 4 | ./ssm-jump-tunnel.sh [EC2 Bastion instance id] [region] [local port] [remote host] [remote port] 5 | Script to create an SSH tunnel through a private EC2 instance to another private resource port. 6 | For example: 7 | - your machine 8 | - bastion/jump host in AWS private subnet with access to the resource you want to tunnel to 9 | - resource you want to access such as an RDS endpoint 10 | 11 | Example Usage: ssm-jump-tunnel.sh i-abcd1234 eu-west-1 5432 db-cluster.cluster-abcdefg6reul.eu-west-1.rds.amazonaws.com 5432 12 | END 13 | ) 14 | 15 | if [[ $# -ne 5 ]]; then 16 | echo "$USAGE" >&2 17 | exit 1 18 | fi 19 | 20 | if ! [ -x "$(command -v aws)" ]; then 21 | echo 'Error: aws-cli is not installed.' >&2 22 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 23 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 24 | exit 1 25 | fi 26 | 27 | 28 | instance_id=$1 29 | shift 30 | region=$1 31 | if [[ $region =~ [a-zA-Z]$ ]]; then 32 | region="${region%?}" 33 | fi 34 | shift 35 | local_port=$1 36 | shift 37 | remote_host=$1 38 | shift 39 | remote_port=$1 40 | shift 41 | 42 | echo "Starting SSM tunnel to: $remote_host:$remote_port with local port $local_port" 43 | echo "Press ^C to close port forward." 44 | 45 | if [ -z "$DEV_CONTAINER" ]; then 46 | echo "The connection is not fully established until you see a message containing \"Waiting for connections...\"" 47 | aws ssm start-session \ 48 | --output text \ 49 | --region "$region" \ 50 | --target "$instance_id" \ 51 | --document-name AWS-StartPortForwardingSessionToRemoteHost \ 52 | --parameters host="$remote_host",portNumber="$remote_port",localPortNumber="$local_port" 53 | ret=$? 54 | if [ $ret -ne 0 ]; then 55 | echo "Failed to open tunnel with exit code ($ret)." 56 | fi 57 | exit $ret 58 | else 59 | echo "socat exposing localhost:$local_port to GDC eth0...." 60 | aws ssm start-session \ 61 | --output text \ 62 | --region "$region" \ 63 | --target "$instance_id" \ 64 | --document-name AWS-StartPortForwardingSessionToRemoteHost \ 65 | --parameters host="$remote_host",portNumber="$remote_port",localPortNumber="$local_port" &>/dev/null & 66 | 67 | # if we are running in a dev container listen on all interface so port can be forwarded to host if desired 68 | socat "tcp-l:$local_port",fork,reuseaddr,bind="$(ifconfig | grep inet | head -n1 | cut -dt -f2 | cut -d' ' -f2)" "tcp:127.0.0.1:$local_port" 69 | fi 70 | -------------------------------------------------------------------------------- /root/bin/aws/ssm-scp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-jump-scp.sh [EC2 Bastion instance id] [full-az] [mode] [local-file] '[remote-file]' 7 | Script to copy a files between localhost host and remote private EC2 instance. 8 | mode can be ether "push" to send file to instance or "pull" to download file from instance. 
9 | Example Usage to copy from local to instance: ssm-jump-scp.sh i-abcd1234 us-east-1a push myfile.txt '~/myfile.txt' 10 | Note: Only one file can be copied at a time. The local file will always be first file specified and remote file second. 11 | The remote file or folder should be quoted to prevent local path expansion. 12 | Note2: Do not specify the "user@" portion of the local-file or remote-file. 13 | END 14 | ) 15 | 16 | 17 | # error/helper conditions 18 | 19 | if [[ $# -ne 5 ]]; then 20 | echo "$USAGE" >&2 21 | exit 1 22 | fi 23 | 24 | if ! [ -x "$(command -v aws)" ]; then 25 | echo 'Error: aws-cli is not installed.' >&2 26 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 27 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 28 | exit 1 29 | fi 30 | 31 | 32 | instance_id=$1 33 | shift 34 | availability_zone=$1 35 | shift 36 | direction=$1 37 | shift 38 | local_file=$1 39 | shift 40 | remote_file=$1 41 | shift 42 | 43 | if [[ "$direction" != "push" && "$direction" != "pull" ]]; then 44 | echo "$USAGE" >&2 45 | exit 1 46 | fi 47 | 48 | chars=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 49 | key_name="aws_temp_" 50 | for i in {1..16} ; do 51 | key_name=$key_name"${chars:RANDOM%${#chars}:1}" 52 | done 53 | 54 | 55 | echo "Generating public key" 56 | echo "ssh-keygen -q -t ed25519 -f ~/.ssh/$key_name -N '' <<<y 2>&1" 57 | ssh-keygen -q -t ed25519 -f ~/.ssh/$key_name -N '' <<<y 2>&1 58 | ret=$? 59 | if [ $ret -ne 0 ]; then 60 | echo "Failed to generate $key_name ssh key with exit code ($ret). Aborting..." 61 | exit $ret 62 | fi 63 | 64 | echo "copying temp ssh key to instance $instance_id" 65 | echo "aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://\"~/.ssh/$key_name.pub\"" 66 | aws ec2-instance-connect send-ssh-public-key --instance-id $instance_id --instance-os-user ssm-user --availability-zone $availability_zone --ssh-public-key file://"~/.ssh/$key_name.pub" 67 | ret=$? 68 | if [ $ret -ne 0 ]; then 69 | echo "Failed to copy $key_name ssh key to instance with exit code ($ret). Aborting..." 70 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 71 | exit $ret 72 | fi 73 | 74 | echo "copying file" 75 | ret=0 76 | if [[ "$direction" == "push" ]]; then 77 | echo "Pushing $local_file to $remote_file on $instance_id" 78 | echo "scp -i ~/.ssh/$key_name -o \"UserKnownHostsFile=/dev/null\" -o \"StrictHostKeyChecking=no\" -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\" $local_file ssm-user@$direction:$remote_file" 79 | scp -r -i ~/.ssh/$key_name -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" $local_file ssm-user@$instance_id:$remote_file 80 | ret=$? 81 | if [ $ret -ne 0 ]; then 82 | echo "Failed to scp exit code ($ret)."
83 | fi 84 | else 85 | echo "Pulling $remote_file from $remote_file on $instance_id" 86 | echo "scp -i ~/.ssh/$key_name -o \"UserKnownHostsFile=/dev/null\" -o \"StrictHostKeyChecking=no\" -o ProxyCommand=\"aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\" ssm-user@$direction:$remote_file $local_file" 87 | scp -r -i ~/.ssh/$key_name -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ProxyCommand="aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p" ssm-user@$instance_id:$remote_file $local_file 88 | ret=$? 89 | if [ $ret -ne 0 ]; then 90 | echo "Failed to scp exit code ($ret)." 91 | fi 92 | fi 93 | rm ~/.ssh/$key_name ~/.ssh/$key_name.pub 2>&1 > /dev/null 94 | 95 | # immediately quit after ending the session 96 | #exit $ret 97 | -------------------------------------------------------------------------------- /root/bin/aws/ssm-send-command.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-send-command.sh 7 | 8 | Example Usage: 9 | export AWS_REGION=us-east-1 10 | source ~/bin/assume-role.sh some.user some-identity arn:aws:iam:::role/BiToolsIdentityAccessRole 11 | source ~/bin/ssm-send-command.sh 12 | 13 | Example send command: 14 | source ~/bin/ssm-send-command.sh i-1nst4nc3ID "sudo rm /opt/app/airflow/output.log && /opt/app/airflow/update-plus.sh prod >> /opt/app/airflow/output.log" 15 | 16 | END 17 | ) 18 | 19 | # error/helper conditions 20 | if [[ $# -lt 2 ]]; then 21 | echo "$USAGE" 22 | exit 0 23 | fi 24 | 25 | if [[ $1 == "-h" ]]; then 26 | echo "$USAGE" 27 | exit 0 28 | fi 29 | 30 | if ! [ -x "$(command -v aws)" ]; then 31 | echo 'Error: aws-cli is not installed.' >&2 32 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 33 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 34 | exit 1 35 | fi 36 | 37 | instance_id=$1 38 | command=$2 39 | wait_for_output=$3 40 | 41 | echo "Starting session to: $instance_id with command $command" 42 | 43 | cmdId=$(aws ssm send-command --instance-ids "$instance_id" --document-name "AWS-RunShellScript" --query "Command.CommandId" --output text --parameters "commands=[${command}]") 44 | [ $? -ne 0 ] && { echo "$USAGE"; exit 1; } 45 | if [ -n "$wait_for_output" ] ; then 46 | while [ "$(aws ssm list-command-invocations --command-id "$cmdId" --query "CommandInvocations[].Status" --output text)" == "InProgress" ]; do sleep 1; done 47 | aws ssm list-command-invocations --command-id "$cmdId" --details --query "CommandInvocations[*].CommandPlugins[*].Output[]" --output text 48 | fi 49 | 50 | -------------------------------------------------------------------------------- /root/bin/aws/ssm-ssh.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | bold=$(tput bold) 3 | normal=$(tput sgr0) 4 | 5 | USAGE=$(cat <<-END 6 | ./ssm-ssh.sh [EC2 instance id] 7 | 8 | Example Usage (select instance): ./ssm-ssh.sh i-abcdef1234 9 | END 10 | ) 11 | 12 | # error/helper conditions 13 | if [[ $# -ne 1 ]]; then 14 | echo "$USAGE" 15 | exit 0 16 | fi 17 | 18 | if [[ $1 == "-h" ]]; then 19 | echo "$USAGE" 20 | exit 0 21 | fi 22 | 23 | if ! 
[ -x "$(command -v aws)" ]; then 24 | echo 'Error: aws-cli is not installed.' >&2 25 | echo 'Try installing aws cli v2: go here - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html' >&2 26 | echo 'Then install the ssm plugin from - https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html' >&2 27 | exit 1 28 | fi 29 | 30 | instance_id=$1 31 | shift 32 | 33 | start_session() { 34 | echo "Starting session to: $instance_id" 35 | aws ssm start-session --target $instance_id 36 | 37 | # immediately quit after ending the session 38 | # exit $? 39 | } 40 | 41 | start_session 42 | -------------------------------------------------------------------------------- /root/bin/check-gdc-update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source /etc/term_colors.sh 4 | 5 | if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then 6 | echo "Checks GDC repository for updated version of GDC." 7 | exit 0 8 | fi 9 | 10 | GDC_ROOT=/root/gdc-host 11 | if [ ! -r $GDC_ROOT ]; then 12 | echo "GDC_ROOT: $GDC_ROOT is not mounted or not readable. Cant check GDC version." 13 | exit 0 14 | fi 15 | REPO_VER=$(git --work-tree=$GDC_ROOT --git-dir=$GDC_ROOT/.git describe --match 'v[0-9]*\.[0-9]*\.[0-9]*' --abbrev=0 --tags \ 16 | "$(git --work-tree=$GDC_ROOT --git-dir=$GDC_ROOT/.git rev-list --tags --max-count=1)" | cut -dv -f2) 17 | 18 | OUR_VERSION=$(grep '\- DEV_CONTAINER=' $GDC_ROOT/docker-compose.yml | cut -d= -f2 | cut -d' ' -f1) 19 | 20 | if [ "$REPO_VER" != "$OUR_VERSION" ]; then 21 | P1=$(printf "%03d" "$(echo "$REPO_VER"| cut -d. -f1)") 22 | P2=$(printf "%03d" "$(echo "$REPO_VER"| cut -d. -f2)") 23 | P3=$(printf "%03d" "$(echo "$REPO_VER"| cut -d. -f3)") 24 | RV="$P1$P2$P3" 25 | P1=$(printf "%03d" "$(echo "$OUR_VERSION"| cut -d. -f1)") 26 | P2=$(printf "%03d" "$(echo "$OUR_VERSION"| cut -d. -f2)") 27 | P3=$(printf "%03d" "$(echo "$OUR_VERSION"| cut -d. -f3)") 28 | OV="$P1$P2$P3" 29 | if [ "$RV" -gt "$OV" ]; then 30 | echo "Your GDC version $OUR_VERSION" 31 | echo -e "$txtylw""GDC Update $REPO_VER available! Please exit GDC, pull then restart GDC with env var CLEAN=yes $txtrst" 32 | exit 33 | fi 34 | fi 35 | 36 | echo "Your GDC version $OUR_VERSION is current." 
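# the zero-padding above turns each x.y.z into a fixed-width number (e.g. 1.12.3 -> 001012003) so versions can be compared with a plain -gt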
37 | -------------------------------------------------------------------------------- /root/bin/docker/docker-logs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONTAINER="$1" 4 | shift 5 | CONTAINER=$(docker ps | grep "$CONTAINER" | cut -f1 -d' ' | head -n1) 6 | 7 | if [ -z "$CONTAINER" ]; then 8 | echo "container not found" 9 | exit 1 10 | fi 11 | docker logs -f "$CONTAINER" 12 | -------------------------------------------------------------------------------- /root/bin/docker/docker-shell.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONTAINER="$1" 4 | shift 5 | CONTAINER=$(docker ps | grep "$CONTAINER" | cut -f1 -d' ' | head -n1) 6 | 7 | if [ -z "$CONTAINER" ]; then 8 | echo "container not found" 9 | exit 1 10 | fi 11 | SHELL="$1" 12 | if [ -z "$SHELL" ]; then 13 | SHELL="bash -l" 14 | fi 15 | shift 16 | echo "docker exec -tiu root $CONTAINER $SHELL" 17 | docker exec -tiu root "$CONTAINER" $SHELL "$@" 18 | -------------------------------------------------------------------------------- /root/bin/docker/docker-stats.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker stats --format "table {{.Container}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" 4 | -------------------------------------------------------------------------------- /root/bin/docker/docker-stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONTAINER="$1" 4 | shift 5 | CONTAINER=$(docker ps | grep "$CONTAINER" | cut -f1 -d' ' | head -n1) 6 | 7 | if [ -z "$CONTAINER" ]; then 8 | echo "container not found" 9 | exit 1 10 | fi 11 | docker stop "$CONTAINER" 12 | -------------------------------------------------------------------------------- /root/bin/gdc-pipeline-exec.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export FORCE_INTERACTIVE=yes 4 | source /root/.bashrc 5 | 6 | $@ 7 | -------------------------------------------------------------------------------- /root/bin/gdcex.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker exec -it $@ 4 | -------------------------------------------------------------------------------- /root/bin/ls/start-ls.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make sure windows git bash does not alter paths 3 | export MSYS_NO_PATHCONV=1 4 | 5 | cd /root/gdc-host || exit 1 6 | 7 | if [ -r ".env-gdc" ]; then 8 | echo "Loading container .env-gdc environment file" 9 | source ".env-gdc" 10 | fi 11 | 12 | if [ -r ".env-gdc-local" ]; then 13 | echo "Loading container .env-gdc-local environment file" 14 | source ".env-gdc-local" 15 | fi 16 | 17 | if [ -r "/workspace/.env-gdc" ]; then 18 | echo "Loading project .env-gdc environment file" 19 | source "/workspace/.env-gdc" 20 | fi 21 | if [ -r "/workspace/.env-gdc-local" ]; then 22 | echo "Loading project .env-gdc-local environment file" 23 | source "/workspace/.env-gdc-local" 24 | fi 25 | 26 | export LS_VERSION=${LS_VERSION:='latest'} 27 | 28 | IS_HOST=$(echo "$GDC_COMPOSE_FILES" | grep -sc '\-f dc-ls-host.yml') 29 | if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then 30 | echo "Usage $0 [host|internal]" 31 | echo "if no 
parameters are passed, then GDC launch env variables are used to automatically determine mode." 32 | exit 0 33 | elif [ "$1" = "host" ]; then 34 | IS_HOST="1" 35 | elif [ "$1" = "internal" ]; then 36 | IS_HOST="0" 37 | fi 38 | 39 | COMPOSE_FILES="-f dc-ls.yml" 40 | if [ "$IS_HOST" = "1" ]; then 41 | echo "start-ls.sh using host mode" 42 | COMPOSE_FILES="$COMPOSE_FILES -f dc-ls-host.yml" 43 | fi 44 | if [ -n "$LOCALSTACK_STATIC_IP" ]; then 45 | COMPOSE_FILES="$COMPOSE_FILES -f dc-ls-static-ip.yml" 46 | fi 47 | if [ -n "$LOCALSTACK_HOST_DNS_PORT" ]; then 48 | COMPOSE_FILES="$COMPOSE_FILES -f dc-ls-host-dns.yml" 49 | fi 50 | if [ "$USE_LOCALSTACK_PERSISTENCE" = "yes" ]; then 51 | COMPOSE_FILES="$COMPOSE_FILES -f dc-ls-persist.yml" 52 | fi 53 | if [ "$USE_LOCALSTACK_PRO" = "yes" ]; then 54 | COMPOSE_FILES="$COMPOSE_FILES -f dc-ls-pro.yml" 55 | fi 56 | 57 | 58 | if [ -z "$COMPOSE_BIN" ]; then 59 | COMPOSE_BIN="docker compose" 60 | fi 61 | 62 | $COMPOSE_BIN $COMPOSE_FILES up -d --build --force-recreate 63 | 64 | sleep 5 65 | -------------------------------------------------------------------------------- /root/bin/ls/stop-ls.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make sure windows git bash does not alter paths 3 | export MSYS_NO_PATHCONV=1 4 | 5 | if [[ "$1" = "--help" || "$1" = "-h" ]]; then 6 | echo "Used to stop running localstack main container with name $LS_MAIN_CONTAINER_NAME" 7 | exit 0 8 | fi 9 | 10 | docker rm -f "$LS_MAIN_CONTAINER_NAME" 11 | docker ps -a --format '{{.Names}}' | grep "^$(echo "$LS_MAIN_CONTAINER_NAME-" | tr "_" "-")" | xargs -I {} docker rm -f {} 12 | 13 | sleep 2 14 | -------------------------------------------------------------------------------- /root/bin/remote-client.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export FORCE_INTERACTIVE=yes 3 | source /root/.bashrc 4 | cd /workspace 5 | echo $@ 6 | $@ 7 | -------------------------------------------------------------------------------- /root/bin/requirements.txt: -------------------------------------------------------------------------------- 1 | virtualenv 2 | pipenv 3 | pipx 4 | black 5 | pylint 6 | pyright 7 | pre-commit 8 | boto3 9 | click 10 | awscliv2 11 | awscli-local 12 | terraform-local 13 | urllib3 14 | requests 15 | simplejson 16 | attrs 17 | charset_normalizer 18 | idna 19 | certifi 20 | requests_file 21 | isodate 22 | pytz 23 | jsonschema 24 | httpx 25 | textual 26 | textual-dev 27 | asyncio 28 | humanize 29 | argparse 30 | pyperclip 31 | rich 32 | typing_extensions 33 | -------------------------------------------------------------------------------- /root/bin/run-gdc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" = "--help" || "$1" = "-h" ]]; then 4 | echo "Used to start a GDC session from inside a GDC session." 
5 | echo "Execute this as if you are running run-dev-container.sh" 6 | exit 0 7 | fi 8 | 9 | P=$(pwd | sed 's/\/workspace//') 10 | P="$HOST_PROJECT_PATH$P" 11 | 12 | echo "Using host project path of $P" 13 | 14 | CUSTOM_ENVS="" 15 | oIFS="$IFS" 16 | IFS=$'\n' 17 | for ENV in $(env); do 18 | if [[ "$ENV" =~ ^GDC_ENV_ ]]; then 19 | if [ -z "$CUSTOM_ENVS" ]; then 20 | CUSTOM_ENVS="$ENV" 21 | else 22 | CUSTOM_ENVS="$CUSTOM_ENVS"$'\n'"$ENV" 23 | fi 24 | fi 25 | done 26 | 27 | CUSTOM_ENV_FILE="/tmp/run_gdc_custom_env_$COMPOSE_PROJECT_NAME" 28 | if [ -r "$CUSTOM_ENV_FILE" ]; then 29 | rm -rf "$CUSTOM_ENV_FILE" 30 | fi 31 | cat << EOF > "$CUSTOM_ENV_FILE" 32 | export FORCE_PROJECT_PATH=$P 33 | export NO_DEVNET_RM=yes 34 | export DEVNET_NAME=$DEVNET_NAME 35 | export OS=$HOST_OS 36 | export GDC_DIR=$GDC_DIR 37 | export HOME=$HOST_HOME 38 | export PATH=$PATH 39 | export USER=$USER 40 | export LC_CTYPE=${LC_ALL:-${LC_CTYPE:-$LANG}} 41 | export GDC_PARENT=$COMPOSE_PROJECT_NAME 42 | EOF 43 | 44 | if [ -n "$CUSTOM_ENVS" ]; then 45 | for i in $CUSTOM_ENVS; do 46 | cat << EOF >> "$CUSTOM_ENV_FILE" 47 | export $i 48 | EOF 49 | done 50 | 51 | echo "" >> "$CUSTOM_ENV_FILE" 52 | echo "CUSTOM_ENVS: $CUSTOM_ENVS" 53 | fi 54 | IFS="$oIFS" 55 | unset oIFS 56 | 57 | 58 | if [[ "$*" =~ daemon ]]; then 59 | env -i bash --noprofile --rcfile "$CUSTOM_ENV_FILE" -ic "run-dev-container.sh $* &> /dev/null" 60 | else 61 | env -i bash --noprofile --rcfile "$CUSTOM_ENV_FILE" -ic "run-dev-container.sh $*" 62 | fi 63 | --------------------------------------------------------------------------------
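A minimal sketch of driving run-gdc.sh from inside a running GDC (the project path and the GDC_ENV_ variable below are placeholders; anything after run-gdc.sh is handed straight to run-dev-container.sh on the host):

    cd /workspace/my-project            # the path under /workspace is remapped onto HOST_PROJECT_PATH
    export GDC_ENV_MY_SETTING=1         # GDC_ENV_* variables are forwarded into the nested session
    run-gdc.sh daemon                   # "daemon" in the arguments makes the wrapper discard the nested run's output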