├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .vscode └── settings.json ├── API.md ├── Dockerfile ├── LICENCE ├── Makefile ├── README.md ├── api ├── async │ ├── publish.ts │ └── publish_test.ts └── webhook │ └── github.ts ├── badwords-example.txt ├── deps.ts ├── docker-compose.yml ├── indexes ├── atlas_search_index_mapping.json ├── builds_by_name_and_version.json ├── modules_by_is_unlisted_and_star_count.json └── modules_by_owner_and_repo.json ├── run-tests.sh ├── terraform ├── alerts.tf ├── api.tf ├── cdn.tf ├── datastore.tf ├── iam.tf ├── main.tf ├── meta.tf ├── outputs.tf ├── providers.tf ├── publish.tf ├── terraform.tfvars.example ├── variables.tf └── webhook.tf ├── test_deps.ts └── utils ├── database_test.ts ├── datastore_database.ts ├── git.ts ├── http.ts ├── moderation.ts ├── moderation_test.ts ├── net.ts ├── net_test.ts ├── storage.ts ├── test_utils.ts ├── testdata └── deno-v1.3.2.json ├── types.ts ├── utils.ts ├── utils_bench.ts ├── utils_test.ts └── webhooks.d.ts /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-20.04-xl 12 | env: 13 | AWS_DEFAULT_REGION: us-east-1 14 | AWS_BACKUP_REGION: eu-central-1 15 | GOOGLE_PROJECT_ID: test-project 16 | CI: true 17 | steps: 18 | - name: Setup Deno environment 19 | uses: denolib/setup-deno@v2.3.0 20 | with: 21 | deno-version: v1.24.0 22 | 23 | - uses: actions/checkout@v3 24 | 25 | - name: Format 26 | run: | 27 | deno fmt --check 28 | cd terraform && terraform fmt -check -recursive 29 | 30 | - name: Lint 31 | run: deno lint --unstable 32 | 33 | - name: Set up Cloud SDK 34 | uses: google-github-actions/setup-gcloud@v1 35 | with: 36 | install_components: "beta,cloud-datastore-emulator" 37 | 38 | - name: Run Datastore Emulator 39 | run: | 40 | gcloud config set project $GOOGLE_PROJECT_ID 41 | gcloud beta emulators datastore start --use-firestore-in-datastore-mode --host-port=0.0.0.0:8081 & 42 | sleep 5 43 | echo "DATASTORE_HOST=http://localhost:8081" >> $GITHUB_ENV 44 | 45 | - name: Docker build 46 | run: make build 47 | 48 | - name: Test 49 | run: make test 50 | 51 | - name: Validate 52 | run: | 53 | terraform init -backend=false 54 | terraform validate 55 | working-directory: terraform 56 | 57 | - name: Push container to ECR (prod) 58 | if: github.ref == 'refs/heads/main' 59 | run: | 60 | ECR_ID=$(aws sts get-caller-identity --query Account --output text).dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com 61 | IMAGE_ID=$ECR_ID/deno_registry2:$GITHUB_RUN_ID 62 | aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $ECR_ID 63 | docker tag deno_registry2:latest $IMAGE_ID 64 | docker push $IMAGE_ID 65 | env: 66 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 67 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 68 | 69 | - name: Authenticate with GCP (prod) 70 | if: github.ref == 'refs/heads/main' 71 | uses: google-github-actions/auth@v0 72 | with: 73 | credentials_json: ${{ secrets.GCP_SA_KEY }} 74 | 75 | - name: Create terraform plan (prod) 76 | if: github.ref == 'refs/heads/main' 77 | run: | 78 | terraform init \ 79 | -backend-config "bucket=$TERRAFORM_STATE_BUCKET" \ 80 | -backend-config "region=$AWS_DEFAULT_REGION" 81 | terraform plan \ 82 | -var env=prod \ 83 | -var apiland_auth_token=${{ secrets.APILAND_AUTH_TOKEN }} \ 84 | -var aws_backup_region=$AWS_BACKUP_REGION \ 85 | 
-var aws_default_region=$AWS_DEFAULT_REGION \
86 | -var docker_tag=$GITHUB_RUN_ID \
87 | -var github_token=${{ secrets.GH_TOKEN }} \
88 | -var google_client_email=${{ secrets.GOOGLE_CLIENT_EMAIL }} \
89 | -var google_project_id=${{ secrets.GOOGLE_PROJECT_ID }} \
90 | -var google_private_key_id=${{ secrets.GOOGLE_PRIVATE_KEY_ID }} \
91 | -var "google_private_key=${{ secrets.GOOGLE_PRIVATE_KEY }}" \
92 | -out plan.tfplan
93 | env:
94 | TERRAFORM_STATE_BUCKET: deno-registry2-prod-terraform-state-b3a31d16
95 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
96 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
97 | working-directory: terraform
98 |
99 | - name: Deploy infrastructure (prod)
100 | if: github.ref == 'refs/heads/main'
101 | run: terraform apply -parallelism=3 plan.tfplan
102 | env:
103 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
104 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
105 | working-directory: terraform
106 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | samconfig.toml
2 | .aws-sam
3 | .DS_Store
4 |
5 | # Local .terraform directories
6 | **/.terraform/*
7 |
8 | # .tfstate files
9 | *.tfstate
10 | *.tfstate.*
11 |
12 | # Crash log files
13 | crash.log
14 |
15 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
16 | # passwords, private keys, and other secrets. These should not be part of version
17 | # control as they are data points which are potentially sensitive and subject
18 | # to change depending on the environment.
19 | #
20 | *.tfvars
21 |
22 | # Ignore override files as they are usually used to override resources locally and so
23 | # are not checked in
24 | override.tf
25 | override.tf.json
26 | *_override.tf
27 | *_override.tf.json
28 |
29 | # Include override files you do wish to add to version control using a negated pattern
30 | #
31 | # !example_override.tf
32 |
33 | # Ignore tfplan files, i.e. the plan output of: terraform plan -out=tfplan
34 | *.tfplan
35 |
36 | # Ignore CLI configuration files
37 | .terraformrc
38 | terraform.rc
39 |
40 | .deno_plugins
41 |
42 | .terraform.lock.hcl
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "deno.enable": true,
3 | "deno.unstable": true,
4 | "deno.lint": true,
5 | "[typescript]": {
6 | "editor.defaultFormatter": "denoland.vscode-deno"
7 | },
8 | "[typescriptreact]": {
9 | "editor.defaultFormatter": "denoland.vscode-deno"
10 | },
11 | "terraform-ls.rootModules": ["terraform/"]
12 | }
--------------------------------------------------------------------------------
/API.md:
--------------------------------------------------------------------------------
1 | # API
2 |
3 | ## POST /webhook/gh/:module
4 |
5 | This API endpoint receives webhooks from GitHub. The `module` parameter in the
6 | URL is the name of the module. Both `application/json` and
7 | `application/x-www-form-urlencoded` content types are accepted.
8 |
9 | ### Request
10 |
11 | The contents and headers should be a GitHub `create` or `push` webhook event.
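For illustration, a minimal sketch of replaying a captured `push` event against
this endpoint. The URL, module name, and payload below are hypothetical, and a
real request must originate from GitHub's webhook IP ranges or it is rejected
with a 400:

```ts
// Hypothetical replay of a GitHub `push` webhook for a module named "oak".
const res = await fetch(
  "https://api.deno.land/webhook/gh/oak?version_prefix=v",
  {
    method: "POST",
    headers: {
      "content-type": "application/json",
      "x-github-event": "push",
    },
    // A captured `push` event payload; only an illustrative subset is shown.
    body: JSON.stringify({
      ref: "refs/tags/v5.3.1",
      repository: { full_name: "oakserver/oak" },
    }),
  },
);
console.log(res.status, await res.json());
```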
12 | More information: https://developer.github.com/webhooks/event-payloads/#create
13 | and https://developer.github.com/webhooks/event-payloads/#push
14 |
15 | There are optional query parameters that can change the behavior of the request:
16 |
17 | - `subdir`: this specifies a subdirectory of the repository to upload (not set
18 | by default). This directory must be in the format `std/` (note the trailing
19 | slash).
20 | - `version_prefix`: only upload versions that match this prefix. When this is
21 | set to `std/` and you tag version `std/0.63.0`, version `0.63.0` will be
22 | uploaded.
23 |
24 | ### Response
25 |
26 | #### Headers
27 |
28 | `content-type`: `application/json`
29 |
30 | #### Body
31 |
32 | ##### 200 OK
33 |
34 | ```json
35 | {
36 | "success": true,
37 | "data": {
38 | "module": "oak",
39 | "version": "v5.3.1",
40 | "repository": "oakserver/oak",
41 | "total_bytes_uploaded": 364546,
42 | "skipped_due_to_size": ["/fixtures/test.jpg", "/examples/static/50MB.zip"]
43 | }
44 | }
45 | ```
46 |
47 | ##### 400 Bad Request
48 |
49 | ```json
50 | {
51 | "success": false,
52 | "info": "module name is not valid"
53 | }
54 | ```
55 |
56 | OR
57 |
58 | ```json
59 | {
60 | "success": false,
61 | "error": "module name is registered to a different repository"
62 | }
63 | ```
64 |
65 | ##### 409 Conflict
66 |
67 | ```json
68 | {
69 | "success": false,
70 | "error": "module name is registered to a different repository"
71 | }
72 | ```
73 |
74 | ## Other endpoints
75 |
76 | For any other endpoints, please reference the
77 | [documentation for apiland.deno.dev](https://redocly.github.io/redoc/?url=https://apiland.deno.dev/~/spec)
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM hayd/deno-lambda:1.24.0
2 |
3 | RUN yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo.x86_64.rpm && \
4 | sed -i 's/$releasever/7/' /etc/yum.repos.d/endpoint.repo
5 | RUN yum install git -y && rm -rf /var/cache/yum
6 |
7 | COPY deps.ts .
8 | RUN deno cache --unstable --no-check deps.ts
9 |
10 | COPY . .
11 | RUN deno cache --unstable --no-check $(find . -name "*.ts" -not -name "*_test.ts")
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020-2021 the Deno authors
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | docker build . -t deno_registry2:latest
3 |
4 | test:
5 | docker-compose up -d
6 | sleep 10
7 | /bin/sh ./run-tests.sh
8 | docker-compose down
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # deno_registry2
2 |
3 | This is the backend for the deno.land/x service.
4 |
5 | ## Limits
6 |
7 | There are a few guidelines / rules that you should follow when publishing a
8 | module:
9 |
10 | - Please only register module names that you will actually use.
11 | - Do not squat names. If you do, we might transfer the name to someone that
12 | makes better use of it.
13 | - Do not register names which contain trademarks that you do not own.
14 | - Do not publish modules containing illegal content.
15 |
16 | In addition to these guidelines, there are also hard limits:
17 |
18 | - You cannot publish more than 3 different modules from a single source
19 | repository.
20 | - You cannot publish more than 15 modules from a single GitHub account or
21 | organization.
22 |
23 | If you need an increase to these quotas, please reach out to
24 | [modules@deno.com](mailto:modules@deno.com).
25 |
26 | ## Requirements
27 |
28 | - AWS account
29 | - [MongoDB Atlas](https://cloud.mongodb.com) account
30 |
31 | ## Preparing Docker
32 |
33 | Make sure to follow the official instructions to
34 | [log in to ECR](https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry_auth.html)
35 | via the Docker CLI; this is needed to push the images used by the Lambda
36 | deployment to ECR.
37 |
38 | ```bash
39 | aws ecr get-login-password --region region | docker login --username AWS --password-stdin aws_account_id.dkr.ecr.region.amazonaws.com
40 | ```
41 |
42 | ## Preparing MongoDB Atlas
43 |
44 | 1. Create an API key on [MongoDB Atlas](https://cloud.mongodb.com). The API key
45 | should have sufficient privileges to create a new project and configure it
46 | afterwards.
47 |
48 | ## Deploy
49 |
50 | 1. Install the `aws` CLI.
51 | 2. Sign in to `aws` by running `aws configure`.
52 | 3. [Install Terraform](https://terraform.io/downloads.html) version 0.13 or
53 | higher.
54 | 4. Copy `terraform/terraform.tfvars.example` to `terraform/terraform.tfvars`.
55 | 5. Modify `terraform/terraform.tfvars`: set `mongodb_atlas_org_id` to your
56 | MongoDB Atlas organization ID, and update `mongodb_atlas_private_key` and
57 | `mongodb_atlas_public_key` with the API key you created earlier.
58 | 6. Move to the `terraform/` directory and **comment out** the `backend` section
59 | in the `meta.tf` file (important for the first-time apply).
60 | 7.
Run the following steps:
61 |
62 | ```bash
63 | terraform init
64 | terraform plan -var-file terraform.tfvars -out plan.tfplan
65 | terraform apply plan.tfplan
66 | aws s3 ls | grep 'terraform-state' # take note of your tf state bucket name
67 | # before the final step, go back and remove the comments from step 6
68 | terraform init -backend-config "bucket=<your state bucket>" -backend-config "region=<your region>"
69 | ```
70 |
71 | ## Setting up MongoDB
72 |
73 | Terraform automatically provisions a MongoDB cluster in a separate project.
74 |
75 | 1. In the newly created MongoDB cluster, create a database called `production`.
76 | 2. In this database create a collection called `modules`.
77 | 3. In this collection create a new Atlas Search index with the name `default`
78 | and the mapping defined in `indexes/atlas_search_index_mapping.json`.
79 | 4. In this collection create a new index with the name `by_owner_and_repo` as
80 | defined in `indexes/modules_by_owner_and_repo.json`.
81 | 5. In this collection create a new index with the name
82 | `by_is_unlisted_and_star_count` as defined in
83 | `indexes/modules_by_is_unlisted_and_star_count.json`.
84 | 6. In this database create a collection called `builds`.
85 | 7. In this collection create a new _unique_ index with the name
86 | `by_name_and_version` as defined in
87 | `indexes/builds_by_name_and_version.json`.
88 |
89 | ## Teardown
90 |
91 | Before destroying your staging environment, make sure to:
92 |
93 | 1. run `terraform state pull` to make a local copy of your state file
94 | 2. comment out the `backend` section of the `meta.tf` file
95 | 3. re-initialize your terraform workspace by running
96 | `terraform init -backend-config "region=<your region>"`
97 | 4. make sure you empty your S3 buckets, otherwise the destroy will fail
98 |
99 | You can then run `terraform destroy` to completely remove your staging
100 | environment.
101 |
102 | ## Development
103 |
104 | To run tests locally, make sure you have Docker and docker-compose installed.
105 | Then run:
106 |
107 | ```sh
108 | make test
109 | ```
110 |
--------------------------------------------------------------------------------
/api/async/publish.ts:
--------------------------------------------------------------------------------
1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
2 |
3 | /**
4 |  * This function is responsible for downloading a module's source code
5 |  * from an origin repository like GitHub and uploading it to S3. It is
6 |  * triggered by an event in the AWS SQS build queue, which carries the ID
7 |  * of a build stored in datastore. The stored build
8 |  * contains all the information required to upload the module:
9 |  * the module name, GitHub repository, version, subdirectory, etc.
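 *
 * Each SQS record body is a small JSON document that carries only the build
 * ID, for example (hypothetical ID): `{ "buildID": "abc123" }`. Everything
 * else is looked up from the build stored in datastore.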
10 | */
11 |
12 | import {
13 | Context,
14 | join,
15 | jsoncParse,
16 | pooledMap,
17 | readAll,
18 | SQSEvent,
19 | walk,
20 | } from "../../deps.ts";
21 | import {
22 | Database as Datastore,
23 | NewBuild,
24 | } from "../../utils/datastore_database.ts";
25 | import { clone } from "../../utils/git.ts";
26 | import {
27 | getMeta,
28 | uploadMetaJson,
29 | uploadVersionMetaJson,
30 | uploadVersionRaw,
31 | } from "../../utils/storage.ts";
32 | import type { DirectoryListingFile } from "../../utils/types.ts";
33 | import { collectAsyncIterable, directorySize } from "../../utils/utils.ts";
34 |
35 | const datastore = new Datastore();
36 |
37 | const apilandURL = Deno.env.get("APILAND_URL")!;
38 | const apilandAuthToken = Deno.env.get("APILAND_AUTH_TOKEN")!;
39 |
40 | const DEFAULT_MAX_TOTAL_SIZE = 1024 * 1024 * 20; // 20 MiB in total
41 |
42 | const decoder = new TextDecoder();
43 |
44 | export async function handler(
45 | event: SQSEvent,
46 | _context: Context,
47 | ): Promise<void> {
48 | for (const record of event.Records) {
49 | const { buildID } = JSON.parse(record.body);
50 | const build = await datastore.getBuild(buildID);
51 | if (build === null) {
52 | throw new Error("Build does not exist!");
53 | }
54 |
55 | switch (build.upload_options.type) {
56 | case "github":
57 | try {
58 | await publishGithub(build);
59 | } catch (err) {
60 | console.log("error", err, err?.response, build);
61 | await datastore.saveBuild({
62 | ...build,
63 | status: "error",
64 | message: err.message,
65 | });
66 | return;
67 | }
68 | break;
69 | default:
70 | throw new Error(`Unknown build type: ${build.upload_options.type}`);
71 | }
72 |
73 | let message = "Published module.";
74 |
75 | // send a webhook request to apiland to do further indexing of the module
76 | // this is temporary until apiland subsumes the functionality of registry2
77 | const res = await fetch(apilandURL + "/publish", {
78 | method: "POST",
79 | body: JSON.stringify({
80 | event: "create",
81 | module: build.module,
82 | version: build.version,
83 | }),
84 | headers: {
85 | "authorization": `bearer ${apilandAuthToken}`,
86 | "content-type": "application/json",
87 | },
88 | });
89 |
90 | if (res.status !== 200) {
91 | console.error(
92 | "failed to post webhook to apiland",
93 | apilandURL,
94 | res.status,
95 | res.statusText,
96 | );
97 | message += " Failed to post webhook to apiland.";
98 | }
99 |
100 | // consume body, to not leak resources
101 | await res.text();
102 |
103 | await datastore.saveBuild({
104 | ...build,
105 | status: "success",
106 | message: message,
107 | });
108 | }
109 | }
110 |
111 | async function publishGithub(build: NewBuild) {
112 | console.log(
113 | `Publishing ${build.module} at ${build.upload_options.ref} from GitHub`,
114 | );
115 | const quota = await datastore.getOwnerQuota(
116 | build.upload_options.repository.split("/")[0] as string,
117 | );
118 | await datastore.saveBuild({
119 | ...build,
120 | status: "publishing",
121 | });
122 |
123 | const { module, version, upload_options: { repository, ref, subdir } } =
124 | build;
125 |
126 | // Clone the repository from GitHub
127 | const cloneURL = `https://github.com/${repository}`;
128 | const clonePath = await clone(cloneURL, ref, subdir);
129 |
130 | console.log("Finished clone");
131 |
132 | try {
133 | // Create path that has possible subdir prefix
134 | const path = subdir === undefined ?
clonePath : join(
135 | clonePath,
136 | subdir.replace(
137 | /(^\/|\/$)/g,
138 | "",
139 | ),
140 | );
141 |
142 | // Walk all files in the repository (that start with the subdir if present)
143 | const entries = [];
144 | for await (
145 | const entry of walk(path, {
146 | includeFiles: true,
147 | includeDirs: true,
148 | })
149 | ) {
150 | entries.push(entry);
151 | }
152 |
153 | console.log("Total files in repo", entries.length);
154 |
155 | const directory: DirectoryListingFile[] = [];
156 |
157 | await collectAsyncIterable(pooledMap(100, entries, async (entry) => {
158 | const filename = entry.path.substring(path.length);
159 |
160 | // If this is a file in the .git folder, ignore it
161 | if (filename.startsWith("/.git/") || filename === "/.git") return;
162 |
163 | // Reject modules that declare a JSR package name in deno.json(c)
164 | if (filename === "/deno.json" || filename === "/deno.jsonc") {
165 | const file = await Deno.open(entry.path);
166 | const body = await readAll(file);
167 | file.close();
168 | const bodyText = decoder.decode(body);
169 | try {
170 | const bodyJSON = jsoncParse(bodyText) as Record<string, unknown>;
171 | if ("name" in bodyJSON) {
172 | throw new TypeError(
173 | "This module is meant for JSR publishing, and as such cannot be published to /x/",
174 | );
175 | }
176 | } catch (e) {
177 | if (
178 | e.message ===
179 | "This module is meant for JSR publishing, and as such cannot be published to /x/"
180 | ) {
181 | throw e;
182 | }
183 | // A deno.json(c) that fails to parse is simply uploaded as-is.
184 | }
185 | }
186 |
187 | if (entry.isFile) {
188 | const stat = await Deno.stat(entry.path);
189 | directory.push({ path: filename, size: stat.size, type: "file" });
190 | } else {
191 | directory.push({ path: filename, size: undefined, type: "dir" });
192 | }
193 | }));
194 |
195 | const totalSize = directorySize(directory);
196 | const maxTotalSize = quota?.max_total_size ?? DEFAULT_MAX_TOTAL_SIZE;
197 |
198 | if (totalSize > maxTotalSize) {
199 | const message =
200 | `Module too large (${totalSize} bytes). Maximum allowed size is ${maxTotalSize} bytes.`;
201 | console.log(message);
202 | throw new Error(message);
203 | }
204 |
205 | // Pool requests because of https://github.com/denoland/deno_registry2/issues/15
206 | await collectAsyncIterable(pooledMap(65, directory, async (entry) => {
207 | if (entry.type === "file") {
208 | const file = await Deno.open(join(path, entry.path));
209 | const body = await readAll(file);
210 | await uploadVersionRaw(
211 | module,
212 | version,
213 | entry.path,
214 | body,
215 | );
216 | file.close();
217 | }
218 | }));
219 |
220 | const versionsBody = await getMeta(module, "versions.json");
221 | const versions = versionsBody
222 | ? JSON.parse(decoder.decode(versionsBody))
223 | : { versions: [] };
224 | await uploadMetaJson(
225 | module,
226 | "versions.json",
227 | { latest: version, versions: [version, ...versions.versions] },
228 | );
229 |
230 | // Upload directory listing to S3
231 | await uploadVersionMetaJson(
232 | module,
233 | version,
234 | {
235 | directory_listing: directory.sort((a, b) =>
236 | a.path.localeCompare(b.path, "en-US")
237 | ),
238 | uploaded_at: new Date().toISOString(),
239 | upload_options: {
240 | type: "github",
241 | repository,
242 | subdir,
243 | ref,
244 | },
245 | },
246 | );
247 | } finally {
248 | // Remove checkout
249 | await Deno.remove(clonePath, { recursive: true });
250 | }
251 | }
252 |
--------------------------------------------------------------------------------
/api/async/publish_test.ts:
--------------------------------------------------------------------------------
1 | // Copyright 2020-2021 the Deno authors. All rights reserved.
MIT license. 2 | import { handler } from "./publish.ts"; 3 | import { 4 | cleanupDatabase, 5 | createApiLandMock, 6 | createContext, 7 | createSQSEvent, 8 | } from "../../utils/test_utils.ts"; 9 | import { assert, assertEquals } from "../../test_deps.ts"; 10 | import { s3 } from "../../utils/storage.ts"; 11 | import { Database as Datastore } from "../../utils/datastore_database.ts"; 12 | 13 | const datastore = new Datastore(); 14 | 15 | Deno.test({ 16 | name: "publish success", 17 | async fn() { 18 | try { 19 | createApiLandMock(); 20 | const id = await datastore.createBuild({ 21 | module: "ltest", 22 | version: "0.0.9", 23 | upload_options: { 24 | ref: "0.0.9", 25 | repository: "luca-rand/testing", 26 | type: "github", 27 | }, 28 | status: "queued", 29 | created_at: new Date(), 30 | }); 31 | 32 | await handler( 33 | createSQSEvent({ buildID: id }), 34 | createContext(), 35 | ); 36 | 37 | assertEquals({ ...await datastore.getBuild(id), created_at: undefined }, { 38 | created_at: undefined, 39 | id, 40 | module: "ltest", 41 | version: "0.0.9", 42 | upload_options: { 43 | ref: "0.0.9", 44 | repository: "luca-rand/testing", 45 | type: "github", 46 | }, 47 | status: "success", 48 | message: "Published module.", 49 | }); 50 | 51 | // Check that versions.json file exists 52 | const versions = await s3.getObject("ltest/meta/versions.json"); 53 | assertEquals(versions?.cacheControl, "max-age=10, must-revalidate"); 54 | assertEquals(versions?.contentType, "application/json"); 55 | assert(versions); 56 | assertEquals( 57 | await new Response(versions.body).json(), 58 | { latest: "0.0.9", versions: ["0.0.9"] }, 59 | ); 60 | 61 | const meta = await s3.getObject("ltest/versions/0.0.9/meta/meta.json"); 62 | assertEquals(meta?.cacheControl, "public, max-age=31536000, immutable"); 63 | assertEquals(meta?.contentType, "application/json"); 64 | // Check that meta file exists 65 | assert(meta); 66 | assertEquals( 67 | { 68 | ...await new Response(meta.body).json(), 69 | uploaded_at: undefined, 70 | }, 71 | { 72 | directory_listing: [ 73 | { 74 | path: "", 75 | size: 2735, 76 | type: "dir", 77 | }, 78 | { 79 | path: "/.github", 80 | size: 716, 81 | type: "dir", 82 | }, 83 | { 84 | path: "/.github/README.md", 85 | size: 304, 86 | type: "file", 87 | }, 88 | { 89 | path: "/.github/workflows", 90 | size: 412, 91 | type: "dir", 92 | }, 93 | { 94 | path: "/.github/workflows/ci.yml", 95 | size: 412, 96 | type: "file", 97 | }, 98 | { 99 | path: "/.vscode", 100 | size: 26, 101 | type: "dir", 102 | }, 103 | { 104 | path: "/.vscode/settings.json", 105 | size: 26, 106 | type: "file", 107 | }, 108 | { 109 | path: "/deps.ts", 110 | size: 63, 111 | type: "file", 112 | }, 113 | { 114 | path: "/example.ts", 115 | size: 50, 116 | type: "file", 117 | }, 118 | { 119 | path: "/fixtures", 120 | size: 23, 121 | type: "dir", 122 | }, 123 | { 124 | path: "/fixtures/%", 125 | size: 23, 126 | type: "file", 127 | }, 128 | { 129 | path: "/LICENSE", 130 | size: 1066, 131 | type: "file", 132 | }, 133 | { 134 | path: "/mod_test.ts", 135 | size: 227, 136 | type: "file", 137 | }, 138 | { 139 | path: "/mod.ts", 140 | size: 139, 141 | type: "file", 142 | }, 143 | { 144 | path: "/subproject", 145 | size: 425, 146 | type: "dir", 147 | }, 148 | { 149 | path: "/subproject/mod.ts", 150 | size: 71, 151 | type: "file", 152 | }, 153 | { 154 | path: "/subproject/README.md", 155 | size: 354, 156 | type: "file", 157 | }, 158 | ], 159 | upload_options: { 160 | ref: "0.0.9", 161 | repository: "luca-rand/testing", 162 | type: "github", 163 | }, 164 | 
uploaded_at: undefined,
165 | },
166 | );
167 |
168 | // Check the yml file was uploaded
169 | const yml = await s3.getObject(
170 | "ltest/versions/0.0.9/raw/.github/workflows/ci.yml",
171 | );
172 | assertEquals(yml?.cacheControl, "public, max-age=31536000, immutable");
173 | assertEquals(yml?.contentType, "text/yaml");
174 | assert(yml);
175 | let body = await new Response(yml.body).arrayBuffer();
176 | assertEquals(body.byteLength, 412);
177 |
178 | // Check the ts file was uploaded
179 | const ts = await s3.getObject("ltest/versions/0.0.9/raw/mod.ts");
180 | assertEquals(ts?.cacheControl, "public, max-age=31536000, immutable");
181 | assertEquals(ts?.contentType, "application/typescript; charset=utf-8");
182 | assert(ts);
183 | body = await new Response(ts.body).arrayBuffer();
184 | assertEquals(body.byteLength, 139);
185 |
186 | // Check the readme file was uploaded
187 | const readme = await s3.getObject(
188 | "ltest/versions/0.0.9/raw/.github/README.md",
189 | );
190 | assertEquals(readme?.cacheControl, "public, max-age=31536000, immutable");
191 | assertEquals(readme?.contentType, "text/markdown");
192 | assert(readme);
193 | body = await new Response(readme.body).arrayBuffer();
194 | console.log(new TextDecoder().decode(body));
195 | assertEquals(body.byteLength, 304);
196 | } finally {
197 | await cleanupDatabase(datastore);
198 | await s3.empty();
199 | }
200 | },
201 | });
202 |
203 | Deno.test({
204 | name: "publish success subdir",
205 | async fn() {
206 | try {
207 | createApiLandMock();
208 | const id = await datastore.createBuild({
209 | module: "ltest",
210 | version: "0.0.7",
211 | upload_options: {
212 | ref: "0.0.7",
213 | repository: "luca-rand/testing",
214 | type: "github",
215 | subdir: "subproject/",
216 | },
217 | status: "queued",
218 | created_at: new Date(),
219 | });
220 |
221 | await handler(
222 | createSQSEvent({ buildID: id }),
223 | createContext(),
224 | );
225 |
226 | assertEquals({ ...await datastore.getBuild(id), created_at: undefined }, {
227 | created_at: undefined,
228 | id,
229 | module: "ltest",
230 | version: "0.0.7",
231 | upload_options: {
232 | ref: "0.0.7",
233 | repository: "luca-rand/testing",
234 | type: "github",
235 | subdir: "subproject/",
236 | },
237 | status: "success",
238 | message: "Published module.",
239 | });
240 |
241 | // Check that versions.json file exists
242 | const versions = await s3.getObject("ltest/meta/versions.json");
243 | assertEquals(versions?.cacheControl, "max-age=10, must-revalidate");
244 | assertEquals(versions?.contentType, "application/json");
245 | assert(versions);
246 | assertEquals(
247 | await new Response(versions.body).json(),
248 | { latest: "0.0.7", versions: ["0.0.7"] },
249 | );
250 |
251 | const meta = await s3.getObject("ltest/versions/0.0.7/meta/meta.json");
252 | assertEquals(meta?.cacheControl, "public, max-age=31536000, immutable");
253 | assertEquals(meta?.contentType, "application/json");
254 | // Check that meta file exists
255 | assert(meta);
256 | assertEquals(
257 | {
258 | ...await new Response(meta.body).json(),
259 | uploaded_at: undefined,
260 | },
261 | {
262 | directory_listing: [
263 | {
264 | path: "",
265 | size: 425,
266 | type: "dir",
267 | },
268 | {
269 | path: "/mod.ts",
270 | size: 71,
271 | type: "file",
272 | },
273 | {
274 | path: "/README.md",
275 | size: 354,
276 | type: "file",
277 | },
278 | ],
279 | upload_options: {
280 | ref: "0.0.7",
281 | repository: "luca-rand/testing",
282 | subdir: "subproject/",
283 | type: "github",
284 | },
285 | uploaded_at:
undefined,
286 | },
287 | );
288 |
289 | // Check the ts file was uploaded
290 | const ts = await s3.getObject("ltest/versions/0.0.7/raw/mod.ts");
291 | assertEquals(ts?.cacheControl, "public, max-age=31536000, immutable");
292 | assertEquals(ts?.contentType, "application/typescript; charset=utf-8");
293 | assert(ts);
294 | let body = await new Response(ts.body).arrayBuffer();
295 | assertEquals(body.byteLength, 71);
296 |
297 | // Check the readme file was uploaded
298 | const readme = await s3.getObject("ltest/versions/0.0.7/raw/README.md");
299 | assertEquals(readme?.cacheControl, "public, max-age=31536000, immutable");
300 | assertEquals(readme?.contentType, "text/markdown");
301 | assert(readme);
302 | body = await new Response(readme.body).arrayBuffer();
303 | assertEquals(body.byteLength, 354);
304 | } finally {
305 | await cleanupDatabase(datastore);
306 | await s3.empty();
307 | }
308 | },
309 | });
310 |
311 | Deno.test({
312 | name: "publish too large",
313 | async fn() {
314 | try {
315 | const id = await datastore.createBuild({
316 | module: "ltest_big",
317 | version: "0.0.1",
318 | upload_options: {
319 | ref: "0.0.1",
320 | repository: "luca-rand/testing_big",
321 | type: "github",
322 | },
323 | status: "queued",
324 | created_at: new Date(),
325 | });
326 |
327 | await handler(
328 | createSQSEvent({ buildID: id }),
329 | createContext(),
330 | );
331 |
332 | assertEquals({ ...await datastore.getBuild(id), created_at: undefined }, {
333 | created_at: undefined,
334 | id,
335 | module: "ltest_big",
336 | version: "0.0.1",
337 | upload_options: {
338 | ref: "0.0.1",
339 | repository: "luca-rand/testing_big",
340 | type: "github",
341 | },
342 | status: "error",
343 | message:
344 | "Module too large (26214825 bytes). Maximum allowed size is 20971520 bytes.",
345 | });
346 |
347 | // Check that versions.json file does not exist
348 | const versions = await s3.getObject("ltest/meta/versions.json");
349 | assertEquals(versions, undefined);
350 |
351 | const meta = await s3.getObject("ltest/versions/0.0.1/meta/meta.json");
352 | assertEquals(meta, undefined);
353 |
354 | // Check the readme file was not uploaded
355 | const readme = await s3.getObject("ltest/versions/0.0.1/raw/README.md");
356 | assertEquals(readme, undefined);
357 | } finally {
358 | await cleanupDatabase(datastore);
359 | await s3.empty();
360 | }
361 | },
362 | });
363 |
364 | Deno.test({
365 | name: "publish large custom quota",
366 | async fn() {
367 | try {
368 | createApiLandMock();
369 | await datastore.saveOwnerQuota({
370 | owner: "luca-rand",
371 | type: "github",
372 | max_modules: 7,
373 | max_total_size: 1024 * 1024 * 50,
374 | blocked: false,
375 | });
376 |
377 | const id = await datastore.createBuild({
378 | module: "ltest_big",
379 | version: "0.0.1",
380 | upload_options: {
381 | ref: "0.0.1",
382 | repository: "luca-rand/testing_big",
383 | type: "github",
384 | },
385 | status: "queued",
386 | created_at: new Date(),
387 | });
388 |
389 | await handler(
390 | createSQSEvent({ buildID: id }),
391 | createContext(),
392 | );
393 |
394 | assertEquals({ ...await datastore.getBuild(id), created_at: undefined }, {
395 | created_at: undefined,
396 | id,
397 | module: "ltest_big",
398 | version: "0.0.1",
399 | upload_options: {
400 | ref: "0.0.1",
401 | repository: "luca-rand/testing_big",
402 | type: "github",
403 | },
404 | status: "success",
405 | message: "Published module.",
406 | });
407 |
408 | // Check that versions.json file exists
409 | const versions = await
s3.getObject("ltest_big/meta/versions.json"); 410 | assertEquals(versions?.cacheControl, "max-age=10, must-revalidate"); 411 | assertEquals(versions?.contentType, "application/json"); 412 | assert(versions); 413 | assertEquals( 414 | await new Response(versions.body).json(), 415 | { latest: "0.0.1", versions: ["0.0.1"] }, 416 | ); 417 | } finally { 418 | await cleanupDatabase(datastore); 419 | await s3.empty(); 420 | } 421 | }, 422 | }); 423 | -------------------------------------------------------------------------------- /api/webhook/github.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | /** 4 | * This function receives webhook events from GitHub. When an event is received 5 | * the service checks if it comes from GitHub, if the module name and repository 6 | * ID match up, and if this version of the module has been uploaded already. If 7 | * all of these checks pass a build is created in datastore and the ID of this 8 | * build is added to the AWS SQS build queue to be processed asynchronously. 9 | */ 10 | 11 | import { 12 | APIGatewayProxyEventV2, 13 | APIGatewayProxyResultV2, 14 | Context, 15 | } from "../../deps.ts"; 16 | import { parseRequestBody, respondJSON } from "../../utils/http.ts"; 17 | import { isIp4InCidrs } from "../../utils/net.ts"; 18 | import type { APIErrorResponse } from "../../utils/types.ts"; 19 | 20 | const apilandURL = Deno.env.get("APILAND_URL")!; 21 | const apilandAuthToken = Deno.env.get("APILAND_AUTH_TOKEN")!; 22 | 23 | export async function handler( 24 | event: APIGatewayProxyEventV2, 25 | _context: Context, 26 | ): Promise { 27 | const ip = event.requestContext.http.sourceIp; 28 | if (!isGitHubHooksIP(ip)) { 29 | return respondJSON({ 30 | statusCode: 400, 31 | body: JSON.stringify({ 32 | success: false, 33 | error: "request does not come from GitHub", 34 | } as APIErrorResponse), 35 | }); 36 | } 37 | 38 | const moduleName = event.pathParameters?.name; 39 | if (!moduleName) { 40 | return respondJSON({ 41 | statusCode: 400, 42 | body: JSON.stringify({ 43 | success: false, 44 | error: "no module name specified", 45 | } as APIErrorResponse), 46 | }); 47 | } 48 | 49 | const headers = new Headers(event.headers as Record); 50 | 51 | if ( 52 | !(headers.get("content-type") ?? "").startsWith("application/json") && 53 | !(headers.get("content-type") ?? "").startsWith( 54 | "application/x-www-form-urlencoded", 55 | ) 56 | ) { 57 | return respondJSON({ 58 | statusCode: 400, 59 | body: JSON.stringify({ 60 | success: false, 61 | error: "content-type is not json or x-www-form-urlencoded", 62 | } as APIErrorResponse), 63 | }); 64 | } 65 | 66 | // Check the GitHub event type. 67 | const ghEvent = headers.get("x-github-event"); 68 | 69 | // Decode event body in the case the event is submitted as form-urlencoded 70 | event = parseRequestBody(event); 71 | 72 | const url = new URL(`${apilandURL}/temp_gh/${moduleName}`); 73 | if (event.queryStringParameters) { 74 | for (const [key, val] of Object.entries(event.queryStringParameters)) { 75 | url.searchParams.set(key, val ?? ""); 76 | } 77 | } 78 | 79 | // this is temporary until apiland subsumes the functionality of registry2 80 | const res = await fetch(url, { 81 | method: "POST", 82 | body: event.body, 83 | headers: { 84 | "x-github-event": ghEvent ?? 
"", 85 | "authorization": `bearer ${apilandAuthToken}`, 86 | }, 87 | }); 88 | 89 | return respondJSON({ 90 | statusCode: res.status, 91 | body: await res.text(), 92 | }); 93 | } 94 | 95 | // From https://api.github.com/meta 96 | const GITHUB_HOOKS_CIDRS = [ 97 | "192.30.252.0/22", 98 | "185.199.108.0/22", 99 | "140.82.112.0/20", 100 | "143.55.64.0/20", 101 | ]; 102 | 103 | export function isGitHubHooksIP(ip: string): boolean { 104 | return isIp4InCidrs(ip, GITHUB_HOOKS_CIDRS); 105 | } 106 | -------------------------------------------------------------------------------- /badwords-example.txt: -------------------------------------------------------------------------------- 1 | pineapple 2 | frisbee 3 | danny_devito -------------------------------------------------------------------------------- /deps.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | export { expandGlob, walk } from "https://deno.land/std@0.149.0/fs/mod.ts"; 4 | export { parse as jsoncParse } from "https://deno.land/std@0.214.0/jsonc/mod.ts"; 5 | export { join } from "https://deno.land/std@0.149.0/path/mod.ts"; 6 | export type { 7 | APIGatewayProxyEventV2, 8 | APIGatewayProxyResultV2, 9 | APIGatewayProxyStructuredResultV2, 10 | Context, 11 | ScheduledEvent, 12 | SQSEvent, 13 | } from "https://deno.land/x/lambda@1.24.0/types.d.ts"; 14 | export { S3Bucket } from "https://deno.land/x/s3@0.5.0/mod.ts"; 15 | export { SQSQueue } from "https://deno.land/x/sqs@0.3.7/mod.ts"; 16 | export { SSM } from "https://deno.land/x/ssm@0.1.4/mod.ts"; 17 | export { lookup } from "https://deno.land/x/media_types@v2.13.0/mod.ts"; 18 | export { pooledMap } from "https://deno.land/std@0.149.0/async/pool.ts"; 19 | export { readAll } from "https://deno.land/std@0.149.0/streams/conversion.ts"; 20 | export { 21 | Datastore, 22 | datastoreValueToValue, 23 | entityToObject, 24 | objectGetKey, 25 | objectSetKey, 26 | objectToEntity, 27 | } from "https://deno.land/x/google_datastore@0.2.1/mod.ts"; 28 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | s3: 5 | image: minio/minio 6 | network_mode: "host" 7 | environment: 8 | MINIO_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE 9 | MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 10 | command: server /data 11 | healthcheck: 12 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 13 | interval: 30s 14 | timeout: 20s 15 | retries: 3 16 | logging: 17 | driver: none 18 | sqs: 19 | image: softwaremill/elasticmq 20 | network_mode: "host" 21 | environment: 22 | AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE 23 | AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 24 | logging: 25 | driver: none 26 | localstack: 27 | image: localstack/localstack:0.11.5 28 | network_mode: "host" 29 | environment: 30 | - "SERVICES=ssm" 31 | logging: 32 | driver: none 33 | -------------------------------------------------------------------------------- /indexes/atlas_search_index_mapping.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "dynamic": false, 4 | "fields": { 5 | "_id": { 6 | "analyzer": "lucene.simple", 7 | "type": "string" 8 | }, 9 | "description": { 10 | "analyzer": "lucene.standard", 11 | "type": "string" 12 | } 13 | } 14 | } 15 | } 16 | 
-------------------------------------------------------------------------------- /indexes/builds_by_name_and_version.json: -------------------------------------------------------------------------------- 1 | { 2 | "options.moduleName": 1, 3 | "options.version": 1 4 | } 5 | -------------------------------------------------------------------------------- /indexes/modules_by_is_unlisted_and_star_count.json: -------------------------------------------------------------------------------- 1 | { 2 | "is_unlisted": 1, 3 | "star_count": 1 4 | } 5 | -------------------------------------------------------------------------------- /indexes/modules_by_owner_and_repo.json: -------------------------------------------------------------------------------- 1 | { 2 | "owner": 1, 3 | "repo": 1 4 | } 5 | -------------------------------------------------------------------------------- /run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Setting up environment variables..." 4 | export AWS_REGION=us-east-1 5 | export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE 6 | export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 7 | # required when using the aws cli v2 to disable the sticky pager 8 | # see https://docs.aws.amazon.com/cli/latest/userguide/cliv2-migration.html#cliv2-migration-output-pager 9 | export AWS_PAGER="" 10 | export BUILD_QUEUE=http://localhost:9324/000000000000/builds 11 | export STORAGE_BUCKET=deno-registry2 12 | export MODERATION_BUCKET=deno-registry2-moderation 13 | export S3_ENDPOINT_URL=http://localhost:9000 14 | export SSM_ENDPOINT_URL=http://localhost:4583 15 | export APILAND_URL=http://localhost:8787/webhook/publish 16 | export APILAND_AUTH_TOKEN=123456789 17 | # required because some tests use dates written in UTC in assertions 18 | export TZ='UTC' 19 | 20 | # Set up S3 21 | echo "Setting up S3 buckets..." 22 | aws --endpoint-url=http://localhost:9000 s3 rm --recursive s3://deno-registry2 || true 23 | aws --endpoint-url=http://localhost:9000 s3 rm --recursive s3://deno-registry2-moderation || true 24 | aws --endpoint-url=http://localhost:9000 s3 rb s3://deno-registry2 || true 25 | aws --endpoint-url=http://localhost:9000 s3 rb s3://deno-registry2-moderation || true 26 | aws --endpoint-url=http://localhost:9000 s3 mb s3://deno-registry2 27 | aws --endpoint-url=http://localhost:9000 s3 mb s3://deno-registry2-moderation 28 | aws --endpoint-url=http://localhost:9000 s3api put-bucket-policy --bucket deno-registry2 --policy '{ "Version":"2012-10-17", "Statement":[ { "Sid":"PublicRead", "Effect":"Allow", "Principal": "*", "Action":["s3:GetObject","s3:GetObjectVersion"], "Resource":["arn:aws:s3:::deno-registry2/*"] } ] }' 29 | aws --endpoint-url=http://localhost:9000 s3 cp badwords-example.txt s3://deno-registry2-moderation/badwords.txt 30 | 31 | # Set up SQS 32 | echo "Setting up SQS queue..." 33 | aws --endpoint-url=http://localhost:9324 sqs delete-queue --queue-url http://localhost:9324/000000000000/builds --region us-east-1|| true 34 | aws --endpoint-url=http://localhost:9324 sqs create-queue --queue-name builds --region us-east-1 35 | 36 | echo "Running tests..." 
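# DENO_ARGS is not set anywhere above; if the caller exports it, its contents
# are forwarded as extra flags to `deno test` below (for example a single test
# file) without editing this script.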
37 | deno test --unstable -A $DENO_ARGS 38 | -------------------------------------------------------------------------------- /terraform/alerts.tf: -------------------------------------------------------------------------------- 1 | resource "aws_sns_topic" "alarm" { 2 | name = "${local.prefix}-alarms-${local.short_uuid}" 3 | tags = local.tags 4 | } 5 | 6 | resource "aws_cloudwatch_metric_alarm" "publish_lambda_errors" { 7 | for_each = { 8 | publish = aws_lambda_function.async_publish.function_name, 9 | webhook = aws_lambda_function.webhook_github.function_name, 10 | } 11 | 12 | alarm_name = "${local.prefix}-lambda-errors-alarm-${each.key}-${local.short_uuid}" 13 | alarm_description = "Lambda function failed more than 2 times in the last 30 minutes." 14 | comparison_operator = "GreaterThanThreshold" 15 | evaluation_periods = 1 16 | period = 1800 17 | datapoints_to_alarm = 1 18 | statistic = "Sum" 19 | metric_name = "Errors" 20 | namespace = "AWS/Lambda" 21 | dimensions = { 22 | "FunctionName" = each.value 23 | } 24 | threshold = 2 25 | treat_missing_data = "missing" 26 | alarm_actions = [aws_sns_topic.alarm.arn] 27 | tags = local.tags 28 | } 29 | 30 | resource "aws_cloudwatch_metric_alarm" "stuck_builds" { 31 | alarm_name = "${local.prefix}-build-queue-old-messages-${local.short_uuid}" 32 | comparison_operator = "GreaterThanThreshold" 33 | evaluation_periods = 1 34 | period = 300 35 | datapoints_to_alarm = 1 36 | statistic = "Maximum" 37 | metric_name = "ApproximateAgeOfOldestMessage" 38 | namespace = "AWS/SQS" 39 | dimensions = { 40 | "QueueName" = aws_sqs_queue.build_queue.name 41 | } 42 | threshold = 900 // 15 minutes 43 | treat_missing_data = "missing" 44 | alarm_actions = [aws_sns_topic.alarm.arn] 45 | tags = local.tags 46 | } 47 | 48 | resource "aws_cloudwatch_metric_alarm" "dlq_messages" { 49 | alarm_name = "${local.prefix}-total-build-dlq-${local.short_uuid}" 50 | comparison_operator = "GreaterThanThreshold" 51 | evaluation_periods = 1 52 | period = 300 53 | datapoints_to_alarm = 1 54 | statistic = "Average" 55 | metric_name = "ApproximateNumberOfMessagesVisible" 56 | namespace = "AWS/SQS" 57 | dimensions = { 58 | "QueueName" = aws_sqs_queue.build_dlq.name 59 | } 60 | threshold = 25 61 | treat_missing_data = "missing" 62 | alarm_actions = [aws_sns_topic.alarm.arn] 63 | tags = local.tags 64 | } 65 | 66 | resource "aws_sns_topic_subscription" "email-bert" { 67 | endpoint = "bert@deno.land" 68 | protocol = "email" 69 | topic_arn = aws_sns_topic.alarm.arn 70 | } 71 | 72 | resource "aws_sns_topic_subscription" "email-luca" { 73 | endpoint = "lucacasonato@yahoo.com" 74 | protocol = "email" 75 | topic_arn = aws_sns_topic.alarm.arn 76 | } 77 | 78 | resource "aws_sns_topic_subscription" "email-ryan" { 79 | endpoint = "ry@tinyclouds.org" 80 | protocol = "email" 81 | topic_arn = aws_sns_topic.alarm.arn 82 | } 83 | 84 | resource "aws_sns_topic_subscription" "sms-luca" { 85 | endpoint = "+31615219593" 86 | protocol = "sms" 87 | topic_arn = aws_sns_topic.alarm.arn 88 | } 89 | -------------------------------------------------------------------------------- /terraform/api.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | api_name = "${local.prefix}_api_${local.short_uuid}" 3 | api_domain_name = "${local.domain_prefix}api.${trimsuffix(data.google_dns_managed_zone.dotland_dns_zone.dns_name, ".")}" 4 | } 5 | 6 | resource "aws_acm_certificate" "api_certificate" { 7 | domain_name = local.api_domain_name 8 | subject_alternative_names = 
[local.api_domain_name] 9 | validation_method = "DNS" 10 | 11 | options { 12 | certificate_transparency_logging_preference = "ENABLED" 13 | } 14 | 15 | lifecycle { 16 | create_before_destroy = true 17 | } 18 | } 19 | 20 | resource "aws_acm_certificate_validation" "api_certificate_validation" { 21 | certificate_arn = aws_acm_certificate.api_certificate.arn 22 | validation_record_fqdns = [for record in google_dns_record_set.api_domain_validation_record : record.name] 23 | } 24 | 25 | resource "google_dns_record_set" "api_cname_record" { 26 | project = data.google_dns_managed_zone.dotland_dns_zone.project 27 | managed_zone = data.google_dns_managed_zone.dotland_dns_zone.name 28 | name = "${aws_apigatewayv2_domain_name.deno_api_domain.domain_name}." 29 | rrdatas = ["${aws_apigatewayv2_domain_name.deno_api_domain.domain_name_configuration[0].target_domain_name}."] 30 | type = "CNAME" 31 | ttl = 3600 32 | } 33 | 34 | resource "google_dns_record_set" "api_domain_validation_record" { 35 | for_each = { 36 | for dv in aws_acm_certificate.api_certificate.domain_validation_options : dv.domain_name => dv 37 | } 38 | 39 | project = data.google_dns_managed_zone.dotland_dns_zone.project 40 | managed_zone = data.google_dns_managed_zone.dotland_dns_zone.name 41 | name = each.value.resource_record_name 42 | rrdatas = [each.value.resource_record_value] 43 | type = each.value.resource_record_type 44 | ttl = 3600 45 | } 46 | 47 | resource "aws_apigatewayv2_api" "deno_api" { 48 | name = local.api_name 49 | protocol_type = "HTTP" 50 | 51 | cors_configuration { 52 | allow_origins = ["*"] 53 | } 54 | } 55 | 56 | resource "aws_apigatewayv2_domain_name" "deno_api_domain" { 57 | domain_name = aws_acm_certificate.api_certificate.domain_name 58 | domain_name_configuration { 59 | certificate_arn = aws_acm_certificate_validation.api_certificate_validation.certificate_arn 60 | endpoint_type = "REGIONAL" 61 | security_policy = "TLS_1_2" 62 | } 63 | } 64 | 65 | resource "aws_apigatewayv2_api_mapping" "deno_api_mapping" { 66 | api_id = aws_apigatewayv2_api.deno_api.id 67 | domain_name = aws_apigatewayv2_domain_name.deno_api_domain.id 68 | stage = aws_apigatewayv2_stage.deno_api_default_stage.id 69 | } 70 | 71 | resource "aws_apigatewayv2_stage" "deno_api_default_stage" { 72 | api_id = aws_apigatewayv2_api.deno_api.id 73 | name = "$default" 74 | auto_deploy = true 75 | 76 | access_log_settings { 77 | destination_arn = aws_cloudwatch_log_group.api_gateway_log_group.arn 78 | format = "[$context.requestTime] $context.httpMethod $context.path $context.protocol $context.status $context.responseLength $context.requestId" 79 | } 80 | } 81 | 82 | # Region-wide API Gateway config 83 | resource "aws_api_gateway_account" "denoland" { 84 | cloudwatch_role_arn = aws_iam_role.api_gateway_cloudwatch.arn 85 | } 86 | 87 | data "aws_iam_policy_document" "api_gateway_cloudwatch_assume_policy" { 88 | statement { 89 | actions = ["sts:AssumeRole"] 90 | principals { 91 | type = "Service" 92 | identifiers = ["apigateway.amazonaws.com"] 93 | } 94 | } 95 | } 96 | 97 | resource "aws_iam_role" "api_gateway_cloudwatch" { 98 | name = "${local.prefix}_api_gateway_cloudwatch_${local.short_uuid}" 99 | assume_role_policy = data.aws_iam_policy_document.api_gateway_cloudwatch_assume_policy.json 100 | } 101 | 102 | data "aws_iam_policy_document" "api_gateway_cloudwatch_access_policy" { 103 | statement { 104 | actions = [ 105 | "logs:CreateLogGroup", 106 | "logs:CreateLogStream", 107 | "logs:DescribeLogGroups", 108 | "logs:DescribeLogStreams", 109 | 
"logs:PutLogEvents", 110 | "logs:GetLogEvents", 111 | "logs:FilterLogEvents", 112 | ] 113 | resources = ["*"] 114 | } 115 | } 116 | 117 | resource "aws_iam_role_policy" "cloudwatch" { 118 | name = "default" 119 | role = aws_iam_role.api_gateway_cloudwatch.id 120 | policy = data.aws_iam_policy_document.api_gateway_cloudwatch_access_policy.json 121 | } 122 | 123 | resource "aws_cloudwatch_log_group" "api_gateway_log_group" { 124 | name = "/aws/apigateway/${local.api_name}" 125 | retention_in_days = 14 126 | } 127 | -------------------------------------------------------------------------------- /terraform/cdn.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | cdn_domain_name = "${local.domain_prefix}cdn.${trimsuffix(data.google_dns_managed_zone.dotland_dns_zone.dns_name, ".")}" 3 | cdn_origin_id = "storage_bucket" 4 | } 5 | 6 | resource "aws_acm_certificate" "cdn_certificate" { 7 | domain_name = local.cdn_domain_name 8 | subject_alternative_names = [local.cdn_domain_name] 9 | validation_method = "DNS" 10 | 11 | options { 12 | certificate_transparency_logging_preference = "ENABLED" 13 | } 14 | 15 | lifecycle { 16 | create_before_destroy = true 17 | } 18 | } 19 | 20 | resource "aws_acm_certificate_validation" "cdn_certificate_validation" { 21 | certificate_arn = aws_acm_certificate.cdn_certificate.arn 22 | validation_record_fqdns = [for record in google_dns_record_set.cdn_domain_validation_record : record.name] 23 | } 24 | 25 | resource "google_dns_record_set" "cdn_cname_record" { 26 | project = data.google_dns_managed_zone.dotland_dns_zone.project 27 | managed_zone = data.google_dns_managed_zone.dotland_dns_zone.name 28 | name = "${local.cdn_domain_name}." 29 | rrdatas = ["${aws_cloudfront_distribution.cdn.domain_name}."] 30 | type = "CNAME" 31 | ttl = 3600 32 | } 33 | 34 | resource "google_dns_record_set" "cdn_domain_validation_record" { 35 | for_each = { 36 | for dv in aws_acm_certificate.cdn_certificate.domain_validation_options : dv.domain_name => dv 37 | } 38 | 39 | project = data.google_dns_managed_zone.dotland_dns_zone.project 40 | managed_zone = data.google_dns_managed_zone.dotland_dns_zone.name 41 | name = each.value.resource_record_name 42 | rrdatas = [each.value.resource_record_value] 43 | type = each.value.resource_record_type 44 | ttl = 3600 45 | } 46 | 47 | resource "aws_cloudfront_distribution" "cdn" { 48 | origin { 49 | domain_name = aws_s3_bucket.storage_bucket.bucket_domain_name 50 | origin_id = local.cdn_origin_id 51 | } 52 | 53 | enabled = true 54 | http_version = "http2and3" 55 | is_ipv6_enabled = true 56 | price_class = "PriceClass_All" 57 | 58 | aliases = [local.cdn_domain_name] 59 | 60 | default_cache_behavior { 61 | target_origin_id = local.cdn_origin_id 62 | allowed_methods = ["GET", "HEAD"] 63 | cached_methods = ["GET", "HEAD"] 64 | compress = true 65 | cache_policy_id = data.aws_cloudfront_cache_policy.cdn_cache_policy.id 66 | response_headers_policy_id = aws_cloudfront_response_headers_policy.cdn_response_headers_policy.id 67 | viewer_protocol_policy = "redirect-to-https" 68 | } 69 | 70 | restrictions { 71 | geo_restriction { 72 | restriction_type = "none" 73 | } 74 | } 75 | 76 | viewer_certificate { 77 | acm_certificate_arn = aws_acm_certificate_validation.cdn_certificate_validation.certificate_arn 78 | ssl_support_method = "sni-only" 79 | minimum_protocol_version = "TLSv1.2_2021" 80 | } 81 | } 82 | 83 | data "aws_cloudfront_cache_policy" "cdn_cache_policy" { 84 | name = "Managed-CachingOptimized" 85 | } 86 | 87 
| resource "aws_cloudfront_response_headers_policy" "cdn_response_headers_policy" { 88 | name = "${local.prefix}-CORS-all-origins-CSP-strict-${local.short_uuid}" 89 | 90 | cors_config { 91 | access_control_allow_credentials = false 92 | access_control_allow_headers { items = ["*"] } 93 | access_control_allow_methods { items = ["GET", "HEAD"] } 94 | access_control_allow_origins { items = ["*"] } 95 | origin_override = false 96 | } 97 | 98 | security_headers_config { 99 | content_security_policy { 100 | content_security_policy = "default-src 'none'; style-src 'unsafe-inline'; sandbox" 101 | override = false 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /terraform/datastore.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ssm_parameter" "google_private_key" { 2 | name = "${local.prefix}-google-private-key-${local.short_uuid}" 3 | description = "GCP private key" 4 | type = "SecureString" 5 | value = var.google_private_key 6 | tags = local.tags 7 | } 8 | 9 | resource "aws_ssm_parameter" "google_private_key_id" { 10 | name = "${local.prefix}-google-private-key-id-${local.short_uuid}" 11 | description = "GCP private key ID" 12 | type = "SecureString" 13 | value = var.google_private_key_id 14 | tags = local.tags 15 | } 16 | 17 | resource "aws_ssm_parameter" "google_client_email" { 18 | name = "${local.prefix}-google-client-email-${local.short_uuid}" 19 | description = "GCP client email" 20 | type = "SecureString" 21 | value = var.google_client_email 22 | tags = local.tags 23 | } 24 | 25 | resource "aws_ssm_parameter" "google_project_id" { 26 | name = "${local.prefix}-google-project-id-${local.short_uuid}" 27 | description = "GCP project ID" 28 | type = "SecureString" 29 | value = var.google_project_id 30 | tags = local.tags 31 | } 32 | 33 | -------------------------------------------------------------------------------- /terraform/iam.tf: -------------------------------------------------------------------------------- 1 | # Lambda execution role & policy 2 | data "aws_iam_policy_document" "assume_policy" { 3 | statement { 4 | actions = ["sts:AssumeRole"] 5 | principals { 6 | type = "Service" 7 | identifiers = ["lambda.amazonaws.com"] 8 | } 9 | } 10 | } 11 | 12 | resource "aws_iam_role" "lambda_exec_role" { 13 | name = "${local.prefix}_execution_role_${local.short_uuid}" 14 | assume_role_policy = data.aws_iam_policy_document.assume_policy.json 15 | tags = local.tags 16 | } 17 | 18 | # AWS managed policy for write access to X-Ray 19 | data "aws_iam_policy" "xray_write" { 20 | arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess" 21 | } 22 | 23 | data "aws_iam_policy_document" "lambda_permissions" { 24 | statement { 25 | actions = [ 26 | "s3:GetObject", 27 | "s3:PutObject", 28 | "s3:PutObjectAcl", 29 | "s3:ListBucket", 30 | ] 31 | resources = [ 32 | aws_s3_bucket.storage_bucket.arn, 33 | "${aws_s3_bucket.storage_bucket.arn}/*", 34 | aws_s3_bucket.moderation_bucket.arn, 35 | "${aws_s3_bucket.moderation_bucket.arn}/*", 36 | ] 37 | } 38 | 39 | statement { 40 | actions = [ 41 | "sqs:SendMessage", 42 | "sqs:SendMessageBatch", 43 | "sqs:ReceiveMessage", 44 | "sqs:DeleteMessage", 45 | "sqs:GetQueueAttributes", 46 | "sqs:SetQueueAttributes", 47 | "sqs:GetQueueUrl", 48 | ] 49 | resources = [ 50 | aws_sqs_queue.build_queue.arn, 51 | aws_sqs_queue.build_dlq.arn, 52 | ] 53 | } 54 | 55 | statement { 56 | actions = ["ssm:GetParameter"] 57 | resources = [ 58 | 
aws_ssm_parameter.google_private_key.arn, 59 | aws_ssm_parameter.google_client_email.arn, 60 | aws_ssm_parameter.google_private_key_id.arn, 61 | aws_ssm_parameter.google_project_id.arn 62 | ] 63 | } 64 | 65 | statement { 66 | actions = ["ecr:*"] 67 | resources = [aws_ecr_repository.deployment_package.arn] 68 | } 69 | } 70 | 71 | data "aws_iam_policy" "basic_lambda" { 72 | arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" 73 | } 74 | 75 | resource "aws_iam_role_policy" "lambda_permissions" { 76 | role = aws_iam_role.lambda_exec_role.name 77 | policy = data.aws_iam_policy_document.lambda_permissions.json 78 | } 79 | 80 | resource "aws_iam_role_policy_attachment" "basic_lambda" { 81 | role = aws_iam_role.lambda_exec_role.name 82 | policy_arn = data.aws_iam_policy.basic_lambda.arn 83 | } 84 | 85 | resource "aws_iam_role_policy_attachment" "xray_lambda" { 86 | role = aws_iam_role.lambda_exec_role.name 87 | policy_arn = data.aws_iam_policy.xray_write.arn 88 | } 89 | 90 | # S3 replication role & policy 91 | data "aws_iam_policy_document" "replication_assume" { 92 | statement { 93 | actions = ["sts:AssumeRole"] 94 | principals { 95 | type = "Service" 96 | identifiers = ["s3.amazonaws.com"] 97 | } 98 | } 99 | } 100 | 101 | data "aws_iam_policy_document" "replication_permissions" { 102 | statement { 103 | actions = [ 104 | "s3:ListBucket", 105 | "s3:GetReplicationConfiguration", 106 | "s3:GetObjectVersionForReplication", 107 | "s3:GetObjectVersionAcl", 108 | "s3:GetObjectVersionTagging", 109 | "s3:GetObjectRetention", 110 | "s3:GetObjectLegalHold" 111 | ] 112 | resources = [ 113 | aws_s3_bucket.storage_bucket.arn, 114 | "${aws_s3_bucket.storage_bucket.arn}/*", 115 | aws_s3_bucket.storage_bucket_replication.arn, 116 | "${aws_s3_bucket.storage_bucket_replication.arn}/*" 117 | ] 118 | } 119 | 120 | statement { 121 | actions = [ 122 | "s3:ReplicateObject", 123 | "s3:ReplicateDelete", 124 | "s3:ReplicateTags", 125 | "s3:ObjectOwnerOverrideToBucketOwner" 126 | ] 127 | resources = [ 128 | aws_s3_bucket.storage_bucket_replication.arn, 129 | "${aws_s3_bucket.storage_bucket_replication.arn}/*" 130 | ] 131 | } 132 | } 133 | 134 | resource "aws_iam_role" "replication" { 135 | name = "${local.prefix}-replication-role-${local.short_uuid}" 136 | assume_role_policy = data.aws_iam_policy_document.replication_assume.json 137 | } 138 | 139 | resource "aws_iam_role_policy" "replication" { 140 | role = aws_iam_role.replication.name 141 | policy = data.aws_iam_policy_document.replication_permissions.json 142 | } 143 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_uuid" "this" {} 2 | 3 | data "google_dns_managed_zone" "dotland_dns_zone" { 4 | provider = google.dns 5 | name = "deno-land" 6 | } 7 | 8 | data "aws_caller_identity" "this" {} 9 | 10 | locals { 11 | short_uuid = substr(random_uuid.this.result, 0, 8) 12 | prefix = "deno-registry2-${var.env}" 13 | domain_prefix = var.env == "prod" ? "" : "${var.env}." 
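# Seconds; webhook.tf uses this value for both the Lambda timeout and the
# matching API Gateway integration timeout (timeout_milliseconds = value * 1000).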
14 | lambda_default_timeout = 10 15 | ecr_image_url = "${aws_ecr_repository.deployment_package.repository_url}:${var.docker_tag}" 16 | tags = { 17 | "deno.land/x:environment" = var.env 18 | "deno.land/x:instance" = local.short_uuid 19 | "deno.land/x:provisioned-by" = reverse(split(":", data.aws_caller_identity.this.arn))[0] 20 | } 21 | } 22 | 23 | resource "aws_ecr_repository" "deployment_package" { 24 | name = "deno_registry2" 25 | image_tag_mutability = "IMMUTABLE" 26 | tags = local.tags 27 | 28 | image_scanning_configuration { 29 | scan_on_push = true 30 | } 31 | } 32 | 33 | resource "aws_ecr_repository_policy" "deployment_package_policy" { 34 | repository = aws_ecr_repository.deployment_package.name 35 | policy = data.aws_iam_policy_document.lambda_ecr_image_retrieval.json 36 | } 37 | 38 | data "aws_iam_policy_document" "lambda_ecr_image_retrieval" { 39 | statement { 40 | sid = "LambdaECRImageRetrievalPolicy" 41 | actions = [ 42 | "ecr:BatchGetImage", 43 | "ecr:DeleteRepositoryPolicy", 44 | "ecr:GetDownloadUrlForLayer", 45 | "ecr:GetRepositoryPolicy", 46 | "ecr:SetRepositoryPolicy" 47 | ] 48 | principals { 49 | type = "Service" 50 | identifiers = ["lambda.amazonaws.com"] 51 | } 52 | } 53 | } 54 | 55 | resource "aws_s3_bucket" "storage_bucket" { 56 | bucket = "${local.prefix}-storagebucket-${local.short_uuid}" 57 | tags = local.tags 58 | } 59 | 60 | resource "aws_s3_bucket_ownership_controls" "storage_bucket_ownership_controls" { 61 | bucket = aws_s3_bucket.storage_bucket.id 62 | 63 | rule { 64 | object_ownership = "BucketOwnerEnforced" 65 | } 66 | } 67 | 68 | resource "aws_s3_bucket_public_access_block" "storage_bucket_public_access" { 69 | bucket = aws_s3_bucket.storage_bucket.id 70 | block_public_acls = true 71 | block_public_policy = false 72 | ignore_public_acls = true 73 | restrict_public_buckets = false 74 | } 75 | 76 | resource "aws_s3_bucket_policy" "storage_bucket_policy" { 77 | bucket = aws_s3_bucket.storage_bucket.id 78 | policy = data.aws_iam_policy_document.allow_public_read.json 79 | } 80 | 81 | data "aws_iam_policy_document" "allow_public_read" { 82 | statement { 83 | actions = [ 84 | "s3:GetObject" 85 | ] 86 | principals { 87 | type = "*" 88 | identifiers = ["*"] 89 | } 90 | resources = [ 91 | "${aws_s3_bucket.storage_bucket.arn}/*" 92 | ] 93 | } 94 | } 95 | 96 | resource "aws_s3_bucket_cors_configuration" "storage_bucket_cors_configuration" { 97 | bucket = aws_s3_bucket.storage_bucket.id 98 | 99 | cors_rule { 100 | allowed_headers = ["*"] 101 | allowed_methods = ["GET", "HEAD"] 102 | allowed_origins = ["*"] 103 | expose_headers = [] 104 | } 105 | } 106 | 107 | resource "aws_s3_bucket_versioning" "storage_bucket_versioning" { 108 | bucket = aws_s3_bucket.storage_bucket.id 109 | 110 | versioning_configuration { 111 | status = "Enabled" 112 | } 113 | } 114 | 115 | resource "aws_s3_bucket_replication_configuration" "storage_bucket_replication_configuration" { 116 | bucket = aws_s3_bucket.storage_bucket.id 117 | role = aws_iam_role.replication.arn 118 | 119 | rule { 120 | id = "replication-rule" 121 | status = "Enabled" 122 | 123 | delete_marker_replication { 124 | status = "Enabled" 125 | } 126 | 127 | destination { 128 | bucket = aws_s3_bucket.storage_bucket_replication.arn 129 | storage_class = "STANDARD" 130 | 131 | metrics { 132 | status = "Enabled" 133 | } 134 | } 135 | 136 | filter {} 137 | } 138 | 139 | # Bucket versioning must be enabled first. 
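# S3 replicates only from versioned buckets, so the explicit depends_on forces
# the versioning resource to be created before the replication configuration.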
140 | depends_on = [aws_s3_bucket_versioning.storage_bucket_versioning] 141 | } 142 | 143 | resource "aws_s3_bucket" "storage_bucket_replication" { 144 | provider = aws.backup 145 | bucket = "${local.prefix}-storagebucket-replication-${local.short_uuid}" 146 | tags = local.tags 147 | } 148 | 149 | resource "aws_s3_bucket_ownership_controls" "storage_bucket_replication_ownership_controls" { 150 | provider = aws.backup 151 | bucket = aws_s3_bucket.storage_bucket_replication.id 152 | 153 | rule { 154 | object_ownership = "BucketOwnerEnforced" 155 | } 156 | } 157 | 158 | resource "aws_s3_bucket_public_access_block" "storage_replication_bucket_public_access" { 159 | provider = aws.backup 160 | bucket = aws_s3_bucket.storage_bucket_replication.id 161 | block_public_acls = true 162 | block_public_policy = true 163 | ignore_public_acls = true 164 | restrict_public_buckets = true 165 | } 166 | 167 | resource "aws_s3_bucket_lifecycle_configuration" "storage_replication_bucket_lifecycle_configuration" { 168 | provider = aws.backup 169 | bucket = aws_s3_bucket.storage_bucket_replication.id 170 | 171 | rule { 172 | id = "transition-to-standard-ia" 173 | status = "Enabled" 174 | 175 | transition { 176 | days = 30 177 | storage_class = "STANDARD_IA" 178 | } 179 | 180 | } 181 | } 182 | 183 | resource "aws_s3_bucket_versioning" "storage_replication_bucket_versioning" { 184 | provider = aws.backup 185 | bucket = aws_s3_bucket.storage_bucket_replication.id 186 | 187 | versioning_configuration { 188 | status = "Enabled" 189 | } 190 | } 191 | 192 | resource "aws_s3_bucket" "moderation_bucket" { 193 | bucket = "${local.prefix}-moderationbucket-${local.short_uuid}" 194 | tags = local.tags 195 | } 196 | 197 | resource "aws_s3_bucket_public_access_block" "moderation_bucket_public_access" { 198 | bucket = aws_s3_bucket.moderation_bucket.id 199 | block_public_acls = true 200 | block_public_policy = true 201 | ignore_public_acls = true 202 | restrict_public_buckets = true 203 | } 204 | 205 | resource "aws_s3_bucket_ownership_controls" "moderation_bucket_ownership_controls" { 206 | bucket = aws_s3_bucket.moderation_bucket.id 207 | 208 | rule { 209 | object_ownership = "BucketOwnerEnforced" 210 | } 211 | } 212 | 213 | resource "aws_s3_bucket_versioning" "moderation_bucket_versioning" { 214 | bucket = aws_s3_bucket.moderation_bucket.id 215 | 216 | versioning_configuration { 217 | status = "Enabled" 218 | } 219 | } 220 | 221 | 222 | resource "aws_sqs_queue" "build_queue" { 223 | name = "${local.prefix}-build-queue-${local.short_uuid}" 224 | max_message_size = 2048 225 | message_retention_seconds = 86400 226 | tags = local.tags 227 | visibility_timeout_seconds = var.sqs_visibility_delay 228 | 229 | redrive_policy = jsonencode({ 230 | deadLetterTargetArn = aws_sqs_queue.build_dlq.arn 231 | maxReceiveCount = 5 232 | }) 233 | } 234 | 235 | resource "aws_sqs_queue" "build_dlq" { 236 | name = "${local.prefix}-build-dlq-${local.short_uuid}" 237 | max_message_size = 2048 238 | message_retention_seconds = 86400 239 | visibility_timeout_seconds = var.sqs_visibility_delay 240 | tags = local.tags 241 | } 242 | -------------------------------------------------------------------------------- /terraform/meta.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | } 7 | archive = { 8 | source = "hashicorp/archive" 9 | } 10 | google = { 11 | source = "hashicorp/google" 12 | version = ">= 
4.0.0, < 5.0.0" 13 | } 14 | random = { 15 | source = "hashicorp/random" 16 | } 17 | } 18 | backend "s3" { 19 | key = "terraform.tfstate" 20 | } 21 | } 22 | 23 | resource "aws_s3_bucket" "terraform_state" { 24 | bucket = "${local.prefix}-terraform-state-${local.short_uuid}" 25 | tags = local.tags 26 | } 27 | 28 | resource "aws_s3_bucket_public_access_block" "terraform_state_public_access" { 29 | bucket = aws_s3_bucket.terraform_state.id 30 | block_public_acls = true 31 | block_public_policy = true 32 | ignore_public_acls = true 33 | restrict_public_buckets = true 34 | } 35 | 36 | resource "aws_s3_bucket_ownership_controls" "terraform_state_ownership_controls" { 37 | bucket = aws_s3_bucket.terraform_state.id 38 | 39 | rule { 40 | object_ownership = "BucketOwnerEnforced" 41 | } 42 | } 43 | 44 | resource "aws_s3_bucket_versioning" "terraform_state_versioning" { 45 | bucket = aws_s3_bucket.terraform_state.id 46 | 47 | versioning_configuration { 48 | status = "Enabled" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /terraform/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/denoland/deno_registry2/272401ed856b25aadd5283cb94450888b915de44/terraform/outputs.tf -------------------------------------------------------------------------------- /terraform/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.aws_default_region 3 | } 4 | 5 | provider "aws" { 6 | alias = "backup" 7 | region = var.aws_backup_region 8 | } 9 | 10 | provider "google" { 11 | alias = "dns" 12 | project = "misc-dns" 13 | } 14 | -------------------------------------------------------------------------------- /terraform/publish.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lambda_function" "async_publish" { 2 | package_type = "Image" 3 | image_uri = local.ecr_image_url 4 | function_name = "${local.prefix}_async_publish_${local.short_uuid}" 5 | role = aws_iam_role.lambda_exec_role.arn 6 | publish = false 7 | timeout = 300 8 | memory_size = 1024 9 | 10 | image_config { 11 | command = ["api/async/publish.handler"] 12 | } 13 | 14 | environment { 15 | variables = { 16 | "DENO_UNSTABLE" = "1" 17 | "STORAGE_BUCKET" = aws_s3_bucket.storage_bucket.id 18 | "APILAND_URL" = "https://apiland.deno.dev/webhook" 19 | "APILAND_AUTH_TOKEN" = var.apiland_auth_token 20 | "GOOGLE_PRIVATE_KEY_SSM" = aws_ssm_parameter.google_private_key.name 21 | "GOOGLE_CLIENT_EMAIL_SSM" = aws_ssm_parameter.google_client_email.name 22 | "GOOGLE_PRIVATE_KEY_ID_SSM" = aws_ssm_parameter.google_private_key_id.name 23 | "GOOGLE_PROJECT_ID_SSM" = aws_ssm_parameter.google_project_id.name 24 | 25 | } 26 | } 27 | 28 | tags = local.tags 29 | } 30 | 31 | resource "aws_lambda_event_source_mapping" "async_publish" { 32 | batch_size = 1 33 | event_source_arn = aws_sqs_queue.build_queue.arn 34 | enabled = true 35 | function_name = aws_lambda_function.async_publish.arn 36 | } 37 | 38 | resource "aws_lambda_permission" "async_publish" { 39 | action = "lambda:InvokeFunction" 40 | function_name = aws_lambda_function.async_publish.function_name 41 | principal = "sqs.amazonaws.com" 42 | source_arn = aws_sqs_queue.build_queue.arn 43 | } 44 | -------------------------------------------------------------------------------- /terraform/terraform.tfvars.example: 
-------------------------------------------------------------------------------- 1 | env = "staging" 2 | apiland_auth_token = "123456789" 3 | aws_backup_region = "eu-central-1" 4 | aws_default_region = "us-east-1" 5 | docker_tag = "latest" 6 | github_token = "xxxxxxxxxxxxxxxxxxxxxxxxxx" 7 | google_private_key = "xxxxxxxxxxxxxxxxxxxxxxxxxx" 8 | google_private_key_id = "eec20ed25e08ac2ff98125cc7bfefa101be1158d" 9 | google_client_email = "foo@example.com" 10 | google_project_id = "deno_registry2" 11 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | description = "The deployment environment (prod, staging)" 3 | type = string 4 | } 5 | 6 | variable "apiland_auth_token" { 7 | description = "Authorization token for using the apiland webhook" 8 | type = string 9 | } 10 | 11 | variable "aws_backup_region" { 12 | description = "The AWS region used for backups" 13 | type = string 14 | } 15 | 16 | variable "aws_default_region" { 17 | description = "The AWS region used for most of the infrastructure" 18 | type = string 19 | } 20 | 21 | variable "docker_tag" { 22 | description = "ECR image tag" 23 | type = string 24 | } 25 | 26 | variable "github_token" { 27 | description = "GitHub personal access token" 28 | type = string 29 | } 30 | 31 | variable "google_private_key" { 32 | description = "GCP private key" 33 | type = string 34 | } 35 | 36 | variable "google_private_key_id" { 37 | description = "GCP private key ID" 38 | type = string 39 | } 40 | 41 | variable "google_client_email" { 42 | description = "GCP client email" 43 | type = string 44 | } 45 | 46 | variable "google_project_id" { 47 | description = "GCP project ID" 48 | type = string 49 | } 50 | 51 | variable "sqs_visibility_delay" { 52 | description = "SQS delay before messages become visible again" 53 | type = number 54 | default = 301 55 | } 56 | -------------------------------------------------------------------------------- /terraform/webhook.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lambda_function" "webhook_github" { 2 | package_type = "Image" 3 | image_uri = local.ecr_image_url 4 | function_name = "${local.prefix}_webhook_github_${local.short_uuid}" 5 | role = aws_iam_role.lambda_exec_role.arn 6 | publish = false 7 | timeout = local.lambda_default_timeout 8 | memory_size = 128 9 | 10 | image_config { 11 | command = ["api/webhook/github.handler"] 12 | } 13 | 14 | environment { 15 | variables = { 16 | "DENO_UNSTABLE" = "1" 17 | "STORAGE_BUCKET" = aws_s3_bucket.storage_bucket.id 18 | "MODERATION_BUCKET" = aws_s3_bucket.moderation_bucket.id 19 | "APILAND_URL" = "https://apiland.deno.dev/webhook" 20 | "APILAND_AUTH_TOKEN" = var.apiland_auth_token 21 | "BUILD_QUEUE" = aws_sqs_queue.build_queue.id 22 | "GOOGLE_PRIVATE_KEY_SSM" = aws_ssm_parameter.google_private_key.name 23 | "GOOGLE_CLIENT_EMAIL_SSM" = aws_ssm_parameter.google_client_email.name 24 | "GOOGLE_PRIVATE_KEY_ID_SSM" = aws_ssm_parameter.google_private_key_id.name 25 | "GOOGLE_PROJECT_ID_SSM" = aws_ssm_parameter.google_project_id.name 26 | 27 | } 28 | } 29 | 30 | tags = local.tags 31 | } 32 | 33 | resource "aws_lambda_permission" "webhook_github" { 34 | action = "lambda:InvokeFunction" 35 | function_name = aws_lambda_function.webhook_github.function_name 36 | principal = "apigateway.amazonaws.com" 37 | source_arn = "${aws_apigatewayv2_api.deno_api.execution_arn}/*/*" 38
| } 39 | 40 | resource "aws_apigatewayv2_integration" "webhook_github" { 41 | api_id = aws_apigatewayv2_api.deno_api.id 42 | integration_type = "AWS_PROXY" 43 | 44 | connection_type = "INTERNET" 45 | integration_uri = aws_lambda_function.webhook_github.invoke_arn 46 | payload_format_version = "2.0" 47 | timeout_milliseconds = local.lambda_default_timeout * 1000 48 | } 49 | 50 | resource "aws_apigatewayv2_route" "webhook_github" { 51 | api_id = aws_apigatewayv2_api.deno_api.id 52 | route_key = "POST /webhook/gh/{name}" 53 | target = "integrations/${aws_apigatewayv2_integration.webhook_github.id}" 54 | } 55 | -------------------------------------------------------------------------------- /test_deps.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | export { 4 | assert, 5 | assertEquals, 6 | assertThrows, 7 | } from "https://deno.land/std@0.149.0/testing/asserts.ts"; 8 | -------------------------------------------------------------------------------- /utils/database_test.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | import { assert, assertEquals } from "../test_deps.ts"; 4 | import { 5 | Database as Datastore, 6 | kinds, 7 | NewBuild, 8 | OwnerQuota, 9 | } from "./datastore_database.ts"; 10 | import { cleanupDatabase } from "./test_utils.ts"; 11 | 12 | const datastore = new Datastore(); 13 | 14 | const build1: Omit<NewBuild, "id"> = { 15 | module: "ltest", 16 | version: "0.4.0", 17 | upload_options: { 18 | type: "github", 19 | repository: "luca-rand/testing", 20 | ref: "v0.4.0", 21 | subdir: "subdir1/", 22 | }, 23 | status: "success", 24 | message: "bla bla bla", 25 | created_at: new Date(), 26 | }; 27 | 28 | const build2: Omit<NewBuild, "id"> = { 29 | module: "wtest", 30 | version: "0.4.0", 31 | upload_options: { 32 | type: "github", 33 | repository: "wperron-rand/testing", 34 | ref: "v0.4.0", 35 | subdir: "subdir1/", 36 | }, 37 | status: "success", 38 | message: "bla bla bla", 39 | created_at: new Date(), 40 | }; 41 | 42 | Deno.test({ 43 | name: "add, update, and get builds in database", 44 | async fn() { 45 | try { 46 | const id = await datastore.createBuild(build1); 47 | const build = await datastore.getBuild(id); 48 | assert(build); 49 | assert(build.created_at); 50 | assertEquals( 51 | build, 52 | { 53 | ...build1, 54 | id, 55 | created_at: build.created_at, 56 | }, 57 | ); 58 | } finally { 59 | await cleanupDatabase(datastore); 60 | } 61 | }, 62 | }); 63 | 64 | Deno.test({ 65 | ignore: true, 66 | name: "count builds", 67 | async fn() { 68 | try { 69 | // check there are no versions in a clean database 70 | let count = await datastore.countAllBuilds(); 71 | assertEquals(count, 0); 72 | 73 | // check count after adding 1 build 74 | const id = await datastore.createBuild(build1); 75 | const build = (await datastore.getBuild(id))!; 76 | count = await datastore.countAllBuilds(); 77 | assertEquals(count, 1); 78 | 79 | // check count after adding 5 new versions 80 | for (let i = 5; i < 10; i++) { 81 | build.upload_options.ref = `v.0.${i}.0`; 82 | build.version = `0.${i}.0`; 83 | await datastore.createBuild(build); 84 | } 85 | 86 | count = await datastore.countAllBuilds(); 87 | assertEquals(count, 6); 88 | 89 | // check count after adding second module 90 | await datastore.createBuild(build2); 91 | count = await datastore.countAllBuilds(); 92 | assertEquals(count, 7); 93
finally { 94 | await cleanupDatabase(datastore); 95 | } 96 | }, 97 | }); 98 | 99 | const ownerQuota1: OwnerQuota = { 100 | owner: "luca-rand", 101 | type: "github", 102 | // deno-lint-ignore camelcase 103 | max_modules: 5, 104 | // deno-lint-ignore camelcase 105 | max_total_size: undefined, 106 | blocked: false, 107 | }; 108 | 109 | Deno.test({ 110 | name: "add and get owner quotas in database", 111 | async fn() { 112 | await datastore.saveOwnerQuota(ownerQuota1); 113 | const ownerQuota = await datastore.getOwnerQuota( 114 | ownerQuota1.owner, 115 | ); 116 | assertEquals( 117 | ownerQuota, 118 | { 119 | owner: "luca-rand", 120 | type: "github", 121 | max_modules: 5, 122 | blocked: false, 123 | }, 124 | ); 125 | 126 | const key = datastore.db.key([kinds.LEGACY_OWNER_QUOTAS, "luca-rand"]); 127 | 128 | for await ( 129 | const _ of datastore.db.commit([{ delete: key }], { 130 | transactional: false, 131 | }) 132 | ) { 133 | // empty 134 | } 135 | }, 136 | }); 137 | -------------------------------------------------------------------------------- /utils/datastore_database.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | import { 4 | Datastore, 5 | datastoreValueToValue, 6 | entityToObject, 7 | objectSetKey, 8 | objectToEntity, 9 | SSM, 10 | } from "../deps.ts"; 11 | 12 | export interface OwnerQuota { 13 | owner: string; 14 | type: string; 15 | // deno-lint-ignore camelcase 16 | max_modules: number; 17 | // deno-lint-ignore camelcase 18 | max_total_size?: number; 19 | blocked: boolean; 20 | note?: string; 21 | } 22 | 23 | export interface Module { 24 | name: string; 25 | type: string; 26 | // deno-lint-ignore camelcase 27 | repo_id: number; 28 | owner: string; 29 | repo: string; 30 | description: string; 31 | // deno-lint-ignore camelcase 32 | star_count: number; 33 | // deno-lint-ignore camelcase 34 | is_unlisted: boolean; 35 | // deno-lint-ignore camelcase 36 | created_at: Date; 37 | } 38 | 39 | export type BuildStatus = 40 | | "queued" 41 | | "success" 42 | | "error" 43 | | "publishing" 44 | | "analyzing_dependencies"; 45 | 46 | export interface NewBuild { 47 | id: string; 48 | module: string; 49 | version: string; 50 | status: BuildStatus; 51 | message?: string; 52 | created_at: Date; 53 | upload_options: { 54 | type: string; 55 | repository: string; 56 | ref: string; 57 | subdir?: string; 58 | }; 59 | } 60 | 61 | export const kinds = { 62 | /** An object which contains information about the usage of built-in APIs. */ 63 | LEGACY_MODULES: "legacy_modules", 64 | LEGACY_OWNER_QUOTAS: "legacy_owner_quotas", 65 | 66 | BUILD: "build", 67 | }; 68 | 69 | let ssm; 70 | try { 71 | ssm = new SSM({ 72 | region: Deno.env.get("AWS_REGION")!, 73 | accessKeyID: Deno.env.get("AWS_ACCESS_KEY_ID")!, 74 | secretKey: Deno.env.get("AWS_SECRET_ACCESS_KEY")!, 75 | sessionToken: Deno.env.get("AWS_SESSION_TOKEN")!, 76 | endpointURL: Deno.env.get("SSM_ENDPOINT_URL")!, 77 | }); 78 | } catch { 79 | // 80 | } 81 | 82 | const googlePrivateKeySecret = await ssm?.getParameter({ 83 | Name: Deno.env.get("GOOGLE_PRIVATE_KEY_SSM") ?? "", 84 | WithDecryption: true, 85 | }); 86 | const GOOGLE_PRIVATE_KEY = googlePrivateKeySecret?.Parameter?.Value; 87 | 88 | const googleClientEmailSecret = await ssm?.getParameter({ 89 | Name: Deno.env.get("GOOGLE_CLIENT_EMAIL_SSM") ?? 
"", 90 | WithDecryption: true, 91 | }); 92 | const GOOGLE_CLIENT_EMAIL = googleClientEmailSecret?.Parameter?.Value; 93 | 94 | const googlePrivateKeyIdSecret = await ssm?.getParameter({ 95 | Name: Deno.env.get("GOOGLE_PRIVATE_KEY_ID_SSM") ?? "", 96 | WithDecryption: true, 97 | }); 98 | const GOOGLE_PRIVATE_KEY_ID = googlePrivateKeyIdSecret?.Parameter?.Value; 99 | 100 | const googleProjectIdSecret = await ssm?.getParameter({ 101 | Name: Deno.env.get("GOOGLE_PROJECT_ID_SSM") ?? "", 102 | WithDecryption: true, 103 | }); 104 | const GOOGLE_PROJECT_ID = googleProjectIdSecret?.Parameter?.Value; 105 | 106 | export class Database { 107 | db: Datastore; 108 | 109 | constructor() { 110 | const privateKey = GOOGLE_PRIVATE_KEY ?? 111 | Deno.env.get("GOOGLE_PRIVATE_KEY") ?? ""; 112 | const keys = { 113 | client_email: GOOGLE_CLIENT_EMAIL ?? 114 | Deno.env.get("GOOGLE_CLIENT_EMAIL") ?? "", 115 | private_key: 116 | (privateKey.startsWith(`"`) 117 | ? JSON.parse(privateKey) 118 | : privateKey) as string, 119 | private_key_id: GOOGLE_PRIVATE_KEY_ID ?? 120 | Deno.env.get("GOOGLE_PRIVATE_KEY_ID") ?? "", 121 | project_id: GOOGLE_PROJECT_ID ?? Deno.env.get("GOOGLE_PROJECT_ID") ?? "", 122 | datastore_host: Deno.env.get("DATASTORE_HOST"), 123 | }; 124 | this.db = new Datastore(keys); 125 | } 126 | 127 | async getOwnerQuota( 128 | owner: string, 129 | ): Promise { 130 | const result = await this.db.lookup( 131 | this.db.key([kinds.LEGACY_OWNER_QUOTAS, owner]), 132 | ); 133 | 134 | if (result.found && result.found.length) { 135 | return entityToObject(result.found[0].entity); 136 | } else { 137 | return null; 138 | } 139 | } 140 | 141 | async saveOwnerQuota( 142 | ownerQuota: OwnerQuota, 143 | ): Promise { 144 | const key = this.db.key([kinds.LEGACY_OWNER_QUOTAS, ownerQuota.owner]); 145 | objectSetKey(ownerQuota, key); 146 | 147 | for await ( 148 | const _ of this.db.commit([{ upsert: objectToEntity(ownerQuota) }], { 149 | transactional: false, 150 | }) 151 | ) { 152 | // empty 153 | } 154 | } 155 | 156 | async getModule(name: string): Promise { 157 | const result = await this.db.lookup( 158 | this.db.key([kinds.LEGACY_MODULES, name]), 159 | ); 160 | 161 | if (result.found && result.found.length) { 162 | return entityToObject(result.found[0].entity); 163 | } else { 164 | return null; 165 | } 166 | } 167 | 168 | async saveModule(module: Module): Promise { 169 | const key = this.db.key([kinds.LEGACY_MODULES, module.name]); 170 | objectSetKey(module, key); 171 | 172 | for await ( 173 | const _ of this.db.commit([{ upsert: objectToEntity(module) }], { 174 | transactional: false, 175 | }) 176 | ) { 177 | // empty 178 | } 179 | } 180 | 181 | // tests only 182 | async countAllBuilds(): Promise { 183 | const query = await this.db.runGqlAggregationQuery({ 184 | queryString: `SELECT COUNT(*) FROM ${kinds.BUILD}`, 185 | }); 186 | return datastoreValueToValue( 187 | query.batch.aggregationResults[0].aggregateProperties.property_1, 188 | ) as number; 189 | } 190 | 191 | async getBuild(id: string): Promise { 192 | const result = await this.db.lookup( 193 | this.db.key([kinds.BUILD, id]), 194 | ); 195 | 196 | if (result.found && result.found.length) { 197 | return entityToObject(result.found[0].entity); 198 | } else { 199 | return null; 200 | } 201 | } 202 | 203 | async createBuild(build: Omit): Promise { 204 | const id = crypto.randomUUID(); 205 | // @ts-ignore temporary solution 206 | build.id = id; 207 | 208 | objectSetKey(build, this.db.key([kinds.BUILD, id])); 209 | 210 | for await ( 211 | const _ of this.db.commit([{ 
upsert: objectToEntity(build) }], { 212 | transactional: false, 213 | }) 214 | ) { 215 | // empty 216 | } 217 | 218 | return id; 219 | } 220 | 221 | async saveBuild(build: NewBuild) { 222 | objectSetKey(build, this.db.key([kinds.BUILD, build.id])); 223 | 224 | for await ( 225 | const _ of this.db.commit([{ upsert: objectToEntity(build) }], { 226 | transactional: false, 227 | }) 228 | ) { 229 | // empty 230 | } 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /utils/git.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | import { join } from "../deps.ts"; 3 | 4 | export async function clone( 5 | url: string, 6 | tag: string, 7 | subdir?: string, 8 | ): Promise<string> { 9 | const tmp = await Deno.makeTempDir(); 10 | const cmd = [ 11 | "git", 12 | "clone", 13 | "--depth=1", 14 | "--filter=blob:none", 15 | "--sparse", 16 | // TODO(lucacasonato): re-enable, this is too slow for the moment 17 | // "--recursive", 18 | `--branch=${tag}`, 19 | url, 20 | tmp, 21 | ]; 22 | console.log("$", ...cmd); 23 | const clone = Deno.run({ 24 | cmd, 25 | stdout: "inherit", 26 | stderr: "inherit", 27 | }); 28 | // TODO: better error handling 29 | const cloneRes = await clone.status(); 30 | clone.close(); 31 | if (!cloneRes.success) { 32 | throw new Error(`Failed to clone git repository ${url} at tag ${tag}`); 33 | } 34 | 35 | const cmd2 = ["git", "sparse-checkout", "init", "--no-cone"]; 36 | console.log("$", ...cmd2); 37 | const sparseInit = Deno.run({ 38 | cwd: tmp, 39 | cmd: cmd2, 40 | stdout: "inherit", 41 | stderr: "inherit", 42 | }); 43 | const sparseInitRes = await sparseInit.status(); 44 | sparseInit.close(); 45 | if (!sparseInitRes.success) { 46 | throw new Error( 47 | `Failed to init sparse checkout in git repository ${url} at tag ${tag}`, 48 | ); 49 | } 50 | 51 | const dir = subdir === undefined ? "/*" : join("/", subdir); 52 | const cmd3 = ["git", "sparse-checkout", "set", dir]; 53 | console.log("$", ...cmd3); 54 | const checkout = Deno.run({ 55 | cwd: tmp, 56 | cmd: cmd3, 57 | stdout: "inherit", 58 | stderr: "inherit", 59 | }); 60 | // TODO: better error handling 61 | const checkoutRes = await checkout.status(); 62 | checkout.close(); 63 | if (!checkoutRes.success) { 64 | throw new Error( 65 | `Failed to sparse checkout ${dir} from git repository ${url} at tag ${tag}`, 66 | ); 67 | } 68 | return tmp; 69 | } 70 | -------------------------------------------------------------------------------- /utils/http.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
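// Helpers for Lambda handlers behind API Gateway (HTTP API payload v2):
// respondJSON stamps a JSON content-type onto a structured result, and
// parseRequestBody normalizes base64- and form-encoded webhook bodies.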
2 | 3 | import type { 4 | APIGatewayProxyEventV2, 5 | APIGatewayProxyResultV2, 6 | APIGatewayProxyStructuredResultV2, 7 | } from "../deps.ts"; 8 | 9 | export function respondJSON( 10 | result: APIGatewayProxyStructuredResultV2, 11 | ): APIGatewayProxyResultV2 { 12 | return { 13 | ...result, 14 | headers: { 15 | "content-type": "application/json", 16 | ...result.headers, 17 | }, 18 | }; 19 | } 20 | 21 | export function parseRequestBody( 22 | event: APIGatewayProxyEventV2, 23 | ): APIGatewayProxyEventV2 { 24 | if (event.isBase64Encoded && event.body) { 25 | event.body = atob(event.body); 26 | event.isBase64Encoded = false; 27 | } 28 | 29 | const headers = new Headers(event.headers as Record<string, string>); 30 | if ( 31 | headers.get("content-type") === "application/x-www-form-urlencoded" && 32 | event.body 33 | ) { 34 | event.body = new URLSearchParams(event.body).get("payload") ?? undefined; 35 | } 36 | return event; 37 | } 38 | -------------------------------------------------------------------------------- /utils/moderation.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | export function isForbidden( 3 | moduleName: string, 4 | badwords: Array<string>, 5 | ): boolean { 6 | for (const word of badwords) { 7 | const e = new RegExp(`(^|_)(${word})($|_)`); 8 | if (e.test(moduleName)) return true; 9 | } 10 | return false; 11 | } 12 | -------------------------------------------------------------------------------- /utils/moderation_test.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | import { assertEquals } from "../test_deps.ts"; 3 | import { isForbidden } from "./moderation.ts"; 4 | 5 | Deno.test({ 6 | name: "test valid name", 7 | fn() { 8 | const badwords = ["foo", "bar", "baz"]; 9 | assertEquals(isForbidden("testing", badwords), false); 10 | }, 11 | }); 12 | 13 | Deno.test({ 14 | name: "test forbidden name", 15 | fn() { 16 | const badwords = ["foo", "bar", "baz"]; 17 | assertEquals(isForbidden("bar", badwords), true); 18 | }, 19 | }); 20 | 21 | Deno.test({ 22 | name: "test forbidden word combination", 23 | fn() { 24 | const badwords = ["frozen_yogurt", "bouncy_castle"]; 25 | assertEquals(isForbidden("frozen_yogurt", badwords), true); 26 | }, 27 | }); 28 | 29 | Deno.test({ 30 | name: "test forbidden name with other valid words", 31 | fn() { 32 | const badwords = ["foo", "bar", "baz", "zen", "frozen_yogurt"]; 33 | assertEquals(isForbidden("lots_of_foo", badwords), true); 34 | assertEquals(isForbidden("foo_is_great", badwords), true); 35 | assertEquals(isForbidden("the_zen_of_deno", badwords), true); 36 | assertEquals(isForbidden("love_frozen_yogurt_a_lot", badwords), true); 37 | }, 38 | }); 39 | 40 | Deno.test({ 41 | name: "test valid name containing forbidden parts", 42 | fn() { 43 | const badwords = ["foo"]; 44 | assertEquals(isForbidden("foosball", badwords), false); 45 | assertEquals(isForbidden("bigfoot", badwords), false); 46 | }, 47 | }); 48 | -------------------------------------------------------------------------------- /utils/net.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
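// Minimal IPv4/CIDR arithmetic: ip4ToInt packs a dotted quad into an unsigned
// 32-bit integer, so CIDR membership reduces to a mask-and-compare.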
2 | 3 | export function ip4ToInt(ip: string) { 4 | const octs_ = ip.split("."); 5 | if (octs_.length !== 4) throw new Error(`Invalid IP address ${ip}`); 6 | const oct = octs_.map((oct_) => { 7 | const oct = parseInt(oct_, 10); 8 | if (oct > 255 || oct < 0) throw new Error(`Invalid IP address ${ip}`); 9 | return oct; 10 | }); 11 | return oct.reduce( 12 | (int, oct) => (int << 8) + oct, 13 | 0, 14 | ) >>> 0; 15 | } 16 | 17 | export function isIp4InCidr(ip: string) { 18 | return (cidr: string) => { 19 | const [range, bits = "32"] = cidr.split("/"); 20 | const mask = ~(2 ** (32 - parseInt(bits, 10)) - 1); 21 | return (ip4ToInt(ip) & mask) === (ip4ToInt(range) & mask); 22 | }; 23 | } 24 | 25 | export function isIp4InCidrs(ip: string, cidrs: string[]) { 26 | return cidrs.some(isIp4InCidr(ip)); 27 | } 28 | -------------------------------------------------------------------------------- /utils/net_test.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | import { assert, assertEquals, assertThrows } from "../test_deps.ts"; 4 | import { ip4ToInt, isIp4InCidr } from "./net.ts"; 5 | 6 | Deno.test({ 7 | name: "ipv4 parsing", 8 | fn() { 9 | assert(ip4ToInt("1.1.1.1")); 10 | assertThrows(() => ip4ToInt("1.1.1.1.1")); 11 | assertThrows(() => ip4ToInt("1.1.1.-1")); 12 | assertThrows(() => ip4ToInt("1.1.1.300")); 13 | }, 14 | }); 15 | 16 | Deno.test({ 17 | name: "ipv4 in cidr matches", 18 | fn() { 19 | assertEquals(isIp4InCidr("1.1.1.1")("0.0.0.0/0"), true); 20 | assertEquals(isIp4InCidr("1.1.1.1")("1.1.1.0/24"), true); 21 | assertEquals(isIp4InCidr("1.1.1.1")("1.1.1.0/31"), true); 22 | assertEquals(isIp4InCidr("1.1.1.1")("1.1.1.0/32"), false); 23 | assertEquals(isIp4InCidr("1.1.1.1")("1.2.1.0/31"), false); 24 | }, 25 | }); 26 | -------------------------------------------------------------------------------- /utils/storage.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | 3 | import { join, lookup, S3Bucket } from "../deps.ts"; 4 | 5 | export const s3 = new S3Bucket( 6 | { 7 | bucket: Deno.env.get("STORAGE_BUCKET")!, 8 | region: Deno.env.get("AWS_REGION")!, 9 | accessKeyID: Deno.env.get("AWS_ACCESS_KEY_ID")!, 10 | secretKey: Deno.env.get("AWS_SECRET_ACCESS_KEY")!, 11 | sessionToken: Deno.env.get("AWS_SESSION_TOKEN"), 12 | endpointURL: Deno.env.get("S3_ENDPOINT_URL"), 13 | }, 14 | ); 15 | 16 | export async function getMeta( 17 | module: string, 18 | file: string, 19 | ): Promise<Uint8Array | undefined> { 20 | const resp = await s3.getObject( 21 | join(module, "meta", file), 22 | {}, 23 | ); 24 | if (resp === undefined) return undefined; 25 | const data = await new Response(resp.body).arrayBuffer(); 26 | return new Uint8Array(data); 27 | } 28 | 29 | const encoder = new TextEncoder(); 30 | 31 | export async function uploadMetaJson( 32 | module: string, 33 | file: string, 34 | data: unknown, 35 | ): Promise<{ etag: string }> { 36 | const resp = await s3.putObject( 37 | join(module, "meta", file), 38 | encoder.encode(JSON.stringify(data)), 39 | { 40 | // Global module meta data must always be fresh.
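// (A 10-second TTL with must-revalidate keeps clients and the CDN close to the source of truth.)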
41 | cacheControl: "max-age=10, must-revalidate", 42 | contentType: "application/json", 43 | }, 44 | ); 45 | return { etag: resp.etag }; 46 | } 47 | 48 | export async function uploadVersionRaw( 49 | module: string, 50 | version: string, 51 | file: string, 52 | contents: Uint8Array, 53 | ): Promise<{ etag: string }> { 54 | const type = lookup(file) ?? 55 | (file.endsWith(".tsx") 56 | ? "application/typescript; charset=utf-8" 57 | : file.endsWith(".jsx") 58 | ? "application/javascript; charset=utf-8" 59 | : "application/octet-stream"); 60 | const resp = await s3.putObject( 61 | join(module, "versions", version, "raw", file), 62 | contents, 63 | { 64 | // Versioned files can be cached indefinitely. (1 year) 65 | cacheControl: "public, max-age=31536000, immutable", 66 | contentType: type === "video/mp2t" 67 | ? "application/typescript; charset=utf-8" 68 | : type === "text/jsx" 69 | ? "application/javascript; charset=utf-8" 70 | : type, 71 | }, 72 | ); 73 | return { etag: resp.etag }; 74 | } 75 | 76 | export async function uploadVersionMetaJson( 77 | module: string, 78 | version: string, 79 | data: unknown, 80 | ): Promise<{ etag: string }> { 81 | const resp = await s3.putObject( 82 | join(module, "versions", version, "meta", "meta.json"), 83 | encoder.encode(JSON.stringify(data)), 84 | { 85 | // Immutable files can be cached indefinitely. (1 year) 86 | cacheControl: "public, max-age=31536000, immutable", 87 | contentType: "application/json", 88 | }, 89 | ); 90 | return { etag: resp.etag }; 91 | } 92 | -------------------------------------------------------------------------------- /utils/test_utils.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
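// Shared test fixtures: a one-shot mock of the apiland webhook endpoint,
// factories for API Gateway / SQS / scheduled events and Lambda contexts,
// and a Datastore cleanup helper.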
2 | import { 3 | type APIGatewayProxyEventV2, 4 | type Context, 5 | objectGetKey, 6 | type ScheduledEvent, 7 | type SQSEvent, 8 | } from "../deps.ts"; 9 | import { assert } from "../test_deps.ts"; 10 | import { Database as Datastore, kinds } from "./datastore_database.ts"; 11 | 12 | interface KV { 13 | [key: string]: string; 14 | } 15 | 16 | export function createApiLandMock() { 17 | const { port } = new URL(Deno.env.get("APILAND_URL")!); 18 | const authToken = Deno.env.get("APILAND_AUTH_TOKEN"); 19 | 20 | const listener = Deno.listen({ port: parseInt(port, 10) }); 21 | 22 | (async () => { 23 | const conn = await listener.accept(); 24 | const httpConn = Deno.serveHttp(conn); 25 | const requestEvent = await httpConn.nextRequest(); 26 | if (requestEvent) { 27 | const { request, respondWith } = requestEvent; 28 | try { 29 | assert(request.method === "POST"); 30 | assert(request.headers.get("content-type") === "application/json"); 31 | const body = await request.json(); 32 | assert( 33 | request.headers.get("authorization")?.toLowerCase() === 34 | `bearer ${authToken}`, 35 | ); 36 | assert(body.event === "create"); 37 | assert(typeof body.module === "string"); 38 | assert(typeof body.version === "string"); 39 | await respondWith( 40 | new Response( 41 | JSON.stringify({ 42 | "result": "enqueued", 43 | "id": 1, 44 | }), 45 | { headers: { "content-type": "application/json" } }, 46 | ), 47 | ); 48 | } catch (e) { 49 | if (e instanceof Error) { 50 | await respondWith( 51 | new Response(`${e.message}\n${e.stack}`, { status: 401 }), 52 | ); 53 | } else { 54 | await respondWith(new Response("ooops!", { status: 401 })); 55 | } 56 | } 57 | } 58 | httpConn.close(); 59 | try { 60 | conn.close(); 61 | } catch { 62 | // just swallow 63 | } 64 | try { 65 | listener.close(); 66 | } catch { 67 | // just swallow 68 | } 69 | })(); 70 | } 71 | 72 | export function createAPIGatewayProxyEventV2( 73 | method: string, 74 | rawPath: string, 75 | { data, headers, pathParameters, queryStringParameters, isBase64Encoded }: { 76 | data?: unknown; 77 | headers?: KV; 78 | pathParameters?: KV; 79 | queryStringParameters?: KV; 80 | isBase64Encoded?: boolean; 81 | }, 82 | ): APIGatewayProxyEventV2 { 83 | const queryString = new URLSearchParams(queryStringParameters).toString(); 84 | return { 85 | version: "2", 86 | routeKey: "", 87 | headers: headers ?? {}, 88 | body: data 89 | ? (typeof data === "string" ? data : JSON.stringify(data)) 90 | : undefined, 91 | isBase64Encoded: isBase64Encoded ?? false, 92 | rawPath: rawPath, 93 | rawQueryString: queryString, 94 | requestContext: { 95 | accountId: "", 96 | apiId: "", 97 | domainName: "api.deno.land", 98 | domainPrefix: "", 99 | http: { 100 | method, 101 | path: rawPath + (queryString ? "?" + queryString : ""), 102 | protocol: "http", 103 | sourceIp: "192.30.252.10", 104 | userAgent: (headers ? headers["User-Agent"] : undefined) ?? 
105 | "Deno/1.2.2", 106 | }, 107 | routeKey: "", 108 | requestId: "xyztest", 109 | stage: "$default", 110 | time: new Date().toISOString(), 111 | timeEpoch: new Date().getTime(), 112 | }, 113 | pathParameters, 114 | queryStringParameters, 115 | }; 116 | } 117 | 118 | export function createSQSEvent(body: unknown): SQSEvent { 119 | return { 120 | Records: [ 121 | { 122 | messageId: "01b06e5c-d65c-11ea-9409-7e8b4a054eac", 123 | body: JSON.stringify(body), 124 | attributes: { 125 | ApproximateFirstReceiveTimestamp: new Date().toISOString(), 126 | ApproximateReceiveCount: "1", 127 | SenderId: "", 128 | SentTimestamp: new Date().toISOString(), 129 | }, 130 | awsRegion: "us-east-1", 131 | eventSource: "", 132 | eventSourceARN: "", 133 | md5OfBody: "", 134 | messageAttributes: {}, 135 | receiptHandle: "", 136 | }, 137 | ], 138 | }; 139 | } 140 | 141 | export function createScheduledEvent(): ScheduledEvent { 142 | return { 143 | id: "cdc73f9d-aea9-11e3-9d5a-835b769c0d9c", 144 | version: "1", 145 | "detail-type": "Scheduled Event", 146 | source: "aws.events", 147 | account: "123456789012", 148 | time: "1970-01-01T00:00:00Z", 149 | region: "ca-central-1", 150 | resources: [ 151 | "arn:aws:events:ca-central-1:123456789012:rule/ExampleRule", 152 | ], 153 | detail: {}, 154 | }; 155 | } 156 | 157 | export function createContext(): Context { 158 | return { 159 | awsRequestId: "", 160 | callbackWaitsForEmptyEventLoop: false, 161 | functionName: "", 162 | functionVersion: "", 163 | invokedFunctionArn: "", 164 | logGroupName: "", 165 | logStreamName: "", 166 | memoryLimitInMB: "", 167 | done() {}, 168 | fail() {}, 169 | getRemainingTimeInMillis: () => 0, 170 | succeed() {}, 171 | }; 172 | } 173 | 174 | export async function cleanupDatabase( 175 | datastore: Datastore, 176 | ): Promise<void> { 177 | await Promise.all([ 178 | (async () => { 179 | const query = await datastore.db.query( 180 | datastore.db.createQuery(kinds.LEGACY_MODULES), 181 | ); 182 | const mutations = query.map((entry) => ({ 183 | delete: objectGetKey(entry)!, 184 | })); 185 | 186 | for await ( 187 | const _ of datastore.db.commit(mutations, { 188 | transactional: false, 189 | }) 190 | ) { 191 | // 192 | } 193 | })(), 194 | (async () => { 195 | const query = await datastore.db.query( 196 | datastore.db.createQuery(kinds.BUILD), 197 | ); 198 | const mutations = query.map((entry) => ({ 199 | delete: objectGetKey(entry)!, 200 | })); 201 | 202 | for await ( 203 | const _ of datastore.db.commit(mutations, { 204 | transactional: false, 205 | }) 206 | ) { 207 | // 208 | } 209 | })(), 210 | (async () => { 211 | const query = await datastore.db.query( 212 | datastore.db.createQuery(kinds.LEGACY_OWNER_QUOTAS), 213 | ); 214 | const mutations = query.map((entry) => ({ 215 | delete: objectGetKey(entry)!, 216 | })); 217 | 218 | for await ( 219 | const _ of datastore.db.commit(mutations, { 220 | transactional: false, 221 | }) 222 | ) { 223 | // 224 | } 225 | })(), 226 | ]); 227 | } 228 | -------------------------------------------------------------------------------- /utils/types.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
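// Response and directory-listing shapes shared across the API handlers;
// DirectoryListingFile is the input to directorySize in utils.ts.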
2 | 3 | export type APIResponseBase = { 4 | success: boolean; 5 | }; 6 | 7 | export type APIErrorResponse = APIResponseBase & { error: string }; 8 | 9 | export interface DirectoryListingFile { 10 | path: string; 11 | size: number | undefined; 12 | type: "dir" | "file"; 13 | } 14 | -------------------------------------------------------------------------------- /utils/utils.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. 2 | import type { DirectoryListingFile } from "./types.ts"; 3 | 4 | export async function collectAsyncIterable<T>( 5 | iterator: AsyncIterable<T>, 6 | ): Promise<T[]> { 7 | const collected = [] as T[]; 8 | for await (const v of iterator) { 9 | collected.push(v); 10 | } 11 | return collected; 12 | } 13 | 14 | export function directorySize(d: DirectoryListingFile[]): number { 15 | if (d.length === 0) return 0; 16 | if (d.length === 1) return d[0].size ?? 0; 17 | 18 | // sort directory listings in place 19 | d.sort((a, b) => a.path.localeCompare(b.path)); 20 | 21 | // put the root dir at the end to make sure the stack is fully emptied at the 22 | // end of the loop 23 | d.push(d[0]); 24 | 25 | let totalSize = 0; 26 | const len = d.length; 27 | const stack: number[] = []; // stack of indexes of all entry of type 'dir' 28 | 29 | let curr = 0; 30 | for (let i = 1; i < len; i++) { // start at one to skip the root directory 31 | // current element is out of the curr directory, popping the stack 32 | // 33 | // special case for the root directory; since it's an empty string, it's 34 | // easier to simply check for it explicitly than compare the paths with 35 | // special conditions to handle "" as the root dir. 36 | while (curr != 0 && !d[i].path.startsWith(d[curr].path + "/")) { 37 | const s = d[curr].size ?? 0; 38 | curr = stack.pop() as number; 39 | d[curr].size = (d[curr].size ?? 0) + s; 40 | } 41 | 42 | if (d[i].type === "file") { 43 | totalSize += d[i].size ?? 0; 44 | d[curr].size = (d[curr].size ?? 0) + (d[i].size ?? 0); 45 | } else if (curr === 0 || d[i].path.startsWith(d[curr].path + "/")) { 46 | // see comment above for the special case of the root dir 47 | stack.push(curr); 48 | curr = i; 49 | } 50 | } 51 | 52 | // remove the duplicate root element introduced earlier 53 | d.pop(); 54 | return totalSize; 55 | } 56 | -------------------------------------------------------------------------------- /utils/utils_bench.ts: -------------------------------------------------------------------------------- 1 | import { directorySize } from "./utils.ts"; 2 | import type { DirectoryListingFile } from "./types.ts"; 3 | 4 | const dir = JSON.parse(Deno.readTextFileSync( 5 | "./utils/testdata/deno-v1.3.2.json", 6 | )) as DirectoryListingFile[]; 7 | 8 | Deno.bench(function benchDirectorySize() { 9 | directorySize(dir); 10 | }); 11 | -------------------------------------------------------------------------------- /utils/utils_test.ts: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
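// Exercises directorySize against a real deno v1.3.2 listing and a synthetic
// tree where sibling directories share a path prefix (foo vs foobar).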
2 | import { assertEquals } from "../test_deps.ts"; 3 | import { directorySize } from "./utils.ts"; 4 | import type { DirectoryListingFile } from "./types.ts"; 5 | 6 | Deno.test({ 7 | name: "directory size for deno v1.3.2", 8 | async fn() { 9 | const dir = JSON.parse( 10 | await Deno.readTextFile( 11 | "./utils/testdata/deno-v1.3.2.json", 12 | ), 13 | ) as DirectoryListingFile[]; 14 | assertEquals(directorySize(dir), 7206822); // check the calculation 15 | assertEquals(dir[0].size, 7206822); // check the list was modified in place 16 | assertEquals(dir[8].size, 8385); // check the list was modified in place 17 | }, 18 | }); 19 | 20 | Deno.test({ 21 | name: "different directories with the same prefix", 22 | fn() { 23 | const mock: DirectoryListingFile[] = [ 24 | { 25 | path: "", 26 | type: "dir", 27 | size: undefined, 28 | }, 29 | { 30 | path: "foo", 31 | type: "dir", 32 | size: undefined, 33 | }, 34 | { 35 | path: "foo/bar", 36 | type: "dir", 37 | size: undefined, 38 | }, 39 | { 40 | path: "foobar", 41 | type: "dir", 42 | size: undefined, 43 | }, 44 | { 45 | path: "foobarbaz", 46 | type: "dir", 47 | size: undefined, 48 | }, 49 | { 50 | path: "foo/foo.ts", 51 | size: 100, 52 | type: "file", 53 | }, 54 | { 55 | path: "foo/bar/bar.ts", 56 | type: "file", 57 | size: 100, 58 | }, 59 | { 60 | path: "foobar/bar.ts", 61 | size: 100, 62 | type: "file", 63 | }, 64 | { 65 | path: "foobarbaz/baz.ts", 66 | size: 100, 67 | type: "file", 68 | }, 69 | ]; 70 | 71 | assertEquals(directorySize(mock), 400); 72 | // the first item should match the output of the function 73 | assertEquals(mock[0].size, 400); 74 | // the directory "foo" shouldn't count the contents of "foobar" and 75 | // "foobarbaz" in its total 76 | assertEquals(mock[1].size, 200); 77 | }, 78 | }); 79 | -------------------------------------------------------------------------------- /utils/webhooks.d.ts: -------------------------------------------------------------------------------- 1 | // From https://github.com/octokit/webhooks, licensed under MIT: https://github.com/octokit/webhooks/blob/master/LICENSE 2 | // deno-lint-ignore-file 3 | 4 | type PayloadRepositoryOwner = { 5 | login: string; 6 | id: number; 7 | node_id: string; 8 | avatar_url: string; 9 | gravatar_id: string; 10 | url: string; 11 | html_url: string; 12 | followers_url: string; 13 | following_url: string; 14 | gists_url: string; 15 | starred_url: string; 16 | subscriptions_url: string; 17 | organizations_url: string; 18 | repos_url: string; 19 | events_url: string; 20 | received_events_url: string; 21 | type: string; 22 | site_admin: boolean; 23 | name?: string; 24 | email?: string; 25 | }; 26 | 27 | type PayloadRepository = { 28 | id: number; 29 | node_id: string; 30 | name: string; 31 | full_name: string; 32 | private: boolean; 33 | owner: PayloadRepositoryOwner; 34 | html_url: string; 35 | description: null | string; 36 | fork: boolean; 37 | url: string; 38 | forks_url: string; 39 | keys_url: string; 40 | collaborators_url: string; 41 | teams_url: string; 42 | hooks_url: string; 43 | issue_events_url: string; 44 | events_url: string; 45 | assignees_url: string; 46 | branches_url: string; 47 | tags_url: string; 48 | blobs_url: string; 49 | git_tags_url: string; 50 | git_refs_url: string; 51 | trees_url: string; 52 | statuses_url: string; 53 | languages_url: string; 54 | stargazers_url: string; 55 | contributors_url: string; 56 | subscribers_url: string; 57 | subscription_url: string; 58 | commits_url: string; 59 | git_commits_url: string; 60 | comments_url: string; 61 | 
issue_comment_url: string; 62 | contents_url: string; 63 | compare_url: string; 64 | merges_url: string; 65 | archive_url: string; 66 | downloads_url: string; 67 | issues_url: string; 68 | pulls_url: string; 69 | milestones_url: string; 70 | notifications_url: string; 71 | labels_url: string; 72 | releases_url: string; 73 | deployments_url: string; 74 | created_at: string | number; 75 | updated_at: string; 76 | pushed_at: string | number; 77 | git_url: string; 78 | ssh_url: string; 79 | clone_url: string; 80 | svn_url: string; 81 | homepage: null | string; 82 | size: number; 83 | stargazers_count: number; 84 | watchers_count: number; 85 | language: string | null; 86 | has_issues: boolean; 87 | has_projects: boolean; 88 | has_downloads: boolean; 89 | has_wiki: boolean; 90 | has_pages: boolean; 91 | forks_count: number; 92 | mirror_url: null; 93 | archived: boolean; 94 | disabled?: boolean; 95 | open_issues_count: number; 96 | license: null; 97 | forks: number; 98 | open_issues: number; 99 | watchers: number; 100 | default_branch: string; 101 | stargazers?: number; 102 | master_branch?: string; 103 | permissions?: PayloadRepositoryPermissions; 104 | }; 105 | 106 | type PayloadRepositoryPermissions = { 107 | pull: boolean; 108 | push: boolean; 109 | admin: boolean; 110 | }; 111 | 112 | type WebhookPayloadCreateSender = { 113 | login: string; 114 | id: number; 115 | node_id: string; 116 | avatar_url: string; 117 | gravatar_id: string; 118 | url: string; 119 | html_url: string; 120 | followers_url: string; 121 | following_url: string; 122 | gists_url: string; 123 | starred_url: string; 124 | subscriptions_url: string; 125 | organizations_url: string; 126 | repos_url: string; 127 | events_url: string; 128 | received_events_url: string; 129 | type: string; 130 | site_admin: boolean; 131 | }; 132 | 133 | export type WebhookPayloadCreate = { 134 | ref: string; 135 | ref_type: string; 136 | master_branch: string; 137 | description: null; 138 | pusher_type: string; 139 | repository: PayloadRepository; 140 | sender: WebhookPayloadCreateSender; 141 | }; 142 | 143 | type WebhookPayloadPingSender = { 144 | login: string; 145 | id: number; 146 | node_id: string; 147 | avatar_url: string; 148 | gravatar_id: string; 149 | url: string; 150 | html_url: string; 151 | followers_url: string; 152 | following_url: string; 153 | gists_url: string; 154 | starred_url: string; 155 | subscriptions_url: string; 156 | organizations_url: string; 157 | repos_url: string; 158 | events_url: string; 159 | received_events_url: string; 160 | type: string; 161 | site_admin: boolean; 162 | }; 163 | 164 | type WebhookPayloadPingHookLastResponse = { 165 | code: null; 166 | status: string; 167 | message: null; 168 | }; 169 | 170 | type WebhookPayloadPingHookConfig = { 171 | content_type: string; 172 | url: string; 173 | insecure_ssl: string; 174 | }; 175 | 176 | type WebhookPayloadPingHook = { 177 | type: string; 178 | id: number; 179 | name: string; 180 | active: boolean; 181 | events: Array<string>; 182 | config: WebhookPayloadPingHookConfig; 183 | updated_at: string; 184 | created_at: string; 185 | url: string; 186 | test_url: string; 187 | ping_url: string; 188 | last_response: WebhookPayloadPingHookLastResponse; 189 | }; 190 | 191 | export type WebhookPayloadPing = { 192 | zen: string; 193 | hook_id: number; 194 | hook: WebhookPayloadPingHook; 195 | repository: PayloadRepository; 196 | sender: WebhookPayloadPingSender; 197 | }; 198 | 199 | type WebhookPayloadPushSender = { 200 | login: string; 201 | id: number; 202 | node_id: string;
203 | avatar_url: string; 204 | gravatar_id: string; 205 | url: string; 206 | html_url: string; 207 | followers_url: string; 208 | following_url: string; 209 | gists_url: string; 210 | starred_url: string; 211 | subscriptions_url: string; 212 | organizations_url: string; 213 | repos_url: string; 214 | events_url: string; 215 | received_events_url: string; 216 | type: string; 217 | site_admin: boolean; 218 | }; 219 | 220 | type WebhookPayloadPushPusher = { name: string; email: string }; 221 | 222 | export type WebhookPayloadPush = { 223 | ref: string; 224 | before: string; 225 | after: string; 226 | created: boolean; 227 | deleted: boolean; 228 | forced: boolean; 229 | base_ref: null; 230 | compare: string; 231 | commits: Array<unknown>; 232 | head_commit: null; 233 | repository: PayloadRepository; 234 | pusher: WebhookPayloadPushPusher; 235 | sender: WebhookPayloadPushSender; 236 | }; 237 | --------------------------------------------------------------------------------
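The utilities above are small and composable. The following sketch is illustrative only (it is not a file in this repository; the badwords list and CIDR values are stand-ins): it shows how a webhook handler could combine `isForbidden` and `isIp4InCidrs` to screen an incoming request before queueing a build.

```ts
// Illustrative sketch only; composes utils/moderation.ts and utils/net.ts.
// The badwords list and CIDR ranges below are placeholders, not project data.
import { isForbidden } from "./utils/moderation.ts";
import { isIp4InCidrs } from "./utils/net.ts";

// In the real service the list would come from the moderation bucket
// (see badwords-example.txt); these entries mirror moderation_test.ts.
const badwords = ["foo", "bar", "baz"];

// GitHub publishes its hook ranges via its /meta API; values here are examples.
const githubHookCidrs = ["192.30.252.0/22", "185.199.108.0/22"];

/** Returns an error message for a rejected request, or null if it may proceed. */
export function screenWebhook(
  moduleName: string,
  sourceIp: string,
): string | null {
  if (!isIp4InCidrs(sourceIp, githubHookCidrs)) {
    return "request does not originate from GitHub";
  }
  if (isForbidden(moduleName, badwords)) {
    return "found forbidden word in module name";
  }
  return null;
}

// screenWebhook("lots_of_foo", "192.30.252.10") -> moderation rejection
// screenWebhook("testing", "10.0.0.1")          -> source IP rejection
// screenWebhook("testing", "192.30.252.10")     -> null (accepted)
```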