├── .dockerignore ├── .eslintrc.cjs ├── .github ├── FUNDING.yml └── workflows │ ├── cf.yml │ ├── deno-deploy.yml │ ├── deploy.yml │ ├── fly.yml │ ├── pr.yml │ ├── preview.yaml │ ├── profiler.yml │ └── scorecard.yml ├── .gitignore ├── .husky ├── .gitignore └── pre-commit ├── .prettierignore ├── .prettierrc.json ├── .vscode ├── extensions.json ├── launch.json └── settings.json ├── LICENSE ├── README.md ├── bun.Dockerfile ├── deno.Dockerfile ├── deno.json ├── fastly.toml ├── fly.tls.toml ├── fly.toml ├── import_map.json ├── node.Dockerfile ├── package.json ├── run ├── src ├── build │ └── pre.sh ├── commons │ ├── b32.js │ ├── bufutil.js │ ├── crypto.js │ ├── dnsutil.js │ ├── envutil.js │ ├── lf-transformer.js │ └── util.js ├── core │ ├── cfg.js │ ├── deno │ │ ├── blocklists.ts │ │ ├── config.ts │ │ └── dbip.ts │ ├── dns │ │ ├── conns.js │ │ └── transact.js │ ├── doh.js │ ├── env.js │ ├── fastly │ │ └── config.js │ ├── io-state.js │ ├── linux │ │ └── swap.js │ ├── log.js │ ├── node │ │ ├── blocklists.js │ │ ├── config.js │ │ ├── dbip.js │ │ ├── dns-transport.js │ │ ├── util-dev.js │ │ └── util.js │ ├── plugin.js │ ├── svc.js │ └── workers │ │ └── config.js ├── plugins │ ├── cache-util.js │ ├── command-control │ │ └── cc.js │ ├── dns-op │ │ ├── blocker.js │ │ ├── cache-api.js │ │ ├── cache-resolver.js │ │ ├── cache.js │ │ ├── dns-op.js │ │ ├── prefilter.js │ │ └── resolver.js │ ├── observability │ │ ├── geoip.js │ │ └── log-pusher.js │ ├── plugin-response.js │ ├── rdns-util.js │ ├── rethinkdns │ │ ├── filter.js │ │ ├── main.js │ │ └── trie-config.js │ └── users │ │ ├── auth-token.js │ │ ├── user-cache.js │ │ ├── user-op.js │ │ └── users.js ├── server-deno.ts ├── server-fastly.js ├── server-node.js ├── server-workers.js └── system.js ├── test ├── data │ └── tls │ │ ├── README.md │ │ ├── dns.rethinkdns.localhost.crt │ │ ├── dns.rethinkdns.localhost.key │ │ └── domains.ext └── manual │ └── proxy-proto.js ├── webpack.config.cjs ├── webpack.fastly.cjs ├── webpack.fly.cjs └── 
wrangler.toml /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .gitignore 3 | .env 4 | .env.example 5 | .github/ 6 | .husky/ 7 | .vscode/ 8 | .prettierrc.json 9 | .prettierignore 10 | .eslintrc.cjs 11 | .eslintcache 12 | 13 | node_modules/ 14 | worker/ 15 | dist/ 16 | test/ 17 | log/ 18 | bin/ 19 | blocklists__/ 20 | dbip__/ 21 | 22 | *.md 23 | LICENSE 24 | run 25 | wrangler.toml 26 | package-lock.json 27 | -------------------------------------------------------------------------------- /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | env: { 4 | // Which globals variables are allowed. 5 | es2021: true, 6 | node: true, 7 | worker: true, 8 | }, 9 | 10 | extends: [ 11 | // Google JS Style Guide Rules 12 | // See: https://github.com/google/eslint-config-google 13 | "google", 14 | ], 15 | 16 | parserOptions: { 17 | ecmaVersion: "latest", 18 | sourceType: "module", 19 | }, 20 | 21 | ignorePatterns: [ 22 | "node_modules/", 23 | "dist/", 24 | "worker/", 25 | "pkg/", 26 | "test/data/cache/", 27 | 28 | // Ignore all files, except JS files (which may be in a directory) 29 | "*", 30 | "!*/", 31 | "!*.js", 32 | "!*.[mc]js", 33 | ], 34 | 35 | plugins: ["prettier"], 36 | rules: { 37 | // Google JS rules, missing in "eslint-config-google" package 38 | "eqeqeq": ["error", "smart"], 39 | 40 | // Rules disabled to avoid conflicts with prettier 41 | // See: https://github.com/prettier/eslint-config-prettier 42 | "indent": 0, 43 | "object-curly-spacing": 0, 44 | "operator-linebreak": 0, 45 | "space-before-function-paren": 0, 46 | 47 | // Our rules overrides. 
48 | "comma-dangle": 0, 49 | "require-jsdoc": 0, 50 | "valid-jsdoc": 0, 51 | "quotes": ["error", "double", { allowTemplateLiterals: true }], 52 | "no-unused-vars": "warn", 53 | "new-cap": ["error", { properties: false }], 54 | "max-len": [ 55 | "error", 56 | { 57 | code: 120, 58 | ignoreComments: true, 59 | ignoreUrls: true, 60 | ignoreStrings: true, 61 | ignoreTemplateLiterals: true, 62 | }, 63 | ], 64 | "generator-star-spacing": [ 65 | "error", 66 | { 67 | before: false, 68 | after: true, // function* () {} 69 | anonymous: "neither", // function*() {} 70 | method: { before: false, after: false }, // { *gen() {} } 71 | }, 72 | ], 73 | 74 | // Enforces rules from .prettierrc file. 75 | // These should be fixed automatically with formatting. 76 | // See: https://github.com/prettier/eslint-plugin-prettier 77 | "prettier/prettier": "error", 78 | }, 79 | }; 80 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [serverless-dns] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: ['https://svc.rethinkdns.com/r/sponsor'] 14 | -------------------------------------------------------------------------------- /.github/workflows/cf.yml: 
-------------------------------------------------------------------------------- 1 | name: ⛅ CF 2 | on: 3 | # github.com/serverless-dns/blocklists/blob/6021f80f/.github/workflows/createUploadBlocklistFilter.yml#L4-L6 4 | # schedule: 5 | # at 7:53 on 3rd, 10th, 18th, 26th of every month 6 | # - cron: '53 7 3,10,18,26 * *' 7 | # docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow 8 | # docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow_dispatch 9 | # docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#onworkflow_dispatchinputs 10 | workflow_dispatch: 11 | # github.blog/changelog/2020-07-06-github-actions-manual-triggers-with-workflow_dispatch/ 12 | # docs.github.com/en/actions/creating-actions/metadata-syntax-for-github-actions#inputs 13 | inputs: 14 | # docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onworkflow_dispatchinputs 15 | environment: 16 | description: 'wrangler env to deploy to' 17 | required: true 18 | default: 'dev' 19 | type: choice 20 | options: 21 | - dev 22 | - prod 23 | - one 24 | commit: 25 | description: 'git tip commit to deploy' 26 | default: 'main' 27 | required: true 28 | 29 | push: 30 | # TODO: inputs.environment and inputs.commit 31 | branches: 32 | - "main" 33 | tags: 34 | - "v*" 35 | paths-ignore: 36 | - ".github/**" 37 | - "!.github/workflows/cf.yml" 38 | - ".env.example" 39 | - ".eslintrc.cjs" 40 | - ".prettierignore" 41 | - "fly.toml" 42 | - "README.md" 43 | - "node.Dockerfile" 44 | - "deno.Dockerfile" 45 | - "import_map.json" 46 | - ".vscode/*" 47 | - ".husky/*" 48 | - ".prettierrc.json" 49 | - "LICENSE" 50 | - "run" 51 | repository_dispatch: 52 | 53 | env: 54 | GIT_REF: ${{ github.event.inputs.commit || github.ref }} 55 | WRANGLER_VER: '3.56.0' 56 | # default is 'dev' which is really empty/no env 57 | WORKERS_ENV: '' 58 | 59 | jobs: 60 | deploy: 61 | name: 🚀 Deploy worker 62 | runs-on: ubuntu-latest 63 | 
timeout-minutes: 60 64 | steps: 65 | - name: 🛒 Checkout 66 | uses: actions/checkout@v3.3.0 67 | with: 68 | ref: ${{ env.GIT_REF }} 69 | fetch-depth: 0 70 | 71 | - name: 🛸 Env? 72 | # 'dev' env deploys to default WORKERS_ENV, which is, '' (an empty string) 73 | if: github.event.inputs.environment == 'prod' || github.event.inputs.environment == 'one' 74 | run: | 75 | echo "WORKERS_ENV=${WENV}" >> $GITHUB_ENV 76 | echo "COMMIT_SHA=${COMMIT_SHA}" >> $GITHUB_ENV 77 | shell: bash 78 | env: 79 | WENV: ${{ github.event.inputs.environment }} 80 | COMMIT_SHA: ${{ github.sha }} 81 | 82 | - name: 🎱 Tag? 83 | # docs.github.com/en/actions/learn-github-actions/contexts#github-context 84 | if: github.ref_type == 'tag' 85 | run: | 86 | echo "WORKERS_ENV=${WENV}" >> $GITHUB_ENV 87 | echo "COMMIT_SHA=${COMMIT_SHA}" >> $GITHUB_ENV 88 | shell: bash 89 | env: 90 | # tagged deploys always deploy to prod 91 | WENV: 'prod' 92 | COMMIT_SHA: ${{ github.sha }} 93 | 94 | - name: 🌽 Cron? 95 | if: github.event.schedule == '53 7 3,10,18,26 * *' 96 | run: | 97 | echo "WORKERS_ENV=${WENV}" >> $GITHUB_ENV 98 | echo "COMMIT_SHA=${COMMIT_SHA}" >> $GITHUB_ENV 99 | shell: bash 100 | env: 101 | # cron deploys always deploy to prod 102 | WENV: 'prod' 103 | COMMIT_SHA: ${{ github.sha }} 104 | 105 | # npm (and node16) are installed by wrangler-action in a pre-job setup 106 | - name: 🏗 Get dependencies 107 | run: npm i 108 | 109 | - name: 📚 Wrangler publish 110 | # github.com/cloudflare/wrangler-action 111 | uses: cloudflare/wrangler-action@v3 112 | with: 113 | apiToken: ${{ secrets.CF_API_TOKEN }} 114 | # input overrides env-defaults, regardless 115 | environment: ${{ env.WORKERS_ENV }} 116 | wranglerVersion: ${{ env.WRANGLER_VER }} 117 | env: 118 | CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CF_ACCOUNT_ID }} 119 | GIT_COMMIT_ID: ${{ env.COMMIT_SHA }} 120 | 121 | - name: 🎤 Notice 122 | run: | 123 | echo "::notice::Deployed to ${WORKERS_ENV} / ${GIT_REF} @ ${COMMIT_SHA}" 124 | 
-------------------------------------------------------------------------------- /.github/workflows/deno-deploy.yml: -------------------------------------------------------------------------------- 1 | name: 🦕 Deno 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | paths-ignore: 8 | - ".github/**" 9 | - "!.github/workflows/deno-deploy.yml" 10 | - ".env.example" 11 | - ".eslintrc.cjs" 12 | - ".prettierignore" 13 | - "README.md" 14 | - "wrangler.toml" 15 | - "fly.toml" 16 | - "node.Dockerfile" 17 | - "deno.Dockerfile" 18 | - ".vscode/*" 19 | - ".husky/*" 20 | - ".prettierrc.json" 21 | - "LICENSE" 22 | - "run" 23 | workflow_dispatch: 24 | inputs: 25 | deploy-mode: 26 | description: 'Deploy via action or auto' 27 | required: false 28 | default: 'action' 29 | type: choice 30 | options: 31 | - action 32 | - auto 33 | git-ref: 34 | description: 'Git branch / tag / commit (used for auto deploy-mode)' 35 | required: false 36 | default: 'main' 37 | deployment-type: 38 | description: 'Deploy branch (used for auto deploy-mode)' 39 | required: false 40 | default: 'dev' 41 | type: choice 42 | options: 43 | - dev 44 | - live 45 | 46 | env: 47 | DEPLOY_MODE: 'action' 48 | PROJECT_NAME: ${{ secrets.DENO_PROJECT_NAME || 'rdns' }} 49 | BUILD_BRANCH: > 50 | ${{ github.event_name == 'workflow_dispatch' && 51 | github.event.inputs.deployment-type == 'live' && 52 | 'build/deno-deploy/live' || 'build/deno-deploy/dev' }} 53 | IN_FILE: 'src/server-deno.ts' 54 | OUT_FILE: 'rethinkdns.js' 55 | 56 | jobs: 57 | deploy: 58 | name: 🌯 Deno Deploy 59 | runs-on: ubuntu-latest 60 | permissions: 61 | # needed for auth with deno.com 62 | id-token: write 63 | # needed to clone repo 64 | contents: read 65 | steps: 66 | - name: 🚚 Fetch code 67 | uses: actions/checkout@v4 68 | with: 69 | ref: ${{ github.event.inputs.git-ref || github.ref }} 70 | fetch-depth: 0 71 | 72 | - name: 🏝 Setup env 73 | shell: bash 74 | run: | 75 | echo "DEPLOY_MODE=${DM}" >> $GITHUB_ENV 76 | echo "::notice::deploy mode: $DM, 
project: $PROJECT_NAME" 77 | env: 78 | DM: ${{ github.event.inputs.deploy-mode || env.DEPLOY_MODE }} 79 | 80 | - name: 🧰 Checkout deploy-branch 81 | if: ${{ env.DEPLOY_MODE == 'auto' }} 82 | run: | 83 | git fetch --all 84 | git checkout --orphan ${BUILD_BRANCH} || git checkout ${BUILD_BRANCH} 85 | git reset 86 | git merge origin/${BUILD_BRANCH} || : 87 | 88 | - name: 🦕 Install Deno @1.44 89 | uses: denoland/setup-deno@main 90 | with: 91 | deno-version: 1.44.4 92 | 93 | - name: 📦 Bundle up 94 | if: ${{ env.DEPLOY_MODE == 'action' }} 95 | run: | 96 | echo "::notice::do not forget to set DENO_PROJECT_NAME via github secrets!" 97 | deno task prepare 98 | deno bundle ${IN_FILE} ${OUT_FILE} 99 | shell: bash 100 | 101 | # github.com/denoland/deployctl/blob/febd898/action.yml 102 | # step output: www.actionsbyexample.com/outputs.html 103 | - name: 🤸🏼 Deploy to deno.com 104 | id: dd 105 | if: ${{ env.DEPLOY_MODE == 'action' }} 106 | uses: denoland/deployctl@1.12.0 107 | with: 108 | project: ${{ env.PROJECT_NAME }} 109 | entrypoint: ${{ env.OUT_FILE }} 110 | 111 | - name: 🚢 Merge latest code into deploy-branch 112 | if: ${{ env.DEPLOY_MODE == 'auto' }} 113 | run: | 114 | git config --local user.name 'github-actions[bot]' 115 | git config --local user.email 'github-actions[bot]@users.noreply.github.com' 116 | git add ${OUT_FILE} 117 | git commit -m "Update bundle for ${GITHUB_SHA}" && \ 118 | echo "::notice::Pushing to ${BUILD_BRANCH}" || \ 119 | echo "::notice::No changes to commit" 120 | git push origin ${BUILD_BRANCH} 121 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Application 2 | on: 3 | push: 4 | branches: [main] 5 | 6 | jobs: 7 | deploy: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | 12 | - name: Install Rust toolchain 13 | uses: actions-rs/toolchain@v1 14 | with: 15 | 
toolchain: stable 16 | target: wasm32-wasi # WebAssembly target 17 | 18 | - name: Deploy to Compute@Edge 19 | uses: fastly/compute-actions@v5 20 | env: 21 | FASTLY_API_TOKEN: ${{ secrets.FASTLY_API_KEY }} 22 | with: 23 | service_id: '3BCRKNm5R0iXXcfZzmwmn8' 24 | -------------------------------------------------------------------------------- /.github/workflows/fly.yml: -------------------------------------------------------------------------------- 1 | name: 🪂 Fly 2 | 3 | on: 4 | # github.com/serverless-dns/blocklists/blob/6021f80f/.github/workflows/createUploadBlocklistFilter.yml#L4-L6 5 | schedule: 6 | # at 7:53 on 2nd, 9th, 17th, 25th of every month 7 | - cron: '53 7 2,9,17,25 * *' 8 | push: 9 | branches: 10 | - "main" 11 | tags: 12 | - "v*" 13 | paths-ignore: 14 | - ".github/**" 15 | - "!.github/workflows/fly.yml" 16 | - ".env.example" 17 | - ".eslintrc.cjs" 18 | - ".prettierignore" 19 | - "wrangler.toml" 20 | - "README.md" 21 | - "wrangler.toml" 22 | - ".vscode/*" 23 | - ".husky/*" 24 | - ".prettierrc.json" 25 | - "LICENSE" 26 | - "run" 27 | workflow_dispatch: 28 | inputs: 29 | git-ref: 30 | description: "Branch / ref / tag to build" 31 | required: false 32 | default: "main" 33 | deployment-type: 34 | description: "Fly app type to deploy to" 35 | required: true 36 | default: 'dev' 37 | type: choice 38 | options: 39 | - dev 40 | - prod 41 | - onebox 42 | - flytls 43 | deployment-strat: 44 | description: "Deploy strategy" 45 | required: true 46 | default: 'rolling' 47 | type: choice 48 | options: 49 | - rolling 50 | - immediate 51 | - bluegreen 52 | - canary 53 | builder: 54 | description: "App builder type" 55 | required: true 56 | default: 'remote-only' 57 | type: choice 58 | options: 59 | - remote-only 60 | - local-only 61 | 62 | env: 63 | GIT_REF: ${{ github.event.inputs.git-ref || github.ref }} 64 | FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} 65 | # one of immediate, rolling (default), bluegreen, canary 66 | FLY_DEPLOY_STRAT: ${{ 
github.event.inputs.deployment-strat }} 67 | # local-only, remote-only (default) 68 | FLY_BUILDER_TYPE: ${{ github.event.inputs.builder }} 69 | # default fly app to deploy to (typically, dev) 70 | FLY_APP: "rdns-dev" 71 | FLY_TLS_APP: "rdns-b1" 72 | FLY_PROD_ONEBOX_APP: "rdns" 73 | FLY_PROD_APP: "udns" 74 | FLY_CFG: "fly.toml" 75 | FLY_TLS_CFG: "fly.tls.toml" 76 | FLY_DEPLOY_WAIT_SEC: "300" 77 | 78 | jobs: 79 | deploy: 80 | name: 🚀 Deploy app 81 | runs-on: ubuntu-latest 82 | steps: 83 | - name: 🚚 Checkout 84 | uses: actions/checkout@v3.3.0 85 | with: 86 | ref: ${{ env.GIT_REF }} 87 | fetch-depth: 0 88 | 89 | - name: 🏗 Setup env vars 90 | run: | 91 | echo "GIT_HEAD=$(git rev-parse HEAD)" >> $GITHUB_ENV 92 | echo "FLY_TOML=${FLY_CFG}" >> $GITHUB_ENV 93 | echo "FLY_DEPLOY_STRAT=${FLY_DEPLOY_STRAT:-rolling}" >> $GITHUB_ENV 94 | echo "FLY_BUILDER_TYPE=${FLY_BUILDER_TYPE:-remote-only}" >> $GITHUB_ENV 95 | shell: bash 96 | 97 | - name: 🔐👨‍🚒 FlyTLS via dispatch? 98 | if: ${{ github.event_name == 'workflow_dispatch' && 99 | github.event.inputs.deployment-type == 'flytls' }} 100 | run: | 101 | echo "FLY_APP=${FLY_TLS_APP}" >> $GITHUB_ENV 102 | echo "FLY_TOML=${FLY_TLS_CFG}" >> $GITHUB_ENV 103 | echo "::notice::Deploying FLYTLS / ${GIT_REF} @ ${COMMIT_SHA}" 104 | shell: bash 105 | env: 106 | COMMIT_SHA: ${{ github.sha }} 107 | 108 | - name: 🚨👨‍🚒 Prod via dispatch? 109 | if: ${{ github.event_name == 'workflow_dispatch' && 110 | github.event.inputs.deployment-type == 'prod' }} 111 | run: | 112 | echo "FLY_APP=${FLY_PROD_APP}" >> $GITHUB_ENV 113 | echo "::notice::Deploying PROD / ${GIT_REF} @ ${COMMIT_SHA}" 114 | shell: bash 115 | env: 116 | COMMIT_SHA: ${{ github.sha }} 117 | 118 | - name: 🚨🛺 Prod via tag? 
119 | # docs.github.com/en/actions/learn-github-actions/contexts#github-context 120 | if: ${{ github.event_name != 'workflow_dispatch' && github.ref_type == 'tag' }} 121 | run: | 122 | echo "FLY_APP=${FLY_PROD_APP}" >> $GITHUB_ENV 123 | echo "::notice::Deploying PROD / ${GIT_REF} @ ${COMMIT_SHA}" 124 | shell: bash 125 | env: 126 | COMMIT_SHA: ${{ github.sha }} 127 | 128 | - name: 🚨🌽 Prod via cron? 129 | if: github.event.schedule == '53 7 2,9,17,25 * *' 130 | run: | 131 | echo "FLY_APP=${FLY_PROD_APP}" >> $GITHUB_ENV 132 | echo "::notice::Deploying PROD / ${GIT_REF} @ ${COMMIT_SHA}" 133 | shell: bash 134 | env: 135 | COMMIT_SHA: ${{ github.sha }} 136 | 137 | - name: 🚜👨‍🚒 Onebox via dispatch? 138 | if: ${{ github.event_name == 'workflow_dispatch' && 139 | github.event.inputs.deployment-type == 'onebox' }} 140 | run: | 141 | echo "FLY_APP=${FLY_PROD_ONEBOX_APP}" >> $GITHUB_ENV 142 | echo "::notice::Deploying 1BOX / ${GIT_REF} @ ${COMMIT_SHA}" 143 | shell: bash 144 | env: 145 | COMMIT_SHA: ${{ github.sha }} 146 | 147 | # experimental: github.com/superfly/flyctl-actions/pull/20 148 | - name: 🏗 Setup flyctl @ latest 149 | uses: superfly/flyctl-actions/setup-flyctl@master 150 | with: 151 | version: latest 152 | 153 | - name: 🚢 Ship 154 | run: "flyctl deploy 155 | --${{ env.FLY_BUILDER_TYPE }} 156 | --image-label ${{ env.GIT_HEAD }} 157 | --config ${{ env.FLY_TOML }} 158 | --strategy ${{ env.FLY_DEPLOY_STRAT }} 159 | --wait-timeout ${{ env.FLY_DEPLOY_WAIT_SEC }} 160 | --auto-confirm 161 | --no-cache 162 | --verbose 163 | " 164 | 165 | - name: 📕 Registry 166 | if: success() 167 | run: | 168 | echo "::notice::Image @ registry.fly.io/${{ env.FLY_APP }}:${{ env.GIT_HEAD }}" 169 | shell: bash 170 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | name: 📩 PR 2 | 3 | on: 4 | # "I am a workflow on the main branch's HEAD, target (base) of a 
PR" 5 | pull_request_target: 6 | branches: 7 | - "main" 8 | 9 | # Avoid concurrent runs of this workflow on an incoming PR, cancel previous 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | env: 15 | # Base branch for the PR 16 | BASE_REF: ${{ github.base_ref }} 17 | BASE_SHA: ${{ github.event.pull_request.base.sha }} 18 | BASE_REPO: ${{ github.event.pull_request.base.repo.clone_url }} 19 | 20 | # Branch of PR (maybe of a fork repo) 21 | PR_HEAD_REF: ${{ github.head_ref }} 22 | PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }} 23 | PR_HEAD_REPO: ${{ github.event.pull_request.head.repo.clone_url }} 24 | 25 | # Full ref of base branch like `refs/heads/master`, gh_sha is same as base_sha 26 | GH_REF: "${{ github.ref }}" 27 | GH_SHA: ${{ github.sha }} 28 | 29 | jobs: 30 | lint-fmt: 31 | name: Lint & fmt 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: 🚚 Get latest code 35 | uses: actions/checkout@v3.3.0 36 | with: 37 | # Checkout base (target) repo 38 | ref: ${{ env.GH_REF }} 39 | 40 | - name: 🥑 Install deps 41 | run: | 42 | npm install 43 | 44 | - name: 🎣 Fetch PR's head 45 | run: | 46 | git fetch origin ${{ env.PR_HEAD_SHA }} 47 | 48 | # Abort if node modules are checked in 49 | - name: 🚨 Check for unsafe changes 50 | run: | 51 | DIFF=$(git diff --name-only --diff-filter=ACMRT \ 52 | ${{ env.BASE_SHA }}...${{ env.PR_HEAD_SHA }}) 53 | 54 | echo "::notice::DIFF(files): ${DIFF//$'\n'/ }" 55 | 56 | if echo $DIFF | grep "node_modules"; then 57 | echo "::error::Unsafe change 'node_modules' found" 58 | exit 1 59 | fi 60 | 61 | - name: 🍌 Checkout PR_HEAD 62 | run: | 63 | git checkout ${{ env.PR_HEAD_SHA }} 64 | 65 | - name: 🔧 Run eslint on changed files only 66 | run: | 67 | DIFF=$(git diff --name-only --diff-filter=ACMRT \ 68 | ${{ env.BASE_SHA }}...${{ env.PR_HEAD_SHA }}) 69 | 70 | npx eslint $DIFF --cache --fix 71 | 72 | - name: 👀 Look for lint changes 73 | id: lint-changes 74 | run: | 75 | echo 
changes=$( \ 76 | if git diff-index --quiet HEAD --; \ 77 | then echo "false"; \ 78 | else echo "true"; \ 79 | fi) \ 80 | >> $GITHUB_OUTPUT 81 | 82 | - name: 🏗 Commit lint changes 83 | if: steps.lint-changes.outputs.changes == 'true' 84 | run: | 85 | git config --local user.name 'github-actions[bot]' 86 | git config --local user.email 'github-actions[bot]@users.noreply.github.com' 87 | git commit --no-verify -am "Automatic lint & fmt" 88 | 89 | - name: 🚢 Push to origin? 90 | if: ${{ env.BASE_REPO == env.PR_HEAD_REPO }} 91 | run: | 92 | git push origin HEAD:${{ env.PR_HEAD_REF }} 93 | 94 | # `GITHUB_TOKEN` owned by `github-actions[bot]` has write access to 95 | # origin in this `PR-target` workflow but not the write access to fork 96 | # repo. 97 | # A "personal access token" of repository maintainer may be required 98 | # for pushing to forks. 99 | - name: 🚢 Push to fork? 100 | if: ${{ env.BASE_REPO != env.PR_HEAD_REPO }} 101 | run: | 102 | git remote add fork ${{ env.PR_HEAD_REPO }} 103 | git push fork HEAD:${{ env.PR_HEAD_REF }} 104 | -------------------------------------------------------------------------------- /.github/workflows/preview.yaml: -------------------------------------------------------------------------------- 1 | name: Fastly Compute@Edge Branch Previews 2 | concurrency: 3 | group: ${{ github.head_ref || github.run_id }}-${{ github.workflow}} 4 | on: 5 | pull_request: 6 | types: [opened, synchronize, reopened, closed] 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | defaults: 11 | run: 12 | shell: bash 13 | steps: 14 | - uses: actions/checkout@v3 15 | - uses: fastly/compute-actions/preview@v5 16 | with: 17 | fastly-api-token: ${{ secrets.FASTLY_API_KEY }} 18 | github-token: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /.github/workflows/profiler.yml: -------------------------------------------------------------------------------- 1 | name: 🎯 Profiler 2 | 3 | on: 4 | push: 5 
| branches: 6 |       - profile 7 |     paths: 8 |       - 'src/**' 9 | 10 |   workflow_dispatch: 11 |     inputs: 12 |       git-ref: 13 |         description: "git tip: branch/ref/tag" 14 |         required: false 15 |         default: 'main' 16 |       # docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onworkflow_dispatchinputs 17 |       js-runtime: 18 |         description: "proc: deno/node" 19 |         required: false 20 |         default: 'node' 21 |         type: choice 22 |         options: 23 |           - node 24 |           - deno 25 |       mode: 26 |         description: "p1 (fetch) / p2 (http2) / p3 (udp/tcp)" 27 |         required: false 28 |         default: 'p1' 29 |         type: choice 30 |         options: 31 |           - p1 32 |           - p2 33 |           - p3 34 |       maxtime: 35 |         description: "run time (in seconds)" 36 |         required: false 37 |         default: '60s' 38 | 39 | env: 40 |   GIT_REF: ${{ github.event.inputs.git-ref || github.ref }} 41 |   JS_RUNTIME: 'node' 42 |   MAXTIME_SEC: '30s' 43 |   NODE_VER: '21.x' 44 |   DENO_VER: '1.40.x' 45 |   MODE: 'p1' 46 |   QDOH: 'q' 47 | 48 | jobs: 49 |   profiler1: 50 |     name: 🕒 Fetch profiler 51 |     runs-on: ubuntu-latest 52 | 53 |     steps: 54 |       - name: 🍌 Checkout 55 |         uses: actions/checkout@v3.3.0 56 |         with: 57 |           ref: ${{ env.GIT_REF }} 58 |           fetch-depth: 0 59 | 60 |       - name: ⚓️ Set git tip 61 |         run: | 62 |           echo "GIT_HEAD=$(git rev-parse HEAD)" >> $GITHUB_ENV 63 |           echo "JS_RUNTIME=${JSR}" >> $GITHUB_ENV 64 |         shell: bash 65 |         env: 66 |           JSR: ${{ github.event.inputs.js-runtime || env.JS_RUNTIME }} 67 | 68 |       # docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs-or-python 69 |       - name: 🐎 Setup Node @v21 70 |         if: env.JS_RUNTIME == 'node' 71 |         uses: actions/setup-node@v3.6.0 72 |         with: 73 |           node-version: ${{ env.NODE_VER }} 74 | 75 |       - name: 🥑 Node deps 76 |         if: env.JS_RUNTIME == 'node' 77 |         run: | 78 |           # npm ci is faster, but it needs package-lock.json which isn't checked-in 79 |           npm i 80 |           npm run build --if-present 81 | 82 |       # deno.com/blog/deploy-static-files#example-a-statically-generated-site 83 |       - name: 🦕 Setup Deno @1.40 84 |         if: env.JS_RUNTIME == 'deno' 85 
| uses: denoland/setup-deno@main 86 | with: 87 | deno-version: ${{ env.DENO_VER }} 88 | 89 | - name: 🥝 Deno deps 90 | if: env.JS_RUNTIME == 'deno' 91 | run: | 92 | deno task prepare 93 | deno cache ./src/server-deno.ts 94 | 95 | # if non-interactive, prefer apt-get: unix.stackexchange.com/a/590703 96 | # github.com/natesales/repo 97 | # docs.github.com/en/actions/using-github-hosted-runners/customizing-github-hosted-runners#installing-software-on-ubuntu-runners 98 | - name: 🌶 Setup Q 99 | run: | 100 | # sudo and echo: stackoverflow.com/a/550808 101 | echo "deb [trusted=yes] https://repo.natesales.net/apt /" | sudo tee /etc/apt/sources.list.d/natesales.list > /dev/null 102 | sudo apt-get update 103 | sudo apt-get install q 104 | 105 | # docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-using-a-fallback-value 106 | # timeout exit-code: stackoverflow.com/a/60996259 107 | - name: 🎱 Run profiler 108 | if: success() 109 | run: | 110 | # for now, use runtime-specific timeouts (currently 60s for node and deno) for profiling 111 | # timeout "$MAXTIME_SEC" ./run "$JS_RUNTIME" "$MODE" || ( [[ $? -eq 124 ]] && echo "::notice::Timeout OK" ) 112 | ./run "$JS_RUNTIME" "$MODE" 113 | shell: bash 114 | env: 115 | MAXTIME_SEC: ${{ github.event.inputs.maxtime || env.MAXTIME_SEC }} 116 | MODE: ${{ github.event.inputs.mode || env.MODE }} 117 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. They are provided 2 | # by a third-party and are governed by separate terms of service, privacy 3 | # policy, and support documentation. 4 | 5 | name: supply-chain scorecard 6 | on: 7 | # For Branch-Protection check. Only the default branch is supported. 
See 8 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 9 | branch_protection_rule: 10 | # To guarantee Maintained check is occasionally updated. See 11 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 12 | schedule: 13 | - cron: '53 21 * * 2' 14 | push: 15 | branches: [ "main" ] 16 | 17 | # Declare default permissions as read only. 18 | permissions: read-all 19 | 20 | jobs: 21 | analysis: 22 | name: Scorecard analysis 23 | runs-on: ubuntu-latest 24 | permissions: 25 | # Needed to upload the results to code-scanning dashboard. 26 | security-events: write 27 | # Needed to publish results and get a badge (see publish_results below). 28 | id-token: write 29 | # Uncomment the permissions below if installing in a private repository. 30 | # contents: read 31 | # actions: read 32 | 33 | steps: 34 | - name: "Checkout code" 35 | uses: actions/checkout@v4 36 | with: 37 | persist-credentials: false 38 | 39 | - name: "Run analysis" 40 | uses: ossf/scorecard-action@v2.3.1 41 | with: 42 | results_file: results.sarif 43 | results_format: sarif 44 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: 45 | # - you want to enable the Branch-Protection check on a *public* repository, or 46 | # - you are installing Scorecard on a *private* repository 47 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. 48 | # repo_token: ${{ secrets.SCORECARD_TOKEN }} 49 | 50 | # Public repositories: 51 | # - Publish results to OpenSSF REST API for easy access by consumers 52 | # - Allows the repository to include the Scorecard badge. 53 | # - See https://github.com/ossf/scorecard-action#publishing-results. 54 | # For private repositories: 55 | # - `publish_results` will always be set to `false`, regardless 56 | # of the value entered here. 57 | publish_results: true 58 | 59 | # Upload the results as artifacts (optional). 
Commenting out will disable uploads of run results in SARIF 60 |       # format to the repository Actions tab. 61 |       - name: "Upload artifact" 62 |         uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 63 |         with: 64 |           name: SARIF file 65 |           path: results.sarif 66 |           retention-days: 21 67 | 68 |       # Upload the results to GitHub's code scanning dashboard. 69 |       - name: "Upload to code-scanning" 70 |         uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 71 |         with: 72 |           sarif_file: results.sarif 73 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignoring directories 2 | dist/ 3 | node_modules/ 4 | worker/ 5 | test/data/cache/ 6 | blocklists__/ 7 | dbip__/ 8 | src/basicconfig.json 9 | 10 | # lock files 11 | /package-lock.json 12 | /deno.lock 13 | 14 | .clinic/ 15 | **basicconfig.json 16 | **filetag.json 17 | *.log 18 | *.swp 19 | *.bak 20 | 21 | # Local environment variables 22 | .env 23 | 24 | # Cache 25 | .eslintcache 26 | bin/main.wasm 27 | pkg/serverless-dns.tar.gz 28 | -------------------------------------------------------------------------------- /.husky/.gitignore: -------------------------------------------------------------------------------- 1 | _ 2 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . 
"$(dirname "$0")/_/husky.sh" 3 | 4 | npx lint-staged 5 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | dist/ 3 | node_modules/ 4 | worker/ 5 | test/data/cache 6 | 7 | *.md 8 | *.css 9 | *.html 10 | -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 80, 3 | "tabWidth": 2, 4 | "semi": true, 5 | "singleQuote": false, 6 | "quoteProps": "consistent", 7 | "trailingComma": "es5" 8 | } 9 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | // See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations. 3 | // Extension identifier format: ${publisher}.${name}. Example: vscode.csharp 4 | // List of extensions which should be recommended for users of this workspace. 5 | "recommendations": [ 6 | "denoland.vscode-deno", 7 | "esbenp.prettier-vscode", 8 | "dbaeumer.vscode-eslint" 9 | ], 10 | // List of extensions recommended by VS Code that should not be recommended for users of this workspace. 11 | "unwantedRecommendations": [] 12 | } -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // For VS Code debugger 3 | // Use IntelliSense to learn about possible attributes. 4 | // Hover to view descriptions of existing attributes. 
5 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 6 | "version": "0.2.0", 7 | "configurations": [ 8 | { 9 | "name": "Deno", 10 | "type": "pwa-node", 11 | "request": "launch", 12 | "cwd": "${workspaceFolder}", 13 | "runtimeExecutable": "deno", 14 | "runtimeArgs": [ 15 | "run", 16 | "--inspect", 17 | "--unstable", 18 | "--import-map", 19 | "import_map.json", 20 | "--allow-all", 21 | "src/server-deno.ts" 22 | ], 23 | // "outputCapture": "std", 24 | "attachSimplePort": 9229 25 | }, 26 | { 27 | "type": "pwa-node", 28 | "request": "launch", 29 | "name": "Launch Node Server", 30 | "skipFiles": [ 31 | "/**" 32 | ], 33 | "program": "${workspaceFolder}/src/server-node.js" 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | // Uncomment deno specific settings while developing deno for intellisense, 3 | // type definitions, etc. 4 | 5 | // "deno.enable": true, 6 | // "deno.lint": true, 7 | // "deno.unstable": false 8 | "deno.importMap": "./import_map.json", 9 | "cSpell.words": [ 10 | "Deno", 11 | "rethinkdns" 12 | ], 13 | "cSpell.enableFiletypes": [ 14 | "env" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /bun.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM oven/bun AS setup 2 | WORKDIR /bun-dir 3 | COPY . . 
4 | RUN bun build ./src/server-node.js --target node --outdir ./dist --entry-naming bun.mjs --format esm 5 | RUN export BLOCKLIST_DOWNLOAD_ONLY=true && node ./dist/bun.mjs 6 | 7 | FROM oven/bun AS runner 8 | # env vals persist even at run-time: archive.is/QpXp2 9 | # and overrides fly.toml env values 10 | # get working dir in order 11 | WORKDIR /app 12 | COPY --from=setup /bun-dir/dist ./ 13 | COPY --from=setup /bun-dir/blocklists__ ./blocklists__ 14 | COPY --from=setup /bun-dir/dbip__ ./dbip__ 15 | # print files in work dir, must contain blocklists 16 | RUN ls -Fla 17 | # run with the default entrypoint (usually, bash or sh) 18 | CMD ["bun", "run", "./bun.mjs"] 19 | -------------------------------------------------------------------------------- /deno.Dockerfile: -------------------------------------------------------------------------------- 1 | # Based on github.com/denoland/deno_docker/blob/main/alpine.dockerfile 2 | 3 | ARG DENO_VERSION=1.44.4 4 | ARG BIN_IMAGE=denoland/deno:bin-${DENO_VERSION} 5 | 6 | FROM ${BIN_IMAGE} AS bin 7 | 8 | FROM frolvlad/alpine-glibc:alpine-3.13 9 | 10 | RUN apk --no-cache add ca-certificates 11 | 12 | RUN addgroup --gid 1000 deno \ 13 | && adduser --uid 1000 --disabled-password deno --ingroup deno \ 14 | && mkdir /deno-dir/ \ 15 | && chown deno:deno /deno-dir/ 16 | 17 | ENV DENO_DIR /deno-dir/ 18 | ENV DENO_INSTALL_ROOT /usr/local 19 | 20 | ARG DENO_VERSION 21 | ENV DENO_VERSION=${DENO_VERSION} 22 | COPY --from=bin /deno /bin/deno 23 | 24 | WORKDIR /deno-dir 25 | COPY . . 26 | 27 | # runs pre-build step which fetchs the latest basicconfig 28 | RUN src/build/pre.sh 29 | RUN ls -Fla 30 | 31 | ENTRYPOINT ["/bin/deno"] 32 | 33 | # Unstable API for 'Deno.listenTls#alpn_protocols' 34 | # This is only used while building, on fly.io 35 | CMD [ 36 | "run", 37 | "--unstable", 38 | "--allow-net", 39 | "--allow-env", 40 | "--allow-read", 41 | "src/server-deno.ts" 42 | ] 43 | 44 | # Run port process as a root privilege user. 
For say port 53 45 | # USER root 46 | -------------------------------------------------------------------------------- /deno.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowJs": true 4 | }, 5 | "importMap": "import_map.json", 6 | "nodeModulesDir": true, 7 | "lint": { 8 | "files": { 9 | "exclude": ["**/**"] 10 | } 11 | }, 12 | "fmt": { 13 | "files": { 14 | "exclude": ["**/**"] 15 | } 16 | }, 17 | "tasks": { 18 | "prepare": "./src/build/pre.sh" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /fastly.toml: -------------------------------------------------------------------------------- 1 | # This file describes a Fastly Compute@Edge package. To learn more visit: 2 | # https://developer.fastly.com/reference/fastly-toml/ 3 | 4 | authors = ["pdelolmo@fastly.com", "kailan@enviark.com"] 5 | description = "Weather dashboard at the edge" 6 | language = "rust" 7 | manifest_version = 3 8 | name = "weather-release" 9 | service_id = "" 10 | 11 | [setup] 12 | 13 | [setup.backends] 14 | 15 | [setup.backends."api.openweathermap.org"] 16 | address = "api.openweathermap.org" 17 | description = "OpenWeatherMap API Server" 18 | port = 443 19 | 20 | [setup.config_stores] 21 | 22 | [setup.config_stores.weather_auth] 23 | 24 | [setup.config_stores.weather_auth.items] 25 | 26 | [setup.config_stores.weather_auth.items.key] 27 | description = "API token for openweathermap.org" 28 | -------------------------------------------------------------------------------- /fly.tls.toml: -------------------------------------------------------------------------------- 1 | app = "" 2 | 3 | kill_signal = "SIGINT" 4 | kill_timeout = "15s" 5 | swap_size_mb = 152 6 | 7 | [build] 8 | dockerfile = "node.Dockerfile" 9 | 10 | [env] 11 | # offload TLS on to fly-proxy; 12 | # tls-tls_options for [[services]] must be set 13 | TLS_OFFLOAD = "true" 14 | CLOUD_PLATFORM = "fly" 15 | BUN_ENV 
= "production" 16 | DENO_ENV = "production" 17 | NODE_ENV = "production" 18 | LOG_LEVEL = "info" 19 | 20 | [experimental] 21 | auto_rollback = true 22 | 23 | # DNS over HTTPS (well, h2c and http1.1) 24 | [[services]] 25 | internal_port = 8055 26 | protocol = "tcp" 27 | auto_stop_machines = true 28 | auto_start_machines = true 29 | 30 | [services.concurrency] 31 | hard_limit = 775 32 | soft_limit = 700 33 | type = "connections" 34 | 35 | [[services.ports]] 36 | handlers = ["tls"] 37 | tls_options = { alpn = ["h2", "http/1.1"] } 38 | port = 443 39 | 40 | [[services.ports]] 41 | handlers = ["tls"] 42 | tls_options = { alpn = ["h2", "http/1.1"] } 43 | port = 8055 44 | 45 | [[services.tcp_checks]] 46 | # super aggressive interval and timeout because 47 | # health-check routing is handled by fly-proxy 48 | # and it is cross-region 49 | # community.fly.io/t/12997 50 | interval = "5s" 51 | timeout = "1s" 52 | grace_period = "15s" 53 | # restart_limit is unused in appsv2 54 | restart_limit = 0 55 | 56 | # DNS over TCP/TLS 57 | [[services]] 58 | internal_port = 10555 59 | protocol = "tcp" 60 | auto_stop_machines = true 61 | auto_start_machines = true 62 | 63 | [services.concurrency] 64 | hard_limit = 775 65 | soft_limit = 700 66 | type = "connections" 67 | 68 | [[services.ports]] 69 | # TODO: ProxyProto v2 70 | handlers = ["tls"] 71 | port = 853 72 | 73 | [[services.ports]] 74 | # TODO: ProxyProto v2 75 | handlers = ["tls"] 76 | port = 10555 77 | 78 | [[services.tcp_checks]] 79 | # super aggressive interval and timeout because 80 | # health-check routing is handled by fly-proxy 81 | # and it is cross-region 82 | # community.fly.io/t/12997 83 | interval = "5s" 84 | timeout = "1s" 85 | grace_period = "15s" 86 | # restart_limit is unused in appsv2 87 | restart_limit = 0 88 | 89 | # community.fly.io/t/5490/3 90 | [checks] 91 | [checks.up] 92 | # grace_period affects time taken for rolling deploys 93 | grace_period = "15s" 94 | interval = "15s" 95 | method = "get" 96 | path = 
"/check" 97 | port = 8888 98 | timeout = "2s" 99 | type = "http" 100 | 101 | -------------------------------------------------------------------------------- /fly.toml: -------------------------------------------------------------------------------- 1 | app = "" 2 | 3 | kill_signal = "SIGINT" 4 | kill_timeout = "15s" 5 | swap_size_mb = 152 6 | 7 | [build] 8 | dockerfile = "node.Dockerfile" 9 | 10 | [experimental] 11 | auto_rollback = true 12 | 13 | [env] 14 | CLOUD_PLATFORM = "fly" 15 | BUN_ENV = "production" 16 | DENO_ENV = "production" 17 | NODE_ENV = "production" 18 | LOG_LEVEL = "info" 19 | 20 | # DNS over HTTPS 21 | [[services]] 22 | protocol = "tcp" 23 | internal_port = 8080 24 | auto_stop_machines = true 25 | auto_start_machines = true 26 | 27 | [[services.ports]] 28 | # TODO: ProxyProto v2 29 | port = 443 30 | [[services.ports]] 31 | port = 8080 32 | 33 | [services.concurrency] 34 | type = "connections" 35 | hard_limit = 775 36 | soft_limit = 600 37 | 38 | [[services.tcp_checks]] 39 | # super aggressive interval and timeout because 40 | # health-check routing is handled by fly-proxy 41 | # and it is cross-region 42 | # community.fly.io/t/12997 43 | interval = "5s" 44 | timeout = "1s" 45 | grace_period = "15s" 46 | # restart_limit not used on appsv2 47 | restart_limit = 0 48 | 49 | # DNS over TLS 50 | [[services]] 51 | protocol = "tcp" 52 | internal_port = 10000 53 | auto_stop_machines = true 54 | auto_start_machines = true 55 | 56 | [[services.ports]] 57 | # TODO: ProxyProto v2 58 | port = 853 59 | [[services.ports]] 60 | port = 10000 61 | 62 | [services.concurrency] 63 | type = "connections" 64 | hard_limit = 775 65 | soft_limit = 600 66 | 67 | [[services.tcp_checks]] 68 | # super aggressive interval and timeout because 69 | # health-check routing is handled by fly-proxy 70 | # and it is cross-region 71 | # community.fly.io/t/12997 72 | interval = "5s" 73 | timeout = "1s" 74 | grace_period = "15s" 75 | # restart_limit is unused in appsv2 76 | restart_limit 
= 0 77 | 78 | # community.fly.io/t/5490/3 79 | [checks] 80 | [checks.up] 81 | # grace_period affects time taken for rolling deploys 82 | grace_period = "15s" 83 | interval = "15s" 84 | method = "get" 85 | path = "/check" 86 | port = 8888 87 | timeout = "2s" 88 | type = "http" 89 | -------------------------------------------------------------------------------- /import_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "imports": { 3 | "buffer": "https://deno.land/std@0.177.0/node/buffer.ts", 4 | "node:buffer": "https://deno.land/std@0.177.0/node/buffer.ts", 5 | "os" : "https://deno.land/std@0.177.0/node/os.ts", 6 | "process": "https://deno.land/std@0.177.0/node/process.ts", 7 | "@serverless-dns/dns-parser": "https://github.com/serverless-dns/dns-parser/raw/v2.1.2/index.js", 8 | "@serverless-dns/lfu-cache": "https://github.com/serverless-dns/lfu-cache/raw/v3.4.1/lfu.js", 9 | "@serverless-dns/trie/": "https://github.com/serverless-dns/trie/raw/v0.0.13/src/", 10 | "@riaskov/mmap-io": "https://github.com/ARyaskov/mmap-io/raw/v1.4.3/src" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /node.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22 as setup 2 | # git is required if any of the npm packages are git[hub] packages 3 | RUN apt-get update && apt-get install git -yq --no-install-suggests --no-install-recommends 4 | WORKDIR /app 5 | COPY . . 
6 | # get deps, build, bundle 7 | RUN npm i 8 | # webpack externalizes native modules (@riaskov/mmap-io) 9 | RUN npm run build:fly 10 | # or RUN npx webpack --config webpack.fly.cjs 11 | # download blocklists and bake them in the img 12 | RUN export BLOCKLIST_DOWNLOAD_ONLY=true && node ./dist/fly.mjs 13 | # or RUN export BLOCKLIST_DOWNLOAD_ONLY=true && node ./src/server-node.js 14 | 15 | # stage 2 16 | # pin to node22 for native deps (@ariaskov/mmap-io) 17 | FROM node:22-alpine AS runner 18 | 19 | # env vals persist even at run-time: archive.is/QpXp2 20 | # and overrides fly.toml env values 21 | ENV NODE_ENV production 22 | ENV NODE_OPTIONS="--max-old-space-size=320 --heapsnapshot-signal=SIGUSR2" 23 | # get working dir in order 24 | WORKDIR /app 25 | # external deps not bundled by webpack 26 | RUN npm i @riaskov/mmap-io@v1.4.3 27 | 28 | COPY --from=setup /app/dist ./ 29 | COPY --from=setup /app/blocklists__ ./blocklists__ 30 | COPY --from=setup /app/dbip__ ./dbip__ 31 | 32 | # print files in work dir, must contain blocklists 33 | RUN ls -Fla 34 | # run with the default entrypoint (usually, bash or sh) 35 | CMD ["node", "./fly.mjs"] 36 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "name": "serverless-dns", 4 | "version": "2.0.0", 5 | "license": "MPL-2.0", 6 | "description": "Rethink Free Dns with Blocklist, one click install from github to cloudflare", 7 | "main": "./src/server-workers.js", 8 | "type": "module", 9 | "scripts": { 10 | "clean": "npm run clean:wrangler", 11 | "clean:node": "rm -rf node_modules/ package-lock.json", 12 | "clean:wrangler": "rm -rf worker/ dist/", 13 | "test": "echo \"Error: no test specified\" && exit 1", 14 | "prepare": "./src/build/pre.sh", 15 | "build": "npx webpack --config webpack.config.cjs", 16 | "build:fastly": "npx webpack --config webpack.fastly.cjs && npm run 
fastly:wasm", 17 | "build:fly": "npx webpack --config webpack.fly.cjs", 18 | "fastly:wasm": "npx js-compute-runtime ./dist/fastly.js ./dist/fastly.wasm" 19 | }, 20 | "repository": { 21 | "type": "git", 22 | "url": "git+https://github.com/serverless-dns/serverless-dns.git" 23 | }, 24 | "author": "", 25 | "bugs": { 26 | "url": "https://github.com/serverless-dns/serverless-dns/issues" 27 | }, 28 | "homepage": "https://github.com/serverless-dns/serverless-dns#readme", 29 | "engines": { 30 | "node": ">=16" 31 | }, 32 | "dependencies": { 33 | "@serverless-dns/dns-parser": "github:serverless-dns/dns-parser#v2.1.2", 34 | "@serverless-dns/lfu-cache": "github:serverless-dns/lfu-cache#v3.5.2", 35 | "@serverless-dns/trie": "github:serverless-dns/trie#v0.0.17", 36 | "httpx-server": "^1.4.4", 37 | "@riaskov/mmap-io": "^1.4.3", 38 | "node-polyfill-webpack-plugin": "^2.0.1", 39 | "proxy-protocol-js": "^4.0.5" 40 | }, 41 | "optionalDependencies": { 42 | "@fastly/js-compute": "^1.0.1" 43 | }, 44 | "devDependencies": { 45 | "@types/node": "^16.11.7", 46 | "buffer": "^6.0.3", 47 | "clinic": "^11.1.0", 48 | "eslint": "^8.5.0", 49 | "eslint-config-google": "^0.14.0", 50 | "eslint-plugin-prettier": "^4.0.0", 51 | "husky": "^7.0.4", 52 | "lint-staged": "^12.1.4", 53 | "node-loader": "^2.0.0", 54 | "prettier": "2.5.1", 55 | "webpack": "^5.92.1", 56 | "webpack-cli": "^4.10.0", 57 | "wrangler": "^3.0.0" 58 | }, 59 | "lint-staged": { 60 | "*.?(m|c)js": "eslint --cache --fix", 61 | "*.ts": "prettier --write" 62 | }, 63 | "eslintIgnore": [ 64 | "src/core/cfg.js" 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | # this script is for development/test runs only 6 | # defaults: stackoverflow.com/a/16753536 7 | jobfile="${1:-/tmp/sdns-profiler-job-pids}" 8 | runtime="${1:-node}"; 9 | profiler="${2:-$1}"; 10 | waitsec="${3:-0}"; 
11 | qdoh="${QDOH:-invalid}"; 12 | prestart="" 13 | proc0="" 14 | 15 | echo "run $runtime"; 16 | 17 | # patorjk.com/software/taag/#p=display&c=echo&f=Small%20Slant&t=r-profiler 18 | profilerbanner() { 19 | echo " ___ _ __ "; 20 | echo " ____ ____ ___ ____ ___ / _/(_)/ /___ ____"; 21 | echo " / __//___// _ \ / __// _ \ / _// // // -_)/ __/"; 22 | echo "/_/ / .__//_/ \___//_/ /_//_/ \__//_/ "; 23 | echo " /_/ "; 24 | } 25 | 26 | # patorjk.com/software/taag/#p=display&c=echo&f=Small%20Slant&t=rethinkdns 27 | banner() { 28 | echo " __ __ _ __ __ "; 29 | echo " ____ ___ / /_ / / (_)___ / /__ ___/ /___ ___"; 30 | echo " / __// -_)/ __// _ \ / // _ \ / '_// _ // _ \ (_-<"; 31 | echo "/_/ \__/ \__//_//_//_//_//_//_/\_\ \_,_//_//_//___/"; 32 | echo " "; 33 | } 34 | 35 | cleanup() { 36 | # alt? unix.stackexchange.com/a/146770 37 | # echo "sig... $proc0" 38 | # kill -INT $proc0 39 | # stackoverflow.com/a/51576504 40 | jobs -p > jobfile 41 | j=$(cat jobfile) 42 | echo "kill... $j" 43 | kill -INT $j || true 44 | # does not work... 
jobs -p | xargs -r kill -9 45 | rm jobfile 46 | } 47 | # davidpashley.com/articles/writing-robust-shell-scripts 48 | trap cleanup INT TERM EXIT 49 | 50 | bgcmd() { 51 | $1 & 52 | } 53 | 54 | bgsilent() { 55 | $1 >/dev/null 2>&1 & 56 | } 57 | 58 | dohquery() { 59 | domain=$1 60 | resolver="https://localhost:8080/" 61 | pstart="${qdoh} -i -t A -s ${resolver} -q ${domain}" 62 | bgsilent "$pstart" 63 | } 64 | 65 | reqs() { 66 | while true 67 | do 68 | 69 | prefix=$( dd if=/dev/urandom bs=20 count=20 status=none | tr -dc 'A-Z0-9' | xargs echo ) 70 | domain="${prefix}.dnsleaktest.com" 71 | dohquery ${domain} 72 | 73 | if [ $1 != "0" ]; then 74 | sleep $1 75 | fi 76 | 77 | done 78 | } 79 | 80 | greqs() { 81 | while true 82 | do 83 | dohquery google.co.pk 84 | dohquery google.co.in 85 | dohquery google.com 86 | dohquery google.co.uk 87 | 88 | if [ $1 != "0" ]; then 89 | sleep $1 90 | fi 91 | 92 | done 93 | } 94 | 95 | if [ $runtime = "help" ] || [ $runtime = "h" ]; then 96 | echo "note: make sure node / deno / wrangler are in path"; 97 | echo "usage: $0 [node|deno|workers] [[p1|p2] [waitsec]]"; 98 | exit 0; 99 | fi 100 | 101 | if [ $runtime = "deno" ] || [ $runtime = "d" ]; then 102 | echo "note: deno v1.17+ required"; 103 | echo "using `which deno`"; 104 | start="deno run --unstable \ 105 | --allow-env \ 106 | --allow-net \ 107 | --allow-read \ 108 | --allow-write \ 109 | src/server-deno.ts"; 110 | elif [ $runtime = "workers" ] || [ $runtime = "w" ]; then 111 | echo "note: wrangler v1.16+ required"; 112 | echo "using `which wrangler`"; 113 | start="npx wrangler dev --local"; 114 | elif [ $runtime = "fastly" ] || [ $runtime = "f" ]; then 115 | echo "note: Fastly CLI required"; 116 | echo "using `which fastly`"; 117 | # developer.fastly.com/learning/compute/testing/#starting-the-server 118 | start="fastly compute serve --skip-verification --verbose --file ./dist/fastly.wasm"; 119 | elif [ $runtime = "fly" ] || [ $runtime = "ff" ]; then 120 | prestart="npm run build:fly" 121 
| export NODE_OPTIONS="--trace-warnings --max-old-space-size=320 --heapsnapshot-signal=SIGUSR2 --heapsnapshot-near-heap-limit=2" 122 | start="node ./dist/fly.mjs" 123 | else 124 | echo "note: nodejs v19+ required"; 125 | echo "using `which node`"; 126 | # verbose: NODE_DEBUG=http2,http,tls,net... ref: stackoverflow.com/a/46858827 127 | export NODE_OPTIONS="--trace-warnings --max-old-space-size=320 --heapsnapshot-signal=SIGUSR2 --heapsnapshot-near-heap-limit=2" 128 | start="node ./src/server-node.js"; 129 | fi 130 | 131 | # prestart ceremony, if any 132 | if [ ! -z "$prestart" ]; then 133 | $prestart 134 | fi 135 | 136 | # prevent stdin from early close on signals? 137 | # unix.stackexchange.com/a/672061 138 | 139 | if [ $profiler = "cpu" ]; then 140 | if [ $runtime != "node" ] && [ $runtime != "n" ]; then 141 | echo "profiler (cpu): only on node" 142 | exit 1 143 | fi 144 | 145 | echo "profiler (cpu): running clinicjs flame"; 146 | echo "profiler (cpu): press ctrl+c to open cpu flame graphs in a browser"; 147 | profilerbanner; 148 | npx clinic flame -- $start 149 | proc0=$! 150 | elif [ $profiler = "mem" ]; then 151 | if [ $runtime != "node" ] && [ $runtime != "n" ]; then 152 | echo "profiler (mem): only on node" 153 | exit 1 154 | fi 155 | 156 | echo "profiler (mem): running clinicjs heapprofile" 157 | echo "profiler (mem): press ctrl+c to open memory flame graphs in a browser" 158 | profilerbanner; 159 | npx clinic heapprofiler -- $start 160 | proc0=$! 161 | elif [ $profiler = "fn" ]; then 162 | if [ $runtime != "node" ] && [ $runtime != "n" ]; then 163 | echo "profiler (fn): only on node" 164 | exit 1 165 | fi 166 | 167 | echo "profiler (fn): running clinicjs bubbleprof" 168 | echo "profiler (fn): press ctrl+c to open func bubble graphs in a browser" 169 | profilerbanner; 170 | npx clinic bubbleprof -- $start 171 | proc0=$! 
172 | elif [ $profiler = "profile1" ] || [ $profiler = "p1" ]; then 173 | if [ $qdoh = "invalid" ]; then 174 | echo "Specify env QDOH path" 175 | exit 1 176 | fi 177 | 178 | echo "profiler: running doh with fetch"; 179 | profilerbanner; 180 | DISABLE_BLOCKLISTS=true \ 181 | PROFILE_DNS_RESOLVES=true \ 182 | NODE_DOH_ONLY=true \ 183 | NODE_AVOID_FETCH=false \ 184 | LOG_LEVEL=warn \ 185 | $start & 186 | proc0=$! 187 | sleep 1 188 | reqs "$waitsec" & 189 | elif [ $profiler = "profile2" ] || [ $profiler = "p2" ]; then 190 | if [ ${qdoh} = "invalid" ]; then 191 | echo "Specify env QDOH path" 192 | exit 1 193 | fi 194 | 195 | echo "profiler: running non-fetch doh"; 196 | profilerbanner; 197 | DISABLE_BLOCKLISTS=true \ 198 | PROFILE_DNS_RESOLVES=true \ 199 | NODE_DOH_ONLY=true \ 200 | NODE_AVOID_FETCH=true \ 201 | LOG_LEVEL=warn \ 202 | $start & 203 | proc0=$! 204 | sleep 1 205 | reqs "$waitsec" & 206 | elif [ $profiler = "profile3" ] || [ $profiler = "p3" ]; then 207 | if [ ${qdoh} = "invalid" ]; then 208 | echo "Specify env QDOH path" 209 | exit 1 210 | fi 211 | if [ $runtime != "node" ] && [ $runtime != "n" ]; then 212 | echo "Profile3 only valid on Node" 213 | exit 1 214 | fi 215 | 216 | echo "profiler: running podns"; 217 | profilerbanner; 218 | DISABLE_BLOCKLISTS=true \ 219 | PROFILE_DNS_RESOLVES=true \ 220 | NODE_DOH_ONLY=false \ 221 | LOG_LEVEL=warn \ 222 | $start & 223 | proc0=$! 224 | sleep 1 225 | reqs "$waitsec" & 226 | else 227 | banner; 228 | bgcmd "$start" 229 | proc0=$! 
230 | fi 231 | 232 | # list all bg jobs 233 | jobs -l 234 | # wait for the main job to finish 235 | wait $proc0 236 | -------------------------------------------------------------------------------- /src/build/pre.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | wk="$1" 4 | mm="$2" 5 | yyyy="$3" 6 | 7 | # stackoverflow.com/a/24753942 8 | hasfwslash() { 9 | case "$1" in 10 | */*) echo yes ;; 11 | * ) echo no ;; 12 | esac 13 | } 14 | 15 | burl="https://cfstore.rethinkdns.com/blocklists" 16 | dir="bc" 17 | codec="u6" 18 | f="basicconfig.json" 19 | f2="filetag.json" 20 | cwd=$(pwd) 21 | # exec this script from npm or project root 22 | out="./src/${codec}-${f}" 23 | out2="./src/${codec}-${f2}" 24 | name=$(uname) 25 | 26 | # timestamp: 1667519318.799 stackoverflow.com/a/69400542 27 | # nowms =`date -u +"%s.%3N"` 28 | if [ "$name" = "Darwin" ] 29 | then 30 | now=$(date -u +"%s") 31 | else 32 | now=$(date --utc +"%s") 33 | fi 34 | 35 | 36 | # date from timestamp: stackoverflow.com/a/16311821 37 | if [ "$name" = "Darwin" ] 38 | then 39 | day=$(date -r "$now" "+%d") 40 | else 41 | day=$(date -d "@$now" "+%d") 42 | fi 43 | # ex: conv 08 => 8 stackoverflow.com/a/12821845 44 | day=${day#0} 45 | # week; ceil: stackoverflow.com/a/12536521 46 | wkdef=$(((day + 7 -1) / 7)) 47 | # year 48 | if [ "$name" = "Darwin" ] 49 | then 50 | yyyydef=$(date -r "$now" "+%Y") 51 | else 52 | yyyydef=$(date -d "@$now" "+%Y") 53 | fi 54 | # month 55 | if [ "$name" = "Darwin" ] 56 | then 57 | mmdef=$(date -r "$now" "+%m") 58 | else 59 | mmdef=$(date -d "@$now" "+%m") 60 | fi 61 | mmdef=${mmdef#0} 62 | 63 | # defaults: stackoverflow.com/a/28085062 64 | : "${wk:=$wkdef}" "${mm:=$mmdef}" "${yyyy:=$yyyydef}" 65 | 66 | # wget opts: superuser.com/a/689340 67 | wgetopts="--tries=3 --retry-on-http-error=404 --waitretry=3 --no-dns-cache" 68 | 69 | # stackoverflow.com/a/1445507 70 | max=4 71 | # 0..4 (5 loops) 72 | for i in $(seq 0 $max) 73 | do 74 | 
echo "x=== pre.sh: $i try $yyyy/$mm-$wk at $now from $cwd" 75 | 76 | # TODO: check if the timestamp within the json file is more recent 77 | # file/symlink exists? stackoverflow.com/a/44679975 78 | if [ -f "${out}" ] || [ -L "${out}" ]; then 79 | echo "=x== pre.sh: no op" 80 | exit 0 81 | else 82 | wget $wgetopts -q "${burl}/${yyyy}/${dir}/${mm}-${wk}/${codec}/${f}" -O "${out}" 83 | wcode=$? 84 | 85 | if [ $wcode -eq 0 ]; then 86 | # baretimestamp=$(cut -d"," -f9 "$out" | cut -d":" -f2 | grep -o -E '[0-9]+' | tail -n1) 87 | fulltimestamp=$(cut -d"," -f9 "$out" | cut -d":" -f2 | tr -dc '0-9/') 88 | if [ "$(hasfwslash "$fulltimestamp")" = "no" ]; then 89 | echo "==x= pre.sh: $i filetag at f8" 90 | fulltimestamp=$(cut -d"," -f8 "$out" | cut -d":" -f2 | tr -dc '0-9/') 91 | fi 92 | echo "==x= pre.sh: $i ok $wcode; filetag? ${fulltimestamp}" 93 | wget $wgetopts -q "${burl}/${fulltimestamp}/${codec}/${f2}" -O "${out2}" 94 | wcode2=$? 95 | if [ $wcode2 -eq 0 ]; then 96 | echo "===x pre.sh: $i filetag ok $wcode2" 97 | exit 0 98 | else 99 | echo "===x pre.sh: $i not ok $wcode2" 100 | exit 1 101 | rm ${out} 102 | rm ${out2} 103 | fi 104 | else 105 | # wget creates blank files on errs 106 | rm ${out} 107 | echo "==x= pre.sh: $i not ok $wcode" 108 | fi 109 | fi 110 | 111 | # see if the prev wk was latest 112 | wk=$((wk - 1)) 113 | if [ $wk -eq 0 ]; then 114 | # only feb has 28 days (28/7 => 4), edge-case overcome by retries 115 | wk="5" 116 | # prev month 117 | mm=$((mm - 1)) 118 | fi 119 | if [ $mm -eq 0 ]; then 120 | mm="12" 121 | # prev year 122 | yyyy=$((yyyy - 1)) 123 | fi 124 | done 125 | 126 | exit 1 127 | -------------------------------------------------------------------------------- /src/commons/b32.js: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // Copyright (c) 2016-2021 Linus Unnebäck 3 | // from github.com/LinusU/base32-encode/blob/b970e2ee5/index.js 4 | // and 
github.com/LinusU/base32-decode/blob/fa61c01b/index.js 5 | // and github.com/LinusU/to-data-view/blob/e80ca034/index.js 6 | const ALPHA32 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; 7 | // map chars to corresponding indices, 8 | // ex: A => 0, B => 1, ... Z => 25, 2 => 26, 3 => 27, ... 7 => 31 9 | const RALPHA32 = ALPHA32.split("").reduce((o, c, i) => { 10 | o[c] = i; 11 | return o; 12 | }, {}); 13 | 14 | function toDataView(data) { 15 | if ( 16 | data instanceof Int8Array || 17 | data instanceof Uint8Array || 18 | data instanceof Uint8ClampedArray 19 | ) { 20 | return new DataView(data.buffer, data.byteOffset, data.byteLength); 21 | } 22 | 23 | if (data instanceof ArrayBuffer) { 24 | return new DataView(data); 25 | } 26 | 27 | return null; 28 | } 29 | 30 | function readChar(chr) { 31 | chr = chr.toUpperCase(); 32 | const idx = RALPHA32[chr]; 33 | 34 | if (idx == null) { 35 | throw new Error("invalid b32 character: " + chr); 36 | } 37 | 38 | return idx; 39 | } 40 | 41 | function base32(arrbuf, padding) { 42 | const view = toDataView(arrbuf); 43 | if (!view) throw new Error("cannot create data-view from given input"); 44 | 45 | let bits = 0; 46 | let value = 0; 47 | let output = ""; 48 | 49 | for (let i = 0; i < view.byteLength; i++) { 50 | value = (value << 8) | view.getUint8(i); 51 | bits += 8; 52 | 53 | while (bits >= 5) { 54 | output += ALPHA32[(value >>> (bits - 5)) & 31]; 55 | bits -= 5; 56 | } 57 | } 58 | 59 | if (bits > 0) { 60 | output += ALPHA32[(value << (5 - bits)) & 31]; 61 | } 62 | 63 | if (padding) { 64 | while (output.length % 8 !== 0) { 65 | output += "="; 66 | } 67 | } 68 | 69 | return output; 70 | } 71 | 72 | export function rbase32(input) { 73 | input = input.replace(/=+$/, ""); 74 | 75 | const length = input.length; 76 | 77 | let bits = 0; 78 | let value = 0; 79 | 80 | let index = 0; 81 | const output = new Uint8Array(((length * 5) / 8) | 0); 82 | 83 | for (let i = 0; i < length; i++) { 84 | value = (value << 5) | readChar(input[i]); 85 | bits += 5; 
86 | 87 | if (bits >= 8) { 88 | output[index++] = (value >>> (bits - 8)) & 255; 89 | bits -= 8; 90 | } 91 | } 92 | return output; 93 | } 94 | -------------------------------------------------------------------------------- /src/commons/bufutil.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import { Buffer } from "node:buffer"; 9 | import * as util from "./util.js"; 10 | 11 | export const ZERO = new Uint8Array(); 12 | const ZEROSTR = ""; 13 | export const ZEROAB = new ArrayBuffer(); 14 | const encoder = new TextEncoder(); 15 | const decoder = new TextDecoder(); 16 | 17 | export function fromStr(s) { 18 | if (util.emptyString(s)) return ZERO; 19 | return encoder.encode(s); 20 | } 21 | 22 | export function toStr(b) { 23 | if (emptyBuf(b)) return ZEROSTR; 24 | return decoder.decode(b); 25 | } 26 | 27 | export function fromB64(b64std) { 28 | if (util.emptyString(b64std)) return ZERO; 29 | return Buffer.from(b64std, "base64"); 30 | } 31 | 32 | export function toB64(buf) { 33 | if (emptyBuf(buf)) return ZEROSTR; 34 | if (buf instanceof Buffer) return buf.toString("base64"); 35 | const u8 = normalize8(buf); 36 | return Buffer.of(u8).toString("base64"); 37 | } 38 | 39 | export function hex(b) { 40 | if (emptyBuf(b)) return ZEROSTR; 41 | // avoids slicing Buffer (normalize8) to get hex 42 | if (b instanceof Buffer) return b.toString("hex"); 43 | const ab = normalize8(b); 44 | return Array.prototype.map 45 | .call(new Uint8Array(ab), (b) => b.toString(16).padStart(2, "0")) 46 | .join(""); 47 | } 48 | 49 | /** 50 | * @param { Buffer | Uint8Array | ArrayBuffer } b 51 | * @returns {number} 52 | */ 53 | export function len(b) { 54 | if (emptyBuf(b)) return 0; 
return b.byteLength;
}

// base64url-encodes (RFC 4648 §5) buffer b, stripping '=' padding.
// NOTE(review): spreading a very large buffer into fromCharCode can
// exceed the engine's max argument count — confirm callers only pass
// small buffers (dns payloads are small, so likely fine).
export function bytesToBase64Url(b) {
  return btoa(String.fromCharCode(...new Uint8Array(b)))
    .replace(/\//g, "_")
    .replace(/\+/g, "-")
    .replace(/=/g, "");
}

// converts a binary-string (one char per byte) to a Uint8Array
function binaryStringToBytes(bs) {
  const len = bs.length;
  const bytes = new Uint8Array(len);

  for (let i = 0; i < len; i++) {
    bytes[i] = bs.charCodeAt(i);
  }

  return bytes;
}

// maps base64url alphabet back to the regular base64 alphabet
function regularBase64(b64url) {
  if (util.emptyString(b64url)) return b64url;

  return b64url.replace(/_/g, "/").replace(/-/g, "+");
}

// decodes a (possibly uri-encoded) base64url string to a Uint8Array
function base64ToUint8(b64uri) {
  b64uri = normalizeb64(b64uri);
  const b64url = decodeURI(b64uri);
  const binaryStr = atob(regularBase64(b64url));
  return binaryStringToBytes(binaryStr);
}

// decodes a (possibly uri-encoded) base64url string to a Uint16Array
export function base64ToUint16(b64uri) {
  b64uri = normalizeb64(b64uri);
  const b64url = decodeURI(b64uri);
  const binaryStr = atob(regularBase64(b64url));
  return decodeFromBinary(binaryStr);
}

// decodes a base64url string to its underlying ArrayBuffer
export function base64ToBytes(b64uri) {
  return raw(base64ToUint8(b64uri));
}

// views b (a binary-string, or a u8 array when u8 is truthy) as u16;
// NOTE(review): assumes byteLength is even — odd lengths would throw
// from the Uint16Array constructor; confirm inputs are u16-aligned.
export function decodeFromBinary(b, u8) {
  // if b is a u8 array, simply u16 it
  if (u8) return new Uint16Array(raw(b));

  // if b is a binary-string, convert it to u8
  const bytes = binaryStringToBytes(b);
  // ...and then to u16
  return new Uint16Array(raw(bytes));
}

// as decodeFromBinary, but b is always a u8 array
export function decodeFromBinaryArray(b) {
  const u8 = true;
  return decodeFromBinary(b, u8);
}

// true when b is missing or has no bytes
export function emptyBuf(b) {
  return !b || b.byteLength <= 0;
}

// returns underlying buffer prop when b is TypedArray or node:Buffer
// NOTE(review): this returns the entire backing ArrayBuffer; when b
// is a view with a non-zero byteOffset or shorter byteLength, the
// result contains bytes outside the view (see arrayBufferOf below,
// which copies just the viewed range instead).
export function raw(b) {
  if (!b || b.buffer == null) b = ZERO;

  return b.buffer;
}

// normalize8 returns the underlying buffer if any, as Uint8Array
// b is either an ArrayBuffer, a TypedArray, or a node:Buffer
export function normalize8(b) {
  if (emptyBuf(b)) return ZERO;

  let underlyingBuffer = null;
  // ... has byteLength property, b must be of type ArrayBuffer;
  if (b instanceof ArrayBuffer) underlyingBuffer = b;
  // when b is node:Buffer, this underlying buffer is not its
  // TypedArray equivalent: nodejs.org/api/buffer.html#bufbuffer
  // but node:Buffer is a subclass of Uint8Array (a TypedArray)
  // first though, slice out the relevant range from node:Buffer
  else if (b instanceof Buffer) underlyingBuffer = arrayBufferOf(b);
  else underlyingBuffer = raw(b);

  return new Uint8Array(underlyingBuffer);
}

/**
 * Copies out just the viewed range [byteOffset, byteOffset+byteLength)
 * of buf's backing store as a standalone ArrayBuffer.
 * @param {Uint8Array|Buffer} buf
 * @returns {ArrayBuffer}
 */
export function arrayBufferOf(buf) {
  // buf is either TypedArray or node:Buffer
  if (emptyBuf(buf)) return ZEROAB;

  const offset = buf.byteOffset;
  const len = buf.byteLength;
  // slice creates a view when buf is node:Buffer, but:
  // slice creates a copy when buf is an TypedArray; otoh,
  // subarray creates a view for both TypedArray & node:Buffer
  // ref: nodejs.org/api/buffer.html#buffers-and-typedarrays.
  // what we want to return is an array-buffer after copying
  // the relevant contents from the the underlying-buffer.
  // stackoverflow.com/a/31394257
  return buf.buffer.slice(offset, offset + len);
}

// wraps arrayBuf in a node:Buffer (no copy of the Uint8Array view)
// stackoverflow.com/a/17064149
export function bufferOf(arrayBuf) {
  if (emptyBuf(arrayBuf)) return ZERO;
  if (arrayBuf instanceof Uint8Array) return arrayBuf;

  return Buffer.from(new Uint8Array(arrayBuf));
}

// zeroes-out b in place; always returns 0
export function recycleBuffer(b) {
  b.fill(0);
  return 0;
}

// allocates an uninitialized node:Buffer of the given size
export function createBuffer(size) {
  return Buffer.allocUnsafe(size);
}

/**
 * Encodes a number to an Uint8Array of length `n` in Big Endian byte order.
 * https://stackoverflow.com/questions/55583037/
 * NOTE(review): bitwise ops coerce to uint32, so values beyond
 * 2^32-1 are truncated — confirm callers stay within 32 bits.
 * @param {Number} n - Number to encode
 * @param {Number} len - Length of Array required
 * @return {Uint8Array}
 */
export function encodeUint8ArrayBE(n, len) {
  const o = n;

  // all zeros...
  if (!n) return new Uint8Array(len);

  const a = [];
  a.unshift(n & 255);
  while (n >= 256) {
    n = n >>> 8;
    a.unshift(n & 255);
  }

  if (a.length > len) {
    throw new RangeError(`Cannot encode ${o} in ${len} len Uint8Array`);
  }

  let fill = len - a.length;
  while (fill--) a.unshift(0);

  return new Uint8Array(a);
}

// stackoverflow.com/a/40108543/
// Concatenate a mix of typed arrays; returns a plain ArrayBuffer
export function concat(arraybuffers) {
  const sz = arraybuffers.reduce((sum, a) => sum + a.byteLength, 0);
  const buf = new ArrayBuffer(sz);
  const cat = new Uint8Array(buf);
  let offset = 0;
  for (const a of arraybuffers) {
    // github: jessetane/array-buffer-concat/blob/7d79d5ebf/index.js#L17
    const v = new Uint8Array(a);
    cat.set(v, offset);
    offset += a.byteLength;
  }
  return buf;
}

// concatenates node:Buffers (or Uint8Arrays) into one node:Buffer
export function concatBuf(these) {
  return Buffer.concat(these);
}

function
normalizeb64(s) {
  // beware: atob(null) => \u009eée
  // and: decodeURI(null) => "null"
  // but: atob("") => ""
  // and: atob(undefined) => exception
  // so: convert null to empty str
  if (util.emptyString(s)) return "";
  else return s;
}
--------------------------------------------------------------------------------
/src/commons/crypto.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2022 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

// from: github.com/celzero/otp/blob/cddaaa03f12f/src/base/crypto.js#L1
// nb: subtle crypto api is global on node v19+
// stackoverflow.com/a/47332317
import { emptyBuf, fromStr } from "./bufutil.js";
import { emptyString } from "./util.js";

const tktsz = 48; // output ticket size, bytes
const hkdfalgkeysz = 32; // sha256

// derives a 48-byte ticket from (seed, ctx) via hkdf; on any failure
// (or empty inputs) falls back to 48 cryptographically-random bytes
export async function tkt48(seed, ctx) {
  if (!emptyBuf(seed) && !emptyString(ctx)) {
    try {
      // use the first 32 bytes of seed as the hkdf input key material
      const sk256 = seed.slice(0, hkdfalgkeysz);
      const info512 = await sha512(fromStr(ctx));
      const dk512 = await gen(sk256, info512);
      return new Uint8Array(dk512.slice(0, tktsz));
    } catch (ignore) {}
  }
  // fallback: random ticket (not derivable again from seed/ctx)
  const t = new Uint8Array(tktsz);
  crypto.getRandomValues(t);
  return t;
}

// derives a raw hmac-sha256 key from secret & info via hkdf;
// salt for hkdf can be zero: stackoverflow.com/a/64403302
export async function gen(secret, info, salt = new Uint8Array()) {
  if (emptyBuf(secret) || emptyBuf(info)) {
    throw new Error("empty secret/info");
  }

  const key = await hkdfhmac(secret, info, salt);
  // exported as an ArrayBuffer of raw key bytes
  return crypto.subtle.exportKey("raw", key);
}

// with hkdf, salt is optional and public, but if used,
// for a given secret (Z) it needn't be unique per use,
// but it *must* be random:
// cendyne.dev/posts/2023-01-30-how-to-use-hkdf.html
// info adds entropy to extracted keys, and must be unique:
// see: soatok.blog/2021/11/17/understanding-hkdf
async function hkdfhmac(skmac, usectx, salt = new Uint8Array()) {
  const dk = await hkdf(skmac);
  return await crypto.subtle.deriveKey(
    hkdf256(salt, usectx),
    dk,
    hmac256opts(),
    true, // extractable? can be true for sign, verify
    ["sign", "verify"] // usage
  );
}

// imports raw bytes sk as a non-extractable hkdf base key
async function hkdf(sk) {
  return await crypto.subtle.importKey(
    "raw",
    sk,
    "HKDF",
    false, // extractable? always false for use as derivedKey
    ["deriveKey"] // usage
  );
}

// algorithm descriptor for the derived hmac-sha256 key
function hmac256opts() {
  return { name: "HMAC", hash: "SHA-256" };
}

// algorithm descriptor for hkdf-sha256 with the given salt/info
function hkdf256(salt, usectx) {
  return { name: "HKDF", hash: "SHA-256", salt: salt, info: usectx };
}

// sha-512 digest of buf, as a Uint8Array (64 bytes)
async function sha512(buf) {
  const ab = await crypto.subtle.digest("SHA-512", buf);
  return new Uint8Array(ab);
}
--------------------------------------------------------------------------------
/src/commons/lf-transformer.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2023 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

// ref: gist.github.com/stefandanaita/88c4d8b187400d5b07524cd0a12843b2

/**
 * Line-splitting TransformStream transformer: buffers partial lines
 * across chunks and forwards only lines matching typ's filter.
 * @implements {Transformer}
 */
export class LfTransformer {
  /**
   * @param {StreamType} typ - byte- or string-typed stream operations
   * @constructor
   * @implements {Transformer}
   * @see https://developer.mozilla.org/en-US/docs/Web/API/TransformStream
   * @see https://developer.mozilla.org/en-US/docs/Web/API/TransformStreamDefaultController
   */
  constructor(typ) {
    /** @type {StreamType} */
    this.typ = typ;

    /** @type {Uint8Array|string} carry-over of the last, possibly
     * incomplete, line from the previous chunk */
    this.partial = this.typ.empty();
  }

  /**
   * Splits chunk (plus any carried-over partial) into lines and
   * enqueues the ones typ.include() accepts, re-appending a separator.
   * @param {Uint8Array|string} chunk
   * @param {TransformStreamDefaultController} controller
   */
  transform(chunk, controller) {
    // prepend with previous string (empty if none)
    const cat = this.typ.concat(this.partial, chunk);
    // Extract lines from chunk
    const lines = this.typ.split(cat);
    // Save last line as it might be incomplete
    this.partial = lines.pop() || this.typ.empty();

    // eslint-disable-next-line no-restricted-syntax
    for (const l of lines) {
      if (this.typ.include(l)) {
        const incl = this.typ.concat(l, this.typ.separator);
        controller.enqueue(incl);
      }
    }
  }

  /**
   * Flushes any trailing partial line (unfiltered) at end of stream.
   * @param {TransformStreamDefaultController} controller
   */
  flush(controller) {
    const p = this.partial;
    if (this.typ.len(p) > 0) controller.enqueue(p);
  }
}

// byte-oriented line filter stream
export const bufstream = (strfilter) =>
  new TransformStream(new LfTransformer(new ByteType(strfilter)));

// string-oriented line filter stream
export const strstream = (strfilter) =>
  new TransformStream(new LfTransformer(new StrType(strfilter)));

/**
 * Async-iterates a ReadableStream's chunks, releasing the reader lock
 * when done or on error.
 * @param {ReadableStream} stream
 * @returns {AsyncIterableIterator}
 */
export async function* streamiter(stream) {
  // Get a lock on the stream
  const reader = stream.getReader();

  try {
    while (true) {
      // Read from the stream
      const { done, value } = await reader.read();
      if (done) return;
      yield value;
    }
  } finally {
    reader.releaseLock();
  }
}

/**
 * Byte-buffer implementation of StreamType.
 * @template [T=Uint8Array]
 * @implements {StreamType}
 */
class ByteType {
  constructor(strfilter, strsep = "\n") {
    const enc = new TextEncoder();
    this.separator = enc.encode(strsep); // utf-8 bytes of strsep
    this.filter = enc.encode(strfilter); // utf-8 bytes of strfilter
  }

  name() {
    return "Byte";
  }

  empty() {
    return new Uint8Array(0);
  }

  // concatenates two byte arrays into a new Uint8Array
  concat(buf1, buf2) {
    const cat = new Uint8Array(buf1.length + buf2.length);
    cat.set(buf1, 0);
    cat.set(buf2, buf1.length);
    return cat;
  }

  // splits buf on the first byte of the separator.
  // NOTE(review): empty segments are left as plain [] (the `continue`
  // below skips converting them to Uint8Array); downstream concat/len
  // tolerate array-likes, but confirm consumers do too.
  split(buf) {
    const sep = this.separator[0];
    const w = [];
    w.push(
      buf.reduce((acc, x) => {
        if (x === sep) {
          w.push(acc);
          return [];
        } else {
          acc.push(x);
          return acc;
        }
      }, [])
    );
    for (let i = 0; i < w.length; i++) {
      if (w[i].length === 0) continue;
      w[i] = Uint8Array.from(w[i]);
    }
    return w;
  }

  // naive byte-wise search for 'me' inside the first 'limit' bytes of
  // 'buf'; returns the match index, or a negative sentinel:
  // -2 empty needle, -3 empty haystack, -4 haystack shorter than
  // needle, -1 not found
  indexOf(buf, me, limit) {
    if (!me || me.length === 0) return -2;
    if (this.len(buf) === 0) return -3;

    const ml = me.length - 1;
    const bl = buf.length > limit ?
limit : buf.length; 139 | 140 | if (bl < ml) return -4; 141 | 142 | // ex: buf [0, 1, 4, 6, 7, 2]; me [4, 6, 7] 143 | // ml => 2; bl - ml => 4 144 | for (let i = 0; i < bl - ml; i++) { 145 | // check if first & last bytes of 'me' match with 'buf' 146 | const start = buf[i] === me[0]; 147 | const end = buf[i + ml] === me[ml]; 148 | // if not, continue 149 | if (!start || !end) continue; 150 | 151 | // if yes, check if 'me' is less than 2 bytes long 152 | // then, return index where 'me' was found in 'buf' 153 | if (ml === 0 || ml === 1) return i; 154 | 155 | // if not, check if the rest of 'me' matches with 'buf' 156 | for (let j = 1, k = i + 1; j < ml; j++, k++) { 157 | // on any mismatch, break out of loop 158 | if (buf[k] !== me[j]) break; 159 | // if entire 'me' matches, return idx where 'me' was found in 'buf' 160 | if (j + 1 >= ml) return k - j; 161 | } 162 | } 163 | 164 | return -1; 165 | } 166 | 167 | // search for 'this.filter' in 'buf' up to 'limit' bytes 168 | include(buf, limit = 200) { 169 | return this.indexOf(buf, this.filter, limit) >= 0; 170 | } 171 | 172 | len(buf) { 173 | return buf.byteLength; 174 | } 175 | } 176 | 177 | /** 178 | * @template [T=string] 179 | * @implements {StreamType} 180 | */ 181 | class StrType { 182 | constructor(strfilter, strsep = "/[\r\n]+/") { 183 | this.separator = strsep; 184 | this.filter = strfilter; 185 | } 186 | 187 | name() { 188 | return "Str"; 189 | } 190 | 191 | empty() { 192 | return ""; 193 | } 194 | 195 | concat(s1, s2) { 196 | return s1 + s2; 197 | } 198 | 199 | split(s) { 200 | const sep = this.separator[0]; 201 | return s.split(sep); 202 | } 203 | 204 | include(s) { 205 | return s && s.include(this.filter); 206 | } 207 | 208 | len(s) { 209 | return s.length; 210 | } 211 | } 212 | 213 | /** 214 | * @template T 215 | * @interface 216 | */ 217 | class StreamType { 218 | /** 219 | * @returns {string} 220 | */ 221 | name() {} 222 | 223 | /** 224 | * @returns {T} 225 | * @abstract 226 | */ 227 | empty() {} 228 | 
  /**
   * @param {T} arg1
   * @param {T} arg2
   * @returns {T}
   * @abstract
   */
  concat(arg1, arg2) {}

  /**
   * @param {T} arg1
   * @returns {T[]}
   * @abstract
   */
  split(arg1) {}

  /**
   * @param {T} arg1
   * @returns {boolean}
   * @abstract
   */
  include(arg1) {}

  /**
   * @param {T} arg1
   * @returns {number}
   * @abstract
   */
  len(arg1) {}
}
--------------------------------------------------------------------------------
/src/core/cfg.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2022 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */
/* eslint-disabled */
// NOTE(review): "eslint-disabled" is not a recognized eslint directive
// (the directive is "eslint-disable"); as written it has no effect —
// confirm whether that is intentional.
// eslint, no import-assert: github.com/eslint/eslint/discussions/15305
import u6cfg from "../u6-basicconfig.json" with { type: 'json' };
import u6filetag from "../u6-filetag.json" with { type: 'json' };
// nodejs.org/docs/latest-v22.x/api/esm.html#json-modules

// accessors over the bundled u6 basic-config json

export function timestamp() {
  return u6cfg.timestamp;
}

export function tdNodeCount() {
  return u6cfg.nodecount;
}

export function tdParts() {
  return u6cfg.tdparts;
}

export function tdCodec6() {
  return u6cfg.useCodec6;
}

// the entire basic-config object, as-is
export function orig() {
  return u6cfg;
}

// the bundled filetag json, as-is
export function filetag() {
  return u6filetag;
}

export function tdmd5() {
  return u6cfg.tdmd5;
}

export function rdmd5() {
  return u6cfg.rdmd5;
}
--------------------------------------------------------------------------------
/src/core/deno/blocklists.ts:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2021 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */
import * as bufutil from "../../commons/bufutil.js";
import * as envutil from "../../commons/envutil.js";
import * as cfg from "../../core/cfg.js";
import { BlocklistWrapper } from "../../plugins/rethinkdns/main.js";

// on-disk layout: <cwd>/blocklists__/<timestamp>/<codec>/{td,rd}.txt
const blocklistsDir = "blocklists__";
const tdFile = "td.txt";
const rdFile = "rd.txt";

// loads blocklists from disk if present, else downloads & constructs
// them, then persists to disk.
// NOTE(review): on the download path this returns undefined, not a
// boolean like the other exits — confirm callers don't rely on it.
export async function setup(bw: any) {
  if (!bw || !envutil.hasDisk()) return false;

  const now = Date.now();
  const timestamp = cfg.timestamp() as string;
  const url = envutil.blocklistUrl() + timestamp + "/";
  const nodecount = cfg.tdNodeCount() as number;
  const tdparts = cfg.tdParts() as number;
  const tdcodec6 = cfg.tdCodec6() as boolean;
  const codec = tdcodec6 ? "u6" : "u8";

  const ok = setupLocally(bw, timestamp, codec);
  if (ok) {
    console.info("bl setup locally tstamp/nc", timestamp, nodecount);
    return true;
  }

  console.info("dowloading bl url/codec?", url, codec);
  await bw.initBlocklistConstruction(
    /* rxid*/ "bl-download",
    now,
    url,
    nodecount,
    tdparts,
    tdcodec6
  );

  save(bw, timestamp, codec);
}

// writes the constructed trie-data & rank-data to disk; no-op (false)
// when the filter isn't set up yet
function save(bw: BlocklistWrapper, timestamp: string, codec: string) {
  if (!bw.isBlocklistFilterSetup()) return false;

  mkdirsIfNeeded(timestamp, codec);

  const [tdfp, rdfp] = getFilePaths(timestamp, codec);

  const td = bw.triedata();
  const rd = bw.rankdata();
  // Deno only writes uint8arrays to disk, never raw arraybuffers
  Deno.writeFileSync(tdfp, new Uint8Array(td));
  Deno.writeFileSync(rdfp, new Uint8Array(rd));

  console.info("blocklists written to disk");

  return true;
}

// builds the blocklist filter from on-disk files, if they exist and
// are non-empty; returns true on success
function setupLocally(bw: any, ts: string, codec: string) {
  if (!hasBlocklistFiles(ts, codec)) return false;

  const [td, rd] = getFilePaths(ts, codec);
  console.info("on-disk c:td/rd", codec, td, rd);

  const tdbuf = Deno.readFileSync(td);
  const rdbuf = Deno.readFileSync(rd);

  if (tdbuf.byteLength <= 0 || rdbuf.byteLength <= 0) {
    return false;
  }

  // TODO: file integrity checks
  // concat converts uint8array to an untyped arraybuffer
  // that the rethinkdns module expects, 'cause the actual
  // type required is uint16array for the trie
  const ab0 = bufutil.concat([tdbuf]);
  const ab1 = bufutil.concat([rdbuf]);
  const json1 = cfg.filetag();
  const json2 = cfg.orig();

  bw.buildBlocklistFilter(
    /* trie*/ ab0,
    /* rank-dir*/ ab1,
    /* file-tag*/ json1,
    /* basic-config*/ json2
  );

  return true;
}

// true when both td & rd files exist on disk for timestamp/codec
function hasBlocklistFiles(timestamp: string, codec: string) {
  const [td, rd] = getFilePaths(timestamp, codec);

  try {
    const tdinfo = Deno.statSync(td);
    const rdinfo = Deno.statSync(rd);

    return tdinfo.isFile && rdinfo.isFile;
  } catch (ignored) {}

  return false;
}

// absolute paths to the td & rd files for timestamp t, codec c
function getFilePaths(t: string, c: string) {
  const cwd = Deno.cwd();

  const td = cwd + "/" + blocklistsDir + "/" + t + "/" + c + "/" + tdFile;
  const rd = cwd + "/" + blocklistsDir + "/" + t + "/" + c + "/" + rdFile;

  return [td, rd];
}

// absolute paths to the three nested dirs for timestamp t, codec c
function getDirPaths(t: string, c: string) {
  const cwd = Deno.cwd();

  const bldir = cwd + "/" + blocklistsDir;
  const tsdir = cwd + "/" + blocklistsDir + "/" + t;
  const codecdir = cwd + "/" + blocklistsDir + "/" + t + "/" + c;

  return [bldir, tsdir, codecdir];
}

// creates the blocklist dir hierarchy if any level is missing
function mkdirsIfNeeded(timestamp: string, codec: string) {
  // deno.land/api@v1.27.1?s=Deno.MkdirOptions
  const opts = { recursive: true };
  const [dir1, dir2, dir3] = getDirPaths(timestamp, codec);
  let dinfo1 = null;
  let dinfo2 = null;
  let dinfo3 = null;

  try {
    dinfo1 = Deno.statSync(dir1);
    dinfo2 = Deno.statSync(dir2);
    dinfo3 = Deno.statSync(dir3);
  } catch (ignored) {}

  if (!dinfo1 || !dinfo1.isDirectory) {
    console.info("creating dir", dir1);
    Deno.mkdirSync(dir1, opts);
  }

  if (!dinfo2 || !dinfo2.isDirectory) {
    console.info("creating dir", dir2);
    Deno.mkdirSync(dir2, opts);
  }

  if (!dinfo3 || !dinfo3.isDirectory) {
    console.info("creating dir", dir3);
    Deno.mkdirSync(dir3, opts);
  }
}
--------------------------------------------------------------------------------
/src/core/deno/config.ts:
--------------------------------------------------------------------------------
import * as system from "../../system.js";
import * as blocklists from "./blocklists.ts";
import * as dbip from "./dbip.ts";
import { services, stopAfter } from "../svc.js";
import Log, { LogLevels } from "../log.js";
import EnvManager from "../env.js";
import { signal } from "https://deno.land/std@0.171.0/signal/mod.ts";

// In global scope.
declare global {
  // TypeScript must know type of every var / property. Extend Window
  // (globalThis) with declaration merging (archive.is/YUWh2) to define types
  // Ref: www.typescriptlang.org/docs/handbook/declaration-merging.html
  interface Window {
    envManager?: EnvManager;
    log?: Log;
    env?: any;
  }
}

// NOTE(review): this IIFE declares a `main` parameter but is invoked
// with no argument, so `main` is always undefined (and unused) —
// confirm whether the parameter should be dropped.
((main) => {
  system.when("prepare").then(prep);
  system.when("steady").then(up);
})();

// blocks on SIGINT and initiates shutdown via stopAfter()
async function sigctrl() {
  const sigs = signal("SIGINT");
  for await (const _ of sigs) {
    stopAfter();
  }
}

// one-time process setup: env, logger; publishes "ready" when done
async function prep() {
  // if this file execs... assume we're on deno.
  if (!Deno) throw new Error("failed loading deno-specific config");

  const isProd = Deno.env.get("DENO_ENV") === "production";
  const onDenoDeploy = Deno.env.get("CLOUD_PLATFORM") === "deno-deploy";
  const profiling = Deno.env.get("PROFILE_DNS_RESOLVES") === "true";

  window.envManager = new EnvManager();

  window.log = new Log({
    level: window.envManager.get("LOG_LEVEL") as LogLevels,
    levelize: isProd || profiling, // levelize if prod or profiling
    withTimestamps: !onDenoDeploy, // do not log ts on deno-deploy
  });

  // signal ready
  system.pub("ready");
}

// runs once services are steady: loads blocklists & geoip db, then
// installs the SIGINT handler and publishes "go"
async function up() {
  if (!services.ready) {
    console.error("services not yet ready and there is a sig-up!?");
    return;
  }

  const bw = services.blocklistWrapper;
  if (bw != null && !bw.disabled()) {
    await blocklists.setup(bw);
  } else {
    console.warn("Config", "blocklists unavailable / disabled");
  }
  const lp = services.logPusher;
  if (lp != null) {
    try {
      await dbip.setup(lp);
    } catch (ex) {
      console.error("Config", "dbip setup failed", ex);
    }
  } else {
    console.warn("Config", "logpusher unavailable");
  }
  // deliberately not awaited: sigctrl loops forever on signals
  sigctrl();
  // signal all system are-a go
  system.pub("go");
}
--------------------------------------------------------------------------------
/src/core/deno/dbip.ts:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2022 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

import * as util from "../../commons/util.js";
import * as bufutil from "../../commons/bufutil.js";
import * as envutil from "../../commons/envutil.js";
import { LogPusher } from "../../plugins/observability/log-pusher.js";

// on-disk layout: <cwd>/dbip__/<timestamp>/{dbip.v4,dbip.v6}
const dbipDir = "./dbip__";
const geo4name = "dbip.v4";
const geo6name = "dbip.v6";

// loads the geoip db from disk if present for the url's timestamp,
// else initializes the log-pusher (which fetches it) and persists it
export async function setup(lp: LogPusher) {
  if (!lp) return false;
  // in download only mode, logpush enable/disable is ignored
  if (!envutil.logpushEnabled() && !envutil.blocklistDownloadOnly()) {
    return false;
  }

  const url: string = envutil.geoipUrl();
  const timestamp: string = timestampFromUrl(url);

  const ok = setupLocally(lp, timestamp);
  if (ok) {
    console.info("dbip setup locally", timestamp);
    return true;
  }

  await lp.init();

  return save(lp, timestamp);
}

// extracts the trailing numeric path segment of url as a timestamp;
// throws when url is empty or has no numeric segment
function timestampFromUrl(url: string) {
  if (util.emptyString(url)) throw new Error("empty geo url: " + url);

  const parts = url.split("/");
  // last segment may be "" when url ends with "/"; fall back to the
  // second-to-last segment in that case
  const p1 = parts[parts.length - 1];
  const p2 = parts[parts.length - 2];
  const p = p1 || p2;
  const ts = parseInt(p);
  if (!isNaN(ts) && typeof ts === "number") return p;

  throw new Error("invalid timestamp in: " + url);
}

// writes the v4/v6 geoip buffers to disk; no-op (false) if lp.init()
// hasn't completed
function save(lp: LogPusher, timestamp: string) {
  if (!lp.initDone()) return false;

  mkdirsIfNeeded(timestamp);

  const [g4fp, g6fp] = getFilePaths(timestamp);

  const g4 = lp.geo4();
  const g6 = lp.geo6();
  // write out array-buffers to disk
  g4 && Deno.writeFileSync(g4fp, g4);
  g6 && Deno.writeFileSync(g6fp, g6);

  console.info("dbip written to disk (g4/g6)", g4?.byteLength, g6?.byteLength);

  return true;
}

// initializes lp from on-disk geoip files, if both exist.
// NOTE(review): lp.init(ab0, ab1) is not awaited here, while setup()
// awaits lp.init() on the download path — confirm init with buffers
// is synchronous-enough for callers.
function setupLocally(lp: LogPusher, timestamp: string) {
  const ok = hasDbipFiles(timestamp);
  console.info(timestamp, "has dbip files?", ok);
  if (!ok) return false;

  const [g4, g6] = getFilePaths(timestamp);
  console.info("on-disk dbip v4/v6", g4, g6);

  const g4buf = Deno.readFileSync(g4);
  const g6buf = Deno.readFileSync(g6);

  // TODO: file integrity checks
  const ab0 = bufutil.raw(g4buf);
  const ab1 = bufutil.raw(g6buf);

  lp.init(ab0, ab1);

  return true;
}

// true when both v4 & v6 geoip files exist on disk for timestamp
function hasDbipFiles(timestamp: string) {
  if (!envutil.hasDisk()) return false;

  const [g4fp, g6fp] = getFilePaths(timestamp);

  try {
    const g4ent = Deno.statSync(g4fp);
    const g6ent = Deno.statSync(g6fp);

    return g4ent.isFile && g6ent.isFile;
  } catch (ignored) {}

  return false;
}

// relative paths to the v4 & v6 geoip files for timestamp t
function getFilePaths(t: string) {
  const g4fp = dbipDir + "/" + t + "/" + geo4name;
  const g6fp = dbipDir + "/" + t + "/" + geo6name;

  return [g4fp, g6fp];
}

// absolute paths to the dbip dir and its timestamp subdir
function getDirPaths(t: string) {
  const cwd = Deno.cwd();

  const dbdir = cwd + "/" + dbipDir;
  const tsdir = cwd + "/" + dbipDir + "/" + t;

  return [dbdir, tsdir];
}

// creates the dbip dir hierarchy if any level is missing
function mkdirsIfNeeded(timestamp: string) {
  const opts = { recursive: true };
  const [dir1, dir2] = getDirPaths(timestamp);

  let dinfo1 = null;
  let dinfo2 = null;
  try {
    dinfo1 = Deno.statSync(dir1);
    dinfo2 = Deno.statSync(dir2);
  } catch (ignored) {}

  if (!dinfo1 || !dinfo1.isDirectory) {
    console.info("creating dbip dir", dir1);
    Deno.mkdirSync(dir1, opts);
  }

  if (!dinfo2 || !dinfo2.isDirectory) {
    console.info("creating timestamp dir", dir2);
    Deno.mkdirSync(dir2, opts);
  }
}
--------------------------------------------------------------------------------
/src/core/dns/conns.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2021
RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

import * as util from "../../commons/util.js";

/**
 * @typedef {import("net").Socket | import("dgram").Socket} AnySock
 */

// a bounded pool of idle, keep-alive TCP sockets with TTL-based
// eviction
export class TcpConnPool {
  constructor(size, ttl) {
    this.size = size;
    // max sweeps per give/take
    this.maxsweep = Math.max((size / 4) | 0, 20);
    this.ttl = ttl; // ms
    const quarterttl = (ttl / 4) | 0;
    this.keepalive = Math.min(/* 60s*/ 60000, quarterttl); // ms
    this.lastSweep = 0;
    this.sweepGapMs = Math.max(/* 10s*/ 10000, quarterttl); // ms
    /** @type {Map<import("net").Socket, Report>} */
    this.pool = new Map();
    log.d("tcp-pool psz:", size, "msw:", this.maxsweep, "t:", ttl);
  }

  // returns socket to the pool; false when the socket is unusable or
  // the pool is full even after a sweep
  give(socket) {
    if (socket.pending) return false;
    if (!socket.writable) return false;
    if (!this.ready(socket)) return false;

    if (this.pool.has(socket)) return true;

    const free = this.pool.size < this.size || this.sweep();
    if (!free) return false;

    return this.checkin(socket);
  }

  // takes a healthy socket out of the pool, evicting unhealthy ones
  // it encounters along the way; null when none available
  take() {
    const thres = this.maxsweep / 2;
    let out = null;
    let n = 0;

    const sz = this.pool.size;
    if (sz <= 0) return out;

    for (const [sock, report] of this.pool) {
      if (this.healthy(sock, report)) {
        out = this.checkout(sock, report);
      } else {
        this.evict(sock);
      }
      if (++n >= thres) break;
      if (out) break;
    }

    // no evictions, and no free sockets
    // NOTE(review): the `else if (n > 0)` branch below is unreachable —
    // when n > 0 the first condition is already true, so lastSweep is
    // never refreshed here; confirm intended condition.
    if (n > 0 || out == null) {
      log.d("take, evicted:", n, "out?", out != null);
    } else if (n > 0) {
      this.lastSweep = Date.now();
    }
    return out;
  }

  /**
   * Detaches sock from pool book-keeping and returns it to the caller;
   * evicts and returns null if the socket throws during detachment.
   * @param {import("net").Socket} sock
   * @param {Report} report
   * @returns {import("net").Socket}
   */
  checkout(sock, report) {
    log.d(report.id, "checkout, size:", this.pool.size);

    try {
      sock.removeAllListeners("close");
      sock.removeAllListeners("error");
      sock.setKeepAlive(false);
      sock.resume();
    } catch (ignore) {
      this.evict(sock);
      return null;
    }
    this.pool.delete(sock);
    return sock;
  }

  // parks sock in the pool: keep-alive on, flow paused, auto-evict on
  // close/error
  checkin(sock) {
    const report = this.mkreport();

    sock.setKeepAlive(true, this.keepalive);
    sock.pause();
    sock.on("close", this.evict.bind(this));
    sock.on("error", this.evict.bind(this));

    this.pool.set(sock, report);

    log.d(report.id, "checkin, size:", this.pool.size);
    return true;
  }

  // evicts dead sockets (all sockets when clear=true), rate-limited to
  // one pass per sweepGapMs unless clearing; true when the pool shrank
  sweep(clear = false) {
    const sz = this.pool.size;
    if (sz <= 0) return false;

    const now = Date.now();
    if (this.lastSweep + this.sweepGapMs > now) {
      if (!clear) return false;
    }
    this.lastSweep = now;

    let n = 0;
    for (const [sock, report] of this.pool) {
      if (clear || this.dead(sock, report)) this.evict(sock);
      // incr n even if we are clearing (ignoring maxsweep)
      if (++n >= this.maxsweep && !clear) break;
    }
    log.i("sweep, cleared:", sz - this.pool.size, "clear?", clear, "n:", n);
    return sz > this.pool.size; // size decreased post-sweep?
  }

  // a socket is usable only when its tcp handshake is complete
  ready(sock) {
    return sock.readyState === "open";
  }

  // healthy: writable, open, and not past its ttl
  healthy(sock, report) {
    const destroyed = !sock.writable;
    const open = this.ready(sock);
    const fresh = report.fresh(this.ttl);
    const id = report.id;
    log.d(id, "destroyed?", destroyed, "open?", open, "fresh?", fresh);
    if (destroyed || !open) return false;
    return fresh; // healthy if not expired
  }

  dead(sock, report) {
    return !this.healthy(sock, report);
  }

  // drops sock from the pool and destroys it (best-effort)
  evict(sock) {
    this.pool.delete(sock);

    try {
      if (sock && !sock.destroyed) sock.destroySoon();
    } catch (ignore) {}
  }

  mkreport() {
    return new Report(util.uid("tcp"));
  }
}

// per-socket book-keeping: id & last-use time for ttl checks
class Report {
  /**
   * @param {string} id
   */
  constructor(id) {
    /** @type {string} */
    this.id = id;
    /** @type {number} last check-in time, ms since epoch */
    this.lastuse = Date.now();
  }

  // true when checked-in within the last `since` ms
  fresh(since) {
    return this.lastuse + since >= Date.now();
  }
}

// a bounded pool of idle UDP sockets with TTL-based eviction
export class UdpConnPool {
  constructor(size, ttl) {
    this.size = size;
    this.maxsweep = Math.max((size / 4) | 0, 20);
    this.ttl = Math.max(/* 60s*/ 60000, ttl); // no more than 60s
    this.lastSweep = 0;
    this.sweepGapMs = Math.max(/* 10s*/ 10000, (ttl / 2) | 0); // ms
    /** @type {Map<import("dgram").Socket, Report>} */
    this.pool = new Map();
    log.d("udp-pool psz:", size, "msw:", this.maxsweep, "t:", ttl);
  }

  // returns socket to the pool; false when the pool is full even
  // after a sweep
  give(socket) {
    if (this.pool.has(socket)) return true;

    const free = this.pool.size < this.size || this.sweep();
    if (!free) return false;

    return this.checkin(socket);
  }

  // takes a fresh socket out of the pool, evicting expired ones it
  // encounters along the way; null when none available
  take() {
    const thres = this.maxsweep / 2;
    let out = null;
    let n = 0;

    const sz = this.pool.size;
    if (sz <= 0) return out;

    for (const [sock, report] of this.pool) {
      if (this.healthy(report)) {
        out = this.checkout(sock, report);
      } else {
        this.evict(sock);
      }
      if (++n >= thres) break;
      if (out) break;
    }
    // no evictions, but no socket available
    // NOTE(review): as in TcpConnPool.take, the `else if (n > 0)`
    // branch below is unreachable (n > 0 already satisfies the first
    // condition) — lastSweep is never refreshed here.
    if (n > 0 || out == null) {
      log.d("take, evicted:", n, "out?", out != null);
    } else if (n > 0) {
      this.lastSweep = Date.now();
    }
    return out;
  }

  /**
   * Detaches sock from pool book-keeping and returns it to the caller.
   * @param {import("dgram").Socket} sock
   * @param {Report} report
   * @returns {import("dgram").Socket}
   */
  checkout(sock, report) {
    log.d(report.id, "checkout, size:", this.pool.size);

    sock.removeAllListeners("close");
    sock.removeAllListeners("error");

    this.pool.delete(sock);
    return sock;
  }

  // parks sock in the pool; auto-evict on close/error
  checkin(sock) {
    const report = this.mkreport();

    sock.on("close", this.evict.bind(this));
    sock.on("error", this.evict.bind(this));

    this.pool.set(sock, report);

    log.d(report.id, "checkin, size:", this.pool.size);
    return true;
  }

  // evicts expired sockets (all sockets when clear=true), rate-limited
  // to one pass per sweepGapMs unless clearing; true when pool shrank
  sweep(clear = false) {
    const sz = this.pool.size;
    if (sz <= 0) return false;

    const now = Date.now();
    if (this.lastSweep + this.sweepGapMs > now) {
      if (!clear) return false;
    }
    this.lastSweep = now;

    let n = 0;
    for (const [sock, report] of this.pool) {
      if (clear || this.dead(report)) this.evict(sock);
      // incr n even if we are clearing (ignoring maxsweep)
      if (++n >= this.maxsweep && !clear) break;
    }
    log.i("sweep, cleared:", sz - this.pool.size, "clear?", clear, "n:", n);
    return sz > this.pool.size; // size decreased post-sweep?
  }

  // healthy: not past its ttl (udp sockets have no open/writable state
  // to check, unlike tcp)
  healthy(report) {
    const fresh = report.fresh(this.ttl);
    const id = report.id;
    log.d(id, "fresh?", fresh);
    return fresh; // healthy if not expired
  }

  dead(report) {
    return !this.healthy(report);
  }

  // drops sock from the pool, disconnects and closes it
  evict(sock) {
    if (!sock) return;
    this.pool.delete(sock);

    sock.disconnect();
    sock.close();
  }

  mkreport() {
    return new Report(util.uid("udp"));
  }
}
--------------------------------------------------------------------------------
/src/core/doh.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2021 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

import RethinkPlugin from "./plugin.js";
import * as pres from "../plugins/plugin-response.js";
import * as util from "../commons/util.js";
import * as dnsutil from "../commons/dnsutil.js";
import IOState from "./io-state.js";

// TODO: define FetchEventLike
/**
 * @typedef {any} FetchEventLike
 */

/**
 * Entry point for DoH requests: delegates to the plugin pipeline.
 * @param {FetchEvent|FetchEventLike} event
 * @returns {Promise<Response>}
 */
export function handleRequest(event) {
  return proxyRequest(event);
}

/**
 * Runs the plugin chain for the request with a timeout; any error or
 * timeout is converted into a DNS exception response. CORS headers are
 * added for browser user-agents.
 * @param {FetchEvent} event
 * @returns {Promise<Response>}
 */
async function proxyRequest(event) {
  if (optionsRequest(event.request)) return util.respond204();

  const io = new IOState();
  const ua = event.request.headers.get("User-Agent");

  try {
    const plugin = new RethinkPlugin(event);
    await plugin.initIoState(io);

    // if an early response has been set by plugin.initIoState, return it
    if (io.httpResponse) {
      return withCors(io, ua);
    }

    await util.timedSafeAsyncOp(
      /* op*/ async () => plugin.execute(),
      /* waitMs*/ dnsutil.requestTimeout(),
      /* onTimeout*/ async () => errorResponse(io)
    );
  } catch (err) {
    log.e("doh", "proxy-request error", err.stack);
    errorResponse(io, err);
  }

  return withCors(io, ua);
}

// CORS preflight?
function optionsRequest(request) {
  return request.method === "OPTIONS";
}

// records a DNS exception response on io (err may be null on timeout)
function errorResponse(io, err = null) {
  const eres = pres.errResponse("doh.js", err);
  io.dnsExceptionResponse(eres);
}

// adds CORS headers when the request came from a browser, then
// returns the accumulated http response
function withCors(io, ua) {
  if (util.fromBrowser(ua)) io.setCorsHeadersIfNeeded();
  return io.httpResponse;
}
--------------------------------------------------------------------------------
/src/core/fastly/config.js:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2022 RethinkDNS and its authors.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 | */ 8 | import EnvManager from "../env.js"; 9 | import * as system from "../../system.js"; 10 | import Log from "../log.js"; 11 | import { services } from "../svc.js"; 12 | import { allowDynamicBackends } from "fastly:experimental"; 13 | 14 | system.when("prepare").then(prep); 15 | system.when("steady").then(up); 16 | 17 | // on Fastly, setup is called for every new request, 18 | // since server-fastly.js fires "prepare" on every request 19 | function prep() { 20 | allowDynamicBackends(true); 21 | 22 | // This is used within `EnvManager` 23 | if (!globalThis.fastlyEnv) { 24 | globalThis.fastlyEnv = new Dictionary("env"); 25 | } 26 | 27 | if (!globalThis.envManager) { 28 | globalThis.envManager = new EnvManager(); 29 | } 30 | 31 | const isProd = envManager.get("env") === "production"; 32 | 33 | if (!globalThis.log) { 34 | globalThis.log = new Log({ 35 | level: envManager.get("LOG_LEVEL"), 36 | levelize: isProd, // levelize only in prod 37 | withTimestamps: false, // no need to log ts on fastly 38 | }); 39 | } 40 | 41 | // on Fastly, the network-context isn't available in global-scope 42 | // ie network requests, for ex over fetch-api or xhr, don't work. 43 | // And so, system ready event is published by the event listener 44 | // which has the network-context, that is necessary for svc.js 45 | // to setup blocklist-filter, which otherwise fails when invoked 46 | // from global-scope (such as the "main" function in this file). 47 | system.pub("ready"); 48 | } 49 | 50 | function up() { 51 | if (!services.ready) { 52 | log.e("services not yet ready, and we've got a sig-up?!"); 53 | return; 54 | } 55 | // nothing else to do on sig-up on Fastly; fire a sig-go! 56 | system.pub("go"); 57 | } 58 | -------------------------------------------------------------------------------- /src/core/linux/swap.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 
3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import { spawnSync } from "node:child_process"; 9 | 10 | const swapfile = "swap__"; 11 | const swapsize = "152M"; 12 | 13 | // linuxize.com/post/create-a-linux-swap-file 14 | export function mkswap() { 15 | return ( 16 | !hasanyswap() && 17 | sh("fallocate", ["-l", swapsize, swapfile]) && 18 | sh("chmod", ["600", swapfile]) && 19 | sh("mkswap", [swapfile]) && 20 | sh("swapon", [swapfile]) && 21 | sh("sysctl", ["vm.swappiness=20"]) 22 | ); 23 | } 24 | 25 | export function rmswap() { 26 | return hasswap() && sh("swapoff", ["-v", swapfile]) && sh("rm", [swapfile]); 27 | } 28 | 29 | function hasanyswap() { 30 | // cat /proc/swaps 31 | // Filename Type Size Used Priority 32 | // /swap__ file 155644 99968 -2 33 | const pswaps = shout("cat", ["/proc/swaps"]); 34 | const lines = pswaps && pswaps.split("\n"); 35 | return lines && lines.length > 1; 36 | } 37 | 38 | // stackoverflow.com/a/53222213 39 | function hasswap() { 40 | return sh("test", ["-e", swapfile]); 41 | } 42 | 43 | function shout(cmd, args) { 44 | return shx(cmd, args, true); 45 | } 46 | 47 | function sh(cmd, args) { 48 | return shx(cmd, args) === 0; 49 | } 50 | 51 | function shx(cmd, args, out = false) { 52 | if (!cmd) return false; 53 | args = args || []; 54 | const opts = { 55 | cwd: "/", 56 | uid: 0, 57 | shell: true, 58 | encoding: "utf8", 59 | }; 60 | const proc = spawnSync(cmd, args, opts); 61 | if (proc.error) log.i(cmd, args, opts, "error", proc.error); 62 | if (proc.stderr) log.e(cmd, args, opts, proc.stderr); 63 | if (proc.stdout) log.l(proc.stdout); 64 | return !out ? 
proc.status : proc.stdout; 65 | } 66 | -------------------------------------------------------------------------------- /src/core/log.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Logging utilities. 3 | * 4 | * @license 5 | * Copyright (c) 2021 RethinkDNS and its authors. 6 | * 7 | * This Source Code Form is subject to the terms of the Mozilla Public 8 | * License, v. 2.0. If a copy of the MPL was not distributed with this 9 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 10 | */ 11 | 12 | import { uid, stub } from "../commons/util.js"; 13 | 14 | /** 15 | * @typedef {'error'|'logpush'|'warn'|'info'|'timer'|'debug'} LogLevels 16 | */ 17 | 18 | // high "error" (4); low "debug" (0); 19 | const LEVELS = new Set(["error", "logpush", "warn", "info", "timer", "debug"]); 20 | 21 | /** 22 | * Configure console level. 23 | * `console` methods are made non-functional accordingly. 24 | * May be checked with `console.level`. 25 | * Has no default value, to prevent accidentally nullifying console methods. So, 26 | * the de facto console level is 'debug`. 27 | * @param {LogLevels} level - log level 28 | * @return {LogLevels} level 29 | */ 30 | function _setConsoleLevel(level) { 31 | switch (level) { 32 | case "error": 33 | case "logpush": 34 | globalThis.console.warn = stub(); 35 | case "warn": 36 | globalThis.console.info = stub(); 37 | case "info": 38 | globalThis.console.time = stub(); 39 | globalThis.console.timeEnd = stub(); 40 | globalThis.console.timeLog = stub(); 41 | case "timer": 42 | globalThis.console.debug = stub(); 43 | case "debug": 44 | break; 45 | default: 46 | console.error("Unknown console level: ", level); 47 | level = null; 48 | } 49 | if (level) { 50 | // console.log("Console level set: ", level); 51 | globalThis.console.level = level; 52 | } 53 | return level; 54 | } 55 | 56 | export default class Log { 57 | /** 58 | * Provide console methods alias and similar meta methods. 
59 | * Sets log level for the current instance. 60 | * Default='debug', so as default instance (`new Log()`) is a pure alias. 61 | * If console level has been set, log level cannot be lower than it. 62 | * @param {{ 63 | * level: LogLevels, 64 | * levelize: boolean, 65 | * withTimestamps: boolean 66 | * }} - options 67 | */ 68 | constructor({ level = "debug", levelize = false, withTimestamps = false }) { 69 | level = level.toLowerCase(); 70 | if (!LEVELS.has(level)) level = "debug"; 71 | // if logpush, then levlelize to stub out all but error and logpush logs 72 | if (level === "logpush") levelize = true; 73 | if (levelize && !console.level) _setConsoleLevel(level); 74 | 75 | this.l = console.log; 76 | this.log = console.log; 77 | this.logTimestamps = withTimestamps; 78 | 79 | this.setLevel(level); 80 | } 81 | 82 | _resetLevel() { 83 | this.d = stub(); 84 | this.debug = stub(); 85 | this.lapTime = stub(); 86 | this.startTime = stub(); 87 | this.endTime = stub(); 88 | this.i = stub(); 89 | this.info = stub(); 90 | this.w = stub(); 91 | this.warn = stub(); 92 | this.e = stub(); 93 | this.error = stub(); 94 | } 95 | 96 | withTags(...tags) { 97 | const that = this; 98 | return { 99 | lapTime: (n, ...r) => { 100 | return that.lapTime(n, ...tags, ...r); 101 | }, 102 | startTime: (n, ...r) => { 103 | const tid = that.startTime(n); 104 | that.d(that.now() + " T", ...tags, "create", tid, ...r); 105 | return tid; 106 | }, 107 | endTime: (n, ...r) => { 108 | that.d(that.now() + " T", ...tags, "end", n, ...r); 109 | return that.endTime(n); 110 | }, 111 | d: (...args) => { 112 | that.d(that.now() + " D", ...tags, ...args); 113 | }, 114 | i: (...args) => { 115 | that.i(that.now() + " I", ...tags, ...args); 116 | }, 117 | w: (...args) => { 118 | that.w(that.now() + " W", ...tags, ...args); 119 | }, 120 | e: (...args) => { 121 | that.e(that.now() + " E", ...tags, ...args); 122 | }, 123 | q: (...args) => { 124 | that.l(that.now() + " Q", ...tags, ...args); 125 | }, 126 | qStart: 
(...args) => { 127 | that.l(that.now() + " Q", ...tags, that.border()); 128 | that.l(that.now() + " Q", ...tags, ...args); 129 | }, 130 | qEnd: (...args) => { 131 | that.l(that.now() + " Q", ...tags, ...args); 132 | that.l(that.now() + " Q", ...tags, that.border()); 133 | }, 134 | tag: (t) => { 135 | tags.push(t); 136 | }, 137 | }; 138 | } 139 | 140 | now() { 141 | if (this.logTimestamps) return new Date().toISOString(); 142 | else return ""; 143 | } 144 | 145 | border() { 146 | return "-------------------------------"; 147 | } 148 | 149 | /** 150 | * Modify log level of this instance. Unlike the constructor, this has no 151 | * default value. 152 | * @param {LogLevels} level 153 | */ 154 | setLevel(level) { 155 | level = level.toLowerCase(); 156 | if (!LEVELS.has(level)) throw new Error(`Unknown log level: ${level}`); 157 | 158 | this._resetLevel(); 159 | 160 | switch (level) { 161 | default: 162 | case "debug": 163 | this.d = console.debug; 164 | this.debug = console.debug; 165 | case "timer": 166 | this.lapTime = console.timeLog || stub(); // Stubbing required for Fastly as they do not currently support this method. 167 | this.startTime = function (name) { 168 | name = uid(name); 169 | if (console.time) console.time(name); 170 | return name; 171 | }; 172 | this.endTime = console.timeEnd || stub(); // Stubbing required for Fastly as they do not currently support this method. 
173 | case "info": 174 | this.i = console.info; 175 | this.info = console.info; 176 | case "warn": 177 | this.w = console.warn; 178 | this.warn = console.warn; 179 | case "error": 180 | case "logpush": 181 | this.e = console.error; 182 | this.error = console.error; 183 | } 184 | console.debug("Log level set: ", level); 185 | this.level = level; 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /src/core/node/blocklists.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as fs from "node:fs"; 9 | import * as path from "node:path"; 10 | import * as bufutil from "../../commons/bufutil.js"; 11 | import * as envutil from "../../commons/envutil.js"; 12 | import * as cfg from "../../core/cfg.js"; 13 | import mmap from "@riaskov/mmap-io"; 14 | 15 | const blocklistsDir = "./blocklists__"; 16 | const tdFile = "td.txt"; 17 | const rdFile = "rd.txt"; 18 | 19 | export async function setup(bw) { 20 | if (!bw || !envutil.hasDisk()) return false; 21 | 22 | const now = Date.now(); 23 | // timestamp is of form yyyy/epochMs 24 | const timestamp = cfg.timestamp(); 25 | const url = envutil.blocklistUrl() + timestamp + "/"; 26 | const nodecount = cfg.tdNodeCount(); 27 | const tdparts = cfg.tdParts(); 28 | const tdcodec6 = cfg.tdCodec6(); 29 | const codec = tdcodec6 ? 
"u6" : "u8"; 30 | const useMmap = envutil.useMmap(); 31 | 32 | const ok = setupLocally(bw, timestamp, codec, useMmap); 33 | if (ok) { 34 | log.i("bl setup locally tstamp/nc", timestamp, nodecount); 35 | return true; 36 | } 37 | 38 | log.i("dowloading bl u/u6?/nc/parts", url, tdcodec6, nodecount, tdparts); 39 | await bw.initBlocklistConstruction( 40 | /* rxid*/ "bl-download", 41 | now, 42 | url, 43 | nodecount, 44 | tdparts, 45 | tdcodec6 46 | ); 47 | 48 | return save(bw, timestamp, codec); 49 | } 50 | 51 | function save(bw, timestamp, codec) { 52 | if (!bw.isBlocklistFilterSetup()) return false; 53 | 54 | mkdirsIfNeeded(timestamp, codec); 55 | 56 | const [tdfp, rdfp] = getFilePaths(timestamp, codec); 57 | 58 | const td = bw.triedata(); 59 | const rd = bw.rankdata(); 60 | // write out array-buffers to disk 61 | fs.writeFileSync(tdfp, bufutil.bufferOf(td)); 62 | fs.writeFileSync(rdfp, bufutil.bufferOf(rd)); 63 | 64 | log.i("blocklists written to disk"); 65 | 66 | return true; 67 | } 68 | 69 | // fmmap mmaps file at fp for random reads, returns a Buffer backed by the file. 70 | function fmmap(fp) { 71 | const fd = fs.openSync(fp, "r+"); 72 | const fsize = fs.fstatSync(fd).size; 73 | const rxprot = mmap.PROT_READ; // protection 74 | const mpriv = mmap.MAP_SHARED; // privacy 75 | const madv = mmap.MADV_RANDOM; // madvise 76 | const offset = 0; 77 | log.i("mmap f:", fp, "size:", fsize, "\nNOTE: md5 checks will fail"); 78 | return mmap.map(fsize, rxprot, mpriv, fd, offset, madv); 79 | } 80 | 81 | function setupLocally(bw, timestamp, codec, useMmap) { 82 | const ok = hasBlocklistFiles(timestamp, codec); 83 | log.i(timestamp, codec, "has bl files?", ok); 84 | if (!ok) return false; 85 | 86 | const [td, rd] = getFilePaths(timestamp, codec); 87 | log.i("on-disk codec/td/rd", codec, td, rd, "mmap?", useMmap); 88 | 89 | let tdbuf = useMmap ? 
fmmap(td) : null; 90 | if (bufutil.emptyBuf(tdbuf)) { 91 | tdbuf = fs.readFileSync(td); 92 | } 93 | const rdbuf = fs.readFileSync(rd); 94 | 95 | // TODO: file integrity checks 96 | const ab0 = bufutil.raw(tdbuf); 97 | const ab1 = bufutil.raw(rdbuf); 98 | const json1 = cfg.filetag(); 99 | const json2 = cfg.orig(); 100 | 101 | // TODO: Fix basicconfig 102 | bw.buildBlocklistFilter( 103 | /* trie*/ ab0, 104 | /* rank-dir*/ ab1, 105 | /* file-tag*/ json1, 106 | /* basic-config*/ json2 107 | ); 108 | 109 | return true; 110 | } 111 | 112 | function hasBlocklistFiles(timestamp, codec) { 113 | const [td, rd] = getFilePaths(timestamp, codec); 114 | 115 | return fs.existsSync(td) && fs.existsSync(rd); 116 | } 117 | 118 | function getFilePaths(t, codec) { 119 | const td = blocklistsDir + "/" + t + "/" + codec + "/" + tdFile; 120 | const rd = blocklistsDir + "/" + t + "/" + codec + "/" + rdFile; 121 | 122 | return [path.normalize(td), path.normalize(rd)]; 123 | } 124 | 125 | function getDirPaths(t, codec) { 126 | const bldir = path.normalize(blocklistsDir); 127 | const tsdir = path.normalize(blocklistsDir + "/" + t); 128 | const codecdir = path.normalize(blocklistsDir + "/" + t + "/" + codec); 129 | 130 | return [bldir, tsdir, codecdir]; 131 | } 132 | 133 | function mkdirsIfNeeded(timestamp, codec) { 134 | const opts = { recursive: true }; 135 | const [dir1, dir2, dir3] = getDirPaths(timestamp, codec); 136 | 137 | if (!fs.existsSync(dir1)) { 138 | log.i("creating blocklist dir", dir1); 139 | fs.mkdirSync(dir1, opts); 140 | } 141 | 142 | if (!fs.existsSync(dir2)) { 143 | log.i("creating timestamp dir", dir2); 144 | fs.mkdirSync(dir2, opts); 145 | } 146 | 147 | if (!fs.existsSync(dir3)) { 148 | log.i("creating codec dir", dir2); 149 | fs.mkdirSync(dir3, opts); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/core/node/config.js: -------------------------------------------------------------------------------- 1 | /* 2 | * 
Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | /** 10 | * Configuration file for node runtime 11 | * TODO: Remove all side-effects and use a constructor? 12 | * This module has side effects, sequentially setting up the environment. 13 | */ 14 | import { atob, btoa } from "node:buffer"; 15 | import process from "node:process"; 16 | import * as util from "./util.js"; 17 | import * as blocklists from "./blocklists.js"; 18 | import * as dbip from "./dbip.js"; 19 | import Log from "../log.js"; 20 | import * as system from "../../system.js"; 21 | import { services, stopAfter } from "../svc.js"; 22 | import EnvManager from "../env.js"; 23 | import * as swap from "../linux/swap.js"; 24 | import * as dnst from "../../core/node/dns-transport.js"; 25 | 26 | // some of the cjs node globals aren't available in esm 27 | // nodejs.org/docs/latest/api/globals.html 28 | // github.com/webpack/webpack/issues/14072 29 | // import path from "path"; 30 | // import { fileURLToPath } from "url"; 31 | // globalThis.__filename = fileURLToPath(import.meta.url); 32 | // globalThis.__dirname = path.dirname(__filename); 33 | 34 | (async (main) => { 35 | system.when("prepare").then(prep); 36 | system.when("steady").then(up); 37 | })(); 38 | 39 | async function prep() { 40 | // if this file execs... assume we're on nodejs. 41 | const isProd = process.env.NODE_ENV === "production"; 42 | const onFly = process.env.CLOUD_PLATFORM === "fly"; 43 | const profiling = process.env.PROFILE_DNS_RESOLVES === "true"; 44 | const debugFly = onFly && process.env.FLY_APP_NAME.includes("-dev"); 45 | 46 | globalThis.envManager = new EnvManager(); 47 | 48 | /** Logger */ 49 | globalThis.log = debugFly 50 | ? 
new Log({ 51 | level: "debug", 52 | levelize: profiling, // levelize only if profiling 53 | withTimestamps: true, // always log timestamps on node 54 | }) 55 | : new Log({ 56 | level: envManager.get("LOG_LEVEL"), 57 | levelize: isProd || profiling, // levelize if prod or profiling 58 | withTimestamps: true, // always log timestamps on node 59 | }); 60 | 61 | // ---- log and envManager available only after this line ---- \\ 62 | 63 | /** TLS crt and key */ 64 | // If TLS_OFFLOAD == true, skip loading TLS certs and keys; otherwise: 65 | // Raw TLS CERT and KEY are stored (base64) in an env var for fly deploys 66 | // (fly deploys are dev/prod nodejs deploys where env TLS_CN or TLS_ is set). 67 | // Otherwise, retrieve KEY and CERT from the filesystem (this is the case 68 | // for local non-prod nodejs deploys with self-signed certs). 69 | // If requisite TLS secrets are missing, set tlsoffload to true, eventually. 70 | let tlsoffload = envManager.get("TLS_OFFLOAD"); 71 | const TLS_CERTKEY = process.env.TLS_CERTKEY; 72 | 73 | if (tlsoffload) { 74 | log.i("TLS offload enabled"); 75 | } else if (isProd) { 76 | if (TLS_CERTKEY) { 77 | const [tlsKey, tlsCrt] = util.getCertKeyFromEnv(TLS_CERTKEY); 78 | setTlsVars(tlsKey, tlsCrt); 79 | log.i("env (fly) tls setup with tls_certkey"); 80 | } else { 81 | const _TLS_CRT_AND_KEY = 82 | eval(`process.env.TLS_${process.env.TLS_CN}`) || process.env.TLS_; 83 | if (_TLS_CRT_AND_KEY) { 84 | const [tlsKey, tlsCrt] = util.getCertKeyFromEnv(_TLS_CRT_AND_KEY); 85 | setTlsVars(tlsKey, tlsCrt); 86 | log.i("[deprecated] env (fly) tls setup with tls_cn"); 87 | } else { 88 | log.w("Skip TLS: TLS_CERTKEY nor TLS_CN set; enable TLS offload"); 89 | tlsoffload = true; 90 | } 91 | } 92 | } else { 93 | try { 94 | const devutils = await import("./util-dev.js"); 95 | const [tlsKey, tlsCrt] = devutils.getTLSfromFile( 96 | envManager.get("TLS_KEY_PATH"), 97 | envManager.get("TLS_CRT_PATH") 98 | ); 99 | setTlsVars(tlsKey, tlsCrt); 100 | const l1 = 
tlsKey.byteLength; 101 | const l2 = tlsCrt.byteLength; 102 | log.i("dev (local) tls setup from tls_key_path", l1, l2); 103 | } catch (ex) { 104 | // this can happen when running server in BLOCKLIST_DOWNLOAD_ONLY mode 105 | log.w("Skipping TLS: test TLS crt/key missing; enable TLS offload"); 106 | tlsoffload = true; 107 | } 108 | } 109 | 110 | envManager.set("TLS_OFFLOAD", tlsoffload); 111 | 112 | if (!globalThis.atob || !globalThis.btoa) { 113 | globalThis.atob = atob; 114 | globalThis.btoa = btoa; 115 | log.i("polyfill atob / btoa"); 116 | } else { 117 | log.i("no atob/btoa polyfill required"); 118 | } 119 | 120 | // TODO: move dns* related settings to env 121 | // flydns is always ipv6 (fdaa::53) 122 | const plainOldDnsIp = onFly ? "fdaa::3" : "1.1.1.2"; 123 | let dns53 = null; 124 | /** swap space and recursive resolver on Fly */ 125 | if (onFly || true) { 126 | const ok = swap.mkswap(); 127 | log.i("mkswap done?", ok); 128 | dns53 = dnst.makeTransport(plainOldDnsIp); 129 | log.i("imported udp/tcp dns transport", plainOldDnsIp); 130 | } else { 131 | log.i("no swap required"); 132 | } 133 | 134 | /** signal ready */ 135 | system.pub("ready", [dns53]); 136 | } 137 | 138 | function setTlsVars(tlsKey, tlsCrt) { 139 | envManager.set("TLS_KEY", tlsKey); 140 | envManager.set("TLS_CRT", tlsCrt); 141 | } 142 | 143 | async function up() { 144 | if (!services.ready) { 145 | log.e("services not yet ready yet and there is a sig-up!?"); 146 | return; 147 | } 148 | 149 | const bw = services.blocklistWrapper; 150 | if (bw != null && !bw.disabled()) { 151 | await blocklists.setup(bw); 152 | } else { 153 | log.w("Config", "blocklists unavailable / disabled"); 154 | } 155 | const lp = services.logPusher; 156 | if (lp != null) { 157 | try { 158 | await dbip.setup(lp); 159 | } catch (ex) { 160 | log.e("Config", "dbip setup failed", ex); 161 | } 162 | } else { 163 | log.w("Config", "logpusher unavailable"); 164 | } 165 | 166 | process.on("SIGINT", (sig) => stopAfter()); 167 | 168 | 
// signal all system are-a go 169 | system.pub("go"); 170 | } 171 | -------------------------------------------------------------------------------- /src/core/node/dbip.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as fs from "node:fs"; 9 | import * as path from "node:path"; 10 | import * as util from "../../commons/util.js"; 11 | import * as bufutil from "../../commons/bufutil.js"; 12 | import * as envutil from "../../commons/envutil.js"; 13 | import { LogPusher } from "../../plugins/observability/log-pusher.js"; 14 | 15 | const dbipDir = "./dbip__"; 16 | const geo4name = "dbip.v4"; 17 | const geo6name = "dbip.v6"; 18 | 19 | /** @param {LogPusher} lp */ 20 | export async function setup(lp) { 21 | if (!lp) return false; 22 | // in download only mode, logpush enable/disable is ignored 23 | if (!envutil.logpushEnabled() && !envutil.blocklistDownloadOnly()) { 24 | return false; 25 | } 26 | const url = envutil.geoipUrl(); 27 | const timestamp = timestampFromUrl(url); 28 | 29 | const ok = setupLocally(lp, timestamp); 30 | if (ok) { 31 | log.i("dbip setup locally", timestamp); 32 | return true; 33 | } 34 | 35 | await lp.init(); 36 | 37 | return save(lp, timestamp); 38 | } 39 | 40 | function timestampFromUrl(url) { 41 | if (util.emptyString(url)) throw new Error("empty geo url: " + url); 42 | 43 | const parts = url.split("/"); 44 | const p1 = parts[parts.length - 1]; 45 | const p2 = parts[parts.length - 2]; 46 | const p = p1 || p2; 47 | const ts = parseInt(p); 48 | if (!isNaN(ts) && typeof ts === "number") return p; 49 | 50 | throw new Error("invalid timestamp in: " + url); 51 | } 52 | 53 | /** 54 | * @param {LogPusher} lp 55 | * @param 
{string} timestamp 56 | * @returns {boolean} 57 | */ 58 | function save(lp, timestamp) { 59 | if (!lp.initDone()) return false; 60 | 61 | mkdirsIfNeeded(timestamp); 62 | 63 | const [g4fp, g6fp] = getFilePaths(timestamp); 64 | 65 | const g4 = lp.geo4(); 66 | const g6 = lp.geo6(); 67 | // write out array-buffers to disk 68 | fs.writeFileSync(g4fp, bufutil.bufferOf(g4)); 69 | fs.writeFileSync(g6fp, bufutil.bufferOf(g6)); 70 | 71 | log.i("dbip written to disk (g4/g6)", g4.byteLength, g6.byteLength); 72 | 73 | return true; 74 | } 75 | 76 | function setupLocally(lp, timestamp) { 77 | const ok = hasDbipFiles(timestamp); 78 | log.i(timestamp, "has dbip files?", ok); 79 | if (!ok) return false; 80 | 81 | const [g4, g6] = getFilePaths(timestamp); 82 | log.i("on-disk dbip v4/v6", g4, g6); 83 | 84 | const g4buf = fs.readFileSync(g4); 85 | const g6buf = fs.readFileSync(g6); 86 | 87 | // TODO: file integrity checks 88 | const ab0 = bufutil.raw(g4buf); 89 | const ab1 = bufutil.raw(g6buf); 90 | 91 | lp.init(ab0, ab1); 92 | 93 | return true; 94 | } 95 | 96 | function hasDbipFiles(timestamp) { 97 | if (!envutil.hasDisk()) return false; 98 | 99 | const [g4, g6] = getFilePaths(timestamp); 100 | return fs.existsSync(g4) && fs.existsSync(g6); 101 | } 102 | 103 | function getFilePaths(t) { 104 | const g4 = dbipDir + "/" + t + "/" + geo4name; 105 | const g6 = dbipDir + "/" + t + "/" + geo6name; 106 | 107 | return [path.normalize(g4), path.normalize(g6)]; 108 | } 109 | 110 | function getDirPaths(t) { 111 | const dbdir = path.normalize(dbipDir); 112 | const tsdir = path.normalize(dbipDir + "/" + t); 113 | 114 | return [dbdir, tsdir]; 115 | } 116 | 117 | function mkdirsIfNeeded(timestamp) { 118 | const opts = { recursive: true }; 119 | const [dir1, dir2] = getDirPaths(timestamp); 120 | 121 | if (!fs.existsSync(dir1)) { 122 | log.i("creating dbip dir", dir1); 123 | fs.mkdirSync(dir1, opts); 124 | } 125 | 126 | if (!fs.existsSync(dir2)) { 127 | log.i("creating timestamp dir", dir2); 128 | 
fs.mkdirSync(dir2, opts); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/core/node/dns-transport.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import net from "node:net"; 9 | import dgram from "node:dgram"; 10 | import * as util from "../../commons/util.js"; 11 | import { TcpConnPool, UdpConnPool } from "../dns/conns.js"; 12 | import { TcpTx, UdpTx } from "../dns/transact.js"; 13 | 14 | export function makeTransport(host, port = 53, opts = {}) { 15 | return new Transport(host, port, opts); 16 | } 17 | 18 | // Transport upstreams plain-old DNS queries over both UDPv4 and TCPv4. 19 | // Host and port constructor parameters are IPv4 addresses of the upstream. 20 | // TCP and UDP connections are pooled for reuse, but DNS queries are not 21 | // multiplexed. IO timeout, connection pool size, connection expiry are other 22 | // constructor parameters to configure the pooling behaviour. Methods udpquery 23 | // and tcpquery are the main entry points which forward a raw dns-packet as 24 | // and return non-null dns-answers, if recieved on-time and without errors. 
25 | export class Transport { 26 | constructor(host, port, opts = {}) { 27 | if (util.emptyString(host)) throw new Error("invalid host" + host); 28 | this.host = host; 29 | this.port = port || 53; 30 | this.connectTimeout = opts.connectTimeout || 3000; // 3s 31 | this.ioTimeout = opts.ioTimeout || 10000; // 10s 32 | this.ipproto = net.isIP(host); // 4, 6, or 0 33 | const sz = opts.poolSize || 500; // conns 34 | const ttl = opts.poolTtl || 60000; // 1m 35 | this.tcpconns = new TcpConnPool(sz, ttl); 36 | this.udpconns = new UdpConnPool(sz, ttl); 37 | 38 | this.log = log.withTags("DnsTransport"); 39 | this.log.i(this.ipproto, "W transport", host, port, "pool", sz, ttl); 40 | } 41 | 42 | async teardown() { 43 | const r1 = this.tcpconns.sweep(true); 44 | const r2 = this.udpconns.sweep(true); 45 | this.log.i("transport teardown (tcp | udp) done?", r1, "|", r2); 46 | } 47 | 48 | async udpquery(rxid, q) { 49 | let sock = this.udpconns.take(); 50 | this.log.d(rxid, "udp pooled?", sock !== null); 51 | 52 | const t = this.log.startTime("udp-query"); 53 | let ans = null; 54 | try { 55 | sock = sock || (await this.makeConn("udp")); 56 | this.log.lapTime(t, rxid, "make-conn"); 57 | 58 | ans = await UdpTx.begin(sock).exchange(rxid, q, this.ioTimeout); 59 | this.log.lapTime(t, rxid, "get-ans"); 60 | 61 | this.parkConn(sock, "udp"); 62 | } catch (ex) { 63 | this.closeUdp(sock); 64 | this.log.e(rxid, ex); 65 | } 66 | this.log.endTime(t); 67 | 68 | return ans; 69 | } 70 | 71 | async tcpquery(rxid, q) { 72 | let sock = this.tcpconns.take(); 73 | this.log.d(rxid, "tcp pooled?", sock !== null); 74 | 75 | const t = this.log.startTime("tcp-query"); 76 | let ans = null; 77 | try { 78 | sock = sock || (await this.makeConn("tcp")); 79 | log.lapTime(t, rxid, "make-conn"); 80 | 81 | ans = await TcpTx.begin(sock).exchange(rxid, q, this.ioTimeout); 82 | log.lapTime(t, rxid, "get-ans"); 83 | 84 | this.parkConn(sock, "tcp"); 85 | } catch (ex) { 86 | this.closeTcp(sock); 87 | this.log.e(rxid, ex); 
88 | } 89 | this.log.endTime(t); 90 | 91 | return ans; 92 | } 93 | 94 | parkConn(sock, proto) { 95 | if (proto === "tcp") { 96 | const ok = this.tcpconns.give(sock); 97 | if (!ok) this.closeTcp(sock); 98 | } else if (proto === "udp") { 99 | const ok = this.udpconns.give(sock); 100 | if (!ok) this.closeUdp(sock); 101 | } 102 | } 103 | 104 | makeConn(proto) { 105 | if (proto === "tcp") { 106 | const tcpconnect = (cb) => { 107 | // not monitoring connection-error events, instead relying on timeouts 108 | const sock = net.connect(this.port, this.host, () => cb(sock)); 109 | }; 110 | return util.timedOp(tcpconnect, this.connectTimeout, this.closeTcp); 111 | } else if (proto === "udp") { 112 | // connected udp-sockets: archive.is/JJxaV 113 | const udpconnect = (cb) => { 114 | let sock = null; 115 | if (this.ipproto === 6) { 116 | sock = dgram.createSocket("udp6"); 117 | } else { 118 | // default 119 | sock = dgram.createSocket("udp4"); 120 | } 121 | // connect error, if any, is sent to the connection-callback 122 | sock.connect(this.port, this.host, (err) => cb(sock, err)); 123 | }; 124 | return util.timedOp(udpconnect, this.connectTimeout, this.closeUdp); 125 | } else { 126 | throw new Error("unsupported proto: " + proto); 127 | } 128 | } 129 | 130 | /** 131 | * @param {import("net").Socket} sock 132 | */ 133 | closeTcp(sock) { 134 | // the socket is not expected to have any error-listeners 135 | // so we add one to avoid unhandled errors 136 | sock.on("error", util.stub); 137 | if (sock && !sock.destroyed) sock.destroySoon(); 138 | } 139 | 140 | /** 141 | * @param {import("dgram").Socket} sock 142 | */ 143 | closeUdp(sock) { 144 | if (!sock || sock.destroyed) return; 145 | // the socket is expected to not have any error-listeners 146 | // so we add one just in case to avoid unhandled errors 147 | sock.on("error", util.stub); 148 | sock.disconnect(); 149 | sock.close(); 150 | } 151 | } 152 | 
-------------------------------------------------------------------------------- /src/core/node/util-dev.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | /* For development environment use only */ 10 | 11 | import * as fs from "node:fs"; 12 | 13 | /** 14 | * @param {String} TLS_KEY_PATH 15 | * @param {String} TLS_CRT_PATH 16 | * @return {Array} [TLS_KEY, TLS_CRT] 17 | */ 18 | export function getTLSfromFile(keyPath, crtPath) { 19 | return [fs.readFileSync(keyPath), fs.readFileSync(crtPath)]; 20 | } 21 | -------------------------------------------------------------------------------- /src/core/node/util.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import { Http2ServerRequest, Http2ServerResponse } from "node:http2"; 10 | 11 | /** 12 | * @param {String} TLS_CRT_KEY - Contains base64 (no wrap) encoded key and 13 | * certificate files seprated by a newline (\n) and described by `KEY=` and 14 | * `CRT=` respectively. 
Ex: `TLS_="KEY=encoded_string\nCRT=encoded_string"` 15 | * @return {Array} [TLS_KEY, TLS_CRT] 16 | */ 17 | export function getCertKeyFromEnv(TLS_CRT_KEY) { 18 | if (TLS_CRT_KEY == null) throw new Error("TLS cert / key not found"); 19 | 20 | TLS_CRT_KEY = TLS_CRT_KEY.replace(/\\n/g, "\n"); 21 | 22 | if (TLS_CRT_KEY.split("=", 1)[0].indexOf("KEY") >= 0) { 23 | return TLS_CRT_KEY.split("\n").map((v) => 24 | Buffer.from(v.substring(v.indexOf("=") + 1), "base64") 25 | ); 26 | } else if (TLS_CRT_KEY.split("\n")[1].split("=", 1)[0].indexOf("KEY") >= 0) { 27 | return TLS_CRT_KEY.split("\n") 28 | .reverse() 29 | .map((v) => Buffer.from(v.substring(v.indexOf("=") + 1), "base64")); 30 | } else { 31 | throw new Error("TLS cert / key malformed"); 32 | } 33 | } 34 | 35 | /** 36 | * @param {Object} headers 37 | * @return {Object} 38 | */ 39 | export function copyNonPseudoHeaders(headers) { 40 | // nodejs req headers may be of form 41 | // ':authority': 'localhost:8080' 42 | // ':method': 'GET' 43 | // ':path': '/1:AAIAgA==?dns=AAABAAABAAAAAAAACnJldGhpbmtkbnMDY29tAAABAAE' 44 | // ':scheme': 'https' 45 | // accept: 'application/dns-message' 46 | // 'user-agent': 'Go-http-client/2.0' 47 | // [Symbol(nodejs.http2.sensitiveHeaders)]: [] 48 | 49 | const out = {}; 50 | 51 | if (!headers) return out; 52 | 53 | // drop http/2 pseudo-headers 54 | for (const name in headers) { 55 | if (name.startsWith(":")) continue; 56 | out[name] = headers[name]; 57 | } 58 | 59 | return out; 60 | } 61 | 62 | /** 63 | * @param {Object} headers 64 | * @return {Object} 65 | */ 66 | export function transformPseudoHeaders(headers) { 67 | const out = {}; 68 | 69 | if (!headers) return out; 70 | 71 | // transform http/2 pseudo-headers 72 | for (const name in headers) { 73 | if (name.startsWith(":")) { 74 | out[name.slice(1)] = headers[name]; 75 | } else { 76 | out[name] = headers[name]; 77 | } 78 | } 79 | 80 | return out; 81 | } 82 | 83 | /** 84 | * @param {Http2ServerRequest} req 85 | * @return {String} 86 | */ 
87 | export function req2str(req) { 88 | if (!req) return "request[null]"; 89 | return ( 90 | `request[${req.method}] ${req.headers["content-type"]} ` + 91 | `${req.url} from ${req.headers["user-agent"]} ` + 92 | `${req.headers["content-length"]}/${req.readableLength} ` 93 | ); 94 | } 95 | 96 | /** 97 | * @param {Http2ServerResponse} res 98 | * @returns {String} 99 | */ 100 | export function res2str(res) { 101 | if (!res) return "response[null]"; 102 | return ( 103 | `response[${res.statusCode}] ${res.getHeader("content-type")} ` + 104 | `headers-sent? ${res.headersSent} write-ended? ${res.writableEnded} ` + 105 | `${res.getHeader("content-length")}/${res.writableLength}` 106 | ); 107 | } 108 | -------------------------------------------------------------------------------- /src/core/svc.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import { BlocklistWrapper } from "../plugins/rethinkdns/main.js"; 10 | import { CommandControl } from "../plugins/command-control/cc.js"; 11 | import { UserOp } from "../plugins/users/user-op.js"; 12 | import { 13 | DNSPrefilter, 14 | DNSCacheResponder, 15 | DNSResolver, 16 | DnsCache, 17 | } from "../plugins/dns-op/dns-op.js"; 18 | import { LogPusher } from "../plugins/observability/log-pusher.js"; 19 | import * as dnsutil from "../commons/dnsutil.js"; 20 | import * as system from "../system.js"; 21 | import * as util from "../commons/util.js"; 22 | 23 | // proc up since 24 | let readytime = 0; 25 | let endtimer = null; 26 | 27 | export const services = { 28 | /** @type {Boolean} ready */ 29 | ready: false, 30 | /** @type {BlocklistWrapper?} blocklistWrapper */ 31 | blocklistWrapper: null, 32 | /** @type {UserOp?} userOp */ 33 | userOp: null, 34 | /** @type {DNSPrefilter?} prefilter */ 35 | prefilter: null, 36 | /** @type {CommandControl?} commandControl */ 37 | commandControl: null, 38 | /** @type {DNSCacheResponder?} dnsCacheHandler */ 39 | dnsCacheHandler: null, 40 | /** @type {DNSResolver?} dnsResolver */ 41 | dnsResolver: null, 42 | /** @type {LogPusher?} logPusher */ 43 | logPusher: null, 44 | }; 45 | 46 | ((main) => { 47 | // On Workers, asynchronous I/O, timeouts, and generating random values, 48 | // can only be performed while handling a request. 49 | system.when("ready").then(systemReady); 50 | system.when("stop").then(systemStop); 51 | })(); 52 | 53 | async function systemReady(args) { 54 | if (services.ready) return; 55 | 56 | log.i("svc", "systemReady"); 57 | 58 | const bw = new BlocklistWrapper(); 59 | const cache = new DnsCache(dnsutil.cacheSize()); 60 | const lp = new LogPusher(); 61 | const dns53 = util.emptyArray(args) ? 
null : args[0]; 62 | 63 | services.blocklistWrapper = bw; 64 | services.logPusher = lp; 65 | services.userOp = new UserOp(); 66 | services.prefilter = new DNSPrefilter(); 67 | services.dnsCacheHandler = new DNSCacheResponder(bw, cache); 68 | services.dnsResolver = new DNSResolver(bw, cache, dns53); 69 | services.commandControl = new CommandControl(bw, services.dnsResolver, lp); 70 | 71 | services.ready = true; 72 | 73 | readytime = Date.now(); 74 | 75 | system.pub("steady"); 76 | } 77 | 78 | async function systemStop() { 79 | log.d("svc stop, signal close resolver"); 80 | if (services.ready) await services.dnsResolver.close(); 81 | } 82 | 83 | function stopProc() { 84 | log.i("stopping proc, times-up"); 85 | system.pub("stop"); 86 | } 87 | 88 | export function uptime() { 89 | return Date.now() - readytime; 90 | } 91 | 92 | export function stopAfter(ms = 0) { 93 | if (ms < 0) { 94 | log.w("invalid stopAfter", ms); 95 | return; 96 | } else { 97 | log.d("stopAfter", ms); 98 | } 99 | if (!util.emptyObj(endtimer)) clearTimeout(endtimer); 100 | endtimer = util.timeout(ms, stopProc); 101 | } 102 | -------------------------------------------------------------------------------- /src/core/workers/config.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | import EnvManager from "../env.js"; 9 | import * as system from "../../system.js"; 10 | import Log from "../log.js"; 11 | import { services } from "../svc.js"; 12 | 13 | ((main) => { 14 | system.when("prepare").then(prep); 15 | system.when("steady").then(up); 16 | })(); 17 | 18 | // on Workers, setup is called for every new request, 19 | // since server-workers.js fires "prepare" on every request 20 | function prep(arg) { 21 | // if this file execs... assume we're on workers. 22 | if (!arg) throw new Error("are we on workers?"); 23 | if (!arg.env) throw new Error("workers cannot be setup with empty env"); 24 | 25 | // okay to attach env to global, as env across requests remains the same 26 | // developers.cloudflare.com/workers/runtime-apis/fetch-event/#parameters 27 | globalThis.wenv = arg.env; 28 | 29 | if (!globalThis.envManager) { 30 | globalThis.envManager = new EnvManager(); 31 | } 32 | 33 | const isProd = wenv.WORKER_ENV === "production"; 34 | 35 | if (!globalThis.log) { 36 | globalThis.log = new Log({ 37 | level: envManager.get("LOG_LEVEL"), 38 | levelize: isProd, // levelize only in prod 39 | withTimestamps: false, // no need to log ts on workers 40 | }); 41 | } 42 | 43 | // on Workers, the network-context isn't available in global-scope 44 | // ie network requests, for ex over fetch-api or xhr, don't work. 45 | // And so, system ready event is published by the event listener 46 | // which has the network-context, that is necessary for svc.js 47 | // to setup blocklist-filter, which otherwise fails when invoked 48 | // from global-scope (such as the "main" function in this file). 49 | system.pub("ready", { env: arg.env }); 50 | } 51 | 52 | function up() { 53 | if (!services.ready) { 54 | log.e("services not yet ready, and we've got a sig-up?!"); 55 | return; 56 | } 57 | // nothing else to do on sig-up on Workers; fire a sig-go! 
58 | system.pub("go"); 59 | } 60 | -------------------------------------------------------------------------------- /src/plugins/cache-util.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as cfg from "../core/cfg.js"; 9 | import * as util from "../commons/util.js"; 10 | import * as dnsutil from "../commons/dnsutil.js"; 11 | import * as envutil from "../commons/envutil.js"; 12 | import * as pres from "./plugin-response.js"; 13 | 14 | const minTtlSec = 30; // 30s 15 | const maxTtlSec = 180; // 3m 16 | const expiresImmediately = 0; // 0s 17 | const someVeryHighTtl = 1 << 30; // 2^30s 18 | const cheader = "x-rdnscache-metadata"; 19 | const _cacheurl = "https://caches.rethinkdns.com/"; 20 | 21 | const _cacheHeaderKey = "x-rdns-cache"; 22 | const _cacheHeaderHitValue = "hit"; 23 | const _cacheHeaders = { [_cacheHeaderKey]: _cacheHeaderHitValue }; 24 | 25 | function determineCacheExpiry(packet) { 26 | // TODO: do not cache :: / 0.0.0.0 upstream answers? 
27 | // expiresImmediately => packet is not an ans but a question 28 | if (!dnsutil.isAnswer(packet)) return expiresImmediately; 29 | 30 | let ttl = someVeryHighTtl; 31 | 32 | // TODO: nxdomain ttls are in the authority section 33 | // TODO: OPT answers need not set a ttl field 34 | // set min(ttl) among all answers, but at least minTtlSec 35 | for (const a of packet.answers) ttl = Math.min(a.ttl || minTtlSec, ttl); 36 | 37 | // if no answers, set min-ttl 38 | if (ttl === someVeryHighTtl) ttl = minTtlSec; 39 | 40 | // see also: isAnswerFresh 41 | ttl += envutil.cacheTtl(); 42 | const expiry = Date.now() + ttl * 1000; 43 | 44 | return expiry; // in millis 45 | } 46 | 47 | /** 48 | * @param {any} dnsPacket 49 | * @param {pres.BStamp} stamps 50 | * @returns {DnsCacheMetadata} 51 | */ 52 | function makeCacheMetadata(dnsPacket, stamps) { 53 | // { 54 | // "expiry": 1642874536022, 55 | // "stamps": { 56 | // "amazonaws.com": [128,2], 57 | // "amazon.com": [16384,1024], 58 | // "rewrite.amazon.com": [944,32768,8,16384,16,16] 59 | // } 60 | // } 61 | return new DnsCacheMetadata(determineCacheExpiry(dnsPacket), stamps); 62 | } 63 | 64 | export class DnsCacheMetadata { 65 | constructor(expiry, stamps) { 66 | /** @type {number} */ 67 | this.expiry = expiry; 68 | /** @type {pres.BStamp} */ 69 | this.stamps = stamps; 70 | } 71 | } 72 | 73 | export class DnsCacheData { 74 | constructor(packet, raw, metadata) { 75 | /** @type {any} */ 76 | this.dnsPacket = packet; // may be null 77 | /** @type {ArrayBuffer} */ 78 | this.dnsBuffer = raw; 79 | /** @type {DnsCacheMetadata} */ 80 | this.metadata = metadata; 81 | } 82 | } 83 | 84 | /** 85 | * @param {any} packet 86 | * @param {ArrayBuffer?} raw 87 | * @param {DnsCacheMetadata} metadata 88 | * @returns {DnsCacheData} 89 | */ 90 | export function makeCacheValue(packet, raw, metadata) { 91 | // null value allowed for packet / raw 92 | return new DnsCacheData(packet, raw, metadata); 93 | } 94 | 95 | /** 96 | * @param {pres.RespData} 
rdnsResponse 97 | * @returns {DnsCacheData} 98 | */ 99 | export function cacheValueOf(rdnsResponse) { 100 | const stamps = rdnsResponse.stamps; 101 | // do not cache OPT records 102 | // github.com/bluejekyll/trust-dns/blob/a614257fb0/crates/proto/src/rr/rdata/opt.rs#L46-L52 103 | const [packet, modified] = dnsutil.dropOPT(rdnsResponse.dnsPacket); 104 | const raw = modified ? dnsutil.encode(packet) : rdnsResponse.dnsBuffer; 105 | 106 | const metadata = makeCacheMetadata(packet, stamps); 107 | return makeCacheValue(packet, raw, metadata); 108 | } 109 | 110 | function updateTtl(packet, end) { 111 | const now = Date.now(); 112 | const actualttl = Math.floor((end - now) / 1000) - envutil.cacheTtl(); 113 | // jitter between min/max to prevent uniform expiry across clients 114 | const outttl = 115 | actualttl < minTtlSec ? util.rand(minTtlSec, maxTtlSec) : actualttl; 116 | for (const a of packet.answers) { 117 | if (!dnsutil.optAnswer(a)) a.ttl = outttl; 118 | } 119 | } 120 | 121 | function makeId(packet) { 122 | // multiple questions are kind of an undefined behaviour 123 | // stackoverflow.com/a/55093896 124 | if (!dnsutil.hasSingleQuestion(packet)) return null; 125 | const q = packet.questions[0]; 126 | const addn = dnsutil.hasDnssecOk(packet) ? 
":dnssec" : ""; 127 | return dnsutil.normalizeName(q.name) + ":" + q.type + addn; 128 | } 129 | 130 | /** 131 | * @param {DnsCacheData} data 132 | * @returns {DnsCacheData} 133 | */ 134 | export function makeLocalCacheValue(data) { 135 | const b = data.dnsBuffer; 136 | const metadata = data.metadata; 137 | // ensure dnsPacket is null 138 | return new DnsCacheData(null, b, metadata); 139 | } 140 | 141 | /** 142 | * @param {DnsCacheData} data 143 | * @returns {Response} 144 | */ 145 | export function makeHttpCacheValue(data) { 146 | const b = data.dnsBuffer; 147 | const metadata = data.metadata; 148 | const headers = { 149 | headers: util.concatHeaders( 150 | { 151 | [cheader]: embedMetadata(metadata), 152 | // ref: developers.cloudflare.com/workers/runtime-apis/cache#headers 153 | "Cache-Control": /* 1w*/ "max-age=604800", 154 | }, 155 | util.contentLengthHeader(b) 156 | ), 157 | // if using the fetch web api, "cf" directive needs to be set, instead 158 | // ref: developers.cloudflare.com/workers/examples/cache-using-fetch 159 | // cf: { cacheTtl: /*1w*/ 604800 }, 160 | }; 161 | // http-cache stores Response objs: 162 | return new Response(b, headers); 163 | } 164 | 165 | /** 166 | * @param {any} packet 167 | * @returns {URL} 168 | */ 169 | export function makeHttpCacheKey(packet) { 170 | const id = makeId(packet); // ex: domain.tld:A:dnssec 171 | if (util.emptyString(id)) return null; 172 | 173 | return new URL(_cacheurl + cfg.timestamp() + "/" + id); 174 | } 175 | 176 | /** 177 | * @param {Response} cres 178 | * @returns {DnsCacheMetadata} 179 | */ 180 | export function extractMetadata(cres) { 181 | const j = JSON.parse(cres.headers.get(cheader)); 182 | return new DnsCacheMetadata(j.expiry, j.stamps); 183 | } 184 | 185 | /** 186 | * @param {DnsCacheMetadata} m 187 | * @returns {string} 188 | */ 189 | function embedMetadata(m) { 190 | return JSON.stringify(m); 191 | } 192 | 193 | export function cacheHeaders() { 194 | return _cacheHeaders; 195 | } 196 | 197 | 
export function hasCacheHeader(h) { 198 | if (!h) return false; 199 | return h.get(_cacheHeaderKey) === _cacheHeaderHitValue; 200 | } 201 | 202 | function updateQueryId(decodedDnsPacket, queryId) { 203 | if (queryId === decodedDnsPacket.id) return false; // no change 204 | decodedDnsPacket.id = queryId; 205 | return true; 206 | } 207 | 208 | /** 209 | * @param {DnsCacheData} v 210 | * @returns {boolean} 211 | */ 212 | export function isValueValid(v) { 213 | if (util.emptyObj(v)) return false; 214 | 215 | return hasMetadata(v.metadata); 216 | } 217 | 218 | /** 219 | * @param {DnsCacheMetadata} m 220 | * @returns {boolean} 221 | */ 222 | export function hasMetadata(m) { 223 | return !util.emptyObj(m); 224 | } 225 | 226 | /** 227 | * @param {DnsCacheData} v 228 | * @returns {boolean} 229 | */ 230 | export function hasAnswer(v) { 231 | if (!hasMetadata(v.metadata)) return false; 232 | return isAnswerFresh(v.metadata, /* no roll*/ 6); 233 | } 234 | 235 | /** 236 | * @param {DnsCacheMetadata} m 237 | * @param {number} n 238 | * @returns {boolean} 239 | */ 240 | export function isAnswerFresh(m, n = 0) { 241 | // when expiry is 0, c.dnsPacket is a question and not an ans 242 | // ref: determineCacheExpiry 243 | const now = Date.now(); 244 | const ttl = envutil.cacheTtl() * 1000; 245 | const r = n || util.rolldice(6); 246 | if (r % 6 === 0) { 247 | // 1 in 6 (~15% of the time), fresh if answer-ttl hasn't expired 248 | return m.expiry > 0 && now <= m.expiry - ttl; 249 | } else { 250 | // 5 in 6, fresh if cache-ttl hasn't expired, regardless of answer-ttl 251 | return m.expiry > 0 && now <= m.expiry; 252 | } 253 | } 254 | 255 | export function updatedAnswer(dnsPacket, qid, expiry) { 256 | updateQueryId(dnsPacket, qid); 257 | updateTtl(dnsPacket, expiry); 258 | return dnsPacket; 259 | } 260 | -------------------------------------------------------------------------------- /src/plugins/dns-op/blocker.js: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import * as pres from "../plugin-response.js"; 10 | import * as rdnsutil from "../rdns-util.js"; 11 | import * as dnsutil from "../../commons/dnsutil.js"; 12 | 13 | export class DnsBlocker { 14 | constructor() { 15 | this.log = log.withTags("DnsBlocker"); 16 | } 17 | 18 | blockQuestion(rxid, req, blockInfo) { 19 | const dnsPacket = req.dnsPacket; 20 | const stamps = req.stamps; 21 | 22 | if (!stamps) { 23 | this.log.d(rxid, "q: no stamp"); 24 | return req; 25 | } 26 | 27 | if (!rdnsutil.hasBlockstamp(blockInfo)) { 28 | this.log.d(rxid, "q: no user-set blockstamp"); 29 | return req; 30 | } 31 | 32 | if (!dnsutil.isQueryBlockable(dnsPacket)) { 33 | this.log.d(rxid, "not a blockable dns-query"); 34 | return req; 35 | } 36 | 37 | const domains = dnsutil.extractDomains(dnsPacket); 38 | const bres = this.block(domains, blockInfo, stamps); 39 | 40 | return pres.copyOnlyBlockProperties(req, bres); 41 | } 42 | 43 | blockAnswer(rxid, res, blockInfo) { 44 | const dnsPacket = res.dnsPacket; 45 | const stamps = res.stamps; 46 | 47 | // dnsPacket is null when cache only has metadata 48 | if (!stamps || !dnsutil.hasAnswers(dnsPacket)) { 49 | this.log.d(rxid, "ans: no stamp / dns-packet"); 50 | return res; 51 | } 52 | 53 | if (!rdnsutil.hasBlockstamp(blockInfo)) { 54 | this.log.d(rxid, "ans: no user-set blockstamp"); 55 | return res; 56 | } 57 | 58 | if (!dnsutil.isAnswerBlockable(dnsPacket)) { 59 | this.log.d(rxid, "ans not cloaked with cname/https/svcb"); 60 | return res; 61 | } 62 | 63 | if (dnsutil.isAnswerQuad0(dnsPacket)) { 64 | this.log.d(rxid, "ans: already blocked"); 65 | return res; 66 | } 67 | 68 | const 
domains = dnsutil.extractDomains(dnsPacket); 69 | const bres = this.block(domains, blockInfo, stamps); 70 | 71 | return pres.copyOnlyBlockProperties(res, bres); 72 | } 73 | 74 | block(names, blockInfo, blockstamps) { 75 | let r = pres.rdnsNoBlockResponse(); 76 | for (const n of names) { 77 | r = rdnsutil.doBlock(n, blockInfo, blockstamps); 78 | if (r.isBlocked) break; 79 | } 80 | return r; 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/plugins/dns-op/cache-api.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as envutil from "../../commons/envutil.js"; 9 | 10 | export class CacheApi { 11 | constructor() { 12 | this.noop = !envutil.hasHttpCache(); 13 | 14 | if (this.noop) { 15 | log.w("no-op http-cache-api"); 16 | } 17 | } 18 | 19 | /** 20 | * @param {string} href 21 | * @returns {Promise} 22 | */ 23 | async get(href) { 24 | if (this.noop) return false; 25 | if (!href) return false; 26 | 27 | return await caches.default.match(href); 28 | } 29 | 30 | /** 31 | * @param {string} href 32 | * @param {Response} response 33 | * @returns 34 | */ 35 | put(href, response) { 36 | if (this.noop) return false; 37 | if (!href || !response) return false; 38 | // todo: what does this return? 39 | return caches.default.put(href, response); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/plugins/dns-op/cache-resolver.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 
3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import { DnsBlocker } from "./blocker.js"; 10 | import * as cacheutil from "../cache-util.js"; 11 | import * as rdnsutil from "../rdns-util.js"; 12 | import * as pres from "../plugin-response.js"; 13 | import * as dnsutil from "../../commons/dnsutil.js"; 14 | import * as util from "../../commons/util.js"; 15 | 16 | export class DNSCacheResponder { 17 | constructor(blocklistWrapper, cache) { 18 | this.blocker = new DnsBlocker(); 19 | this.log = log.withTags("DnsCacheResponder"); 20 | /** @type {import("./cache.js").DnsCache} */ 21 | this.cache = cache; 22 | /** @type {import("../rethinkdns/main.js").BlocklistWrapper} */ 23 | this.bw = blocklistWrapper; 24 | } 25 | 26 | /** 27 | * @param {{userBlocklistInfo: any, requestDecodedDnsPacket: any, isDnsMsg: boolean}} ctx 28 | * @returns {Promise} 29 | */ 30 | async exec(ctx) { 31 | let response = pres.emptyResponse(); 32 | if (!ctx.isDnsMsg) { 33 | this.log.d(ctx.rxid, "not a dns-msg, nowt to resolve"); 34 | return response; 35 | } 36 | 37 | try { 38 | response.data = await this.resolveFromCache( 39 | ctx.rxid, 40 | ctx.requestDecodedDnsPacket, 41 | ctx.userBlocklistInfo 42 | ); 43 | } catch (e) { 44 | this.log.e(ctx.rxid, "main", e.stack); 45 | response = pres.errResponse("DnsCacheHandler", e); 46 | } 47 | 48 | return response; 49 | } 50 | 51 | async resolveFromCache(rxid, packet, blockInfo) { 52 | const noAnswer = pres.rdnsNoBlockResponse(); 53 | // if blocklist-filter is setup, then there's no need to query http-cache 54 | // (it introduces 5ms to 10ms latency). Because, the sole purpose of the 55 | // cache is to help avoid blocklist-filter downloads which cost 200ms 56 | // (when cached by cf) to 5s (uncached, downloaded from s3). Otherwise, 57 | // it only add 10s, even on cache-misses. 
This is expensive especially 58 | // when upstream DoHs (like cf, goog) have median response time of 10s. 59 | // When other platforms get http-cache / multiple caches (like on-disk), 60 | // the above reasoning may not apply, since it is only valid for infra 61 | // on Cloudflare, which not only has "free" egress, but also different 62 | // runtime (faster hw and sw) and deployment model (v8 isolates). 63 | const blf = this.bw.getBlocklistFilter(); 64 | const onlyLocal = 65 | this.bw.disabled() || rdnsutil.isBlocklistFilterSetup(blf); 66 | 67 | const k = cacheutil.makeHttpCacheKey(packet); 68 | if (!k) return noAnswer; 69 | 70 | const cr = await this.cache.get(k, onlyLocal); 71 | this.log.d(rxid, onlyLocal, "cache k/m", k.href, cr && cr.metadata); 72 | 73 | if (util.emptyObj(cr)) return noAnswer; 74 | 75 | // note: stamps in cr may be out-of-date; for ex, consider a 76 | // scenario where v6.example.com AAAA to fda3:: today, 77 | // but CNAMEs to v6.test.example.org tomorrow. cr.metadata 78 | // would contain stamps for [v6.example.com, example.com] 79 | // whereas it should be [v6.example.com, example.com 80 | // v6.test.example.org, test.example.org, example.org] 81 | const stamps = rdnsutil.blockstampFromCache(cr); 82 | const res = pres.dnsResponse(cr.dnsPacket, cr.dnsBuffer, stamps); 83 | 84 | this.makeCacheResponse(rxid, /* out*/ res, blockInfo); 85 | 86 | if (res.isBlocked) return res; 87 | 88 | if (!cacheutil.isAnswerFresh(cr.metadata)) { 89 | this.log.d(rxid, "cache ans not fresh"); 90 | return noAnswer; 91 | } 92 | 93 | cacheutil.updatedAnswer( 94 | /* out*/ res.dnsPacket, 95 | packet.id, 96 | cr.metadata.expiry 97 | ); 98 | 99 | const reencoded = dnsutil.encode(res.dnsPacket); 100 | 101 | return pres.dnsResponse(res.dnsPacket, reencoded, res.stamps); 102 | } 103 | 104 | makeCacheResponse(rxid, r, blockInfo) { 105 | // check incoming dns request against blocklists in cache-metadata 106 | this.blocker.blockQuestion(rxid, /* out*/ r, blockInfo); 107 | 
this.log.d(rxid, blockInfo, "question blocked?", r.isBlocked); 108 | if (r.isBlocked) { 109 | return r; 110 | } 111 | 112 | // cache-response contains only query and not answers, 113 | // hence there are no more domains to block. 114 | if (!dnsutil.hasAnswers(r.dnsPacket)) { 115 | return r; 116 | } 117 | 118 | // check outgoing cached dns-packet against blocklists 119 | this.blocker.blockAnswer(rxid, /* out*/ r, blockInfo); 120 | this.log.d(rxid, "answer block?", r.isBlocked); 121 | 122 | return r; 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/plugins/dns-op/cache.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import { LfuCache } from "@serverless-dns/lfu-cache"; 10 | import { CacheApi } from "./cache-api.js"; 11 | import * as bufutil from "../../commons/bufutil.js"; 12 | import * as dnsutil from "../../commons/dnsutil.js"; 13 | import * as envutil from "../../commons/envutil.js"; 14 | import * as util from "../../commons/util.js"; 15 | import * as cacheutil from "../cache-util.js"; 16 | 17 | export class DnsCache { 18 | constructor(size) { 19 | this.log = log.withTags("DnsCache"); 20 | this.disabled = envutil.disableDnsCache(); 21 | 22 | if (this.disabled) { 23 | this.log.w("DnsCache disabled"); 24 | return; 25 | } 26 | 27 | this.localcache = new LfuCache("DnsCache", size); 28 | this.httpcache = new CacheApi(); 29 | } 30 | 31 | /** 32 | * @param {URL} url 33 | * @param {boolean} localOnly 34 | * @returns {Promise} 35 | */ 36 | async get(url, localOnly = false) { 37 | if (this.disabled) return null; 38 | 39 | if (!url && util.emptyString(url.href)) { 40 | this.log.d("get: empty 
url", url); 41 | return null; 42 | } 43 | 44 | // http-cache can be updated by any number of workers 45 | // in the region, and could contain latest / full 46 | // entry, whereas a local-cache may not. 47 | let data = this.fromLocalCache(url.href); 48 | if (data) { 49 | return data; 50 | } 51 | 52 | // fetch only from local-cache 53 | if (localOnly) return null; 54 | 55 | // note: http cache api availble only on cloudflare 56 | data = await this.fromHttpCache(url); 57 | if (data) { 58 | // write-through local cache 59 | this.putLocalCache(url.href, data); 60 | } 61 | 62 | return data; 63 | } 64 | 65 | /** 66 | * @param {URL} url 67 | * @param {cacheutil.DnsCacheData} data 68 | * @param {function(function):void} dispatcher 69 | * @returns {Promise} 70 | */ 71 | async put(url, data, dispatcher) { 72 | if (this.disabled) return; 73 | 74 | if ( 75 | !url || 76 | util.emptyString(url.href) || 77 | util.emptyObj(data) || 78 | util.emptyObj(data.metadata) || 79 | util.emptyObj(data.dnsPacket) || 80 | bufutil.emptyBuf(data.dnsBuffer) 81 | ) { 82 | this.log.w("put: empty url/data", url, data); 83 | return; 84 | } 85 | 86 | try { 87 | // data: {dnsPacket, dnsBuffer, metadata}; dnsPacket/Buffer may be null 88 | // verbose: this.log.d("put: data in cache", data); 89 | this.log.d("put: data in cache", data.metadata); 90 | 91 | // a race where the cache may infact have a fresh answer, 92 | // but then we override it with this question-only packet 93 | // so: get existing entry first to rule that out 94 | const c = this.fromLocalCache(url.href); 95 | const hasAns = !util.emptyObj(c) && dnsutil.isAnswer(c.dnsPacket); 96 | const incomingHasAns = dnsutil.isAnswer(data.dnsPacket); 97 | if (hasAns && !incomingHasAns) { 98 | this.log.w("put ignored: cache has answer, incoming does not"); 99 | return; 100 | } // else: override cachedEntry with incoming 101 | 102 | this.putLocalCache(url.href, data); 103 | 104 | dispatcher(this.putHttpCache(url, data)); 105 | } catch (e) { 106 | 
this.log.e("put", url.href, data, e.stack); 107 | } 108 | } 109 | 110 | /** 111 | * @param {string} href 112 | * @param {cacheutil.DnsCacheData} data 113 | * @returns {void} 114 | */ 115 | putLocalCache(href, data) { 116 | // href "https://caches.rethinkdns.com/2023/1682978161602/0.test.dns0.eu:A" 117 | // k "/0.test.dns0.eu:A" 118 | const k = href.slice(href.lastIndexOf("/")); 119 | const v = cacheutil.makeLocalCacheValue(data); 120 | 121 | if (!k || !v) return; 122 | 123 | this.localcache.put(k, v); 124 | } 125 | 126 | /** 127 | * @param {string} href 128 | * @returns {cacheutil.DnsCacheData|null} 129 | */ 130 | fromLocalCache(href) { 131 | const key = href.slice(href.lastIndexOf("/")); 132 | if (!key) return false; 133 | 134 | const res = this.localcache.get(key); 135 | 136 | if (util.emptyObj(res)) return null; 137 | 138 | const b = res.dnsBuffer; 139 | const p = dnsutil.decode(b); 140 | const m = res.metadata; 141 | 142 | const cr = cacheutil.makeCacheValue(p, b, m); 143 | 144 | return cacheutil.isValueValid(cr) ? 
cr : null; 145 | } 146 | 147 | /** 148 | * @param {URL} url 149 | * @param {cacheutil.DnsCacheData} data 150 | * @returns 151 | */ 152 | async putHttpCache(url, data) { 153 | const k = url.href; 154 | const v = cacheutil.makeHttpCacheValue(data); 155 | 156 | if (!k || !v) return; 157 | 158 | return this.httpcache.put(k, v); 159 | } 160 | 161 | /** 162 | * @param {URL} url 163 | * @returns {Promise} 164 | */ 165 | async fromHttpCache(url) { 166 | const k = url.href; 167 | const response = await this.httpcache.get(k); 168 | if (!response || !response.ok) return null; 169 | 170 | const metadata = cacheutil.extractMetadata(response); 171 | this.log.d("http-cache response metadata", metadata); 172 | 173 | // 'b' shouldn't be null; but a dns question or a dns answer 174 | const b = await response.arrayBuffer(); 175 | // when 'b' is less than dns-packet header-size, decode errs out 176 | const p = dnsutil.decode(b); 177 | // though 'm' is never empty 178 | const m = metadata; 179 | 180 | const cr = cacheutil.makeCacheValue(p, b, m); 181 | 182 | return cacheutil.isValueValid(cr) ? cr : null; 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/plugins/dns-op/dns-op.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import DNSResolver from "./resolver.js"; 10 | import { DNSPrefilter } from "./prefilter.js"; 11 | import { DNSCacheResponder } from "./cache-resolver.js"; 12 | import { DnsCache } from "./cache.js"; 13 | 14 | export { DNSResolver, DNSCacheResponder, DnsCache, DNSPrefilter }; 15 | -------------------------------------------------------------------------------- /src/plugins/dns-op/prefilter.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as dnsutil from "../../commons/dnsutil.js"; 9 | import * as util from "../../commons/util.js"; 10 | import * as pres from "../plugin-response.js"; 11 | 12 | // eslint-disable-next-line max-len 13 | // from: github.com/DNSCrypt/dnscrypt-proxy/blob/10ded3d9f/dnscrypt-proxy/plugin_block_undelegated.go 14 | const undelegated = new Set([ 15 | // eslint-disable-next-line max-len 16 | "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa", 17 | "0.in-addr.arpa", 18 | "1", 19 | // eslint-disable-next-line max-len 20 | "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa", 21 | "10.in-addr.arpa", 22 | "100.100.in-addr.arpa", 23 | "100.51.198.in-addr.arpa", 24 | "101.100.in-addr.arpa", 25 | "102.100.in-addr.arpa", 26 | "103.100.in-addr.arpa", 27 | "104.100.in-addr.arpa", 28 | "105.100.in-addr.arpa", 29 | "106.100.in-addr.arpa", 30 | "107.100.in-addr.arpa", 31 | "108.100.in-addr.arpa", 32 | "109.100.in-addr.arpa", 33 | "110.100.in-addr.arpa", 34 | "111.100.in-addr.arpa", 35 | "112.100.in-addr.arpa", 36 | "113.0.203.in-addr.arpa", 37 | "113.100.in-addr.arpa", 38 | "114.100.in-addr.arpa", 39 | "115.100.in-addr.arpa", 40 | "116.100.in-addr.arpa", 41 | 
"117.100.in-addr.arpa", 42 | "118.100.in-addr.arpa", 43 | "119.100.in-addr.arpa", 44 | "120.100.in-addr.arpa", 45 | "121.100.in-addr.arpa", 46 | "122.100.in-addr.arpa", 47 | "123.100.in-addr.arpa", 48 | "124.100.in-addr.arpa", 49 | "125.100.in-addr.arpa", 50 | "126.100.in-addr.arpa", 51 | "127.100.in-addr.arpa", 52 | "127.in-addr.arpa", 53 | "16.172.in-addr.arpa", 54 | "168.192.in-addr.arpa", 55 | "17.172.in-addr.arpa", 56 | "18.172.in-addr.arpa", 57 | "19.172.in-addr.arpa", 58 | "2.0.192.in-addr.arpa", 59 | "20.172.in-addr.arpa", 60 | "21.172.in-addr.arpa", 61 | "22.172.in-addr.arpa", 62 | "23.172.in-addr.arpa", 63 | "24.172.in-addr.arpa", 64 | "25.172.in-addr.arpa", 65 | "254.169.in-addr.arpa", 66 | "255.255.255.255.in-addr.arpa", 67 | "26.172.in-addr.arpa", 68 | "27.172.in-addr.arpa", 69 | "28.172.in-addr.arpa", 70 | "29.172.in-addr.arpa", 71 | "30.172.in-addr.arpa", 72 | "31.172.in-addr.arpa", 73 | "64.100.in-addr.arpa", 74 | "65.100.in-addr.arpa", 75 | "66.100.in-addr.arpa", 76 | "67.100.in-addr.arpa", 77 | "68.100.in-addr.arpa", 78 | "69.100.in-addr.arpa", 79 | "70.100.in-addr.arpa", 80 | "71.100.in-addr.arpa", 81 | "72.100.in-addr.arpa", 82 | "73.100.in-addr.arpa", 83 | "74.100.in-addr.arpa", 84 | "75.100.in-addr.arpa", 85 | "76.100.in-addr.arpa", 86 | "77.100.in-addr.arpa", 87 | "78.100.in-addr.arpa", 88 | "79.100.in-addr.arpa", 89 | "8.b.d.0.1.0.0.2.ip6.arpa", 90 | "8.e.f.ip6.arpa", 91 | "80.100.in-addr.arpa", 92 | "81.100.in-addr.arpa", 93 | "82.100.in-addr.arpa", 94 | "83.100.in-addr.arpa", 95 | "84.100.in-addr.arpa", 96 | "85.100.in-addr.arpa", 97 | "86.100.in-addr.arpa", 98 | "87.100.in-addr.arpa", 99 | "88.100.in-addr.arpa", 100 | "89.100.in-addr.arpa", 101 | "9.e.f.ip6.arpa", 102 | "90.100.in-addr.arpa", 103 | "91.100.in-addr.arpa", 104 | "92.100.in-addr.arpa", 105 | "93.100.in-addr.arpa", 106 | "94.100.in-addr.arpa", 107 | "95.100.in-addr.arpa", 108 | "96.100.in-addr.arpa", 109 | "97.100.in-addr.arpa", 110 | "98.100.in-addr.arpa", 111 | 
"99.100.in-addr.arpa", 112 | "a.e.f.ip6.arpa", 113 | "airdream", 114 | "api", 115 | "b.e.f.ip6.arpa", 116 | "bbrouter", 117 | "belkin", 118 | "bind", 119 | "blinkap", 120 | "corp", 121 | "d.f.ip6.arpa", 122 | "davolink", 123 | "dearmyrouter", 124 | "dhcp", 125 | "dlink", 126 | "domain", 127 | "envoy", 128 | "example", 129 | "f.f.ip6.arpa", 130 | "grp", 131 | "gw==", 132 | "home", 133 | "hub", 134 | "internal", 135 | "intra", 136 | "intranet", 137 | "invalid", 138 | "ksyun", 139 | "lan", 140 | "loc", 141 | "local", 142 | "localdomain", 143 | "localhost", 144 | "localnet", 145 | "modem", 146 | "mynet", 147 | "myrouter", 148 | "novalocal", 149 | "onion", 150 | "openstacklocal", 151 | "priv", 152 | "private", 153 | "prv", 154 | "router", 155 | "telus", 156 | "test", 157 | "totolink", 158 | "wlan_ap", 159 | "workgroup", 160 | "zghjccbob3n0", 161 | ]); 162 | 163 | export class DNSPrefilter { 164 | constructor() { 165 | this.log = log.withTags("DnsPrefilter"); 166 | } 167 | 168 | async close() { 169 | // no-op 170 | } 171 | 172 | /** 173 | * @param {{rxid: string, requestDecodedDnsPacket: any}} ctx 174 | * @returns {Promise} 175 | */ 176 | async exec(ctx) { 177 | let r = pres.emptyResponse(); 178 | 179 | try { 180 | r.data = await this.filterOut(ctx.requestDecodedDnsPacket); 181 | } catch (e) { 182 | r = pres.errResponse("dnsPrefilter", e); 183 | this.log.e(ctx.rxid, "main", e); 184 | } 185 | 186 | return r; 187 | } 188 | 189 | async filterOut(dnsPacket) { 190 | // set a dummy flag, "prefilter" 191 | const block = pres.rdnsBlockResponse("prefilter"); 192 | const allow = pres.rdnsNoBlockResponse(); 193 | const domains = dnsutil.extractDomains(dnsPacket); 194 | 195 | // domains is a Set 196 | for (const d of domains) { 197 | const subdomains = d.split("."); 198 | do { 199 | if (util.emptyArray(subdomains)) break; 200 | if (undelegated.has(subdomains.join("."))) { 201 | return block; 202 | } 203 | } while (subdomains.shift() != null); 204 | } 205 | 206 | return allow; 207 | 
} 208 | } 209 | -------------------------------------------------------------------------------- /src/plugins/observability/geoip.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import { LfuCache } from "@serverless-dns/lfu-cache"; 10 | import * as bufutil from "../../commons/bufutil.js"; 11 | import * as envutil from "../../commons/envutil.js"; 12 | import * as util from "../../commons/util.js"; 13 | 14 | const debug = false; 15 | 16 | const ip6sep = ":"; 17 | const ip4sep = "."; 18 | const ip6size = 16; 19 | const ip4size = 4; 20 | 21 | const dbip4 = "dbip.v4"; 22 | const dbip6 = "dbip.v6"; 23 | 24 | // unknown country code 25 | const ccunknown = "ZZ"; 26 | // country code size 27 | const ccsize = 2; 28 | // stop search for country-code beyond this depth in the geo4 / geo6 29 | const maxdepth = 32; 30 | 31 | // wait time in ms, before rechecking if geoip is initialized 32 | const waitms = 50; 33 | // max wait time in ms, before giving up on geoip initialization 34 | const maxwaitms = 5000; 35 | 36 | // geoip cache size 37 | const size = 20000; 38 | 39 | export class GeoIP { 40 | constructor() { 41 | this.geo4 = null; 42 | this.geo6 = null; 43 | this.initializing = false; 44 | this.decoder = new TextDecoder(); 45 | this.repo = envutil.geoipUrl(); 46 | this.cache = new LfuCache("GeoIP", size); 47 | this.log = log.withTags("GeoIP"); 48 | } 49 | 50 | initDone() { 51 | return !bufutil.emptyBuf(this.geo4) && !bufutil.emptyBuf(this.geo6); 52 | } 53 | 54 | async download(force = false) { 55 | if (!force && this.initDone()) { 56 | return Promise.all([this.geo4, this.geo6]); 57 | } 58 | 59 | this.log.d("downloading geoip dbs", this.repo); 60 | const 
[f1, f2] = await Promise.all([ 61 | fetch(this.repo + dbip4), 62 | fetch(this.repo + dbip6), 63 | ]); 64 | 65 | if (!f1.ok || !f2.ok) throw new Error("geoip download failed"); 66 | 67 | return Promise.all([f1.arrayBuffer(), f2.arrayBuffer()]); 68 | } 69 | 70 | async init(g4, g6) { 71 | if (this.initDone()) return true; 72 | 73 | let totalsleep = 0; 74 | while (this.initializing && totalsleep < maxwaitms) { 75 | await util.sleep(waitms); 76 | totalsleep += waitms; 77 | } 78 | 79 | this.initializing = true; 80 | if (g4 == null || g6 == null) { 81 | [g4, g6] = await this.download(); 82 | const sz4 = this.geo4 && this.geo4.byteLength; 83 | const sz6 = this.geo4 && this.geo6.byteLength; 84 | this.log.d("downloading geoip dbs done", sz4, sz6); 85 | } 86 | this.geo4 = bufutil.normalize8(g4); 87 | this.geo6 = bufutil.normalize8(g6); 88 | 89 | this.initializing = false; 90 | 91 | return this.initDone(); 92 | } 93 | 94 | country(ipstr) { 95 | if (!this.initDone()) return ccunknown; 96 | if (util.emptyString(ipstr)) return ccunknown; 97 | 98 | const cached = this.cache.get(ipstr); 99 | if (!util.emptyObj(cached)) { 100 | return cached; 101 | } 102 | 103 | const ip = this.iptou8(ipstr); 104 | const recsize = ip.length + ccsize; 105 | const g = ip.length === 4 ? 
this.geo4 : this.geo6; 106 | 107 | let low = 0; 108 | let high = g.byteLength / recsize; 109 | let i = 0; 110 | while (high - 1 > low) { 111 | const mid = ((high + low) / 2) | 0; 112 | const midpos = mid * recsize; 113 | 114 | if (debug) this.log.d(i, "nexti", mid, "", low, high); 115 | 116 | if (this.lessthan(g, midpos, ip)) low = mid; 117 | else high = mid; 118 | 119 | if (i++ > maxdepth) break; 120 | } 121 | 122 | const pos = low * recsize + ip.length; 123 | const raw = g.subarray(pos, pos + ccsize); 124 | const cc = this.decoder.decode(raw); 125 | 126 | this.cache.put(ipstr, cc); 127 | 128 | if (debug) this.log.d(low, high, "", pos, raw, "cc", cc); 129 | 130 | return cc; 131 | } 132 | 133 | lessthan(g, w, ip) { 134 | for (let i = 0; i < ip.length; i++) { 135 | const gi = g[w + i]; 136 | const ii = ip[i]; 137 | if (debug) this.log.d(i, "", w, gi, "", ii); 138 | if (gi > ii) return false; 139 | if (ii > gi) return true; 140 | } 141 | return true; 142 | } 143 | 144 | iptou8(ip) { 145 | if (ip.indexOf(ip6sep) > 0) { 146 | const ip6 = ip.split(ip6sep); 147 | const ip6u8 = new Uint8Array(ip6size); 148 | for (let i = 0; i < ip6size; i++) { 149 | ip6u8[i] = parseInt(ip6[i], 16) | 0; 150 | } 151 | return ip6u8; 152 | } else { 153 | const ip4 = ip.split(ip4sep); 154 | const ip4u8 = new Uint8Array(ip4size); 155 | for (let i = 0; i < ip4size; i++) { 156 | ip4u8[i] = parseInt(ip4[i]) | 0; 157 | } 158 | return ip4u8; 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /src/plugins/plugin-response.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import * as util from "../commons/util.js"; 10 | import * as bufutil from "../commons/bufutil.js"; 11 | 12 | export class RResp { 13 | constructor(data = null, hasex = false, exfrom = "", exstack = "") { 14 | /** @type {RespData?} */ 15 | this.data = data || new RespData(); 16 | /** @type {boolean} */ 17 | this.isException = hasex; 18 | /** @type {String} */ 19 | this.exceptionFrom = exfrom; 20 | /** @type {String} */ 21 | this.exceptionStack = exstack; 22 | } 23 | } 24 | 25 | export class RespData { 26 | constructor(blocked = false, flag, packet, raw, stamps) { 27 | /** @type {boolean} */ 28 | this.isBlocked = blocked; 29 | /** @type {String} */ 30 | this.flag = flag || ""; 31 | /** @type {Object} */ 32 | this.dnsPacket = packet || null; 33 | /** @type {ArrayBuffer} */ 34 | this.dnsBuffer = raw || null; 35 | /** @type {BStamp?} */ 36 | this.stamps = stamps || {}; 37 | } 38 | } 39 | 40 | /** 41 | * @typedef {Object.} BStamp 42 | */ 43 | 44 | /** @returns {RResp} */ 45 | export function emptyResponse() { 46 | return new RResp(); 47 | } 48 | 49 | /** 50 | * @param {String} id 51 | * @param {Error} err 52 | * @returns {RResp} 53 | */ 54 | export function errResponse(id, err) { 55 | const data = null; 56 | const hasex = true; 57 | const st = util.emptyObj(err) || !err.stack ? 
"no-stacktrace" : err.stack; 58 | return new RResp(data, hasex, id, st); 59 | } 60 | 61 | /** 62 | * @param {Object} packet 63 | * @param {ArrayBuffer} raw 64 | * @param {BStamp?} stamps 65 | * @returns {RespData} 66 | */ 67 | export function dnsResponse(packet = null, raw = null, stamps = null) { 68 | if (util.emptyObj(packet) || bufutil.emptyBuf(raw)) { 69 | throw new Error("empty packet for dns-res"); 70 | } 71 | const flags = ""; 72 | const blocked = false; 73 | return new RespData(blocked, flags, packet, raw, stamps); 74 | } 75 | 76 | /** 77 | * @param {String} flag 78 | * @returns {RespData} 79 | */ 80 | export function rdnsBlockResponse(flag) { 81 | if (util.emptyString(flag)) { 82 | throw new Error("no flag set for block-res"); 83 | } 84 | const blocked = true; 85 | return new RespData(blocked, flag); 86 | } 87 | 88 | /** @returns {RespData} */ 89 | export function rdnsNoBlockResponse() { 90 | return new RespData(false); 91 | } 92 | 93 | /** 94 | * Copy block related props from one RespData to another 95 | * @param {RespData} to 96 | * @param {RespData} from 97 | * @returns {RespData} to 98 | */ 99 | export function copyOnlyBlockProperties(to, from) { 100 | to.isBlocked = from.isBlocked; 101 | to.flag = from.flag; 102 | 103 | return to; 104 | } 105 | -------------------------------------------------------------------------------- /src/plugins/rethinkdns/filter.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import * as dnsutil from "../../commons/dnsutil.js"; 10 | 11 | export class BlocklistFilter { 12 | constructor() { 13 | // see: src/helpers/node/blocklists.js:hasBlocklistFiles 14 | this.ftrie = null; 15 | this.filetag = null; 16 | } 17 | 18 | load(frozentrie, filetag) { 19 | this.ftrie = frozentrie; 20 | this.filetag = filetag; 21 | } 22 | 23 | blockstamp(domainName) { 24 | const n = dnsutil.normalizeName(domainName); 25 | 26 | return this.lookup(n); 27 | } 28 | 29 | lookup(n) { 30 | const t = this.ftrie; 31 | try { 32 | n = t.transform(n); 33 | return t.lookup(n); 34 | } catch (ignored) { 35 | // usually u8 / u6 uencode error 36 | /* 37 | * E DnsResolver [rx.0n550a6jz.dcgr3go4md] main Error: 38 | * encode: undef num: undefined, for: :, 39 | * in: https://app-measurement.com/sdk-exp, res: 22,34,34, 40 | * 30,33,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 41 | * at Codec.encodeinner (file:///app/fly.mjs:9362:15) 42 | * at Codec.encode (file:///app/fly.mjs:9325:24) 43 | * at FrozenTrie.transform (file:///app/fly.mjs:10443:23) 44 | * at BlocklistFilter.lookup (file:///app/fly.mjs:10633:23) 45 | * at BlocklistFilter.blockstamp (file:///app/fly.mjs:10628:17) 46 | * at Object.blockstampFromBlocklistFilter (file:///app/fly.mjs:14692:35) 47 | * at DNSResolver.makeRdnsResponse (file:///app/fly.mjs:11737:54) 48 | * at DNSResolver.resolveDns (file:///app/fly.mjs:11618:26) 49 | * at DNSResolver.exec (file:///app/fly.mjs:11536:34) 50 | */ 51 | log.d("blf lookup err:", ignored.message); 52 | } 53 | return null; 54 | } 55 | 56 | extract(ids) { 57 | const r = {}; 58 | for (const id of ids) r[id] = this.filetag[id]; 59 | return r; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/plugins/rethinkdns/trie-config.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 
3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | // github.com/serverless-dns/trie/blob/49049a87/src/config.js#L22 10 | const defaults = { 11 | // inspect trie building stats 12 | inspect: false, 13 | // debug prints debug logs 14 | debug: false, 15 | // use codec-type b6 to convert js-str to bytes and vice-versa 16 | useCodec6: false, 17 | // optimize storing flags, that is, store less than 3 flags as-is 18 | optflags: false, 19 | }; 20 | 21 | export function withDefaults(cfg) { 22 | const base = Object.assign({}, defaults); 23 | return Object.assign(base, cfg); 24 | } 25 | -------------------------------------------------------------------------------- /src/plugins/users/auth-token.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import { LfuCache } from "@serverless-dns/lfu-cache"; 10 | import * as util from "../../commons/util.js"; 11 | import * as bufutil from "../../commons/bufutil.js"; 12 | import * as envutil from "../../commons/envutil.js"; 13 | import * as rdnsutil from "../../plugins/rdns-util.js"; 14 | 15 | export const info = "sdns-public-auth-info"; 16 | 17 | export class Outcome { 18 | constructor(s) { 19 | this.status = s; 20 | // no auth or auth passed 21 | this.ok = s >= 0; 22 | // no auth or auth failed 23 | this.no = s <= 0; 24 | // auth passed 25 | this.yes = s === 1; 26 | } 27 | 28 | // no auth 29 | static none() { 30 | return new Outcome(0); 31 | } 32 | // auth passed 33 | static pass() { 34 | return new Outcome(1); 35 | } 36 | // auth failed 37 | static fail() { 38 | return new Outcome(-1); 39 | } 40 | // auth failed, missing msg-key 41 | static miss() { 42 | return new Outcome(-2); 43 | } 44 | // auth failed, internal error 45 | static err() { 46 | return new Outcome(-3); 47 | } 48 | } 49 | 50 | const akdelim = "|"; 51 | const msgkeydelim = "|"; 52 | const encoder = new TextEncoder(); 53 | const mem = new LfuCache("AuthTokens", 100); 54 | 55 | /** 56 | * @param {string} rxid 57 | * @param {string} url 58 | * @returns {Promise} 59 | */ 60 | export async function auth(rxid, url) { 61 | const accesskeys = envutil.accessKeys(); 62 | 63 | // empty access key, allow all 64 | if (util.emptySet(accesskeys)) { 65 | return Outcome.none(); 66 | } 67 | const msg = rdnsutil.msgkeyFromUrl(url); 68 | // if missing msg-key in url, deny 69 | if (util.emptyString(msg)) { 70 | log.w(rxid, "auth: stop! 
missing access-key in", url); 71 | return Outcome.miss(); 72 | } 73 | 74 | let ok = false; 75 | let a6 = ""; 76 | // eval [s2.domain.tld, domain.tld] from a hostname 77 | // like s0.s1.s2.domain.tld 78 | for (const dom of util.domains(url)) { 79 | if (util.emptyString(dom)) continue; 80 | 81 | const [hex, hexcat] = await gen(msg, dom); 82 | 83 | log.d(rxid, msg, dom, "<= msg/h :auth: hex/k =>", hexcat, accesskeys); 84 | 85 | // allow if access-key (upto its full len) matches calculated hex 86 | for (const ak of accesskeys) { 87 | ok = hexcat.startsWith(ak); 88 | if (ok) { 89 | return Outcome.pass(); 90 | } else { 91 | const [d, h] = ak.split(akdelim); 92 | a6 += d + akdelim + h.slice(0, 6) + " "; 93 | } 94 | } 95 | 96 | const h6 = dom + akdelim + hex.slice(0, 6); 97 | log.w(rxid, "auth: key mismatch want:", a6, "have:", h6); 98 | } 99 | 100 | log.w(rxid, "auth: stop! no matches"); 101 | return Outcome.fail(); 102 | } 103 | 104 | export async function gen(msg, domain) { 105 | if (util.emptyString(msg) || util.emptyString(domain)) { 106 | throw new Error(`args empty [${msg} / ${domain}]`); 107 | } 108 | 109 | // reject if msg is not alphanumeric 110 | if (!util.isAlphaNumeric(msg) || !util.isDNSName(domain)) { 111 | throw new Error("args must be alphanumeric"); 112 | } 113 | 114 | const m = msg.toLowerCase(); 115 | const d = domain.toLowerCase(); 116 | const cat = m + msgkeydelim + d; 117 | // return memoized ans 118 | const cached = mem.get(cat); 119 | if (cached) return cached; 120 | 121 | const k8 = encoder.encode(cat); 122 | const m8 = encoder.encode(info); 123 | const ab = await proof(k8, m8); 124 | 125 | // conv to base16, pad 0 for single digits, 01, 02, 03, ... 
0f 126 | const hex = bufutil.hex(ab); 127 | const hexcat = domain + akdelim + hex; 128 | const toks = [hex, hexcat]; 129 | 130 | mem.put(cat, toks); 131 | return toks; 132 | } 133 | 134 | // nb: subtle crypto api on node v19+ 135 | // stackoverflow.com/a/47332317 136 | async function proof(key, val) { // HMAC-SHA256 over val keyed by key; plain SHA-256 of key when val is empty 137 | const hmac = "HMAC"; 138 | const sha256 = "SHA-256"; 139 | 140 | if (bufutil.emptyBuf(key)) { 141 | throw new Error("key array-buffer empty"); 142 | } 143 | 144 | // use sha256 instead of hmac if nothing to sign 145 | if (bufutil.emptyBuf(val)) { 146 | return await crypto.subtle.digest(sha256, key); 147 | } 148 | 149 | const hmackey = await crypto.subtle.importKey( 150 | "raw", 151 | key, 152 | { 153 | name: hmac, 154 | hash: { name: sha256 }, 155 | }, 156 | false, // export = false 157 | ["sign", "verify"] 158 | ); 159 | 160 | // hmac sign & verify: stackoverflow.com/a/72765383 161 | return await crypto.subtle.sign(hmac, hmackey, val); 162 | } 163 | -------------------------------------------------------------------------------- /src/plugins/users/user-cache.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 | */ 8 | 9 | import { LfuCache } from "@serverless-dns/lfu-cache"; 10 | 11 | export class UserCache { 12 | constructor(size) { 13 | const name = "UserCache"; 14 | this.cache = new LfuCache(name, size); 15 | this.log = log.withTags(name); 16 | } 17 | 18 | get(key) { 19 | return this.cache.get(key); 20 | } 21 | 22 | put(key, val) { 23 | try { 24 | this.cache.put(key, val); 25 | } catch (e) { 26 | this.log.e("put", key, val, e.stack); 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/plugins/users/user-op.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import { UserCache } from "./user-cache.js"; 9 | import * as pres from "../plugin-response.js"; 10 | import * as util from "../../commons/util.js"; 11 | import * as rdnsutil from "../rdns-util.js"; 12 | import * as token from "./auth-token.js"; 13 | import * as bufutil from "../../commons/bufutil.js"; 14 | 15 | // TODO: determine an approp cache-size 16 | const cacheSize = 20000; 17 | 18 | export class UserOp { 19 | constructor() { 20 | this.userConfigCache = new UserCache(cacheSize); 21 | this.log = log.withTags("UserOp"); 22 | } 23 | 24 | /** 25 | * @param {{request: Request, isDnsMsg: Boolean, rxid: string}} ctx 26 | * @returns {Promise} 27 | */ 28 | async exec(ctx) { 29 | let res = pres.emptyResponse(); 30 | 31 | try { 32 | const out = await token.auth(ctx.rxid, ctx.request.url); 33 | if (!out.ok) { 34 | res = pres.errResponse("UserOp:Auth", new Error("auth failed")); 35 | } else { 36 | res = this.loadUser(ctx); 37 | } 38 | res.data.userAuth = out; 39 | } catch (ex) { 40 | res = pres.errResponse("UserOp", ex); 41 | } 42 | 43 | return 
res; 44 | } 45 | 46 | /** 47 | * @param {{request: Request, isDnsMsg: Boolean, rxid: string}} ctx 48 | * @returns {pres.RResp} 49 | */ 50 | loadUser(ctx) { 51 | let response = pres.emptyResponse(); 52 | 53 | if (!ctx.isDnsMsg) { 54 | this.log.w(ctx.rxid, "not a dns-msg, ignore"); 55 | return response; 56 | } 57 | 58 | try { 59 | const blocklistFlag = rdnsutil.blockstampFromUrl(ctx.request.url); 60 | 61 | if (util.emptyString(blocklistFlag)) { 62 | this.log.d(ctx.rxid, "empty blocklist-flag", ctx.request.url); 63 | } 64 | 65 | // blocklistFlag may be invalid, ref rdnsutil.blockstampFromUrl 66 | let r = this.userConfigCache.get(blocklistFlag); 67 | if (!util.emptyString(blocklistFlag) && util.emptyObj(r)) { 68 | r = rdnsutil.unstamp(blocklistFlag); 69 | 70 | if (!bufutil.emptyBuf(r.userBlocklistFlagUint)) { 71 | this.log.d(ctx.rxid, "new cfg cache kv", blocklistFlag, r); 72 | // TODO: blocklistFlag is not normalized, ie b32 used for dot isn't 73 | // converted to its b64 form (which doh and rethinkdns modules use) 74 | // example, b32: 1-AABABAA / equivalent b64: 1:AAIAgA== 75 | this.userConfigCache.put(blocklistFlag, r); 76 | } 77 | } else { 78 | this.log.d(ctx.rxid, "cfg cache hit?", r != null, blocklistFlag, r); 79 | } 80 | 81 | response.data.userBlocklistInfo = r; 82 | response.data.userBlocklistFlag = blocklistFlag; 83 | // sets user-preferred doh upstream 84 | response.data.dnsResolverUrl = null; 85 | } catch (e) { 86 | this.log.e(ctx.rxid, "loadUser", e); 87 | response = pres.errResponse("UserOp:loadUser", e); 88 | } 89 | 90 | return response; 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/plugins/users/users.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. 
If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | import { UserOp } from "./user-op.js"; 10 | 11 | export { UserOp }; 12 | -------------------------------------------------------------------------------- /src/server-deno.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | // env config at top, so if .env file variables are used, it is available to 10 | // other modules. 11 | import "./core/deno/config.ts"; 12 | import { handleRequest } from "./core/doh.js"; 13 | import { stopAfter, uptime } from "./core/svc.js"; 14 | import { serve, serveTls } from "https://deno.land/std@0.171.0/http/server.ts"; 15 | import * as system from "./system.js"; 16 | import * as util from "./commons/util.js"; 17 | import * as bufutil from "./commons/bufutil.js"; 18 | import * as dnsutil from "./commons/dnsutil.js"; 19 | import * as envutil from "./commons/envutil.js"; 20 | 21 | let log: any = null; 22 | let listeners: Array = []; 23 | 24 | ((main) => { 25 | system.sub("go", systemUp); 26 | system.sub("stop", systemDown); 27 | // ask prepare phase to commence 28 | system.pub("prepare"); 29 | })(); 30 | 31 | function systemDown() { 32 | // system-down even may arrive even before the process has had the chance 33 | // to start, in which case globals like env and log may not be available 34 | console.info("rcv stop signal; uptime", uptime() / 1000, "secs"); 35 | 36 | const srvs = listeners; 37 | listeners = []; 38 | 39 | srvs.forEach((s) => { 40 | if (!s) return; 41 | console.info("stopping..."); 42 | // Deno.lisenters are closed, while Deno.Servers are aborted 43 | if (typeof s.close === 
"function") s.close(); 44 | else if (typeof s.abort === "function") s.abort(); 45 | else console.warn("unknown server type", s); 46 | }); 47 | 48 | util.timeout(/* 2s*/ 2 * 1000, () => { 49 | console.info("game over"); 50 | // exit success aka 0; ref: community.fly.io/t/4547/6 51 | Deno.exit(0); 52 | }); 53 | } 54 | 55 | function systemUp() { 56 | log = util.logger("Deno"); 57 | if (!log) throw new Error("logger unavailable on system up"); 58 | 59 | const downloadmode = envutil.blocklistDownloadOnly() as boolean; 60 | const profilermode = envutil.profileDnsResolves() as boolean; 61 | if (downloadmode) { 62 | log.i("in download mode, not running the dns resolver"); 63 | return; 64 | } else if (profilermode) { 65 | const durationms = 60 * 1000; 66 | log.w("in profiler mode, run for", durationms, "and exit"); 67 | stopAfter(durationms); 68 | } 69 | 70 | const abortctl = new AbortController(); 71 | const onDenoDeploy = envutil.onDenoDeploy() as boolean; 72 | const dohConnOpts = { port: envutil.dohBackendPort() }; 73 | const dotConnOpts = { port: envutil.dotBackendPort() }; 74 | const sigOpts = { 75 | signal: abortctl.signal, 76 | onListen: undefined, 77 | }; 78 | const tlsOpts = { 79 | certFile: envutil.tlsCrtPath() as string, 80 | keyFile: envutil.tlsKeyPath() as string, 81 | }; 82 | // deno.land/manual@v1.18.0/runtime/http_server_apis_low_level 83 | const httpOpts = { 84 | alpnProtocols: ["h2", "http/1.1"], 85 | }; 86 | 87 | startDoh(); 88 | startDotIfPossible(); 89 | 90 | // deno.land/manual@v1.29.1/runtime/http_server_apis 91 | async function startDoh() { 92 | if (terminateTls()) { 93 | // deno.land/std@0.170.0/http/server.ts?s=serveTls 94 | serveTls(serveDoh, { 95 | ...dohConnOpts, 96 | ...tlsOpts, 97 | ...httpOpts, 98 | ...sigOpts, 99 | }); 100 | } else { 101 | // deno.land/std@0.171.0/http/server.ts?s=serve 102 | serve(serveDoh, { ...dohConnOpts, ...sigOpts }); 103 | } 104 | 105 | up("DoH", abortctl, dohConnOpts); 106 | } 107 | 108 | async function 
startDotIfPossible() { 109 | // No DoT on Deno Deploy which supports only http workloads 110 | if (onDenoDeploy) return; 111 | 112 | // doc.deno.land/deno/stable/~/Deno.listenTls 113 | // doc.deno.land/deno/stable/~/Deno.listen 114 | const dot = terminateTls() 115 | ? Deno.listenTls({ ...dotConnOpts, ...tlsOpts }) 116 | : Deno.listen({ ...dotConnOpts }); 117 | 118 | up("DoT (no blocklists)", dot, dotConnOpts); 119 | 120 | // deno.land/manual@v1.11.3/runtime/http_server_apis#handling-connections 121 | for await (const conn of dot) { 122 | log.d("DoT conn:", conn.remoteAddr); 123 | 124 | // to not block the server and accept further conns, do not await 125 | serveTcp(conn); 126 | } 127 | } 128 | 129 | function up(p: string, s: any, opts: any) { 130 | log.i("up", p, opts, "tls?", terminateTls()); 131 | // 's' may be a Deno.Listener or std:http/Server 132 | listeners.push(s); 133 | } 134 | 135 | function terminateTls() { 136 | if (onDenoDeploy) return false; 137 | if (util.emptyString(tlsOpts.keyFile)) return false; 138 | if (envutil.isCleartext()) return false; 139 | if (util.emptyString(tlsOpts.certFile)) return false; 140 | return true; 141 | } 142 | } 143 | 144 | async function serveDoh(req: Request) { 145 | try { 146 | // doc.deno.land/deno/stable/~/Deno.RequestEvent 147 | // deno.land/manual/runtime/http_server_apis#http-requests-and-responses 148 | return handleRequest(util.mkFetchEvent(req)); 149 | } catch (e) { 150 | // Client may close conn abruptly before a response could be sent 151 | log.w("doh fail", e); 152 | return util.respond405(); 153 | } 154 | } 155 | 156 | async function serveTcp(conn: Deno.Conn) { 157 | // TODO: Sync this impl with serveTcp in server-node.js 158 | const qlBuf = new Uint8Array(2); 159 | 160 | while (true) { 161 | let n = null; 162 | 163 | try { 164 | n = await conn.read(qlBuf); 165 | } catch (e) { 166 | log.w("err tcp query read", e); 167 | break; 168 | } 169 | 170 | if (n == 0 || n == null) { 171 | log.d("tcp socket clean 
shutdown"); 172 | break; 173 | } 174 | 175 | // TODO: use dnsutil.validateSize instead 176 | if (n < 2) { 177 | log.w("query too small"); 178 | break; 179 | } 180 | 181 | const ql = new DataView(qlBuf.buffer).getUint16(0); 182 | log.d(`Read ${n} octets; q len = ${qlBuf} = ${ql}`); 183 | 184 | const q = new Uint8Array(ql); 185 | n = await conn.read(q); 186 | log.d(`Read ${n} length q`); 187 | 188 | if (n != ql) { 189 | log.w(`query len mismatch: ${n} < ${ql}`); 190 | break; 191 | } 192 | 193 | // TODO: Parallel processing 194 | await handleTCPQuery(q, conn); 195 | } 196 | 197 | // TODO: expect client to close the connection; timeouts. 198 | conn.close(); 199 | } 200 | 201 | async function handleTCPQuery(q: Uint8Array, conn: Deno.Conn) { 202 | try { 203 | const r = await resolveQuery(q); 204 | const rlBuf = bufutil.encodeUint8ArrayBE(r.byteLength, 2); 205 | 206 | const n = await conn.write(new Uint8Array([...rlBuf, ...r])); 207 | if (n != r.byteLength + 2) { 208 | log.e(`res write incomplete: ${n} < ${r.byteLength + 2}`); 209 | } 210 | } catch (e) { 211 | log.w("err tcp query resolve", e); 212 | } 213 | } 214 | 215 | async function resolveQuery(q: Uint8Array) { 216 | // TODO: Sync code with server-node.js:resolveQuery 217 | const freq: Request = new Request("https://ignored.example.com", { 218 | method: "POST", 219 | headers: util.concatHeaders(util.dnsHeaders(), util.contentLengthHeader(q)), 220 | body: q, 221 | }); 222 | 223 | const r = await handleRequest(util.mkFetchEvent(freq)); 224 | 225 | const ans = await r.arrayBuffer(); 226 | 227 | if (!bufutil.emptyBuf(ans)) { 228 | return new Uint8Array(ans); 229 | } else { 230 | return new Uint8Array(dnsutil.servfailQ(q)); 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/server-fastly.js: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line spaced-comment 2 | /// 3 | 4 | /* 5 | * Copyright (c) 2022 
RethinkDNS and its authors. 6 | * 7 | * This Source Code Form is subject to the terms of the Mozilla Public 8 | * License, v. 2.0. If a copy of the MPL was not distributed with this 9 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 10 | */ 11 | 12 | import "./core/fastly/config.js"; 13 | import { handleRequest } from "./core/doh.js"; 14 | import * as system from "./system.js"; 15 | import * as util from "./commons/util.js"; 16 | 17 | addEventListener("fetch", (event) => { 18 | return event.respondWith(serveDoh(event)); 19 | }); 20 | 21 | /** 22 | * @param {FetchEvent} event 23 | * @returns {Response} 24 | */ 25 | async function serveDoh(event) { 26 | // on Fastly, the network-context is only available in an event listener 27 | // and so, publish system prepare from here instead of from main which 28 | // runs in global-scope. 29 | system.pub("prepare"); 30 | 31 | try { 32 | await system.when("go"); 33 | return await handleRequest(event); 34 | } catch (e) { 35 | console.error("server", "serveDoh err", e); 36 | return util.respond405(); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/server-workers.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | */ 8 | 9 | import "./core/workers/config.js"; 10 | import { handleRequest } from "./core/doh.js"; 11 | import * as system from "./system.js"; 12 | import * as util from "./commons/util.js"; 13 | 14 | export default { 15 | // workers/runtime-apis/fetch-event#syntax-module-worker 16 | async fetch(request, env, context) { 17 | return await serveDoh(request, env, context); 18 | }, 19 | }; 20 | 21 | function serveDoh(request, env, ctx) { 22 | // on Workers, the network-context is only available in an event listener 23 | // and so, publish system prepare from here instead of from main which 24 | // runs in global-scope. 25 | system.pub("prepare", { env: env }); 26 | 27 | const event = util.mkFetchEvent( 28 | request, 29 | null, 30 | ctx.waitUntil.bind(ctx), 31 | ctx.passThroughOnException.bind(ctx) 32 | ); 33 | 34 | return new Promise((accept) => { 35 | system 36 | .when("go") 37 | .then((v) => { 38 | return handleRequest(event); 39 | }) 40 | .then((response) => { 41 | accept(response); 42 | }) 43 | .catch((e) => { 44 | console.error("server", "serveDoh err", e); 45 | accept(util.respond405()); 46 | }); 47 | }); 48 | } 49 | -------------------------------------------------------------------------------- /src/system.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2021 RethinkDNS and its authors. 3 | * 4 | * This Source Code Form is subject to the terms of the Mozilla Public 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | import * as util from "./commons/util.js"; 9 | 10 | // Evaluate if EventTarget APIs can replace this hand-rolled impl 11 | // developers.cloudflare.com/workers/platform/changelog#2021-09-24 12 | // once emitted, they stick; firing off new listeners forever, just the once. 
13 | const stickyEvents = new Set([ 14 | // when process bring-up is done 15 | "prepare", 16 | // when env setup is done 17 | "ready", 18 | // when svc setup is done 19 | "steady", 20 | // when all systems are a-go 21 | "go", 22 | ]); 23 | 24 | const events = new Set([ 25 | // when server should cease 26 | "stop", 27 | ]); 28 | 29 | const listeners = new Map(); 30 | const waitGroup = new Map(); 31 | 32 | (() => { 33 | for (const e of events) { 34 | listeners.set(e, new Set()); 35 | waitGroup.set(e, new Set()); 36 | } 37 | 38 | for (const se of stickyEvents) { 39 | listeners.set(se, new Set()); 40 | waitGroup.set(se, new Set()); 41 | } 42 | })(); 43 | 44 | // fires an event 45 | export function pub(event, parcel = undefined) { 46 | awaiters(event, parcel); 47 | callbacks(event, parcel); 48 | } 49 | 50 | // invokes cb when event is fired 51 | export function sub(event, cb) { 52 | const eventCallbacks = listeners.get(event); 53 | 54 | // if such even callbacks don't exist 55 | if (!eventCallbacks) { 56 | // but event is sticky, fire off the listener at once 57 | if (stickyEvents.has(event)) { 58 | microtaskBox(cb); 59 | return true; 60 | } 61 | // but event doesn't exist, then there's nothing to do 62 | return false; 63 | } 64 | 65 | eventCallbacks.add(cb); 66 | 67 | return true; 68 | } 69 | 70 | // waits till event fires or timesout 71 | export function when(event, timeout = 0) { 72 | const wg = waitGroup.get(event); 73 | 74 | if (!wg) { 75 | // if stick event, fulfill promise right away 76 | if (stickyEvents.has(event)) { 77 | return Promise.resolve(event); 78 | } 79 | // no such event 80 | return Promise.reject(new Error(event + " missing")); 81 | } 82 | 83 | return new Promise((accept, reject) => { 84 | const tid = 85 | timeout > 0 86 | ? 
util.timeout(timeout, () => { 87 | reject(new Error(event + " elapsed " + timeout)); 88 | }) 89 | : -2; 90 | const fulfiller = function (parcel) { 91 | if (tid >= 0) clearTimeout(tid); 92 | accept(parcel, event); 93 | }; 94 | wg.add(fulfiller); 95 | }); 96 | } 97 | 98 | function awaiters(event, parcel) { 99 | const g = waitGroup.get(event); 100 | 101 | if (!g) return; 102 | 103 | // listeners valid just the once for stickyEvents 104 | if (stickyEvents.has(event)) { 105 | waitGroup.delete(event); 106 | } 107 | 108 | safeBox(g, parcel); 109 | } 110 | 111 | function callbacks(event, parcel) { 112 | const cbs = listeners.get(event); 113 | 114 | if (!cbs) return; 115 | 116 | // listeners valid just the once for stickyEvents 117 | if (stickyEvents.has(event)) { 118 | listeners.delete(event); 119 | } 120 | 121 | // callbacks are queued async and don't block the caller. On Workers, 122 | // where IOs or timers require event-context aka network-context, 123 | // which is only available when fns are invoked in response to an 124 | // incoming request (through the fetch event handler), such callbacks 125 | // may not even fire. Instead use: awaiters and not callbacks. 
126 | microtaskBox(cbs, parcel); 127 | } 128 | 129 | // TODO: could be replaced with scheduler.wait 130 | // developers.cloudflare.com/workers/platform/changelog#2021-12-10 131 | // queues fn in a macro-task queue of the event-loop 132 | // exec order: github.com/nodejs/node/issues/22257 133 | export function taskBox(fn) { 134 | util.timeout(/* with 0ms delay*/ 0, () => safeBox(fn)); 135 | } 136 | 137 | // queues fn in a micro-task queue 138 | // ref: MDN: Web/API/HTML_DOM_API/Microtask_guide/In_depth 139 | // queue-task polyfill: stackoverflow.com/a/61605098 140 | const taskboxPromise = { p: Promise.resolve() }; 141 | function microtaskBox(fns, arg) { 142 | let enqueue = null; 143 | if (typeof queueMicrotask === "function") { 144 | enqueue = queueMicrotask; 145 | } else { 146 | enqueue = taskboxPromise.p.then.bind(taskboxPromise.p); 147 | } 148 | 149 | enqueue(() => safeBox(fns, arg)); 150 | } 151 | 152 | // TODO: safeBox for async fns with r.push(await f())? 153 | // stackoverflow.com/questions/38508420 154 | function safeBox(fns, arg) { 155 | if (typeof fns === "function") { 156 | fns = [fns]; 157 | } 158 | 159 | const r = []; 160 | if (!util.isIterable(fns)) { 161 | return r; 162 | } 163 | 164 | for (const f of fns) { 165 | if (typeof f !== "function") { 166 | r.push(null); 167 | continue; 168 | } 169 | try { 170 | r.push(f(arg)); 171 | } catch (ignore) { 172 | r.push(null); 173 | } 174 | } 175 | 176 | return r; 177 | } 178 | -------------------------------------------------------------------------------- /test/data/tls/README.md: -------------------------------------------------------------------------------- 1 | Certificates in this directory expire in year 2121. 
2 | 3 | Generated using instructions from this gist 4 | - 5 | - 6 | -------------------------------------------------------------------------------- /test/data/tls/dns.rethinkdns.localhost.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDYjCCAkqgAwIBAgIUYYEtMRngt3gZ6UpYFJNIwCX90pUwDQYJKoZIhvcNAQEL 3 | BQAwHDEaMBgGA1UEAwwRTG9jYWxIb3N0LVJvb3QtQ0EwIBcNMjExMjA2MTQwMTIy 4 | WhgPMjEyMTExMTIxNDAxMjJaMCMxITAfBgNVBAMMGGRucy5yZXRoaW5rZG5zLmxv 5 | Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKykmkGhhX9q 6 | P7gN2+5pQPXU2yy+4at349AkPQYwszK/jvckjrs2pAoRZLaAcpwIdtd0Ul47oZ5s 7 | xiCoL+JNgHqPU0PLacN/B/NoADtR7GFBKzO0ni0ALBCA9hrFg71pFHz6VQzw2n6T 8 | pPfFYofE4kUlUWo9TBrIPhFEvC5nL9A9y3D3QuN39PZi4kNuJ7f5pVXccxATrzcH 9 | aKquDSdQbFU/iOQ3NPxcykENEGnEN7ZfrrsbRLqiDwr2Nt95E1lgJEHHzAXag/Xu 10 | BlZ8IohTmpNrYy9OPfwS7UuXafVqcAiCdioPkZEf8nUhT0vUayCvsYeX/2OQffv9 11 | M5qyD788hUECAwEAAaOBkjCBjzAfBgNVHSMEGDAWgBRk+0x0rSK0yoEIzsJpFXFP 12 | 4O75xzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcD 13 | ATA/BgNVHREEODA2ghhkbnMucmV0aGlua2Rucy5sb2NhbGhvc3SCGiouZG5zLnJl 14 | dGhpbmtkbnMubG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBD7IrH/tLti9Vr 15 | TDLd1Hi7bpszTwDhdNcQCxi1fqOF0I+mMyvD8EOLCeqx0USCcW1i3wl6/2rLQjlk 16 | e0F72Sy+d0yFx+hQIjgUW9pYhHZTzrcmgLrzF0e5bH+TPIvllqAjuh1qccLr8epy 17 | 3HlMF4T67y4xY7XjvKCciMsU9HIf696hqH3yhg20GRT4emMQ0RdJDg8wxigt/Pcd 18 | NPgaYbKpgeePvqDQiBNKGMEJK4QFffFdhFeqvJ1pw9/RQF31/5uPGevBBgnoagV/ 19 | S364RPY/7pZP4IkeH+MNHAdViiwuUnkpKVJu58WcOyy8GojfvmrB78iTjZv1heHd 20 | ut62VE4H 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /test/data/tls/dns.rethinkdns.localhost.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCspJpBoYV/aj+4 3 | DdvuaUD11NssvuGrd+PQJD0GMLMyv473JI67NqQKEWS2gHKcCHbXdFJeO6GebMYg 4 | 
qC/iTYB6j1NDy2nDfwfzaAA7UexhQSsztJ4tACwQgPYaxYO9aRR8+lUM8Np+k6T3 5 | xWKHxOJFJVFqPUwayD4RRLwuZy/QPctw90Ljd/T2YuJDbie3+aVV3HMQE683B2iq 6 | rg0nUGxVP4jkNzT8XMpBDRBpxDe2X667G0S6og8K9jbfeRNZYCRBx8wF2oP17gZW 7 | fCKIU5qTa2MvTj38Eu1Ll2n1anAIgnYqD5GRH/J1IU9L1Gsgr7GHl/9jkH37/TOa 8 | sg+/PIVBAgMBAAECggEBAKu7RLJ6YFghMWb9akOasYKYDLlS2xp8tMFr+sP4l5io 9 | kibnV4+Ex+I38Q3Vcawig0zIqdKQ0LDNlLwRcShUXbQMdBAg/ID6EMegMXLQiCkp 10 | 8TaKuTkcTbjQ/34b8XLtXvL+9LFduIpmhZft4ZgaUdXkTDs45EZT3+G/lM032Yjy 11 | /LkX5PBE3eDgewRb5qHXAi3P7dYi19AahPPpC3Izb4TQ8iEF1kvA8lYq2cCddVcy 12 | 1On/P+GeeK9UFs8BODl6GpAmEPPqYaDTR2sdZzpnzE2Tl5pMcF6Xi3fWEp1G1E25 13 | Bx/azjf4Mm1aqgcrSo9nE9fUkgr8wv1ke9Whkndfpx0CgYEA2Yu9R/X1CgtmV7Dc 14 | AjG9TDO5aRHN3r1zoT7BX5vOfZI1PQGVjjFdgwI6vaG8cFR2DwP14ZnKqg5sIJbt 15 | 4imy3HahESD+UbgWTEKm3ZBxtqJUo5sRalqeAIM75YTCOUiHmvCOTeC8yc+NDoT2 16 | PULH9DP97xwvX4r1oc3CfQboQC8CgYEAyyjxn2H6EVgHIfO9LYZGwyYwZFb2qNo7 17 | tKw6VtltDukdJFZdp9bQ9r02d9J7p5ZoMi2y/hXNKeUauxOIuaHc1ddUg343iMpG 18 | EoqolWd5QDcRKN49g+P2o4hCmGc2Ck2HSKAOigFf4tbzswX/8KKxTbhJVp2choYk 19 | jKDZwTgMRY8CgYEA1c7kYkRW599aX5cgNDvke29eq+hmuKLkcTa3YRtFr96x2lqj 20 | PVRBPtBDITtDc/de7NUMpQS8zbRNCx6rgBtRgiJJnQcbTP+rUpozXBFp4YDbxMxU 21 | Kn4TShexF5wKM7iYJmyEv//ALGyDiyCczorC8Lzkt0uxN6rgTX1nx++w9dsCgYEA 22 | sNEW0oAElDnII22utcOLLOe88Geb0EP3+Px43gPoXjTXjzu30Y8uWcE+ebTwuGA4 23 | mYuaoHe7E3558F5E/kN5H4iE3tjoXp6ltBeIOFhReGk0/xvF8Fdk3tmGFHyn1Pm7 24 | B6N+Y/BkUtqAScyr//SpWhpNghw1EgQIJXkNbJRqYdMCgYBpfGTalaUlGpiGV/6M 25 | 9m21XNoEZpK114EjBRO2MflXLgZbPGiyIouVjzTq9L/CnsdjvDvysqBE6vYINCrJ 26 | O8t/jQtbF9zkzbBFmjzX7C3/GppsJeAboHeQoNmugum76zZ1Ryu7HxgA9uX1VxFA 27 | 8FIO/QJpXELq1G8eC0uw9FRDkg== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /test/data/tls/domains.ext: -------------------------------------------------------------------------------- 1 | authorityKeyIdentifier=keyid,issuer 2 | basicConstraints=CA:FALSE 3 | keyUsage = digitalSignature 4 | extendedKeyUsage = serverAuth 5 | 
subjectAltName = @alt_names 6 | [alt_names] 7 | DNS.1 = dns.rethinkdns.localhost 8 | DNS.2 = *.dns.rethinkdns.localhost 9 | -------------------------------------------------------------------------------- /test/manual/proxy-proto.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This is a proxy proto server that prepends proxy proto header to a new 3 | * connection and forwards it to the upstream server. 4 | */ 5 | 6 | import net from "net"; 7 | import proxyProtocol from "proxy-protocol-js"; 8 | 9 | const CLIENT_PORT = 20000; 10 | const UPSTREAM_PORT = 10000; 11 | 12 | const src = new proxyProtocol.Peer("localhost", CLIENT_PORT); 13 | const dst = new proxyProtocol.Peer("localhost", UPSTREAM_PORT); 14 | const protocolText = new proxyProtocol.V1ProxyProtocol( 15 | proxyProtocol.INETProtocol.TCP4, 16 | src, 17 | dst 18 | ).build(); 19 | console.log(protocolText); // => PROXY TCP4 127.0.0.1 192.0.2.1 12345 54321\r\n 20 | 21 | const server = net 22 | .createServer(serveConnection) 23 | .listen(CLIENT_PORT, () => console.log(server.address())); 24 | 25 | function serveConnection(clientSocket) { 26 | const upSocket = net.connect( 27 | { 28 | host: "localhost", 29 | port: UPSTREAM_PORT, 30 | // servername: "dns.rethinkdns.localhost", 31 | }, 32 | () => { 33 | console.log("connected to up"); 34 | if (!upSocket.destroyed) 35 | upSocket.write(Buffer.from(protocolText, "ascii")); 36 | 37 | clientSocket.pipe(upSocket); 38 | upSocket.pipe(clientSocket); 39 | } 40 | ); 41 | 42 | upSocket.on("error", (err) => { 43 | console.log("upSocket error", err); 44 | clientSocket.end(); 45 | }); 46 | 47 | clientSocket.on("error", (err) => { 48 | console.log("client socket error", err); 49 | upSocket.destroy(); 50 | }); 51 | } 52 | -------------------------------------------------------------------------------- /webpack.config.cjs: -------------------------------------------------------------------------------- 1 | const webpack = 
require("webpack"); 2 | const NodePolyfillPlugin = require("node-polyfill-webpack-plugin"); 3 | 4 | // developers.cloudflare.com/workers/cli-wrangler/configuration#modules 5 | // archive.is/FDky9 6 | module.exports = { 7 | entry: "./src/server-workers.js", 8 | target: ["webworker", "es2022"], 9 | mode: "production", 10 | // enable devtool in development 11 | // devtool: 'eval-cheap-module-source-map', 12 | 13 | // gist.github.com/ef4/d2cf5672a93cf241fd47c020b9b3066a 14 | resolve: { 15 | fallback: { 16 | // buffer polyfill: archive.is/7OBM7 17 | buffer: require.resolve("buffer/"), 18 | }, 19 | }, 20 | 21 | plugins: [ 22 | // remove "node:" prefix from imports as target is webworker 23 | // stackoverflow.com/a/73351738 and github.com/vercel/next.js/issues/28774 24 | // github.com/Avansai/next-multilingual/blob/aaad6a7204/src/config/index.ts#L750 25 | new webpack.NormalModuleReplacementPlugin(/node:/, (resource) => { 26 | resource.request = resource.request.replace(/^node:/, ""); 27 | }), 28 | new webpack.ProvidePlugin({ 29 | Buffer: ["buffer", "Buffer"], 30 | }), 31 | new webpack.IgnorePlugin({ 32 | resourceRegExp: 33 | // eslint-disable-next-line max-len 34 | /(^dgram$)|(^http2$)|(\/deno\/.*\.ts$)|(.*-deno\.ts$)|(.*\.deno\.ts$)|(\/node\/.*\.js$)|(.*-node\.js$)|(.*\.node\.js$)/, 35 | }), 36 | // stackoverflow.com/a/65556946 37 | new NodePolyfillPlugin(), 38 | ], 39 | 40 | optimization: { 41 | usedExports: true, 42 | minimize: false, 43 | }, 44 | 45 | experiments: { 46 | outputModule: true, 47 | }, 48 | 49 | // stackoverflow.com/a/68916455 50 | output: { 51 | library: { 52 | type: "module", 53 | }, 54 | filename: "worker.js", 55 | module: true, 56 | }, 57 | }; 58 | -------------------------------------------------------------------------------- /webpack.fastly.cjs: -------------------------------------------------------------------------------- 1 | const webpack = require("webpack"); 2 | const NodePolyfillPlugin = require("node-polyfill-webpack-plugin"); 3 | 4 | 
module.exports = { 5 | entry: "./src/server-fastly.js", 6 | target: ["webworker", "es2020"], 7 | mode: "production", 8 | // enable devtool in development 9 | // devtool: 'eval-cheap-module-source-map', 10 | 11 | // gist.github.com/ef4/d2cf5672a93cf241fd47c020b9b3066a 12 | resolve: { 13 | fallback: { 14 | // buffer polyfill: archive.is/7OBM7 15 | buffer: require.resolve("buffer/"), 16 | }, 17 | }, 18 | 19 | plugins: [ 20 | new webpack.ProvidePlugin({ 21 | Buffer: ["buffer", "Buffer"], 22 | }), 23 | new webpack.IgnorePlugin({ 24 | resourceRegExp: 25 | // eslint-disable-next-line max-len 26 | /(^dgram$)|(^http2$)|(\/deno\/.*\.ts$)|(.*-deno\.ts$)|(.*\.deno\.ts$)|(\/node\/.*\.js$)|(.*-node\.js$)|(.*\.node\.js$)/, 27 | }), 28 | // stackoverflow.com/a/65556946 29 | new NodePolyfillPlugin(), 30 | ], 31 | 32 | optimization: { 33 | usedExports: true, 34 | minimize: false, 35 | }, 36 | 37 | experiments: { 38 | outputModule: true, 39 | }, 40 | 41 | // stackoverflow.com/a/68916455 42 | output: { 43 | library: { 44 | type: "module", 45 | }, 46 | filename: "fastly.js", 47 | module: true, 48 | }, 49 | externals: [ 50 | ({ request }, callback) => { 51 | // Allow Webpack to handle fastly:* namespaced module imports by treating 52 | // them as modules rather than try to process them as URLs 53 | if (/^fastly:.*$/.test(request)) { 54 | return callback(null, "commonjs " + request); 55 | } 56 | callback(); 57 | }, 58 | ], 59 | }; 60 | -------------------------------------------------------------------------------- /webpack.fly.cjs: -------------------------------------------------------------------------------- 1 | const webpack = require("webpack"); 2 | 3 | module.exports = { 4 | entry: "./src/server-node.js", 5 | target: ["node22", "es2022"], 6 | mode: "production", 7 | // enable devtool in development 8 | // devtool: 'eval-cheap-module-source-map', 9 | 10 | plugins: [ 11 | new webpack.IgnorePlugin({ 12 | resourceRegExp: /(\/deno\/.*\.ts$)|(.*-deno\.ts$)|(.*\.deno\.ts$)/, 13 | }), 14 
| // stackoverflow.com/a/60325769 15 | new webpack.optimize.LimitChunkCountPlugin({ 16 | maxChunks: 1, 17 | }), 18 | ], 19 | 20 | /* externalsType: 'module', 21 | externals: { 22 | '@riaskov/mmap-io': '@riaskov/mmap-io', 23 | },*/ 24 | externals: /@riaskov/, 25 | 26 | optimization: { 27 | usedExports: true, 28 | minimize: false, 29 | }, 30 | 31 | experiments: { 32 | outputModule: true, 33 | }, 34 | 35 | // github.com/webpack/webpack/issues/14072 36 | // see also: src/core/node/config.js 37 | /* node: { 38 | global: true, 39 | __filename: true, 40 | __dirname: true, 41 | },*/ 42 | 43 | // require missing: github.com/webpack/webpack/issues/16029 44 | 45 | // github.com/webpack/webpack/issues/13290 46 | // stackoverflow.com/a/68916455 47 | output: { 48 | library: { 49 | type: "module", 50 | }, 51 | clean: true, 52 | filename: "fly.mjs", 53 | module: true, 54 | }, 55 | 56 | // or, cjs: stackoverflow.com/a/68916455 57 | /* 58 | output: { 59 | filename: "fly.cjs", 60 | clean: true, // empty dist before output 61 | }, 62 | */ 63 | }; 64 | -------------------------------------------------------------------------------- /wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "serverless-dns" 2 | main = "./dist/worker.js" 3 | workers_dev = true 4 | # logpush job is setup to capture from envs ending: log, pro, or one 5 | logpush = false 6 | # use node_compat or custom build 7 | # node_compat = true 8 | compatibility_date = "2023-03-21" 9 | send_metrics = false 10 | minify = false 11 | upload_source_maps = true 12 | 13 | # uncomment to enable analytics on serverless-dns 14 | # this binding is not inherited by other worker-envs 15 | #analytics_engine_datasets = [ 16 | # { binding = "METRICS", dataset = "SDNS_M0" }, 17 | # { binding = "BL_METRICS", dataset = "SDNS_BL0" } 18 | #] 19 | 20 | [build] 21 | command = "npm run build" 22 | 23 | [[rules]] 24 | type = "ESModule" 25 | globs = ["**/*.js"] 26 | 27 | [vars] 28 | LOG_LEVEL = 
"debug" 29 | WORKER_ENV = "development" 30 | CLOUD_PLATFORM = "cloudflare" 31 | 32 | ################## 33 | #------PROD------# 34 | ################## 35 | # wrangler doesn't yet support Workers Services and the new "Environments" 36 | # github.com/cloudflare/cloudflare-docs/pull/3175/files 37 | # github.com/cloudflare/wrangler2/issues/27 38 | # developers.cloudflare.com/workers/platform/environments 39 | [env.prod] 40 | name = "basic-unbound" 41 | minify = true 42 | routes = [ 43 | "sky.rethinkdns.com/*", 44 | "sky.bravedns.com/*", 45 | "basic.rethinkdns.com/*", 46 | "basic.bravedns.com/*", 47 | "free.bravedns.com/*", 48 | ] 49 | # vars are not inherited 50 | # workers/platform/environment-variables#environment-variables-via-wrangler 51 | [env.prod.vars] 52 | LOG_LEVEL = "info" 53 | WORKER_ENV = "production" 54 | CLOUD_PLATFORM = "cloudflare" 55 | 56 | ################## 57 | #------ONE-------# 58 | ################## 59 | [env.one] 60 | name = "dns-one" 61 | logpush = true 62 | minify = true 63 | routes = [ 64 | "one.rethinkdns.com/*", 65 | "one.bravedns.com/*", 66 | ] 67 | analytics_engine_datasets = [ 68 | { binding = "METRICS", dataset = "ONE_M0" }, 69 | { binding = "BL_METRICS", dataset = "ONE_BL0" } 70 | ] 71 | 72 | [env.one.vars] 73 | # just the error and request logs 74 | LOG_LEVEL = "logpush" 75 | WORKER_ENV = "production" 76 | CLOUD_PLATFORM = "cloudflare" 77 | CF_LOGPUSH_R2_PATH = "qlog/" 78 | 79 | ################## 80 | #-----SECRETS----# 81 | ################## 82 | # only for documentation purposes 83 | # developers.cloudflare.com/workers/platform/environment-variables/#adding-secrets-via-wrangler 84 | # [secrets] 85 | # GW_IP4 = "" 86 | # GW_IP6 = "" 87 | # CF_ACCOUNT_ID = "" 88 | # CF_API_TOKEN = "" 89 | # CF_LOGPUSH_R2_SECRET_KEY = "" 90 | # CF_LOGPUSH_R2_ACCESS_KEY = "" 91 | --------------------------------------------------------------------------------