├── .github └── workflows │ ├── branch.yml │ ├── helm.yaml │ ├── master.yml │ └── release.yml ├── .gitignore ├── .mocharc.json ├── .versionrc.json ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── build.js ├── deps └── docker-compose.yaml ├── docker └── entrypoint.sh ├── examples ├── exposr-ssh.sh └── helm-values-sample.yaml ├── exposrd.sh ├── exposrd.ts ├── helm ├── .helmignore ├── Chart.yaml ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── deployment.yaml │ ├── ingress-admin.yaml │ ├── ingress.yaml │ ├── service-headless.yaml │ ├── service.yaml │ └── serviceaccount.yaml └── values.yaml ├── package.json ├── scripts ├── build-version.sh ├── bump-helm.cjs ├── gen-build-env.sh ├── gen-build-js.sh ├── run-test.sh └── test-deps.sh ├── src ├── account │ ├── account-service.ts │ ├── account-tunnel-service.ts │ └── account.ts ├── cluster │ ├── cluster-manager.ts │ ├── cluster-node.ts │ ├── discovery-method.ts │ ├── eventbus-interface.ts │ ├── eventbus.ts │ ├── kubernetes-discovery.ts │ ├── memory-eventbus.ts │ ├── multicast-discovery.ts │ ├── redis-eventbus.ts │ └── udp-eventbus.ts ├── config.js ├── controller │ ├── admin-api-controller.ts │ ├── admin-controller.ts │ ├── api-controller.ts │ └── koa-controller.ts ├── index.ts ├── ingress │ ├── http-ingress.ts │ ├── ingress-base.ts │ ├── ingress-manager.ts │ ├── ingress-service.ts │ ├── sni-ingress.ts │ └── utils.ts ├── listener │ ├── http-listener.ts │ └── listener.ts ├── lock │ ├── index.ts │ ├── lock-provider.ts │ ├── memory-lock-provider.ts │ └── redis-lock-provider.ts ├── logger.js ├── self-test.js ├── storage │ ├── memory-storage-provider.ts │ ├── pgsql-storage-provider.ts │ ├── redis-storage-provider.ts │ ├── serializer.ts │ ├── sqlite-storage-provider.ts │ ├── storage-manager.ts │ ├── storage-provider.ts │ └── storage.ts ├── transport │ ├── cluster │ │ └── cluster-transport.ts │ ├── ssh │ │ ├── index.ts │ │ ├── ssh-endpoint.ts │ │ └── ssh-transport.ts │ ├── 
transport-endpoint.ts │ ├── transport-service.ts │ ├── transport.ts │ └── ws │ │ ├── index.ts │ │ ├── ws-endpoint.ts │ │ └── ws-transport.ts ├── tunnel │ ├── altname-service.ts │ ├── tunnel-config.ts │ ├── tunnel-connection-manager.ts │ ├── tunnel-service.ts │ └── tunnel.ts ├── utils │ ├── errors.js │ ├── hostname.ts │ ├── http-captor.ts │ ├── http-headers.js │ ├── misc.ts │ └── mutex.ts └── version.js ├── test ├── e2e │ ├── e2e-utils.js │ ├── test_api.js │ ├── test_cluster.js │ ├── test_ssh.js │ └── test_ws.js ├── env.js ├── system │ ├── eventbus │ │ └── test_redis.js │ ├── lock │ │ └── test_redis.js │ └── storage │ │ ├── test_pgsql.ts │ │ ├── test_redis.ts │ │ └── test_sqlite.ts └── unit │ ├── account │ └── test_account-service.ts │ ├── cluster │ ├── test_cluster-eventbus.js │ ├── test_cluster-service.js │ └── test_cluster-transport.ts │ ├── config │ └── test_config.js │ ├── fixtures │ ├── cn-private-key.pem │ ├── cn-public-cert.pem │ ├── no-wildcard-private-key.pem │ ├── no-wildcard-public-cert.pem │ ├── san-private-key.pem │ └── san-public-cert.pem │ ├── ingress │ ├── test_http_ingress.ts │ ├── test_sni-ingress.ts │ ├── test_utils.js │ └── utils.ts │ ├── listener │ └── test_http-listener.ts │ ├── lock │ └── test_inmem.js │ ├── storage │ ├── test_serialize.ts │ └── test_storage.ts │ ├── test-utils.ts │ ├── transport │ ├── test_ssh-endpoint.ts │ ├── test_ssh_transport.ts │ └── test_ws_transport.ts │ ├── tunnel │ ├── test_altname-service.ts │ ├── test_tunnel-connection-manager.ts │ └── test_tunnel-service.ts │ └── utils │ └── test_hostname.js ├── tools ├── echo-server │ ├── .gitignore │ ├── package.json │ ├── src │ │ └── echo-server.ts │ ├── tsconfig.json │ └── yarn.lock └── migrate │ ├── .gitignore │ ├── README.md │ ├── migrate.js │ ├── package.json │ └── yarn.lock ├── tsconfig.json └── yarn.lock /.github/workflows/branch.yml: -------------------------------------------------------------------------------- 1 | name: branch 2 | on: 3 | push: 4 | branches: 5 | - 
'*' 6 | - '!master' 7 | 8 | workflow_dispatch: 9 | inputs: 10 | publish_container: 11 | description: 'Publish container' 12 | required: true 13 | default: 'false' 14 | 15 | jobs: 16 | build_and_test: 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | 22 | - name: Run tests 23 | run: | 24 | yarn install --frozen-lockfile 25 | yarn bundle 26 | yarn run test test 27 | 28 | - name: Prepare builder 29 | run: make builder.build 30 | 31 | - name: Build package 32 | run: | 33 | make package.build.container 34 | 35 | - name: Build and smoke test docker image 36 | run: | 37 | make image.build 38 | docker run --rm -t $(make get.image) --version | grep $(make get.version) 39 | docker run --rm -t -e EXPOSR_SELF_TEST=1 $(make get.image) 40 | 41 | - name: Set up QEMU 42 | if: github.event.inputs.publish_container == 'true' 43 | uses: docker/setup-qemu-action@v2 44 | 45 | - name: Set up Docker Buildx 46 | if: github.event.inputs.publish_container == 'true' 47 | id: buildx 48 | uses: docker/setup-buildx-action@v2 49 | 50 | - name: Login to ghcr.io 51 | if: github.event.inputs.publish_container == 'true' 52 | uses: docker/login-action@v2 53 | with: 54 | registry: ghcr.io 55 | username: ${{ github.actor }} 56 | password: ${{ secrets.GITHUB_TOKEN }} 57 | 58 | - name: Publish versioned container to ghcr.io 59 | if: github.event.inputs.publish_container == 'true' 60 | run: | 61 | make publish=true registry=ghcr.io/exposr image.xbuild -------------------------------------------------------------------------------- /.github/workflows/helm.yaml: -------------------------------------------------------------------------------- 1 | name: helm release 2 | on: 3 | workflow_run: 4 | workflows: ["release"] 5 | types: 6 | - completed 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Checkout helm chart repo 19 | uses: actions/checkout@v3 20 | 
with: 21 | repository: exposr/helm-charts 22 | path: helm-charts 23 | token: ${{ secrets.HELM_CHART_REPO_TOKEN }} 24 | 25 | - name: Copy source chart 26 | run: | 27 | rm -fr helm-charts/charts/exposr 28 | cp -rp helm helm-charts/charts/exposr 29 | 30 | - name: Publish chart 31 | run: | 32 | cd helm-charts 33 | git config user.name "$GITHUB_ACTOR" 34 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 35 | git add . 36 | git commit -m 'chore: update exposr chart' 37 | git push 38 | -------------------------------------------------------------------------------- /.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | name: master 2 | on: 3 | push: 4 | branches: 5 | - master 6 | 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Set up git 15 | uses: actions/checkout@v3 16 | 17 | - name: Setup builder 18 | run: make builder.build 19 | 20 | - name: Build package 21 | run: | 22 | make package.build.container 23 | 24 | - name: Save package 25 | uses: actions/upload-artifact@v3 26 | with: 27 | name: package 28 | path: dist 29 | retention-days: 1 30 | 31 | dist_image: 32 | runs-on: ubuntu-latest 33 | needs: build 34 | 35 | steps: 36 | - name: Get package 37 | uses: actions/download-artifact@v3 38 | with: 39 | name: package 40 | 41 | - name: Set up QEMU 42 | uses: docker/setup-qemu-action@v2 43 | 44 | - name: Set up Docker Buildx 45 | id: buildx 46 | uses: docker/setup-buildx-action@v2 47 | 48 | - name: Available platforms 49 | run: echo ${{ steps.buildx.outputs.platforms }} 50 | 51 | - name: Extract package 52 | run: | 53 | tar xvf exposrd-*.tgz --strip-components=1 54 | mkdir -p dist 55 | mv exposrd-*.tgz dist/ 56 | 57 | - name: Login to ghcr.io 58 | uses: docker/login-action@v2 59 | with: 60 | registry: ghcr.io 61 | username: ${{ github.actor }} 62 | password: ${{ secrets.GITHUB_TOKEN }} 63 | 64 | - name: Publish versioned container to ghcr.io 
65 | run: | 66 | make publish=true registry=ghcr.io/exposr image.xbuild 67 | 68 | - name: Publish unstable container to ghcr.io 69 | run: | 70 | make registry=ghcr.io/exposr image.xbuild.unstable 71 | 72 | summary: 73 | runs-on: ubuntu-latest 74 | needs: [dist_image] 75 | 76 | steps: 77 | - name: Package artifact 78 | uses: actions/download-artifact@v3 79 | with: 80 | name: package 81 | 82 | - name: Build summary 83 | run: | 84 | ls 85 | 86 | - name: Save artifacts 87 | uses: actions/upload-artifact@v3 88 | with: 89 | name: exposrd-unstable 90 | path: exposr-* 91 | retention-days: 7 -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | on: 3 | push: 4 | tags: 5 | - 'v*' 6 | 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Set up git 15 | uses: actions/checkout@v3 16 | 17 | - name: Setup builder 18 | run: make builder.build 19 | 20 | - name: Build package 21 | run: | 22 | make package.build.container 23 | 24 | - name: Save package 25 | uses: actions/upload-artifact@v3 26 | with: 27 | name: package 28 | path: dist 29 | retention-days: 1 30 | 31 | dist_image: 32 | runs-on: ubuntu-latest 33 | needs: build 34 | 35 | steps: 36 | - name: Get package 37 | uses: actions/download-artifact@v3 38 | with: 39 | name: package 40 | 41 | - name: Set up QEMU 42 | uses: docker/setup-qemu-action@v2 43 | 44 | - name: Set up Docker Buildx 45 | id: buildx 46 | uses: docker/setup-buildx-action@v2 47 | 48 | - name: Available platforms 49 | run: echo ${{ steps.buildx.outputs.platforms }} 50 | 51 | - name: Extract package 52 | run: | 53 | tar xvf exposrd-*.tgz --strip-components=1 54 | mkdir -p dist 55 | mv exposrd-*.tgz dist/ 56 | 57 | - name: Login to ghcr.io 58 | uses: docker/login-action@v2 59 | with: 60 | registry: ghcr.io 61 | username: ${{ github.actor }} 62 | 
password: ${{ secrets.GITHUB_TOKEN }} 63 | 64 | - name: Publish versioned container to ghcr.io 65 | run: | 66 | make publish=true registry=ghcr.io/exposr image.xbuild 67 | 68 | - name: Publish latest container to ghcr.io 69 | run: | 70 | make registry=ghcr.io/exposr image.xbuild.latest 71 | 72 | - name: Login to docker.io 73 | uses: docker/login-action@v2 74 | with: 75 | username: ${{ secrets.DOCKER_USERNAME }} 76 | password: ${{ secrets.DOCKER_ACCESS_TOKEN }} 77 | 78 | - name: Publish versioned container to docker.io 79 | run: | 80 | make registry=docker.io/exposr publish=true image.xbuild 81 | 82 | - name: Publish latest container to docker.io 83 | run: | 84 | make registry=docker.io/exposr image.xbuild.latest 85 | 86 | - name: Publish compatibility images to docker.io 87 | run: | 88 | docker buildx imagetools create --tag docker.io/exposr/exposr-server:$(make get.version) docker.io/exposr/exposrd:$(make get.version) 89 | docker buildx imagetools create --tag docker.io/exposr/exposr-server:latest docker.io/exposr/exposr-server:$(make get.version) 90 | 91 | release: 92 | runs-on: ubuntu-latest 93 | needs: [dist_image] 94 | 95 | steps: 96 | - name: Package artifact 97 | uses: actions/download-artifact@v3 98 | with: 99 | name: package 100 | 101 | - name: Generate SHA256 sums for artifacts 102 | run: | 103 | ls exposrd-* | xargs -L 1 -I {} sh -c "sha256sum {} > {}.sha256" 104 | 105 | - name: Build artifacts 106 | run: | 107 | ls 108 | 109 | - uses: "marvinpinto/action-automatic-releases@latest" 110 | with: 111 | repo_token: "${{ secrets.GITHUB_TOKEN }}" 112 | prerelease: ${{ startsWith(github.ref, '/refs/tags/v0') }} 113 | files: | 114 | exposrd-* 115 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.tgz 3 | build.env 4 | build.js 5 | exposrd.mjs 6 | dist 7 | out 
-------------------------------------------------------------------------------- /.mocharc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extension": ["js", "ts"], 3 | "require": "ts-node/register", 4 | "node-option": [ 5 | "experimental-specifier-resolution=node", 6 | "loader=ts-node/esm" 7 | ] 8 | } -------------------------------------------------------------------------------- /.versionrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "bumpFiles": [ 3 | { 4 | "filename": "package.json", 5 | "type": "json" 6 | }, 7 | { 8 | "filename": "helm/Chart.yaml", 9 | "updater": "scripts/bump-helm.cjs" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NODE_VERSION 2 | ARG ALPINE_VERSION=3.19 3 | FROM node:${NODE_VERSION}-alpine${ALPINE_VERSION} AS builder 4 | RUN apk add \ 5 | build-base \ 6 | cmake \ 7 | python3 \ 8 | curl \ 9 | git 10 | RUN curl -sf https://gobinaries.com/tj/node-prune | sh 11 | RUN touch /.yarnrc && chmod 666 /.yarnrc 12 | RUN mkdir /.npm && chmod 777 /.npm 13 | RUN npm -g install node-gyp 14 | WORKDIR /workdir 15 | CMD ["/bin/sh"] 16 | 17 | FROM builder AS dist 18 | ENV NODE_ENV=production 19 | ARG DIST_SRC 20 | COPY ${DIST_SRC} /exposrd.tgz 21 | RUN tar xvf /exposrd.tgz -C / 22 | WORKDIR /package 23 | RUN yarn install --production --no-default-rc --frozen-lockfile 24 | RUN node-prune 25 | 26 | FROM alpine:${ALPINE_VERSION} as runner 27 | ENV NODE_ENV=production 28 | COPY --from=dist /usr/local/bin/node /bin/node 29 | COPY --from=dist /usr/lib/libstdc++.so.6 /usr/lib/libstdc++.so.6 30 | COPY --from=dist /usr/lib/libgcc_s.so.1 /usr/lib/libgcc_s.so.1 31 | COPY --from=dist /package/exposrd.mjs /app/exposrd.mjs 32 | COPY --from=dist /package/node_modules /app/node_modules 33 | RUN mkdir -p 
/entrypoint-initdb.d 34 | COPY docker/entrypoint.sh /entrypoint.sh 35 | WORKDIR /app 36 | EXPOSE 8080 37 | EXPOSE 8081 38 | 39 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Fredrik Lindberg 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | registry?=exposr 2 | node_version=20.11.0 3 | alpine_version=3.19 4 | platforms?=linux/amd64,linux/arm64 5 | 6 | project:=exposrd 7 | version=$(shell [ -e build.env ] && . 
./build.env 2> /dev/null && echo $${EXPOSR_BUILD_VERSION} || git describe --tags --always --dirty 2> /dev/null || git rev-parse --short HEAD) 8 | commit=$(shell [ -e build.env ] && . ./build.env 2> /dev/null && echo $${BUILD_GIT_COMMIT} || git rev-parse --short HEAD) 9 | package_name=$(project)-$(version).tgz 10 | 11 | # 12 | # Available make targets 13 | # 14 | # all - Defaults to building a release tarball and a container image for the host platform. 15 | # 16 | # package.build - Creates release tarball 17 | # image.build - Build container image for host platform 18 | # image.xbuild - Build container images for supported platforms 19 | 20 | all: package.build.container image.build 21 | clean: dist.clean 22 | docker buildx rm exposrd-builder || true 23 | rm -fr node_modules 24 | 25 | get.version: 26 | @echo $(version) 27 | 28 | define docker.run 29 | docker run --rm -i \ 30 | -u $(shell id -u):$(shell id -g) \ 31 | -v ${PWD}:/workdir \ 32 | $(project)-builder \ 33 | $1 $2 $3 $4 $5 $6 $7 $8 $9 34 | endef 35 | 36 | # Wraps any call and runs inside builder container 37 | %.container: builder.build 38 | $(call docker.run, make $(subst .container,,$@)) 39 | 40 | package.build: 41 | yarn install --no-default-rc --frozen-lockfile 42 | mkdir -p dist 43 | yarn pack --no-default-rc --frozen-lockfile --filename dist/$(package_name) 44 | 45 | dist/exposrd-$(version).tgz: 46 | make package.build.container 47 | 48 | bundle.build: 49 | yarn install --no-default-rc --frozen-lockfile 50 | yarn run bundle 51 | 52 | dist.clean: 53 | rm -fr dist 54 | 55 | # Builder image 56 | builder.build: 57 | docker build \ 58 | --build-arg NODE_VERSION=${node_version} \ 59 | --build-arg ALPINE_VERSION=${alpine_version} \ 60 | -t $(project)-builder --target builder . 
61 | 62 | # Docker image build targets 63 | image.build: 64 | docker build \ 65 | -f Dockerfile \ 66 | --progress plain \ 67 | --build-arg NODE_VERSION=${node_version} \ 68 | --build-arg ALPINE_VERSION=${alpine_version} \ 69 | --build-arg VERSION=${version} \ 70 | --build-arg DIST_SRC=dist/exposrd-$(version).tgz \ 71 | --label "org.opencontainers.image.source=https://github.com/exposr/exposrd" \ 72 | --label "org.opencontainers.image.version=$(version)" \ 73 | --label "org.opencontainers.image.revision=$(commit)" \ 74 | --label "org.opencontainers.image.description=exposrd version $(version) commit $(commit)" \ 75 | -t $(project):$(version) \ 76 | . 77 | 78 | get.image: 79 | @echo $(project):$(version) 80 | 81 | ifneq (, $(publish)) 82 | push_flag=--push 83 | endif 84 | image.xbuild: 85 | docker buildx create --name exposrd-builder --driver docker-container || true 86 | docker buildx build \ 87 | --builder exposrd-builder \ 88 | -f Dockerfile \ 89 | --progress plain \ 90 | --platform $(platforms) \ 91 | $(push_flag) \ 92 | --build-arg NODE_VERSION=${node_version} \ 93 | --build-arg ALPINE_VERSION=${alpine_version} \ 94 | --build-arg VERSION=${version} \ 95 | --build-arg DIST_SRC=dist/exposrd-$(version).tgz \ 96 | --label "org.opencontainers.image.source=https://github.com/exposr/exposrd" \ 97 | --label "org.opencontainers.image.version=$(version)" \ 98 | --label "org.opencontainers.image.revision=$(commit)" \ 99 | --label "org.opencontainers.image.description=exposrd version $(version) commit $(commit)" \ 100 | -t $(registry)/$(project):$(version) \ 101 | . 
102 | 103 | image.xbuild.latest: 104 | docker buildx imagetools create --tag $(registry)/$(project):latest $(registry)/$(project):$(version) 105 | 106 | image.xbuild.unstable: 107 | docker buildx imagetools create --tag $(registry)/$(project):unstable $(registry)/$(project):$(version) 108 | 109 | .PHONY: release release.publish builder.build image.build image.xbuild image.xbuild.latest image.xbuild.unstable -------------------------------------------------------------------------------- /build.js: -------------------------------------------------------------------------------- 1 | export const BUILD_VERSION = undefined; 2 | export const BUILD_GIT_BRANCH = undefined; 3 | export const BUILD_GIT_COMMIT = undefined; 4 | export const BUILD_DATE = undefined; 5 | export const BUILD_USER = undefined; 6 | export const BUILD_MACHINE = undefined; 7 | -------------------------------------------------------------------------------- /deps/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | redis: 4 | image: "redis:alpine" 5 | ports: 6 | - "6379:6379" 7 | healthcheck: 8 | test: ["CMD-SHELL", "redis-cli ping | grep PONG"] 9 | interval: 2s 10 | timeout: 3s 11 | retries: 10 12 | postgres: 13 | image: "postgres:15-alpine" 14 | ports: 15 | - "5432:5432" 16 | environment: 17 | POSTGRES_PASSWORD: password 18 | POSTGRES_DB: exposr 19 | healthcheck: 20 | test: ["CMD-SHELL", "pg_isready"] 21 | interval: 2s 22 | timeout: 5s 23 | retries: 10 -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for file in $(ls /entrypoint-initdb.d) 4 | do 5 | ./entrypoint-initdb.d/$file 6 | done 7 | 8 | exec node ${NODE_ARGS} exposrd.mjs $@ -------------------------------------------------------------------------------- /examples/exposr-ssh.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Example of how to use SSH as an exposr client, requires curl, jq, and ssh. 4 | # Tunnel and account must be setup prior 5 | # 6 | 7 | EXPOSR_SERVER=${EXPOSR_SERVER:-http://localhost:8080} 8 | EXPOSR_ACCOUNT=${EXPOSR_ACCOUNT:-AABBCCDDEEFF0001} 9 | EXPOSR_TUNNEL=${EXPOSR_TUNNEL:-my-tunnel} 10 | DESTINATION=${DESTINATION:-example.com:80} 11 | 12 | loop=true 13 | trap stopfn SIGINT 14 | stopfn() { 15 | loop=false 16 | exit 17 | } 18 | 19 | get_token() { 20 | echo $(curl -s ${EXPOSR_SERVER}/v1/account/${EXPOSR_ACCOUNT}/token | jq -r .token) 21 | } 22 | 23 | # NB: THIS DOES NOT PERFORM ANY SANITY CHECKING ON THE URL, A MALICIOUS SERVER COULD INJECT COMMANDS 24 | get_url() { 25 | token=$(get_token) 26 | echo $(curl -s -H "Authorization: Bearer ${token}" ${EXPOSR_SERVER}/v1/tunnel/${EXPOSR_TUNNEL} | jq -r .transport.ssh.url) 27 | } 28 | 29 | while ${loop}; do 30 | echo "Press Ctrl-C twice to disconnect" 31 | echo "" 32 | ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" -R ${DESTINATION}:${DESTINATION} "$(get_url)" 33 | sleep 2 34 | done -------------------------------------------------------------------------------- /examples/helm-values-sample.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 3 2 | 3 | exposr: 4 | admin: 5 | enabled: false 6 | apiKey: "changeMeToASecretKey" 7 | ingress: 8 | http: 9 | enabled: true 10 | domain: https://tunnel.example.com 11 | sni: 12 | enabled: true 13 | cert: example-com-cert # Uses the same certificate as the ingress 14 | transport: 15 | ws: 16 | enabled: true 17 | ssh: 18 | enabled: true 19 | host: ssh.example.com 20 | # key: "" # Set to Base64 encoded OpenSSH PEM private key for a static host key 21 | # logLevel: debug 22 | # allowRegistration: true 23 | # redisUrl: redis://:secretRedisPassword@redis-master.default.svc.cluster.local 24 | 25 | ingress: 26 | 
enabled: true 27 | hosts: 28 | - host: api.example.com 29 | paths: ["/"] 30 | - host: "*.tunnel.example.com" 31 | paths: ["/"] 32 | annotations: 33 | cert-manager.io/cluster-issuer: letsencrypt-staging # cert-manager.io certificate issuer 34 | tls: 35 | - secretName: example-com-cert 36 | hosts: 37 | - api.example.com 38 | - "*.tunnel.example.com" 39 | 40 | ingressAdmin: 41 | enabled: true 42 | hosts: 43 | - host: admin.example.com 44 | paths: ["/"] 45 | annotations: 46 | cert-manager.io/cluster-issuer: letsencrypt-staging # cert-manager.io certificate issuer 47 | tls: 48 | - secretName: admin-example-com 49 | hosts: 50 | - admin-example-com 51 | -------------------------------------------------------------------------------- /exposrd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | cd `dirname "$0"` 3 | exec /usr/bin/env node --experimental-json-modules --no-warnings --title="$0 $*" exposrd.js $@ -------------------------------------------------------------------------------- /exposrd.ts: -------------------------------------------------------------------------------- 1 | import ExposrServer from './src/index.js'; 2 | import selfTest from './src/self-test.js'; 3 | 4 | (async () => { 5 | if (process.env.EXPOSR_SELF_TEST) { 6 | const result = await selfTest(); 7 | console.log('All tests:', result ? 'PASS' : 'FAIL'); 8 | process.exit(result ? 0 : -1); 9 | } 10 | const terminate = await ExposrServer(); 11 | 12 | const sigHandler = async (signal: NodeJS.Signals) => { 13 | const graceful = await terminate(signal, {gracefulTimeout: undefined, drainTimeout: undefined}); 14 | process.exit(graceful ? 
0 : -1); 15 | }; 16 | 17 | process.once('SIGTERM', sigHandler); 18 | process.once('SIGINT', sigHandler); 19 | })(); -------------------------------------------------------------------------------- /helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: exposr 3 | description: exposrd tunnel daemon 4 | type: application 5 | version: 0.12.0 6 | appVersion: 0.12.0 7 | -------------------------------------------------------------------------------- /helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | exposrd {{ .Chart.AppVersion }} deployed 2 | {{ if .Values.ingress.enabled }} 3 | Kubernetes ingress: 4 | {{- range $host := .Values.ingress.hosts }} 5 | {{- range .paths }} 6 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} 7 | {{- end }} 8 | {{- end }} 9 | {{ end }} 10 | 11 | {{- if .Values.ingressAdmin.enabled }} 12 | Administration API: 13 | {{- range $host := .Values.ingressAdmin.hosts }} 14 | {{- range .paths }} 15 | http{{ if $.Values.ingressAdmin.tls }}s{{ end }}://{{ $host.host }}{{ . 
}} 16 | {{- end }} 17 | {{- end }} 18 | {{- end }} 19 | 20 | exposr ingress: 21 | HTTP enabled: {{ .Values.exposr.ingress.http.enabled }} 22 | SNI enabled: {{ .Values.exposr.ingress.sni.enabled }} 23 | 24 | exposr transport 25 | WS enabled: {{ .Values.exposr.transport.ws.enabled }} 26 | SSH enabled: {{ .Values.exposr.transport.ssh.enabled }} 27 | -------------------------------------------------------------------------------- /helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "exposr.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "exposr.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "exposr.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "exposr.labels" -}} 38 | helm.sh/chart: {{ include "exposr.chart" . }} 39 | {{ include "exposr.selectorLabels" . 
}} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end -}} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "exposr.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "exposr.name" . }} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end -}} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "exposr.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create -}} 59 | {{ default (include "exposr.fullname" .) .Values.serviceAccount.name }} 60 | {{- else -}} 61 | {{ default "default" .Values.serviceAccount.name }} 62 | {{- end -}} 63 | {{- end -}} 64 | 65 | 66 | {{/* 67 | Create the image tag to use 68 | */}} 69 | {{- define "exposr.tag" -}} 70 | {{- .Values.image.tag | default (printf "v%s" .Chart.AppVersion) -}} 71 | {{- end -}} -------------------------------------------------------------------------------- /helm/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "exposr.fullname" . }} 5 | labels: 6 | {{- include "exposr.labels" . 
| nindent 4 }} 7 | data: 8 | {{- $exposrIngress := list }} 9 | {{- if eq .Values.exposr.ingress.http.enabled true }} 10 | {{- $exposrIngress = append $exposrIngress "http" }} 11 | {{- end }} 12 | {{- if eq .Values.exposr.ingress.sni.enabled true }} 13 | {{- $exposrIngress = append $exposrIngress "sni" }} 14 | {{- end }} 15 | {{- $exposrTransport := list }} 16 | {{- if eq .Values.exposr.transport.ws.enabled true }} 17 | {{- $exposrTransport = append $exposrTransport "ws" }} 18 | {{- end }} 19 | {{- if eq .Values.exposr.transport.ssh.enabled true }} 20 | {{- $exposrTransport = append $exposrTransport "ssh" }} 21 | {{- end }} 22 | EXPOSR_LOG_LEVEL: "{{ .Values.exposr.logLevel }}" 23 | EXPOSR_ADMIN_ENABLE: "true" 24 | EXPOSR_ADMIN_PORT: "9000" 25 | EXPOSR_ALLOW_REGISTRATION: "{{ .Values.exposr.allowRegistration }}" 26 | EXPOSR_API_PORT: "8080" 27 | {{- if .Values.exposr.apiUrl }} 28 | EXPOSR_API_URL: "{{ .Values.exposr.apiUrl }}" 29 | {{- end }} 30 | {{- if eq .Values.exposr.admin.enabled true }} 31 | EXPOSR_ADMIN_API_ENABLE: "true" 32 | EXPOSR_ADMIN_API_PORT: "8081" 33 | {{- if .Values.exposr.admin.apiKey }} 34 | EXPOSR_ADMIN_API_KEY: "{{ .Values.exposr.admin.apiKey }}" 35 | {{- end }} 36 | {{- end }} 37 | EXPOSR_CLUSTER: "{{ .Values.exposr.cluster.type }}" 38 | EXPOSR_CLUSTER_KEY: "{{ .Values.exposr.cluster.key }}" 39 | {{- if eq .Values.exposr.cluster.type "udp"}} 40 | EXPOSR_CLUSTER_UDP_DISCOVERY: "{{ .Values.exposr.cluster.udp.discovery }}" 41 | {{- end }} 42 | {{- if .Values.exposr.storage.url }} 43 | EXPOSR_STORAGE_URL: "{{ .Values.exposr.storage.url }}" 44 | {{- end }} 45 | {{- if .Values.exposr.storage.pgsql.connectionPoolSize }} 46 | EXPOSR_STORAGE_PGSQL_CONNECTION_POOL_SIZE: "{{ .Values.exposr.storage.pgsql.connectionPoolSize }}" 47 | {{- end }} 48 | EXPOSR_INGRESS: "{{ join "," $exposrIngress }}" 49 | EXPOSR_TRANSPORT: "{{ join "," $exposrTransport }}" 50 | {{- if eq .Values.exposr.ingress.http.enabled true }} 51 | {{- if .Values.exposr.ingress.http.domain 
}} 52 | EXPOSR_INGRESS_HTTP_DOMAIN: "{{ .Values.exposr.ingress.http.domain }}" 53 | {{- end }} 54 | EXPOSR_INGRESS_HTTP_PORT: "8080" 55 | {{- end }} 56 | {{- if eq .Values.exposr.ingress.sni.enabled true }} 57 | EXPOSR_INGRESS_SNI_PORT: "{{ .Values.exposr.ingress.sni.port }}" 58 | {{- if .Values.exposr.ingress.sni.cert }} 59 | EXPOSR_INGRESS_SNI_CERT: "/etc/exposr/ingress/sni/cert/tls.crt" 60 | EXPOSR_INGRESS_SNI_KEY: "/etc/exposr/ingress/sni/cert/tls.key" 61 | {{- end }} 62 | {{- end }} 63 | {{- if .Values.exposr.transport.maxConnections }} 64 | EXPOSR_TRANSPORT_MAX_CONNECTIONS: "{{ .Values.exposr.transport.maxConnections }}" 65 | {{- end }} 66 | {{- if eq .Values.exposr.transport.ssh.enabled true }} 67 | EXPOSR_TRANSPORT_SSH_PORT: "{{ .Values.exposr.transport.ssh.port }}" 68 | {{- if .Values.exposr.transport.ssh.host }} 69 | EXPOSR_TRANSPORT_SSH_HOST: "{{ .Values.exposr.transport.ssh.host }}" 70 | {{- end }} 71 | {{- if .Values.exposr.transport.ssh.key }} 72 | EXPOSR_TRANSPORT_SSH_KEY: "{{ .Values.exposr.transport.ssh.key }}" 73 | {{- end }} 74 | {{- end }} 75 | {{- if .Values.exposr.envs }} 76 | {{ toYaml .Values.exposr.envs | indent 2 }} 77 | {{- end }} 78 | -------------------------------------------------------------------------------- /helm/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "exposr.fullname" . }} 5 | labels: 6 | {{- include "exposr.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | {{- include "exposr.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | labels: 15 | {{- include "exposr.selectorLabels" . | nindent 8 }} 16 | spec: 17 | {{- with .Values.imagePullSecrets }} 18 | imagePullSecrets: 19 | {{- toYaml . | nindent 8 }} 20 | {{- end }} 21 | serviceAccountName: {{ include "exposr.serviceAccountName" . 
}} 22 | securityContext: 23 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 24 | containers: 25 | - name: {{ .Chart.Name }} 26 | securityContext: 27 | {{- toYaml .Values.securityContext | nindent 12 }} 28 | image: "{{ .Values.image.repository }}:{{ include "exposr.tag" . }}" 29 | imagePullPolicy: {{ .Values.image.pullPolicy }} 30 | ports: 31 | - name: cluster 32 | containerPort: 1025 33 | protocol: UDP 34 | - name: http 35 | containerPort: 8080 36 | protocol: TCP 37 | {{- if eq .Values.exposr.admin.enabled true }} 38 | - name: admin-api 39 | containerPort: 8081 40 | protocol: TCP 41 | {{- end }} 42 | - name: admin 43 | containerPort: 9000 44 | protocol: TCP 45 | {{- if eq .Values.exposr.ingress.sni.enabled true }} 46 | - name: ingress-sni 47 | containerPort: {{ .Values.exposr.ingress.sni.port }} 48 | protocol: TCP 49 | {{- end }} 50 | {{- if eq .Values.exposr.transport.ssh.enabled true }} 51 | - name: transport-ssh 52 | containerPort: {{ .Values.exposr.transport.ssh.port }} 53 | protocol: TCP 54 | {{- end }} 55 | volumeMounts: 56 | - name: app 57 | mountPath: /tmp/app 58 | {{- with .Values.extraVolumeMounts }} 59 | {{- toYaml . | nindent 12 }} 60 | {{- end }} 61 | {{- if .Values.exposr.ingress.sni.cert }} 62 | - name: ingress-sni-cert 63 | mountPath: /etc/exposr/ingress/sni/cert 64 | readOnly: true 65 | {{- end }} 66 | env: 67 | - name: POD_NAME 68 | valueFrom: 69 | fieldRef: 70 | fieldPath: metadata.name 71 | - name: POD_NAMESPACE 72 | valueFrom: 73 | fieldRef: 74 | fieldPath: metadata.namespace 75 | - name: SERVICE_NAME 76 | value: {{ include "exposr.fullname" . 
}}-headless 77 | readinessProbe: 78 | httpGet: 79 | path: /health 80 | port: admin 81 | initialDelaySeconds: 10 82 | periodSeconds: 5 83 | timeoutSeconds: 2 84 | failureThreshold: 1 85 | livenessProbe: 86 | httpGet: 87 | path: /ping 88 | port: admin 89 | initialDelaySeconds: 15 90 | periodSeconds: 10 91 | timeoutSeconds: 2 92 | failureThreshold: 3 93 | startupProbe: 94 | httpGet: 95 | path: /ping 96 | port: admin 97 | initialDelaySeconds: 30 98 | periodSeconds: 5 99 | timeoutSeconds: 2 100 | failureThreshold: 10 101 | envFrom: 102 | - configMapRef: 103 | name: {{ include "exposr.fullname" . }} 104 | resources: 105 | {{- toYaml .Values.resources | nindent 12 }} 106 | {{- with .Values.nodeSelector }} 107 | nodeSelector: 108 | {{- toYaml . | nindent 8 }} 109 | {{- end }} 110 | volumes: 111 | - name: app 112 | emptyDir: {} 113 | {{- with .Values.extraVolumes }} 114 | {{- toYaml . | nindent 8 }} 115 | {{- end }} 116 | {{- if .Values.exposr.ingress.sni.cert }} 117 | - name: ingress-sni-cert 118 | secret: 119 | secretName: {{ .Values.exposr.ingress.sni.cert }} 120 | {{- end }} 121 | {{- with .Values.affinity }} 122 | affinity: 123 | {{- toYaml . | nindent 8 }} 124 | {{- end }} 125 | {{- with .Values.tolerations }} 126 | tolerations: 127 | {{- toYaml . | nindent 8 }} 128 | {{- end }} 129 | -------------------------------------------------------------------------------- /helm/templates/ingress-admin.yaml: -------------------------------------------------------------------------------- 1 | {{- if or .Values.ingressAdmin.enabled -}} 2 | {{- $fullName := list ((include "exposr.fullname" .) | trunc 58) "admin" | join "-" -}} 3 | {{- $svcName := include "exposr.fullname" . -}} 4 | {{- $svcPort := .Values.service.adminApiPort -}} 5 | apiVersion: networking.k8s.io/v1 6 | kind: Ingress 7 | metadata: 8 | name: {{ $fullName }} 9 | labels: 10 | {{- include "exposr.labels" . | nindent 4 }} 11 | {{- with .Values.ingressAdmin.annotations }} 12 | annotations: 13 | {{- toYaml . 
| nindent 4 }} 14 | {{- end }} 15 | spec: 16 | {{- if .Values.ingressAdmin.tls }} 17 | tls: 18 | {{- range .Values.ingressAdmin.tls }} 19 | - hosts: 20 | {{- range .hosts }} 21 | - {{ . | quote }} 22 | {{- end }} 23 | secretName: {{ .secretName }} 24 | {{- end }} 25 | {{- end }} 26 | rules: 27 | {{- range .Values.ingressAdmin.hosts }} 28 | - host: {{ .host | quote }} 29 | http: 30 | paths: 31 | {{- range .paths }} 32 | - path: {{ . }} 33 | pathType: ImplementationSpecific 34 | backend: 35 | service: 36 | name: {{ $svcName }} 37 | port: 38 | number: {{ $svcPort }} 39 | {{- end }} 40 | {{- end }} 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /helm/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "exposr.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | apiVersion: networking.k8s.io/v1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | {{- include "exposr.labels" . | nindent 4 }} 10 | {{- with .Values.ingress.annotations }} 11 | annotations: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.ingress.tls }} 16 | tls: 17 | {{- range .Values.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . | quote }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.ingress.hosts }} 27 | - host: {{ .host | quote }} 28 | http: 29 | paths: 30 | {{- range .paths }} 31 | - path: {{ . 
}} 32 | pathType: ImplementationSpecific 33 | backend: 34 | service: 35 | name: {{ $fullName }} 36 | port: 37 | number: {{ $svcPort }} 38 | {{- end }} 39 | {{- end }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /helm/templates/service-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "exposr.fullname" . }}-headless 5 | labels: 6 | {{- include "exposr.labels" . | nindent 4 }} 7 | spec: 8 | type: ClusterIP 9 | clusterIP: None 10 | ports: 11 | - port: 1025 12 | targetPort: cluster 13 | protocol: UDP 14 | name: cluster 15 | selector: 16 | {{- include "exposr.selectorLabels" . | nindent 4 }} 17 | -------------------------------------------------------------------------------- /helm/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "exposr.fullname" . }} 5 | labels: 6 | {{- include "exposr.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | - port: {{ .Values.service.adminApiPort }} 15 | targetPort: admin-api 16 | protocol: TCP 17 | name: admin-api 18 | - port: {{ .Values.service.adminPort }} 19 | targetPort: admin 20 | protocol: TCP 21 | name: admin 22 | {{- if eq .Values.exposr.ingress.sni.enabled true }} 23 | - port: {{ .Values.exposr.ingress.sni.servicePort }} 24 | targetPort: ingress-sni 25 | protocol: TCP 26 | name: ingress-sni 27 | {{- end }} 28 | {{- if eq .Values.exposr.transport.ssh.enabled true }} 29 | - port: {{ .Values.exposr.transport.ssh.servicePort }} 30 | targetPort: transport-ssh 31 | protocol: TCP 32 | name: transport-ssh 33 | {{- end }} 34 | selector: 35 | {{- include "exposr.selectorLabels" . 
| nindent 4 }} 36 | -------------------------------------------------------------------------------- /helm/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "exposr.serviceAccountName" . }} 6 | labels: 7 | {{- include "exposr.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end -}} 13 | -------------------------------------------------------------------------------- /helm/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | exposr: 4 | logLevel: info 5 | allowRegistration: false 6 | admin: 7 | enabled: false 8 | apiKey: "" 9 | cluster: 10 | key: "cluster-signing-key-change-me" 11 | type: udp 12 | udp: 13 | discovery: kubernetes 14 | storage: 15 | url: memory:// 16 | pgsql: {} 17 | transport: 18 | maxConnections: 2 19 | ws: 20 | enabled: true 21 | ssh: 22 | enabled: false 23 | port: 2200 24 | servicePort: 22 25 | host: "" 26 | key: "" 27 | ingress: 28 | http: 29 | enabled: false 30 | domain: "" 31 | sni: 32 | enabled: false 33 | port: 4430 34 | servicePort: 443 35 | cert: "" 36 | envs: {} 37 | 38 | image: 39 | repository: ghcr.io/exposr/exposrd 40 | pullPolicy: Always 41 | 42 | imagePullSecrets: [] 43 | nameOverride: "" 44 | fullnameOverride: "" 45 | 46 | serviceAccount: 47 | create: false 48 | annotations: {} 49 | name: 50 | 51 | podSecurityContext: {} 52 | 53 | securityContext: {} 54 | 55 | service: 56 | type: ClusterIP 57 | port: 80 58 | adminApiPort: 8080 59 | adminPort: 9000 60 | 61 | ingress: 62 | enabled: false 63 | annotations: {} 64 | hosts: [] 65 | tls: [] 66 | 67 | ingressAdmin: 68 | enabled: false 69 | annotations: {} 70 | hosts: [] 71 | tls: [] 72 | 73 | resources: {} 74 | 75 | nodeSelector: {} 76 | 77 
| tolerations: [] 78 | 79 | affinity: {} 80 | 81 | extraVolumes: [] 82 | 83 | extraVolumeMounts: [] -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "exposrd", 3 | "version": "0.12.0", 4 | "type": "module", 5 | "main": "exposrd.mjs", 6 | "bin": "exposrd.mjs", 7 | "author": "Fredrik Lindberg ", 8 | "license": "MIT", 9 | "engines": { 10 | "node": ">=18" 11 | }, 12 | "files": [ 13 | "exposrd.mjs", 14 | "README.md", 15 | "LICENSE", 16 | "yarn.lock", 17 | "build.env", 18 | "build.js", 19 | "Dockerfile", 20 | "docker/entrypoint.sh", 21 | "Makefile" 22 | ], 23 | "dependencies": { 24 | "@exposr/ws-multiplex": "^1.4.0", 25 | "better-sqlite3": "^9.1.1", 26 | "content-type": "^1.0.5", 27 | "koa": "^2.14.1", 28 | "koa-joi-router": "^8.0.0", 29 | "koa-router": "^12.0.0", 30 | "log4js": "^6.8.0", 31 | "node-cache": "^5.1.2", 32 | "pg": "^8.10.0", 33 | "port-numbers": "^6.0.1", 34 | "redis": "^4.6.5", 35 | "redlock": "^4.2.0", 36 | "ssh2": "^1.14.0", 37 | "sshpk": "^1.16.1", 38 | "ws": "^8.12.1", 39 | "yargs": "^17.7.1" 40 | }, 41 | "devDependencies": { 42 | "@rollup/plugin-commonjs": "^24.0.1", 43 | "@rollup/plugin-json": "^6.0.0", 44 | "@types/better-sqlite3": "^7.6.8", 45 | "@types/content-type": "^1.1.8", 46 | "@types/koa-joi-router": "^8.0.5", 47 | "@types/mocha": "^10.0.1", 48 | "@types/node": "^20.5.0", 49 | "@types/pg": "^8.10.9", 50 | "@types/port-numbers": "^5.0.0", 51 | "@types/redlock": "^4.0.7", 52 | "@types/sinon": "^10.0.17", 53 | "@types/ssh2": "^1.11.14", 54 | "@types/sshpk": "^1.17.2", 55 | "@types/ws": "^8.5.5", 56 | "commit-and-tag-version": "^11.2.1", 57 | "mocha": "^10.2.0", 58 | "rollup": "^3.18.0", 59 | "sinon": "^15.0.3", 60 | "ts-node": "^10.9.1", 61 | "typescript": "^5.1.6", 62 | "yaml": "^2.2.2" 63 | }, 64 | "scripts": { 65 | "prepack": "yarn run version && yarn run bundle", 66 | "postpack": "rm build.env 
exposrd.mjs", 67 | "release": "commit-and-tag-version", 68 | "version": "scripts/build-version.sh", 69 | "prebuild": "rm -fr out && mkdir out && cp package.json out", 70 | "build": "tsc", 71 | "dev": "ts-node-esm exposrd.js", 72 | "prebundle": "yarn run build", 73 | "bundle": "rollup out/exposrd.js --file exposrd.mjs --format es -p @rollup/plugin-commonjs -p @rollup/plugin-json", 74 | "dist": "scripts/build-dist.sh", 75 | "test": "scripts/run-test.sh" 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /scripts/build-version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | SCRIPTS=$(dirname "$0") 3 | ROOT=${SCRIPTS}/.. 4 | ${SCRIPTS}/gen-build-env.sh > ${ROOT}/build.env 5 | . ${ROOT}/build.env 6 | export EXPOSR_BUILD_VERSION 7 | export EXPOSR_BUILD_GIT_BRANCH 8 | export EXPOSR_BUILD_GIT_COMMIT 9 | export EXPOSR_BUILD_DATE 10 | export EXPOSR_BUILD_USER 11 | export EXPOSR_BUILD_MACHINE 12 | ${SCRIPTS}/gen-build-js.sh > ${ROOT}/build.js 13 | git update-index --assume-unchanged ${ROOT}/build.js 14 | echo version: ${EXPOSR_BUILD_VERSION} -------------------------------------------------------------------------------- /scripts/bump-helm.cjs: -------------------------------------------------------------------------------- 1 | const YAML = require('yaml') 2 | 3 | module.exports.readVersion = function (contents) { 4 | const yaml = YAML.parse(contents) 5 | return yaml.version 6 | } 7 | 8 | module.exports.writeVersion = function (contents, version) { 9 | const yaml = YAML.parse(contents) 10 | yaml.appVersion = `${version}` 11 | yaml.version = `${version}` 12 | return YAML.stringify(yaml) 13 | } 14 | -------------------------------------------------------------------------------- /scripts/gen-build-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo EXPOSR_BUILD_VERSION=$(git describe --tags --always --dirty 2> 
/dev/null || git rev-parse --short HEAD)
echo EXPOSR_BUILD_GIT_BRANCH=$(git describe --all --always)
echo EXPOSR_BUILD_GIT_COMMIT=$(git rev-parse HEAD)
echo EXPOSR_BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
echo EXPOSR_BUILD_USER=$(id -u)
echo EXPOSR_BUILD_MACHINE=\"$(uname -a)\"
#!/bin/sh
# Emits build metadata as an ES module; expects EXPOSR_BUILD_* in the environment.
echo export const BUILD_VERSION = \"${EXPOSR_BUILD_VERSION}\"\;
echo export const BUILD_GIT_BRANCH = \"${EXPOSR_BUILD_GIT_BRANCH}\"\;
echo export const BUILD_GIT_COMMIT = \"${EXPOSR_BUILD_GIT_COMMIT}\"\;
echo export const BUILD_DATE = \"${EXPOSR_BUILD_DATE}\"\;
echo export const BUILD_USER = \"${EXPOSR_BUILD_USER}\"\;
echo export const BUILD_MACHINE = \"${EXPOSR_BUILD_MACHINE}\"\;
#!/bin/bash
# Test runner: starts docker-compose test dependencies when any e2e/system
# tests are selected (unless EXPOSR_TEST_DEPS_RUNNING is set), runs mocha,
# then tears the dependencies down again. Exit code is mocha's.

SCRIPTS=$(dirname "$0")
ROOT=${SCRIPTS}/..

if [ $# -eq 0 ]; then
    tests=test/unit
else
    tests="$@"
fi

system_tests=$(find $tests \( -path 'test/e2e*' -o -path 'test/system/*' \) | wc -l)
if [ -z $EXPOSR_TEST_DEPS_RUNNING ]; then
    if [ $system_tests -gt 0 ]; then
        ${SCRIPTS}/test-deps.sh start
    fi
fi

NODE_ENV=test mocha --exit --recursive $tests
ret=$?

if [ -z $EXPOSR_TEST_DEPS_RUNNING ]; then
    if [ $system_tests -gt 0 ]; then
        ${SCRIPTS}/test-deps.sh stop
    fi
fi

exit $ret
#!/bin/bash
# Brings the docker-compose test dependency stack up or down.

if [ $# -eq 0 ]; then
    echo "Need argument start or stop"
    # FIX: POSIX exit status must be 0-255; "exit -1" is non-portable.
    exit 1
fi

cmd=$1

if [ "$cmd" == "start" ]; then
    docker compose -f deps/docker-compose.yaml up -d --wait
elif [ "$cmd" == "stop" ]; then
    docker compose -f deps/docker-compose.yaml down
else
    echo "Unknown command $cmd"
fi
import Account from './account.js';
import crypto from 'crypto';
import { Logger } from '../logger.js';
import TunnelService from '../tunnel/tunnel-service.js';
import Storage, { ListState } from '../storage/storage.js';

type AccountListResult = {
    cursor: string | null,
    accounts: Array<Account>,
};

class AccountService {
    // Alphabet avoids visually ambiguous characters (no 0/O, 1/I/L, ...).
    private static ACCOUNT_ID_ALPHABET = 'CDEFHJKMNPRTVWXY2345689';
    private static ACCOUNT_ID_LENGTH = 16;
    private static ACCOUNT_ID_REGEX = new RegExp(`^[${AccountService.ACCOUNT_ID_ALPHABET}]{${AccountService.ACCOUNT_ID_LENGTH}}$`);

    // Generates a random 16-character account id.
    // NOTE(review): `x % alphabet.length` introduces a slight modulo bias
    // (256 is not a multiple of 23) — acceptable for ids, but confirm they
    // are not relied on as high-entropy secrets.
    static generateId(): string {
        const randomBytes = new Uint8Array(AccountService.ACCOUNT_ID_LENGTH);
        crypto.getRandomValues(randomBytes);

        return [...randomBytes]
            .map(x => {
                const randomPos = x % AccountService.ACCOUNT_ID_ALPHABET.length;
                return AccountService.ACCOUNT_ID_ALPHABET[randomPos];
            })
            .join('');
    }

    // Strips spaces and dashes, uppercases, and validates; returns
    // undefined when the input is not a well-formed account id.
    static normalizeId(accountId: string): string | undefined {
        const normalized = accountId.replace(/[ 
-]/g, '').toUpperCase(); 31 | if (AccountService.ACCOUNT_ID_REGEX.test(normalized)) { 32 | return normalized; 33 | } else { 34 | return undefined; 35 | } 36 | } 37 | 38 | static formatId(accountId: string): string { 39 | return accountId.replace(/.{1,4}(?=(.{4})+$)/g, '$&-'); 40 | } 41 | 42 | private _db: Storage; 43 | private logger: any; 44 | private tunnelService: TunnelService; 45 | 46 | constructor() { 47 | this._db = new Storage("account"); 48 | this.logger = Logger("account-service"); 49 | this.tunnelService = new TunnelService(); 50 | } 51 | 52 | public async destroy(): Promise { 53 | await this.tunnelService.destroy(); 54 | await this._db.destroy(); 55 | } 56 | 57 | public async get(accountId: string): Promise { 58 | const normalizedId = AccountService.normalizeId(accountId); 59 | if (normalizedId == undefined) { 60 | return undefined; 61 | } 62 | 63 | const account = await this._db.read(normalizedId, Account); 64 | if (!(account instanceof Account)) { 65 | return undefined 66 | } 67 | return account; 68 | } 69 | 70 | public async create(): Promise { 71 | let maxTries = 100; 72 | let created: boolean | null; 73 | let account: Account; 74 | do { 75 | account = new Account(AccountService.generateId()); 76 | account.created_at = new Date().toISOString(); 77 | account.updated_at = account.created_at; 78 | created = await this._db.create(account.id, account); 79 | } while (!created && maxTries-- > 0); 80 | 81 | if (!created) { 82 | return undefined; 83 | } 84 | 85 | return account; 86 | } 87 | 88 | async delete(accountId: string): Promise { 89 | const account = await this.get(accountId); 90 | if (!(account instanceof Account)) { 91 | return false; 92 | } 93 | 94 | const tunnels = [...account.tunnels]; 95 | try { 96 | await Promise.allSettled(tunnels.map((tunnelId) => { 97 | return this.tunnelService.delete(tunnelId, accountId) 98 | })); 99 | 100 | return await this._db.delete(account.id); 101 | } catch (e) { 102 | this.logger.error({ 103 | message: `Failed to 
delete account`, 104 | accountId 105 | }); 106 | return false; 107 | } 108 | } 109 | 110 | async update(accountId: string, callback: (account: Account) => void): Promise { 111 | const normalizedId = AccountService.normalizeId(accountId); 112 | if (normalizedId == undefined) { 113 | return undefined; 114 | } 115 | 116 | const updatedAccount = await this._db.update(normalizedId, Account, async (account: Account) => { 117 | callback(account); 118 | account.updated_at = new Date().toISOString(); 119 | return true; 120 | }); 121 | return updatedAccount ?? undefined 122 | } 123 | 124 | public async list(cursor: string | undefined, count: number = 10, verbose: boolean = false): Promise { 125 | 126 | const listState: ListState | undefined = cursor ? { cursor } : undefined; 127 | let res = await this._db.list(listState, count); 128 | 129 | const keys = res.keys; 130 | if (res.pending > 0) { 131 | res = await this._db.list(res, res.pending); 132 | keys.push(...res.keys); 133 | } 134 | 135 | const data: Array = verbose ? 
(await this._db.read(keys, Account) || []) : keys.map((id: string) => { 136 | return new Account(id); 137 | }); 138 | return { 139 | cursor: res.cursor, 140 | accounts: data.filter((d) => d != null) as Array, 141 | } 142 | } 143 | 144 | async disable(accountId: string, disabled: boolean, reason?: string): Promise { 145 | const account = await this.update(accountId, (account) => { 146 | account.status.disabled = disabled; 147 | if (account.status.disabled) { 148 | account.status.disabled_at = new Date().toISOString(); 149 | account.status.disabled_reason = reason; 150 | } else { 151 | account.status.disabled_at = undefined; 152 | account.status.disabled_reason = undefined; 153 | } 154 | }); 155 | 156 | if (!account) { 157 | return undefined; 158 | } 159 | 160 | if (account.status.disabled) { 161 | try { 162 | await Promise.allSettled(account.tunnels.map((tunnelId) => { 163 | return this.tunnelService.disconnect(tunnelId, accountId) 164 | })); 165 | } catch (e: any) { 166 | this.logger.warn({ 167 | message: `Failed to disconnect tunnels on disabled account`, 168 | accountId 169 | }); 170 | } 171 | } 172 | return account; 173 | } 174 | 175 | } 176 | 177 | export default AccountService; -------------------------------------------------------------------------------- /src/account/account-tunnel-service.ts: -------------------------------------------------------------------------------- 1 | import Tunnel from "../tunnel/tunnel.js"; 2 | import Account from "./account.js"; 3 | import AccountService from "./account-service.js"; 4 | import { TunnelConfig } from "../tunnel/tunnel-config.js"; 5 | import Storage from "../storage/storage.js"; 6 | 7 | export default class AccountTunnelService { 8 | 9 | private storage: Storage; 10 | 11 | constructor() { 12 | this.storage = new Storage("account"); 13 | } 14 | 15 | public async destroy(): Promise { 16 | await this.storage.destroy(); 17 | } 18 | 19 | public async assignTunnel(tunnelConfig: TunnelConfig): Promise { 20 | const 
normalizedId = AccountService.normalizeId(tunnelConfig.account); 21 | if (normalizedId == undefined) { 22 | return false; 23 | } 24 | 25 | const res = await this.storage.update(normalizedId, Account, async (account: Account) => { 26 | if (!account.tunnels.includes(tunnelConfig.account)) { 27 | account.tunnels.push(tunnelConfig.id); 28 | } 29 | account.updated_at = new Date().toISOString(); 30 | return true; 31 | }); 32 | return res instanceof Account; 33 | } 34 | 35 | public async unassignTunnel(tunnelConfig: TunnelConfig): Promise { 36 | const normalizedId = AccountService.normalizeId(tunnelConfig.account); 37 | if (normalizedId == undefined) { 38 | return false; 39 | } 40 | 41 | const res = await this.storage.update(normalizedId, Account, async (account: Account) => { 42 | const pos = account.tunnels.indexOf(tunnelConfig.id); 43 | if (pos >= 0) { 44 | account.tunnels.splice(pos, 1); 45 | } 46 | account.updated_at = new Date().toISOString(); 47 | return true; 48 | }); 49 | return res instanceof Account; 50 | } 51 | 52 | public async authorizedAccount(tunnel: Tunnel): Promise { 53 | const normalizedId = AccountService.normalizeId(tunnel.account); 54 | if (normalizedId == undefined) { 55 | throw new Error("no_account_on_tunnel"); 56 | } 57 | 58 | const account = await this.storage.read(normalizedId, Account); 59 | if (!(account instanceof Account)) { 60 | throw new Error("dangling_account"); 61 | } 62 | if (!account.tunnels.includes(tunnel.id)) { 63 | this.assignTunnel(tunnel.config); 64 | } 65 | return account; 66 | } 67 | } -------------------------------------------------------------------------------- /src/account/account.ts: -------------------------------------------------------------------------------- 1 | import { Serializable } from '../storage/serializer.js'; 2 | 3 | type AccountStatus = { 4 | disabled: boolean, 5 | disabled_at?: string, 6 | disabled_reason?: string, 7 | } 8 | 9 | class Account implements Serializable { 10 | public accountId?: string; 11 | 
public id?: string; 12 | public created_at?: string; 13 | public updated_at?: string; 14 | public tunnels: Array; 15 | public status: AccountStatus; 16 | 17 | constructor(accountId?: string) { 18 | this.accountId = accountId; 19 | this.id = accountId; 20 | this.created_at = undefined; 21 | this.updated_at = undefined; 22 | this.tunnels = []; 23 | this.status = { 24 | disabled: false, 25 | disabled_at: undefined, 26 | disabled_reason: undefined, 27 | }; 28 | } 29 | } 30 | 31 | export default Account; -------------------------------------------------------------------------------- /src/cluster/cluster-node.ts: -------------------------------------------------------------------------------- 1 | import crypto from 'crypto'; 2 | import os, { NetworkInterfaceInfo } from 'os'; 3 | 4 | class Node { 5 | public static readonly hostname = `${process.pid}@${os.hostname}`; 6 | public static readonly identifier = crypto.createHash('sha1').update(`${Date.now() + Math.random()}`).digest('hex'); 7 | public static readonly interface = Node.getNetworkInterface(); 8 | 9 | public static readonly address4 = Node._getIP(Node.interface, 'IPv4'); 10 | public static readonly address6 = Node._getIP(Node.interface, 'IPv6'); 11 | public static readonly address = Node.address4 || Node.address6 || '0.0.0.0'; 12 | 13 | public static getIP() { 14 | return Node._getIP(Node.interface, 'IPv4') || Node._getIP(Node.interface, 'IPv6'); 15 | } 16 | 17 | private static _getIP(iface: string, family: string): string | undefined { 18 | const addresses = os.networkInterfaces()[iface]; 19 | if (!addresses) { 20 | return undefined; 21 | } 22 | return addresses.filter((addr) => { return addr.family == family; })[0]?.address; 23 | } 24 | 25 | public static getNetworkInterface(iface?: string): string { 26 | const interfaces = os.networkInterfaces(); 27 | 28 | if (iface != undefined) { 29 | if (interfaces[iface]) { 30 | return iface; 31 | } else { 32 | throw new Error('no_such_network_interface'); 33 | } 34 | } 35 
| 36 | if (Object.keys(interfaces).length == 0) { 37 | throw new Error('no_network_interfaces'); 38 | } 39 | 40 | Object.keys(interfaces).forEach((element) => { 41 | const addresses = interfaces[element]?.filter(entry => !entry.internal); 42 | if (addresses?.length == 0) { 43 | delete interfaces[element]; 44 | } 45 | }); 46 | 47 | const names = Object.keys(interfaces); 48 | names.sort((a: string, b: string) => { 49 | 50 | const haveProperty = (array: Array, predicate: (x: NetworkInterfaceInfo) => boolean) => { 51 | return array.filter(predicate).length; 52 | } 53 | 54 | const score = (element: string) => { 55 | const addresses = interfaces[element]; 56 | if (!addresses) { 57 | return -1; 58 | } 59 | return haveProperty(addresses, (e) => {return e.family == 'IPv4'}) + 60 | haveProperty(addresses, (e) => {return e.family == 'IPv6'}); 61 | } 62 | 63 | return 2*score(b) - 2*score(a) - a.localeCompare(b); 64 | }); 65 | 66 | return names[0]; 67 | } 68 | } 69 | 70 | export default Node; -------------------------------------------------------------------------------- /src/cluster/discovery-method.ts: -------------------------------------------------------------------------------- 1 | import dgram from 'dgram'; 2 | 3 | export default abstract class DiscoveryMethod { 4 | 5 | public abstract readonly name: string; 6 | 7 | public abstract eligible(): number; 8 | 9 | public abstract init(socket: dgram.Socket | undefined, socket6: dgram.Socket | undefined): void; 10 | 11 | public abstract getPeers(): Promise>; 12 | 13 | } -------------------------------------------------------------------------------- /src/cluster/eventbus-interface.ts: -------------------------------------------------------------------------------- 1 | import ClusterManager from './cluster-manager.js'; 2 | 3 | export type EventBusInterfaceOptions = { 4 | callback: (error?: Error) => void 5 | } 6 | 7 | export default abstract class EventBusInterface { 8 | private destroyed: boolean = false; 9 | 10 | 
constructor(opts: EventBusInterfaceOptions) { 11 | } 12 | 13 | public async destroy(): Promise { 14 | if (this.destroyed) { 15 | return; 16 | } 17 | await this._destroy(); 18 | this.destroyed = true; 19 | } 20 | 21 | public async publish(message: string): Promise { 22 | return this._publish(message); 23 | } 24 | 25 | protected abstract _publish(message: string): Promise; 26 | 27 | protected abstract _destroy(): Promise; 28 | 29 | protected receive(message: string): void { 30 | const res: Boolean | Error = ClusterManager.receive(message); 31 | if (res instanceof Error) { 32 | throw res; 33 | } 34 | } 35 | } -------------------------------------------------------------------------------- /src/cluster/eventbus.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { Logger } from '../logger.js'; 3 | import ClusterManager, { EmitCallback } from './cluster-manager.js'; 4 | 5 | export type EmitMeta = { 6 | node: { 7 | id: string, 8 | host: string, 9 | ip: string, 10 | }, 11 | ts: number, 12 | } 13 | 14 | class EventBus extends EventEmitter { 15 | private logger: any; 16 | private emitCallback: EmitCallback; 17 | 18 | constructor() { 19 | super(); 20 | this.logger = Logger("eventbus"); 21 | 22 | this.setMaxListeners(1); 23 | this.on('newListener', () => { 24 | this.setMaxListeners(this.getMaxListeners() + 1); 25 | }); 26 | this.on('removeListener', () => { 27 | this.setMaxListeners(this.getMaxListeners() - 1); 28 | }); 29 | 30 | const emitCallback: EmitCallback = this.emitCallback = (event, message, meta) => { 31 | super.emit(event, message, meta); 32 | this.logger.isTraceEnabled() && 33 | this.logger.trace({ 34 | operation: 'emit', 35 | event, 36 | message, 37 | meta 38 | }); 39 | }; 40 | 41 | ClusterManager.attach(emitCallback); 42 | } 43 | 44 | public async destroy(): Promise { 45 | this.removeAllListeners(); 46 | ClusterManager.detach(this.emitCallback); 47 | } 48 | 49 | async 
publish(event: string, message: any) { 50 | return ClusterManager.publish(event, message); 51 | } 52 | 53 | async waitFor(channel: string, predicate: (message: any, meta: EmitMeta) => boolean, timeout: number | undefined) { 54 | return new Promise((resolve, reject) => { 55 | let timer: NodeJS.Timeout; 56 | const fun = (message: any, meta: EmitMeta) => { 57 | if (!predicate(message, meta)) { 58 | return; 59 | } 60 | this.removeListener(channel, fun); 61 | timer && clearTimeout(timer); 62 | resolve(message); 63 | }; 64 | this.on(channel, fun); 65 | if (typeof timeout === 'number') { 66 | timer = setTimeout(() => { 67 | this.removeListener(channel, fun); 68 | reject(); 69 | }, timeout); 70 | } 71 | }); 72 | } 73 | } 74 | export default EventBus; 75 | -------------------------------------------------------------------------------- /src/cluster/kubernetes-discovery.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs'; 2 | import dns from 'dns/promises'; 3 | import { Logger } from '../logger.js'; 4 | import DiscoveryMethod from './discovery-method.js'; 5 | import ClusterManager from './cluster-manager.js'; 6 | 7 | export type KubernetesDiscoveryOptions = { 8 | serviceNameEnv?: string, 9 | namespaceEnv?: string, 10 | serviceName?: string, 11 | namespace?: string, 12 | clusterDomain?: string, 13 | } 14 | 15 | class KubernetesDiscovery implements DiscoveryMethod { 16 | public readonly name: string; 17 | 18 | private logger: any; 19 | private _serviceName: string; 20 | private _namespace: string; 21 | private _clusterDomain: string; 22 | private _serviceHost: string; 23 | private _cacheTime: number; 24 | private _cachedPeers: Array | undefined; 25 | 26 | constructor(opts: KubernetesDiscoveryOptions) { 27 | this.logger = Logger("kubernetes-discovery"); 28 | 29 | const serviceNameEnv = opts?.serviceNameEnv || 'SERVICE_NAME'; 30 | const namespaceEnv = opts?.namespaceEnv || 'POD_NAMESPACE'; 31 | 32 | this._serviceName = 
opts?.serviceName || process.env[serviceNameEnv] || "exposr-headless"; 33 | this._namespace = opts?.namespace || process.env[namespaceEnv] || "default"; 34 | this._clusterDomain = opts?.clusterDomain || 'cluster.local'; 35 | 36 | this._serviceHost = `${this._serviceName}.${this._namespace}.svc.${this._clusterDomain}`; 37 | 38 | this.name = `kubernetes service ${this._serviceHost}`; 39 | this._cacheTime = Date.now() - 1000; 40 | } 41 | 42 | public eligible(): number { 43 | const namespaceFile = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'; 44 | 45 | if (!fs.existsSync(namespaceFile)) { 46 | this.logger.debug({ 47 | message: `${namespaceFile} does not exist`, 48 | }); 49 | return -1; 50 | } 51 | 52 | return 10; 53 | } 54 | 55 | public init(): void { 56 | this.logger.debug({ 57 | message: `using ${this._serviceHost} headless service for pod discovery`, 58 | }); 59 | } 60 | 61 | public async getPeers(): Promise> { 62 | if (this._cachedPeers && (Date.now() - this._cacheTime) < 1000) { 63 | return this._cachedPeers; 64 | } 65 | 66 | let peers: Array = []; 67 | try { 68 | peers = await this._resolvePeers(); 69 | this._cachedPeers = peers; 70 | this._cacheTime = Date.now(); 71 | } catch (err: any) { 72 | this.logger.warn({ 73 | message: `failed to resolve ${this._serviceHost}: ${err.message}` 74 | }); 75 | } 76 | 77 | const learntPeers = ClusterManager.getLearntPeers(); 78 | for (let i = 0; i < learntPeers.length; i++) { 79 | if (peers.indexOf(learntPeers[i]) === -1) { 80 | peers.push(learntPeers[i]); 81 | } 82 | } 83 | 84 | return peers; 85 | } 86 | 87 | async _resolvePeers() { 88 | return Promise.allSettled([ 89 | dns.resolve4(this._serviceHost), 90 | dns.resolve6(this._serviceHost) 91 | ]).then((results) => { 92 | const [result4, result6] = results; 93 | if (result4.status == 'fulfilled' && result4.value?.length > 0) { 94 | return result4.value; 95 | } else if (result6.status == 'fulfilled' && result6.value?.length > 0) { 96 | return result6.value; 97 | } 
else if (result4.status == 'rejected') { 98 | throw result4.reason; 99 | } else if (result6.status == 'rejected') { 100 | throw result6.reason; 101 | } else { 102 | throw new Error('unknown'); 103 | } 104 | }); 105 | } 106 | } 107 | 108 | export default KubernetesDiscovery; 109 | -------------------------------------------------------------------------------- /src/cluster/memory-eventbus.ts: -------------------------------------------------------------------------------- 1 | import { Logger } from '../logger.js'; 2 | import EventBusInterface, { EventBusInterfaceOptions } from './eventbus-interface.js'; 3 | 4 | export type MemoryEventBusOptions = EventBusInterfaceOptions; 5 | 6 | class MemoryEventBus extends EventBusInterface { 7 | private logger: any; 8 | 9 | constructor(opts: EventBusInterfaceOptions) { 10 | super(opts) 11 | this.logger = Logger("memory-eventbus"); 12 | typeof opts.callback === 'function' && process.nextTick(opts.callback); 13 | } 14 | 15 | protected async _destroy(): Promise { 16 | } 17 | 18 | protected async _publish(message: string): Promise { 19 | return new Promise((resolve) => { 20 | process.nextTick(() => { 21 | try { 22 | this.receive(message); 23 | this.logger.debug({ 24 | operation: 'publish', 25 | message, 26 | }); 27 | } catch (e: any) { 28 | this.logger.error({ 29 | message: `failed to receive message ${message}: ${e.message}`, 30 | }); 31 | } 32 | resolve(); 33 | }); 34 | }); 35 | } 36 | } 37 | 38 | export default MemoryEventBus; -------------------------------------------------------------------------------- /src/cluster/multicast-discovery.ts: -------------------------------------------------------------------------------- 1 | import dgram from 'dgram'; 2 | import DiscoveryMethod from "./discovery-method.js"; 3 | import { Logger } from '../logger.js'; 4 | 5 | function inCidr(ipAddress: string, cidrPrefix: string): boolean { 6 | const [subnet, prefixLength] = cidrPrefix.split('/'); 7 | const subnetOctets = 
subnet.split('.').map(Number); 8 | const ipOctets = ipAddress.split('.')?.map(Number); 9 | 10 | const subnetInt = (subnetOctets[0] << 24) | 11 | (subnetOctets[1] << 16) | 12 | (subnetOctets[2] << 8) | 13 | subnetOctets[3]; 14 | 15 | const ipInt = (ipOctets[0] << 24) | 16 | (ipOctets[1] << 16) | 17 | (ipOctets[2] << 8) | 18 | ipOctets[3]; 19 | 20 | const mask = (0xffffffff << (32 - Number.parseInt(prefixLength))) >>> 0; 21 | 22 | return (subnetInt & mask) === (ipInt & mask); 23 | } 24 | 25 | export type MulticastDiscoveryOptions = { 26 | group: string, 27 | } 28 | 29 | class MulticastDiscovery implements DiscoveryMethod { 30 | public readonly name: string; 31 | 32 | private _multicastgroup: string; 33 | private logger: any; 34 | 35 | constructor(opts: MulticastDiscoveryOptions) { 36 | this._multicastgroup = opts.group || '239.0.0.1'; 37 | if (!inCidr(this._multicastgroup, "239.0.0.0/8")) { 38 | throw new Error(`${this._multicastgroup} is not within the private multicast range 239.0.0.0/8`); 39 | } 40 | this.logger = Logger('multicast-discovery'); 41 | this.name = `multicast group ${this._multicastgroup}`; 42 | } 43 | 44 | public eligible(): number { 45 | return 0; 46 | } 47 | 48 | public init(socket: dgram.Socket): void { 49 | if (!socket) { 50 | this.logger.error({ 51 | message: `Unable to initialize multicast discovery, no IPv4 socket available` 52 | }); 53 | return; 54 | } 55 | socket.addMembership(this._multicastgroup); 56 | socket.setMulticastLoopback(true); 57 | this.logger.debug({ 58 | message: `joined multicast group ${this._multicastgroup}`, 59 | }); 60 | } 61 | 62 | public async getPeers(): Promise> { 63 | return [this._multicastgroup]; 64 | } 65 | } 66 | 67 | export default MulticastDiscovery; -------------------------------------------------------------------------------- /src/controller/admin-controller.ts: -------------------------------------------------------------------------------- 1 | import { Router } from 'koa-joi-router'; 2 | import { Logger } 
import { Router } from 'koa-joi-router';
import { Logger } from '../logger.js';
import KoaController from "./koa-controller.js";

// Admin/health endpoint controller. Exposes /ping (liveness: has the
// readiness state ever been set) and /health (readiness: is the app
// currently marked ready).
class AdminController extends KoaController {

    public readonly _name: string = 'Admin'

    // undefined = readiness never reported; true/false once setReady() ran.
    public appReady: boolean | undefined;

    constructor(opts: any) {
        const logger: any = Logger("admin");

        super({...opts, logger});
        // When disabled, the KoaController base skips listener setup; we
        // only log and leave appReady undefined (both probes return 404).
        if (!opts.enable) {
            logger.info({
                message: `HTTP Admin disabled`,
            });
            return;
        }

        this.appReady = undefined;
    }

    // Record readiness; calling with undefined defaults to "ready".
    public setReady(ready: boolean) {
        ready ??= true;
        this.appReady = ready;
    }

    protected _initializeRoutes(router: Router): void {
        router.route({
            method: 'get',
            path: '/ping',
            handler: async (ctx, next) => {
                // 200 once readiness has been reported at all (even false).
                ctx.status = this.appReady != undefined ? 200 : 404;
            },
        });

        router.route({
            method: 'get',
            path: '/health',
            handler: async (ctx, next) => {
                // 200 only while the app is currently ready.
                ctx.status = this.appReady ? 200 : 404;
            },
        });
    }

    protected async _destroy() {
        this.appReady = undefined;
    }
}

export default AdminController;

// ---- src/controller/koa-controller.ts ----
import { strict as assert } from 'assert';
import Koa from 'koa';
import Router from 'koa-joi-router';
import Listener from '../listener/listener.js';
import HttpListener, { HttpRequestCallback, HttpRequestType } from '../listener/http-listener.js';
import { IncomingMessage, ServerResponse } from 'http';

// Base class for HTTP API controllers: builds a Koa app with routes
// provided by the subclass (_initializeRoutes) and attaches it to a
// per-port HttpListener shared through the Listener registry.
abstract class KoaController {

    public readonly _name: string = 'controller'
    private _port!: number;
    private httpListener!: HttpListener;
    private _requestHandler!: HttpRequestCallback;
    private router!: Router.Router;
    private app!: Koa;

    constructor(opts: any) {
        assert(opts != undefined);
        const {port, callback, logger, host, prio} = opts;

        // A disabled controller still fires the completion callback so
        // initialization waiters are not left hanging.
        if (opts?.enable === false) {
            typeof callback === 'function' && process.nextTick(() => callback());
            return;
        }

        this._port = port;

        // Handler registered on the shared listener. Stashes the request's
        // base URL on the raw req object (retrieved later via getBaseUrl)
        // before dispatching into the Koa app.
        const useCallback: HttpRequestCallback = this._requestHandler = async (ctx, next) => {
            const setBaseUrl = (req: any, baseUrl: URL | undefined) => {
                req._exposrBaseUrl = baseUrl;
            };
            setBaseUrl(ctx.req, ctx.baseUrl)
            if (!await this.appCallback(ctx.req, ctx.res)) {
                return next();
            }
        }

        // Acquire (or share) the listener for this port and register our
        // request handler with the given host filter and priority.
        const httpListener = this.httpListener = Listener.acquire(HttpListener, port);
        httpListener.use(HttpRequestType.request, { host, logger, prio, logBody: true }, useCallback);

        this.app = new Koa();
        this.router = Router();
        this._initializeRoutes(this.router);
        this.app.use(this.router.middleware());

        this.httpListener.listen()
            .then(() => {
                logger.info({
                    message: `HTTP ${this._name} listening on port ${port}`,
                });
                typeof callback === 'function' && process.nextTick(() => callback());
            })
            .catch((err) => {
                logger.error({
                    message: `Failed to initialize HTTP ${this._name}: ${err.message}`,
                });
                typeof callback === 'function' && process.nextTick(() => callback(err));
            });
    }

    // Dispatch the raw request into the Koa application. Always reports
    // the request as handled (true), so next() is never reached above.
    private async appCallback(req: IncomingMessage, res: ServerResponse): Promise<boolean> {
        await (this.app.callback()(req, res));
        return true;
    }

    protected abstract _initializeRoutes(router: Router.Router): void;

    protected abstract _destroy(): Promise<void>;

    public async destroy(): Promise<void> {
        // Deregister our handler first, then release the shared listener
        // and run subclass teardown; allSettled so one failure does not
        // block the other.
        this.httpListener?.removeHandler(HttpRequestType.request, this._requestHandler);
        await Promise.allSettled([
            Listener.release(this._port),
            this._destroy(),
        ]);
    }

    // Recover the base URL stashed on the raw request by _requestHandler.
    protected getBaseUrl(req: IncomingMessage): URL | undefined {
        return ((req as any)._exposrBaseUrl as (URL | undefined));
    }
}

export default KoaController;
| } 82 | 83 | export default KoaController; -------------------------------------------------------------------------------- /src/ingress/ingress-base.ts: -------------------------------------------------------------------------------- 1 | export default abstract class IngressBase { 2 | 3 | public abstract getBaseUrl(tunnelId: string): URL; 4 | 5 | public abstract destroy(): Promise; 6 | } -------------------------------------------------------------------------------- /src/ingress/ingress-manager.ts: -------------------------------------------------------------------------------- 1 | import HttpIngress, { HttpIngressOptions } from './http-ingress.js'; 2 | import SNIIngress, { SniIngressOptions } from './sni-ingress.js'; 3 | import IngressBase from './ingress-base.js'; 4 | 5 | export type IngressOptions = { 6 | http?: { 7 | enabled: boolean, 8 | } & HttpIngressOptions, 9 | sni?: { 10 | enabled: boolean, 11 | } & SniIngressOptions, 12 | } 13 | 14 | export enum IngressType { 15 | INGRESS_HTTP = 'http', 16 | INGRESS_SNI = 'sni', 17 | } 18 | 19 | class IngressManager { 20 | public static listening: boolean = false; 21 | 22 | private static ingress: { 23 | [ key in IngressType ]: { 24 | enabled: boolean, 25 | instance?: IngressBase, 26 | } 27 | } 28 | 29 | public static async listen(opts: IngressOptions): Promise { 30 | if (this.listening) { 31 | return true; 32 | } 33 | 34 | this.ingress = { 35 | http: { 36 | enabled: opts.http?.enabled || false, 37 | }, 38 | sni: { 39 | enabled: opts.sni?.enabled || false, 40 | }, 41 | }; 42 | 43 | const p = []; 44 | 45 | if (this.ingress.http.enabled == true) { 46 | p.push(new Promise((resolve, reject) => { 47 | this.ingress.http.instance = new HttpIngress({ 48 | ...opts.http, 49 | callback: (e?: Error) => { 50 | e ? 
reject(e) : resolve(undefined) 51 | }, 52 | }); 53 | })); 54 | } 55 | 56 | if (this.ingress.sni.enabled == true) { 57 | p.push(new Promise((resolve, reject) => { 58 | this.ingress.sni.instance = new SNIIngress({ 59 | ...opts.sni, 60 | callback: (e?: Error) => { 61 | e ? reject(e) : resolve(undefined) 62 | }, 63 | }); 64 | })); 65 | } 66 | 67 | const res = await Promise.all(p).then(() => { 68 | return true; 69 | }).catch(async (e) => { 70 | await this.close(); 71 | throw e; 72 | }); 73 | return res; 74 | } 75 | 76 | public static async close(): Promise { 77 | await Promise.allSettled([ 78 | this.ingress.http.instance?.destroy(), 79 | this.ingress.sni.instance?.destroy(), 80 | ]); 81 | this.ingress = { 82 | http: { 83 | enabled: false, 84 | }, 85 | sni: { 86 | enabled: false, 87 | }, 88 | }; 89 | this.listening = false; 90 | } 91 | 92 | public static getIngress(ingressType: IngressType): IngressBase { 93 | return this.ingress[ingressType].instance; 94 | } 95 | 96 | public static ingressEnabled(ingressType: IngressType): boolean { 97 | return this.ingress[ingressType].enabled; 98 | } 99 | } 100 | 101 | export default IngressManager; -------------------------------------------------------------------------------- /src/ingress/ingress-service.ts: -------------------------------------------------------------------------------- 1 | import IngressManager, { IngressType } from "./ingress-manager.js"; 2 | 3 | export default class IngressService { 4 | 5 | static instance: IngressService | undefined; 6 | static ref: number; 7 | 8 | private destroyed: boolean = false; 9 | 10 | constructor() { 11 | if (IngressService.instance instanceof IngressService) { 12 | IngressService.ref++; 13 | return IngressService.instance; 14 | } 15 | } 16 | 17 | public async destroy(): Promise { 18 | if (this.destroyed) { 19 | return; 20 | } 21 | if (--IngressService.ref == 0) { 22 | this.destroyed = true; 23 | IngressService.instance = undefined; 24 | } 25 | } 26 | 27 | public enabled(ingressType: 
IngressType): boolean { 28 | return IngressManager.ingressEnabled(ingressType) 29 | } 30 | 31 | public getIngressURL(ingressType: IngressType, tunnelId: string): URL { 32 | if (!this.enabled(ingressType)) { 33 | throw new Error('ingress_administratively_disabled'); 34 | } 35 | return IngressManager.getIngress(ingressType).getBaseUrl(tunnelId); 36 | } 37 | } -------------------------------------------------------------------------------- /src/ingress/utils.ts: -------------------------------------------------------------------------------- 1 | class IngressUtils { 2 | static getTunnelId(hostname: string | undefined, wildcardHost?: string): string | undefined { 3 | if (hostname === undefined) { 4 | return undefined; 5 | } 6 | 7 | const host = hostname.toLowerCase().split(":")[0]; 8 | if (host === undefined) { 9 | return undefined; 10 | } 11 | 12 | const tunnelId = host.split('.', 1)[0]; 13 | const parentDomain = host.slice(tunnelId.length + 1); 14 | if (wildcardHost) { 15 | if (wildcardHost.startsWith('*.')) { 16 | wildcardHost = wildcardHost.slice(2); 17 | } 18 | if (parentDomain != wildcardHost) { 19 | return undefined; 20 | } 21 | } 22 | return tunnelId; 23 | } 24 | } 25 | 26 | export default IngressUtils; -------------------------------------------------------------------------------- /src/listener/listener.ts: -------------------------------------------------------------------------------- 1 | 2 | type ListenerPending = (err?: Error) => void; 3 | 4 | export default class Listener { 5 | private static instances: Map> = new Map(); 6 | 7 | public static acquire(type: { new(port:number): T}, port: number): T { 8 | if (this.instances.has(port)) { 9 | const instance = this.instances.get(port) as T; 10 | instance.acquire(); 11 | return instance; 12 | } else { 13 | const instance = new type(port); 14 | this.instances.set(port, instance as ListenerBase); 15 | return instance; 16 | } 17 | } 18 | 19 | public static async release(port: number): Promise { 20 | const instance 
= this.instances.get(port) as T; 21 | if (!instance) { 22 | return; 23 | } 24 | const release = await instance["destroy"](); 25 | if (release) { 26 | this.instances.delete(port); 27 | } 28 | } 29 | } 30 | 31 | export abstract class ListenerBase { 32 | private _ref: number; 33 | private _listen_ref: number; 34 | private _listening: boolean; 35 | private _pending: Array | undefined; 36 | private _destroyed: boolean; 37 | public readonly port: number; 38 | 39 | constructor(port: number) { 40 | this.port = port; 41 | this._ref = 1; 42 | this._listen_ref = 0; 43 | this._listening = false; 44 | this._pending = undefined; 45 | this._destroyed = false; 46 | } 47 | 48 | public getPort(): number { 49 | return this.port; 50 | } 51 | 52 | public acquire(): void { 53 | this._ref++; 54 | } 55 | 56 | protected abstract _listen(): Promise; 57 | 58 | protected abstract _destroy(): Promise; 59 | 60 | protected abstract _close(): Promise; 61 | 62 | public async listen(): Promise { 63 | this._listen_ref++; 64 | if (this._listening) { 65 | return; 66 | } 67 | 68 | if (this._pending != undefined) { 69 | return new Promise((resolve, reject) => { 70 | const pending = (_err?: Error) => { 71 | _err ? reject(_err) : resolve(); 72 | }; 73 | this._pending!.push(pending); 74 | }) 75 | } 76 | 77 | return new Promise(async (resolve, reject) => { 78 | this._listening = false; 79 | this._pending = []; 80 | 81 | let err: Error | undefined = undefined; 82 | try { 83 | await this._listen(); 84 | this._listening = true; 85 | } catch (e: any) { 86 | err = e; 87 | } 88 | 89 | this._pending.push((_err) => { 90 | _err ? 
reject(_err) : resolve(); 91 | }); 92 | 93 | this._pending.map((fn) => fn(err)); 94 | this._pending = undefined; 95 | }); 96 | } 97 | 98 | public async close(): Promise { 99 | if (!this._listening) { 100 | return; 101 | } 102 | if (--this._listen_ref == 0) { 103 | await this._close(); 104 | this._listening = false; 105 | } 106 | } 107 | 108 | protected async destroy(): Promise { 109 | if (this._destroyed) { 110 | return false; 111 | } 112 | if (--this._ref == 0) { 113 | this._destroyed = true; 114 | await this._close(); 115 | this._listen_ref = 0; 116 | this._listening = false; 117 | await this._destroy(); 118 | return true; 119 | } 120 | return false; 121 | } 122 | } -------------------------------------------------------------------------------- /src/lock/index.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import RedisLockProvider from './redis-lock-provider.js'; 3 | import MemoryLockProvider from './memory-lock-provider.js'; 4 | import { Logger } from '../logger.js'; 5 | import LockProvider, { ProviderLock } from './lock-provider.js'; 6 | 7 | class Lock { 8 | private _lock: ProviderLock; 9 | private resource: string; 10 | private logger: any; 11 | 12 | constructor(resource: string, lock: ProviderLock, logger: any) { 13 | this._lock = lock; 14 | this.resource = resource; 15 | this.logger = logger; 16 | } 17 | 18 | public locked(): boolean { 19 | return this._lock.active(); 20 | } 21 | 22 | public async unlock(): Promise { 23 | return this._lock.unlock() 24 | .catch((err) => { 25 | this.logger.error({ 26 | message: `failed to unlock resource ${this.resource}: ${err.message}`, 27 | operation: 'unlock', 28 | }); 29 | return false; 30 | }) 31 | .then(() => { 32 | this.logger.isTraceEnabled() && 33 | this.logger.trace({ 34 | operation: 'unlock', 35 | resource: this.resource, 36 | }); 37 | return true; 38 | }); 39 | } 40 | } 41 | export { Lock }; 42 | 43 | export type LockType = "redis" 
| "mem" | "none"; 44 | export type LockServiceOpts = { 45 | callback: (err?: Error) => void; 46 | redisUrl?: URL; 47 | }; 48 | 49 | class LockService { 50 | private logger: any; 51 | private lockProvider!: LockProvider; 52 | 53 | constructor(type: LockType, opts: LockServiceOpts) { 54 | this.logger = Logger("lock-service"); 55 | 56 | switch (type) { 57 | case 'redis': 58 | this.lockProvider = new RedisLockProvider({ 59 | redisUrl: opts.redisUrl, 60 | callback: (err) => { 61 | typeof opts.callback === 'function' && process.nextTick(() => opts.callback(err)); 62 | } 63 | }); 64 | break; 65 | case 'none': 66 | case 'mem': 67 | this.lockProvider = new MemoryLockProvider(); 68 | typeof opts.callback === 'function' && process.nextTick(() => opts.callback()); 69 | break; 70 | default: 71 | assert.fail(`Unknown lock ${type}`); 72 | } 73 | } 74 | 75 | async destroy(): Promise { 76 | await this.lockProvider.destroy(); 77 | } 78 | 79 | async lock(resource: string): Promise { 80 | try { 81 | const lock = await this.lockProvider.lock(`lock:${resource}`); 82 | if (lock == null) { 83 | throw new Error(`lock provider returned null lock`); 84 | } 85 | 86 | this.logger.isTraceEnabled() && 87 | this.logger.trace({ 88 | operation: 'lock', 89 | resource, 90 | result: lock != null, 91 | }); 92 | return new Lock(resource, lock, this.logger); 93 | } catch (e: any) { 94 | this.logger.error({ 95 | message: `failed to obtain lock on ${resource}: ${e.message}`, 96 | operation: 'lock', 97 | }); 98 | return false; 99 | } 100 | } 101 | } 102 | 103 | export default LockService; -------------------------------------------------------------------------------- /src/lock/lock-provider.ts: -------------------------------------------------------------------------------- 1 | 2 | export interface ProviderLock { 3 | active: () => boolean; 4 | unlock: () => Promise; 5 | } 6 | 7 | export default abstract class LockProvider { 8 | public abstract lock(resource: string): Promise; 9 | public abstract destroy(): 
Promise; 10 | } -------------------------------------------------------------------------------- /src/lock/memory-lock-provider.ts: -------------------------------------------------------------------------------- 1 | import Mutex from "../utils/mutex.js"; 2 | import LockProvider, { ProviderLock } from "./lock-provider.js"; 3 | 4 | class MemoryLockProvider implements LockProvider { 5 | private _locks: { [key: string]: Mutex }; 6 | private _abort: AbortController; 7 | 8 | constructor() { 9 | this._locks = {}; 10 | this._abort = new AbortController(); 11 | } 12 | 13 | public async lock(resource: string): Promise { 14 | this._locks[resource] ??= new Mutex(); 15 | const mutex = this._locks[resource]; 16 | 17 | try { 18 | const locked = await mutex.acquire(this._abort.signal); 19 | if (!locked) { 20 | return null; 21 | } 22 | return { 23 | active: () => { return true }, 24 | unlock: async () => { 25 | mutex.release(); 26 | if (!mutex.locked()) { 27 | delete this._locks[resource]; 28 | } 29 | } 30 | } 31 | } catch (e: any) { 32 | delete this._locks[resource]; 33 | return null 34 | } 35 | } 36 | 37 | public async destroy(): Promise { 38 | this._abort.abort(); 39 | this._locks = {}; 40 | } 41 | } 42 | 43 | export default MemoryLockProvider; -------------------------------------------------------------------------------- /src/lock/redis-lock-provider.ts: -------------------------------------------------------------------------------- 1 | import Redis, { RedisClientType } from 'redis'; 2 | import Redlock from 'redlock'; 3 | import { Logger } from '../logger.js'; 4 | import LockProvider, { ProviderLock } from './lock-provider.js'; 5 | 6 | export type RedisLockOpts = { 7 | redisUrl: URL; 8 | callback: (err?: Error) => void; 9 | }; 10 | 11 | export default class RedisLockProvider implements LockProvider { 12 | private _redisClient: RedisClientType; 13 | private redlock!: Redlock; 14 | private logger: any; 15 | private destroyed: boolean = false; 16 | 17 | constructor(opts: 
RedisLockOpts) { 18 | const redisUrl = opts.redisUrl; 19 | 20 | const redis = this._redisClient = Redis.createClient({ 21 | url: redisUrl.href, 22 | legacyMode: true, 23 | }); 24 | 25 | this.logger = Logger("redis-lock"); 26 | 27 | redis.connect() 28 | .catch((err) => { 29 | this.logger.error({ 30 | operation: 'redis_error', 31 | message: `failed to connect to ${redisUrl}: ${err.message}`, 32 | err, 33 | }); 34 | typeof opts.callback === 'function' && process.nextTick(() => opts.callback(err)); 35 | }).then(() => { 36 | redis.on('error', (err) => { 37 | this.logger.error({ 38 | operation: 'redis_error', 39 | message: err.message, 40 | err 41 | }); 42 | }); 43 | this.redlock = new Redlock([redis], { 44 | retryDelay: 200, 45 | retryCount: 25, 46 | }); 47 | 48 | this.redlock.on("clientError", (err: Error) => { 49 | this.logger.debug({ 50 | operation: 'redlock', 51 | message: `redis redlock error: ${err.message}`, 52 | stack: err.stack, 53 | }); 54 | }); 55 | 56 | typeof opts.callback === 'function' && process.nextTick(() => opts.callback()); 57 | }); 58 | } 59 | 60 | async destroy() { 61 | this.destroyed = true; 62 | await this._redisClient.disconnect() 63 | .catch((err) => { 64 | this.logger.error({ 65 | operation: 'redlock', 66 | message: `failed to disconnect redlock: ${err.message}`, 67 | }); 68 | }); 69 | } 70 | 71 | async lock(resource: string): Promise { 72 | const leaseTime = 1000; 73 | try { 74 | const lock = await this.redlock.acquire([resource], leaseTime) 75 | this.logger.isTraceEnabled() && 76 | this.logger.trace({ 77 | operation: 'redlock', 78 | resource, 79 | leaseTime 80 | }); 81 | return new LockWrapper(this.redlock, lock, resource, leaseTime, this.logger); 82 | } catch (e: any) { 83 | this.logger.error({ 84 | message: `failed to acquire lock on ${resource}: ${e.message}`, 85 | operation: 'redlock', 86 | }); 87 | return null; 88 | } 89 | } 90 | } 91 | 92 | class LockWrapper implements ProviderLock { 93 | private redlock: Redlock; 94 | private lock: 
Redlock.Lock; 95 | private resource: string; 96 | private lock_active: boolean; 97 | private logger: any; 98 | private extendTimer: NodeJS.Timeout; 99 | 100 | constructor(redlock: Redlock, lock: Redlock.Lock, resource: string, leaseTime: number, logger: any) { 101 | this.logger = logger; 102 | this.redlock = redlock; 103 | this.lock = lock; 104 | this.resource = resource; 105 | this.lock_active = true; 106 | 107 | const extend = () => { 108 | this.redlock.extend(lock, leaseTime) 109 | .catch((err: Error) => { 110 | this.lock_active = false; 111 | this.logger.debug({ 112 | message: `failed to extend lock on ${this.resource}: ${err.message}`, 113 | operation: 'redlock', 114 | }); 115 | }) 116 | .then(() => { 117 | this.logger.debug({ 118 | message: `lock on ${this.resource} extended`, 119 | operation: 'redlock', 120 | }); 121 | 122 | this.extendTimer = setTimeout(extend, leaseTime/2); 123 | }); 124 | }; 125 | 126 | this.extendTimer = setTimeout(extend, leaseTime/2); 127 | } 128 | 129 | public async unlock(): Promise { 130 | this.lock_active = false; 131 | clearTimeout(this.extendTimer) 132 | return this.lock.unlock() 133 | .catch((err: Error) => { 134 | this.logger.debug({ 135 | message: `redlock unlock failed on resource ${this.resource}: ${err.message}`, 136 | operation: 'redlock', 137 | }); 138 | }); 139 | } 140 | 141 | public active(): boolean { 142 | return this.lock_active; 143 | } 144 | 145 | }; -------------------------------------------------------------------------------- /src/logger.js: -------------------------------------------------------------------------------- 1 | import Log4js from 'log4js'; 2 | import os from 'os'; 3 | import Config from './config.js'; 4 | 5 | Log4js.addLayout('json', function() { 6 | return function(logEvent) { 7 | const data = typeof logEvent.data[0] == 'string' ? 
{ 8 | message: logEvent.data[0] 9 | } : logEvent.data[0]; 10 | const logEntry = { 11 | timestamp: logEvent.startTime, 12 | data, 13 | ...logEvent.context, 14 | level: logEvent.level.levelStr, 15 | logger: logEvent.categoryName, 16 | pid: logEvent.pid, 17 | } 18 | return JSON.stringify(logEntry, undefined, 0); 19 | } 20 | }); 21 | 22 | const nullAppender = { 23 | configure: (config, layouts, findAppender, levels) => { 24 | return () => {} 25 | }, 26 | }; 27 | 28 | class LoggerFactory { 29 | constructor(namespace) { 30 | const logger = this._logger = Log4js.getLogger(namespace); 31 | const config = new Config(); 32 | 33 | const appender = process.env.EXPOSR_EMBEDDED ? 'null' : 'out'; 34 | Log4js.configure({ 35 | appenders: { 36 | out: { type: 'stdout', layout: { type: config?.get('log-format') || 'json', separator: ',' } }, 37 | null: { type: nullAppender } 38 | }, 39 | categories: { 40 | default: { appenders: [appender], level: config?.get("log-level") || 'info' } 41 | } 42 | }); 43 | 44 | logger.level = config.get("log-level"); 45 | logger.addContext('host', os.hostname()); 46 | 47 | logger.withContext = (key, value) => { 48 | logger.addContext(key, value); 49 | 50 | const logfn = (orig, fn, ...args) => { 51 | logger[fn] = orig; 52 | logger[fn](...args) 53 | logger.removeContext(key, value); 54 | }; 55 | 56 | ['trace', 'debug', 'info', 'warn', 'error', 'fatal'].forEach(fn => { 57 | const orig = logger[fn]; 58 | logger[fn] = (...args) => { return logfn(orig, fn, ...args); } 59 | }); 60 | 61 | return logger; 62 | } 63 | 64 | return logger; 65 | } 66 | } 67 | 68 | export function Logger(ns) { return new LoggerFactory(ns); }; -------------------------------------------------------------------------------- /src/self-test.js: -------------------------------------------------------------------------------- 1 | import dns from 'dns'; 2 | import net from 'net'; 3 | import fs from 'fs'; 4 | import sqlite3 from 'better-sqlite3'; 5 | 6 | async function selfTest() { 7 | const 
tests = [ 8 | { name: 'DNS lookup', testFn: testDNSLookup }, 9 | { name: 'HTTPS connection', testFn: testHTTPSConnection }, 10 | { name: 'SQLite3 module', testFn: testSQLiteModule } 11 | ]; 12 | 13 | let allTestsPassed = true; 14 | 15 | for (const test of tests) { 16 | const testResult = await test.testFn(); 17 | console.log(`${test.name} test: ${testResult ? 'PASS' : 'FAIL'}`); 18 | allTestsPassed = allTestsPassed && testResult; 19 | } 20 | 21 | return allTestsPassed; 22 | } 23 | 24 | function testDNSLookup() { 25 | return new Promise((resolve) => { 26 | dns.lookup('example.com', (error) => { 27 | if (error) { 28 | console.log('DNS error', error); 29 | } 30 | resolve(!error); 31 | }); 32 | }); 33 | } 34 | 35 | function testHTTPSConnection() { 36 | return new Promise((resolve) => { 37 | const socket = net.connect(443, 'cloudflare.com', () => { 38 | socket.end(); 39 | resolve(true); 40 | }); 41 | 42 | socket.on('error', (error) => { 43 | console.log('HTTPS error', error); 44 | resolve(false); 45 | }); 46 | }); 47 | } 48 | 49 | function testSQLiteModule() { 50 | const databaseName = 'self-test.db'; 51 | 52 | let result; 53 | try { 54 | const db = new sqlite3(databaseName); 55 | db.close(); 56 | result = true; 57 | } catch (error) { 58 | console.log('SQlite error:', error); 59 | result = false; 60 | } finally { 61 | fs.rmSync(databaseName); 62 | } 63 | return result; 64 | } 65 | 66 | export default selfTest; -------------------------------------------------------------------------------- /src/storage/memory-storage-provider.ts: -------------------------------------------------------------------------------- 1 | import { Logger } from '../logger.js'; 2 | import StorageProvider, { AtomicValue, StorageProviderListResult, StorageErrorNotFound, StorageProviderError, StorageProviderOpts } from './storage-provider.js'; 3 | import LockService from '../lock/index.js'; 4 | 5 | export type MemoryStorageProviderOpts = {}; 6 | type _MemoryStorageProviderOpts = StorageProviderOpts 
& MemoryStorageProviderOpts; 7 | 8 | class MemoryStorageProvider extends StorageProvider { 9 | private logger: any; 10 | private _lockService!: LockService; 11 | private db: { [key: string ]: any }; 12 | private timers: { [key: string ]: NodeJS.Timeout }; 13 | 14 | constructor(opts: _MemoryStorageProviderOpts) { 15 | super(); 16 | this.logger = Logger("memory-storage"); 17 | this.db = {}; 18 | this.timers = {}; 19 | 20 | new Promise((resolve: (lockService: LockService) => void, reject) => { 21 | const lock = new LockService("mem", { 22 | callback: (err?: Error) => { err ? reject(err) : resolve(lock) }, 23 | }); 24 | }).catch((err) => { 25 | typeof opts.callback === 'function' && process.nextTick(() => { opts.callback(err) }); 26 | }).then((lock) => { 27 | this._lockService = lock; 28 | typeof opts.callback === 'function' && process.nextTick(() => { opts.callback() }); 29 | }); 30 | } 31 | 32 | private updateTTL(ns: string, key: string, ttl?: number) { 33 | const compound_key = this.compound_key(ns, key); 34 | clearTimeout(this.timers[compound_key]); 35 | if (typeof ttl != 'number') { 36 | return; 37 | } 38 | this.timers[compound_key] = setTimeout(() => { 39 | delete this.db[ns][key] 40 | }, ttl * 1000); 41 | } 42 | 43 | protected async _destroy(): Promise { 44 | await this._lockService.destroy(); 45 | } 46 | 47 | protected async _init(ns: string): Promise { 48 | this.db[ns] = {}; 49 | } 50 | 51 | public async set(ns: string, key: string, value: string, ttl?: number): Promise { 52 | if (this.db[ns] === undefined) { 53 | throw new StorageProviderError(ns, key, new Error('namespace_not_found')); 54 | } 55 | 56 | if (this.db[ns][key]) { 57 | return false; 58 | } 59 | this.db[ns][key] = value; 60 | this.updateTTL(ns, key, ttl); 61 | return true; 62 | } 63 | 64 | public async put(ns: string, key: string, value: string, ttl?: number): Promise { 65 | if (this.db[ns] === undefined) { 66 | throw new StorageProviderError(ns, key, new Error('namespace_not_found')); 67 | } 68 | 
69 | this.db[ns][key] = value; 70 | this.updateTTL(ns, key, ttl); 71 | return true; 72 | } 73 | 74 | public async get(ns: string, key: string): Promise { 75 | if (this.db[ns] === undefined) { 76 | throw new StorageProviderError(ns, key, new Error('namespace_not_found')); 77 | } 78 | 79 | if (this.db[ns][key] === undefined) { 80 | throw new StorageErrorNotFound(ns, key); 81 | } 82 | return this.db[ns][key]; 83 | } 84 | 85 | public async get_multi(ns: string, keys: Array): Promise> { 86 | if (this.db[ns] === undefined) { 87 | throw new StorageProviderError(ns, new Error('namespace_not_found')); 88 | } 89 | 90 | return keys.map((k) => { return this.db[ns][k] ?? null; }); 91 | } 92 | 93 | public async get_and_set(ns: string, key: string): Promise { 94 | if (this.db[ns] === undefined) { 95 | throw new StorageProviderError(ns, key, new Error('namespace_not_found')); 96 | } 97 | 98 | const lock = await this._lockService.lock(key) 99 | if (!lock) { 100 | throw new StorageProviderError(ns, key, new Error('failed_to_lock_key')); 101 | } 102 | 103 | const value = this.db[ns][key] ?? 
null; 104 | if (value === null) { 105 | lock.unlock(); 106 | throw new StorageErrorNotFound(ns, key); 107 | } 108 | return { 109 | value, 110 | release: async (newValue?: string, newTTL?: number) => { 111 | if (newValue !== undefined) { 112 | this.db[ns][key] = newValue; 113 | } 114 | this.updateTTL(ns, key, newTTL); 115 | lock.unlock(); 116 | return true; 117 | } 118 | } 119 | } 120 | 121 | public async delete(ns: string, key: string): Promise { 122 | if (this.db[ns] === undefined) { 123 | throw new StorageProviderError(ns, key, new Error('namespace_not_found')); 124 | } 125 | if (this.db[ns][key] === undefined) { 126 | throw new StorageErrorNotFound(ns, key); 127 | } 128 | delete this.db[ns][key]; 129 | return true; 130 | } 131 | 132 | public async list(ns: string, cursor: string | undefined, count: number): Promise { 133 | if (this.db[ns] === undefined) { 134 | throw new StorageProviderError(ns, new Error('namespace_not_found')); 135 | } 136 | 137 | const cursor_num = Number(cursor ?? 0); 138 | const keys = Object.keys(this.db[ns]); 139 | const data = keys.slice(cursor_num, cursor_num + count); 140 | return { 141 | keys: data, 142 | cursor: data.length > 0 ? String(cursor_num + data.length) : null, 143 | } 144 | } 145 | } 146 | 147 | export default MemoryStorageProvider; -------------------------------------------------------------------------------- /src/storage/serializer.ts: -------------------------------------------------------------------------------- 1 | export interface Serializable {} 2 | 3 | export default class Serializer { 4 | 5 | static serialize(object: Serializable): string { 6 | return JSON.stringify(object, (key, value) => { 7 | if (key[0] == '_') { 8 | return undefined; 9 | } 10 | return value; 11 | }); 12 | } 13 | 14 | static deserialize(json: string | object, type: { new(): Type ;} ): Type { 15 | const obj = typeof json == 'object' ? 
json : JSON.parse(json) || {}; 16 | 17 | const merge = (target: any, source: any): Type => { 18 | for (const key of Object.keys(target)) { 19 | if (target[key] instanceof Array && source[key] instanceof Array) { 20 | target[key] = source[key]; 21 | } else if (target[key] instanceof Object && source[key] instanceof Object) { 22 | Object.assign(target[key], merge(target[key], source[key])); 23 | } else if (source[key] != undefined) { 24 | target[key] = source[key]; 25 | } 26 | } 27 | 28 | return target; 29 | } 30 | 31 | const canonicalObj = Object.assign(new type(), { 32 | ...merge(new type(), obj as Type) 33 | }) as Type; 34 | 35 | return canonicalObj; 36 | } 37 | 38 | } -------------------------------------------------------------------------------- /src/storage/storage-manager.ts: -------------------------------------------------------------------------------- 1 | import StorageProvider from "./storage-provider.js"; 2 | 3 | import MemoryStorageProvider from './memory-storage-provider.js'; 4 | import RedisStorageProvider from './redis-storage-provider.js'; 5 | import SqliteStorageProvider from './sqlite-storage-provider.js'; 6 | import PgsqlStorageProvider, { PgsqlStorageProviderOpts } from './pgsql-storage-provider.js'; 7 | import assert from "assert"; 8 | 9 | export type StorageManagerOpts = { 10 | pgsql?: PgsqlStorageProviderOpts; 11 | } 12 | 13 | export default class StorageManager { 14 | 15 | private static _storage: StorageProvider; 16 | 17 | public static async init(url: URL, opts?: StorageManagerOpts): Promise { 18 | 19 | try { 20 | await new Promise((resolve, reject) => { 21 | const callback = (err?: Error) => { 22 | err ? 
reject(err) : resolve(undefined); 23 | }; 24 | 25 | switch (url.protocol) { 26 | case 'memory:': 27 | this._storage = new MemoryStorageProvider({ 28 | url, 29 | callback, 30 | }); 31 | break; 32 | case 'postgres:': 33 | this._storage = new PgsqlStorageProvider({ 34 | url, 35 | callback, 36 | ...opts?.pgsql, 37 | }); 38 | break; 39 | case 'redis:': 40 | this._storage = new RedisStorageProvider({ 41 | url, 42 | callback, 43 | }); 44 | break; 45 | case 'sqlite:': 46 | this._storage = new SqliteStorageProvider({ 47 | url, 48 | callback, 49 | }); 50 | break; 51 | default: 52 | assert.fail(`Unknown storage ${url.protocol}`); 53 | } 54 | }); 55 | } catch (e: any) { 56 | throw e; 57 | } 58 | } 59 | 60 | public static async close(): Promise { 61 | this._storage.destroy(); 62 | } 63 | 64 | public static getStorage(): StorageProvider { 65 | return this._storage; 66 | } 67 | } -------------------------------------------------------------------------------- /src/storage/storage-provider.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | 3 | export type AtomicValue = { 4 | value: string | object | null, 5 | release: (newValue?: string, newTTL?: number) => Promise, 6 | } 7 | 8 | export type StorageProviderListResult = { 9 | keys: Array, 10 | cursor: string | null, 11 | } 12 | 13 | export type StorageProviderOpts = { 14 | url: URL, 15 | callback: (err?: Error) => void; 16 | } 17 | 18 | export default abstract class StorageProvider { 19 | 20 | public async init(ns: string): Promise { 21 | await this._init(ns); 22 | } 23 | 24 | public async destroy(): Promise { 25 | await this._destroy(); 26 | } 27 | 28 | protected compound_key(ns: string, key: string): string; 29 | protected compound_key(ns: string, key: Array): Array; 30 | protected compound_key(ns: string, key: string | Array): string | Array { 31 | assert(key !== undefined); 32 | assert(ns !== undefined); 33 | if (key instanceof Array) { 34 | return 
key.map((k) => `${ns}:${k}`); 35 | } else { 36 | return `${ns}:${key}`; 37 | } 38 | } 39 | 40 | protected key_only(ns: string, key: string): string { 41 | return key.slice(key.indexOf(ns) + ns.length + 1); 42 | } 43 | 44 | protected abstract _destroy(): Promise; 45 | protected abstract _init(ns: string): Promise; 46 | 47 | /** 48 | * Set a key, if not already exists 49 | * 50 | * @param ns Namespace 51 | * @param key Key to set 52 | * @param value Serialized json value to set 53 | * @param ttl Time to live in seconds, key will be deleted after this time 54 | * @throws {StorageErrorAlreadyExists} If key already exists 55 | * @throws {StorageProviderError} If failure of the underlying storage provider 56 | * @returns {Promise} Returns true if set successfully, false if key already exists 57 | */ 58 | public abstract set(ns: string, key: string, value: string, ttl?: number): Promise; 59 | 60 | /** 61 | * Put a key, will overwrite existing value if already exists 62 | * 63 | * @param ns Namespace 64 | * @param key Key to set 65 | * @param value Serialized json value to set 66 | * @param ttl Time to live in seconds, key will be deleted after this time 67 | * @throws {StorageProviderError} If failure of the underlying storage provider 68 | * @returns {Promise} Returns true if set successfully. 69 | */ 70 | public abstract put(ns: string, key: string, value: string, ttl?: number): Promise; 71 | 72 | /** 73 | * Read the value for a key. 74 | * 75 | * @param ns Namespace 76 | * @param key Key to read 77 | * @throws {StorageErrorNotFound} If not found 78 | * @throws {StorageProviderError} On failure of the underlying storage provider 79 | * @returns {Promise} Returns serialized json, or an deserialized object. 
80 | */ 81 | public abstract get(ns: string, key: string): Promise; 82 | 83 | /** 84 | * Read multiple keys at once 85 | * 86 | * @param ns Namespace 87 | * @param keys Keys to read 88 | * @throws {StorageProviderError} If failure of the underlying storage provider 89 | * @returns {Promise} Returns array of serialized json, or deserialized objects. 90 | */ 91 | public abstract get_multi(ns: string, keys: Array): Promise>; 92 | 93 | /** 94 | * Atomically get and set a key. 95 | * 96 | * Will take an exclusive lock on the key, return the value as an AtomicValue. 97 | * When done, the key must be released, with an optional new value. 98 | * 99 | * @param ns Namespace 100 | * @param key Key to get and set 101 | * @throws {StorageErrorNotFound} If not found 102 | * @throws {StorageProviderError} On failure of the underlying storage provider 103 | * @returns {Promise} Returns an atomic value. 104 | */ 105 | public abstract get_and_set(ns: string, key: string): Promise; 106 | 107 | /** 108 | * Delete a key 109 | * 110 | * @param ns Namespace 111 | * @param key Key to delete 112 | * @throws {StorageErrorNotFound} If not found 113 | * @throws {StorageProviderError} On failure of the underlying storage provider 114 | * @returns {Promise} Returns true if deleted successfully. 115 | */ 116 | public abstract delete(ns: string, key: string): Promise; 117 | 118 | /** 119 | * List keys in a namespace 120 | * 121 | * @param ns Namespace 122 | * @param cursor Cursor to continue from, or undefined to start from first key 123 | * @param count Number of results to return 124 | * @throws {StorageProviderError} On failure of the underlying storage provider 125 | * @returns {Promise} Returns a list of keys. 
126 | */ 127 | public abstract list(ns: string, cursor: string | undefined, count: number): Promise; 128 | } 129 | 130 | export class StorageErrorNotFound implements Error { 131 | public readonly name: string = "storage_error_not_found"; 132 | public message: string; 133 | public stack?: string; 134 | public ns: string; 135 | public key: string; 136 | 137 | constructor(ns: string, key: string) { 138 | this.ns = ns; 139 | this.key = key; 140 | this.message = `Key ${ns}:${key} was not found`; 141 | this.stack = new Error().stack; 142 | } 143 | } 144 | 145 | export class StorageErrorAlreadyExists implements Error { 146 | public readonly name: string = "storage_error_already_exists"; 147 | public message: string; 148 | public stack?: string; 149 | public ns: string; 150 | public key: string; 151 | 152 | constructor(ns: string, key: string) { 153 | this.ns = ns; 154 | this.key = key; 155 | this.message = `Key ${ns}:${key} already exists`; 156 | this.stack = new Error().stack; 157 | } 158 | } 159 | 160 | export class StorageProviderError implements Error { 161 | public readonly name: string = "storage_provider_error"; 162 | public message: string; 163 | public stack?: string; 164 | public ns: string; 165 | public key?: string; 166 | public inner: Error; 167 | 168 | constructor(ns: string, key: string, inner: Error); 169 | constructor(ns: string, inner: Error); 170 | constructor(ns: string, key?: string | Error, inner?: Error) { 171 | this.ns = ns; 172 | if (key instanceof Error) { 173 | inner = key; 174 | key = undefined 175 | } 176 | assert(inner !== undefined); 177 | this.key = key; 178 | this.inner = inner; 179 | if (key) { 180 | this.message = `Error in storage provider for ${ns}:${key}: ${inner.message}`; 181 | } else { 182 | this.message = `Error in storage provider for namespace ${ns}: ${inner.message}`; 183 | } 184 | this.stack = inner.stack; 185 | } 186 | } -------------------------------------------------------------------------------- 
/src/transport/cluster/cluster-transport.ts: -------------------------------------------------------------------------------- 1 | import { Duplex } from "stream"; 2 | import tls from "tls"; 3 | import net from "net"; 4 | import Transport, { TransportConnectionOptions, TransportOptions } from "../transport.js"; 5 | import ClusterManager from "../../cluster/cluster-manager.js"; 6 | 7 | export interface ClusterTransportOptions extends TransportOptions { 8 | nodeId: string, 9 | } 10 | 11 | export default class ClusterTransport extends Transport { 12 | private nodeId: string; 13 | constructor(opts: ClusterTransportOptions) { 14 | super(opts); 15 | this.nodeId = opts.nodeId; 16 | } 17 | 18 | public createConnection(opts: TransportConnectionOptions, callback: (err: Error | undefined, sock: Duplex) => void): Duplex { 19 | 20 | const clusterNode = ClusterManager.getNode(this.nodeId); 21 | if (!clusterNode) { 22 | const sock = new net.Socket(); 23 | sock.destroy(new Error('node_does_not_exist')); 24 | return sock; 25 | } 26 | 27 | let sock: tls.TLSSocket | net.Socket; 28 | 29 | const errorHandler = (err: Error) => { 30 | callback(err, sock); 31 | }; 32 | 33 | if (opts.tls?.enabled == true) { 34 | const tlsConnectOpts: tls.ConnectionOptions = { 35 | servername: opts.tls.servername, 36 | host: clusterNode.ip, 37 | port: opts.port || 0, 38 | ca: [ 39 | opts.tls?.cert?.toString(), 40 | ...tls.rootCertificates, 41 | ], 42 | }; 43 | sock = tls.connect(tlsConnectOpts, () => { 44 | sock.off('error', errorHandler); 45 | callback(undefined, sock); 46 | }); 47 | sock.once('error', errorHandler); 48 | } else { 49 | const socketConnectOpts: net.TcpSocketConnectOpts = { 50 | host: clusterNode.ip, 51 | port: opts.port || 0, 52 | }; 53 | sock = net.connect(socketConnectOpts, () => { 54 | sock.off('error', errorHandler); 55 | callback(undefined, sock); 56 | }); 57 | sock.once('error', errorHandler); 58 | } 59 | 60 | return sock; 61 | } 62 | 63 | protected async _destroy(): Promise { 64 | } 
65 | } -------------------------------------------------------------------------------- /src/transport/ssh/index.ts: -------------------------------------------------------------------------------- 1 | import SSHTransport from "./ssh-transport.js"; 2 | import SSHEndpoint from "./ssh-endpoint.js"; 3 | 4 | export { SSHTransport, SSHEndpoint }; -------------------------------------------------------------------------------- /src/transport/transport-endpoint.ts: -------------------------------------------------------------------------------- 1 | import { URL } from "url"; 2 | import Tunnel from "../tunnel/tunnel.js"; 3 | 4 | export type TransportEndpointOptions = { 5 | max_connections: number, 6 | } 7 | 8 | export interface EndpointResult { 9 | url: string, 10 | } 11 | 12 | export default abstract class TransportEndpoint { 13 | public destroyed: boolean = false; 14 | protected max_connections: number; 15 | 16 | constructor(opts: TransportEndpointOptions) { 17 | this.max_connections = opts.max_connections 18 | } 19 | 20 | public abstract getEndpoint(tunnel: Tunnel, baseUrl: URL): EndpointResult; 21 | 22 | protected abstract _destroy(): Promise; 23 | 24 | public async destroy(): Promise { 25 | if (this.destroyed) { 26 | return; 27 | } 28 | await this._destroy(); 29 | this.destroyed = true; 30 | } 31 | } -------------------------------------------------------------------------------- /src/transport/transport-service.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import { WebSocketEndpoint } from "./ws/index.js" 3 | import { SSHEndpoint } from "./ssh/index.js"; 4 | import Tunnel from '../tunnel/tunnel.js'; 5 | import { EndpointResult } from './transport-endpoint.js'; 6 | import { WebSocketEndpointOptions } from './ws/ws-endpoint.js'; 7 | import { SSHEndpointOptions, SSHEndpointResult } from './ssh/ssh-endpoint.js'; 8 | 9 | type TransportServiceOptions = { 10 | callback?: (err?: Error | 
export type TunnelTransports = {
    max_connections: number,
    ws: ({
        enabled: boolean,
    } & EndpointResult) | undefined,
    ssh: {
        enabled: boolean,
    } & SSHEndpointResult | undefined,
}

/**
 * Reference-counted singleton that owns the configured transport
 * endpoints (websocket and/or ssh).
 *
 * The first construction creates the endpoints and reports readiness
 * (or the first endpoint error) through opts.callback; subsequent
 * constructions return the existing instance and bump the refcount.
 * destroy() only tears the endpoints down when the last reference is
 * released.
 */
class TransportService {
    static instance: TransportService | undefined;
    static ref: number;

    private max_connections!: number;
    private transports!: {
        ws: WebSocketEndpoint | undefined,
        ssh: SSHEndpoint | undefined,
    };

    constructor(opts?: TransportServiceOptions) {
        // Singleton: returning an object from a constructor replaces `this`,
        // so repeat callers share the first instance.
        if (TransportService.instance instanceof TransportService) {
            TransportService.ref++;
            return TransportService.instance
        }
        TransportService.ref = 1;
        TransportService.instance = this;

        // opts is only optional for re-entrant (singleton) calls; the first
        // construction must provide configuration.
        assert(opts != undefined, "opts is undefined");

        this.transports = {
            ws: undefined,
            ssh: undefined,
        };
        this.max_connections = opts.max_connections || 1;

        // Each enabled endpoint contributes a readiness promise resolved
        // (or rejected) by the endpoint's own callback.
        const ready = [];
        if (opts.ws && opts.ws.enabled === true) {
            const promise = new Promise((resolve, reject) => {
                this.transports.ws = new WebSocketEndpoint({
                    ...opts.ws,
                    max_connections: this.max_connections,
                    callback: (err?: Error) => err ? reject(err) : resolve(undefined),
                });
            });
            ready.push(promise);
        }

        if (opts?.ssh?.enabled === true) {
            const promise = new Promise((resolve, reject) => {
                this.transports.ssh = new SSHEndpoint({
                    ...opts.ssh,
                    max_connections: this.max_connections,
                    callback: (err?: Error) => err ? reject(err) : resolve(undefined),
                });
            });
            ready.push(promise);
        }

        // Report overall readiness once: success when every endpoint is up,
        // otherwise the first endpoint error.
        Promise.all(ready)
            .then(() => {
                typeof opts.callback === 'function' && opts.callback();
            })
            .catch((err) => {
                typeof opts.callback === 'function' && opts.callback(err);
            });
    }

    public async destroy(): Promise<void> {
        // Only the last release tears down the shared endpoints.
        if (--TransportService.ref == 0) {
            TransportService.instance = undefined;
            await Promise.allSettled([
                this.transports.ws?.destroy(),
                this.transports.ssh?.destroy()
            ]);
        }
    }

    public getTransports(tunnel: Tunnel, baseUrl: string): TunnelTransports;
    public getTransports(tunnel: Tunnel, baseUrl: URL | undefined): TunnelTransports;
    /**
     * Describe the transport endpoints available to a tunnel.
     *
     * Accepts the base URL as a string or URL; an unparsable/missing base
     * URL yields a result with both transports undefined.
     */
    public getTransports(tunnel: Tunnel, baseUrl: any): TunnelTransports {
        let _baseUrl: URL | undefined;

        const transports: TunnelTransports = {
            max_connections: this.max_connections,
            ws: undefined,
            ssh: undefined,
        };

        if (typeof baseUrl == "string") {
            try {
                _baseUrl = new URL(baseUrl);
            } catch (e: any) {
                _baseUrl = undefined;
            }
        } else {
            _baseUrl = baseUrl;
        }

        if (_baseUrl == undefined) {
            return transports;
        }

        if (this.transports.ws instanceof WebSocketEndpoint) {
            transports.ws = {
                enabled: tunnel.config.transport?.ws?.enabled || false,
                ...this.transports.ws.getEndpoint(tunnel, _baseUrl),
            }
        }

        if (this.transports.ssh instanceof SSHEndpoint) {
            transports.ssh = {
                enabled: tunnel.config.transport?.ssh?.enabled || false,
                ...this.transports.ssh.getEndpoint(tunnel, _baseUrl),
            }
        }

        return transports;
    }

}

export default TransportService;
-------------------------------------------------------------------------------- 1 | import crypto from 'crypto'; 2 | import { EventEmitter } from 'events'; 3 | import { Duplex } from 'stream'; 4 | 5 | type TransportConnectTlsOptions = { 6 | enabled: boolean, 7 | servername?: string, 8 | cert?: Buffer, 9 | }; 10 | 11 | export type TransportConnectionOptions = { 12 | remoteAddr: string, 13 | tunnelId?: string, 14 | port?: number, 15 | tls?: TransportConnectTlsOptions, 16 | }; 17 | 18 | export interface TransportOptions { 19 | max_connections?: number 20 | } 21 | 22 | export default abstract class Transport extends EventEmitter { 23 | public readonly max_connections: number; 24 | public destroyed: boolean = false; 25 | public readonly id: string; 26 | 27 | constructor(opts: TransportOptions) { 28 | super(); 29 | this.max_connections = opts.max_connections || 1; 30 | this.id = crypto.randomUUID(); 31 | } 32 | 33 | public abstract createConnection(opts: TransportConnectionOptions, callback: (err: Error | undefined, sock: Duplex) => void): Duplex; 34 | 35 | protected abstract _destroy(): Promise; 36 | 37 | public async destroy(err?: Error): Promise { 38 | this.destroyed = true; 39 | this._destroy(); 40 | this.emit('close', err); 41 | this.removeAllListeners(); 42 | } 43 | 44 | } -------------------------------------------------------------------------------- /src/transport/ws/index.ts: -------------------------------------------------------------------------------- 1 | import WebSocketTransport from "./ws-transport.js"; 2 | import WebSocketEndpoint from "./ws-endpoint.js"; 3 | 4 | export { WebSocketTransport, WebSocketEndpoint }; -------------------------------------------------------------------------------- /src/transport/ws/ws-transport.ts: -------------------------------------------------------------------------------- 1 | import WebSocket from 'ws'; 2 | import Transport, { TransportOptions } from '../transport.js'; 3 | import { WebSocketMultiplex } from 
'@exposr/ws-multiplex'; 4 | import { Duplex } from 'stream'; 5 | 6 | export type WebSocketTransportOptions = TransportOptions & { 7 | tunnelId: string, 8 | socket: WebSocket, 9 | }; 10 | 11 | export default class WebSocketTransport extends Transport { 12 | private wsm: WebSocketMultiplex; 13 | 14 | constructor(options: WebSocketTransportOptions) { 15 | super({ 16 | max_connections: options.max_connections 17 | }); 18 | 19 | this.wsm = new WebSocketMultiplex(options.socket, { 20 | reference: options.tunnelId 21 | }); 22 | 23 | this.wsm.once('error', (err: Error) => { 24 | this.destroy(err); 25 | }); 26 | 27 | this.wsm.once('close', () => { 28 | this.destroy(); 29 | }); 30 | } 31 | 32 | public createConnection(opts: any = {}, callback: (err: Error | undefined, sock: Duplex) => void): Duplex { 33 | return this.wsm.createConnection({}, callback); 34 | } 35 | 36 | protected async _destroy(): Promise { 37 | this.wsm.removeAllListeners(); 38 | await this.wsm.destroy(); 39 | } 40 | } -------------------------------------------------------------------------------- /src/tunnel/altname-service.ts: -------------------------------------------------------------------------------- 1 | import dns from 'dns'; 2 | import Storage from '../storage/storage.js'; 3 | import { Logger } from '../logger.js'; 4 | import { Serializable } from '../storage/serializer.js'; 5 | 6 | class AltName implements Serializable { 7 | public tunnelId?: string; 8 | public created_at?: string; 9 | 10 | constructor(tunnelId?: string, created_at?: string) { 11 | this.tunnelId = tunnelId; 12 | this.created_at = created_at; 13 | } 14 | } 15 | 16 | class AltNameService { 17 | private db: Storage; 18 | private logger: any; 19 | 20 | constructor() { 21 | this.db = new Storage("ingress-altnames"); 22 | this.logger = Logger("alt-name-service"); 23 | } 24 | 25 | public async destroy(): Promise { 26 | await this.db.destroy(); 27 | } 28 | 29 | private _key(service: string, altName: string): string { 30 | return 
`${service}-${altName}`.toLowerCase(); 31 | } 32 | 33 | private async _set(service: string, altName: string, tunnelId: string): Promise { 34 | const altNameData: AltName = new AltName(tunnelId, new Date().toISOString()); 35 | const key = this._key(service, altName); 36 | return this.db.set(key, altNameData); 37 | } 38 | 39 | private async _get(service: string, altName: string): Promise { 40 | const res = await this.db.read(this._key(service, altName), AltName); 41 | return res instanceof AltName ? res : undefined; 42 | } 43 | 44 | private async _del(service: string, altName: string, tunnelId?: string): Promise { 45 | const obj = await this._get(service, altName); 46 | if (tunnelId === undefined || obj?.tunnelId === tunnelId) { 47 | return this.db.delete(this._key(service, altName)); 48 | } else { 49 | return false; 50 | } 51 | } 52 | 53 | public async update(service: string, tunnelId: string, add: Array | undefined, remove?: Array): Promise> { 54 | add ??= [] 55 | remove ??= [] 56 | 57 | const adds = add.map((an) => this._set(service, an, tunnelId)); 58 | const dels = remove.map((an) => this._del(service, an, tunnelId)); 59 | await Promise.allSettled([...adds, ...dels]); 60 | 61 | const result = (await Promise.allSettled([...add, ...remove].flatMap(async (an) => { 62 | const obj = await this._get(service, an); 63 | return obj?.tunnelId === tunnelId ? an : []; 64 | }))).flatMap(res => res.status === 'fulfilled' ? 
res.value : []); 65 | 66 | this.logger.isTraceEnabled() && 67 | this.logger.trace({ 68 | operation: 'update', 69 | service, 70 | tunnelId, 71 | add, 72 | remove, 73 | result, 74 | }); 75 | return result; 76 | } 77 | 78 | public async get(service: string, altName: string): Promise { 79 | const obj = await this._get(service, altName); 80 | return obj?.tunnelId; 81 | } 82 | 83 | static async _resolve(domain: string, altName: string): Promise> { 84 | return new Promise(async (resolve, reject) => { 85 | try { 86 | const res = await dns.promises.resolveCname(altName); 87 | resolve(res.includes(domain) ? [altName]: []); 88 | } catch (e: any) { 89 | reject(); 90 | } 91 | }); 92 | } 93 | 94 | static async resolve(domain: string, altNames: Array): Promise> { 95 | const resolved = await Promise.allSettled(altNames.flatMap((altName) => { 96 | return AltNameService._resolve(domain, altName); 97 | })); 98 | return [...new Set(resolved 99 | .flatMap(res => res.status === 'fulfilled' ? res.value : []))]; 100 | } 101 | 102 | } 103 | 104 | export default AltNameService; -------------------------------------------------------------------------------- /src/tunnel/tunnel-config.ts: -------------------------------------------------------------------------------- 1 | import { Serializable } from "../storage/serializer.js"; 2 | 3 | type TunnelTransportConfig = { 4 | token?: string, 5 | max_connections: number, 6 | ws: TunnelTransportTypeConfig, 7 | ssh: TunnelTransportTypeConfig, 8 | } 9 | 10 | type TunnelTransportTypeConfig = { 11 | enabled: boolean, 12 | } 13 | 14 | export type TunnelIngressConfig = { 15 | http: TunnelHttpIngressConfig, 16 | sni: TunnelIngressTypeConfig, 17 | } 18 | 19 | export type TunnelIngressTypeConfig = { 20 | enabled: boolean, 21 | url: string | undefined, 22 | urls: Array, 23 | } 24 | 25 | export type TunnelHttpIngressConfig = TunnelIngressTypeConfig & { 26 | alt_names: Array, 27 | } 28 | 29 | export type TunnelTargetConfig = { 30 | url: string | undefined 31 | } 
32 | 33 | export class TunnelConfig implements Serializable { 34 | public readonly id?: string; 35 | public readonly account?: string; 36 | 37 | public transport: TunnelTransportConfig = { 38 | token: undefined, 39 | max_connections: 1, 40 | ws: { 41 | enabled: false 42 | }, 43 | ssh: { 44 | enabled: false 45 | } 46 | } 47 | 48 | public ingress: TunnelIngressConfig = { 49 | http: { 50 | enabled: false, 51 | url: undefined, 52 | urls: [], 53 | alt_names: [], 54 | }, 55 | sni: { 56 | enabled: false, 57 | url: undefined, 58 | urls: [], 59 | } 60 | } 61 | 62 | public target: TunnelTargetConfig = { 63 | url: undefined 64 | } 65 | 66 | public created_at?: string; 67 | public updated_at?: string; 68 | 69 | constructor(tunnelId?: string, account?: string) { 70 | this.id = tunnelId; 71 | this.account = account; 72 | } 73 | } 74 | 75 | export function cloneTunnelConfig(tunnelConfig: TunnelConfig): TunnelConfig { 76 | const stringify = (object: any) => JSON.stringify(object, (key, value) => { 77 | if (key[0] == '_') { 78 | return undefined; 79 | } 80 | return value; 81 | }); 82 | 83 | return Object.assign(new TunnelConfig(tunnelConfig.id, tunnelConfig.account), JSON.parse(stringify(tunnelConfig))); 84 | } -------------------------------------------------------------------------------- /src/tunnel/tunnel.ts: -------------------------------------------------------------------------------- 1 | import { TunnelConfig } from "./tunnel-config.js"; 2 | import Transport from "../transport/transport.js"; 3 | 4 | export type TunnelConnectionNode = string; 5 | export type TunnelConnectionId = string; 6 | 7 | export type TunnelConnection = { 8 | connection_id: TunnelConnectionId, 9 | node: TunnelConnectionNode, 10 | peer: string, 11 | transport?: Transport, 12 | local: boolean, 13 | connected: boolean, 14 | connected_at?: number, 15 | disconnected_at?: number, 16 | alive_at: number, 17 | } 18 | 19 | export type TunnelState = { 20 | connected: boolean, 21 | connected_at?: number, 22 | 
disconnected_at?: number, 23 | alive_at?: number, 24 | alive_connections: number, 25 | connections: Array, 26 | } 27 | 28 | export class Tunnel { 29 | public readonly id?: string; 30 | public readonly account?: string; 31 | public config: TunnelConfig; 32 | public readonly state: TunnelState 33 | 34 | constructor(config?: TunnelConfig, state?: TunnelState) { 35 | this.id = config?.id; 36 | this.account = config?.account; 37 | this.config = config || new TunnelConfig(); 38 | this.state = state || { 39 | connected: false, 40 | alive_connections: 0, 41 | connections: [], 42 | }; 43 | } 44 | } 45 | 46 | export default Tunnel; -------------------------------------------------------------------------------- /src/utils/errors.js: -------------------------------------------------------------------------------- 1 | class CustomError extends Error { 2 | constructor(code, message) { 3 | super(); 4 | this.code = `${code}`; 5 | this.errno = code; 6 | this.message = message || this.code; 7 | } 8 | } 9 | 10 | export default CustomError; 11 | 12 | export const ERROR_TUNNEL_NOT_FOUND = 'TUNNEL_NOT_FOUND'; 13 | export const ERROR_TUNNEL_NOT_CONNECTED = 'TUNNEL_NOT_CONNECTED'; 14 | export const ERROR_TUNNEL_ALREADY_CONNECTED = 'TUNNEL_ALREADY_CONNECTED'; 15 | export const ERROR_TUNNEL_HTTP_INGRESS_DISABLED = 'TUNNEL_HTTP_INGRESS_DISABLED'; 16 | export const ERROR_TUNNEL_TRANSPORT_REQUEST_LIMIT = 'TUNNEL_TRANSPORT_REQUEST_LIMIT'; 17 | export const ERROR_TUNNEL_TRANSPORT_CON_TIMEOUT = 'TUNNEL_TRANSPORT_CON_TIMEOUT'; 18 | export const ERROR_TUNNEL_TARGET_CON_REFUSED = 'TUNNEL_TARGET_CON_RESET'; 19 | export const ERROR_TUNNEL_TARGET_CON_FAILED = 'TUNNEL_TARGET_CON_FAILED'; 20 | export const ERROR_TUNNEL_INGRESS_BAD_ALT_NAMES = 'TUNNEL_INGRESS_BAD_ALT_NAMES'; 21 | export const ERROR_HTTP_INGRESS_REQUEST_LOOP = 'HTTP_INGRESS_REQUEST_LOOP'; 22 | export const ERROR_UNKNOWN_ERROR = 'UNKNOWN_ERROR'; 23 | export const ERROR_BAD_INPUT = 'BAD_INPUT'; 24 | export const ERROR_AUTH_NO_ACCESS_TOKEN = 
'AUTH_NO_TOKEN'; 25 | export const ERROR_AUTH_PERMISSION_DENIED = 'PERMISSION_DENIED'; 26 | -------------------------------------------------------------------------------- /src/utils/hostname.ts: -------------------------------------------------------------------------------- 1 | import portNumbers from 'port-numbers'; 2 | 3 | export default class Hostname { 4 | static parse(host: string, port?: number | number | undefined): URL | undefined; 5 | static parse(host: string, port?: string | number | undefined): URL | undefined { 6 | let url: URL; 7 | 8 | if (!/:\/\//.test(host)) { 9 | host = `tcp://${host}`; 10 | } 11 | 12 | if (typeof port == 'string') { 13 | port = parseInt(port); 14 | } 15 | 16 | if (port == 0) { 17 | port = undefined; 18 | } 19 | 20 | if (port) { 21 | host += `:${port}`; 22 | port = `${port}`; 23 | } 24 | 25 | try { 26 | url = new URL(host); 27 | } catch (e) { 28 | return undefined; 29 | } 30 | 31 | port = !url.port ? undefined : url.port; 32 | let protocol = url.protocol.slice(0, -1); 33 | 34 | let portInfo = portNumbers.getPort(protocol); 35 | const serviceInfo = portNumbers.getService(parseInt(url?.port || '0')); 36 | if (portInfo == null) { 37 | if (serviceInfo != null) { 38 | protocol = serviceInfo.name; 39 | portInfo = portNumbers.getPort(protocol); 40 | } 41 | } 42 | if (!port && portInfo != null) { 43 | port = `${portInfo.port}`; 44 | } 45 | 46 | try { 47 | return new URL(`${protocol}://${url.hostname}:${port}`); 48 | } catch (e) { 49 | return url; 50 | } 51 | } 52 | 53 | static isTLS(url: URL): boolean { 54 | const tls = [ 55 | 'tcps', 56 | 'tls', 57 | 'https', 58 | 'wss', 59 | ]; 60 | 61 | return tls.includes(url.protocol.slice(0, -1)); 62 | } 63 | 64 | static getPort(url: URL): number { 65 | const mapping: { [k: string]: string } = { 66 | 'ws': 'http', 67 | 'wss': 'https', 68 | }; 69 | 70 | if (url.port != '') { 71 | return parseInt(url.port); 72 | } 73 | 74 | let protocol = url.protocol.slice(0, -1); 75 | protocol = mapping[protocol] 
?? protocol; 76 | 77 | const portInfo = portNumbers.getPort(protocol); 78 | return portInfo ? portInfo.port : 0; 79 | } 80 | } -------------------------------------------------------------------------------- /src/utils/http-headers.js: -------------------------------------------------------------------------------- 1 | export const HTTP_HEADER_CONNECTION = 'connection'; 2 | export const HTTP_HEADER_EXPOSR_VIA = 'exposr-via'; 3 | export const HTTP_HEADER_FORWARDED = 'forwarded'; 4 | export const HTTP_HEADER_HOST = 'host'; 5 | export const HTTP_HEADER_X_FORWARDED_FOR = 'x-forwarded-for'; 6 | export const HTTP_HEADER_X_FORWARDED_HOST = 'x-forwarded-host'; 7 | export const HTTP_HEADER_X_FORWARDED_PORT = 'x-forwarded-port'; 8 | export const HTTP_HEADER_X_FORWARDED_PROTO = 'x-forwarded-proto'; 9 | export const HTTP_HEADER_X_REAL_IP = 'x-real-ip'; 10 | export const HTTP_HEADER_X_SCHEME = 'x-scheme'; -------------------------------------------------------------------------------- /src/utils/misc.ts: -------------------------------------------------------------------------------- 1 | import crypto from 'crypto'; 2 | 3 | export function symDifference(a: Array, b: Array): Array { 4 | const as = new Set(a); 5 | const bs = new Set(b); 6 | 7 | return [ 8 | ...a.filter(x => !bs.has(x)), 9 | ...b.filter(x => !as.has(x)) 10 | ]; 11 | } 12 | 13 | export function difference(a: Array, b: Array): Array { 14 | const bs = new Set(b); 15 | return [ 16 | ...a.filter(x => !bs.has(x)), 17 | ] 18 | } 19 | 20 | export function safeEqual(input: string, allowed: string): boolean { 21 | const autoReject = (input.length !== allowed.length); 22 | if (autoReject) { 23 | allowed = input; 24 | } 25 | const isMatch = crypto.timingSafeEqual(Buffer.from(input), Buffer.from(allowed)); 26 | return (!autoReject && isMatch); 27 | } -------------------------------------------------------------------------------- /src/utils/mutex.ts: 
-------------------------------------------------------------------------------- 1 | class Mutex { 2 | private _locked: boolean; 3 | private _pending: { index: number, acquire: () => void }[]; 4 | private _index: number; 5 | 6 | constructor() { 7 | this._locked = false; 8 | this._pending = []; 9 | this._index = 0; 10 | } 11 | 12 | public async acquire(cancelSignal: AbortSignal): Promise { 13 | 14 | return new Promise((resolve, reject) => { 15 | const index = this._index++; 16 | 17 | if (cancelSignal?.aborted == true) { 18 | return reject(false); 19 | } 20 | 21 | if (!this._locked) { 22 | this._locked = true; 23 | return resolve(true); 24 | } 25 | 26 | const handler = () => { 27 | cancelSignal?.removeEventListener('abort', handler); 28 | if (cancelSignal?.aborted == true) { 29 | this._pending = this._pending.filter((obj) => obj.index != index); 30 | reject(false); 31 | } else { 32 | this._locked = true; 33 | resolve(true); 34 | } 35 | }; 36 | 37 | if (cancelSignal) { 38 | cancelSignal.addEventListener('abort', handler, { once: true }); 39 | } 40 | this._pending.push({ 41 | index, 42 | acquire: () => { 43 | handler(); 44 | } 45 | }); 46 | }); 47 | } 48 | 49 | public release(): void { 50 | if (this._pending.length > 0) { 51 | const pending = this._pending.shift(); 52 | pending?.acquire(); 53 | } else { 54 | this._locked = false; 55 | } 56 | } 57 | 58 | public locked(): boolean { 59 | return this._locked; 60 | } 61 | } 62 | 63 | export default Mutex; -------------------------------------------------------------------------------- /src/version.js: -------------------------------------------------------------------------------- 1 | import child_process from 'child_process'; 2 | import { 3 | BUILD_DATE, 4 | BUILD_GIT_BRANCH, 5 | BUILD_GIT_COMMIT, 6 | BUILD_MACHINE, 7 | BUILD_USER, 8 | BUILD_VERSION, 9 | } from '../build.js'; 10 | 11 | import package_json from '../package.json' assert { type: "json" }; 12 | 13 | class Version { 14 | 15 | static useragent = 
`exposr-cli/${Version.getVersion().version}`; 16 | 17 | static version = Version.getVersion(); 18 | 19 | static getVersion() { 20 | 21 | const gitVersion = Version.gitVersion(); 22 | const packageVersion = Version.packageVersion(); 23 | const buildVersion = BUILD_VERSION; 24 | 25 | const build = { 26 | branch: BUILD_GIT_BRANCH, 27 | commit: BUILD_GIT_COMMIT, 28 | date: BUILD_DATE, 29 | user: BUILD_USER, 30 | machine: BUILD_MACHINE, 31 | }; 32 | 33 | const version = { 34 | version: buildVersion || gitVersion || packageVersion, 35 | package: packageVersion, 36 | build 37 | } 38 | Version.version = version; 39 | return version; 40 | } 41 | 42 | static gitVersion() { 43 | try { 44 | const obj = child_process.spawnSync("git", ["describe", "--tags", "--always", "--dirty"]); 45 | if (!obj.error && obj.stdout) { 46 | return obj.stdout.toString('utf-8').trim(); 47 | } 48 | } catch (e) {} 49 | return undefined; 50 | } 51 | 52 | static packageVersion() { 53 | return package_json?.version; 54 | } 55 | 56 | } 57 | 58 | export default Version; -------------------------------------------------------------------------------- /test/e2e/e2e-utils.js: -------------------------------------------------------------------------------- 1 | import http from 'http'; 2 | import child_process from 'child_process'; 3 | import ssh from 'ssh2'; 4 | import net from 'net'; 5 | import crypto from 'crypto'; 6 | 7 | export const exposrCliImageTag = "unstable"; 8 | const defaultBaseApi = "http://localhost:8080"; 9 | 10 | export const sshClient = (host, port, username, password, target) => { 11 | const client = new ssh.Client(); 12 | 13 | client.on('error', (err) => { 14 | console.log(err); 15 | }) 16 | 17 | client.on('ready', () => { 18 | client.forwardIn(target.hostname, 0, (err, port) => { 19 | }).on('tcp connection', (info, accept, reject) => { 20 | const targetSock = net.connect(target.port, target.hostname, () => { 21 | const sock = accept(); 22 | targetSock.pipe(sock); 23 | 
/**
 * Starts an HTTP echo server that replies 200 with the request body.
 *
 * @param {number} port port to listen on (default 10000)
 * @returns {Promise<Function>} async terminator that detaches the request
 *          handler and closes the server
 */
export const createEchoServer = async (port = 10000) => {
    const server = http.createServer();

    const echo = (request, response) => {
        const chunks = [];
        request
            .on('data', (chunk) => chunks.push(chunk))
            .on('end', () => {
                response.statusCode = 200;
                response.end(Buffer.concat(chunks).toString());
            });
    };

    server.on('request', echo);
    server.listen(port);

    return async () => {
        server.removeAllListeners('request');
        await new Promise((resolve) => {
            server.close(resolve);
        });
    };
};
/**
 * Starts an exposr CLI client in a detached docker container pointed at a
 * locally running exposrd instance.
 *
 * @param {string[]} args extra CLI arguments (e.g. tunnel connect ...)
 * @param {number} [port] exposrd API port on the host (default 8080)
 * @returns {Function} terminator that kills the container
 */
export const startExposr = (args, port) => {
    // Random container name so the terminator can target exactly this run.
    const name = crypto.randomBytes(20).toString('hex');
    port ??= 8080;
    const obj = child_process.spawn("docker", [
        "run", "--rm", "-t", "--add-host", "host.docker.internal:host-gateway",
        "--name", name,
        `ghcr.io/exposr/exposr:${exposrCliImageTag}`,
        "--non-interactive",
        "-s", `http://host.docker.internal:${port}`,
    ].concat(args), {detached: true});

    // Line-buffer stdout: log each complete line, keep the trailing
    // partial line buffered until more data arrives. (Previously the
    // buffer was never cleared after logging, so already-logged text was
    // re-prepended to the next chunk, and partial lines were logged.)
    let buf = '';
    obj.stdout.on('data', (data) => {
        buf += data.toString('utf-8');
        const lines = buf.split('\n');
        buf = lines.pop(); // partial (or empty) tail
        for (const line of lines) {
            console.log(`exposr-cli output "${line}"`);
        }
    })

    return () => {
        child_process.spawnSync("docker", ["kill", name]);
    };
};
}); 36 | assert(res.status == 201, "/v1/account did not return 201"); 37 | const data = await res.json(); 38 | assert(typeof data.account_id == 'string', "no account returned") 39 | assert(typeof data.account_id_hr == 'string', "no human readable account returned") 40 | }); 41 | 42 | const createAccount = async () => { 43 | const res = await fetch(`${baseApi}/v1/account`, { 44 | method: 'POST' 45 | }); 46 | return res.json(); 47 | }; 48 | 49 | const getAuthToken = async (accountId) => { 50 | const res = await fetch(`${baseApi}/v1/account/${accountId}/token`); 51 | const data = await res.json(); 52 | return data.token; 53 | }; 54 | 55 | const putTunnel = async (authToken, tunnelId, opts = {}) => { 56 | const res = await fetch(`${baseApi}/v1/tunnel/${tunnelId}`, { 57 | method: 'PUT', 58 | headers: { 59 | 'Authorization': `Bearer ${authToken}`, 60 | 'Content-Type': 'application/json', 61 | }, 62 | body: JSON.stringify(opts) 63 | }); 64 | return res; 65 | } 66 | 67 | const getTunnel = async(authToken, tunnelId) => { 68 | const res = await fetch(`${baseApi}/v1/tunnel/${tunnelId}`, { 69 | method: 'GET', 70 | headers: { 71 | 'Authorization': `Bearer ${authToken}` 72 | }, 73 | }); 74 | return res; 75 | }; 76 | 77 | const patchTunnel = async (authToken, tunnelId, opts = {}) => { 78 | const res = await fetch(`${baseApi}/v1/tunnel/${tunnelId}`, { 79 | method: 'PATCH', 80 | headers: { 81 | 'Authorization': `Bearer ${authToken}`, 82 | 'Content-Type': 'application/json', 83 | }, 84 | body: JSON.stringify(opts) 85 | }); 86 | return res; 87 | } 88 | 89 | it('API create tunnel ', async () => { 90 | const account = await createAccount(); 91 | const authToken = await getAuthToken(account.account_id); 92 | 93 | const tunnelId = crypto.randomBytes(20).toString('hex'); 94 | 95 | const res = await putTunnel(authToken, tunnelId); 96 | assert(res.status == 200, "did not get 200 from create tunnel api"); 97 | 98 | const data = await res.json(); 99 | assert(data.id == tunnelId, `tunnel not 
created, got ${data}`); 100 | }); 101 | 102 | it('API create/update tunnel ', async () => { 103 | const account = await createAccount(); 104 | const authToken = await getAuthToken(account.account_id); 105 | 106 | const tunnelId = crypto.randomBytes(20).toString('hex'); 107 | 108 | let res = await putTunnel(authToken, tunnelId); 109 | assert(res.status == 200, "did not get 200 from create tunnel api"); 110 | let data = await res.json(); 111 | assert(data.id == tunnelId, `tunnel not created, got ${data}`); 112 | 113 | res = await patchTunnel(authToken, tunnelId, { 114 | target: { 115 | url: 'http://example.com' 116 | } 117 | }); 118 | 119 | assert(res.status == 200, `did not get 200 from patch tunnel api, got ${res.status}`); 120 | data = await res.json(); 121 | assert(data?.target?.url == 'http://example.com', `tunnel not updated, got ${data}`); 122 | }); 123 | 124 | it('API create/delete tunnel ', async () => { 125 | const account = await createAccount(); 126 | const authToken = await getAuthToken(account.account_id); 127 | 128 | const tunnelId = crypto.randomBytes(20).toString('hex'); 129 | 130 | let res = await putTunnel(authToken, tunnelId); 131 | assert(res.status == 200, "did not get 200 from create tunnel api"); 132 | let data = await res.json(); 133 | assert(data.id == tunnelId, `tunnel not created, got ${data}`); 134 | 135 | res = await fetch(`${baseApi}/v1/tunnel/${tunnelId}`, { 136 | method: 'DELETE', 137 | headers: { 138 | 'Authorization': `Bearer ${authToken}` 139 | }, 140 | }); 141 | assert(res.status == 204, "did not get 204 from delete tunnel api"); 142 | 143 | res = await getTunnel(authToken, tunnelId); 144 | assert(res.status == 404, "tunnel not deleted"); 145 | }); 146 | 147 | it('API get non-existing tunnel returns 404 ', async () => { 148 | const account = await createAccount(); 149 | const authToken = await getAuthToken(account.account_id); 150 | 151 | const res = await getTunnel(authToken, "non-existing-tunnel"); 152 | assert(res.status == 
404, `expected 404, got ${res.status}`); 153 | }); 154 | 155 | it('API existing tunnel with wrong auth returns 401 ', async () => { 156 | const account = await createAccount(); 157 | const authToken = await getAuthToken(account.account_id); 158 | const tunnelId = crypto.randomBytes(20).toString('hex'); 159 | 160 | let res = await putTunnel(authToken, tunnelId); 161 | assert(res.status == 200, "did not get 200 from create tunnel api"); 162 | 163 | res = await getTunnel(authToken, tunnelId); 164 | assert(res.status == 200, `could not read tunnel, got ${res.status}`); 165 | 166 | const account2 = await createAccount(); 167 | const authToken2 = await getAuthToken(account2.account_id); 168 | res = await getTunnel(authToken2, tunnelId); 169 | assert(res.status == 404, `expected 404, got ${res.status}`); 170 | }); 171 | }); -------------------------------------------------------------------------------- /test/e2e/test_ssh.js: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import crypto from 'crypto'; 3 | import http from 'node:http'; 4 | import { setTimeout } from 'timers/promises'; 5 | import { createAccount, createEchoServer, getAuthToken, getTunnel, putTunnel, sshClient } from './e2e-utils.js'; 6 | 7 | const echoServerUrl = "http://localhost:10000"; 8 | 9 | describe('SSH transport E2E', () => { 10 | let exposr; 11 | let terminator; 12 | let echoServerTerminator; 13 | 14 | before(async () => { 15 | process.env.NODE_ENV = "test-e2e"; 16 | exposr = await import('../../src/index.js'); 17 | terminator = await exposr.default([ 18 | "node", 19 | "--admin-enable", 20 | "--allow-registration", 21 | "--transport", "ssh", 22 | "--ingress", "http", 23 | "--ingress-http-url", "http://localhost:8080" 24 | ]); 25 | echoServerTerminator = await createEchoServer(); 26 | }); 27 | 28 | after(async () => { 29 | process.env.NODE_ENV = "test"; 30 | await terminator(undefined, {gracefulTimeout: 1000, drainTimeout: 
500}); 31 | await echoServerTerminator() 32 | }); 33 | 34 | it('SSH transport w/ HTTP ingress E2E', async () => { 35 | 36 | const account = await createAccount(); 37 | let authToken = await getAuthToken(account.account_id); 38 | const tunnelId = crypto.randomBytes(20).toString('hex'); 39 | let res = await putTunnel(authToken, tunnelId, { 40 | transport: { 41 | ssh: { 42 | enabled: true 43 | }, 44 | }, 45 | ingress: { 46 | http: { 47 | enabled: true 48 | } 49 | }, 50 | target: { 51 | url: `${echoServerUrl}` 52 | } 53 | }); 54 | 55 | assert(res.status == 200, "could not create tunnel") 56 | 57 | res = await getTunnel(authToken, tunnelId); 58 | let data = await res.json(); 59 | assert(data?.transport?.ssh?.enabled == true, "SSH transport not enabled"); 60 | assert(typeof data?.transport?.ssh?.url == 'string', "No SSH connect URL available"); 61 | 62 | const targetUrl = new URL(data.target.url); 63 | 64 | const terminateClient = sshClient( 65 | data?.transport?.ssh?.host, 66 | data?.transport?.ssh?.port, 67 | data?.transport?.ssh?.username, 68 | data?.transport?.ssh?.password, 69 | targetUrl, 70 | ); 71 | 72 | authToken = await getAuthToken(account.account_id); 73 | do { 74 | await setTimeout(1000); 75 | res = await getTunnel(authToken, tunnelId); 76 | data = await res.json(); 77 | } while (data?.connection?.connected == false); 78 | 79 | assert(data?.connection?.connected == true, "tunnel not connected"); 80 | 81 | const ingressUrl = new URL(data.ingress.http.url); 82 | 83 | let status; 84 | ([status, data] = await new Promise((resolve) => { 85 | const req = http.request({ 86 | hostname: 'localhost', 87 | port: 8080, 88 | method: 'POST', 89 | path: '/', 90 | headers: { 91 | "Host": ingressUrl.hostname 92 | } 93 | }, (res) => { 94 | let data = ''; 95 | 96 | res.on('data', (chunk) => { 97 | data += chunk; 98 | }); 99 | 100 | res.on('close', () => { resolve([res.statusCode, data])}); 101 | }); 102 | req.end('echo'); 103 | })); 104 | 105 | terminateClient(); 106 | 107 | 
assert(status == 200, `expected status code 200, got ${status}`); 108 | assert(data == "echo", `did not get response from echo server through WS tunnel, got ${data}`); 109 | 110 | }).timeout(60000); 111 | }); -------------------------------------------------------------------------------- /test/e2e/test_ws.js: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import crypto from 'crypto'; 3 | import http from 'node:http'; 4 | import { setTimeout } from 'timers/promises'; 5 | import { createAccount, createEchoServer, getAuthToken, getTunnel, putTunnel, startExposr } from './e2e-utils.js'; 6 | import { PGSQL_URL, REDIS_URL } from '../env.js'; 7 | 8 | const echoServerUrl = "http://host.docker.internal:10000"; 9 | 10 | describe('Websocket E2E', () => { 11 | let exposr; 12 | let terminator; 13 | let echoServerTerminator; 14 | 15 | before(async () => { 16 | process.env.NODE_ENV = "test-e2e"; 17 | exposr = await import('../../src/index.js'); 18 | echoServerTerminator = await createEchoServer(); 19 | }); 20 | 21 | after(async () => { 22 | process.env.NODE_ENV = "test"; 23 | await echoServerTerminator() 24 | }); 25 | 26 | const storageModes = [ 27 | {storage: "In-memory storage", args: []}, 28 | {storage: "Redis storage", args: ["--storage-url", REDIS_URL ]}, 29 | {storage: "SQLite storage", args: ["--storage-url", "sqlite://db.sqlite" ]}, 30 | {storage: "PostgreSQL storage", args: ["--storage-url", PGSQL_URL ]}, 31 | ]; 32 | 33 | storageModes.forEach(({storage, args}) => { 34 | it(`WS transport w/ HTTP ingress E2E w/ ${storage}`, async () => { 35 | terminator = await exposr.default([ 36 | "node", 37 | "--admin-enable", 38 | "--allow-registration", 39 | "--ingress", "http", 40 | "--ingress-http-url", "http://localhost:8080" 41 | ].concat(args)); 42 | 43 | const account = await createAccount(); 44 | let authToken = await getAuthToken(account.account_id); 45 | const tunnelId = 
crypto.randomBytes(20).toString('hex'); 46 | await putTunnel(authToken, tunnelId); 47 | 48 | const exposrCliTerminator = startExposr([ 49 | "-a", `${account.account_id}`, 50 | "tunnel", "connect", `${tunnelId}`, `${echoServerUrl}` 51 | ]); 52 | 53 | authToken = await getAuthToken(account.account_id); 54 | let res, data; 55 | do { 56 | await setTimeout(1000); 57 | res = await getTunnel(authToken, tunnelId); 58 | data = await res.json(); 59 | } while (data?.connection?.connected == false); 60 | 61 | assert(data?.connection?.connected == true, "tunnel not connected"); 62 | 63 | const ingressUrl = new URL(data.ingress.http.url); 64 | 65 | let status; 66 | ([status, data] = await new Promise((resolve) => { 67 | const req = http.request({ 68 | hostname: 'localhost', 69 | port: 8080, 70 | method: 'POST', 71 | path: '/', 72 | headers: { 73 | "Host": ingressUrl.hostname 74 | } 75 | }, (res) => { 76 | let data = ''; 77 | 78 | res.on('data', (chunk) => { 79 | data += chunk; 80 | }); 81 | 82 | res.on('close', () => { resolve([res.statusCode, data])}); 83 | }); 84 | req.end('echo'); 85 | })); 86 | 87 | exposrCliTerminator(); 88 | await terminator(undefined, {gracefulTimeout: 10000, drainTimeout: 500}); 89 | 90 | assert(status == 200, `expected status code 200, got ${status}`); 91 | assert(data == "echo", `did not get response from echo server through WS tunnel, got ${data}`); 92 | }).timeout(60000); 93 | }); 94 | }); -------------------------------------------------------------------------------- /test/env.js: -------------------------------------------------------------------------------- 1 | export const REDIS_URL = process.env.TEST_REDIS_URL || 'redis://localhost:6379'; 2 | export const PGSQL_URL = process.env.TEST_PGSQL_URL || 'postgres://postgres:password@localhost:5432/exposr'; -------------------------------------------------------------------------------- /test/system/eventbus/test_redis.js: 
-------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import Config from '../../../src/config.js'; 3 | import EventBus from '../../../src/cluster/eventbus.js'; 4 | import { REDIS_URL } from '../../env.js'; 5 | import ClusterManager, { ClusterManagerType } from '../../../src/cluster/cluster-manager.js'; 6 | 7 | describe('redis eventbus', () => { 8 | let bus; 9 | let config; 10 | 11 | before(async () => { 12 | config = new Config(); 13 | await ClusterManager.init(ClusterManagerType.REDIS, { 14 | redis: { 15 | redisUrl: REDIS_URL, 16 | } 17 | }); 18 | }); 19 | 20 | after(async () => { 21 | await ClusterManager.close(); 22 | await config.destroy(); 23 | }); 24 | 25 | beforeEach(() => { 26 | bus = new EventBus(); 27 | }); 28 | 29 | afterEach(async () => { 30 | await bus.destroy(); 31 | }); 32 | 33 | it('redis bus pub/sub', async () => { 34 | const recv = new Promise((resolve) => { 35 | bus.once('test', (message) => { 36 | resolve(message) 37 | }); 38 | }); 39 | 40 | await bus.publish('test2', {data: 10}); 41 | await bus.publish('test', {data: 42}); 42 | 43 | let res = await recv; 44 | assert(res.data == 42, `did not get expected message, got ${res.data}`); 45 | }); 46 | 47 | it('redis bus waitfor', async () => { 48 | const recv = bus.waitFor('test', (message) => { 49 | return message.data == 42; 50 | }); 51 | 52 | let res = await bus.publish('test', {data: 42}); 53 | 54 | res = await recv; 55 | assert(res.data == 42); 56 | assert(bus.listenerCount('test') == 0, 'listener still attached'); 57 | }); 58 | 59 | it('redis bus waitfor timeout wrong data', async () => { 60 | const recv = bus.waitFor('test', (message) => { 61 | return message.data == 42; 62 | }, 100); 63 | 64 | let res = await bus.publish('test', {data: 10}); 65 | 66 | try { 67 | await recv; 68 | res = true; 69 | } catch (e) { 70 | res = false; 71 | } 72 | assert(res == false); 73 | assert(bus.listenerCount('test') == 0, 'listener still 
attached'); 74 | }); 75 | 76 | it('redis bus waitfor timeout no data', async () => { 77 | const recv = bus.waitFor('test', (message) => { 78 | return message.data == 42; 79 | }, 100); 80 | 81 | let res; 82 | try { 83 | await recv; 84 | res = true; 85 | } catch (e) { 86 | res = false; 87 | } 88 | assert(res == false); 89 | assert(bus.listenerCount('test') == 0, 'listener still attached'); 90 | }); 91 | 92 | }); -------------------------------------------------------------------------------- /test/system/lock/test_redis.js: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import Config from '../../../src/config.js'; 3 | import LockService, { Lock } from '../../../src/lock/index.js'; 4 | import { REDIS_URL } from '../../env.js'; 5 | import sinon from 'sinon'; 6 | 7 | describe('redis lock', () => { 8 | const redisUrl = REDIS_URL; 9 | let lockService; 10 | let config; 11 | let clock; 12 | 13 | before(async () => { 14 | clock = sinon.useFakeTimers({shouldAdvanceTime: true}); 15 | config = new Config(); 16 | return new Promise((resolve) => { 17 | lockService = new LockService('redis', { 18 | redisUrl, 19 | callback: (err) => err ? 
rejects(err) : resolve() 20 | }); 21 | }); 22 | }); 23 | 24 | after(async () => { 25 | await lockService.destroy(); 26 | await config.destroy(); 27 | clock.restore(); 28 | }); 29 | 30 | it('redis lock can be lock and unlocked', async () => { 31 | const lock = await lockService.lock("test"); 32 | assert(lock instanceof Lock, `failed to obtain lock, got ${lock}`); 33 | 34 | const res = await lock.unlock(); 35 | assert(res == true, `failed to release lock, got ${res}`); 36 | }); 37 | 38 | it('redis lock is extended', async () => { 39 | const lock = await lockService.lock("test"); 40 | assert(lock instanceof Lock, `failed to obtain lock, got ${lock}`); 41 | 42 | assert(lock.locked() == true, "lock was not locked"); 43 | await clock.tickAsync(2000); 44 | assert(lock.locked() == true, "lock was not extended"); 45 | 46 | const res = await lock.unlock(); 47 | assert(res == true, `failed to release lock, got ${res}`); 48 | }); 49 | 50 | it('redis lock can be pending', async () => { 51 | const lock = await lockService.lock("test"); 52 | let lock2 = lockService.lock("test"); 53 | 54 | assert(lock.locked() == true, "lock was not locked"); 55 | lock.unlock(); 56 | lock2 = await lock2; 57 | assert(lock2.locked() == true, "second lock was not locked"); 58 | 59 | await lock2.unlock(); 60 | }); 61 | 62 | 63 | }); -------------------------------------------------------------------------------- /test/unit/cluster/test_cluster-transport.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import net from 'net'; 3 | import tls from 'tls'; 4 | import fs from 'fs'; 5 | import sinon from 'sinon'; 6 | import ClusterTransport from '../../../src/transport/cluster/cluster-transport.js'; 7 | import { Duplex } from 'stream'; 8 | import Config from '../../../src/config.js'; 9 | import ClusterManager, { ClusterManagerType, ClusterNode } from '../../../src/cluster/cluster-manager.js'; 10 | 11 | describe('cluster 
transport', () => { 12 | it('can be created and connected', async () => { 13 | const config = new Config(); 14 | const server = net.createServer((socket: net.Socket) => { 15 | socket.end('success'); 16 | }); 17 | 18 | await new Promise((resolve) => { 19 | server.listen(10000, () => { resolve(undefined); }); 20 | }); 21 | 22 | await ClusterManager.init(ClusterManagerType.MEM); 23 | 24 | sinon.stub(ClusterManager, "getNode").returns({ 25 | id: "some-node-id", 26 | host: "some-node-host", 27 | ip: "127.0.0.1", 28 | last_ts: new Date().getTime(), 29 | stale: false, 30 | }); 31 | 32 | const clusterTransport = new ClusterTransport({ 33 | nodeId: 'some-node-id' 34 | }); 35 | 36 | const sock: Duplex = await new Promise((resolve) => { 37 | const sock = clusterTransport.createConnection({ 38 | port: 10000, 39 | remoteAddr: "127.0.0.2" 40 | }, () => { 41 | resolve(sock); 42 | }); 43 | }); 44 | 45 | const data = await new Promise((resolve) => { 46 | sock.once('data', (chunk: Buffer) => { 47 | resolve(chunk.toString()); 48 | }); 49 | }); 50 | 51 | await ClusterManager.close(); 52 | sock.destroy(); 53 | await new Promise((resolve) => { 54 | server.close(() => { 55 | resolve(undefined); 56 | }); 57 | }); 58 | config.destroy(); 59 | sinon.restore(); 60 | 61 | assert(data == 'success'); 62 | }); 63 | 64 | it('can connect to tls', async () => { 65 | const config = new Config(); 66 | 67 | const key = fs.readFileSync(new URL('../fixtures/cn-private-key.pem', import.meta.url).pathname); 68 | const cert = fs.readFileSync(new URL('../fixtures/cn-public-cert.pem', import.meta.url).pathname); 69 | 70 | const server = tls.createServer({ 71 | key, 72 | cert, 73 | }, (socket: tls.TLSSocket) => { 74 | const servername = (socket).servername; 75 | socket.end(servername); 76 | }); 77 | 78 | await new Promise((resolve) => { 79 | server.listen(11000, () => { resolve(undefined); }); 80 | }); 81 | 82 | await ClusterManager.init(ClusterManagerType.MEM); 83 | 84 | sinon.stub(ClusterManager, 
"getNode").returns({ 85 | id: "some-node-id", 86 | host: "some-node-host", 87 | ip: "127.0.0.1", 88 | last_ts: new Date().getTime(), 89 | stale: false, 90 | }); 91 | 92 | const clusterTransport = new ClusterTransport({ 93 | nodeId: 'some-node-id' 94 | }); 95 | 96 | const sock: Duplex = await new Promise((resolve) => { 97 | const sock = clusterTransport.createConnection({ 98 | port: 11000, 99 | remoteAddr: "127.0.0.2", 100 | tls: { 101 | enabled: true, 102 | servername: 'test.example.com', 103 | cert: cert, 104 | } 105 | }, () => { 106 | resolve(sock); 107 | }); 108 | }); 109 | 110 | const data = await new Promise((resolve) => { 111 | sock.once('data', (chunk: Buffer) => { 112 | resolve(chunk.toString()); 113 | }); 114 | }); 115 | 116 | await ClusterManager.close(); 117 | sock.destroy(); 118 | await new Promise((resolve) => { 119 | server.close(() => { 120 | resolve(undefined); 121 | }); 122 | }); 123 | config.destroy(); 124 | sinon.restore(); 125 | 126 | assert(data == 'test.example.com'); 127 | }); 128 | }); -------------------------------------------------------------------------------- /test/unit/config/test_config.js: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import Config from '../../../src/config.js'; 3 | 4 | describe('configuration parser', () => { 5 | 6 | it('--redis-url backwards compatibility', () => { 7 | const config = new Config([ 8 | "--redis-url", "redis://redis" 9 | ]); 10 | 11 | assert(config._config['cluster'] == 'redis', `cluster not set to redis, ${config._config['cluster']}`); 12 | assert(config._config['cluster-redis-url'] == 'redis://redis', `cluster url not set, ${config._config['cluster-redis-url']}`); 13 | assert(config._config['storage-url'] == 'redis://redis', `storage url not set, ${config._config['storage-url']}`); 14 | 15 | config.destroy(); 16 | }); 17 | 18 | it('--cluster auto defaults to single-node', () => { 19 | const config = new Config([ 20 | 
"--cluster", "auto" 21 | ]); 22 | 23 | assert(config._config['cluster'] == 'single-node', `cluster not set to single-node, got ${config._config['cluster']}`); 24 | config.destroy(); 25 | }); 26 | 27 | it('--cluster auto returns udp w/ --storage-url redis', () => { 28 | const config = new Config([ 29 | "--cluster", "auto", 30 | "--storage-url", "redis://redis", 31 | ]); 32 | 33 | assert(config._config['cluster'] == 'udp', `cluster not set to udp, ${config._config['cluster']}`); 34 | config.destroy(); 35 | }); 36 | 37 | it('--cluster redis requires --cluster-redis-url', () => { 38 | const config = new Config([ 39 | "--ingress-http-domain", "http://example.com", 40 | "--cluster", "redis" 41 | ]); 42 | 43 | assert(config._error.message == "Missing required argument: cluster-redis-url", "argument not required"); 44 | 45 | config.destroy(); 46 | }); 47 | }); -------------------------------------------------------------------------------- /test/unit/fixtures/cn-private-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCl0DlVDq03VXUY 3 | UVJ4doOxQAugsA97I3NNdIdF5W3ctdmr19iT8BVoTkKFkuT6AMNO2ChwpeBrv3Ez 4 | VbUt6o/kwfRaFS+EwSn1iovUowGFQSn9rVZDS+vPlJT2a7qau2088c5DPDSoorR8 5 | DSrNTs+nN6jEcq+4VqdyK22JEezaHjPjALGNpEjG3JXETUVzdR7KbowYyYEgEMwP 6 | LuALp5LASCle2YG6hwGn0104uArS9oREI7rXHxz/yHu6/H+ys+IrCYME6Zmo1P0q 7 | N9/sjxXSlDApY+aPnWVesAoSLsm7BTOJjx3RkUnj3xdrHYlHBNXLXNTJoqemETYK 8 | IPGoEKVFAgMBAAECggEAQmtiS9wRLrUgrpkEvBRdNEc/YSKlUOImccRX23vHwj1l 9 | 8BwKOVhWeu2+X9sztaFQmuijalxHxWFuau3OIZJwCWZYBy2tVzQ5Jo5U7NxO453y 10 | wBcLEvH6h7Aw63TDzu99FoiV4wDe5x4lxftQReVn8Meu5uI52VF8yZLz0ZBInCYP 11 | 2W7G1v7qzDqfHI1OlIsW/QgE0cG78ibMwQ/nrTXgJXEe9Yuk5W3xfvUTQ2rjte59 12 | Um9ZAoFYqXmQ0mczGhewwa/hBP8wmamb3XLAjJKBdzweq37MU2eOdNRDdq8Uzyba 13 | xfNRdX7Wb16O2SDhX9x7Z/Cczw4+7UBhAF/WCJ4EEQKBgQDWdAnQPK3vf0G374a6 14 | 
27uOriM9d00ZPMN2+rEuKmGsLiUFBOX8HAuc5lNPKAaM3hTTGequthRaaxCzEwtN 15 | G3026Bbu9+aArjuxbBTYAWWHBhRfwEixA1QveL3TCvJz+5WKy/7A+nFf8KOHN6dc 16 | wiCXTWfKDJOrZGdSzrQIG/01VwKBgQDF79r84GlIl93Y4qhl1JkJOYUOzHw7sdCj 17 | mgEFz6p8PuKLEZGXeT33/y76SLmJLrT1dNX55RrgvpbXAr/M90xoKCTU1QkgxZs7 18 | NpBE0H22zXV/b80UrtjQhDVNqY5e5H7kVatIbpWO7SNRjz8md0KbrK/WRyaknRoM 19 | amuQBdCcwwKBgGU6GtO/gL7XC9dd6fPT+qFr01iVMsBh7UYBKZdIDBixI2pOGJ07 20 | Q8QI+HQZyZW+0oee6ScU7WCMvzI4IBKCkkVTolekjppZI/YotWIMrrbgiMd1xRpB 21 | HwDVdozi0vUqYTlF93dyAAgwV5BhFc/0Flw0/nPiIeY6G4d76IEXbRs7AoGAKzm+ 22 | MQsw8tF3eS3GaLaM1R7il+DAIKksTw4KuyImsQtqncQwb0vi5I2tzKqlGfhBPHlP 23 | YUtPG1WDy2CmUjSPUw+xO2lqnOWKiNvZla2SRqGHz1SzDJlCcAbvPQ6SMNblfyfW 24 | R/MeZiATXnRR7iyNN0H3BGx1W+2TCrMS2ljqvdECgYEAhVtLPdlHmif1isSOhboU 25 | G2XqTxrqJrNDevDUw/zD8YK3FJ3eTgi+PEOF6nPOmUlkqoGxnNyDh6kd4NpiONhq 26 | MSErdIuDdS4uxujiWC8a4tEW19DdLnkeM+HJYjStS+Za+Or4AWdPY7MqIGeAxokS 27 | tnSds35e6bbFVWJgeCb/2YM= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /test/unit/fixtures/cn-public-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICrjCCAZYCCQDi9S9kZaP9UDANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA0q 3 | LmV4YW1wbGUuY29tMCAXDTIxMDgwOTEzMzMyNFoYDzIwNzEwNzI4MTMzMzI0WjAY 4 | MRYwFAYDVQQDDA0qLmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A 5 | MIIBCgKCAQEApdA5VQ6tN1V1GFFSeHaDsUALoLAPeyNzTXSHReVt3LXZq9fYk/AV 6 | aE5ChZLk+gDDTtgocKXga79xM1W1LeqP5MH0WhUvhMEp9YqL1KMBhUEp/a1WQ0vr 7 | z5SU9mu6mrttPPHOQzw0qKK0fA0qzU7PpzeoxHKvuFancittiRHs2h4z4wCxjaRI 8 | xtyVxE1Fc3Ueym6MGMmBIBDMDy7gC6eSwEgpXtmBuocBp9NdOLgK0vaERCO61x8c 9 | /8h7uvx/srPiKwmDBOmZqNT9Kjff7I8V0pQwKWPmj51lXrAKEi7JuwUziY8d0ZFJ 10 | 498Xax2JRwTVy1zUyaKnphE2CiDxqBClRQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB 11 | AQAz+YDZwyiwfmkaEnUVwBa+gUS0fzJgUpyCrnOETCuvSYwjhFzycr2MqQ+GASBM 12 | +Cz3YzrD09RpBIXVsGSC7nI9eAfxV1E6AuUXm5cbDhtQfgQIIaQRCAfaI2jOgQzi 13 | 
CiIB6Oe0NnPZ7af3VKZ8AIanj4yd6kutCD3O37dUxgYx1gwHNQ/dlDb1QXP2FQoF 14 | oKAn919fKDVDxw42m60b31/MUlX0WNG0owlmKctyBOVwMFoWQobNt6obpxNAWvD+ 15 | cYBSqVPXGovqRalnQfu0tfkmmzOZaTYowugVzkdiAO4otn7TwpYh8OD2dF11nFkA 16 | pOlqAFSEPnpeXI2eqH7Perpf 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /test/unit/fixtures/no-wildcard-private-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDBK/OHJUXiGZiV 3 | fQyX4Ipv+uYpGCHgKCFm8uCJlUJxg45jSu2zdQ+4AokbgmyZZmRp71FZcfYHajrR 4 | Bnk9IVw9KoqirBOuI+u0IcNpNqXdekBquzGRsCF10EPm7VsdIVRKlITNz9YykT0z 5 | UfFfSDsdvOXeKO2s3gw/IxnmQ7WRN4kz/pP/aVuJ9Ej47UkG33vjFtHfrk/vTTVO 6 | PVghmJ3m83vIttBs47bSDkau4/d2fLeoO5FLUEzI79W+gc/7PZWmODjjpWOZrfCd 7 | w+GBdjvBrBcNWUc8BVcHVtrmLg92qvoxfE2lQ6QoHKmZw1nxNJVhhkltSf1SP13w 8 | oIhImv4XAgMBAAECggEAOWWm0aQ4lIsIdHqYkFQurkE9vD4oaqNh8WX/Y8IO4Q22 9 | 9DfiUy1YQ1O1i5AAVsA+2985K7uZ3/Ank845nflYn/ARC27X1ilkMrzJhGyxchKO 10 | K2h9vQ05k/kTA/3I0eoCUrq4jzLPMW1IYwRZFPewYRCsuIsuu+uqdeQnIO1DeOcj 11 | 9YeJpVs3db8/m16yRFbX/URrJokSNqmDBD+AS0AZ2Q0gTJnPoUFT/YJUPXChxjK+ 12 | bggXNFK8oaaAs+cgTUHJa4SuvYE1jUx0BmeSxPNvi/KPt0tw+LsZ3bsQ/9kAmKrY 13 | I7HR7UQS2pczyVwxM5kTCBCGk8sxYg7+kppZdlAiEQKBgQDwgI5+2zWOulVIKKZp 14 | lo/OIdU0vP1F8BJTXTPmzWQBq+FoYSXBanxgOY800UFbwurRNoq5NIz9LqYD6/o4 15 | kFN9g3jO4ANMdSGfqx8cL5fmPtnG/uYajoaU/kcSp5tMHR8GkENv+JYbtQH+sdEW 16 | wgxBmKucsE3cryvWLeTc/ramzwKBgQDNnpt9XhZ5Cubvh/RYLMN3zPOExpx33XKF 17 | vF2V8eeVjOTpzL1surDItWoNTpc/NJ8T6EcXXsr51JdGC2CBJ93YmFqoGiqxqLIb 18 | nV5ATCxkDx5bIKszdYDusiovCY3GVp9U/4WX5GsrGfUn68WwULd4YeJbYlEus0tD 19 | Re9s90kGOQKBgB/w8vvmITbTiIAM5g0f8fluhOJ8NycSu1xXBI2iuaV/LnCudlU5 20 | z0r4WgypvgRx3r3qgIIYeKEnkOT0ckcQ5AXHcEQJfGarJ8wM7hTQPVxW/JVxRR5B 21 | z536J3ZSzE1PkM9OWaaFXWpQn/MzqpFbNEWCAUjOIYktuqS3c63mJlwBAoGAGJXK 22 | SaciUL649hLvaEuR45Uh2NJWiw5zTwJCvlb6NatvA3VzgLihwp3OXGD1PJ1yF9NC 23 | 
DM1Mv6pnm/xoQYkewXMWA9t9F1jwUtTyHAX/mMcl/gdd8P0vuVqcKJuf1AMvcd5L 24 | funU1oZ4BZnvPTGpfj9V7U9IHalaamTGR+UZWiECgYEArsJuOO0eCNQmpMYFH+gZ 25 | Fi9Kipl/ZL5fYGqLW8K6xz4iB/4S/ZhJNit/CrrQUr/GcnYYGztN6DSyVnnjUlf6 26 | RDJQav74qIV7vSTpNPq7NuoMQhoQ21M9HCjeHmsk3J/O/ADlWNvv3lcL5mVXl8fW 27 | oVsAL0Y3uMdUBu1IFEZmNmM= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /test/unit/fixtures/no-wildcard-public-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICpjCCAY4CCQDbS5wgcB7AzTANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAls 3 | b2NhbGhvc3QwIBcNMjEwODA5MTMzMzQ3WhgPMjA3MTA3MjgxMzMzNDdaMBQxEjAQ 4 | BgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB 5 | AMEr84clReIZmJV9DJfgim/65ikYIeAoIWby4ImVQnGDjmNK7bN1D7gCiRuCbJlm 6 | ZGnvUVlx9gdqOtEGeT0hXD0qiqKsE64j67Qhw2k2pd16QGq7MZGwIXXQQ+btWx0h 7 | VEqUhM3P1jKRPTNR8V9IOx285d4o7azeDD8jGeZDtZE3iTP+k/9pW4n0SPjtSQbf 8 | e+MW0d+uT+9NNU49WCGYnebze8i20GzjttIORq7j93Z8t6g7kUtQTMjv1b6Bz/s9 9 | laY4OOOlY5mt8J3D4YF2O8GsFw1ZRzwFVwdW2uYuD3aq+jF8TaVDpCgcqZnDWfE0 10 | lWGGSW1J/VI/XfCgiEia/hcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAn95BpdaR 11 | gJgMhjZM48NyfimpTQJxoGBOtj2ExdrhMqVEE9pOPuu15YjsDAsn974i9oDUmKmv 12 | pSA6haaCCZLtRbFZVy7suH6I5Ct8VAXdZp4RbUXCIjk5dwcsuYLvdTiVfr9yju/7 13 | EbnopGT/I0+yG7AptQePl9hRuCT6h5sk4QWuzn5UhTM6TfjDUax8QNANjsKBeK5R 14 | L4b93olZjYu1oJDCM+E/aRLqnIOevRiayC8xpseYZtjowHWBCysGKDEqTf6GM5+P 15 | iGUw3Ut+Tt5AbKMwLx8sTpLL9uaT6Zvu9EYYOKNfKkW37V27roV7+rRBilkgH089 16 | AIC/q7UEY9Gcfg== 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /test/unit/fixtures/san-private-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCudnCoum826QsQ 3 | +Nz+hRgk8nR84sCefAiHhiuYOGgMr2s+Cyxp3g0pChctji4B9YvhXYfwzqt2dHNl 4 | 
sG3snfd8kxcrUCNLJ1hI7a7I6P/Kp0ldw0TwjJD/Hq07RitQfIacaulqE1zBPAQ+ 5 | ESc0Icp8Pv37dQiQbONPICTyI/O/X4cgU1why/jgJxjsH5bqdkvATUbW1Rw52axA 6 | 3mya87u3ipX9a5YviQbxFYmNntEjO1ax3cazw/i6s+WKEmZIo/p0wCkOCHHApswg 7 | fxza/L09Lw5jBIuExMmmnXrWaufg81Z7mWNZXKT0idQ2fy4oruMZLpTv+1LLDnip 8 | SZ2OeNQ3AgMBAAECggEBAJ+/Mc89zuH/p7NlvK2NXxtNqp5D3s3/kqvNjhN26sda 9 | xJ7uCj/yJCjE5Y5AFx6SIQA97WDxTI1NefDOCQlVZ6z43ok5euew4iT9BkNPHhEI 10 | 9qPVTkXY4FfKViLlUHoFXBjTyGDp08/YTaYrfLM+kbMM1vEuK6ZbHhRwwYO6qdE7 11 | eeCQ8sJRriKy2y65s7qPG13ijTCi4E08krhvxF3GTFxbDWnEqZGz6zxxecztmtRB 12 | AkfMQmaiWsIDWyY24709U/g0Ujbo1yVUFtGrQq/JcsrGHhOYsv6SuBMUPyYysXDX 13 | PEw1uKdWo2KgU6z+SxEo3bqE+zbi/8bjg1ljj8x7PJECgYEA5zFs8rCwpUqI91aZ 14 | aYNAUGLD4l8W3cyfKAqXtKrCVu3cozB8KBzDggfnWbScB5WroCbLDiqO6n5U/J0B 15 | liTV9IPnSAtEFyvnaITrSu2otPtzj1AQjH2qawcMLFHdtS7Pgtdh0jT5hfh62WYb 16 | PAYJkRolsfCkYo9M/i8i3rWYtzkCgYEAwS60F7vE1LPhhJILJcDJC4IQEPsV1YzZ 17 | UcmcJGvG4zM/xDh1zMKwCA419Ez/h3OU+8x7WCQq3d21Be/Vi5sICk+r0trq9g0o 18 | 09xYCebCC91onMVkuBuwFCxou0c6ZEgpEa9GRtygikqGV4WesWrzccw9iO3qjqK/ 19 | zQVbQD599u8CgYBA3tB4JHXcW1alOCphvc9M/vmByjarU6AQ5y/8JLHVHHb9Xzxn 20 | MPL2yGr088WYJzn5/Qia1/wz9fC41QE99OYH7v71axvW+J286RbwdlERP0EEpG2Q 21 | Ti6ES3zx7qKsFQ9q4i5zgiPFoJwv5gOcSwFiMpkDHceyVA1BgU5ieUf5mQKBgAWf 22 | CUiDgze5QD+uTDq+AFugXj1QQRNktNInbq60oCXuUNWx2uOpapvhZfqE3w6e81/c 23 | VF59Nu4bzG+uyeT1DufV56cMDbg5K832ZG1tVIop2Chvqf2deKMto6KHcRCH9QH5 24 | uq7HLri3cG9NiQ+akBjom4XHfoLGA5TAqiYVa27rAoGAWa9cNlo9UAFKit8q0DOW 25 | vTYkayTL98XbLxWwGrM/xd2uT/yA++EV2HsqxxgeZEgXCt0ISunTsr4yn5p7Eg49 26 | nvt9kjN+6QGAdm6Ty0jRN4mcfSELTb7yKkd+SLxHahTubueO86iN3mN88r8DWMcH 27 | QIVZJd251zJvQPVVyeXkIs4= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /test/unit/fixtures/san-public-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC1jCCAb6gAwIBAgIJAK08RMX/alKpMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV 3 | 
BAMMCWxvY2FsaG9zdDAgFw0yMTA4MDkxMzEwMzVaGA8yMDcxMDcyODEzMTAzNVow 4 | FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 5 | CgKCAQEArnZwqLpvNukLEPjc/oUYJPJ0fOLAnnwIh4YrmDhoDK9rPgssad4NKQoX 6 | LY4uAfWL4V2H8M6rdnRzZbBt7J33fJMXK1AjSydYSO2uyOj/yqdJXcNE8IyQ/x6t 7 | O0YrUHyGnGrpahNcwTwEPhEnNCHKfD79+3UIkGzjTyAk8iPzv1+HIFNcIcv44CcY 8 | 7B+W6nZLwE1G1tUcOdmsQN5smvO7t4qV/WuWL4kG8RWJjZ7RIztWsd3Gs8P4urPl 9 | ihJmSKP6dMApDghxwKbMIH8c2vy9PS8OYwSLhMTJpp161mrn4PNWe5ljWVyk9InU 10 | Nn8uKK7jGS6U7/tSyw54qUmdjnjUNwIDAQABoykwJzAlBgNVHREEHjAcgg0qLmV4 11 | YW1wbGUuY29tggsqLmxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAQEArUJEfVVQ 12 | 6Qwlm5vTJoeWYvKUJ5ttPgxKOsiipQwSpkug6fxC6wlGZFijjCPbxVveiTO47KWa 13 | n1ehTIqBERznGYzHpa3vbGT6UCIJGuP5s6sqN3bhIpUHqmzJW2F+5M9UjoBHAG6l 14 | IuzHehl+ggp0Q79kYjtXayf/L0VXlRIwiMbZwnQ0Ikbs+C5riy97U32chrV6VVEb 15 | i2mQYGZ6fG6sGdZdXV+TbHJetfNoOdtExEz2860VGqzRPqV3KphCL40mZ12wASip 16 | trz/su8RfxNSax2UO823gPgdSclFjUf3GY2Ih09odWK+ABGWCYiPIjz+mSEnYD8E 17 | Fc90qDg0To/2Hw== 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /test/unit/ingress/test_utils.js: -------------------------------------------------------------------------------- 1 | import IngressUtils from '../../../src/ingress/utils.js'; 2 | import assert from 'assert/strict'; 3 | 4 | describe('ingress utils', () => { 5 | 6 | const getTunnelIdTests = [ 7 | {args: ['foo.example.com', "example.com"], expected: "foo"}, 8 | {args: ['foo-123.sub-domain.example.com', "sub-domain.example.com"], expected: "foo-123"}, 9 | {args: ['foo.example.com', "*.example.com"], expected: "foo"}, 10 | {args: ['foo.example.com'], expected: "foo"}, 11 | ]; 12 | 13 | getTunnelIdTests.forEach(({args, expected}) => { 14 | it(`getTunnelId() correctly parses ${args}`, () => { 15 | const tunnelId = IngressUtils.getTunnelId(...args) 16 | assert(tunnelId == expected, `got ${tunnelId}`); 17 | }); 18 | }); 19 | }); 
-------------------------------------------------------------------------------- /test/unit/ingress/utils.ts: -------------------------------------------------------------------------------- 1 | import http from 'http'; 2 | 3 | export const httpRequest = async (opts: http.RequestOptions, buffer?: string | undefined): Promise<{status: number | undefined, data: any}> => { 4 | const result: {status: number | undefined, data: any} = await new Promise((resolve) => { 5 | const req = http.request(opts, 6 | (res) => { 7 | let data = ''; 8 | 9 | res.on('data', (chunk) => { 10 | data += chunk; 11 | }); 12 | 13 | res.on('close', () => { resolve({status: res.statusCode, data})}); 14 | }); 15 | req.end(buffer); 16 | }); 17 | return result; 18 | } -------------------------------------------------------------------------------- /test/unit/lock/test_inmem.js: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import LockService, { Lock } from '../../../src/lock/index.js'; 3 | 4 | describe('redis lock', () => { 5 | let lockService; 6 | 7 | const createLockService = async () => { 8 | return new Promise((resolve) => { 9 | const lockService = new LockService('mem', { 10 | callback: (err) => err ? 
rejects(err) : resolve(lockService) 11 | }); 12 | }); 13 | }; 14 | 15 | it('memory lock/unlock', async () => { 16 | const lockService = await createLockService(); 17 | const lock = await lockService.lock("test"); 18 | assert(lock instanceof Lock, `failed to obtain lock, got ${lock}`); 19 | 20 | const islocked = lock.locked() 21 | assert(islocked == true, `lock is not locked, got ${islocked}`); 22 | 23 | const res = await lock.unlock(); 24 | assert(res == true, `failed to release lock, got ${res}`); 25 | await lockService.destroy(); 26 | }); 27 | 28 | it('memory lock can be pending', async () => { 29 | const lockService = await createLockService(); 30 | const lock = await lockService.lock("test"); 31 | let lock2 = lockService.lock("test"); 32 | 33 | assert(lock.locked() == true, "lock was not locked"); 34 | lock.unlock(); 35 | lock2 = await lock2; 36 | assert(lock2.locked() == true, "second lock was not locked"); 37 | 38 | await lock2.unlock(); 39 | await lockService.destroy(); 40 | }); 41 | 42 | it('memory lock can be destroyed with pending locks', async () => { 43 | const lockService = await createLockService(); 44 | 45 | const lock = await lockService.lock("test"); 46 | const lock2 = lockService.lock("test"); 47 | await lockService.destroy(); 48 | 49 | const res = await lock2; 50 | assert(res == false, `expected lock to be unlocked, got ${res}`); 51 | }); 52 | }); -------------------------------------------------------------------------------- /test/unit/storage/test_serialize.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import Serializer, { Serializable } from '../../../src/storage/serializer.js' 3 | 4 | type Sub = { 5 | astring?: string; 6 | anarray?: Array; 7 | } 8 | 9 | class Test implements Serializable { 10 | public astring?: string = undefined; 11 | public anumber?: number = undefined; 12 | public obj?: Sub = { 13 | astring: undefined, 14 | anarray: [], 15 | }; 16 | public 
anarray?: Array = []; 17 | } 18 | 19 | describe('Serializer', () => { 20 | 21 | it(`Can serialize/deserialize`, () => { 22 | const test: Test = { 23 | anumber: 10 24 | } 25 | 26 | let str = Serializer.serialize(test) 27 | assert(str == '{"anumber":10}') 28 | 29 | let test2 = Serializer.deserialize(str, Test); 30 | assert(test2.anumber == 10); 31 | }); 32 | 33 | it(`Nested objects`, () => { 34 | const test: Test = { 35 | anumber: 10, 36 | obj: { 37 | astring: "foo" 38 | } 39 | } 40 | 41 | let str = Serializer.serialize(test) 42 | assert(str == '{"anumber":10,"obj":{"astring":"foo"}}') 43 | 44 | let test2 = Serializer.deserialize(str, Test); 45 | assert(test2.obj?.astring == 'foo'); 46 | }); 47 | 48 | it(`Array`, () => { 49 | const test: Test = { 50 | anumber: 10, 51 | anarray: [ 52 | "bar" 53 | ] 54 | } 55 | 56 | let str = Serializer.serialize(test) 57 | assert(str == '{"anumber":10,"anarray":["bar"]}'); 58 | 59 | let test2 = Serializer.deserialize(str, Test); 60 | assert(test2?.anarray?.[0] == 'bar'); 61 | }); 62 | 63 | it(`Nested array`, () => { 64 | const test: Test = { 65 | anumber: 10, 66 | obj: { 67 | anarray: [1,2] 68 | } 69 | } 70 | 71 | let str = Serializer.serialize(test) 72 | assert(str == '{"anumber":10,"obj":{"anarray":[1,2]}}'); 73 | 74 | let test2 = Serializer.deserialize(str, Test); 75 | assert(test2?.obj?.anarray?.[0] == 1); 76 | assert(test2?.obj?.anarray?.[1] == 2); 77 | }); 78 | 79 | }); -------------------------------------------------------------------------------- /test/unit/transport/test_ssh-endpoint.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import Tunnel from '../../../src/tunnel/tunnel.js'; 3 | import SSHEndpoint from '../../../src/transport/ssh/ssh-endpoint.js'; 4 | import Config from '../../../src/config.js'; 5 | import IngressManager from '../../../src/ingress/ingress-manager.js'; 6 | import { TunnelConfig } from 
'../../../src/tunnel/tunnel-config.js'; 7 | import ClusterManager, { ClusterManagerType } from '../../../src/cluster/cluster-manager.js'; 8 | import StorageManager from '../../../src/storage/storage-manager.js'; 9 | 10 | describe('ssh endpoint', () => { 11 | 12 | const endpointTests = [ 13 | { 14 | args: {port: 2200}, 15 | baseUrl: new URL('http://example.com'), 16 | expected: "ssh://test:token@example.com:2200", 17 | }, 18 | { 19 | args: {port: 2200, host: 'localhost'}, 20 | baseUrl: new URL('http://example.com'), 21 | expected: "ssh://test:token@localhost:2200", 22 | }, 23 | { 24 | args: {port: 2200, host: 'localhost:22'}, 25 | baseUrl: new URL('http://example.com'), 26 | expected: "ssh://test:token@localhost:22", 27 | }, 28 | { 29 | args: {port: 2200}, 30 | baseUrl: new URL('http://example.com:8080'), 31 | expected: "ssh://test:token@example.com:2200", 32 | }, 33 | { 34 | args: {port: 2200, host: 'localhost:22'}, 35 | baseUrl: new URL('http://example.com:8080'), 36 | expected: "ssh://test:token@localhost:22", 37 | }, 38 | ]; 39 | 40 | endpointTests.forEach(({args, baseUrl, expected}) => { 41 | it(`getEndpoint() for ${JSON.stringify(args)}, ${baseUrl} returns ${expected}`, async () => { 42 | const config = new Config(); 43 | await StorageManager.init(new URL("memory://")); 44 | await ClusterManager.init(ClusterManagerType.MEM); 45 | await IngressManager.listen({ 46 | http: { 47 | enabled: true, 48 | subdomainUrl: new URL("https://example.com"), 49 | port: 8080, 50 | } 51 | }); 52 | 53 | const tc = new TunnelConfig("test", "test"); 54 | tc.transport.token = 'token'; 55 | const tunnel = new Tunnel(tc) 56 | 57 | const endpoint = new SSHEndpoint({ 58 | ...args, 59 | max_connections: 1, 60 | enabled: true, 61 | hostKey: "", 62 | allowInsecureTarget: true, 63 | }); 64 | const ep = endpoint.getEndpoint(tunnel, baseUrl); 65 | await endpoint.destroy(); 66 | 67 | assert(ep.url == expected, `got ${ep.url}`); 68 | await StorageManager.close(); 69 | await 
ClusterManager.close(); 70 | await IngressManager.close(); 71 | await config.destroy(); 72 | }); 73 | }); 74 | }); -------------------------------------------------------------------------------- /test/unit/tunnel/test_altname-service.ts: -------------------------------------------------------------------------------- 1 | import assert from 'assert/strict'; 2 | import StorageManager from '../../../src/storage/storage-manager.js'; 3 | import AltNameService from '../../../src/tunnel/altname-service.js'; 4 | 5 | describe('altname service', () => { 6 | let altNameService: AltNameService; 7 | 8 | beforeEach(async () => { 9 | await StorageManager.init(new URL("memory://")); 10 | altNameService = new AltNameService(); 11 | }); 12 | 13 | afterEach(async () => { 14 | await altNameService.destroy(); 15 | await StorageManager.close(); 16 | }) 17 | 18 | it(`can add altname`, async () => { 19 | const altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 20 | assert(altnames.length === 1); 21 | assert(altnames[0] === "altname1"); 22 | }); 23 | 24 | it(`can add and remove altname`, async () => { 25 | let altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 26 | assert(altnames.length === 1); 27 | assert(altnames[0] === "altname1"); 28 | 29 | altnames = await altNameService.update("test", "tunnel1", [], ["altname1"]); 30 | assert(altnames.length === 0); 31 | }); 32 | 33 | it(`same alt name is not duplicated`, async () => { 34 | let altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 35 | assert(altnames.length === 1); 36 | assert(altnames[0] === "altname1"); 37 | 38 | altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 39 | assert(altnames.length === 1); 40 | assert(altnames[0] === "altname1"); 41 | }); 42 | 43 | it(`adding the same alt name to different tunnels`, async () => { 44 | let altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 45 | assert(altnames.length === 1); 46 | 
assert(altnames[0] === "altname1"); 47 | 48 | altnames = await altNameService.update("test", "tunnel2", ["altname1"]); 49 | assert(altnames.length === 0); 50 | 51 | const tunnelId = await altNameService.get("test", "altname1") 52 | assert(tunnelId === "tunnel1"); 53 | }); 54 | 55 | it(`update can add and remove`, async () => { 56 | let altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 57 | assert(altnames.length === 1); 58 | assert(altnames[0] === "altname1"); 59 | 60 | altnames = await altNameService.update("test", "tunnel1", ["altname1"], ["altname2"]); 61 | assert(altnames.length === 1); 62 | assert(altnames[0] === "altname1"); 63 | }); 64 | 65 | it(`can have multiple altname`, async () => { 66 | const altnames = await altNameService.update("test", "tunnel1", ["altname1", "altname2"]); 67 | assert(altnames.length === 2); 68 | assert(altnames[0] === "altname1"); 69 | assert(altnames[1] === "altname2"); 70 | }); 71 | 72 | it(`supports different altname services`, async () => { 73 | let altnames = await altNameService.update("test", "tunnel1", ["altname1"]); 74 | assert(altnames.length === 1); 75 | assert(altnames[0] === "altname1"); 76 | 77 | let tunnelId = await altNameService.get("test", "altname1") 78 | assert(tunnelId === "tunnel1"); 79 | 80 | altnames = await altNameService.update("test2", "tunnel2", ["altname1"]); 81 | assert(altnames.length === 1); 82 | assert(altnames[0] === "altname1"); 83 | 84 | tunnelId = await altNameService.get("test2", "altname1") 85 | assert(tunnelId === "tunnel2"); 86 | }); 87 | }); -------------------------------------------------------------------------------- /test/unit/utils/test_hostname.js: -------------------------------------------------------------------------------- 1 | import Hostname from '../../../src/utils/hostname.js'; 2 | import assert from 'assert/strict'; 3 | 4 | describe('hostname', () => { 5 | 6 | const parseTests = [ 7 | {args: ['example.com', 80], expected: new 
URL("http://example.com/")}, 8 | {args: ['example.com', 0], expected: new URL("tcp://example.com")}, 9 | {args: ['example.example', "0"], expected: new URL("tcp://example.example")}, 10 | {args: ['localhost', 0], expected: new URL("tcp://localhost")}, 11 | {args: ['example.com:80'], expected: new URL("http://example.com/")}, 12 | {args: ['http://example.com:80'], expected: new URL("http://example.com/")}, 13 | {args: ['http://example.com'], expected: new URL("http://example.com/")}, 14 | {args: ['example.com', 443], expected: new URL("https://example.com/")}, 15 | {args: ['http://example.com:443'], expected: new URL("http://example.com:443/")}, 16 | {args: ['ssh://example.com:2200'], expected: new URL("ssh://example.com:2200")}, 17 | {args: ['example.com', 65536], expected: undefined}, 18 | ]; 19 | 20 | parseTests.forEach(({args, expected}) => { 21 | it(`parse() correctly parses ${args}`, () => { 22 | const url = Hostname.parse(...args) 23 | assert(url?.href == expected?.href, `got ${url?.href}`); 24 | }); 25 | }); 26 | 27 | const tlsTests = [ 28 | {url: new URL("https://example.com"), expected: true}, 29 | {url: new URL("wss://example.com"), expected: true}, 30 | {url: new URL("tcps://example.com"), expected: true}, 31 | {url: new URL("tcp://example.com"), expected: false}, 32 | {url: new URL("ssh://example.com"), expected: false}, 33 | {url: new URL("http://example.com"), expected: false}, 34 | {url: new URL("ws://example.com"), expected: false}, 35 | ] 36 | 37 | tlsTests.forEach(({url, expected}) => { 38 | it(`isTLS(): ${url.protocol} is TLS ${expected}`, () => { 39 | const result = Hostname.isTLS(url) 40 | assert(result == expected, `got ${result}`); 41 | }); 42 | }); 43 | 44 | const portTests = [ 45 | {url: new URL("https://example.com"), expected: 443}, 46 | {url: new URL("wss://example.com"), expected: 443}, 47 | {url: new URL("ssh://example.com:2200"), expected: 2200}, 48 | {url: new URL("http://example.com"), expected: 80}, 49 | {url: new 
URL("ws://example.com"), expected: 80}, 50 | {url: new URL("tcp://example.com:1234"), expected: 1234}, 51 | ] 52 | 53 | portTests.forEach(({url, expected}) => { 54 | it(`getPort(): ${url.href} is ${expected}`, () => { 55 | const result = Hostname.getPort(url) 56 | assert(result == expected, `got ${result}`); 57 | }); 58 | }); 59 | }); -------------------------------------------------------------------------------- /tools/echo-server/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /tools/echo-server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "echo-server", 3 | "description": "http echo server", 4 | "version": "1.0.0", 5 | "author": "Fredrik Lindberg ", 6 | "license": "MIT", 7 | "type": "commonjs", 8 | "engines": { 9 | "node": ">=18" 10 | }, 11 | "devDependencies": { 12 | "@types/node": "^20.4.9", 13 | "@types/ws": "^8.5.5", 14 | "ts-node": "^10.9.1", 15 | "typescript": "^5.1.6" 16 | }, 17 | "scripts": { 18 | "build": "tsc", 19 | "clean": "rm -fr dist" 20 | }, 21 | "dependencies": { 22 | "ws": "^8.13.0" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /tools/echo-server/src/echo-server.ts: -------------------------------------------------------------------------------- 1 | import * as http from 'node:http'; 2 | import { Duplex } from 'node:stream'; 3 | import * as url from 'node:url'; 4 | import { WebSocketServer } from 'ws'; 5 | 6 | export const createEchoHttpServer = async (port = 20000) => { 7 | 8 | const echoRequest = (request: http.IncomingMessage, response: http.ServerResponse) => { 9 | let body: Array = []; 10 | request.on('data', (chunk: Buffer) => { 11 | body.push(chunk); 12 | }).on('end', () => { 13 | const buf = Buffer.concat(body).toString(); 14 | response.statusCode = 200; 15 | 
response.end(buf); 16 | }); 17 | }; 18 | 19 | const fileGenerator = (size: number, chunkSize: number, response: http.ServerResponse) => { 20 | let sentBytes: number = 0; 21 | 22 | response.statusCode = 200; 23 | response.setHeader("Content-Type", "application/octet-stream"); 24 | response.setHeader('Content-Disposition', 'attachment; filename="file.bin"'); 25 | response.setHeader("Content-Length", size); 26 | 27 | const writeChunk = () => { 28 | if (sentBytes < size) { 29 | const remainingBytes = size - sentBytes; 30 | const chunkToSend = Math.min(chunkSize, remainingBytes); 31 | 32 | const buffer = Buffer.alloc(chunkToSend); 33 | response.write(buffer); 34 | 35 | sentBytes += chunkToSend; 36 | 37 | setTimeout(writeChunk, 0); 38 | } else { 39 | response.end(); 40 | } 41 | } 42 | 43 | writeChunk(); 44 | }; 45 | 46 | const wss = new WebSocketServer({ noServer: true }); 47 | const handleUpgrade = (async (request: http.IncomingMessage, socket: Duplex, head: Buffer) => { 48 | const parsedUrl = url.parse(request.url, true) 49 | if (parsedUrl.pathname != '/ws') { 50 | socket.write(`HTTP/${request.httpVersion} 404 Not found\r\n`); 51 | socket.end(); 52 | socket.destroy(); 53 | } 54 | 55 | wss.handleUpgrade(request, socket, head, (ws) => { 56 | ws.send("hello"); 57 | ws.on('message', (data) => { 58 | ws.send(data); 59 | }); 60 | }); 61 | }); 62 | 63 | const handleRequest = (request: http.IncomingMessage, response: http.ServerResponse) => { 64 | 65 | const parsedUrl = url.parse(request.url, true) 66 | 67 | if (request.method == "GET" && parsedUrl.pathname == '/file') { 68 | const size = Number(parsedUrl.query["size"] || "32"); 69 | const chunkSize = Number(parsedUrl.query["chunk"] || "262144"); 70 | return fileGenerator(size, chunkSize, response); 71 | } else { 72 | return echoRequest(request, response); 73 | } 74 | } 75 | 76 | const server = http.createServer(); 77 | server.on('request', handleRequest); 78 | server.on('upgrade', handleUpgrade); 79 | 80 | 
server.listen(port); 81 | return { 82 | destroy: () => { 83 | server.removeAllListeners('request'); 84 | server.removeAllListeners('upgrade'); 85 | server.close(); 86 | } 87 | }; 88 | }; 89 | 90 | const echoServer = createEchoHttpServer(); -------------------------------------------------------------------------------- /tools/echo-server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["es2021", "dom"], 4 | "module": "Node16", 5 | "target": "es2021", 6 | "strict": true, 7 | "esModuleInterop": true, 8 | "skipLibCheck": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "moduleResolution": "node16", 11 | "declaration": true, 12 | "outDir": "./dist", 13 | "rootDir": "./src" 14 | }, 15 | "include": ["src/**/*"], 16 | "exclude": ["node_modules"] 17 | } -------------------------------------------------------------------------------- /tools/echo-server/yarn.lock: -------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
2 | # yarn lockfile v1 3 | 4 | 5 | "@cspotcode/source-map-support@^0.8.0": 6 | version "0.8.1" 7 | resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" 8 | integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw== 9 | dependencies: 10 | "@jridgewell/trace-mapping" "0.3.9" 11 | 12 | "@jridgewell/resolve-uri@^3.0.3": 13 | version "3.1.1" 14 | resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721" 15 | integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== 16 | 17 | "@jridgewell/sourcemap-codec@^1.4.10": 18 | version "1.4.15" 19 | resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" 20 | integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== 21 | 22 | "@jridgewell/trace-mapping@0.3.9": 23 | version "0.3.9" 24 | resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9" 25 | integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ== 26 | dependencies: 27 | "@jridgewell/resolve-uri" "^3.0.3" 28 | "@jridgewell/sourcemap-codec" "^1.4.10" 29 | 30 | "@tsconfig/node10@^1.0.7": 31 | version "1.0.9" 32 | resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2" 33 | integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA== 34 | 35 | "@tsconfig/node12@^1.0.7": 36 | version "1.0.11" 37 | resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d" 38 | integrity 
sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag== 39 | 40 | "@tsconfig/node14@^1.0.0": 41 | version "1.0.3" 42 | resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1" 43 | integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow== 44 | 45 | "@tsconfig/node16@^1.0.2": 46 | version "1.0.4" 47 | resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.4.tgz#0b92dcc0cc1c81f6f306a381f28e31b1a56536e9" 48 | integrity sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA== 49 | 50 | "@types/node@*": 51 | version "20.5.7" 52 | resolved "https://registry.yarnpkg.com/@types/node/-/node-20.5.7.tgz#4b8ecac87fbefbc92f431d09c30e176fc0a7c377" 53 | integrity sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA== 54 | 55 | "@types/node@^20.4.9": 56 | version "20.5.6" 57 | resolved "https://registry.yarnpkg.com/@types/node/-/node-20.5.6.tgz#5e9aaa86be03a09decafd61b128d6cec64a5fe40" 58 | integrity sha512-Gi5wRGPbbyOTX+4Y2iULQ27oUPrefaB0PxGQJnfyWN3kvEDGM3mIB5M/gQLmitZf7A9FmLeaqxD3L1CXpm3VKQ== 59 | 60 | "@types/ws@^8.5.5": 61 | version "8.5.5" 62 | resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.5.tgz#af587964aa06682702ee6dcbc7be41a80e4b28eb" 63 | integrity sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg== 64 | dependencies: 65 | "@types/node" "*" 66 | 67 | acorn-walk@^8.1.1: 68 | version "8.2.0" 69 | resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" 70 | integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== 71 | 72 | acorn@^8.4.1: 73 | version "8.10.0" 74 | resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.10.0.tgz#8be5b3907a67221a81ab23c7889c4c5526b62ec5" 75 | 
integrity sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw== 76 | 77 | arg@^4.1.0: 78 | version "4.1.3" 79 | resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089" 80 | integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA== 81 | 82 | create-require@^1.1.0: 83 | version "1.1.1" 84 | resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333" 85 | integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== 86 | 87 | diff@^4.0.1: 88 | version "4.0.2" 89 | resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" 90 | integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== 91 | 92 | make-error@^1.1.1: 93 | version "1.3.6" 94 | resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" 95 | integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== 96 | 97 | ts-node@^10.9.1: 98 | version "10.9.1" 99 | resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b" 100 | integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw== 101 | dependencies: 102 | "@cspotcode/source-map-support" "^0.8.0" 103 | "@tsconfig/node10" "^1.0.7" 104 | "@tsconfig/node12" "^1.0.7" 105 | "@tsconfig/node14" "^1.0.0" 106 | "@tsconfig/node16" "^1.0.2" 107 | acorn "^8.4.1" 108 | acorn-walk "^8.1.1" 109 | arg "^4.1.0" 110 | create-require "^1.1.0" 111 | diff "^4.0.1" 112 | make-error "^1.1.1" 113 | v8-compile-cache-lib "^3.0.1" 114 | yn "3.1.1" 115 | 116 | typescript@^5.1.6: 117 | version "5.2.2" 118 | resolved 
"https://registry.yarnpkg.com/typescript/-/typescript-5.2.2.tgz#5ebb5e5a5b75f085f22bc3f8460fba308310fa78" 119 | integrity sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w== 120 | 121 | v8-compile-cache-lib@^3.0.1: 122 | version "3.0.1" 123 | resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf" 124 | integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg== 125 | 126 | ws@^8.13.0: 127 | version "8.13.0" 128 | resolved "https://registry.yarnpkg.com/ws/-/ws-8.13.0.tgz#9a9fb92f93cf41512a0735c8f4dd09b8a1211cd0" 129 | integrity sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA== 130 | 131 | yn@3.1.1: 132 | version "3.1.1" 133 | resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" 134 | integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== 135 | -------------------------------------------------------------------------------- /tools/migrate/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /tools/migrate/README.md: -------------------------------------------------------------------------------- 1 | # Storage provider migration tool 2 | 3 | Best-effort tool that can migrate data between the supported storage providers. 4 | Supports the migration between SQlite, Postgresql and Redis. 5 | 6 | The tool will copy all data from the source to destination, and will remove any data 7 | that exists in the destination, but not present in the source. 8 | 9 | Hence, you can run this tool multiple times. 10 | 11 | ## Setup 12 | Clone or copy the full source tree, the migration tool is reusing modules from exposr. 

Install dependencies

> cd tools/migrate
> yarn install

## Usage
The migration tool takes a source URL and a destination URL as input.

    Usage: migrate.js [--dry-run]

    Positionals:
      source-url       Source storage      [string]
      destination-url  Destination storage [string]

    Options:
      --help       Show help                 [boolean]
      --dry-run    Do not write to destination [boolean] [default: false]
      --namespace  Namespaces to migrate [array] [default: ["tunnel","account","ingress-altnames"]]

    Examples:
      migrate.js redis://localhost:6379 postgres://localhost:5432  Copy from redis to postgres
      migrate.js sqlite://db.sqlite postgres://localhost:5432      Copy from sqlite to postgres

    Both a source and destination is required

### Supported source/destinations
#### SQLite
To reference a SQLite database use the URL syntax `sqlite://<path>`.

For example `sqlite://db.sqlite` or `sqlite:///full/path/to/db.sqlite`.

#### PostgreSQL
To reference a PostgreSQL database use the URL syntax `postgres://<user>:<password>@<host>/<database>`.

For example `postgres://pguser:secretpassword@database-server.local:5432/myDatabase`.

#### Redis
To reference a Redis database use the URL syntax `redis://[:<password>]@<host>`.

For example `redis://:redispassword@redis-server.local:6379`.
54 | 55 | ### Examples 56 | Migrate from Redis to Postgres 57 | 58 | > node migrate.js redis://localhost:6379 postgres://postgres:password@localhost:5432/exposr 59 | 60 | Migrate from Redis to SQLite 61 | 62 | > node migrate.js redis://localhost:6379 sqlite://db.sqlite -------------------------------------------------------------------------------- /tools/migrate/migrate.js: -------------------------------------------------------------------------------- 1 | process.env.EXPOSR_EMBEDDED = 'true'; 2 | 3 | import yargs from 'yargs'; 4 | import PgsqlStorageProvider from '../../src/storage/pgsql-storage-provider.js'; 5 | import RedisStorageProvider from '../../src/storage/redis-storage-provider.js'; 6 | import SqliteStorageProvider from '../../src/storage/sqlite-storage-provider.js'; 7 | 8 | const parse = (argv) => { 9 | return yargs() 10 | .version(false) 11 | .usage('Usage: $0 [--dry-run]') 12 | .positional('source-url', { 13 | describe: 'Source storage', 14 | type: 'string', 15 | }) 16 | .positional('destination-url', { 17 | describe: 'Destination storage', 18 | type: 'string', 19 | }) 20 | .option('dry-run', { 21 | describe: 'Do not write to destination', 22 | default: false, 23 | type: 'boolean' 24 | }) 25 | .option('namespace', { 26 | describe: 'Namespaces to migrate', 27 | default: ['tunnel', 'account', 'ingress-altnames'], 28 | type: 'array' 29 | }) 30 | .check((args) => { 31 | args._[0] = new URL(args._[0]); 32 | args._[1] = new URL(args._[1]); 33 | return true; 34 | }) 35 | .example('$0 redis://localhost:6379 postgres://localhost:5432', 'Copy from redis to postgres') 36 | .example('$0 sqlite://db.sqlite postgres://localhost:5432', 'Copy from sqlite to postgres') 37 | .demandCommand(2, "Both a source and destination is required") 38 | .wrap(120) 39 | .parse(argv); 40 | }; 41 | 42 | const createStorage = async (url) => { 43 | const type = url.protocol.slice(0, -1) || 'none'; 44 | 45 | let clazz; 46 | let opts; 47 | switch (type) { 48 | case 'redis': 49 | clazz = 
RedisStorageProvider; 50 | opts = { 51 | url 52 | } 53 | break; 54 | case 'sqlite': 55 | clazz = SqliteStorageProvider; 56 | opts = { 57 | url 58 | } 59 | break; 60 | case 'pgsql': 61 | case 'postgres': 62 | clazz = PgsqlStorageProvider; 63 | opts = { 64 | url 65 | } 66 | break; 67 | default: 68 | console.log(`Unsupported storage ${type}`); 69 | process.exit(-1); 70 | } 71 | 72 | return new Promise((resolve, reject) => { 73 | const storage = new clazz({ 74 | ...opts, 75 | callback: (err) => { err ? reject(err) : resolve(storage) }, 76 | }); 77 | }); 78 | }; 79 | 80 | const migrateNamespace = async (source, destination, namespace, dryRun) => { 81 | console.log(`Migrating namespace '${namespace}'`); 82 | let count = 0; 83 | let total_success = 0; 84 | let total_failed = 0; 85 | 86 | await source.init(namespace); 87 | await destination.init(namespace); 88 | 89 | let res; 90 | while (true) { 91 | res = await source.list(namespace, res?.cursor, 100); 92 | if (!res) { 93 | break; 94 | } 95 | 96 | const keys = res.data; 97 | if (keys.length <= 0) { 98 | break; 99 | } 100 | 101 | count += keys.length; 102 | console.log(`Processing records ${count - keys.length}...${count}`); 103 | 104 | const values = await source.mget(namespace, keys); 105 | 106 | const setter = values.map((value, index) => { 107 | if (value && !dryRun) { 108 | return destination.set(namespace, keys[index], value); 109 | } else { 110 | return new Promise((resolve) => { resolve() }); 111 | } 112 | }); 113 | 114 | const [processed_success, processed_failed] = await Promise.allSettled(setter).then((results) => { 115 | const success = results.filter((result) => result.status == 'fulfilled'); 116 | const failed = results.filter((result) => result.status == 'rejected'); 117 | return [success, failed]; 118 | }); 119 | 120 | total_success += processed_success.length; 121 | total_failed += processed_failed.length; 122 | if (processed_failed.length > 0) { 123 | console.error(`${processed_failed.length} of 
${keys.length} records failed to synchronize`); 124 | } 125 | 126 | if (res.cursor == null) { 127 | break; 128 | } 129 | } 130 | 131 | let total_removed = 0; 132 | count = 0; 133 | while (true) { 134 | res = await destination.list(namespace, res?.cursor, 100); 135 | if (!res) { 136 | break; 137 | } 138 | 139 | const keys = res.data; 140 | if (keys.length <= 0) { 141 | break; 142 | } 143 | 144 | count += keys.length; 145 | console.log(`Verifying records ${count - keys.length}...${count}`); 146 | 147 | const values = await source.mget(namespace, keys); 148 | 149 | const setter = values.map((value, index) => { 150 | if (!value && !dryRun) { 151 | return destination.delete(namespace, keys[index]); 152 | } else { 153 | return new Promise((resolve) => { resolve(null) }); 154 | } 155 | }); 156 | 157 | const removed = await Promise.allSettled(setter).then((results) => { 158 | return results.filter((result) => result.value != null); 159 | }); 160 | total_removed += removed.length; 161 | 162 | if (removed.length > 0) { 163 | console.log(`Removed ${removed.length} stale entries from destination`); 164 | } 165 | 166 | if (res.cursor == null) { 167 | break; 168 | } 169 | } 170 | 171 | console.log(`Completed namespace '${namespace}', successful records=${total_success}, failed records=${total_failed}`); 172 | }; 173 | 174 | const migrate = async (srcUrl, dstUrl, dryRun, namespaces) => { 175 | const source = await createStorage(srcUrl); 176 | console.log(`Source ${srcUrl} open`); 177 | const destination = await createStorage(dstUrl); 178 | console.log(`Destination ${dstUrl} open`); 179 | 180 | for (let i = 0; i < namespaces.length; i++) { 181 | const namespace = namespaces[i]; 182 | await migrateNamespace(source, destination, namespace, dryRun); 183 | } 184 | 185 | source.destroy(); 186 | destination.destroy(); 187 | }; 188 | 189 | (async () => { 190 | const args = parse(process.argv.slice(2)); 191 | await migrate(args._[0], args._[1], args['dry-run'], args['namespace']); 192 | 
})(); -------------------------------------------------------------------------------- /tools/migrate/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "exposr-storage-migrate", 3 | "version": "0.1.0", 4 | "type": "module", 5 | "author": "Fredrik Lindberg ", 6 | "license": "MIT", 7 | "engines": { 8 | "node": ">=18" 9 | }, 10 | "dependencies": { 11 | "better-sqlite3": "^8.3.0", 12 | "pg": "^8.11.0", 13 | "redis": "^4.6.6", 14 | "yargs": "^17.7.2" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["es2021", "dom"], 4 | "module": "NodeNext", 5 | "target": "es2021", 6 | "strict": true, 7 | "esModuleInterop": true, 8 | "skipLibCheck": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "moduleResolution": "NodeNext", 11 | "declaration": true, 12 | "allowJs": true, 13 | "outDir": "./out", 14 | "rootDir": "./" 15 | }, 16 | "include": [ 17 | "exposrd.ts", 18 | "build.js", 19 | "package.cjs", 20 | "src/**/*" 21 | ], 22 | "exclude": ["node_modules"] 23 | } --------------------------------------------------------------------------------