├── .cirun.yml ├── .github ├── buildkitd.toml ├── dependabot.yml └── workflows │ ├── archived │ ├── docker-build-brudi.yml │ ├── docker-build-clustersecret.yml │ ├── docker-build-eternaljukebox.yml │ ├── docker-build-huginn.yml │ ├── docker-build-k8s-zabbix.yml │ ├── docker-build-keel.yml │ ├── docker-build-kwatch.yml │ ├── docker-build-languagetool.yml │ ├── docker-build-litefs-patched.yaml │ ├── docker-build-mailcow.yml │ ├── docker-build-moco.yml │ ├── docker-build-nginx-ssl-fingerprint.yml │ ├── docker-build-onlyoffice.yml │ └── docker-build-pulsejet-go-vod.yml │ ├── check-docker-updates.yml │ ├── docker-build-caddy.yml │ ├── docker-build-cert-manager-webhook-powerdns.yml │ ├── docker-build-coredns.yml │ ├── docker-build-gpt4free.yml │ ├── docker-build-ingress-nginx-controller-dynamic-modules.yml │ ├── docker-build-ingress-nginx-custom.yml │ ├── docker-build-ingress-nginx.yml │ ├── docker-build-logrotate.yml │ ├── docker-build-nitter-patched.yml │ ├── docker-build-openresty-ssl-ja3.yml │ ├── docker-build-samba.yml │ ├── docker-build-stash.yml │ ├── docker-build-stolon.yml │ ├── docker-build-tor-single-hop.yml │ ├── docker-build-wireproxy-debian.yml │ └── workflow-run-cleanup.yml ├── README.md ├── brudi └── Dockerfile ├── caddy └── Dockerfile ├── clustersecret └── Dockerfile ├── coredns └── plugin.cfg ├── eternaljukebox └── Dockerfile ├── huginn ├── multi-process │ ├── Dockerfile │ └── scripts │ │ └── standalone-packages ├── patches │ └── force_nokogiri_compilation.patch ├── scripts │ └── prepare └── single-process │ └── Dockerfile ├── ingress-nginx-controller-dynamic-modules └── Dockerfile ├── ingress-nginx-custom └── Dockerfile ├── ingress-nginx └── patches │ └── 0001-add-quic-to-nginx-ingress.patch ├── kwatch └── Dockerfile ├── languagetool └── Dockerfile ├── litefs-patched └── 0001-force-all-requests-to-master-get-method-also.patch ├── logrotate └── Dockerfile ├── moco ├── .gitignore ├── Dockerfile.backup └── Dockerfile.controller ├── nginx-ssl-fingerprint └── Dockerfile ├── nitter └── patches │ ├── 0001-cdn-hmac-support.patch │ ├── 0002-proxy-pictures-support.patch │ ├── 0003-add-x-cache-header.patch │ └── 0004-custom-donation-page.patch ├── openresty-ssl-ja3 ├── .gitignore ├── Dockerfile ├── README.md └── http2 │ ├── config │ ├── patches │ └── nginx-1.27.patch │ └── src │ ├── nginx_ssl_fingerprint.c │ ├── nginx_ssl_fingerprint.h │ └── ngx_http_ssl_fingerprint_module.c ├── samba └── samba.sh ├── stash └── Dockerfile.in ├── stolon └── Dockerfile ├── tor-single-hop ├── override │ ├── etc │ │ └── tor │ │ │ └── torrc-dist │ └── var │ │ └── lib │ │ └── tor │ │ ├── geoip │ │ └── geoip6 └── patches │ └── 0001-reduce-hops.patch └── wireproxy-debian └── Dockerfile /.cirun.yml: -------------------------------------------------------------------------------- 1 | runners: 2 | - name: big-runner 3 | cloud: digitalocean 4 | instance_type: c-4 5 | machine_image: ubuntu-22-04-x64 6 | region: ams3 7 | labels: 8 | - cirun-runner -------------------------------------------------------------------------------- /.github/buildkitd.toml: -------------------------------------------------------------------------------- 1 | [worker.oci] 2 | max-parallelism = 1 -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: "monthly" 7 | 
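# Aside: .github/buildkitd.toml above caps BuildKit's OCI worker at one parallel build
# step (max-parallelism = 1). It only takes effect when a workflow passes the file to
# docker/setup-buildx-action via its `config` input, as the archived languagetool
# workflow further below does; a minimal sketch of that wiring:
#
#   - name: Set up Docker Buildx
#     uses: docker/setup-buildx-action@v2
#     with:
#       version: latest
#       config: .github/buildkitd.toml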
-------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-brudi.yml: -------------------------------------------------------------------------------- 1 | name: Docker build brudi image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-brudi.yml' 10 | - 'brudi/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/mittwald/brudi.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: brudi 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo mittwald/brudi 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "mittwald/brudi" 45 | ref: 'master' 46 | path: 'brudi-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./brudi-git 73 | file: ./brudi/Dockerfile 74 | platforms: linux/amd64,linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/brudi:latest, quay.io/unixfox/brudi:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-clustersecret.yml: -------------------------------------------------------------------------------- 1 | name: Docker build clustersecret image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-clustersecret.yml' 10 | - 'clustersecret/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/zakkg3/ClusterSecret.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: clustersecret 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo zakkg3/ClusterSecret 41 | uses: 
actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "zakkg3/ClusterSecret" 45 | ref: 'master' 46 | path: 'clustersecret-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./clustersecret-git 73 | file: ./clustersecret/Dockerfile 74 | platforms: linux/amd64,linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/clustersecret:latest, quay.io/unixfox/clustersecret:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-eternaljukebox.yml: -------------------------------------------------------------------------------- 1 | name: Docker build EternalJukebox image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-eternaljukebox.yml' 10 | - 'eternaljukebox/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/UnderMybrella/EternalJukebox.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: eternaljukebox 29 | key: ${{ runner.os }}-v2-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo UnderMybrella/EternalJukebox 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "UnderMybrella/EternalJukebox" 45 | ref: 'master' 46 | path: 'eternaljukebox-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Fix maven URL 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | run: sed -i 's/maven.abimon.org/maven.brella.dev/g' ./eternaljukebox-git/build.gradle 70 | 71 | - name: Build and push docker image 72 | if: steps.cache.outputs.cache-hit != 'true' 73 | id: docker_build_new 74 | uses: docker/build-push-action@v3 75 | with: 76 | context: ./eternaljukebox-git 77 | file: 
./eternaljukebox/Dockerfile 78 | platforms: linux/amd64,linux/arm64 79 | push: true 80 | tags: quay.io/unixfox/eternaljukebox:latest, quay.io/unixfox/eternaljukebox:build-${{ env.timestamp }} 81 | -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-huginn.yml: -------------------------------------------------------------------------------- 1 | name: Docker build huginn image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-huginn.yml' 10 | - 'huginn/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/huginn/huginn.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: huginn 29 | key: ${{ runner.os }}-v1-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo huginn/huginn 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "huginn/huginn" 45 | ref: 'master' 46 | path: 'huginn-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Override prepare script 68 | id: override-prepare-script 69 | if: steps.cache.outputs.cache-hit != 'true' 70 | run: | 71 | cp huginn/scripts/prepare huginn-git/docker/scripts/prepare 72 | cp huginn/multi-process/scripts/standalone-packages huginn-git/docker/multi-process/scripts/standalone-packages 73 | shell: bash 74 | 75 | - name: Apply patches 76 | if: steps.cache.outputs.cache-hit != 'true' 77 | run: | 78 | mv ./huginn/patches/*.patch ./huginn-git 79 | git config --global user.email "you@example.com" 80 | git config --global user.name "Your Name" 81 | cd ./huginn-git && git am *.patch 82 | shell: bash 83 | 84 | - name: Build and push docker image single process 85 | if: steps.cache.outputs.cache-hit != 'true' 86 | id: docker_build_new_single_process 87 | uses: docker/build-push-action@v3 88 | with: 89 | context: ./huginn-git 90 | file: ./huginn/single-process/Dockerfile 91 | platforms: linux/amd64,linux/arm64 92 | push: true 93 | tags: quay.io/unixfox/huginn-single-process:latest, quay.io/unixfox/huginn-single-process:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-k8s-zabbix.yml: -------------------------------------------------------------------------------- 1 | name: Docker build k8s-zabbix image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 
9 | - '.github/workflows/docker-build-k8s-zabbix.yml' 10 | - 'k8s-zabbix/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/zabbix-tooling/k8s-zabbix.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: k8s-zabbix 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo zabbix-tooling/k8s-zabbix 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "zabbix-tooling/k8s-zabbix" 45 | ref: 'master' 46 | path: 'k8s-zabbix-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./k8s-zabbix-git 73 | file: ./k8s-zabbix-git/Dockerfile 74 | platforms: linux/amd64,linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/zabbix-tooling-k8s-zabbix:latest, quay.io/unixfox/zabbix-tooling-k8s-zabbix:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-keel.yml: -------------------------------------------------------------------------------- 1 | name: Docker build keel image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-keel.yml' 10 | - 'keel/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/keel-hq/keel.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: keel 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo keel-hq/keel 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "keel-hq/keel" 45 | ref: 'master' 46 | path: 'keel-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | 
username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./keel-git 73 | file: ./keel-git/Dockerfile 74 | platforms: linux/amd64,linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/keel:latest, quay.io/unixfox/keel:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-kwatch.yml: -------------------------------------------------------------------------------- 1 | name: Docker build kwatch image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-kwatch.yml' 10 | - 'kwatch/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/abahmed/kwatch.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: kwatch 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo abahmed/kwatch 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "abahmed/kwatch" 45 | ref: 'main' 46 | path: 'kwatch-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./kwatch-git 73 | file: ./kwatch/Dockerfile 74 | platforms: linux/amd64,linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/kwatch:latest, quay.io/unixfox/kwatch:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-languagetool.yml: -------------------------------------------------------------------------------- 1 | name: Docker build Languagetool image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-languagetool.yml' 10 | - 'languagetool/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 
16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/Erikvl87/docker-languagetool | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: languagetool 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Login to Quay.io 41 | if: steps.cache.outputs.cache-hit != 'true' 42 | uses: docker/login-action@v2 43 | with: 44 | registry: quay.io 45 | username: ${{ secrets.QUAY_USERNAME }} 46 | password: ${{ secrets.QUAY_PASSWORD }} 47 | 48 | - name: Set up Docker Buildx 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | id: buildx 51 | uses: docker/setup-buildx-action@v2 52 | with: 53 | version: latest 54 | config: .github/buildkitd.toml 55 | 56 | - name: Set up build timestamp 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 59 | 60 | - name: Build and push docker image 61 | if: steps.cache.outputs.cache-hit != 'true' 62 | id: docker_build_new 63 | uses: docker/build-push-action@v3 64 | with: 65 | context: ./ 66 | file: ./languagetool/Dockerfile 67 | platforms: linux/amd64,linux/arm64 68 | push: true 69 | tags: quay.io/unixfox/languagetool:latest, quay.io/unixfox/languagetool:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-litefs-patched.yaml: -------------------------------------------------------------------------------- 1 | name: Docker build litefs patched docker image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-litefs-patched.yml' 10 | - 'litefs-patched/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest tag 20 | id: get-latest-tag 21 | run: | 22 | echo "tag=$(git ls-remote --refs --tags https://github.com/superfly/litefs.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: litefs 29 | key: ${{ runner.os }}-v4-${{ env.tag }} 30 | 31 | - name: Check Out Repo 32 | uses: actions/checkout@v3 33 | 34 | - name: Check Out Repo superfly/litefs 35 | uses: actions/checkout@v3 36 | if: steps.cache.outputs.cache-hit != 'true' 37 | with: 38 | repository: "superfly/litefs" 39 | ref: '${{ env.tag }}' 40 | path: 'litefs-git' 41 | 42 | - name: patch litefs main repo 43 | if: steps.cache.outputs.cache-hit != 'true' 44 | run: | 45 | cd litefs-git 46 | git config --global user.email "you@example.com" 47 | git config --global user.name "Your Name" 48 | git am ../litefs-patched/*.patch 49 | 50 | - name: Login to Quay.io 51 | if: steps.cache.outputs.cache-hit != 'true' 52 | uses: docker/login-action@v2 53 | with: 54 | registry: quay.io 55 | username: ${{ secrets.QUAY_USERNAME }} 56 | password: ${{ secrets.QUAY_PASSWORD }} 57 | 58 | - name: Set up Docker Buildx 59 | if: steps.cache.outputs.cache-hit != 'true' 60 | id: buildx 61 | uses: docker/setup-buildx-action@v2 62 | 
with: 63 | version: latest 64 | 65 | - name: Set up build timestamp 66 | if: steps.cache.outputs.cache-hit != 'true' 67 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 68 | 69 | - name: Build and push patched litefs 70 | if: steps.cache.outputs.cache-hit != 'true' 71 | id: docker_build_new_custom 72 | uses: docker/build-push-action@v3 73 | with: 74 | context: ./litefs-git 75 | file: ./litefs-git/Dockerfile 76 | platforms: linux/amd64,linux/arm64,linux/arm/v7 77 | push: true 78 | tags: quay.io/unixfox/litefs-patched:latest, quay.io/unixfox/litefs-patched:${{ env.tag }}-${{ env.timestamp }} 79 | build-args: LITEFS_VERSION=${{ env.tag }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-mailcow.yml: -------------------------------------------------------------------------------- 1 | name: Docker build mailcow images 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-mailcow.yml' 10 | - 'mailcow/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest tag 20 | id: get-latest-tag 21 | run: | 22 | echo "tag=$(git ls-remote --refs --tags https://github.com/mailcow/mailcow-dockerized.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: mailcow 29 | key: ${{ runner.os }}-v2-${{ env.tag }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo mailcow/mailcow-dockerized 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "mailcow/mailcow-dockerized" 45 | ref: '${{ env.tag }}' 46 | path: 'mailcow-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Patch rspamd 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | run: | 70 | sed -i 's/debian:bullseye-slim/ubuntu:jammy/' ./mailcow-git/data/Dockerfiles/rspamd/Dockerfile 71 | shell: bash 72 | 73 | - name: Build and push acme 74 | if: steps.cache.outputs.cache-hit != 'true' 75 | id: docker_build_new_custom 76 | uses: docker/build-push-action@v3 77 | with: 78 | context: ./mailcow-git/data/Dockerfiles/acme 79 | file: ./mailcow-git/data/Dockerfiles/acme/Dockerfile 80 | platforms: linux/arm64 81 | push: true 82 | tags: quay.io/mailcowarm64/acme:latest, quay.io/mailcowarm64/acme:${{ env.tag }}-${{ env.timestamp }} 83 | 84 | - name: Build and push dockerapi 85 | if: steps.cache.outputs.cache-hit != 'true' 86 | id: docker_build_dockerapi 87 | uses: docker/build-push-action@v3 88 | with: 89 | context: ./mailcow-git/data/Dockerfiles/dockerapi 90 | file: 
./mailcow-git/data/Dockerfiles/dockerapi/Dockerfile 91 | platforms: linux/arm64 92 | push: true 93 | tags: quay.io/mailcowarm64/dockerapi:latest, quay.io/mailcowarm64/dockerapi:${{ env.tag }}-${{ env.timestamp }} 94 | 95 | - name: Build and push netfilter 96 | if: steps.cache.outputs.cache-hit != 'true' 97 | id: docker_build_netfilter 98 | uses: docker/build-push-action@v3 99 | with: 100 | context: ./mailcow-git/data/Dockerfiles/netfilter 101 | file: ./mailcow-git/data/Dockerfiles/netfilter/Dockerfile 102 | platforms: linux/arm64 103 | push: true 104 | tags: quay.io/mailcowarm64/netfilter:latest, quay.io/mailcowarm64/netfilter:${{ env.tag }}-${{ env.timestamp }} 105 | 106 | - name: Build and push olefy 107 | if: steps.cache.outputs.cache-hit != 'true' 108 | id: docker_build_olefy 109 | uses: docker/build-push-action@v3 110 | with: 111 | context: ./mailcow-git/data/Dockerfiles/olefy 112 | file: ./mailcow-git/data/Dockerfiles/olefy/Dockerfile 113 | platforms: linux/arm64 114 | push: true 115 | tags: quay.io/mailcowarm64/olefy:latest, quay.io/mailcowarm64/olefy:${{ env.tag }}-${{ env.timestamp }} 116 | 117 | - name: Build and push phpfpm 118 | if: steps.cache.outputs.cache-hit != 'true' 119 | id: docker_build_phpfpm 120 | uses: docker/build-push-action@v3 121 | with: 122 | context: ./mailcow-git/data/Dockerfiles/phpfpm 123 | file: ./mailcow-git/data/Dockerfiles/phpfpm/Dockerfile 124 | platforms: linux/arm64 125 | push: true 126 | tags: quay.io/mailcowarm64/phpfpm:latest, quay.io/mailcowarm64/phpfpm:${{ env.tag }}-${{ env.timestamp }} 127 | 128 | - name: Build and push postfix 129 | if: steps.cache.outputs.cache-hit != 'true' 130 | id: docker_build_postfix 131 | uses: docker/build-push-action@v3 132 | with: 133 | context: ./mailcow-git/data/Dockerfiles/postfix 134 | file: ./mailcow-git/data/Dockerfiles/postfix/Dockerfile 135 | platforms: linux/arm64 136 | push: true 137 | tags: quay.io/mailcowarm64/postfix:latest, quay.io/mailcowarm64/postfix:${{ env.tag }}-${{ env.timestamp }} 138 | 139 | - name: Build and push patched rspamd 140 | if: steps.cache.outputs.cache-hit != 'true' 141 | id: docker_build_rspamd 142 | uses: docker/build-push-action@v3 143 | with: 144 | context: ./mailcow-git/data/Dockerfiles/rspamd 145 | file: ./mailcow-git/data/Dockerfiles/rspamd/Dockerfile 146 | platforms: linux/arm64 147 | push: true 148 | tags: quay.io/mailcowarm64/rspamd:latest, quay.io/mailcowarm64/rspamd:${{ env.tag }}-${{ env.timestamp }} 149 | 150 | - name: Build and push solr 151 | if: steps.cache.outputs.cache-hit != 'true' 152 | id: docker_build_solr 153 | uses: docker/build-push-action@v3 154 | with: 155 | context: ./mailcow-git/data/Dockerfiles/solr 156 | file: ./mailcow-git/data/Dockerfiles/solr/Dockerfile 157 | platforms: linux/arm64 158 | push: true 159 | tags: quay.io/mailcowarm64/solr:latest, quay.io/mailcowarm64/solr:${{ env.tag }}-${{ env.timestamp }} 160 | 161 | - name: Build and push unbound 162 | if: steps.cache.outputs.cache-hit != 'true' 163 | id: docker_build_unbound 164 | uses: docker/build-push-action@v3 165 | with: 166 | context: ./mailcow-git/data/Dockerfiles/unbound 167 | file: ./mailcow-git/data/Dockerfiles/unbound/Dockerfile 168 | platforms: linux/arm64 169 | push: true 170 | tags: quay.io/mailcowarm64/unbound:latest, quay.io/mailcowarm64/unbound:${{ env.tag }}-${{ env.timestamp }} 171 | 172 | - name: Build and push watchdog 173 | if: steps.cache.outputs.cache-hit != 'true' 174 | id: docker_build_watchdog 175 | uses: docker/build-push-action@v3 176 | with: 177 | context: 
./mailcow-git/data/Dockerfiles/watchdog 178 | file: ./mailcow-git/data/Dockerfiles/watchdog/Dockerfile 179 | platforms: linux/arm64 180 | push: true 181 | tags: quay.io/mailcowarm64/watchdog:latest, quay.io/mailcowarm64/watchdog:${{ env.tag }}-${{ env.timestamp }} 182 | -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-moco.yml: -------------------------------------------------------------------------------- 1 | name: Docker build moco image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-moco.yml' 10 | - 'moco/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest tag 20 | id: get-latest-tag 21 | run: | 22 | echo "tag=$(git ls-remote --refs --tags https://github.com/cybozu-go/moco.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: moco 29 | key: ${{ runner.os }}-${{ env.tag }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo cybozu-go/moco 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "cybozu-go/moco" 45 | ref: '${{ env.tag }}' 46 | path: 'moco-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image for controller 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: docker_build_controller 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./moco-git 73 | file: ./moco/Dockerfile.controller 74 | platforms: linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/moco-controller:latest, quay.io/unixfox/moco-controller:${{ env.tag }}, quay.io/unixfox/moco-controller:${{ env.tag }}-build-${{ env.timestamp }} 77 | 78 | - name: Build and push docker image for backup 79 | if: steps.cache.outputs.cache-hit != 'true' 80 | id: docker_build_backup 81 | uses: docker/build-push-action@v3 82 | with: 83 | context: ./moco-git 84 | file: ./moco/Dockerfile.backup 85 | platforms: linux/arm64 86 | push: true 87 | tags: quay.io/unixfox/moco-backup:latest, quay.io/unixfox/moco-backup:${{ env.tag }}, quay.io/unixfox/moco-backup:${{ env.tag }}-build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-nginx-ssl-fingerprint.yml: -------------------------------------------------------------------------------- 1 | name: Docker build nginx-ssl-fingerprint image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-nginx-ssl-fingerprint.yml' 8 | 
- 'nginx-ssl-fingerprint/**' 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-24.04-arm 13 | 14 | steps: 15 | - name: Get latest commit hash 16 | id: get-latest-commit 17 | run: | 18 | echo "commit=$(git ls-remote https://github.com/phuslu/nginx-ssl-fingerprint.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 19 | shell: bash 20 | 21 | - name: Check Out Repo 22 | uses: actions/checkout@v3 23 | 24 | - name: Check Out Repo phuslu/nginx-ssl-fingerprint 25 | uses: actions/checkout@v3 26 | with: 27 | repository: "phuslu/nginx-ssl-fingerprint" 28 | ref: 'master' 29 | path: 'nginx-ssl-fingerprint-git' 30 | 31 | - name: Login to Quay.io 32 | uses: docker/login-action@v2 33 | with: 34 | registry: quay.io 35 | username: ${{ secrets.QUAY_USERNAME }} 36 | password: ${{ secrets.QUAY_PASSWORD }} 37 | 38 | - name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v2 41 | with: 42 | version: latest 43 | 44 | - name: Set up build timestamp 45 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 46 | 47 | - name: Build and push docker image 48 | id: docker_build_new 49 | uses: docker/build-push-action@v3 50 | with: 51 | context: ./nginx-ssl-fingerprint-git 52 | file: ./nginx-ssl-fingerprint/Dockerfile 53 | platforms: linux/arm64 54 | push: true 55 | tags: quay.io/unixfox/nginx-ssl-fingerprint:openssl32-nginx1255-latest, quay.io/unixfox/nginx-ssl-fingerprint:openssl32-nginx1255-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-onlyoffice.yml: -------------------------------------------------------------------------------- 1 | name: Docker build onlyoffice image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-onlyoffice.yml' 10 | - 'onlyoffice/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-4vcpu-ubuntu-2204-arm" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest commit hash 20 | id: get-latest-commit 21 | run: | 22 | echo "commit=$(git ls-remote https://github.com/jiriks74/Docker-DocumentServer-Arm64.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: onlyoffice 29 | key: ${{ runner.os }}-${{ env.commit }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: arm64 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo jiriks74/Docker-DocumentServer-Arm64 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "jiriks74/Docker-DocumentServer-Arm64" 45 | ref: 'master' 46 | path: 'onlyoffice-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Set up build timestamp 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 66 | 67 | - name: Build and push docker image 68 | if: steps.cache.outputs.cache-hit != 'true' 69 | id: 
docker_build_new 70 | uses: docker/build-push-action@v3 71 | with: 72 | context: ./onlyoffice-git 73 | file: ./onlyoffice-git/Dockerfile 74 | platforms: linux/arm64 75 | push: true 76 | tags: quay.io/unixfox/onlyoffice:latest, quay.io/unixfox/onlyoffice:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/archived/docker-build-pulsejet-go-vod.yml: -------------------------------------------------------------------------------- 1 | name: Docker build pulsejet go-vod image 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | push: 8 | paths: 9 | - '.github/workflows/docker-build-pulsejet-go-vod.yml' 10 | - 'go-vod/**' 11 | 12 | jobs: 13 | 14 | build: 15 | 16 | runs-on: ${{ (github.event_name == 'push') && fromJSON('[ "buildjet-16vcpu-ubuntu-2204" ]') || 'ubuntu-latest' }} 17 | 18 | steps: 19 | - name: Get latest tag 20 | id: get-latest-tag 21 | run: | 22 | echo "tag=$(git ls-remote --refs --tags https://github.com/pulsejet/go-vod.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 23 | shell: bash 24 | 25 | - uses: actions/cache@v4 26 | id: cache 27 | with: 28 | path: pulsejet-go-vod 29 | key: ${{ runner.os }}-v1-${{ env.tag }} 30 | 31 | - name: Set up QEMU 32 | uses: docker/setup-qemu-action@v2 33 | if: steps.cache.outputs.cache-hit != 'true' 34 | with: 35 | platforms: all 36 | 37 | - name: Check Out Repo 38 | uses: actions/checkout@v3 39 | 40 | - name: Check Out Repo pulsejet/go-vod 41 | uses: actions/checkout@v3 42 | if: steps.cache.outputs.cache-hit != 'true' 43 | with: 44 | repository: "pulsejet/go-vod" 45 | ref: '${{ env.tag }}' 46 | path: 'pulsejet-go-vod-git' 47 | 48 | - name: Login to Quay.io 49 | if: steps.cache.outputs.cache-hit != 'true' 50 | uses: docker/login-action@v2 51 | with: 52 | registry: quay.io 53 | username: ${{ secrets.QUAY_USERNAME }} 54 | password: ${{ secrets.QUAY_PASSWORD }} 55 | 56 | - name: Set up Docker Buildx 57 | if: steps.cache.outputs.cache-hit != 'true' 58 | id: buildx 59 | uses: docker/setup-buildx-action@v2 60 | with: 61 | version: latest 62 | 63 | - name: Build and push docker image go-vod 64 | if: steps.cache.outputs.cache-hit != 'true' 65 | id: docker_build_new_single_process 66 | uses: docker/build-push-action@v3 67 | with: 68 | context: ./pulsejet-go-vod-git 69 | file: ./pulsejet-go-vod-git/Dockerfile 70 | platforms: linux/amd64 71 | push: true 72 | tags: quay.io/unixfox/pulsejet-go-vod:latest, quay.io/unixfox/pulsejet-go-vod:${{ env.tag }}-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/check-docker-updates.yml: -------------------------------------------------------------------------------- 1 | name: Check for Docker image updates 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "0 * * * *" 7 | 8 | jobs: 9 | check-updates: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | updates: ${{ steps.check-updates.outputs.updates }} 13 | 14 | steps: 15 | - name: Check Out Repo 16 | uses: actions/checkout@v4 17 | 18 | - name: Cache versions state 19 | uses: actions/cache@v4 20 | with: 21 | path: .github/versions 22 | key: ${{ runner.os }}-versions-${{ github.run_id }} 23 | restore-keys: | 24 | ${{ runner.os }}-versions- 25 | 26 | - name: Check for updates 27 | id: check-updates 28 | run: | 29 | declare -A repos=( 30 | ["wireproxy"]="https://github.com/whyvl/wireproxy.git" 31 | ["nginx-ssl-ja3"]="https://github.com/fooinha/nginx-ssl-ja3.git" 32 | 
["ingress-nginx"]="https://github.com/kubernetes/ingress-nginx.git refs/tags/controller" 33 | ["samba"]="https://github.com/samba-team/samba.git refs/tags/" 34 | ["stolon"]="https://github.com/docker-library/postgres.git" 35 | ["nitter"]="https://github.com/zedeus/nitter.git" 36 | ["tor"]="https://github.com/m0wer/docker-tor.git" 37 | ["stash"]="https://github.com/stashed/stash.git refs/tags/" 38 | ["logrotate"]="https://github.com/blacklabelops/logrotate.git" 39 | ["cert-manager-webhook-powerdns"]="https://github.com/lordofsystem/cert-manager-webhook-powerdns.git" 40 | ["caddy"]="https://github.com/caddyserver/caddy.git refs/tags/" 41 | ["coredns"]="https://github.com/coredns/coredns.git refs/tags/" 42 | #["gpt4free"]="https://github.com/xtekky/gpt4free.git" 43 | #["nginx-ssl-fingerprint"]="https://github.com/phuslu/nginx-ssl-fingerprint.git" 44 | ) 45 | 46 | updates="" 47 | mkdir -p .github/versions 48 | 49 | for repo in "${!repos[@]}"; do 50 | if [[ "${repos[$repo]}" == *"refs/tags/"* ]]; then 51 | latest=$(git ls-remote --refs --tags ${repos[$repo]%% *} | sort -t '/' -k 3 -V | tail -n1) 52 | else 53 | latest=$(git ls-remote ${repos[$repo]} | head -n1) 54 | fi 55 | 56 | if [ -f ".github/versions/${repo}" ]; then 57 | previous=$(cat ".github/versions/${repo}") 58 | if [ "$latest" != "$previous" ]; then 59 | updates="${updates}${repo}," 60 | fi 61 | else 62 | updates="${updates}${repo}," 63 | fi 64 | echo "$latest" > ".github/versions/${repo}" 65 | done 66 | 67 | if [ ! -z "$updates" ]; then 68 | updates=${updates%,} 69 | echo "updates=${updates}" >> $GITHUB_OUTPUT 70 | fi 71 | 72 | trigger-workflows: 73 | needs: check-updates 74 | if: needs.check-updates.outputs.updates != '' 75 | runs-on: ubuntu-latest 76 | 77 | steps: 78 | - name: Check Out Repo 79 | uses: actions/checkout@v4 80 | 81 | - name: Setup GitHub CLI 82 | run: | 83 | gh auth login --with-token <<< "${{ secrets.GITHUB_TOKEN }}" 84 | 85 | - name: Trigger relevant workflows 86 | run: | 87 | IFS=',' read -ra UPDATES <<< "${{ needs.check-updates.outputs.updates }}" 88 | for repo in "${UPDATES[@]}"; do 89 | case $repo in 90 | "wireproxy") 91 | gh workflow run docker-build-wireproxy-debian.yml 92 | ;; 93 | "nginx-ssl-ja3") 94 | gh workflow run docker-build-openresty-ssl-ja3.yml 95 | ;; 96 | "ingress-nginx") 97 | gh workflow run docker-build-ingress-nginx.yml 98 | gh workflow run docker-build-ingress-nginx-controller-dynamic-modules.yml 99 | gh workflow run docker-build-ingress-nginx-custom.yml 100 | ;; 101 | "samba") 102 | gh workflow run docker-build-samba.yml 103 | ;; 104 | "stolon") 105 | gh workflow run docker-build-stolon.yml 106 | ;; 107 | "nitter") 108 | gh workflow run docker-build-nitter-patched.yml 109 | ;; 110 | "tor") 111 | gh workflow run docker-build-tor-single-hop.yml 112 | ;; 113 | "stash") 114 | gh workflow run docker-build-stash.yml 115 | ;; 116 | "logrotate") 117 | gh workflow run docker-build-logrotate.yml 118 | ;; 119 | "cert-manager-webhook-powerdns") 120 | gh workflow run docker-build-cert-manager-webhook-powerdns.yml 121 | ;; 122 | #"gpt4free") 123 | # gh workflow run docker-build-gpt4free.yml 124 | # ;; 125 | #"nginx-ssl-fingerprint") 126 | # gh workflow run docker-build-nginx-ssl-fingerprint.yml 127 | # ;; 128 | esac 129 | done 130 | env: 131 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-caddy.yml: -------------------------------------------------------------------------------- 1 | name: Docker 
build caddy image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-caddy.yml' 8 | - 'caddy/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest tag 18 | id: get-latest-tag 19 | run: | 20 | echo "tag=$(git ls-remote --refs --tags https://github.com/caddyserver/caddy.git| grep -v beta | cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1|cut -d "v" -f2)" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Login to Quay.io 32 | uses: docker/login-action@v3 33 | with: 34 | registry: quay.io 35 | username: ${{ secrets.QUAY_USERNAME }} 36 | password: ${{ secrets.QUAY_PASSWORD }} 37 | 38 | - name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v3 41 | with: 42 | version: latest 43 | 44 | - name: Build and push docker image 45 | id: docker_build_new 46 | uses: docker/build-push-action@v6 47 | with: 48 | context: ./caddy 49 | file: ./caddy/Dockerfile 50 | platforms: linux/amd64,linux/arm64 51 | push: true 52 | tags: quay.io/unixfox/caddy:latest, quay.io/unixfox/caddy:2.9.1 53 | build-args: CADDY_VERSION=2.9.1 54 | # https://github.com/caddy-dns/powerdns/issues/4 55 | #build-args: CADDY_VERSION=${{ env.tag }} 56 | #tags: quay.io/unixfox/caddy:latest, quay.io/unixfox/caddy:${{ env.tag }} 57 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-cert-manager-webhook-powerdns.yml: -------------------------------------------------------------------------------- 1 | name: Docker build cert-manager-webhook-powerdns image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-cert-manager-webhook-powerdns.yml' 8 | - 'cert-manager-webhook-powerdns/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote https://github.com/lordofsystem/cert-manager-webhook-powerdns.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Check Out Repo lordofsystem/cert-manager-webhook-powerdns 32 | uses: actions/checkout@v4 33 | with: 34 | repository: "lordofsystem/cert-manager-webhook-powerdns" 35 | ref: 'main' 36 | path: 'cert-manager-webhook-powerdns-git' 37 | 38 | - name: Login to Quay.io 39 | uses: docker/login-action@v3 40 | with: 41 | registry: quay.io 42 | username: ${{ secrets.QUAY_USERNAME }} 43 | password: ${{ secrets.QUAY_PASSWORD }} 44 | 45 | - name: Set up Docker Buildx 46 | id: buildx 47 | uses: docker/setup-buildx-action@v3 48 | with: 49 | version: latest 50 | 51 | - name: Set up build timestamp 52 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 53 | 54 | - name: Build and push docker image 55 | id: docker_build_new 56 | uses: docker/build-push-action@v6 57 | with: 58 | context: ./cert-manager-webhook-powerdns-git 59 | file: ./cert-manager-webhook-powerdns-git/Dockerfile 60 | platforms: linux/amd64,linux/arm64 61 | push: true 62 | tags: quay.io/unixfox/cert-manager-webhook-powerdns:latest, quay.io/unixfox/cert-manager-webhook-powerdns:build-${{ env.timestamp }} 
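# Aside: the tag-discovery one-liner used by the caddy workflow above (and, with an
# extra `grep -v "v0"`, by the coredns workflow below) resolves the newest stable
# upstream tag and writes it to $GITHUB_ENV as `tag`. The same pipeline, split per
# stage for readability (commands copied from the caddy workflow; only the layout is new):
#
#   git ls-remote --refs --tags https://github.com/caddyserver/caddy.git |
#     grep -v beta |                     # drop pre-release tags
#     cut --delimiter='/' --fields=3 |   # keep <name> from refs/tags/<name>
#     sort --version-sort |              # order tags as versions
#     tail --lines=1 |                   # newest tag
#     cut -d "v" -f2                     # strip the leading "v"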
-------------------------------------------------------------------------------- /.github/workflows/docker-build-coredns.yml: -------------------------------------------------------------------------------- 1 | name: Docker build coredns image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-coredns.yml' 8 | - 'coredns/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest tag 18 | id: get-latest-tag 19 | run: | 20 | echo "tag=$(git ls-remote --refs --tags https://github.com/coredns/coredns.git| grep -v beta | cut --delimiter='/' --fields=3| grep -v "v0" | sort --version-sort |tail --lines=1 |cut -d "v" -f2)" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Check Out Repo coredns 32 | uses: actions/checkout@v4 33 | with: 34 | repository: "coredns/coredns" 35 | ref: 'v${{ env.tag }}' 36 | path: 'coredns-git' 37 | 38 | - name: Add extra plugins 39 | run: | 40 | cat coredns/plugin.cfg >> coredns-git/plugin.cfg 41 | cd coredns-git 42 | go get github.com/wenerme/coredns-pdsql 43 | go get github.com/jinzhu/gorm/dialects/postgres 44 | go generate 45 | 46 | - name: Login to Quay.io 47 | uses: docker/login-action@v3 48 | with: 49 | registry: quay.io 50 | username: ${{ secrets.QUAY_USERNAME }} 51 | password: ${{ secrets.QUAY_PASSWORD }} 52 | 53 | - name: Set up Docker Buildx 54 | id: buildx 55 | uses: docker/setup-buildx-action@v3 56 | with: 57 | version: latest 58 | 59 | - name: Build coredns 60 | run: | 61 | cd coredns-git 62 | make LINUX_ARCH="amd64 arm arm64" -f Makefile.release release 63 | mkdir -p build/docker/amd64 build/docker/arm build/docker/arm64 64 | make LINUX_ARCH="amd64 arm arm64" VERSION=${{ env.tag }} DOCKER=quay.io/unixfox -f Makefile.docker docker-build 65 | make LINUX_ARCH="amd64 arm arm64" VERSION=${{ env.tag }} DOCKER=quay.io/unixfox -f Makefile.docker docker-push 66 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-gpt4free.yml: -------------------------------------------------------------------------------- 1 | name: Docker build gpt4free image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-gpt4free.yml' 8 | 9 | jobs: 10 | build: 11 | runs-on: 'ubuntu-24.04-arm' 12 | 13 | steps: 14 | - name: Get latest commit hash 15 | id: get-latest-commit 16 | run: | 17 | echo "commit=$(git ls-remote https://github.com/xtekky/gpt4free.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 18 | shell: bash 19 | 20 | - name: Check Out Repo 21 | uses: actions/checkout@v4 22 | 23 | - name: Check Out Repo gpt4free 24 | uses: actions/checkout@v4 25 | with: 26 | repository: "xtekky/gpt4free" 27 | ref: 'main' 28 | path: 'gpt4free-git' 29 | 30 | - name: Use docker base image that supports arm64 31 | run: | 32 | sed -i 's|selenium/node-chrome|selenium/node-chromium|' gpt4free-git/docker/Dockerfile 33 | 34 | - name: Login to Quay.io 35 | uses: docker/login-action@v3 36 | with: 37 | registry: quay.io 38 | username: ${{ secrets.QUAY_USERNAME }} 39 | password: ${{ secrets.QUAY_PASSWORD }} 40 | 41 | - name: Set up Docker Buildx 42 | id: buildx 43 | uses: docker/setup-buildx-action@v3 44 | with: 45 | version: latest 46 | 47 | - name: Set up build timestamp 48 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV
49 | 50 | - name: Build and push docker image 51 | id: docker_build_new 52 | uses: docker/build-push-action@v6 53 | with: 54 | context: ./gpt4free-git 55 | file: ./gpt4free-git/docker/Dockerfile 56 | platforms: linux/arm64 57 | push: true 58 | tags: quay.io/unixfox/gpt4free:latest, quay.io/unixfox/gpt4free:build-${{ env.timestamp }} 59 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-ingress-nginx-controller-dynamic-modules.yml: -------------------------------------------------------------------------------- 1 | name: Docker build ingress-nginx-controller-dynamic-modules image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-ingress-nginx-controller-dynamic-modules.yml' 8 | - 'ingress-nginx-controller-dynamic-modules/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest tag 18 | id: get-latest-tag 19 | run: | 20 | echo "tag=$(git ls-remote --refs --tags https://github.com/kubernetes/ingress-nginx |grep refs/tags/controller | tail -1 | awk '{print $2;}' | sed 's/refs\/tags\/controller-//')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Login to Quay.io 32 | uses: docker/login-action@v3 33 | with: 34 | registry: quay.io 35 | username: ${{ secrets.QUAY_USERNAME }} 36 | password: ${{ secrets.QUAY_PASSWORD }} 37 | 38 | - name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v3 41 | with: 42 | version: latest 43 | 44 | - name: Set up build timestamp 45 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 46 | 47 | - name: Build and push docker image 48 | id: docker_build_new 49 | uses: docker/build-push-action@v6 50 | with: 51 | context: ./ingress-nginx-controller-dynamic-modules 52 | file: ./ingress-nginx-controller-dynamic-modules/Dockerfile 53 | platforms: linux/amd64,linux/arm64 54 | push: true 55 | tags: quay.io/unixfox/ingress-nginx-controller-dynamic-modules:latest, quay.io/unixfox/ingress-nginx-controller-dynamic-modules:${{ env.tag }}, quay.io/unixfox/ingress-nginx-controller-dynamic-modules:${{ env.tag }}-build-${{ env.timestamp }} 56 | build-args: INGRESS_NGINX_CONTROLLER_VERSION=${{ env.tag }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-ingress-nginx-custom.yml: -------------------------------------------------------------------------------- 1 | name: Docker build ingress-nginx-custom image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-ingress-nginx-custom.yml' 8 | - 'ingress-nginx-custom/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest tag 18 | id: get-latest-tag 19 | run: | 20 | echo "tag=$(git ls-remote --refs --tags https://github.com/kubernetes/ingress-nginx |grep refs/tags/controller | tail -1 | awk '{print $2;}' | sed 's/refs\/tags\/controller-//')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Login to Quay.io 32 | uses: docker/login-action@v3 33 | with: 34 | registry: quay.io 35 | username: ${{ secrets.QUAY_USERNAME }} 36 | password: ${{ secrets.QUAY_PASSWORD }} 37 | 38 | - 
name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v3 41 | with: 42 | version: latest 43 | 44 | - name: Set up build timestamp 45 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 46 | 47 | - name: Build and push docker image 48 | id: docker_build_new 49 | uses: docker/build-push-action@v6 50 | with: 51 | context: ./ingress-nginx-custom 52 | file: ./ingress-nginx-custom/Dockerfile 53 | platforms: linux/amd64,linux/arm64 54 | push: true 55 | tags: quay.io/unixfox/ingress-nginx-custom:latest, quay.io/unixfox/ingress-nginx-custom:${{ env.tag }}, quay.io/unixfox/ingress-nginx-custom:${{ env.tag }}-build-${{ env.timestamp }} 56 | build-args: INGRESS_NGINX_CONTROLLER_VERSION=${{ env.tag }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-ingress-nginx.yml: -------------------------------------------------------------------------------- 1 | name: Docker build ingress-nginx image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-ingress-nginx.yml' 8 | - 'ingress-nginx/**' 9 | 10 | jobs: 11 | 12 | checks: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | cache-hit: ${{ steps.cache.outputs.cache-hit }} 16 | env-tag: ${{ env.tag }} 17 | steps: 18 | - name: Get latest tag 19 | id: get-latest-tag 20 | run: | 21 | echo "tag=v1.6.4" >> $GITHUB_ENV 22 | shell: bash 23 | 24 | - uses: actions/cache@v4 25 | id: cache 26 | with: 27 | path: ingress-nginx 28 | key: ${{ runner.os }}-v4-${{ env.tag }} 29 | 30 | build: 31 | 32 | needs: checks 33 | runs-on: buildjet-32vcpu-ubuntu-2204-arm 34 | if: needs.checks.outputs.cache-hit != 'true' 35 | steps: 36 | 37 | - name: Setup build base 38 | run: | 39 | sudo apt install golang -y 40 | 41 | - name: Set up QEMU 42 | uses: docker/setup-qemu-action@v3 43 | with: 44 | platforms: all 45 | 46 | - name: Check Out Repo 47 | uses: actions/checkout@v4 48 | 49 | - name: Check Out Repo kubernetes/ingress-nginx 50 | uses: actions/checkout@v4 51 | with: 52 | repository: "kubernetes/ingress-nginx" 53 | ref: 'controller-${{ needs.checks.outputs.env-tag }}' 54 | path: 'ingress-nginx-git' 55 | 56 | - name: Set up Docker Buildx 57 | id: buildx 58 | uses: docker/setup-buildx-action@v3 59 | with: 60 | version: latest 61 | 62 | - name: Build and push docker base image nginx 63 | run: | 64 | cp ingress-nginx/patches/* ingress-nginx-git 65 | git config --global user.email "you@example.com" 66 | git config --global user.name "Your Name" 67 | cd ingress-nginx-git 68 | git am *.patch 69 | docker login quay.io --username ${USERNAME} --password ${DOCKER_TOKEN} 70 | cd images/nginx 71 | sudo apt update && sudo apt install build-essential -y 72 | REGISTRY=quay.io/unixfox TAG=${{ needs.checks.outputs.env-tag }} PLATFORMS=linux/arm64 make push 73 | env: 74 | REGISTRY: quay.io/unixfox 75 | DOCKER_TOKEN: ${{ secrets.QUAY_PASSWORD }} 76 | USERNAME: ${{ secrets.QUAY_USERNAME }} 77 | 78 | - name: Build and push docker image nginx controller 79 | run: | 80 | docker login quay.io --username ${USERNAME} --password ${DOCKER_TOKEN} 81 | cd ingress-nginx-git 82 | REGISTRY=quay.io/unixfox TAG=${{ needs.checks.outputs.env-tag }} BASE_IMAGE=quay.io/unixfox/nginx:${{ needs.checks.outputs.env-tag }} PLATFORMS="arm64" BUILDX_PLATFORMS=linux/arm64 make release 83 | env: 84 | REGISTRY: quay.io/unixfox 85 | DOCKER_TOKEN: ${{ secrets.QUAY_PASSWORD }} 86 | USERNAME: ${{ secrets.QUAY_USERNAME }} 87 | -------------------------------------------------------------------------------- 
/.github/workflows/docker-build-logrotate.yml: -------------------------------------------------------------------------------- 1 | name: Docker build logrotate image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-logrotate.yml' 8 | - 'logrotate/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote https://github.com/blacklabelops/logrotate.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Check Out Repo blacklabelops/logrotate 32 | uses: actions/checkout@v4 33 | with: 34 | repository: "blacklabelops/logrotate" 35 | ref: 'master' 36 | path: 'logrotate-git' 37 | 38 | - name: Login to Quay.io 39 | uses: docker/login-action@v3 40 | with: 41 | registry: quay.io 42 | username: ${{ secrets.QUAY_USERNAME }} 43 | password: ${{ secrets.QUAY_PASSWORD }} 44 | 45 | - name: Set up Docker Buildx 46 | id: buildx 47 | uses: docker/setup-buildx-action@v3 48 | with: 49 | version: latest 50 | 51 | - name: Set up build timestamp 52 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 53 | 54 | - name: Build and push docker image 55 | id: docker_build_new 56 | uses: docker/build-push-action@v6 57 | with: 58 | context: ./logrotate-git 59 | file: ./logrotate/Dockerfile 60 | platforms: linux/amd64,linux/arm64 61 | push: true 62 | tags: quay.io/unixfox/logrotate:latest, quay.io/unixfox/logrotate:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-nitter-patched.yml: -------------------------------------------------------------------------------- 1 | name: Docker build nitter patched docker image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-nitter-patched.yml' 8 | - 'nitter/**' 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-24.04-arm 13 | 14 | steps: 15 | - name: Get latest commit 16 | id: get-latest-commit 17 | run: | 18 | echo "commit=$(git ls-remote https://github.com/zedeus/nitter.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 19 | shell: bash 20 | 21 | - name: Check Out Repo 22 | uses: actions/checkout@v4 23 | 24 | - name: Check Out Repo zedeus/nitter 25 | uses: actions/checkout@v4 26 | with: 27 | repository: "zedeus/nitter" 28 | ref: 'master' 29 | path: 'nitter-git' 30 | 31 | - name: patch nitter master repo 32 | run: | 33 | cd nitter-git 34 | git config --global user.email "you@example.com" 35 | git config --global user.name "Your Name" 36 | git am ../nitter/patches/*.patch 37 | 38 | - name: Login to Quay.io 39 | uses: docker/login-action@v3 40 | with: 41 | registry: quay.io 42 | username: ${{ secrets.QUAY_USERNAME }} 43 | password: ${{ secrets.QUAY_PASSWORD }} 44 | 45 | - name: Set up Docker Buildx 46 | id: buildx 47 | uses: docker/setup-buildx-action@v3 48 | with: 49 | version: latest 50 | 51 | - name: Set up build timestamp 52 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 53 | 54 | - name: Build and push patched nitter 55 | id: docker_build_new_custom 56 | uses: docker/build-push-action@v6 57 | with: 58 | context: ./nitter-git 59 | file: ./nitter-git/Dockerfile.arm64 60 | platforms: linux/arm64 61 | push: true 62 | tags: quay.io/unixfox/nitter-patched:latest, 
quay.io/unixfox/nitter-patched:${{ env.commit }}-${{ env.timestamp }} 63 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-openresty-ssl-ja3.yml: -------------------------------------------------------------------------------- 1 | name: Docker build openresty-ssl-ja3 image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-openresty-ssl-ja3.yml' 8 | - 'openresty-ssl-ja3/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: ubuntu-24.04-arm 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote https://github.com/fooinha/nginx-ssl-ja3.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set number of cores 24 | run: | 25 | echo "NUMBER_OF_CORES=$(nproc)" >> $GITHUB_ENV 26 | 27 | - name: Check Out Repo openresty/docker-openresty 28 | uses: actions/checkout@v4 29 | with: 30 | repository: "openresty/docker-openresty" 31 | ref: 'master' 32 | 33 | - name: Check Out Repo 34 | uses: actions/checkout@v4 35 | with: 36 | path: 'periodic-build' 37 | 38 | - name: Login to Quay.io 39 | uses: docker/login-action@v3 40 | with: 41 | registry: quay.io 42 | username: ${{ secrets.QUAY_USERNAME }} 43 | password: ${{ secrets.QUAY_PASSWORD }} 44 | 45 | - name: Set up Docker Buildx 46 | id: buildx 47 | uses: docker/setup-buildx-action@v3 48 | with: 49 | version: latest 50 | 51 | - name: Set up build timestamp 52 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 53 | 54 | - name: Build and push docker image 55 | id: docker_build_new 56 | uses: docker/build-push-action@v6 57 | with: 58 | context: ./ 59 | file: ./periodic-build/openresty-ssl-ja3/Dockerfile 60 | platforms: linux/arm64 61 | push: true 62 | build-args: | 63 | RESTY_J=${{ env.NUMBER_OF_CORES }} 64 | tags: quay.io/unixfox/openresty-ssl-ja3:openssl3w-nginx1271-latest, quay.io/unixfox/openresty-ssl-ja3:openssl3w-nginx1271-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-samba.yml: -------------------------------------------------------------------------------- 1 | name: Docker build samba image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-samba.yml' 8 | - 'samba/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote --tags --sort="v:refname" https://github.com/samba-team/samba.git | grep samba-4 | tail -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Check Out Repo dperson/samba 32 | uses: actions/checkout@v4 33 | with: 34 | repository: "dperson/samba" 35 | ref: 'master' 36 | path: 'samba-git' 37 | 38 | - name: Login to Quay.io 39 | uses: docker/login-action@v3 40 | with: 41 | registry: quay.io 42 | username: ${{ secrets.QUAY_USERNAME }} 43 | password: ${{ secrets.QUAY_PASSWORD }} 44 | 45 | - name: Set up Docker Buildx 46 | id: buildx 47 | uses: docker/setup-buildx-action@v3 48 | with: 49 | version: latest 50 | 51 | - name: Override samba file 52 | run: cp samba/samba.sh samba-git/ 53 | 54 | - name: Set up build timestamp 55 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 56 | 
57 | - name: Build and push docker image 58 | id: docker_build_new 59 | uses: docker/build-push-action@v6 60 | with: 61 | context: ./samba-git 62 | file: ./samba-git/Dockerfile 63 | platforms: linux/amd64,linux/arm64 64 | push: true 65 | tags: quay.io/unixfox/samba:latest, quay.io/unixfox/samba:build-${{ env.timestamp }} -------------------------------------------------------------------------------- /.github/workflows/docker-build-stash.yml: -------------------------------------------------------------------------------- 1 | name: Docker build stash image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-stash.yml' 8 | - 'stash/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest tag 18 | id: get-latest-tag 19 | run: | 20 | echo "tag=$(git ls-remote --refs --tags https://github.com/stashed/stash.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Check Out Repo stashed/stash 32 | uses: actions/checkout@v4 33 | with: 34 | repository: "stashed/stash" 35 | ref: '${{ env.tag }}' 36 | path: 'stash-git' 37 | 38 | - name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v3 41 | with: 42 | version: latest 43 | 44 | - name: Replace rest by rclone and copy modified Dockerfile 45 | id: replace-copy 46 | run: | 47 | sed -i 's/rest:/rclone:/' stash-git/vendor/stash.appscode.dev/apimachinery/pkg/restic/setup.go 48 | cp stash/Dockerfile.in stash-git/Dockerfile.in 49 | shell: bash 50 | 51 | - name: Publish docker image 52 | env: 53 | REGISTRY: quay.io/unixfox 54 | DOCKER_TOKEN: ${{ secrets.QUAY_PASSWORD }} 55 | USERNAME: ${{ secrets.QUAY_USERNAME }} 56 | APPSCODE_ENV: prod 57 | run: | 58 | docker login quay.io --username ${USERNAME} --password ${DOCKER_TOKEN} 59 | cd stash-git && make release -------------------------------------------------------------------------------- /.github/workflows/docker-build-stolon.yml: -------------------------------------------------------------------------------- 1 | name: Docker build stolon image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-stolon.yml' 8 | - 'stolon/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | postgres-version: 20 | - 17 21 | - 16 22 | - 15 23 | - 14 24 | - 13 25 | 26 | steps: 27 | - name: Get latest commit hash of github.com/docker-library/postgres 28 | id: get-latest-commit 29 | run: | 30 | echo "commit=$(git ls-remote https://github.com/docker-library/postgres.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 31 | shell: bash 32 | 33 | - name: Get latest tag of github.com/sorintlab/stolon 34 | id: get-latest-tag 35 | run: | 36 | echo "tag=$(git ls-remote --refs --tags https://github.com/sorintlab/stolon.git|cut --delimiter='/' --fields=3|sort --version-sort|tail --lines=1)" >> $GITHUB_ENV 37 | shell: bash 38 | 39 | - name: Set up QEMU 40 | uses: docker/setup-qemu-action@v3 41 | with: 42 | platforms: all 43 | 44 | - name: Check Out Repo 45 | uses: actions/checkout@v4 46 | 47 | - name: Check Out Repo sorintlab/stolon 48 | uses: actions/checkout@v4 49 | with: 50 | repository: "sorintlab/stolon" 51 | ref: '${{ env.tag }}' 52 | path: 'stolon-git' 53 | 54 | - name: 
Login to Quay.io 55 | uses: docker/login-action@v3 56 | with: 57 | registry: quay.io 58 | username: ${{ secrets.QUAY_USERNAME }} 59 | password: ${{ secrets.QUAY_PASSWORD }} 60 | 61 | - name: Set up Docker Buildx 62 | id: buildx 63 | uses: docker/setup-buildx-action@v3 64 | with: 65 | version: latest 66 | 67 | - name: Set up build timestamp 68 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 69 | 70 | - name: Build and push docker image 71 | id: docker_build_new 72 | uses: docker/build-push-action@v6 73 | with: 74 | context: ./stolon-git 75 | file: ./stolon/Dockerfile 76 | platforms: linux/amd64,linux/arm64 77 | push: true 78 | tags: quay.io/unixfox/stolon:latest-pg${{ matrix.postgres-version }}, quay.io/unixfox/stolon:${{ env.tag }}-pg${{ matrix.postgres-version }}, quay.io/unixfox/stolon:${{ env.tag }}-pg${{ matrix.postgres-version }}-build-${{ env.timestamp }} 79 | build-args: PGVERSION=${{ matrix.postgres-version }} 80 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-tor-single-hop.yml: -------------------------------------------------------------------------------- 1 | name: Docker build tor single hop image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-tor-single-hop.yml' 8 | - 'tor-single-hop/**' 9 | 10 | jobs: 11 | 12 | buildamd64: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote https://github.com/m0wer/docker-tor.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Check Out Repo 24 | uses: actions/checkout@v4 25 | 26 | - name: Check Out Repo lncm/docker-tor 27 | uses: actions/checkout@v4 28 | with: 29 | repository: "lncm/docker-tor" 30 | ref: 'master' 31 | path: 'tor-git' 32 | 33 | - name: Login to Quay.io 34 | uses: docker/login-action@v3 35 | with: 36 | registry: quay.io 37 | username: ${{ secrets.QUAY_USERNAME }} 38 | password: ${{ secrets.QUAY_PASSWORD }} 39 | 40 | - name: Set up Docker Buildx 41 | id: buildx 42 | uses: docker/setup-buildx-action@v3 43 | with: 44 | version: latest 45 | 46 | - name: Set up build timestamp 47 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 48 | 49 | - name: Reduce hops and override files 50 | run: | 51 | cp -R ./tor-single-hop/override ./tor-git/override 52 | mv ./tor-single-hop/patches/*.patch ./tor-git 53 | git config --global user.email "you@example.com" 54 | git config --global user.name "Your Name" 55 | cd ./tor-git && git am *.patch 56 | shell: bash 57 | 58 | - name: Build and push docker image tor single hop 59 | id: docker_build_new_single_process 60 | uses: docker/build-push-action@v6 61 | with: 62 | context: ./tor-git 63 | file: ./tor-git/Dockerfile 64 | platforms: linux/amd64 65 | push: true 66 | tags: quay.io/unixfox/tor-single-hop:latest, quay.io/unixfox/tor-single-hop:build-${{ env.timestamp }} 67 | 68 | buildarm: 69 | 70 | runs-on: 'ubuntu-24.04-arm' 71 | 72 | steps: 73 | - name: Get latest commit hash 74 | id: get-latest-commit 75 | run: | 76 | echo "commit=$(git ls-remote https://github.com/m0wer/docker-tor.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 77 | shell: bash 78 | 79 | - name: Check Out Repo 80 | uses: actions/checkout@v4 81 | 82 | - name: Check Out Repo lncm/docker-tor 83 | uses: actions/checkout@v4 84 | with: 85 | repository: "lncm/docker-tor" 86 | ref: 'master' 87 | path: 'tor-git' 88 | 89 | - name: Login to Quay.io 90 | uses: 
docker/login-action@v3 91 | with: 92 | registry: quay.io 93 | username: ${{ secrets.QUAY_USERNAME }} 94 | password: ${{ secrets.QUAY_PASSWORD }} 95 | 96 | - name: Set up Docker Buildx 97 | id: buildx 98 | uses: docker/setup-buildx-action@v3 99 | with: 100 | version: latest 101 | 102 | - name: Set up build timestamp 103 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 104 | 105 | - name: Reduce hops and override files 106 | run: | 107 | cp -R ./tor-single-hop/override ./tor-git/override 108 | mv ./tor-single-hop/patches/*.patch ./tor-git 109 | git config --global user.email "you@example.com" 110 | git config --global user.name "Your Name" 111 | cd ./tor-git && git am *.patch 112 | shell: bash 113 | 114 | - name: Build and push docker image tor single hop 115 | id: docker_build_new_single_process 116 | uses: docker/build-push-action@v6 117 | with: 118 | context: ./tor-git 119 | file: ./tor-git/Dockerfile 120 | platforms: linux/arm64 121 | push: true 122 | tags: quay.io/unixfox/tor-single-hop:latest-arm64, quay.io/unixfox/tor-single-hop:build-arm64-${{ env.timestamp }} 123 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-wireproxy-debian.yml: -------------------------------------------------------------------------------- 1 | name: Docker build wireproxy with debian based image 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - '.github/workflows/docker-build-wireproxy-debian.yml' 8 | - 'wireproxy-debian/**' 9 | 10 | jobs: 11 | 12 | build: 13 | 14 | runs-on: 'ubuntu-latest' 15 | 16 | steps: 17 | - name: Get latest commit hash 18 | id: get-latest-commit 19 | run: | 20 | echo "commit=$(git ls-remote https://github.com/whyvl/wireproxy.git | head -n1 | awk '{print $1;}')" >> $GITHUB_ENV 21 | shell: bash 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Check Out Repo 29 | uses: actions/checkout@v4 30 | 31 | - name: Login to Quay.io 32 | uses: docker/login-action@v3 33 | with: 34 | registry: quay.io 35 | username: ${{ secrets.QUAY_USERNAME }} 36 | password: ${{ secrets.QUAY_PASSWORD }} 37 | 38 | - name: Set up Docker Buildx 39 | id: buildx 40 | uses: docker/setup-buildx-action@v3 41 | with: 42 | version: latest 43 | 44 | - name: Set up build timestamp 45 | run: echo "timestamp=$(date +%Y%m%d)" >> $GITHUB_ENV 46 | 47 | - name: Build and push docker image 48 | id: docker_build_new 49 | uses: docker/build-push-action@v6 50 | with: 51 | context: ./wireproxy-debian 52 | file: ./wireproxy-debian/Dockerfile 53 | platforms: linux/amd64,linux/arm64,linux/arm/v7 54 | push: true 55 | tags: quay.io/unixfox/wireproxy-debian:latest, quay.io/unixfox/wireproxy-debian:build-${{ env.timestamp }} 56 | -------------------------------------------------------------------------------- /.github/workflows/workflow-run-cleanup.yml: -------------------------------------------------------------------------------- 1 | name: Delete old workflow runs 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | - cron: '0 1 * * *' 6 | # Run daily, at 01:00. 
7 | 8 | jobs: 9 | del_runs: 10 | runs-on: 'ubuntu-latest' 11 | permissions: 12 | actions: write 13 | contents: read 14 | steps: 15 | - name: Delete workflow runs 16 | uses: Mattraks/delete-workflow-runs@v2 17 | with: 18 | token: ${{ github.token }} 19 | repository: ${{ github.repository }} 20 | retain_days: 30 21 | keep_minimum_runs: 200 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # active 2 | - cert-manager-webhook-powerdns: https://quay.io/unixfox/cert-manager-webhook-powerdns 3 | - ingress-nginx: https://quay.io/unixfox/nginx 4 | - samba: https://quay.io/unixfox/samba 5 | - stash: https://quay.io/repository/unixfox/stash 6 | - stolon: https://quay.io/repository/unixfox/stolon 7 | - tor-single-hop: https://quay.io/unixfox/tor-single-hop 8 | - openresty-ssl-ja3: https://quay.io/unixfox/openresty-ssl-ja3 9 | - caddy: https://quay.io/unixfox/caddy 10 | - nitter: https://quay.io/unixfox/nitter 11 | 12 | # frozen/inactive 13 | Broken, need to be fixed. 14 | 15 | # archived 16 | Don't plan to maintain anymore. 17 | 18 | - brudi: https://quay.io/unixfox/brudi 19 | - eternaljukebox: https://quay.io/unixfox/eternaljukebox 20 | - k8s-zabbix: https://quay.io/unixfox/zabbix-tooling-k8s-zabbix 21 | - keel: https://quay.io/unixfox/keel 22 | - languagetool: https://quay.io/unixfox/languagetool 23 | - mysql-router: https://quay.io/unixfox/mysql-router 24 | - onlyoffice: https://quay.io/unixfox/onlyoffice 25 | - pgbouncer: https://quay.io/unixfox/pgbouncer 26 | - mailcow: https://quay.io/mailcowarm64 27 | - huginn: https://quay.io/unixfox/huginn-single-process 28 | - clustersecret: https://quay.io/unixfox/clustersecret 29 | - kwatch: https://quay.io/unixfox/kwatch 30 | 31 | # just for commit 32 | 4 33 | -------------------------------------------------------------------------------- /brudi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.15-alpine3.12 as builder 2 | 3 | WORKDIR /app 4 | 5 | COPY . . 6 | 7 | RUN go build . 
8 | 9 | FROM alpine:3.12 10 | 11 | LABEL maintainer="Mittwald CM Service " 12 | 13 | ENV BRUDI_USER="brudi" \ 14 | BRUDI_GID="1000" \ 15 | BRUDI_UID="1000" 16 | 17 | COPY --from=builder /app/brudi /usr/local/bin/brudi 18 | 19 | COPY --from=rclone/rclone:1.57 /usr/local/bin/rclone /usr/bin/rclone 20 | COPY --from=restic/restic:0.11.0 /usr/bin/restic /usr/local/bin/restic 21 | COPY --from=redis:alpine /usr/local/bin/redis-cli /usr/local/bin/redis-cli 22 | 23 | RUN apk add --no-cache --upgrade \ 24 | mongodb-tools \ 25 | mysql-client \ 26 | postgresql-client \ 27 | && \ 28 | addgroup \ 29 | -S "${BRUDI_USER}" \ 30 | -g "${BRUDI_GID}" \ 31 | && \ 32 | adduser \ 33 | -u "${BRUDI_UID}" \ 34 | -S \ 35 | -G "${BRUDI_USER}" \ 36 | "${BRUDI_USER}" 37 | 38 | USER ${BRUDI_USER} 39 | 40 | ENTRYPOINT ["brudi"] -------------------------------------------------------------------------------- /caddy/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG CADDY_VERSION=2.9.1 2 | 3 | FROM caddy:$CADDY_VERSION-builder AS builder 4 | 5 | RUN xcaddy build \ 6 | --with github.com/tailscale/caddy-tailscale \ 7 | --with github.com/caddy-dns/powerdns \ 8 | --with github.com/pteich/caddy-tlsconsul 9 | 10 | FROM caddy:$CADDY_VERSION 11 | 12 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy -------------------------------------------------------------------------------- /clustersecret/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | ADD base-image/requirements.txt / 3 | RUN pip install -r requirements.txt 4 | ADD src /src 5 | CMD kopf run -A /src/handlers.py -------------------------------------------------------------------------------- /coredns/plugin.cfg: -------------------------------------------------------------------------------- 1 | pdsql:github.com/wenerme/coredns-pdsql 2 | pdsql_postgres:github.com/jinzhu/gorm/dialects/postgres -------------------------------------------------------------------------------- /eternaljukebox/Dockerfile: -------------------------------------------------------------------------------- 1 | # set up the main image with dependencies first, to avoid re-doing this after each build 2 | FROM adoptopenjdk:8-jdk-hotspot as deps 3 | 4 | WORKDIR /EternalJukebox 5 | 6 | RUN curl -L https://github.com/yt-dlp/yt-dlp/releases/download/2022.08.08/yt-dlp -o /usr/local/bin/youtube-dl \ 7 | && chmod a+rx /usr/local/bin/youtube-dl\ 8 | && apt-get update \ 9 | && apt-get install ffmpeg gettext python -y \ 10 | && apt-get clean \ 11 | && touch hikari.properties 12 | 13 | # build jar with gradle 14 | 15 | FROM gradle:jdk8 as gradle-build 16 | 17 | WORKDIR /home/gradle/project 18 | 19 | 20 | # Only copy dependency-related files 21 | COPY build.gradle gradle.propertie* settings.gradle ./EternalJukebox/ 22 | 23 | # Only download dependencies 24 | # Eat the expected build failure since no source code has been copied yet 25 | RUN gradle clean shadowJar --no-daemon > /dev/null 2>&1 || true 26 | 27 | COPY . ./EternalJukebox 28 | 29 | RUN cd EternalJukebox\ 30 | && gradle clean shadowJar --no-daemon 31 | 32 | # build web with jekyll 33 | 34 | FROM bretfisher/jekyll as jekyll-build 35 | 36 | WORKDIR /EternalJukebox 37 | 38 | COPY --from=gradle-build /home/gradle/project/EternalJukebox . 39 | 40 | RUN chmod -R 777 . 
&& jekyll build --source _web --destination web 41 | 42 | # copy into main image 43 | 44 | FROM deps as main 45 | 46 | COPY --from=jekyll-build /EternalJukebox/ ./ 47 | COPY --from=gradle-build /home/gradle/project/EternalJukebox/build/libs/* ./ 48 | 49 | # envsubst is used so environment variables can be used instead of a config file 50 | 51 | CMD envsubst < "/EternalJukebox/envvar_config.yaml" > "/EternalJukebox/config.yaml"\ 52 | && java -jar EternalJukebox.jar 53 | -------------------------------------------------------------------------------- /huginn/multi-process/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ruby:2.6-buster 2 | 3 | COPY docker/scripts/prepare /scripts/ 4 | RUN /scripts/prepare 5 | 6 | COPY docker/multi-process/scripts/standalone-packages /scripts/ 7 | RUN /scripts/standalone-packages 8 | 9 | WORKDIR /app 10 | 11 | COPY ["Gemfile", "Gemfile.lock", "/app/"] 12 | COPY lib/gemfile_helper.rb /app/lib/ 13 | COPY vendor/gems/ /app/vendor/gems/ 14 | 15 | # Get rid of annoying "fatal: Not a git repository (or any of the parent directories): .git" messages 16 | RUN umask 002 && git init && \ 17 | LC_ALL=en_US.UTF-8 RAILS_ENV=production APP_SECRET_TOKEN=secret DATABASE_ADAPTER=mysql2 ON_HEROKU=true bundle install --without test development --path vendor/bundle -j 4 18 | 19 | COPY ./ /app/ 20 | 21 | ARG OUTDATED_DOCKER_IMAGE_NAMESPACE=false 22 | ENV OUTDATED_DOCKER_IMAGE_NAMESPACE ${OUTDATED_DOCKER_IMAGE_NAMESPACE} 23 | 24 | RUN umask 002 && \ 25 | LC_ALL=en_US.UTF-8 RAILS_ENV=production APP_SECRET_TOKEN=secret DATABASE_ADAPTER=mysql2 ON_HEROKU=true bundle exec rake assets:clean assets:precompile && \ 26 | chmod g=u /app/.env.example /app/Gemfile.lock /app/config/ /app/tmp/ 27 | 28 | 29 | EXPOSE 3000 30 | 31 | COPY docker/multi-process/scripts/supervisord.conf /etc/supervisor/ 32 | COPY ["docker/multi-process/scripts/bootstrap.conf", \ 33 | "docker/multi-process/scripts/foreman.conf", \ 34 | "docker/multi-process/scripts/mysqld.conf", "/etc/supervisor/conf.d/"] 35 | COPY ["docker/multi-process/scripts/bootstrap.sh", \ 36 | "docker/multi-process/scripts/foreman.sh", \ 37 | "docker/multi-process/scripts/init", \ 38 | "docker/scripts/setup_env", "/scripts/"] 39 | CMD ["/scripts/init"] 40 | 41 | USER 1001 42 | 43 | VOLUME /var/lib/mysql 44 | -------------------------------------------------------------------------------- /huginn/multi-process/scripts/standalone-packages: -------------------------------------------------------------------------------- 1 | export DEBIAN_FRONTEND=noninteractive 2 | apt-get update 3 | apt-get install -y python2.7 python-docutils mysql-server \ 4 | supervisor python-pip && \ 5 | apt-get -y clean 6 | pip install supervisor-stdout 7 | rm -rf /var/lib/apt/lists/* 8 | rm -rf /usr/share/doc/ 9 | rm -rf /usr/share/man/ 10 | rm -rf /usr/share/locale/ 11 | rm -rf /var/log/* 12 | 13 | mkdir -p /var/log/supervisor /var/log/mysql 14 | chgrp -R 0 /etc/supervisor /var/lib/mysql /var/log/supervisor /var/log/mysql 15 | chmod -R g=u /etc/supervisor /var/lib/mysql /var/log/supervisor /var/log/mysql 16 | sed -r -i /etc/mysql/mysql.conf.d/mysqld.cnf \ 17 | -e 's/^ *user *.+/user=1001/' \ 18 | -e 's#/var/run/mysqld/mysqld.sock#/app/tmp/sockets/mysqld.sock#' \ 19 | -e 's#/var/run/mysqld/mysqld.pid#/app/tmp/pids/mysqld.pid#' 20 | sed -r -i /etc/mysql/debian.cnf \ 21 | -e 's#/var/run/mysqld/mysqld.sock#/app/tmp/sockets/mysqld.sock#' 22 | cp /etc/mysql/debian.cnf /etc/mysql/mysql.conf.d/client.cnf 23 | chmod 644 
/etc/mysql/mysql.conf.d/client.cnf -------------------------------------------------------------------------------- /huginn/patches/force_nokogiri_compilation.patch: -------------------------------------------------------------------------------- 1 | From 5ad2f1919cc02d4b9d7c746efa7a59690b1f95d9 Mon Sep 17 00:00:00 2001 2 | From: Emilien Devos 3 | Date: Tue, 10 May 2022 19:42:10 +0200 4 | Subject: [PATCH 1/1] force nokogiri compilation 5 | 6 | --- 7 | Gemfile | 2 +- 8 | Gemfile.lock | 2 +- 9 | 2 files changed, 2 insertions(+), 2 deletions(-) 10 | 11 | diff --git a/Gemfile b/Gemfile 12 | index d3b094c9..30921ddc 100644 13 | --- a/Gemfile 14 | +++ b/Gemfile 15 | @@ -117,7 +117,7 @@ gem 'loofah', '~> 2.0' 16 | gem 'mail', '>= 2.8.1' 17 | gem 'mini_magick', ">= 4.9.4" 18 | gem 'multi_xml' 19 | -gem "nokogiri", ">= 1.10.8" 20 | +gem "nokogiri", force_ruby_platform: true 21 | gem 'omniauth' 22 | gem 'rails', '~> 6.0.4' 23 | gem 'sprockets', '~> 3.7.2' 24 | diff --git a/Gemfile.lock b/Gemfile.lock 25 | index adda050a..9770c8b0 100644 26 | --- a/Gemfile.lock 27 | +++ b/Gemfile.lock 28 | @@ -889,4 +889,4 @@ RUBY VERSION 29 | ruby 2.7.6p219 30 | 31 | BUNDLED WITH 32 | - 2.3.10 33 | + 2.3.26 34 | -------------------------------------------------------------------------------- /huginn/scripts/prepare: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cat > /etc/dpkg/dpkg.cfg.d/01_nodoc <&1 |sed -n -e 's/nginx version: //p' |cut -d'/' -f2); \ 22 | curl -L "http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz" | tar -C /tmp/nginx --strip-components=1 -xz 23 | 24 | WORKDIR /src/njs-nginx-module 25 | RUN curl -L https://github.com/nginx/njs/archive/refs/tags/0.8.0.tar.gz | tar --strip-components=1 -xz 26 | 27 | WORKDIR /tmp/nginx 28 | RUN ./configure --with-compat --add-dynamic-module=/src/njs-nginx-module/nginx && \ 29 | make modules 30 | 31 | FROM registry.k8s.io/ingress-nginx/controller:${INGRESS_NGINX_CONTROLLER_VERSION} 32 | 33 | COPY --from=build /tmp/nginx/objs/ngx_http_js_module.so /usr/local/nginx/modules/ -------------------------------------------------------------------------------- /ingress-nginx-custom/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG INGRESS_NGINX_CONTROLLER_VERSION 2 | 3 | FROM registry.k8s.io/ingress-nginx/controller:${INGRESS_NGINX_CONTROLLER_VERSION} 4 | 5 | USER root 6 | RUN apk update \ 7 | && apk add --no-cache \ 8 | pcre2 \ 9 | && rm -rf /var/cache/apk/* 10 | 11 | USER www-data 12 | 13 | ENTRYPOINT ["/usr/bin/dumb-init", "--"] 14 | 15 | CMD ["/nginx-ingress-controller"] 16 | -------------------------------------------------------------------------------- /ingress-nginx/patches/0001-add-quic-to-nginx-ingress.patch: -------------------------------------------------------------------------------- 1 | From 2667581dc096a08731006a0b7a6fa1dbf924dfa1 Mon Sep 17 00:00:00 2001 2 | From: Emilien Devos 3 | Date: Fri, 10 Feb 2023 16:08:53 +0100 4 | Subject: [PATCH 1/1] add quic to nginx ingress 5 | 6 | --- 7 | images/nginx/rootfs/build.sh | 35 +++++++++++++++++++++++++++++------ 8 | 1 file changed, 29 insertions(+), 6 deletions(-) 9 | 10 | diff --git a/images/nginx/rootfs/build.sh b/images/nginx/rootfs/build.sh 11 | index 8023575c0..ad3d878d4 100755 12 | --- a/images/nginx/rootfs/build.sh 13 | +++ b/images/nginx/rootfs/build.sh 14 | @@ -20,6 +20,10 @@ set -o pipefail 15 | 16 | export NGINX_VERSION=1.21.6 17 | 18 | +export NGINX_COMMIT=7c2adf237091 19 | + 20 | 
+export BORINGSSL_COMMIT=edbdc240ecb6a2d5a500b8e2eedfe3e6a2423c0a 21 | + 22 | # Check for recent changes: https://github.com/vision5/ngx_devel_kit/compare/v0.3.1...master 23 | export NDK_VERSION=0.3.1 24 | 25 | @@ -195,7 +199,9 @@ apk add \ 26 | unzip \ 27 | dos2unix \ 28 | yaml-cpp \ 29 | - coreutils 30 | + coreutils \ 31 | + ninja \ 32 | + go 33 | 34 | mkdir -p /etc/nginx 35 | 36 | @@ -477,6 +483,18 @@ cmake -DCMAKE_BUILD_TYPE=Release \ 37 | make 38 | make install 39 | 40 | +# build boringssl 41 | +cd "$BUILD_PATH" 42 | +git clone -n https://github.com/google/boringssl 43 | +cd boringssl 44 | +git checkout $BORINGSSL_COMMIT 45 | + 46 | +echo "Building boringssl ..." 47 | +mkdir build 48 | +cd build 49 | +cmake -GNinja .. 50 | +ninja 51 | + 52 | # Get Brotli source and deps 53 | cd "$BUILD_PATH" 54 | git clone --depth=1 https://github.com/google/ngx_brotli.git 55 | @@ -574,7 +592,9 @@ Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-999-EXCLUSION-RULES-AFTE 56 | " > /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf 57 | 58 | # build nginx 59 | -cd "$BUILD_PATH/nginx-$NGINX_VERSION" 60 | +cd "$BUILD_PATH" 61 | +hg clone -b quic --rev $NGINX_COMMIT https://hg.nginx.org/nginx-quic nginx 62 | +cd nginx 63 | 64 | # apply nginx patches 65 | for PATCH in `ls /patches`;do 66 | @@ -586,7 +606,9 @@ for PATCH in `ls /patches`;do 67 | fi 68 | done 69 | 70 | -WITH_FLAGS="--with-debug \ 71 | +WITH_FLAGS="--with-http_v3_module \ 72 | + --build=quic-$NGINX_VERSION-boringssl-$BORINGSSL_COMMIT \ 73 | + --with-debug \ 74 | --with-compat \ 75 | --with-pcre-jit \ 76 | --with-http_ssl_module \ 77 | @@ -618,9 +640,10 @@ CC_OPT="-g -O2 -fPIE -fstack-protector-strong \ 78 | -DTCP_FASTOPEN=23 \ 79 | -fPIC \ 80 | -I$HUNTER_INSTALL_DIR/include \ 81 | - -Wno-cast-function-type" 82 | + -Wno-cast-function-type \ 83 | + -I../boringssl/include" 84 | 85 | -LD_OPT="-fPIE -fPIC -pie -Wl,-z,relro -Wl,-z,now -L$HUNTER_INSTALL_DIR/lib" 86 | +LD_OPT="-fPIE -fPIC -pie -Wl,-z,relro -Wl,-z,now -L$HUNTER_INSTALL_DIR/lib -L../boringssl/build/ssl -L../boringssl/build/crypto" 87 | 88 | if [[ ${ARCH} != "aarch64" ]]; then 89 | WITH_FLAGS+=" --with-file-aio" 90 | @@ -646,7 +669,7 @@ WITH_MODULES=" \ 91 | --add-dynamic-module=$BUILD_PATH/ngx_http_geoip2_module-${GEOIP2_VERSION} \ 92 | --add-dynamic-module=$BUILD_PATH/ngx_brotli" 93 | 94 | -./configure \ 95 | +./auto/configure \ 96 | --prefix=/usr/local/nginx \ 97 | --conf-path=/etc/nginx/nginx.conf \ 98 | --modules-path=/etc/nginx/modules \ 99 | -- 100 | 2.39.1 101 | 102 | -------------------------------------------------------------------------------- /kwatch/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS builder 2 | ARG RELEASE_VERSION="nothing" 3 | LABEL maintainer="Abdelrahman Ahmed 3 | Date: Sun, 30 Jul 2023 20:38:10 +0200 4 | Subject: [PATCH 1/1] force all requests to master + get method also 5 | 6 | --- 7 | http/proxy_server.go | 23 +++++++++++------------ 8 | 1 file changed, 11 insertions(+), 12 deletions(-) 9 | 10 | diff --git a/http/proxy_server.go b/http/proxy_server.go 11 | index 7d6a3ff..a31de70 100644 12 | --- a/http/proxy_server.go 13 | +++ b/http/proxy_server.go 14 | @@ -163,13 +163,11 @@ func (s *ProxyServer) serveHTTP(w http.ResponseWriter, r *http.Request) { 15 | // If request matches any passthrough regexes, send directly to target. 
16 | if s.isPassthrough(r) { 17 | s.logf("proxy: %s %s: matches passthrough expression, proxying to target", r.Method, r.URL.Path) 18 | - s.proxyToTarget(w, r, true) 19 | + s.proxyToTarget(w, r, true, s.Target) 20 | return 21 | } 22 | 23 | switch r.Method { 24 | - case http.MethodGet: 25 | - s.serveRead(w, r) 26 | case http.MethodHead: 27 | s.serveRead(w, r) 28 | default: 29 | @@ -187,7 +185,7 @@ func (s *ProxyServer) serveRead(w http.ResponseWriter, r *http.Request) { 30 | // No TXID or we couldn't parse it. Just send to the target. 31 | if txid == 0 { 32 | s.logf("proxy: %s %s: no client txid, proxying to target", r.Method, r.URL.Path) 33 | - s.proxyToTarget(w, r, false) 34 | + s.proxyToTarget(w, r, false, s.Target) 35 | return 36 | } 37 | 38 | @@ -196,7 +194,7 @@ func (s *ProxyServer) serveRead(w http.ResponseWriter, r *http.Request) { 39 | db := s.store.DB(s.DBName) 40 | if db == nil { 41 | s.logf("proxy: %s %s: no database %q, proxying to target", r.Method, r.URL.Path, s.DBName) 42 | - s.proxyToTarget(w, r, false) 43 | + s.proxyToTarget(w, r, false, s.Target) 44 | return 45 | } 46 | 47 | @@ -225,7 +223,7 @@ LOOP: 48 | } 49 | 50 | // Send request to the target once we've caught up to the last write seen. 51 | - s.proxyToTarget(w, r, false) 52 | + s.proxyToTarget(w, r, false, s.Target) 53 | } 54 | 55 | func (s *ProxyServer) serveNonRead(w http.ResponseWriter, r *http.Request) { 56 | @@ -234,7 +232,7 @@ func (s *ProxyServer) serveNonRead(w http.ResponseWriter, r *http.Request) { 57 | // If this is the primary, send the request to the target. 58 | if isPrimary { 59 | s.logf("proxy: %s %s: node is primary, proxying to target", r.Method, r.URL.Path) 60 | - s.proxyToTarget(w, r, false) 61 | + s.proxyToTarget(w, r, false, s.Target) 62 | return 63 | } 64 | 65 | @@ -242,18 +240,19 @@ func (s *ProxyServer) serveNonRead(w http.ResponseWriter, r *http.Request) { 66 | // go ahead and send the request 67 | if info == nil { 68 | s.logf("proxy: %s %s: no primary available, proxying to target", r.Method, r.URL.Path) 69 | - s.proxyToTarget(w, r, false) 70 | + s.proxyToTarget(w, r, false, s.Target) 71 | return 72 | } 73 | 74 | - // If this is a replica, then we'll redirect the request to the primary. 75 | - w.Header().Set("fly-replay", "instance="+info.Hostname) 76 | + _, targetPort, _ := net.SplitHostPort(s.Target) 77 | + 78 | + s.proxyToTarget(w, r, false, info.Hostname+":"+targetPort) 79 | } 80 | 81 | -func (s *ProxyServer) proxyToTarget(w http.ResponseWriter, r *http.Request, passthrough bool) { 82 | +func (s *ProxyServer) proxyToTarget(w http.ResponseWriter, r *http.Request, passthrough bool, host string) { 83 | // Update request URL to target server. 84 | r.URL.Scheme = "http" 85 | - r.URL.Host = s.Target 86 | + r.URL.Host = host 87 | 88 | resp, err := s.HTTPTransport.RoundTrip(r) 89 | if err != nil { 90 | -- 91 | 2.41.0 92 | 93 | -------------------------------------------------------------------------------- /logrotate/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | MAINTAINER Steffen Bleul 3 | 4 | # logrotate version (e.g. 
3.9.1-r0) 5 | ARG LOGROTATE_VERSION=latest 6 | # permissions 7 | ARG CONTAINER_UID=1000 8 | ARG CONTAINER_GID=1000 9 | 10 | # install dev tools 11 | RUN export CONTAINER_USER=logrotate && \ 12 | export CONTAINER_GROUP=logrotate && \ 13 | addgroup -g $CONTAINER_GID logrotate && \ 14 | adduser -u $CONTAINER_UID -G logrotate -h /usr/bin/logrotate.d -s /bin/bash -S logrotate && \ 15 | # Blacklabelops Feature Script Folder 16 | mkdir -p /var/blacklabelops && \ 17 | apk add --update \ 18 | bash \ 19 | tzdata \ 20 | vim \ 21 | tini \ 22 | su-exec \ 23 | gzip \ 24 | tar \ 25 | wget \ 26 | curl \ 27 | tar \ 28 | gzip \ 29 | wget \ 30 | tzdata && \ 31 | if [ "${LOGROTATE_VERSION}" = "latest" ]; \ 32 | then apk add logrotate ; \ 33 | else apk add "logrotate=${LOGROTATE_VERSION}" ; \ 34 | fi && \ 35 | mkdir -p /usr/bin/logrotate.d && \ 36 | wget --no-check-certificate -O /tmp/go-cron.tar.gz https://github.com/michaloo/go-cron/releases/download/v0.0.2/go-cron.tar.gz && \ 37 | tar xvf /tmp/go-cron.tar.gz -C /usr/bin && \ 38 | apk del \ 39 | wget && \ 40 | rm -rf /var/cache/apk/* && rm -rf /tmp/* 41 | 42 | # environment variable for this container 43 | ENV LOGROTATE_OLDDIR= \ 44 | LOGROTATE_COMPRESSION= \ 45 | LOGROTATE_INTERVAL= \ 46 | LOGROTATE_COPIES= \ 47 | LOGROTATE_SIZE= \ 48 | LOGS_DIRECTORIES= \ 49 | LOG_FILE_ENDINGS= \ 50 | LOGROTATE_LOGFILE= \ 51 | LOGROTATE_CRONSCHEDULE= \ 52 | LOGROTATE_PARAMETERS= \ 53 | LOGROTATE_STATUSFILE= \ 54 | LOG_FILE= 55 | 56 | COPY docker-entrypoint.sh /usr/bin/logrotate.d/docker-entrypoint.sh 57 | COPY update-logrotate.sh /usr/bin/logrotate.d/update-logrotate.sh 58 | COPY logrotate.sh /usr/bin/logrotate.d/logrotate.sh 59 | COPY logrotateConf.sh /usr/bin/logrotate.d/logrotateConf.sh 60 | COPY logrotateCreateConf.sh /usr/bin/logrotate.d/logrotateCreateConf.sh 61 | 62 | ENTRYPOINT ["/sbin/tini","--","/usr/bin/logrotate.d/docker-entrypoint.sh"] 63 | VOLUME ["/logrotate-status"] 64 | CMD ["cron"] -------------------------------------------------------------------------------- /moco/.gitignore: -------------------------------------------------------------------------------- 1 | moco/ 2 | -------------------------------------------------------------------------------- /moco/Dockerfile.backup: -------------------------------------------------------------------------------- 1 | # Build the moco-controller binary 2 | FROM golang:1.17-buster as builder 3 | 4 | WORKDIR /src/app 5 | 6 | # Copy the go source 7 | COPY ./ . 
8 | 9 | # Build 10 | RUN go build -ldflags="-w -s" -o moco-backup ./cmd/moco-backup 11 | 12 | # For MySQL binaries 13 | FROM mysql:8.0.28 as mysql 14 | 15 | # the backup image 16 | FROM debian:buster-slim 17 | LABEL org.opencontainers.image.source https://github.com/cybozu-go/moco 18 | 19 | ARG MYSQLSH_VERSION=8.0.28-1 20 | 21 | COPY --from=builder /src/app/moco-backup /moco-backup 22 | 23 | COPY --from=mysql /usr/local/mysql/LICENSE /usr/local/mysql/LICENSE 24 | COPY --from=mysql /usr/local/mysql/bin/mysqlbinlog /usr/local/mysql/bin/mysqlbinlog 25 | COPY --from=mysql /usr/local/mysql/bin/mysql /usr/local/mysql/bin/mysql 26 | 27 | RUN apt-get update \ 28 | && apt-get install -y --no-install-recommends zstd python3 libpython3.8 s3cmd \ 29 | && rm -rf /var/lib/apt/lists/* \ 30 | && curl -o /tmp/mysqlsh.deb -fsL https://dev.mysql.com/get/Downloads/MySQL-Shell/mysql-shell_${MYSQLSH_VERSION}ubuntu20.04_arm64.deb \ 31 | && dpkg -i /tmp/mysqlsh.deb \ 32 | && rm -f /tmp/mysqlsh.deb 33 | 34 | ENV PATH=/usr/local/mysql/bin:"$PATH" 35 | USER 10000:10000 36 | ENTRYPOINT ["/moco-backup"] -------------------------------------------------------------------------------- /moco/Dockerfile.controller: -------------------------------------------------------------------------------- 1 | # Build the moco-controller binary 2 | FROM golang:1.17-buster as builder 3 | 4 | WORKDIR /src/app 5 | 6 | # Copy the go source 7 | COPY ./ . 8 | 9 | # Build 10 | RUN CGO_ENABLED=0 go build -ldflags="-w -s" -o moco-controller ./cmd/moco-controller 11 | 12 | # the controller image 13 | FROM scratch 14 | LABEL org.opencontainers.image.source https://github.com/cybozu-go/moco 15 | 16 | COPY --from=builder /src/app/moco-controller ./ 17 | USER 10000:10000 18 | 19 | ENTRYPOINT ["/moco-controller"] -------------------------------------------------------------------------------- /nginx-ssl-fingerprint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:12-slim 2 | 3 | ARG OPENSSL_VERSION=openssl-3.2 4 | ARG NGINX_VERSION=release-1.25.5 5 | 6 | WORKDIR /build 7 | 8 | ADD patches/ /build/nginx-ssl-fingerprint/patches/ 9 | ADD src/ /build/nginx-ssl-fingerprint/src/ 10 | ADD config nginx.conf /build/nginx-ssl-fingerprint/ 11 | 12 | RUN export DEBIAN_FRONTEND=noninteractive && \ 13 | apt-get update && \ 14 | apt-get install -y git make gcc curl zlib1g-dev libpcre3-dev && \ 15 | git clone -b ${OPENSSL_VERSION} --depth=1 https://github.com/openssl/openssl && \ 16 | git clone -b ${NGINX_VERSION} --depth=1 https://github.com/nginx/nginx && \ 17 | patch -p1 -d openssl < nginx-ssl-fingerprint/patches/openssl.openssl-3.2.patch && \ 18 | patch -p1 -d nginx < nginx-ssl-fingerprint/patches/nginx-1.25.patch && \ 19 | cd nginx && \ 20 | ASAN_OPTIONS=symbolize=1 ./auto/configure --with-openssl=$(pwd)/../openssl --add-module=$(pwd)/../nginx-ssl-fingerprint --with-http_ssl_module --with-stream_ssl_module --with-debug --with-stream --with-http_v2_module --with-cc-opt="-fsanitize=address -O -fno-omit-frame-pointer" --with-ld-opt="-L/usr/local/lib -Wl,-E -lasan" && \ 21 | make && \ 22 | make install -j`nproc` && \ 23 | cd .. 
&& \ 24 | rm -Rf openssl nginx && \ 25 | apt-get purge -y git make gcc curl zlib1g-dev libpcre3-dev && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | EXPOSE 443 80 29 | 30 | CMD /usr/local/nginx/sbin/nginx -g 'daemon off;' -c /etc/nginx/nginx.conf 31 | -------------------------------------------------------------------------------- /nitter/patches/0001-cdn-hmac-support.patch: -------------------------------------------------------------------------------- 1 | From a9f9d74c9448120f8f118ba13f2c3a4e7622ea49 Mon Sep 17 00:00:00 2001 2 | From: Emilien <4016501+unixfox@users.noreply.github.com> 3 | Date: Fri, 21 Feb 2025 22:17:06 +0100 4 | Subject: [PATCH 1/1] cdn + hmac support 5 | 6 | --- 7 | nitter.example.conf | 1 + 8 | src/config.nim | 3 ++- 9 | src/nitter.nim | 1 + 10 | src/routes/media.nim | 16 ++++++++++++---- 11 | src/types.nim | 1 + 12 | src/utils.nim | 27 +++++++++++++++++++-------- 13 | src/views/general.nim | 2 +- 14 | src/views/rss.nimf | 12 ++++++------ 15 | 8 files changed, 43 insertions(+), 20 deletions(-) 16 | 17 | diff --git a/nitter.example.conf b/nitter.example.conf 18 | index bddb9a4..dd2faef 100644 19 | --- a/nitter.example.conf 20 | +++ b/nitter.example.conf 21 | @@ -26,6 +26,7 @@ enableRSS = true # set this to false to disable RSS feeds 22 | enableDebug = false # enable request logs and debug endpoints (/.sessions) 23 | proxy = "" # http/https url, SOCKS proxies are not supported 24 | proxyAuth = "" 25 | +cdnUrl = "https://cdn.xcancel.com" 26 | 27 | # Change default preferences here, see src/prefs_impl.nim for a complete list 28 | [Preferences] 29 | diff --git a/src/config.nim b/src/config.nim 30 | index 1b05ffe..96bf23b 100644 31 | --- a/src/config.nim 32 | +++ b/src/config.nim 33 | @@ -40,7 +40,8 @@ proc getConfig*(path: string): (Config, parseCfg.Config) = 34 | enableRss: cfg.get("Config", "enableRSS", true), 35 | enableDebug: cfg.get("Config", "enableDebug", false), 36 | proxy: cfg.get("Config", "proxy", ""), 37 | - proxyAuth: cfg.get("Config", "proxyAuth", "") 38 | + proxyAuth: cfg.get("Config", "proxyAuth", ""), 39 | + cdnUrl: cfg.get("Config", "cdnUrl", "https://cdn.xcancel.com") 40 | ) 41 | 42 | return (conf, cfg) 43 | diff --git a/src/nitter.nim b/src/nitter.nim 44 | index f81dc1c..14970ab 100644 45 | --- a/src/nitter.nim 46 | +++ b/src/nitter.nim 47 | @@ -35,6 +35,7 @@ updateDefaultPrefs(fullCfg) 48 | setCacheTimes(cfg) 49 | setHmacKey(cfg.hmacKey) 50 | setProxyEncoding(cfg.base64Media) 51 | +setCdnUrl(cfg.cdnUrl) 52 | setMaxHttpConns(cfg.httpMaxConns) 53 | setHttpProxy(cfg.proxy, cfg.proxyAuth) 54 | initAboutPage(cfg.staticDir) 55 | diff --git a/src/routes/media.nim b/src/routes/media.nim 56 | index de51061..4a7ed29 100644 57 | --- a/src/routes/media.nim 58 | +++ b/src/routes/media.nim 59 | @@ -91,8 +91,12 @@ proc createMediaRouter*(cfg: Config) = 60 | get "/pic/?": 61 | resp Http404 62 | 63 | - get re"^\/pic\/orig\/(enc)?\/?(.+)": 64 | - var url = decoded(request, 1) 65 | + get re"^\/pic\/orig\/(enc)?\/?(.+)\/(.+)$": 66 | + var url = decoded(request, 2) 67 | + 68 | + if getHmac(url) != request.matches[1]: 69 | + resp Http403, showError("Failed to verify signature", cfg) 70 | + 71 | if "twimg.com" notin url: 72 | url.insert(twimg) 73 | if not url.startsWith(https): 74 | @@ -105,8 +109,12 @@ proc createMediaRouter*(cfg: Config) = 75 | let code = await proxyMedia(request, url) 76 | check code 77 | 78 | - get re"^\/pic\/(enc)?\/?(.+)": 79 | - var url = decoded(request, 1) 80 | + get re"^\/pic\/(enc)?\/?(.+)\/(.+)$": 81 | + var url = decoded(request, 2) 82 | + 83 | 
+ if getHmac(url) != request.matches[1]: 84 | + resp Http403, showError("Failed to verify signature", cfg) 85 | + 86 | if "twimg.com" notin url: 87 | url.insert(twimg) 88 | if not url.startsWith(https): 89 | diff --git a/src/types.nim b/src/types.nim 90 | index 4e565ee..6aa326b 100644 91 | --- a/src/types.nim 92 | +++ b/src/types.nim 93 | @@ -271,6 +271,7 @@ type 94 | enableDebug*: bool 95 | proxy*: string 96 | proxyAuth*: string 97 | + cdnUrl*: string 98 | 99 | rssCacheTime*: int 100 | listCacheTime*: int 101 | diff --git a/src/utils.nim b/src/utils.nim 102 | index c96a6dd..cdb6f3a 100644 103 | --- a/src/utils.nim 104 | +++ b/src/utils.nim 105 | @@ -1,10 +1,12 @@ 106 | # SPDX-License-Identifier: AGPL-3.0-only 107 | import strutils, strformat, uri, tables, base64 108 | import nimcrypto 109 | +import std/times 110 | 111 | var 112 | hmacKey: string 113 | base64Media = false 114 | + cdnUrl: string 115 | 116 | const 117 | https* = "https://" 118 | @@ -20,34 +22,43 @@ const 119 | "x.com" 120 | ] 121 | 122 | +let now = now() 123 | + 124 | proc setHmacKey*(key: string) = 125 | hmacKey = key 126 | 127 | proc setProxyEncoding*(state: bool) = 128 | base64Media = state 129 | 130 | +proc setCdnUrl*(url: string) = 131 | + cdnUrl = url 132 | + 133 | proc getHmac*(data: string): string = 134 | - ($hmac(sha256, hmacKey, data))[0 .. 12] 135 | + ($hmac(sha256, hmacKey, data & intToStr(now().year + int(now().month) + now().monthDay)))[0 .. 12] 136 | 137 | proc getVidUrl*(link: string): string = 138 | if link.len == 0: return 139 | let sig = getHmac(link) 140 | - if base64Media: 141 | - &"/video/enc/{sig}/{encode(link, safe=true)}" 142 | - else: 143 | + if "m3u8" in link: 144 | &"/video/{sig}/{encodeUrl(link)}" 145 | + elif base64Media: 146 | + &"{cdnUrl}/video/enc/{sig}/{encode(link, safe=true)}" 147 | + else: 148 | + &"{cdnUrl}/video/{sig}/{encodeUrl(link)}" 149 | 150 | proc getPicUrl*(link: string): string = 151 | + let sig = getHmac(link) 152 | if base64Media: 153 | - &"/pic/enc/{encode(link, safe=true)}" 154 | + &"{cdnUrl}/pic/enc/{sig}/{encode(link, safe=true)}" 155 | else: 156 | - &"/pic/{encodeUrl(link)}" 157 | + &"{cdnUrl}/pic/{sig}/{encodeUrl(link)}" 158 | 159 | proc getOrigPicUrl*(link: string): string = 160 | + let sig = getHmac(link) 161 | if base64Media: 162 | - &"/pic/orig/enc/{encode(link, safe=true)}" 163 | + &"{cdnUrl}/pic/orig/enc/{sig}/{encode(link, safe=true)}" 164 | else: 165 | - &"/pic/orig/{encodeUrl(link)}" 166 | + &"{cdnUrl}/pic/orig/{sig}/{encodeUrl(link)}" 167 | 168 | proc filterParams*(params: Table): seq[(string, string)] = 169 | for p in params.pairs(): 170 | diff --git a/src/views/general.nim b/src/views/general.nim 171 | index 5ba40a3..69ba668 100644 172 | --- a/src/views/general.nim 173 | +++ b/src/views/general.nim 174 | @@ -102,7 +102,7 @@ proc renderHead*(prefs: Prefs; cfg: Config; req: Request; titleText=""; desc=""; 175 | else: getSmallPic(url) 176 | link(rel="preload", type="image/png", href=preloadUrl, `as`="image") 177 | 178 | - let image = getUrlPrefix(cfg) & getPicUrl(url) 179 | + let image = getPicUrl(url) 180 | meta(property="og:image", content=image) 181 | meta(property="twitter:image:src", content=image) 182 | 183 | diff --git a/src/views/rss.nimf b/src/views/rss.nimf 184 | index 036a7b9..e29b070 100644 185 | --- a/src/views/rss.nimf 186 | +++ b/src/views/rss.nimf 187 | @@ -39,19 +39,19 @@ Twitter feed for: ${desc}. 
Generated by ${cfg.hostname} 188 | #end if 189 | #if tweet.photos.len > 0: 190 | # for photo in tweet.photos: 191 | - 192 | + 193 | # end for 194 | #elif tweet.video.isSome: 195 | - 196 | + 197 | #elif tweet.gif.isSome: 198 | -# let thumb = &"{urlPrefix}{getPicUrl(get(tweet.gif).thumb)}" 199 | -# let url = &"{urlPrefix}{getPicUrl(get(tweet.gif).url)}" 200 | +# let thumb = &"{getPicUrl(get(tweet.gif).thumb)}" 201 | +# let url = &"{getPicUrl(get(tweet.gif).url)}" 202 | 204 | #elif tweet.card.isSome: 205 | # let card = tweet.card.get() 206 | # if card.image.len > 0: 207 | - 208 | + 209 | # end if 210 | #end if 211 | #end proc 212 | @@ -102,7 +102,7 @@ Twitter feed for: ${desc}. Generated by ${cfg.hostname} 213 | 214 | ${title} 215 | ${urlPrefix}/${profile.user.username} 216 | - ${urlPrefix}${getPicUrl(profile.user.getUserPic(style="_400x400"))} 217 | + ${getPicUrl(profile.user.getUserPic(style="_400x400"))} 218 | 128 219 | 128 220 | 221 | -- 222 | 2.48.0 223 | 224 | -------------------------------------------------------------------------------- /nitter/patches/0002-proxy-pictures-support.patch: -------------------------------------------------------------------------------- 1 | From f08398bb801272a7fd5990a7a8a7211824f9acfa Mon Sep 17 00:00:00 2001 2 | From: Emilien <4016501+unixfox@users.noreply.github.com> 3 | Date: Fri, 21 Feb 2025 22:23:39 +0100 4 | Subject: [PATCH 1/1] proxy pictures support 5 | 6 | --- 7 | nitter.example.conf | 1 + 8 | public/js/proxyneededcheck.js | 17 ++++++++++++++++ 9 | src/parser.nim | 2 +- 10 | src/prefs_impl.nim | 3 +++ 11 | src/routes/list.nim | 2 +- 12 | src/routes/router_utils.nim | 2 +- 13 | src/routes/status.nim | 2 +- 14 | src/utils.nim | 38 ++++++++++++++++++++++++++--------- 15 | src/views/general.nim | 11 ++++++---- 16 | src/views/list.nim | 6 +++--- 17 | src/views/profile.nim | 16 +++++++-------- 18 | src/views/renderutils.nim | 8 ++++---- 19 | src/views/rss.nimf | 12 +++++------ 20 | src/views/timeline.nim | 2 +- 21 | src/views/tweet.nim | 26 ++++++++++++------------ 22 | 15 files changed, 95 insertions(+), 53 deletions(-) 23 | create mode 100644 public/js/proxyneededcheck.js 24 | 25 | diff --git a/nitter.example.conf b/nitter.example.conf 26 | index dd2faef..8db364f 100644 27 | --- a/nitter.example.conf 28 | +++ b/nitter.example.conf 29 | @@ -35,5 +35,6 @@ replaceTwitter = "nitter.net" 30 | replaceYouTube = "piped.video" 31 | replaceReddit = "teddit.net" 32 | proxyVideos = true 33 | +proxyPics = true 34 | hlsPlayback = false 35 | infiniteScroll = false 36 | diff --git a/public/js/proxyneededcheck.js b/public/js/proxyneededcheck.js 37 | new file mode 100644 38 | index 0000000..8cc5010 39 | --- /dev/null 40 | +++ b/public/js/proxyneededcheck.js 41 | @@ -0,0 +1,17 @@ 42 | +// This check if the browser can't load pbs.twimg.com domain. 43 | +// If it can't then we force proxy on videos and pictures. 
44 | +if (!document.cookie.includes("proxyPics") && !document.cookie.includes("proxyVideos")) { 45 | + var img = new Image(); 46 | + img.onerror = function () { 47 | + var expires = (new Date(Date.now() + 360 * 24 * 60 * 60 * 1000)).toUTCString(); 48 | + if (location.protocol === 'https:') { 49 | + document.cookie = "proxyPics=on; path=/; Secure; expires=" + expires; 50 | + document.cookie = "proxyVideos=on; path=/; Secure; expires=" + expires; 51 | + } else { 52 | + document.cookie = "proxyPics=on; path=/; expires=" + expires; 53 | + document.cookie = "proxyVideos=on; path=/; expires=" + expires; 54 | + } 55 | + location.reload(); 56 | + }; 57 | + img.src = 'https://pbs.twimg.com/favicon.ico'; 58 | +} 59 | diff --git a/src/parser.nim b/src/parser.nim 60 | index 6e0be73..fb2272b 100644 61 | --- a/src/parser.nim 62 | +++ b/src/parser.nim 63 | @@ -198,7 +198,7 @@ proc parseCard(js: JsonNode; urls: JsonNode): Card = 64 | 65 | if kind in {promoImageConvo, promoImageApp, imageDirectMessage} and 66 | result.url.len == 0 or result.url.startsWith("card://"): 67 | - result.url = getPicUrl(result.image) 68 | + result.url = getPicUrl(result.image, true) 69 | 70 | proc parseTweet(js: JsonNode; jsCard: JsonNode = newJNull()): Tweet = 71 | if js.isNull: return 72 | diff --git a/src/prefs_impl.nim b/src/prefs_impl.nim 73 | index 8e2ac8f..a647d1f 100644 74 | --- a/src/prefs_impl.nim 75 | +++ b/src/prefs_impl.nim 76 | @@ -88,6 +88,9 @@ genPrefs: 77 | proxyVideos(checkbox, true): 78 | "Proxy video streaming through the server (might be slow)" 79 | 80 | + proxyPics(checkbox, true): 81 | + "Proxy pictures through the server (might be slow)" 82 | + 83 | muteVideos(checkbox, false): 84 | "Mute videos by default" 85 | 86 | diff --git a/src/routes/list.nim b/src/routes/list.nim 87 | index ac3e97e..d0d1fbe 100644 88 | --- a/src/routes/list.nim 89 | +++ b/src/routes/list.nim 90 | @@ -12,7 +12,7 @@ template respList*(list, timeline, title, vnode: typed) = 91 | resp Http404, showError(&"""List "{@"id"}" not found""", cfg) 92 | 93 | let 94 | - html = renderList(vnode, timeline.query, list) 95 | + html = renderList(vnode, timeline.query, list, prefs.proxyPics) 96 | rss = &"""/i/lists/{@"id"}/rss""" 97 | 98 | resp renderMain(html, request, cfg, prefs, titleText=title, rss=rss, banner=list.banner) 99 | diff --git a/src/routes/router_utils.nim b/src/routes/router_utils.nim 100 | index a071a0d..5a11b13 100644 101 | --- a/src/routes/router_utils.nim 102 | +++ b/src/routes/router_utils.nim 103 | @@ -9,7 +9,7 @@ export utils, prefs, types, uri 104 | template savePref*(pref, value: string; req: Request; expire=false) = 105 | if not expire or pref in cookies(req): 106 | setCookie(pref, value, daysForward(when expire: -10 else: 360), 107 | - httpOnly=true, secure=cfg.useHttps, sameSite=None) 108 | + httpOnly=false, secure=cfg.useHttps, sameSite=None) 109 | 110 | template cookiePrefs*(): untyped {.dirty.} = 111 | getPrefs(cookies(request)) 112 | diff --git a/src/routes/status.nim b/src/routes/status.nim 113 | index 7e89220..91caad6 100644 114 | --- a/src/routes/status.nim 115 | +++ b/src/routes/status.nim 116 | @@ -54,7 +54,7 @@ proc createStatusRouter*(cfg: Config) = 117 | video = getVideoEmbed(cfg, conv.tweet.id) 118 | elif conv.tweet.gif.isSome(): 119 | images = @[get(conv.tweet.gif).thumb] 120 | - video = getPicUrl(get(conv.tweet.gif).url) 121 | + video = getPicUrl(get(conv.tweet.gif).url, prefs.proxyPics) 122 | elif conv.tweet.card.isSome(): 123 | let card = conv.tweet.card.get() 124 | if card.image.len > 0: 125 | diff --git 
a/src/utils.nim b/src/utils.nim 126 | index cdb6f3a..5a831c5 100644 127 | --- a/src/utils.nim 128 | +++ b/src/utils.nim 129 | @@ -46,19 +46,37 @@ proc getVidUrl*(link: string): string = 130 | else: 131 | &"{cdnUrl}/video/{sig}/{encodeUrl(link)}" 132 | 133 | -proc getPicUrl*(link: string): string = 134 | - let sig = getHmac(link) 135 | - if base64Media: 136 | - &"{cdnUrl}/pic/enc/{sig}/{encode(link, safe=true)}" 137 | +proc getPicUrl*(link: string, proxyPics: bool): string = 138 | + if not proxyPics: 139 | + var url = link 140 | + if "twimg.com" notin url: 141 | + url.insert(twimg) 142 | + if not url.startsWith(https): 143 | + url.insert(https) 144 | + & "{url}" 145 | else: 146 | - &"{cdnUrl}/pic/{sig}/{encodeUrl(link)}" 147 | + let sig = getHmac(link) 148 | + if base64Media: 149 | + &"{cdnUrl}/pic/enc/{sig}/{encode(link, safe=true)}" 150 | + else: 151 | + &"{cdnUrl}/pic/{sig}/{encodeUrl(link)}" 152 | + 153 | 154 | -proc getOrigPicUrl*(link: string): string = 155 | - let sig = getHmac(link) 156 | - if base64Media: 157 | - &"{cdnUrl}/pic/orig/enc/{sig}/{encode(link, safe=true)}" 158 | +proc getOrigPicUrl*(link: string, proxyPics: bool): string = 159 | + if not proxyPics: 160 | + var url = link 161 | + if "twimg.com" notin url: 162 | + url.insert(twimg) 163 | + if not url.startsWith(https): 164 | + url.insert(https) 165 | + url.add("?name=orig") 166 | + & "{url}" 167 | else: 168 | - &"{cdnUrl}/pic/orig/{sig}/{encodeUrl(link)}" 169 | + let sig = getHmac(link) 170 | + if base64Media: 171 | + &"{cdnUrl}/pic/orig/enc/{sig}/{encode(link, safe=true)}" 172 | + else: 173 | + &"{cdnUrl}/pic/orig/{sig}/{encodeUrl(link)}" 174 | 175 | proc filterParams*(params: Table): seq[(string, string)] = 176 | for p in params.pairs(): 177 | diff --git a/src/views/general.nim b/src/views/general.nim 178 | index 69ba668..f08b478 100644 179 | --- a/src/views/general.nim 180 | +++ b/src/views/general.nim 181 | @@ -66,6 +66,9 @@ proc renderHead*(prefs: Prefs; cfg: Config; req: Request; titleText=""; desc=""; 182 | link(rel="search", type="application/opensearchdescription+xml", title=cfg.title, 183 | href=opensearchUrl) 184 | 185 | + if not prefs.proxyPics and not prefs.proxyVideos: 186 | + script(src="/js/proxyneededcheck.js") 187 | + 188 | if canonical.len > 0: 189 | link(rel="canonical", href=canonical) 190 | 191 | @@ -94,15 +97,15 @@ proc renderHead*(prefs: Prefs; cfg: Config; req: Request; titleText=""; desc=""; 192 | meta(property="og:locale", content="en_US") 193 | 194 | if banner.len > 0 and not banner.startsWith('#'): 195 | - let bannerUrl = getPicUrl(banner) 196 | + let bannerUrl = getPicUrl(banner, prefs.proxyPics) 197 | link(rel="preload", type="image/png", href=bannerUrl, `as`="image") 198 | 199 | for url in images: 200 | - let preloadUrl = if "400x400" in url: getPicUrl(url) 201 | - else: getSmallPic(url) 202 | + let preloadUrl = if "400x400" in url: getPicUrl(url, prefs.proxyPics) 203 | + else: getSmallPic(url, prefs.proxyPics) 204 | link(rel="preload", type="image/png", href=preloadUrl, `as`="image") 205 | 206 | - let image = getPicUrl(url) 207 | + let image = getPicUrl(url, prefs.proxyPics) 208 | meta(property="og:image", content=image) 209 | meta(property="twitter:image:src", content=image) 210 | 211 | diff --git a/src/views/list.nim b/src/views/list.nim 212 | index e5639d1..bffbea2 100644 213 | --- a/src/views/list.nim 214 | +++ b/src/views/list.nim 215 | @@ -12,12 +12,12 @@ proc renderListTabs*(query: Query; path: string): VNode = 216 | li(class=query.getTabClass(userList)): 217 | a(href=(path & 
"/members")): text "Members" 218 | 219 | -proc renderList*(body: VNode; query: Query; list: List): VNode = 220 | +proc renderList*(body: VNode; query: Query; list: List; proxyPics: bool): VNode = 221 | buildHtml(tdiv(class="timeline-container")): 222 | if list.banner.len > 0: 223 | tdiv(class="timeline-banner"): 224 | - a(href=getPicUrl(list.banner), target="_blank"): 225 | - genImg(list.banner) 226 | + a(href=getPicUrl(list.banner, proxyPics), target="_blank"): 227 | + genImg(list.banner, proxyPics) 228 | 229 | tdiv(class="timeline-header"): 230 | text &"\"{list.name}\" by @{list.username}" 231 | diff --git a/src/views/profile.nim b/src/views/profile.nim 232 | index 2b2e410..3c187e4 100644 233 | --- a/src/views/profile.nim 234 | +++ b/src/views/profile.nim 235 | @@ -16,13 +16,13 @@ proc renderUserCard*(user: User; prefs: Prefs): VNode = 236 | buildHtml(tdiv(class="profile-card")): 237 | tdiv(class="profile-card-info"): 238 | let 239 | - url = getPicUrl(user.getUserPic()) 240 | + url = getPicUrl(user.getUserPic(), prefs.proxyPics) 241 | size = 242 | if prefs.autoplayGifs and user.userPic.endsWith("gif"): "" 243 | else: "_400x400" 244 | 245 | a(class="profile-card-avatar", href=url, target="_blank"): 246 | - genImg(user.getUserPic(size)) 247 | + genImg(user.getUserPic(size), prefs.proxyPics) 248 | 249 | tdiv(class="profile-card-tabs-name"): 250 | linkUser(user, class="profile-card-fullname") 251 | @@ -63,7 +63,7 @@ proc renderUserCard*(user: User; prefs: Prefs): VNode = 252 | renderStat(user.followers, "followers") 253 | renderStat(user.likes, "likes") 254 | 255 | -proc renderPhotoRail(profile: Profile): VNode = 256 | +proc renderPhotoRail(profile: Profile; proxyPics: bool): VNode = 257 | let count = insertSep($profile.user.media, ',') 258 | buildHtml(tdiv(class="photo-rail-card")): 259 | tdiv(class="photo-rail-header"): 260 | @@ -82,16 +82,16 @@ proc renderPhotoRail(profile: Profile): VNode = 261 | if "format" in photo.url or "placeholder" in photo.url: "" 262 | else: ":thumb" 263 | a(href=(&"/{profile.user.username}/status/{photo.tweetId}#m")): 264 | - genImg(photo.url & photoSuffix) 265 | + genImg(photo.url & photoSuffix, proxyPics) 266 | 267 | -proc renderBanner(banner: string): VNode = 268 | +proc renderBanner(banner: string; proxyPics: bool): VNode = 269 | buildHtml(): 270 | if banner.len == 0: 271 | a() 272 | elif banner.startsWith('#'): 273 | a(style={backgroundColor: banner}) 274 | else: 275 | - a(href=getPicUrl(banner), target="_blank"): genImg(banner) 276 | + a(href=getPicUrl(banner, proxyPics), target="_blank"): genImg(banner, proxyPics) 277 | 278 | proc renderProtected(username: string): VNode = 279 | buildHtml(tdiv(class="timeline-container")): 280 | @@ -105,13 +105,13 @@ proc renderProfile*(profile: var Profile; prefs: Prefs; path: string): VNode = 281 | buildHtml(tdiv(class="profile-tabs")): 282 | if not prefs.hideBanner: 283 | tdiv(class="profile-banner"): 284 | - renderBanner(profile.user.banner) 285 | + renderBanner(profile.user.banner, prefs.proxyPics) 286 | 287 | let sticky = if prefs.stickyProfile: " sticky" else: "" 288 | tdiv(class=("profile-tab" & sticky)): 289 | renderUserCard(profile.user, prefs) 290 | if profile.photoRail.len > 0: 291 | - renderPhotoRail(profile) 292 | + renderPhotoRail(profile, prefs.proxyPics) 293 | 294 | if profile.user.protected: 295 | renderProtected(profile.user.username) 296 | diff --git a/src/views/renderutils.nim b/src/views/renderutils.nim 297 | index 41ef8df..3124567 100644 298 | --- a/src/views/renderutils.nim 299 | +++ 
b/src/views/renderutils.nim 300 | @@ -5,11 +5,11 @@ import ".."/[types, utils] 301 | 302 | const smallWebp* = "?name=small&format=webp" 303 | 304 | -proc getSmallPic*(url: string): string = 305 | +proc getSmallPic*(url: string; proxyPics: bool): string = 306 | result = url 307 | if "?" notin url and not url.endsWith("placeholder.png"): 308 | result &= smallWebp 309 | - result = getPicUrl(result) 310 | + result = getPicUrl(result, proxyPics) 311 | 312 | proc icon*(icon: string; text=""; title=""; class=""; href=""): VNode = 313 | var c = "icon-" & icon 314 | @@ -89,9 +89,9 @@ proc genDate*(pref, state: string): VNode = 315 | input(name=pref, `type`="date", value=state) 316 | icon "calendar" 317 | 318 | -proc genImg*(url: string; class=""): VNode = 319 | +proc genImg*(url: string; proxyPics: bool; class="";): VNode = 320 | buildHtml(): 321 | - img(src=getPicUrl(url), class=class, alt="", loading="lazy") 322 | + img(src=getPicUrl(url, proxyPics), class=class, alt="", loading="lazy") 323 | 324 | proc getTabClass*(query: Query; tab: QueryKind): string = 325 | if query.kind == tab: "tab-item active" 326 | diff --git a/src/views/rss.nimf b/src/views/rss.nimf 327 | index e29b070..9a06325 100644 328 | --- a/src/views/rss.nimf 329 | +++ b/src/views/rss.nimf 330 | @@ -39,19 +39,19 @@ Twitter feed for: ${desc}. Generated by ${cfg.hostname} 331 | #end if 332 | #if tweet.photos.len > 0: 333 | # for photo in tweet.photos: 334 | - 335 | + 336 | # end for 337 | #elif tweet.video.isSome: 338 | - 339 | + 340 | #elif tweet.gif.isSome: 341 | -# let thumb = &"{getPicUrl(get(tweet.gif).thumb)}" 342 | -# let url = &"{getPicUrl(get(tweet.gif).url)}" 343 | +# let thumb = &"{getPicUrl(get(tweet.gif).thumb, false)}" 344 | +# let url = &"{getPicUrl(get(tweet.gif).url, false)}" 345 | 347 | #elif tweet.card.isSome: 348 | # let card = tweet.card.get() 349 | # if card.image.len > 0: 350 | - 351 | + 352 | # end if 353 | #end if 354 | #end proc 355 | @@ -102,7 +102,7 @@ Twitter feed for: ${desc}. 
Generated by ${cfg.hostname} 356 | 357 | ${title} 358 | ${urlPrefix}/${profile.user.username} 359 | - ${getPicUrl(profile.user.getUserPic(style="_400x400"))} 360 | + ${getPicUrl(profile.user.getUserPic(style="_400x400"), false)} 361 | 128 362 | 128 363 | 364 | diff --git a/src/views/timeline.nim b/src/views/timeline.nim 365 | index abeb6d3..23e5a84 100644 366 | --- a/src/views/timeline.nim 367 | +++ b/src/views/timeline.nim 368 | @@ -61,7 +61,7 @@ proc renderUser(user: User; prefs: Prefs): VNode = 369 | tdiv(class="tweet-body profile-result"): 370 | tdiv(class="tweet-header"): 371 | a(class="tweet-avatar", href=("/" & user.username)): 372 | - genImg(user.getUserPic("_bigger"), class=prefs.getAvatarClass) 373 | + genImg(user.getUserPic("_bigger"), prefs.proxyPics, class=prefs.getAvatarClass) 374 | 375 | tdiv(class="tweet-name-row"): 376 | tdiv(class="fullname-and-username"): 377 | diff --git a/src/views/tweet.nim b/src/views/tweet.nim 378 | index 34dcd4c..6c7445d 100644 379 | --- a/src/views/tweet.nim 380 | +++ b/src/views/tweet.nim 381 | @@ -10,7 +10,7 @@ import general 382 | const doctype = "\n" 383 | 384 | proc renderMiniAvatar(user: User; prefs: Prefs): VNode = 385 | - genImg(user.getUserPic("_mini"), class=(prefs.getAvatarClass & " mini")) 386 | + genImg(user.getUserPic("_mini"), prefs.proxyPics, class=(prefs.getAvatarClass & " mini")) 387 | 388 | proc renderHeader(tweet: Tweet; retweet: string; pinned: bool; prefs: Prefs): VNode = 389 | buildHtml(tdiv): 390 | @@ -26,7 +26,7 @@ proc renderHeader(tweet: Tweet; retweet: string; pinned: bool; prefs: Prefs): VN 391 | var size = "_bigger" 392 | if not prefs.autoplayGifs and tweet.user.userPic.endsWith("gif"): 393 | size = "_400x400" 394 | - genImg(tweet.user.getUserPic(size), class=prefs.getAvatarClass) 395 | + genImg(tweet.user.getUserPic(size), prefs.proxyPics, class=prefs.getAvatarClass) 396 | 397 | tdiv(class="tweet-name-row"): 398 | tdiv(class="fullname-and-username"): 399 | @@ -37,7 +37,7 @@ proc renderHeader(tweet: Tweet; retweet: string; pinned: bool; prefs: Prefs): VN 400 | a(href=getLink(tweet), title=tweet.getTime): 401 | text tweet.getShortTime 402 | 403 | -proc renderAlbum(tweet: Tweet): VNode = 404 | +proc renderAlbum(tweet: Tweet; proxyPics: bool): VNode = 405 | let 406 | groups = if tweet.photos.len < 3: @[tweet.photos] 407 | else: tweet.photos.distribute(2) 408 | @@ -51,8 +51,8 @@ proc renderAlbum(tweet: Tweet): VNode = 409 | let 410 | named = "name=" in photo 411 | small = if named: photo else: photo & smallWebp 412 | - a(href=getOrigPicUrl(photo), class="still-image", target="_blank"): 413 | - genImg(small) 414 | + a(href=getOrigPicUrl(photo, proxyPics), class="still-image", target="_blank"): 415 | + genImg(small, proxyPics) 416 | 417 | proc isPlaybackEnabled(prefs: Prefs; playbackType: VideoType): bool = 418 | case playbackType 419 | @@ -88,7 +88,7 @@ proc renderVideo*(video: Video; prefs: Prefs; path: string): VNode = 420 | buildHtml(tdiv(class="attachments card")): 421 | tdiv(class="gallery-video" & container): 422 | tdiv(class="attachment video-container"): 423 | - let thumb = getSmallPic(video.thumb) 424 | + let thumb = getSmallPic(video.thumb, prefs.proxyPics) 425 | if not video.available: 426 | img(src=thumb, loading="lazy") 427 | renderVideoUnavailable(video) 428 | @@ -120,9 +120,9 @@ proc renderGif(gif: Gif; prefs: Prefs): VNode = 429 | buildHtml(tdiv(class="attachments media-gif")): 430 | tdiv(class="gallery-gif", style={maxHeight: "unset"}): 431 | tdiv(class="attachment"): 432 | - video(class="gif", 
poster=getSmallPic(gif.thumb), autoplay=prefs.autoplayGifs, 433 | + video(class="gif", poster=getSmallPic(gif.thumb, prefs.proxyPics), autoplay=prefs.autoplayGifs, 434 | controls="", muted="", loop=""): 435 | - source(src=getPicUrl(gif.url), `type`="video/mp4") 436 | + source(src=getPicUrl(gif.url, prefs.proxyPics), `type`="video/mp4") 437 | 438 | proc renderPoll(poll: Poll): VNode = 439 | buildHtml(tdiv(class="poll")): 440 | @@ -139,10 +139,10 @@ proc renderPoll(poll: Poll): VNode = 441 | span(class="poll-info"): 442 | text &"{insertSep($poll.votes, ',')} votes • {poll.status}" 443 | 444 | -proc renderCardImage(card: Card): VNode = 445 | +proc renderCardImage(card: Card; proxyPics: bool): VNode = 446 | buildHtml(tdiv(class="card-image-container")): 447 | tdiv(class="card-image"): 448 | - genImg(card.image) 449 | + genImg(card.image, proxyPics) 450 | if card.kind == player: 451 | tdiv(class="card-overlay"): 452 | tdiv(class="overlay-circle"): 453 | @@ -170,7 +170,7 @@ proc renderCard(card: Card; prefs: Prefs; path: string): VNode = 454 | else: 455 | a(class="card-container", href=url): 456 | if card.image.len > 0: 457 | - renderCardImage(card) 458 | + renderCardImage(card, prefs.proxyPics) 459 | tdiv(class="card-content-container"): 460 | renderCardContent(card) 461 | 462 | @@ -212,7 +212,7 @@ proc renderMediaTags(tags: seq[User]): VNode = 463 | proc renderQuoteMedia(quote: Tweet; prefs: Prefs; path: string): VNode = 464 | buildHtml(tdiv(class="quote-media-container")): 465 | if quote.photos.len > 0: 466 | - renderAlbum(quote) 467 | + renderAlbum(quote, prefs.proxyPics) 468 | elif quote.video.isSome: 469 | renderVideo(quote.video.get(), prefs, path) 470 | elif quote.gif.isSome: 471 | @@ -322,7 +322,7 @@ proc renderTweet*(tweet: Tweet; prefs: Prefs; path: string; class=""; index=0; 472 | renderCard(tweet.card.get(), prefs, path) 473 | 474 | if tweet.photos.len > 0: 475 | - renderAlbum(tweet) 476 | + renderAlbum(tweet, prefs.proxyPics) 477 | elif tweet.video.isSome: 478 | renderVideo(tweet.video.get(), prefs, path) 479 | views = tweet.video.get().views 480 | -- 481 | 2.48.0 482 | 483 | -------------------------------------------------------------------------------- /nitter/patches/0003-add-x-cache-header.patch: -------------------------------------------------------------------------------- 1 | From f790e90bdc3c154ac1445ec66ee4a296891cafd6 Mon Sep 17 00:00:00 2001 2 | From: Emilien <4016501+unixfox@users.noreply.github.com> 3 | Date: Fri, 20 Dec 2024 19:07:05 +0100 4 | Subject: [PATCH 1/1] add x-cache header 5 | 6 | --- 7 | src/routes/rss.nim | 12 ++++++------ 8 | 1 file changed, 6 insertions(+), 6 deletions(-) 9 | 10 | diff --git a/src/routes/rss.nim b/src/routes/rss.nim 11 | index 447f4ad..0fd23f0 100644 12 | --- a/src/routes/rss.nim 13 | +++ b/src/routes/rss.nim 14 | @@ -42,7 +42,7 @@ proc timelineRss*(req: Request; cfg: Config; query: Query): Future[Rss] {.async. 
15 | let rss = renderTimelineRss(profile, cfg, multi=(names.len > 1)) 16 | return Rss(feed: rss, cursor: profile.tweets.bottom) 17 | 18 | -template respRss*(rss, page) = 19 | +template respRss*(rss; page; cached = false ) = 20 | if rss.cursor.len == 0: 21 | let info = case page 22 | of "User": " \"" & @"name" & "\" " 23 | @@ -54,7 +54,7 @@ template respRss*(rss, page) = 24 | resp Http404, showError(getSuspended(@"name"), cfg) 25 | 26 | let headers = {"Content-Type": "application/rss+xml; charset=utf-8", 27 | - "Min-Id": rss.cursor} 28 | + "Min-Id": rss.cursor, "X-Cache": $cached} 29 | resp Http200, headers, rss.feed 30 | 31 | proc createRssRouter*(cfg: Config) = 32 | @@ -74,7 +74,7 @@ proc createRssRouter*(cfg: Config) = 33 | 34 | var rss = await getCachedRss(key) 35 | if rss.cursor.len > 0: 36 | - respRss(rss, "Search") 37 | + respRss(rss, "Search", true) 38 | 39 | let tweets = await getGraphTweetSearch(query, cursor) 40 | rss.cursor = tweets.bottom 41 | @@ -92,7 +92,7 @@ proc createRssRouter*(cfg: Config) = 42 | 43 | var rss = await getCachedRss(key) 44 | if rss.cursor.len > 0: 45 | - respRss(rss, "User") 46 | + respRss(rss, "User", true) 47 | 48 | rss = await timelineRss(request, cfg, Query(fromUser: @[name])) 49 | 50 | @@ -120,7 +120,7 @@ proc createRssRouter*(cfg: Config) = 51 | 52 | var rss = await getCachedRss(key) 53 | if rss.cursor.len > 0: 54 | - respRss(rss, "User") 55 | + respRss(rss, "User", true) 56 | 57 | rss = await timelineRss(request, cfg, query) 58 | 59 | @@ -153,7 +153,7 @@ proc createRssRouter*(cfg: Config) = 60 | 61 | var rss = await getCachedRss(key) 62 | if rss.cursor.len > 0: 63 | - respRss(rss, "List") 64 | + respRss(rss, "List", true) 65 | 66 | let 67 | list = await getCachedList(id=id) 68 | -- 69 | 2.48.0 70 | 71 | -------------------------------------------------------------------------------- /nitter/patches/0004-custom-donation-page.patch: -------------------------------------------------------------------------------- 1 | From 2b9b9fe23a9f54d1c8ac1ef248474e18991d56a3 Mon Sep 17 00:00:00 2001 2 | From: Emilien <4016501+unixfox@users.noreply.github.com> 3 | Date: Fri, 21 Feb 2025 22:02:42 +0100 4 | Subject: [PATCH 1/1] custom donation page 5 | 6 | --- 7 | public/md/about.md | 32 +++++++++++++++++++++++++++++++- 8 | src/views/general.nim | 4 +++- 9 | 2 files changed, 34 insertions(+), 2 deletions(-) 10 | 11 | diff --git a/public/md/about.md b/public/md/about.md 12 | index 3825e8f..91b30d0 100644 13 | --- a/public/md/about.md 14 | +++ b/public/md/about.md 15 | @@ -1,5 +1,7 @@ 16 | # About 17 | 18 | +XCancel is an instance of Nitter. 19 | + 20 | Nitter is a free and open source alternative Twitter front-end focused on 21 | privacy and performance. The source is available on GitHub at 22 | 23 | @@ -43,6 +45,34 @@ Twitter account. 24 | 25 | ## Donating 26 | 27 | +You can either donate to XCancel or the Nitter project. 28 | + 29 | +Donating to XCancel helps keep this Nitter instance running. And donating to the Nitter project helps the development of the project. Both projects are run by separate people. 
30 | + 31 | +### Donating to XCancel 32 | + 33 | +#### Credit/debit card and bank transfer 34 | + 35 | +Liberapay(recurrent donation): \ 36 | +Ko-fi (one time donation): 37 | + 38 | +#### Cryptocurrency 39 | + 40 | +Bitcoin: 1fyoJKdFo5cDPt21CGHW2RDQtcktQU9cL (Segwit compatible) \ 41 | +Bitcoin cash: qz6qvjt9m4wqrqhyet3v3gljrpnys3wl2yxz0jfuu2 \ 42 | +Ethereum: 0x52B72e00be486C03C9b09AA0D441ADE1EfA5d2CB (you can send any ERC20 token) \ 43 | +Monero: 82VziQe69ynRNKZ2Bk7XcoYUA6Q1eRuPyGxV3gVWDju7EPkUXZE2oGTGWiah51cCKRMAmwTY11D6qcH3NpGtfdjcNccmXL5 \ 44 | +Nano: nano_3hrphgbgi4px1gfiftsphokukcj1tkk168it6xeetxpc9c7jt5e6i7kmjupi \ 45 | +Stellar: GB5LHWSIOM6BRO7CMWRMWVWJUGPCKVRAVINGUJHA7PYP3CHES2XCMDG5 \ 46 | +Dogecoin: D6dsXSZEp1rkqvLAV41QxXTPPgvDSU2rjo \ 47 | +Dash: Xdtr4fFe3U56mmQVi3iC5aW2LRNRb95Gbg \ 48 | +Decred: DsY4tZLcikXjJwdLBFr2pYWgGPatY9y81cZ \ 49 | +Binance coin: bnb10vd22k3ujp9ezjc6s8x7vqvuh02hlvcwqtsepq (you can send any BEP20 token) \ 50 | +Litecoin: ltc1qre3xwwjsnctpfrx6eu0y77nca3cwlhe8kzy27d \ 51 | +USD Coin: 0xd415a7A9455DBf1a666F933c78A7325914E73C6b (ETH) - bnb10vd22k3ujp9ezjc6s8x7vqvuh02hlvcwqtsepq (BNB) 52 | + 53 | +### Donating to the Nitter project. 54 | + 55 | Liberapay: https://liberapay.com/zedeus \ 56 | Patreon: https://patreon.com/nitter \ 57 | BTC: bc1qpqpzjkcpgluhzf7x9yqe7jfe8gpfm5v08mdr55 \ 58 | @@ -53,4 +83,4 @@ ZEC: u1vndfqtzyy6qkzhkapxelel7ams38wmfeccu3fdpy2wkuc4erxyjm8ncjhnyg747x6t0kf0faq 59 | 60 | ## Contact 61 | 62 | -Feel free to join our [Matrix channel](https://matrix.to/#/#nitter:matrix.org). 63 | +Feel free to join Nitter [Matrix channel](https://matrix.to/#/#nitter:matrix.org). 64 | diff --git a/src/views/general.nim b/src/views/general.nim 65 | index 5ba40a3..12c2e9f 100644 66 | --- a/src/views/general.nim 67 | +++ b/src/views/general.nim 68 | @@ -24,6 +24,8 @@ proc renderNavbar(cfg: Config; req: Request; rss, canonical: string): VNode = 69 | tdiv(class="inner-nav"): 70 | tdiv(class="nav-item"): 71 | a(class="site-name", href="/"): text cfg.title 72 | + a(href="/about"): text "(donate)" 73 | + 74 | 75 | a(href="/"): img(class="site-logo", src="/logo.png", alt="Logo") 76 | 77 | @@ -32,7 +34,7 @@ proc renderNavbar(cfg: Config; req: Request; rss, canonical: string): VNode = 78 | if cfg.enableRss and rss.len > 0: 79 | icon "rss-feed", title="RSS Feed", href=rss 80 | icon "bird", title="Open in Twitter", href=canonical 81 | - a(href="https://liberapay.com/zedeus"): verbatim lp 82 | + a(href="https://liberapay.com/yewtube"): verbatim lp 83 | icon "info", title="About", href="/about" 84 | icon "cog", title="Preferences", href=("/settings?referer=" & encodeUrl(path)) 85 | 86 | -- 87 | 2.49.0 88 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/.gitignore: -------------------------------------------------------------------------------- 1 | docker-openresty/ 2 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile - alpine 2 | # https://github.com/openresty/docker-openresty 3 | 4 | ARG RESTY_IMAGE_BASE="alpine" 5 | ARG RESTY_IMAGE_TAG="3.21.3" 6 | 7 | FROM ${RESTY_IMAGE_BASE}:${RESTY_IMAGE_TAG} 8 | 9 | LABEL maintainer="Evan Wies " 10 | 11 | # Docker Build Arguments 12 | ARG RESTY_IMAGE_BASE="alpine" 13 | ARG RESTY_IMAGE_TAG="3.21.3" 14 | ARG RESTY_VERSION="1.27.1.1" 15 | 16 | # 
https://github.com/openresty/openresty-packaging/blob/master/alpine/openresty-openssl3/APKBUILD 17 | ARG RESTY_OPENSSL_VERSION="3.0.16" 18 | ARG RESTY_OPENSSL_PATCH_VERSION="3.0.15" 19 | ARG RESTY_OPENSSL_URL_BASE="https://github.com/openssl/openssl/releases/download/openssl-${RESTY_OPENSSL_VERSION}" 20 | # LEGACY: "https://www.openssl.org/source/old/1.1.1" 21 | ARG RESTY_OPENSSL_BUILD_OPTIONS="enable-camellia enable-seed enable-rfc3779 enable-cms enable-md2 enable-rc5 \ 22 | enable-weak-ssl-ciphers enable-ssl3 enable-ssl3-method enable-md2 enable-ktls enable-fips \ 23 | " 24 | 25 | # https://github.com/openresty/openresty-packaging/blob/master/alpine/openresty-pcre2/APKBUILD 26 | ARG RESTY_PCRE_VERSION="10.44" 27 | ARG RESTY_PCRE_SHA256="86b9cb0aa3bcb7994faa88018292bc704cdbb708e785f7c74352ff6ea7d3175b" 28 | ARG RESTY_PCRE_BUILD_OPTIONS="--enable-jit --enable-pcre2grep-jit --disable-bsr-anycrlf --disable-coverage --disable-ebcdic --disable-fuzz-support \ 29 | --disable-jit-sealloc --disable-never-backslash-C --enable-newline-is-lf --enable-pcre2-8 --enable-pcre2-16 --enable-pcre2-32 \ 30 | --enable-pcre2grep-callout --enable-pcre2grep-callout-fork --disable-pcre2grep-libbz2 --disable-pcre2grep-libz --disable-pcre2test-libedit \ 31 | --enable-percent-zt --disable-rebuild-chartables --enable-shared --disable-static --disable-silent-rules --enable-unicode --disable-valgrind \ 32 | " 33 | 34 | ARG RESTY_J="1" 35 | 36 | # https://github.com/openresty/openresty-packaging/blob/master/alpine/openresty/APKBUILD 37 | ARG RESTY_CONFIG_OPTIONS="\ 38 | --with-cc-opt='-DJA3_SORT_EXT' \ 39 | --with-compat \ 40 | --without-http_rds_json_module \ 41 | --without-http_rds_csv_module \ 42 | --without-lua_rds_parser \ 43 | --without-mail_pop3_module \ 44 | --without-mail_imap_module \ 45 | --without-mail_smtp_module \ 46 | --with-http_addition_module \ 47 | --add-module=/tmp/njs/nginx \ 48 | --add-module=/tmp/nginx-ssl-ja3 \ 49 | --add-module=/nginx-ssl-fingerprint \ 50 | --with-http_auth_request_module \ 51 | --with-http_dav_module \ 52 | --with-http_flv_module \ 53 | --with-http_geoip_module=dynamic \ 54 | --with-http_gunzip_module \ 55 | --with-http_gzip_static_module \ 56 | --with-http_image_filter_module=dynamic \ 57 | --with-http_mp4_module \ 58 | --with-http_random_index_module \ 59 | --with-http_realip_module \ 60 | --with-http_secure_link_module \ 61 | --with-http_slice_module \ 62 | --with-http_ssl_module \ 63 | --with-http_stub_status_module \ 64 | --with-http_sub_module \ 65 | --with-http_v2_module \ 66 | --with-http_v3_module \ 67 | --with-http_xslt_module=dynamic \ 68 | --with-ipv6 \ 69 | --with-mail \ 70 | --with-mail_ssl_module \ 71 | --with-md5-asm \ 72 | --with-sha1-asm \ 73 | --with-stream \ 74 | --with-stream_ssl_module \ 75 | --with-stream_ssl_preread_module \ 76 | --with-threads \ 77 | " 78 | ARG RESTY_CONFIG_OPTIONS_MORE="" 79 | ARG RESTY_LUAJIT_OPTIONS="--with-luajit-xcflags='-DLUAJIT_NUMMODE=2 -DLUAJIT_ENABLE_LUA52COMPAT'" 80 | ARG RESTY_PCRE_OPTIONS="--with-pcre-jit" 81 | 82 | ARG RESTY_ADD_PACKAGE_BUILDDEPS="perl" 83 | ARG RESTY_ADD_PACKAGE_RUNDEPS="" 84 | ARG RESTY_EVAL_PRE_CONFIGURE="" 85 | ARG RESTY_EVAL_POST_DOWNLOAD_PRE_CONFIGURE="" 86 | ARG RESTY_EVAL_POST_MAKE="" 87 | 88 | # These are not intended to be user-specified 89 | ARG _RESTY_CONFIG_DEPS="--with-pcre \ 90 | --with-cc-opt='-DNGX_LUA_ABORT_AT_PANIC -I/usr/local/openresty/pcre2/include -I/usr/local/openresty/openssl3/include -I /tmp/quickjs' \ 91 | --with-ld-opt='-L/usr/local/openresty/pcre2/lib 
-L/usr/local/openresty/openssl3/lib -Wl,-rpath,/usr/local/openresty/pcre2/lib:/usr/local/openresty/openssl3/lib -L /tmp/quickjs' \ 92 | " 93 | 94 | LABEL resty_image_base="${RESTY_IMAGE_BASE}" 95 | LABEL resty_image_tag="${RESTY_IMAGE_TAG}" 96 | LABEL resty_version="${RESTY_VERSION}" 97 | LABEL resty_openssl_version="${RESTY_OPENSSL_VERSION}" 98 | LABEL resty_openssl_patch_version="${RESTY_OPENSSL_PATCH_VERSION}" 99 | LABEL resty_openssl_url_base="${RESTY_OPENSSL_URL_BASE}" 100 | LABEL resty_openssl_build_options="${RESTY_OPENSSL_BUILD_OPTIONS}" 101 | LABEL resty_pcre_version="${RESTY_PCRE_VERSION}" 102 | LABEL resty_pcre_build_options="${RESTY_PCRE_BUILD_OPTIONS}" 103 | LABEL resty_pcre_sha256="${RESTY_PCRE_SHA256}" 104 | LABEL resty_config_options="${RESTY_CONFIG_OPTIONS}" 105 | LABEL resty_config_options_more="${RESTY_CONFIG_OPTIONS_MORE}" 106 | LABEL resty_config_deps="${_RESTY_CONFIG_DEPS}" 107 | LABEL resty_add_package_builddeps="${RESTY_ADD_PACKAGE_BUILDDEPS}" 108 | LABEL resty_add_package_rundeps="${RESTY_ADD_PACKAGE_RUNDEPS}" 109 | LABEL resty_eval_pre_configure="${RESTY_EVAL_PRE_CONFIGURE}" 110 | LABEL resty_eval_post_download_pre_configure="${RESTY_EVAL_POST_DOWNLOAD_PRE_CONFIGURE}" 111 | LABEL resty_eval_post_make="${RESTY_EVAL_POST_MAKE}" 112 | LABEL resty_luajit_options="${RESTY_LUAJIT_OPTIONS}" 113 | LABEL resty_pcre_options="${RESTY_PCRE_OPTIONS}" 114 | 115 | ADD periodic-build/openresty-ssl-ja3/http2/ /nginx-ssl-fingerprint 116 | #ADD config nginx.conf /nginx-ssl-fingerprint/ 117 | 118 | RUN apk add --no-cache --virtual .build-deps \ 119 | build-base \ 120 | coreutils \ 121 | curl \ 122 | gd-dev \ 123 | geoip-dev \ 124 | libxslt-dev \ 125 | linux-headers \ 126 | make \ 127 | perl-dev \ 128 | readline-dev \ 129 | zlib-dev \ 130 | git \ 131 | ${RESTY_ADD_PACKAGE_BUILDDEPS} \ 132 | && apk add --no-cache \ 133 | gd \ 134 | geoip \ 135 | libgcc \ 136 | libxslt \ 137 | tzdata \ 138 | zlib \ 139 | ${RESTY_ADD_PACKAGE_RUNDEPS} \ 140 | && cd /tmp \ 141 | && if [ -n "${RESTY_EVAL_PRE_CONFIGURE}" ]; then eval $(echo ${RESTY_EVAL_PRE_CONFIGURE}); fi \ 142 | && cd /tmp \ 143 | && curl -fSL "${RESTY_OPENSSL_URL_BASE}/openssl-${RESTY_OPENSSL_VERSION}.tar.gz" -o openssl-${RESTY_OPENSSL_VERSION}.tar.gz \ 144 | && tar xzf openssl-${RESTY_OPENSSL_VERSION}.tar.gz \ 145 | && cd openssl-${RESTY_OPENSSL_VERSION} \ 146 | && if [ $(echo ${RESTY_OPENSSL_VERSION} | cut -c 1-4) = "3.0." 
] ; then \ 147 | echo 'patching OpenSSL 3.0.15 for OpenResty' \ 148 | && curl -s https://raw.githubusercontent.com/openresty/openresty/master/patches/openssl-${RESTY_OPENSSL_PATCH_VERSION}-sess_set_get_cb_yield.patch | patch -p1 \ 149 | && curl -s https://raw.githubusercontent.com/fooinha/nginx-ssl-ja3/master/patches/openssl-3.extensions.patch | patch -p1 ; \ 150 | fi \ 151 | && if [ $(echo ${RESTY_OPENSSL_VERSION} | cut -c 1-5) = "1.1.1" ] ; then \ 152 | echo 'patching OpenSSL 1.1.1 for OpenResty' \ 153 | && curl -s https://raw.githubusercontent.com/openresty/openresty/master/patches/openssl-${RESTY_OPENSSL_PATCH_VERSION}-sess_set_get_cb_yield.patch | patch -p1 ; \ 154 | fi \ 155 | && if [ $(echo ${RESTY_OPENSSL_VERSION} | cut -c 1-5) = "1.1.0" ] ; then \ 156 | echo 'patching OpenSSL 1.1.0 for OpenResty' \ 157 | && curl -s https://raw.githubusercontent.com/openresty/openresty/ed328977028c3ec3033bc25873ee360056e247cd/patches/openssl-1.1.0j-parallel_build_fix.patch | patch -p1 \ 158 | && curl -s https://raw.githubusercontent.com/openresty/openresty/master/patches/openssl-${RESTY_OPENSSL_PATCH_VERSION}-sess_set_get_cb_yield.patch | patch -p1 ; \ 159 | fi \ 160 | && ./config \ 161 | shared zlib -g \ 162 | --prefix=/usr/local/openresty/openssl3 \ 163 | --libdir=lib \ 164 | -Wl,-rpath,/usr/local/openresty/openssl3/lib \ 165 | ${RESTY_OPENSSL_BUILD_OPTIONS} \ 166 | && make -j${RESTY_J} \ 167 | && make -j${RESTY_J} install_sw \ 168 | && cd /tmp \ 169 | && curl -fSL "https://github.com/PCRE2Project/pcre2/releases/download/pcre2-${RESTY_PCRE_VERSION}/pcre2-${RESTY_PCRE_VERSION}.tar.gz" -o pcre2-${RESTY_PCRE_VERSION}.tar.gz \ 170 | && echo "${RESTY_PCRE_SHA256} pcre2-${RESTY_PCRE_VERSION}.tar.gz" | shasum -a 256 --check \ 171 | && tar xzf pcre2-${RESTY_PCRE_VERSION}.tar.gz \ 172 | && cd /tmp/pcre2-${RESTY_PCRE_VERSION} \ 173 | && CFLAGS="-g -O3" ./configure \ 174 | --prefix=/usr/local/openresty/pcre2 \ 175 | --libdir=/usr/local/openresty/pcre2/lib \ 176 | ${RESTY_PCRE_BUILD_OPTIONS} \ 177 | && CFLAGS="-g -O3" make -j${RESTY_J} \ 178 | && CFLAGS="-g -O3" make -j${RESTY_J} install \ 179 | && cd /tmp \ 180 | && git clone -b 0.8.9 https://github.com/nginx/njs \ 181 | && git clone https://github.com/bellard/quickjs \ 182 | && cd quickjs && git checkout 6e2e68fd0896957f92eb6c242a2e048c1ef3cae0 && CFLAGS='-fPIC' make libquickjs.a && cd .. 
\ 183 | && git clone https://github.com/fooinha/nginx-ssl-ja3.git \ 184 | && curl -fSL https://openresty.org/download/openresty-${RESTY_VERSION}.tar.gz -o openresty-${RESTY_VERSION}.tar.gz \ 185 | && tar xzf openresty-${RESTY_VERSION}.tar.gz \ 186 | && cd /tmp/openresty-${RESTY_VERSION} \ 187 | && if [ -n "${RESTY_EVAL_POST_DOWNLOAD_PRE_CONFIGURE}" ]; then eval $(echo ${RESTY_EVAL_POST_DOWNLOAD_PRE_CONFIGURE}); fi \ 188 | && cd bundle/nginx-1.27.1 \ 189 | && curl -s https://raw.githubusercontent.com/fooinha/nginx-ssl-ja3/master/patches/nginx.1.27.2.ssl.extensions.patch | patch -p1 \ 190 | && cat /nginx-ssl-fingerprint/patches/nginx-1.27.patch | patch -p1 \ 191 | && cd /tmp/openresty-${RESTY_VERSION} \ 192 | && eval ./configure -j${RESTY_J} ${_RESTY_CONFIG_DEPS} ${RESTY_CONFIG_OPTIONS} ${RESTY_CONFIG_OPTIONS_MORE} ${RESTY_LUAJIT_OPTIONS} ${RESTY_PCRE_OPTIONS} \ 193 | && make -j${RESTY_J} \ 194 | && make -j${RESTY_J} install \ 195 | && cd /tmp \ 196 | && if [ -n "${RESTY_EVAL_POST_MAKE}" ]; then eval $(echo ${RESTY_EVAL_POST_MAKE}); fi \ 197 | && rm -rf \ 198 | openssl-${RESTY_OPENSSL_VERSION}.tar.gz openssl-${RESTY_OPENSSL_VERSION} \ 199 | pcre2-${RESTY_PCRE_VERSION}.tar.gz pcre2-${RESTY_PCRE_VERSION} \ 200 | openresty-${RESTY_VERSION}.tar.gz openresty-${RESTY_VERSION} \ 201 | && /usr/local/openresty/bin/opm get openresty/lua-resty-string \ 202 | && /usr/local/openresty/bin/opm get bungle/lua-resty-session \ 203 | && /usr/local/openresty/bin/opm get fffonion/lua-resty-acme \ 204 | && apk del .build-deps \ 205 | && mkdir -p /var/run/openresty \ 206 | && ln -sf /dev/stdout /usr/local/openresty/nginx/logs/access.log \ 207 | && ln -sf /dev/stderr /usr/local/openresty/nginx/logs/error.log 208 | 209 | # Add additional binaries into PATH for convenience 210 | ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin 211 | 212 | # Copy nginx configuration files 213 | COPY nginx.conf /usr/local/openresty/nginx/conf/nginx.conf 214 | COPY nginx.vh.default.conf /etc/nginx/conf.d/default.conf 215 | 216 | CMD ["/usr/local/openresty/bin/openresty", "-g", "daemon off;"] 217 | 218 | # Use SIGQUIT instead of default SIGTERM to cleanly drain requests 219 | # See https://github.com/openresty/docker-openresty/blob/master/README.md#tips--pitfalls 220 | STOPSIGNAL SIGQUIT 221 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/README.md: -------------------------------------------------------------------------------- 1 | Using patches from https://github.com/fooinha/nginx-ssl-ja3 2 | 3 | And using custom Dockerfile based on https://github.com/openresty/docker-openresty/tree/master/alpine -------------------------------------------------------------------------------- /openresty-ssl-ja3/http2/config: -------------------------------------------------------------------------------- 1 | # 2 | # HTTP/2 fingerprint module conf 3 | # 4 | 5 | ngx_addon_name=ngx_ssl_fingerprint_module 6 | 7 | CORE_LIBS="$CORE_LIBS" 8 | CORE_INCS="$CORE_INCS $ngx_addon_dir/src" 9 | 10 | HTTP_MODULES="$HTTP_MODULES ngx_http_ssl_fingerprint_module" 11 | 12 | NGX_ADDON_SRCS="$NGX_ADDON_SRCS \ 13 | $ngx_addon_dir/src/nginx_ssl_fingerprint.c \ 14 | $ngx_addon_dir/src/ngx_http_ssl_fingerprint_module.c" 15 | 16 | have=NGX_HTTP2_FINGERPRING_MODULE . 
auto/have 17 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/http2/patches/nginx-1.27.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c 2 | index 0f5bd3de8..1b85e378d 100644 3 | --- a/src/http/v2/ngx_http_v2.c 4 | +++ b/src/http/v2/ngx_http_v2.c 5 | @@ -301,6 +301,8 @@ ngx_http_v2_init(ngx_event_t *rev) 6 | ngx_add_timer(rev, cscf->client_header_timeout); 7 | } 8 | 9 | + h2c->fp_fingerprinted = 0; 10 | + 11 | c->idle = 1; 12 | ngx_reusable_connection(c, 0); 13 | 14 | @@ -1352,6 +1354,14 @@ ngx_http_v2_state_headers(ngx_http_v2_connection_t *h2c, u_char *pos, 15 | } 16 | } 17 | 18 | + if (!h2c->fp_fingerprinted && h2c->fp_priorities.len < 32) { 19 | + h2c->fp_priorities.data[h2c->fp_priorities.len] = (uint8_t)stream->node->id; 20 | + h2c->fp_priorities.data[h2c->fp_priorities.len+1] = (uint8_t)excl; 21 | + h2c->fp_priorities.data[h2c->fp_priorities.len+2] = (uint8_t)depend; 22 | + h2c->fp_priorities.data[h2c->fp_priorities.len+3] = (uint8_t)(weight-1); 23 | + h2c->fp_priorities.len += 4; 24 | + } 25 | + 26 | return ngx_http_v2_state_header_block(h2c, pos, end); 27 | 28 | rst_stream: 29 | @@ -1775,6 +1785,9 @@ ngx_http_v2_state_process_header(ngx_http_v2_connection_t *h2c, u_char *pos, 30 | } 31 | 32 | if (header->name.data[0] == ':') { 33 | + if (!h2c->fp_fingerprinted && h2c->fp_pseudoheaders.len < 32 && header->name.len > 1) 34 | + h2c->fp_pseudoheaders.data[h2c->fp_pseudoheaders.len++] = header->name.data[1]; 35 | + 36 | rc = ngx_http_v2_pseudo_header(r, header); 37 | 38 | if (rc == NGX_OK) { 39 | @@ -2194,6 +2207,12 @@ ngx_http_v2_state_settings_params(ngx_http_v2_connection_t *h2c, u_char *pos, 40 | ngx_log_debug2(NGX_LOG_DEBUG_HTTP, h2c->connection->log, 0, 41 | "http2 setting %ui:%ui", id, value); 42 | 43 | + if (!h2c->fp_fingerprinted && h2c->fp_settings.len < 32) { 44 | + h2c->fp_settings.data[h2c->fp_settings.len] = (uint8_t)id; 45 | + *(uint32_t*)(h2c->fp_settings.data + h2c->fp_settings.len + 1) = (uint32_t)value; 46 | + h2c->fp_settings.len += 5; 47 | + } 48 | + 49 | switch (id) { 50 | 51 | case NGX_HTTP_V2_INIT_WINDOW_SIZE_SETTING: 52 | @@ -2478,6 +2497,9 @@ ngx_http_v2_state_window_update(ngx_http_v2_connection_t *h2c, u_char *pos, 53 | } 54 | 55 | h2c->send_window += window; 56 | + if (!h2c->fp_fingerprinted) { 57 | + h2c->fp_windowupdate = window; 58 | + } 59 | 60 | while (!ngx_queue_empty(&h2c->waiting)) { 61 | q = ngx_queue_head(&h2c->waiting); 62 | diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h 63 | index 6751b3026..60a68a0fd 100644 64 | --- a/src/http/v2/ngx_http_v2.h 65 | +++ b/src/http/v2/ngx_http_v2.h 66 | @@ -17,6 +17,8 @@ 67 | 68 | #define NGX_HTTP_V2_STATE_BUFFER_SIZE 16 69 | 70 | +#define NGX_FP_V2_BUFFER_SIZE 32 71 | + 72 | #define NGX_HTTP_V2_DEFAULT_FRAME_SIZE (1 << 14) 73 | #define NGX_HTTP_V2_MAX_FRAME_SIZE ((1 << 24) - 1) 74 | 75 | @@ -121,6 +123,12 @@ typedef struct { 76 | } ngx_http_v2_hpack_t; 77 | 78 | 79 | +typedef struct { 80 | + u_char data[NGX_FP_V2_BUFFER_SIZE]; 81 | + size_t len; 82 | +} ngx_http_v2_fp_fixed_str_t; 83 | + 84 | + 85 | struct ngx_http_v2_connection_s { 86 | ngx_connection_t *connection; 87 | ngx_http_connection_t *http_connection; 88 | @@ -168,6 +176,13 @@ struct ngx_http_v2_connection_s { 89 | unsigned table_update:1; 90 | unsigned blocked:1; 91 | unsigned goaway:1; 92 | + 93 | + unsigned fp_fingerprinted:1; 94 | + ngx_http_v2_fp_fixed_str_t 
fp_settings, 95 | + fp_priorities, 96 | + fp_pseudoheaders; 97 | + ngx_uint_t fp_windowupdate; 98 | + ngx_str_t fp_str; 99 | }; 100 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/http2/src/nginx_ssl_fingerprint.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | static inline 10 | unsigned char *append_uint8(unsigned char* dst, uint8_t n) 11 | { 12 | if (n < 10) { 13 | dst[0] = n + '0'; 14 | dst++; 15 | } else if (n < 100) { 16 | dst[1] = n % 10 + '0'; 17 | dst[0] = n / 10 + '0'; 18 | dst += 2; 19 | } else { 20 | dst[2] = n % 10 + '0'; 21 | n /= 10; 22 | dst[1] = n % 10 + '0'; 23 | dst[0] = n / 10 + '0'; 24 | dst += 3; 25 | } 26 | 27 | return dst; 28 | } 29 | 30 | static inline 31 | unsigned char *append_uint16(unsigned char* dst, uint16_t n) 32 | { 33 | if (n < 10) { 34 | dst[0] = n + '0'; 35 | dst++; 36 | } else if (n < 100) { 37 | dst[1] = n % 10 + '0'; 38 | dst[0] = n / 10 + '0'; 39 | dst += 2; 40 | } else if (n < 1000) { 41 | dst[2] = n % 10 + '0'; 42 | n /= 10; 43 | dst[1] = n % 10 + '0'; 44 | dst[0] = n / 10 + '0'; 45 | dst += 3; 46 | } else if (n < 10000) { 47 | dst[3] = n % 10 + '0'; 48 | n /= 10; 49 | dst[2] = n % 10 + '0'; 50 | n /= 10; 51 | dst[1] = n % 10 + '0'; 52 | dst[0] = n / 10 + '0'; 53 | dst += 4; 54 | } else { 55 | dst[4] = n % 10 + '0'; 56 | n /= 10; 57 | dst[3] = n % 10 + '0'; 58 | n /= 10; 59 | dst[2] = n % 10 + '0'; 60 | n /= 10; 61 | dst[1] = n % 10 + '0'; 62 | dst[0] = n / 10 + '0'; 63 | dst += 5; 64 | } 65 | 66 | return dst; 67 | } 68 | 69 | static inline 70 | unsigned char *append_uint32(unsigned char* dst, uint32_t n) 71 | { 72 | if (n < 10) { 73 | dst[0] = n + '0'; 74 | dst++; 75 | } else if (n < 100) { 76 | dst[1] = n % 10 + '0'; 77 | dst[0] = n / 10 + '0'; 78 | dst += 2; 79 | } else if (n < 1000) { 80 | dst[2] = n % 10 + '0'; 81 | n /= 10; 82 | dst[1] = n % 10 + '0'; 83 | dst[0] = n / 10 + '0'; 84 | dst += 3; 85 | } else if (n < 10000) { 86 | dst[3] = n % 10 + '0'; 87 | n /= 10; 88 | dst[2] = n % 10 + '0'; 89 | n /= 10; 90 | dst[1] = n % 10 + '0'; 91 | dst[0] = n / 10 + '0'; 92 | dst += 4; 93 | } else if (n < 100000) { 94 | dst[4] = n % 10 + '0'; 95 | n /= 10; 96 | dst[3] = n % 10 + '0'; 97 | n /= 10; 98 | dst[2] = n % 10 + '0'; 99 | n /= 10; 100 | dst[1] = n % 10 + '0'; 101 | dst[0] = n / 10 + '0'; 102 | dst += 5; 103 | } else if (n < 1000000) { 104 | dst[5] = n % 10 + '0'; 105 | n /= 10; 106 | dst[4] = n % 10 + '0'; 107 | n /= 10; 108 | dst[3] = n % 10 + '0'; 109 | n /= 10; 110 | dst[2] = n % 10 + '0'; 111 | n /= 10; 112 | dst[1] = n % 10 + '0'; 113 | dst[0] = n / 10 + '0'; 114 | dst += 6; 115 | } else if (n < 10000000) { 116 | dst[6] = n % 10 + '0'; 117 | n /= 10; 118 | dst[5] = n % 10 + '0'; 119 | n /= 10; 120 | dst[4] = n % 10 + '0'; 121 | n /= 10; 122 | dst[3] = n % 10 + '0'; 123 | n /= 10; 124 | dst[2] = n % 10 + '0'; 125 | n /= 10; 126 | dst[1] = n % 10 + '0'; 127 | dst[0] = n / 10 + '0'; 128 | dst += 7; 129 | } else if (n < 100000000) { 130 | dst[7] = n % 10 + '0'; 131 | n /= 10; 132 | dst[6] = n % 10 + '0'; 133 | n /= 10; 134 | dst[5] = n % 10 + '0'; 135 | n /= 10; 136 | dst[4] = n % 10 + '0'; 137 | n /= 10; 138 | dst[3] = n % 10 + '0'; 139 | n /= 10; 140 | dst[2] = n % 10 + '0'; 141 | n /= 10; 142 | dst[1] = n % 10 + '0'; 143 | dst[0] = n / 10 + '0'; 144 | dst += 8; 145 | } else if (n < 1000000000) { 146 | dst[8] = n % 10 + '0'; 147 | n /= 10; 148 | dst[7] = n % 10 + '0'; 
149 | n /= 10; 150 | dst[6] = n % 10 + '0'; 151 | n /= 10; 152 | dst[5] = n % 10 + '0'; 153 | n /= 10; 154 | dst[4] = n % 10 + '0'; 155 | n /= 10; 156 | dst[3] = n % 10 + '0'; 157 | n /= 10; 158 | dst[2] = n % 10 + '0'; 159 | n /= 10; 160 | dst[1] = n % 10 + '0'; 161 | dst[0] = n / 10 + '0'; 162 | dst += 9; 163 | } else { 164 | dst[9] = n % 10 + '0'; 165 | n /= 10; 166 | dst[8] = n % 10 + '0'; 167 | n /= 10; 168 | dst[7] = n % 10 + '0'; 169 | n /= 10; 170 | dst[6] = n % 10 + '0'; 171 | n /= 10; 172 | dst[5] = n % 10 + '0'; 173 | n /= 10; 174 | dst[4] = n % 10 + '0'; 175 | n /= 10; 176 | dst[3] = n % 10 + '0'; 177 | n /= 10; 178 | dst[2] = n % 10 + '0'; 179 | n /= 10; 180 | dst[1] = n % 10 + '0'; 181 | dst[0] = n / 10 + '0'; 182 | dst += 10; 183 | } 184 | 185 | return dst; 186 | } 187 | 188 | /** 189 | * Params: 190 | * c and h2c should be a valid pointers 191 | * 192 | * Returns: 193 | * NGX_OK -- h2c->fp_str is set 194 | * NGX_ERROR -- something went wrong 195 | */ 196 | int ngx_http2_fingerprint(ngx_connection_t *c, ngx_http_v2_connection_t *h2c) 197 | { 198 | unsigned char *pstr = NULL; 199 | unsigned short n = 0; 200 | size_t i; 201 | 202 | if (h2c->fp_str.len > 0) { 203 | return NGX_OK; 204 | } 205 | 206 | n = 4 + h2c->fp_settings.len * 3 207 | + 10 + h2c->fp_priorities.len * 2 208 | + h2c->fp_pseudoheaders.len * 2; 209 | 210 | h2c->fp_str.data = ngx_pnalloc(c->pool, n); 211 | if (h2c->fp_str.data == NULL) { 212 | /** Else we break a stream */ 213 | return NGX_ERROR; 214 | } 215 | pstr = h2c->fp_str.data; 216 | 217 | ngx_log_debug(NGX_LOG_DEBUG_EVENT, c->log, 0, "ngx_http2_fingerprint: alloc bytes: [%d]\\n", n); 218 | 219 | /* setting */ 220 | for (i = 0; i < h2c->fp_settings.len; i+=5) { 221 | pstr = append_uint8(pstr, h2c->fp_settings.data[i]); 222 | *pstr++ = ':'; 223 | pstr = append_uint32(pstr, *(uint32_t*)(h2c->fp_settings.data+i+1)); 224 | *pstr++ = ';'; 225 | } 226 | *(pstr-1) = '|'; 227 | 228 | /* windows update */ 229 | pstr = append_uint32(pstr, h2c->fp_windowupdate); 230 | *pstr++ = '|'; 231 | 232 | /* priorities */ 233 | for (i = 0; i < h2c->fp_priorities.len; i+=4) { 234 | pstr = append_uint8(pstr, h2c->fp_priorities.data[i]); 235 | *pstr++ = ':'; 236 | pstr = append_uint8(pstr, h2c->fp_priorities.data[i+1]); 237 | *pstr++ = ':'; 238 | pstr = append_uint8(pstr, h2c->fp_priorities.data[i+2]); 239 | *pstr++ = ':'; 240 | pstr = append_uint16(pstr, (uint16_t)h2c->fp_priorities.data[i+3]+1); 241 | *pstr++ = ','; 242 | } 243 | *(pstr-1) = '|'; 244 | 245 | /* fp_pseudoheaders */ 246 | for (i = 0; i < h2c->fp_pseudoheaders.len; i++) { 247 | *pstr++ = h2c->fp_pseudoheaders.data[i]; 248 | *pstr++ = ','; 249 | } 250 | 251 | /* null terminator */ 252 | *--pstr = 0; 253 | 254 | h2c->fp_str.len = pstr - h2c->fp_str.data; 255 | 256 | h2c->fp_fingerprinted = 1; 257 | 258 | ngx_log_debug(NGX_LOG_DEBUG_EVENT, c->log, 0, "ngx_http2_fingerprint: http2 fingerprint: [%V], len=[%d]\\n", &h2c->fp_str, h2c->fp_str.len); 259 | 260 | return NGX_OK; 261 | } 262 | -------------------------------------------------------------------------------- /openresty-ssl-ja3/http2/src/nginx_ssl_fingerprint.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Obj: nginx_ssl_fingerprint.c 3 | */ 4 | 5 | #ifndef NGINX_SSL_FINGERPRINT_H_ 6 | #define NGINX_SSL_FINGERPRINT_H_ 1 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | int ngx_http2_fingerprint(ngx_connection_t *c, ngx_http_v2_connection_t *h2c); 13 | 14 | #endif /** NGINX_SSL_FINGERPRINT_H_ */ 15 | 
-------------------------------------------------------------------------------- /openresty-ssl-ja3/http2/src/ngx_http_ssl_fingerprint_module.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | static ngx_int_t ngx_http_ssl_fingerprint_init(ngx_conf_t *cf); 8 | static ngx_int_t ngx_http_http2_fingerprint(ngx_http_request_t *r, 9 | ngx_http_variable_value_t *v, uintptr_t data); 10 | 11 | static ngx_http_module_t ngx_http_ssl_fingerprint_module_ctx = { 12 | ngx_http_ssl_fingerprint_init, /* preconfiguration */ 13 | NULL, /* postconfiguration */ 14 | NULL, /* create main configuration */ 15 | NULL, /* init main configuration */ 16 | NULL, /* create server configuration */ 17 | NULL, /* merge server configuration */ 18 | NULL, /* create location configuration */ 19 | NULL /* merge location configuration */ 20 | }; 21 | 22 | ngx_module_t ngx_http_ssl_fingerprint_module = { 23 | NGX_MODULE_V1, 24 | &ngx_http_ssl_fingerprint_module_ctx, /* module context */ 25 | NULL, /* module directives */ 26 | NGX_HTTP_MODULE, /* module type */ 27 | NULL, /* init master */ 28 | NULL, /* init module */ 29 | NULL, /* init process */ 30 | NULL, /* init thread */ 31 | NULL, /* exit thread */ 32 | NULL, /* exit process */ 33 | NULL, /* exit master */ 34 | NGX_MODULE_V1_PADDING}; 35 | 36 | static ngx_http_variable_t ngx_http_ssl_fingerprint_variables_list[] = { 37 | {ngx_string("http2_fingerprint"), NULL, ngx_http_http2_fingerprint, 38 | 0, NGX_HTTP_VAR_NOCACHEABLE, 0}, 39 | ngx_http_null_variable 40 | }; 41 | 42 | static ngx_int_t 43 | ngx_http_http2_fingerprint(ngx_http_request_t *r, 44 | ngx_http_variable_value_t *v, uintptr_t data) 45 | { 46 | /* For access.log's map $VAR {}: 47 | * if it's not found, then user could add a defined string */ 48 | v->not_found = 1; 49 | 50 | if (r->stream == NULL) { 51 | return NGX_OK; 52 | } 53 | 54 | if (ngx_http2_fingerprint(r->connection, r->stream->connection) 55 | != NGX_OK) 56 | { 57 | return NGX_ERROR; 58 | } 59 | 60 | v->data = r->stream->connection->fp_str.data; 61 | v->len = r->stream->connection->fp_str.len; 62 | v->not_found = 0; 63 | 64 | return NGX_OK; 65 | } 66 | 67 | static ngx_int_t 68 | ngx_http_ssl_fingerprint_init(ngx_conf_t *cf) 69 | { 70 | ngx_http_variable_t *var, *v; 71 | 72 | for (v = ngx_http_ssl_fingerprint_variables_list; v->name.len; v++) { 73 | var = ngx_http_add_variable(cf, &v->name, v->flags); 74 | if (var == NULL) { 75 | return NGX_ERROR; 76 | } 77 | var->get_handler = v->get_handler; 78 | var->data = v->data; 79 | } 80 | 81 | return NGX_OK; 82 | } 83 | -------------------------------------------------------------------------------- /samba/samba.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #=============================================================================== 3 | # FILE: samba.sh 4 | # 5 | # USAGE: ./samba.sh 6 | # 7 | # DESCRIPTION: Entrypoint for samba docker container 8 | # 9 | # OPTIONS: --- 10 | # REQUIREMENTS: --- 11 | # BUGS: --- 12 | # NOTES: --- 13 | # AUTHOR: David Personette (dperson@gmail.com), 14 | # ORGANIZATION: 15 | # CREATED: 09/28/2014 12:11 16 | # REVISION: 1.0 17 | #=============================================================================== 18 | 19 | set -o nounset # Treat unset variables as an error 20 | 21 | ### charmap: setup character mapping for file/directory names 22 | # Arguments: 23 | # chars) from:to character mappings separated by ',' 24 | # 
Return: configured character mapings 25 | charmap() { local chars="$1" file=/etc/samba/smb.conf 26 | grep -q catia $file || sed -i '/TCP_NODELAY/a \ 27 | \ 28 | vfs objects = catia\ 29 | catia:mappings =\ 30 | 31 | ' $file 32 | 33 | sed -i '/catia:mappings/s| =.*| = '"$chars"'|' $file 34 | } 35 | 36 | ### generic: set a generic config option in a section 37 | # Arguments: 38 | # section) section of config file 39 | # option) raw option 40 | # Return: line added to smb.conf (replaces existing line with same key) 41 | generic() { local section="$1" key="$(sed 's| *=.*||' <<< $2)" \ 42 | value="$(sed 's|[^=]*= *||' <<< $2)" file=/etc/samba/smb.conf 43 | if sed -n '/^\['"$section"'\]/,/^\[/p' $file | grep -qE '^;*\s*'"$key"; then 44 | sed -i '/^\['"$1"'\]/,/^\[/s|^;*\s*\('"$key"' = \).*| \1'"$value"'|' \ 45 | "$file" 46 | else 47 | sed -i '/\['"$section"'\]/a \ '"$key = $value" "$file" 48 | fi 49 | } 50 | 51 | ### global: set a global config option 52 | # Arguments: 53 | # option) raw option 54 | # Return: line added to smb.conf (replaces existing line with same key) 55 | global() { local key="$(sed 's| *=.*||' <<< $1)" \ 56 | value="$(sed 's|[^=]*= *||' <<< $1)" file=/etc/samba/smb.conf 57 | if sed -n '/^\[global\]/,/^\[/p' $file | grep -qE '^;*\s*'"$key"; then 58 | sed -i '/^\[global\]/,/^\[/s|^;*\s*\('"$key"' = \).*| \1'"$value"'|' \ 59 | "$file" 60 | else 61 | sed -i '/\[global\]/a \ '"$key = $value" "$file" 62 | fi 63 | } 64 | 65 | ### include: add a samba config file include 66 | # Arguments: 67 | # file) file to import 68 | include() { local includefile="$1" file=/etc/samba/smb.conf 69 | sed -i "\\|include = $includefile|d" "$file" 70 | echo "include = $includefile" >> "$file" 71 | } 72 | 73 | ### import: import a smbpasswd file 74 | # Arguments: 75 | # file) file to import 76 | # Return: user(s) added to container 77 | import() { local file="$1" name id 78 | while read name id; do 79 | grep -q "^$name:" /etc/passwd || adduser -D -H -u "$id" "$name" 80 | done < <(cut -d: -f1,2 $file | sed 's/:/ /') 81 | pdbedit -i smbpasswd:$file 82 | } 83 | 84 | ### perms: fix ownership and permissions of share paths 85 | # Arguments: 86 | # none) 87 | # Return: result 88 | perms() { local i file=/etc/samba/smb.conf 89 | for i in $(awk -F ' = ' '/ path = / {print $2}' $file); do 90 | chown -Rh smbuser. $i 91 | find $i -type d ! -perm 775 -exec chmod 775 {} \; 92 | find $i -type f ! 
-perm 0664 -exec chmod 0664 {} \; 93 | done 94 | } 95 | export -f perms 96 | 97 | ### recycle: disable recycle bin 98 | # Arguments: 99 | # none) 100 | # Return: result 101 | recycle() { local file=/etc/samba/smb.conf 102 | sed -i '/recycle:/d; /vfs objects/s/ recycle / /' $file 103 | } 104 | 105 | ### share: Add share 106 | # Arguments: 107 | # share) share name 108 | # path) path to share 109 | # browsable) 'yes' or 'no' 110 | # readonly) 'yes' or 'no' 111 | # guest) 'yes' or 'no' 112 | # users) list of allowed users 113 | # admins) list of admin users 114 | # writelist) list of users that can write to a RO share 115 | # comment) description of share 116 | # Return: result 117 | share() { local share="$1" path="$2" browsable="${3:-yes}" ro="${4:-yes}" \ 118 | guest="${5:-yes}" users="${6:-""}" admins="${7:-""}" \ 119 | writelist="${8:-""}" comment="${9:-""}" file=/etc/samba/smb.conf 120 | sed -i "/\\[$share\\]/,/^\$/d" $file 121 | echo "[$share]" >>$file 122 | echo " path = $path" >>$file 123 | echo " browsable = $browsable" >>$file 124 | echo " read only = $ro" >>$file 125 | echo " guest ok = $guest" >>$file 126 | [[ ${VETO:-yes} == no ]] || { 127 | echo -n " veto files = /.apdisk/.DS_Store/.TemporaryItems/" >>$file 128 | echo -n ".Trashes/desktop.ini/ehthumbs.db/Network Trash Folder/" >>$file 129 | echo "Temporary Items/Thumbs.db/" >>$file 130 | echo " delete veto files = yes" >>$file 131 | } 132 | [[ ${users:-""} && ! ${users:-""} == all ]] && 133 | echo " valid users = $(tr ',' ' ' <<< $users)" >>$file 134 | [[ ${admins:-""} && ! ${admins:-""} =~ none ]] && 135 | echo " admin users = $(tr ',' ' ' <<< $admins)" >>$file 136 | [[ ${writelist:-""} && ! ${writelist:-""} =~ none ]] && 137 | echo " write list = $(tr ',' ' ' <<< $writelist)" >>$file 138 | [[ ${comment:-""} && ! 
${comment:-""} =~ none ]] && 139 | echo " comment = $(tr ',' ' ' <<< $comment)" >>$file 140 | echo "" >>$file 141 | [[ -d $path ]] || mkdir -p $path 142 | } 143 | 144 | ### smb: disable SMB2 minimum 145 | # Arguments: 146 | # none) 147 | # Return: result 148 | smb() { local file=/etc/samba/smb.conf 149 | sed -i 's/\([^#]*min protocol *=\).*/\1 LANMAN1/' $file 150 | } 151 | 152 | ### user: add a user 153 | # Arguments: 154 | # name) for user 155 | # password) for user 156 | # id) for user 157 | # group) for user 158 | # gid) for group 159 | # Return: user added to container 160 | user() { local name="$1" passwd="$2" id="${3:-""}" group="${4:-""}" \ 161 | gid="${5:-""}" 162 | [[ "$group" ]] && { grep -q "^$group:" /etc/group || 163 | addgroup ${gid:+--gid $gid }"$group"; } 164 | grep -q "^$name:" /etc/passwd || 165 | adduser -D -H ${group:+-G $group} ${id:+-u $id} "$name" 166 | echo -e "$passwd\n$passwd" | smbpasswd -s -a "$name" 167 | } 168 | 169 | ### workgroup: set the workgroup 170 | # Arguments: 171 | # workgroup) the name to set 172 | # Return: configure the correct workgroup 173 | workgroup() { local workgroup="$1" file=/etc/samba/smb.conf 174 | sed -i 's|^\( *workgroup = \).*|\1'"$workgroup"'|' $file 175 | } 176 | 177 | ### widelinks: allow access wide symbolic links 178 | # Arguments: 179 | # none) 180 | # Return: result 181 | widelinks() { local file=/etc/samba/smb.conf \ 182 | replace='\1\n wide links = yes\n unix extensions = no' 183 | sed -i 's/\(follow symlinks = yes\)/'"$replace"'/' $file 184 | } 185 | 186 | ### usage: Help 187 | # Arguments: 188 | # none) 189 | # Return: Help text 190 | usage() { local RC="${1:-0}" 191 | echo "Usage: ${0##*/} [-opt] [command] 192 | Options (fields in '[]' are optional, '<>' are required): 193 | -h This help 194 | -c \"\" setup character mapping for file/directory names 195 | required arg: \"\" character mappings separated by ',' 196 | -G \"\" Provide generic section option for smb.conf 197 | required arg: \"
\" - IE: \"share\" 198 | required arg: \"\" - IE: \"log level = 2\" 199 | -g \"\" Provide global option for smb.conf 200 | required arg: \"\" - IE: \"log level = 2\" 201 | -i \"\" Import smbpassword 202 | required arg: \"\" - full file path in container 203 | -n Start the 'nmbd' daemon to advertise the shares 204 | -p Set ownership and permissions on the shares 205 | -r Disable recycle bin for shares 206 | -S Disable SMB2 minimum version 207 | -s \"[;browse;readonly;guest;users;admins;writelist;comment]\" 208 | Configure a share 209 | required arg: \";\" 210 | is how it's called for clients 211 | path to share 212 | NOTE: for the default value, just leave blank 213 | [browsable] default:'yes' or 'no' 214 | [readonly] default:'yes' or 'no' 215 | [guest] allowed default:'yes' or 'no' 216 | NOTE: for user lists below, usernames are separated by ',' 217 | [users] allowed default:'all' or list of allowed users 218 | [admins] allowed default:'none' or list of admin users 219 | [writelist] list of users that can write to a RO share 220 | [comment] description of share 221 | -u \"[;ID;group;GID]\" Add a user 222 | required arg: \";\" 223 | for user 224 | for user 225 | [ID] for user 226 | [group] for user 227 | [GID] for group 228 | -w \"\" Configure the workgroup (domain) samba should use 229 | required arg: \"\" 230 | for samba 231 | -W Allow access wide symbolic links 232 | -I Add an include option at the end of the smb.conf 233 | required arg: \"\" 234 | in the container, e.g. a bind mount 235 | 236 | The 'command' (if provided and valid) will be run instead of samba 237 | " >&2 238 | exit $RC 239 | } 240 | 241 | [[ "${USERID:-""}" =~ ^[0-9]+$ ]] && usermod -u $USERID -o smbuser 242 | [[ "${GROUPID:-""}" =~ ^[0-9]+$ ]] && groupmod -g $GROUPID -o smb 243 | 244 | while getopts ":hc:G:g:i:nprs:Su:Ww:I:" opt; do 245 | case "$opt" in 246 | h) usage ;; 247 | c) charmap "$OPTARG" ;; 248 | G) eval generic $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $OPTARG) ;; 249 | g) global "$OPTARG" ;; 250 | i) import "$OPTARG" ;; 251 | n) NMBD="true" ;; 252 | p) PERMISSIONS="true" ;; 253 | r) recycle ;; 254 | s) eval share $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $OPTARG) ;; 255 | S) smb ;; 256 | u) eval user $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $OPTARG) ;; 257 | w) workgroup "$OPTARG" ;; 258 | W) widelinks ;; 259 | I) include "$OPTARG" ;; 260 | "?") echo "Unknown option: -$OPTARG"; usage 1 ;; 261 | ":") echo "No argument value for option: -$OPTARG"; usage 2 ;; 262 | esac 263 | done 264 | shift $(( OPTIND - 1 )) 265 | 266 | [[ "${CHARMAP:-""}" ]] && charmap "$CHARMAP" 267 | while read i; do 268 | eval generic $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $i) 269 | done < <(env | awk '/^GENERIC[0-9=_]/ {sub (/^[^=]*=/, "", $0); print}') 270 | while read i; do 271 | global "$i" 272 | done < <(env | awk '/^GLOBAL[0-9=_]/ {sub (/^[^=]*=/, "", $0); print}') 273 | [[ "${IMPORT:-""}" ]] && import "$IMPORT" 274 | [[ "${RECYCLE:-""}" ]] && recycle 275 | while read i; do 276 | eval share $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $i) 277 | done < <(env | awk '/^SHARE[0-9=_]/ {sub (/^[^=]*=/, "", $0); print}') 278 | [[ "${SMB:-""}" ]] && smb 279 | while read i; do 280 | eval user $(sed 's/^/"/; s/$/"/; s/;/" "/g' <<< $i) 281 | done < <(env | awk '/^USER[0-9=_]/ {sub (/^[^=]*=/, "", $0); print}') 282 | [[ "${WORKGROUP:-""}" ]] && workgroup "$WORKGROUP" 283 | [[ "${WIDELINKS:-""}" ]] && widelinks 284 | [[ "${INCLUDE:-""}" ]] && include "$INCLUDE" 285 | [[ "${PERMISSIONS:-""}" ]] && perms & 286 | 287 | if [[ $# -ge 1 && -x $(which $1 2>&-) ]]; then 
288 | exec "$@" 289 | elif [[ $# -ge 1 ]]; then 290 | echo "ERROR: command not found: $1" 291 | exit 13 292 | elif ps -ef | egrep -v grep | grep -q smbd; then 293 | echo "Service already running, please restart container to apply changes" 294 | else 295 | [[ ${NMBD:-""} ]] && ionice -c 3 nmbd -D 296 | exec ionice -c 3 smbd -F --debug-stdout --no-process-group 3 | Date: Sat, 8 Oct 2022 17:01:51 +0200 4 | Subject: [PATCH 1/1] reduce hops 5 | 6 | --- 7 | Dockerfile | 4 ++++ 8 | torrc-dist | 16 ++-------------- 9 | 2 files changed, 6 insertions(+), 14 deletions(-) 10 | 11 | diff --git a/Dockerfile b/Dockerfile 12 | index 7046cb1..040d4c5 100644 13 | --- a/Dockerfile 14 | +++ b/Dockerfile 15 | @@ -46,6 +46,8 @@ WORKDIR /tor-$VERSION/ 16 | 17 | COPY --from=preparer /tor-$VERSION/ ./ 18 | 19 | +RUN sed -i 's/DEFAULT_ROUTE_LEN 2/DEFAULT_ROUTE_LEN 1/g' ./src/core/or/or.h 20 | + 21 | RUN ./configure --sysconfdir=/etc --datadir=/var/lib 22 | RUN make -j$(nproc) 23 | RUN make install 24 | @@ -68,6 +70,8 @@ COPY --from=builder /usr/lib /usr/lib 25 | # Copy all the TOR files 26 | COPY --from=builder /usr/local/bin/tor* /usr/local/bin/ 27 | 28 | +COPY override/ / 29 | + 30 | # NOTE: Default GID == UID == 1000 31 | RUN adduser --disabled-password \ 32 | --home "$DIR/" \ 33 | diff --git a/torrc-dist b/torrc-dist 34 | index 0f82b26..bdc90b1 100644 35 | --- a/torrc-dist 36 | +++ b/torrc-dist 37 | @@ -1,19 +1,7 @@ 38 | # This is a sample configuration 39 | 40 | # Ports 41 | -SocksPort 127.0.0.1:9050 42 | +SocksPort 0.0.0.0:9050 43 | ControlPort 127.0.0.1:9051 44 | 45 | -# Default Password (Please change) 46 | -# Refer to https://github.com/lncm/docker-tor#generating-tor-passwords 47 | -HashedControlPassword 16:C171CBB3DECE55156066E90509F28E3E5FDFACFB6211701926A200E70D 48 | - 49 | -# SSH v3 50 | -HiddenServiceDir /var/lib/tor/ssh 51 | -HiddenServiceVersion 3 52 | -HiddenServicePort 22 127.0.0.1:22 53 | - 54 | -# Bitcoin P2P v3 55 | -HiddenServiceDir /var/lib/tor/bitcoin-p2p 56 | -HiddenServiceVersion 3 57 | -HiddenServicePort 8333 127.0.0.1:8333 58 | +ExitNodes {NL} 59 | \ No newline at end of file 60 | -- 61 | 2.38.0 62 | 63 | -------------------------------------------------------------------------------- /wireproxy-debian/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/whyvl/wireproxy:latest as build 2 | FROM debian:latest 3 | RUN apt-get update && apt-get install -y --no-install-recommends \ 4 | && apt-get install -y jq curl \ 5 | && rm -rf /var/lib/apt/lists/* 6 | COPY --from=build /usr/bin/wireproxy /usr/bin/wireproxy 7 | ENTRYPOINT [ "/usr/bin/wireproxy" ] 8 | CMD [ "--config", "/etc/wireproxy/config" ] 9 | --------------------------------------------------------------------------------
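A minimal invocation sketch for the samba entrypoint above, assuming the script is baked into an image as its ENTRYPOINT; the image name, host path, user, password and share names are placeholders, and only the flags (-w, -u, -s, -p, -n) and their field order come from the usage() text in samba/samba.sh:

    docker run -d --name samba -p 445:445 \
      -v /srv/public:/public \
      example/samba \
      -w "WORKGROUP" \
      -u "alice;s3cret" \
      -s "public;/public;yes;no;no;alice" \
      -p -n

Here the -s string maps onto the documented fields: share name "public", path /public, browsable yes, read only no, guest no, valid user alice; -p fixes ownership and permissions on the share path, and -n additionally starts nmbd so the share is advertised on the network.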