├── .github ├── FUNDING.yml ├── assets │ └── mkbrr-dark.png ├── dependabot.yml └── workflows │ └── release.yml ├── .gitignore ├── .goreleaser.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── ci.Dockerfile ├── cmd ├── check.go ├── create.go ├── inspect.go ├── modify.go ├── root.go ├── update.go └── version.go ├── docs └── benchmarks │ ├── benchmark_comparison.png │ ├── benchmark_plots.py │ ├── consistency_comparison.png │ ├── requirements.txt │ └── speed_comparison.png ├── examples ├── batch.yaml └── presets.yaml ├── go.mod ├── go.sum ├── internal ├── preset │ ├── preset.go │ └── preset_test.go ├── torrent │ ├── batch.go │ ├── batch_test.go │ ├── create.go │ ├── create_test.go │ ├── display.go │ ├── hasher.go │ ├── hasher_large_test.go │ ├── hasher_test.go │ ├── ignore.go │ ├── modify.go │ ├── modify_test.go │ ├── progress.go │ ├── seasonfinder.go │ ├── seasonfinder_test.go │ ├── types.go │ ├── verify.go │ └── verify_test.go └── trackers │ ├── trackers.go │ └── trackers_test.go ├── main.go ├── schema ├── batch.json └── presets.json └── utils └── brr.sh /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [s0up4200, zze0s] 2 | polar: soup 3 | -------------------------------------------------------------------------------- /.github/assets/mkbrr-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autobrr/mkbrr/18b58846c7d408a291efa85e2c52c0f4cba644c2/.github/assets/mkbrr-dark.png -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: weekly 7 | day: saturday 8 | time: "07:00" 9 | groups: 10 | github: 11 | patterns: 12 | - "*" 13 | 14 | - package-ecosystem: gomod 15 | directory: / 16 
| schedule: 17 | interval: monthly 18 | groups: 19 | golang: 20 | patterns: 21 | - "*" 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | - "develop" 8 | tags: 9 | - 'v*' 10 | paths-ignore: 11 | - '.github/ISSUE_TEMPLATE/**' 12 | - '.github/images/**' 13 | - 'config.toml' 14 | - 'docker-compose.yml' 15 | - 'Makefile' 16 | - '**.md' 17 | pull_request: 18 | paths-ignore: 19 | - '.github/ISSUE_TEMPLATE/**' 20 | - '.github/images/**' 21 | - 'config.toml' 22 | - 'docker-compose.yml' 23 | - 'Makefile' 24 | - '**.md' 25 | 26 | env: 27 | REGISTRY: ghcr.io 28 | REGISTRY_IMAGE: ghcr.io/${{ github.repository }} 29 | GO_VERSION: "1.24" 30 | 31 | permissions: 32 | contents: write 33 | packages: write 34 | 35 | jobs: 36 | test: 37 | strategy: 38 | fail-fast: true 39 | matrix: 40 | os: [ ubuntu-latest ] 41 | name: Test ${{ matrix.os }} 42 | runs-on: ${{ matrix.os }} 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v4 46 | with: 47 | fetch-depth: 0 48 | 49 | - name: Set up Go 50 | uses: actions/setup-go@v5 51 | with: 52 | go-version: ${{ env.GO_VERSION }} 53 | cache: true 54 | 55 | - name: Run Tests with Race Detection 56 | run: make test-race 57 | 58 | - name: Generate Test Summary 59 | run: go run gotest.tools/gotestsum@latest --junitfile unit-tests.xml --format pkgname 60 | 61 | - name: Test Summary 62 | uses: test-summary/action@v2 63 | with: 64 | paths: "unit-tests.xml" 65 | if: always() 66 | 67 | testother: 68 | strategy: 69 | fail-fast: true 70 | matrix: 71 | os: [ macos-latest, windows-latest ] 72 | name: Test ${{ matrix.os }} 73 | runs-on: ${{ matrix.os }} 74 | env: 75 | GOPATH: ${{ startsWith(matrix.os, 'windows') && 'D:\golang\go' || '' }} 76 | GOCACHE: ${{ startsWith(matrix.os, 'windows') && 'D:\golang\cache' || '' }} 77 | GOMODCACHE: ${{ 
startsWith(matrix.os, 'windows') && 'D:\golang\modcache' || '' }} 78 | USERPROFILE: ${{ startsWith(matrix.os, 'windows') && 'D:\homedir' || '' }} 79 | steps: 80 | - name: Checkout 81 | uses: actions/checkout@v4 82 | with: 83 | fetch-depth: 0 84 | 85 | - name: Set up Go 86 | uses: actions/setup-go@v5 87 | with: 88 | go-version: ${{ env.GO_VERSION }} 89 | cache: true 90 | 91 | - name: Run Tests (MacOS) 92 | if: matrix.os == 'macos-latest' 93 | env: 94 | CGO_ENABLED: 0 95 | run: go test ./... 96 | 97 | - name: Run Tests (Windows) 98 | if: matrix.os == 'windows-latest' 99 | env: 100 | CGO_ENABLED: 0 101 | run: go test -short ./... 102 | 103 | - name: Generate Test Summary 104 | if: matrix.os != 'windows-latest' 105 | run: go run gotest.tools/gotestsum@latest --junitfile unit-tests.xml --format pkgname 106 | 107 | - name: Test Summary 108 | uses: test-summary/action@v2 109 | with: 110 | paths: "unit-tests.xml" 111 | if: always() && matrix.os != 'windows-latest' 112 | 113 | pgo: 114 | name: Generate PGO Profile 115 | runs-on: ubuntu-latest 116 | needs: [ test, testother ] 117 | steps: 118 | - name: Checkout 119 | uses: actions/checkout@v4 120 | with: 121 | fetch-depth: 0 122 | 123 | - name: Set up Go 124 | uses: actions/setup-go@v5 125 | with: 126 | go-version: ${{ env.GO_VERSION }} 127 | cache: true 128 | 129 | # Create test data for profiling 130 | - name: Create test data 131 | shell: bash 132 | run: | 133 | mkdir -p test_data 134 | dd if=/dev/urandom of=test_data/test1.bin bs=1M count=100 135 | dd if=/dev/urandom of=test_data/test2.bin bs=1M count=100 136 | for i in {1..20000}; do dd if=/dev/urandom bs=1023 count=1 of=test_data/file$i.bin >/dev/null 2>&1; done 137 | 138 | - name: Generate Profile 139 | env: 140 | CGO_ENABLED: 0 141 | run: | 142 | # Build initial binary 143 | go build -o mkbrr 144 | 145 | # Run different workload scenarios 146 | ./mkbrr create test_data/test1.bin --cpuprofile=cpu1.pprof 147 | ./mkbrr create test_data/test2.bin --cpuprofile=cpu2.pprof 
148 | ./mkbrr create test_data --cpuprofile=cpu3.pprof 149 | 150 | # Verify profiles exist 151 | if [ ! -f cpu1.pprof ] || [ ! -f cpu2.pprof ] || [ ! -f cpu3.pprof ]; then 152 | echo "Error: One or more profile files not generated" 153 | ls -la *.pprof 154 | exit 1 155 | fi 156 | 157 | # Merge profiles 158 | go tool pprof -proto cpu1.pprof cpu2.pprof cpu3.pprof > cpu.pprof 159 | 160 | # Verify final profile 161 | if [ ! -s cpu.pprof ]; then 162 | echo "Error: Final profile not generated or empty" 163 | exit 1 164 | fi 165 | 166 | - name: Upload pprof 167 | uses: actions/upload-artifact@v4 168 | with: 169 | name: pprof 170 | path: cpu.pprof 171 | 172 | goreleaserbuild: 173 | name: Build distribution binaries 174 | runs-on: ubuntu-latest 175 | needs: [ pgo ] 176 | steps: 177 | - name: Checkout 178 | uses: actions/checkout@v4 179 | with: 180 | fetch-depth: 0 181 | 182 | - name: Set up Go 183 | uses: actions/setup-go@v5 184 | with: 185 | go-version: ${{ env.GO_VERSION }} 186 | cache: true 187 | 188 | - name: Download pprof 189 | uses: actions/download-artifact@v4 190 | with: 191 | name: pprof 192 | path: . 
193 | 194 | - name: Run GoReleaser build 195 | if: github.event_name == 'pull_request' 196 | uses: goreleaser/goreleaser-action@v6 197 | with: 198 | distribution: goreleaser 199 | version: "~> v2" 200 | args: release --clean --skip=validate,publish --parallelism 5 201 | env: 202 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 203 | BUILDER: ${{ github.actor }}@github-actions 204 | 205 | - name: Run GoReleaser build and publish tags 206 | if: startsWith(github.ref, 'refs/tags/') 207 | uses: goreleaser/goreleaser-action@v6 208 | with: 209 | distribution: goreleaser 210 | version: "~> v2" 211 | args: release --clean 212 | env: 213 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 214 | BUILDER: ${{ github.actor }}@github-actions 215 | 216 | - name: Upload assets 217 | uses: actions/upload-artifact@v4 218 | with: 219 | name: mkbrr 220 | path: | 221 | dist/*.tar.gz 222 | dist/*.zip 223 | dist/*.deb 224 | dist/*.rpm 225 | 226 | docker: 227 | name: Build Docker images 228 | runs-on: ubuntu-latest 229 | strategy: 230 | fail-fast: true 231 | matrix: 232 | platform: 233 | - linux/386 234 | - linux/amd64 235 | - linux/amd64/v2 236 | - linux/amd64/v3 237 | - linux/arm/v6 238 | - linux/arm/v7 239 | - linux/arm64 240 | # - linux/mips64le 241 | # - linux/mips64 242 | - linux/ppc64le 243 | # - linux/riscv64 244 | - linux/s390x 245 | needs: [ pgo, test ] 246 | steps: 247 | - name: Checkout 248 | uses: actions/checkout@v4 249 | with: 250 | fetch-depth: 0 251 | 252 | - name: Download pprof profile 253 | uses: actions/download-artifact@v4 254 | with: 255 | name: pprof 256 | 257 | - name: Login to GitHub Container Registry 258 | uses: docker/login-action@v3 259 | with: 260 | registry: ${{ env.REGISTRY }} 261 | username: ${{ github.repository_owner }} 262 | password: ${{ secrets.GITHUB_TOKEN }} 263 | 264 | - name: Extract metadata 265 | id: meta 266 | uses: docker/metadata-action@v5 267 | with: 268 | images: ${{ env.REGISTRY_IMAGE }} 269 | tags: | 270 | type=semver,pattern={{version}},prefix=v 
271 | type=semver,pattern={{major}}.{{minor}},prefix=v 272 | type=ref,event=branch 273 | type=ref,event=pr 274 | flavor: | 275 | latest=auto 276 | 277 | - name: Set up QEMU 278 | uses: docker/setup-qemu-action@v3 279 | 280 | - name: Set up Docker Buildx 281 | uses: docker/setup-buildx-action@v3 282 | 283 | - name: Supported Architectures 284 | run: docker buildx ls 285 | 286 | - name: Build and publish image 287 | id: docker_build 288 | uses: docker/build-push-action@v6 289 | with: 290 | context: . 291 | file: './ci.Dockerfile' 292 | platforms: ${{ matrix.platform }} 293 | outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=${{ (github.event.pull_request.head.repo.full_name == github.repository || github.event_name != 'pull_request') && 'true' || 'false' }} 294 | labels: ${{ steps.meta.outputs.labels }} 295 | build-args: | 296 | BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }} 297 | VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} 298 | REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }} 299 | cache-from: type=gha 300 | cache-to: type=gha,mode=max 301 | provenance: false 302 | 303 | - name: Export image digest 304 | id: digest-prep 305 | run: | 306 | mkdir -p /tmp/digests 307 | digest="${{ steps.docker_build.outputs.digest }}" 308 | echo "manifest-hash=${digest#sha256:}" >> "$GITHUB_OUTPUT" 309 | touch "/tmp/digests/${digest#sha256:}" 310 | 311 | - name: Upload image digest 312 | uses: actions/upload-artifact@v4 313 | with: 314 | name: docker-digests-${{ steps.digest-prep.outputs.manifest-hash }} 315 | path: /tmp/digests/* 316 | if-no-files-found: error 317 | retention-days: 1 318 | 319 | docker-merge: 320 | name: Publish Docker multi-arch manifest 321 | if: ${{ github.event.pull_request.head.repo.full_name == github.repository || github.event_name != 'pull_request' }} 322 | runs-on: 
ubuntu-latest 323 | needs: [ docker ] 324 | steps: 325 | - name: Download image digests 326 | uses: actions/download-artifact@v4 327 | with: 328 | path: /tmp/digests 329 | pattern: docker-digests-* 330 | merge-multiple: true 331 | 332 | - name: Set up Docker Buildx 333 | uses: docker/setup-buildx-action@v3 334 | 335 | - name: Login to GitHub Container Registry 336 | uses: docker/login-action@v3 337 | with: 338 | registry: ${{ env.REGISTRY }} 339 | username: ${{ github.repository_owner }} 340 | password: ${{ secrets.GITHUB_TOKEN }} 341 | 342 | - name: Extract metadata 343 | id: meta 344 | uses: docker/metadata-action@v5 345 | with: 346 | images: ${{ env.REGISTRY_IMAGE }} 347 | tags: | 348 | type=semver,pattern={{version}},prefix=v 349 | type=semver,pattern={{major}}.{{minor}},prefix=v 350 | type=ref,event=branch 351 | type=ref,event=pr 352 | flavor: | 353 | latest=auto 354 | 355 | - name: Create manifest list and push 356 | working-directory: /tmp/digests 357 | run: | 358 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ 359 | $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) 360 | 361 | - name: Inspect image 362 | run: | 363 | docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.torrent 2 | /build 3 | .DS_Store 4 | /dist 5 | 6 | race_report.log 7 | coverage.txt 8 | coverage.html 9 | test.sh 10 | .idea 11 | *.pprof 12 | test_ant/* 13 | .pgo_profiles/ 14 | .pgo_test_data/ 15 | .repomixignore 16 | repomix-mkbrr.xml 17 | repomix.config.json 18 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | before: 4 | hooks: 5 | - go mod tidy 6 | 7 | builds: 8 | - id: mkbrr 9 | env: 10 | - CGO_ENABLED=0 11 | - BUILDER 12 | flags: 13 | - -pgo=cpu.pprof 14 | goos: 15 | - linux 16 | - windows 17 | - darwin 18 | - freebsd 19 | goarch: 20 | - amd64 21 | - arm 22 | - arm64 23 | goarm: 24 | - 6 25 | ignore: 26 | - goos: windows 27 | goarch: arm 28 | - goos: windows 29 | goarch: arm64 30 | - goos: darwin 31 | goarch: arm 32 | - goos: freebsd 33 | goarch: arm 34 | - goos: freebsd 35 | goarch: arm64 36 | main: ./main.go 37 | binary: mkbrr 38 | ldflags: 39 | - -s -w 40 | - -X main.version={{.Version}} 41 | - -X main.buildTime={{.CommitDate}} 42 | 43 | archives: 44 | - format_overrides: 45 | - goos: windows 46 | format: zip 47 | name_template: >- 48 | {{ .ProjectName }}_ 49 | {{- .Version }}_ 50 | {{- .Os }}_ 51 | {{- if eq .Arch "amd64" }}x86_64 52 | {{- else }}{{ .Arch }}{{ end }} 53 | 54 | release: 55 | prerelease: auto 56 | footer: | 57 | **Full Changelog**: https://github.com/autobrr/mkbrr/compare/{{ .PreviousTag }}...{{ .Tag }} 58 | 59 | checksum: 60 | name_template: "{{ .ProjectName 
}}_{{ .Version }}_checksums.txt" 61 | 62 | changelog: 63 | sort: asc 64 | use: github 65 | filters: 66 | exclude: 67 | - Merge pull request 68 | - Merge remote-tracking branch 69 | - Merge branch 70 | groups: 71 | - title: "New Features" 72 | regexp: "^.*feat[(\\w)]*:+.*$" 73 | order: 0 74 | - title: "Bug fixes" 75 | regexp: "^.*fix[(\\w)]*:+.*$" 76 | order: 10 77 | - title: Other work 78 | order: 999 79 | 80 | nfpms: 81 | - package_name: mkbrr 82 | maintainer: autobrr 83 | description: |- 84 | mkbrr is a tool for creating, inspecting and modifying .torrent files. 85 | formats: 86 | - apk 87 | - deb 88 | - rpm 89 | - archlinux 90 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # build app 2 | FROM golang:1.24-alpine3.21 AS app-builder 3 | 4 | ARG VERSION=dev 5 | ARG REVISION=dev 6 | ARG BUILDTIME 7 | 8 | RUN apk add --no-cache git build-base tzdata 9 | 10 | ENV SERVICE=mkbrr 11 | 12 | WORKDIR /src 13 | 14 | COPY go.mod go.sum ./ 15 | RUN go mod download 16 | 17 | COPY . 
./ 18 | 19 | #ENV GOOS=linux 20 | #ENV CGO_ENABLED=0 21 | 22 | RUN go build -ldflags "-s -w -X main.version=${VERSION} -X main.commit=${REVISION} -X main.date=${BUILDTIME}" -o bin/mkbrr main.go 23 | 24 | # build runner 25 | FROM alpine:latest 26 | 27 | LABEL org.opencontainers.image.source="https://github.com/autobrr/mkbrr" 28 | 29 | ENV HOME="/config" \ 30 | XDG_CONFIG_HOME="/config" \ 31 | XDG_DATA_HOME="/config" 32 | 33 | RUN apk --no-cache add ca-certificates 34 | 35 | WORKDIR /app 36 | 37 | VOLUME /config 38 | 39 | COPY --from=app-builder /src/bin/mkbrr /usr/local/bin/ 40 | 41 | CMD ["/usr/local/bin/mkbrr"] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # binary name 2 | BINARY_NAME=mkbrr 3 | 4 | # go related variables 5 | GO=go 6 | GOBIN=$(shell $(GO) env GOPATH)/bin 7 | 8 | # build variables 9 | BUILD_DIR=build 10 | VERSION=$(shell git describe --tags 2>/dev/null || echo "dev") 11 | BUILD_TIME=$(shell date +%FT%T%z) 12 | LDFLAGS=-ldflags "-X main.version=${VERSION} -X main.buildTime=${BUILD_TIME}" 13 | 14 | # race detector settings 15 | GORACE=log_path=./race_report.log \ 16 | history_size=2 \ 17 | halt_on_error=1 \ 18 | atexit_sleep_ms=2000 19 | 20 | # make all builds and installs 21 | .PHONY: all 22 | all: clean build install 23 | 24 | # build binary 25 | .PHONY: build 26 | build: 27 | @echo "Building ${BINARY_NAME}..." 28 | @mkdir -p ${BUILD_DIR} 29 | CGO_ENABLED=0 $(GO) build ${LDFLAGS} -o ${BUILD_DIR}/${BINARY_NAME} 30 | 31 | # build with PGO 32 | .PHONY: build-pgo 33 | build-pgo: 34 | @echo "Building ${BINARY_NAME} with PGO..." 35 | @if [ ! -f "cpu.pprof" ]; then \ 36 | echo "No PGO profile found. 
Run 'make profile' first."; \ 37 | exit 1; \ 38 | fi 39 | @mkdir -p ${BUILD_DIR} 40 | CGO_ENABLED=0 $(GO) build -pgo=cpu.pprof ${LDFLAGS} -o ${BUILD_DIR}/${BINARY_NAME} 41 | 42 | # generate PGO profile with various workloads 43 | .PHONY: profile 44 | profile: 45 | @echo "Generating PGO profile..." 46 | @mkdir -p test_data 47 | @dd if=/dev/urandom of=test_data/test1.bin bs=1M count=100 48 | @dd if=/dev/urandom of=test_data/test2.bin bs=1M count=100 49 | @go build -o ${BUILD_DIR}/${BINARY_NAME} 50 | @echo "Running profile workload 1: Large file..." 51 | @${BUILD_DIR}/${BINARY_NAME} create test_data/test1.bin --cpuprofile=./cpu1.pprof 52 | @echo "Running profile workload 2: Multiple files..." 53 | @${BUILD_DIR}/${BINARY_NAME} create test_data --cpuprofile=./cpu2.pprof 54 | @echo "Merging profiles..." 55 | @if [ -f "cpu1.pprof" ] && [ -f "cpu2.pprof" ]; then \ 56 | go tool pprof -proto cpu1.pprof cpu2.pprof > cpu.pprof; \ 57 | rm cpu1.pprof cpu2.pprof; \ 58 | echo "Profile generated at cpu.pprof"; \ 59 | else \ 60 | echo "Error: Profile files not generated correctly"; \ 61 | exit 1; \ 62 | fi 63 | @rm -rf test_data 64 | 65 | # install binary in system path 66 | .PHONY: install 67 | install: build 68 | @echo "Installing ${BINARY_NAME}..." 69 | @if [ "$$(id -u)" = "0" ]; then \ 70 | install -m 755 ${BUILD_DIR}/${BINARY_NAME} /usr/local/bin/; \ 71 | else \ 72 | install -m 755 ${BUILD_DIR}/${BINARY_NAME} ${GOBIN}/; \ 73 | fi 74 | 75 | # install binary with PGO optimization 76 | .PHONY: install-pgo 77 | install-pgo: 78 | @echo "Installing ${BINARY_NAME} with PGO..." 79 | @if [ ! -f "cpu.pprof" ]; then \ 80 | echo "No PGO profile found. 
Run 'make profile' first."; \ 81 | exit 1; \ 82 | fi 83 | @mkdir -p ${BUILD_DIR} 84 | CGO_ENABLED=0 $(GO) build -pgo=cpu.pprof ${LDFLAGS} -o ${BUILD_DIR}/${BINARY_NAME} 85 | @if [ "$$(id -u)" = "0" ]; then \ 86 | install -m 755 ${BUILD_DIR}/${BINARY_NAME} /usr/local/bin/; \ 87 | else \ 88 | install -m 755 ${BUILD_DIR}/${BINARY_NAME} ${GOBIN}/; \ 89 | fi 90 | 91 | # run all tests (excluding large tests) 92 | .PHONY: test 93 | test: 94 | @echo "Running tests..." 95 | $(GO) test -v ./... 96 | 97 | # run quick tests with race detector (for CI and quick feedback) 98 | .PHONY: test-race-short 99 | test-race-short: 100 | @echo "Running quick tests with race detector..." 101 | GORACE="$(GORACE)" $(GO) test -race -short ./internal/torrent -v 102 | @if [ -f "./race_report.log" ]; then \ 103 | echo "Race conditions detected! Check race_report.log"; \ 104 | cat "./race_report.log"; \ 105 | fi 106 | 107 | # run all tests with race detector (excluding large tests) 108 | .PHONY: test-race 109 | test-race: 110 | @echo "Running tests with race detector..." 111 | GORACE="$(GORACE)" $(GO) test -race ./internal/torrent -v 112 | @if [ -f "./race_report.log" ]; then \ 113 | echo "Race conditions detected! Check race_report.log"; \ 114 | cat "./race_report.log"; \ 115 | fi 116 | 117 | # run large tests (resource intensive) 118 | .PHONY: test-large 119 | test-large: 120 | @echo "Running large tests..." 121 | GORACE="$(GORACE)" $(GO) test -v -tags=large_tests ./internal/torrent 122 | @if [ -f "./race_report.log" ]; then \ 123 | echo "Race conditions detected! Check race_report.log"; \ 124 | cat "./race_report.log"; \ 125 | fi 126 | 127 | # run tests with coverage 128 | .PHONY: test-coverage 129 | test-coverage: 130 | @echo "Running tests with coverage..." 131 | GORACE="$(GORACE)" $(GO) test -v -race -coverprofile=coverage.txt -covermode=atomic ./... 132 | $(GO) tool cover -html=coverage.txt -o coverage.html 133 | @if [ -f "./race_report.log" ]; then \ 134 | echo "Race conditions detected! 
Check race_report.log"; \ 135 | cat "./race_report.log"; \ 136 | fi 137 | 138 | # run golangci-lint 139 | .PHONY: lint 140 | lint: 141 | @echo "Running linter..." 142 | @if ! command -v golangci-lint &> /dev/null; then \ 143 | echo "Installing golangci-lint..."; \ 144 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest; \ 145 | fi 146 | golangci-lint run 147 | 148 | # clean build artifacts 149 | .PHONY: clean 150 | clean: 151 | @echo "Cleaning..." 152 | @rm -rf ${BUILD_DIR} 153 | @rm -f coverage.txt coverage.html 154 | 155 | # show help 156 | .PHONY: help 157 | help: 158 | @echo "Available targets:" 159 | @echo " all - Clean, build, and install the binary" 160 | @echo " build - Build the binary" 161 | @echo " build-pgo - Build the binary with PGO optimization" 162 | @echo " install - Install the binary in GOPATH" 163 | @echo " install-pgo - Install the binary with PGO optimization" 164 | @echo " test - Run tests (excluding large tests)" 165 | @echo " test-race-short- Run quick tests with race detector" 166 | @echo " test-race - Run all tests with race detector (excluding large tests)" 167 | @echo " test-large - Run large tests (resource intensive)" 168 | @echo " test-coverage - Run tests with coverage report" 169 | @echo " lint - Run golangci-lint" 170 | @echo " clean - Remove build artifacts" 171 | @echo " help - Show this help" 172 | -------------------------------------------------------------------------------- /ci.Dockerfile: -------------------------------------------------------------------------------- 1 | # build app 2 | FROM --platform=$BUILDPLATFORM golang:1.24-alpine3.21 AS app-builder 3 | RUN apk add --no-cache git tzdata 4 | 5 | ENV SERVICE=mkbrr 6 | 7 | WORKDIR /src 8 | 9 | # Cache Go modules 10 | COPY go.mod go.sum ./ 11 | RUN go mod download 12 | 13 | COPY . 
./ 14 | 15 | ARG VERSION=dev 16 | ARG REVISION=dev 17 | ARG BUILDTIME 18 | ARG TARGETOS 19 | ARG TARGETARCH 20 | ARG TARGETVARIANT 21 | 22 | RUN --network=none --mount=target=. \ 23 | export GOOS=$TARGETOS; \ 24 | export GOARCH=$TARGETARCH; \ 25 | [[ "$GOARCH" == "amd64" ]] && export GOAMD64=$TARGETVARIANT; \ 26 | [[ "$GOARCH" == "arm" ]] && [[ "$TARGETVARIANT" == "v6" ]] && export GOARM=6; \ 27 | [[ "$GOARCH" == "arm" ]] && [[ "$TARGETVARIANT" == "v7" ]] && export GOARM=7; \ 28 | echo $GOARCH $GOOS $GOARM$GOAMD64; \ 29 | go build -pgo=cpu.pprof -ldflags "-s -w -X main.version=${VERSION} -X main.commit=${REVISION} -X main.date=${BUILDTIME}" -o /out/bin/mkbrr main.go 30 | 31 | # build runner 32 | FROM alpine:latest AS runner 33 | 34 | LABEL org.opencontainers.image.source="https://github.com/autobrr/mkbrr" 35 | LABEL org.opencontainers.image.licenses="GPL-2.0-or-later" 36 | LABEL org.opencontainers.image.base.name="alpine:latest" 37 | 38 | ENV HOME="/config" \ 39 | XDG_CONFIG_HOME="/config" \ 40 | XDG_DATA_HOME="/config" 41 | 42 | RUN apk --no-cache add ca-certificates 43 | 44 | WORKDIR /app 45 | VOLUME /config 46 | 47 | COPY --link --from=app-builder /out/bin/mkbrr* /usr/local/bin/ 48 | 49 | CMD ["/usr/local/bin/mkbrr"] -------------------------------------------------------------------------------- /cmd/check.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/fatih/color" 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/autobrr/mkbrr/internal/torrent" 12 | ) 13 | 14 | // checkOptions encapsulates all the flags for the check command 15 | type checkOptions struct { 16 | Verbose bool 17 | Quiet bool 18 | Workers int 19 | } 20 | 21 | var checkOpts checkOptions 22 | 23 | var checkCmd = &cobra.Command{ 24 | Use: "check ", 25 | Short: "Verify the integrity of content against a torrent file", 26 | Long: `Checks if the data in the specified content path 
(file or directory) matches 27 | the pieces defined in the torrent file. This is useful for verifying downloads 28 | or checking data integrity after moving files.`, 29 | Args: cobra.ExactArgs(2), 30 | RunE: runCheck, 31 | DisableFlagsInUseLine: true, 32 | SuggestionsMinimumDistance: 1, 33 | SilenceUsage: true, 34 | } 35 | 36 | func init() { 37 | checkCmd.Flags().SortFlags = false 38 | checkCmd.Flags().BoolVarP(&checkOpts.Verbose, "verbose", "v", false, "show list of bad piece indices") 39 | checkCmd.Flags().BoolVar(&checkOpts.Quiet, "quiet", false, "reduced output mode (prints only completion percentage)") 40 | checkCmd.Flags().IntVar(&checkOpts.Workers, "workers", 0, "number of worker goroutines for verification (0 for automatic)") 41 | checkCmd.SetUsageTemplate(`Usage: 42 | {{.CommandPath}} [flags] 43 | 44 | Arguments: 45 | torrent-file Path to the .torrent file 46 | content-path Path to the directory or file containing the data 47 | 48 | Flags: 49 | {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}} 50 | `) 51 | } 52 | 53 | // validateCheckArgs validates the command arguments and returns the paths 54 | func validateCheckArgs(args []string) (torrentPath string, contentPath string, err error) { 55 | torrentPath = args[0] 56 | contentPath = args[1] 57 | 58 | if _, err := os.Stat(torrentPath); err != nil { 59 | return "", "", fmt.Errorf("invalid torrent file path %q: %w", torrentPath, err) 60 | } 61 | 62 | if _, err := os.Stat(contentPath); err != nil { 63 | return "", "", fmt.Errorf("invalid content path %q: %w", contentPath, err) 64 | } 65 | 66 | return torrentPath, contentPath, nil 67 | } 68 | 69 | // buildVerifyOptions creates the verification options from the command flags 70 | func buildVerifyOptions(opts checkOptions, torrentPath, contentPath string) torrent.VerifyOptions { 71 | return torrent.VerifyOptions{ 72 | TorrentPath: torrentPath, 73 | ContentPath: contentPath, 74 | Verbose: opts.Verbose, 75 | Quiet: opts.Quiet, 76 | Workers: opts.Workers, 77 | } 
78 | } 79 | 80 | // displayCheckResults handles the display of verification results 81 | func displayCheckResults(display *torrent.Display, result *torrent.VerificationResult, duration time.Duration, opts checkOptions) { 82 | display.SetQuiet(opts.Quiet) 83 | 84 | if opts.Quiet { 85 | fmt.Printf("%.2f%%\n", result.Completion) 86 | } else { 87 | display.ShowVerificationResult(result, duration) 88 | } 89 | } 90 | 91 | func runCheck(cmd *cobra.Command, args []string) error { 92 | torrentPath, contentPath, err := validateCheckArgs(args) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | start := time.Now() 98 | 99 | verifyOpts := buildVerifyOptions(checkOpts, torrentPath, contentPath) 100 | display := torrent.NewDisplay(torrent.NewFormatter(checkOpts.Verbose)) 101 | 102 | if !checkOpts.Quiet { 103 | green := color.New(color.FgGreen).SprintFunc() 104 | cyan := color.New(color.FgCyan).SprintFunc() 105 | fmt.Fprintf(os.Stdout, "\n%s\n", green("Verifying:")) 106 | fmt.Fprintf(os.Stdout, " Torrent file: %s\n", cyan(torrentPath)) 107 | fmt.Fprintf(os.Stdout, " Content: %s\n", cyan(contentPath)) 108 | } 109 | 110 | result, err := torrent.VerifyData(verifyOpts) 111 | if err != nil { 112 | return fmt.Errorf("verification failed: %w", err) 113 | } 114 | 115 | duration := time.Since(start) 116 | displayCheckResults(display, result, duration, checkOpts) 117 | 118 | if result.BadPieces > 0 || len(result.MissingFiles) > 0 { 119 | return fmt.Errorf("verification failed or incomplete") 120 | } 121 | 122 | return nil 123 | } 124 | -------------------------------------------------------------------------------- /cmd/create.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime/pprof" 7 | "slices" 8 | "time" 9 | 10 | "github.com/spf13/cobra" 11 | 12 | "github.com/autobrr/mkbrr/internal/preset" 13 | "github.com/autobrr/mkbrr/internal/torrent" 14 | "github.com/autobrr/mkbrr/internal/trackers" 
15 | ) 16 | 17 | // createOptions encapsulates all command-line flag values for the create command 18 | type createOptions struct { 19 | pieceLengthExp *uint 20 | maxPieceLengthExp *uint 21 | trackerURL string 22 | comment string 23 | outputPath string 24 | outputDir string 25 | source string 26 | batchFile string 27 | presetName string 28 | presetFile string 29 | webSeeds []string 30 | excludePatterns []string 31 | includePatterns []string 32 | createWorkers int 33 | isPrivate bool 34 | noDate bool 35 | noCreator bool 36 | verbose bool 37 | entropy bool 38 | quiet bool 39 | skipPrefix bool 40 | } 41 | 42 | var options = createOptions{ 43 | isPrivate: true, 44 | } 45 | 46 | var createCmd = &cobra.Command{ 47 | Use: "create [path]", 48 | Short: "Create a new torrent file", 49 | Long: `Create a new torrent file from a file or directory. 50 | Supports both single file/directory and batch mode using a YAML config file. 51 | Supports presets for commonly used settings. 52 | When a tracker URL is provided, the output filename will use the tracker domain (without TLD) as prefix by default (e.g. "example_filename.torrent"). 
This behavior can be disabled with --skip-prefix.`, 53 | Args: func(cmd *cobra.Command, args []string) error { 54 | if len(args) > 1 { 55 | return fmt.Errorf("accepts at most one arg") 56 | } 57 | if len(args) == 0 && options.batchFile == "" { 58 | presetFlag := cmd.Flags().Lookup("preset") 59 | if presetFlag != nil && presetFlag.Changed { 60 | return fmt.Errorf("when using a preset (-P/--preset), you must provide a path to the content") 61 | } 62 | return fmt.Errorf("requires a path argument or --batch flag") 63 | } 64 | if len(args) == 1 && options.batchFile != "" { 65 | return fmt.Errorf("cannot specify both path argument and --batch flag") 66 | } 67 | return nil 68 | }, 69 | RunE: runCreate, 70 | DisableFlagsInUseLine: true, 71 | SuggestionsMinimumDistance: 1, 72 | SilenceUsage: true, 73 | } 74 | 75 | func init() { 76 | createCmd.Flags().SortFlags = false 77 | createCmd.Flags().StringVarP(&options.batchFile, "batch", "b", "", "batch config file (YAML)") 78 | 79 | createCmd.Flags().StringVarP(&options.presetName, "preset", "P", "", "use preset from config") 80 | createCmd.Flags().StringVar(&options.presetFile, "preset-file", "", "preset config file (default ~/.config/mkbrr/presets.yaml)") 81 | createCmd.Flags().StringVarP(&options.trackerURL, "tracker", "t", "", "tracker URL") 82 | createCmd.Flags().StringArrayVarP(&options.webSeeds, "web-seed", "w", nil, "add web seed URLs") 83 | createCmd.Flags().BoolVarP(&options.isPrivate, "private", "p", true, "make torrent private") 84 | createCmd.Flags().StringVarP(&options.comment, "comment", "c", "", "add comment") 85 | 86 | var defaultPieceLength, defaultMaxPieceLength uint 87 | createCmd.Flags().UintVarP(&defaultPieceLength, "piece-length", "l", 0, "set piece length to 2^n bytes (16-27, automatic if not specified)") 88 | createCmd.Flags().UintVarP(&defaultMaxPieceLength, "max-piece-length", "m", 0, "limit maximum piece length to 2^n bytes (16-27, unlimited if not specified)") 89 | createCmd.PreRun = func(cmd 
*cobra.Command, args []string) { 90 | if cmd.Flags().Changed("piece-length") { 91 | options.pieceLengthExp = &defaultPieceLength 92 | } 93 | if cmd.Flags().Changed("max-piece-length") { 94 | options.maxPieceLengthExp = &defaultMaxPieceLength 95 | } 96 | } 97 | 98 | createCmd.Flags().StringVarP(&options.outputPath, "output", "o", "", "set output path (default: .torrent)") 99 | createCmd.Flags().StringVar(&options.outputDir, "output-dir", "", "output directory for created torrent") 100 | createCmd.Flags().StringVarP(&options.source, "source", "s", "", "add source string") 101 | createCmd.Flags().BoolVarP(&options.noDate, "no-date", "d", false, "don't write creation date") 102 | createCmd.Flags().BoolVarP(&options.noCreator, "no-creator", "", false, "don't write creator") 103 | createCmd.Flags().BoolVarP(&options.entropy, "entropy", "e", false, "randomize info hash by adding entropy field") 104 | createCmd.Flags().BoolVarP(&options.verbose, "verbose", "v", false, "be verbose") 105 | createCmd.Flags().BoolVar(&options.quiet, "quiet", false, "reduced output mode (prints only final torrent path)") 106 | createCmd.Flags().BoolVarP(&options.skipPrefix, "skip-prefix", "", false, "don't add tracker domain prefix to output filename") 107 | createCmd.Flags().StringArrayVarP(&options.excludePatterns, "exclude", "", nil, "exclude files matching these patterns (e.g., \"*.nfo,*.jpg\" or --exclude \"*.nfo\" --exclude \"*.jpg\")") 108 | createCmd.Flags().StringArrayVarP(&options.includePatterns, "include", "", nil, "include only files matching these patterns (e.g., \"*.mkv,*.mp4\" or --include \"*.mkv\" --include \"*.mp4\")") 109 | createCmd.Flags().IntVar(&options.createWorkers, "workers", 0, "number of worker goroutines for hashing (0 for automatic)") 110 | 111 | createCmd.Flags().String("cpuprofile", "", "write cpu profile to file (development flag)") 112 | 113 | createCmd.SetUsageTemplate(`Usage: 114 | {{.CommandPath}} /path/to/content [flags] 115 | 116 | Flags: 117 | 
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}
`)
}

// setupProfiling sets up CPU profiling if the --cpuprofile flag is set.
// It returns a cleanup function that should be deferred by the caller.
// When the flag is unset, the returned cleanup is a harmless no-op, so
// callers can always `defer cleanup()` unconditionally.
func setupProfiling(cmd *cobra.Command) (cleanup func(), err error) {
	// error from GetString is ignored: the flag is registered in init(),
	// so lookup can only fail if the flag name itself is wrong
	cpuprofile, _ := cmd.Flags().GetString("cpuprofile")
	if cpuprofile == "" {
		return func() {}, nil
	}

	f, err := os.Create(cpuprofile)
	if err != nil {
		return nil, fmt.Errorf("could not create CPU profile: %w", err)
	}

	if err := pprof.StartCPUProfile(f); err != nil {
		// close the file we just created; profiling never started
		f.Close()
		return nil, fmt.Errorf("could not start CPU profile: %w", err)
	}

	// cleanup stops profiling first, then closes the output file,
	// matching the order required by runtime/pprof
	return func() {
		pprof.StopCPUProfile()
		f.Close()
	}, nil
}

// processBatchMode handles processing multiple torrents using a batch
// configuration file (YAML, given via --batch). In quiet mode only the
// output path of each successfully created torrent is printed; otherwise
// a full result summary with elapsed time is displayed.
func processBatchMode(opts createOptions, version string, startTime time.Time) error {
	results, err := torrent.ProcessBatch(opts.batchFile, opts.verbose, opts.quiet, version)
	if err != nil {
		return fmt.Errorf("batch processing failed: %w", err)
	}

	if opts.quiet {
		for _, result := range results {
			// failed jobs are silently skipped in quiet mode; the error is
			// carried in results but only surfaced by the verbose display path
			if result.Success {
				fmt.Println("Wrote:", result.Info.Path)
			}
		}
	} else {
		display := torrent.NewDisplay(torrent.NewFormatter(opts.verbose))
		display.ShowBatchResults(results, time.Since(startTime))
	}
	return nil
}

// buildCreateOptions creates a torrent.CreateTorrentOptions struct from command-line options and presets
func buildCreateOptions(cmd *cobra.Command, inputPath string, opts createOptions, version string) (torrent.CreateTorrentOptions, error) {
	// start from the raw command-line values; preset values are merged in
	// afterwards only where the corresponding flag was not explicitly set
	createOpts := torrent.CreateTorrentOptions{
		Path:       inputPath,
		TrackerURL: opts.trackerURL,
		WebSeeds:   opts.webSeeds,
		IsPrivate:  opts.isPrivate,
		Comment:
opts.comment, 173 | PieceLengthExp: opts.pieceLengthExp, 174 | MaxPieceLength: opts.maxPieceLengthExp, 175 | Source: opts.source, 176 | NoDate: opts.noDate, 177 | NoCreator: opts.noCreator, 178 | Verbose: opts.verbose, 179 | Version: version, 180 | Entropy: opts.entropy, 181 | Quiet: opts.quiet, 182 | SkipPrefix: opts.skipPrefix, 183 | ExcludePatterns: opts.excludePatterns, 184 | IncludePatterns: opts.includePatterns, 185 | Workers: opts.createWorkers, 186 | OutputDir: opts.outputDir, 187 | } 188 | 189 | // If a preset is specified, load the preset options and merge with command-line flags 190 | if opts.presetName != "" { 191 | presetFilePath, err := preset.FindPresetFile(opts.presetFile) 192 | if err != nil { 193 | return createOpts, fmt.Errorf("could not find preset file: %w", err) 194 | } 195 | 196 | presetOpts, err := preset.LoadPresetOptions(presetFilePath, opts.presetName) 197 | if err != nil { 198 | return createOpts, fmt.Errorf("could not load preset options: %w", err) 199 | } 200 | 201 | if len(presetOpts.Trackers) > 0 && !cmd.Flags().Changed("tracker") { 202 | createOpts.TrackerURL = presetOpts.Trackers[0] 203 | } 204 | 205 | if len(presetOpts.WebSeeds) > 0 && !cmd.Flags().Changed("web-seed") { 206 | createOpts.WebSeeds = presetOpts.WebSeeds 207 | } 208 | 209 | if presetOpts.Private != nil && !cmd.Flags().Changed("private") { 210 | createOpts.IsPrivate = *presetOpts.Private 211 | } 212 | 213 | if presetOpts.Comment != "" && !cmd.Flags().Changed("comment") { 214 | createOpts.Comment = presetOpts.Comment 215 | } 216 | 217 | if presetOpts.Source != "" && !cmd.Flags().Changed("source") { 218 | createOpts.Source = presetOpts.Source 219 | } 220 | 221 | if presetOpts.OutputDir != "" && !cmd.Flags().Changed("output-dir") { 222 | createOpts.OutputDir = presetOpts.OutputDir 223 | } 224 | 225 | if presetOpts.NoDate != nil && !cmd.Flags().Changed("no-date") { 226 | createOpts.NoDate = *presetOpts.NoDate 227 | } 228 | 229 | if presetOpts.NoCreator != nil && 
!cmd.Flags().Changed("no-creator") { 230 | createOpts.NoCreator = *presetOpts.NoCreator 231 | } 232 | 233 | if presetOpts.SkipPrefix != nil && !cmd.Flags().Changed("skip-prefix") { 234 | createOpts.SkipPrefix = *presetOpts.SkipPrefix 235 | } 236 | 237 | if presetOpts.PieceLength != 0 && !cmd.Flags().Changed("piece-length") { 238 | pieceLen := presetOpts.PieceLength 239 | createOpts.PieceLengthExp = &pieceLen 240 | } 241 | 242 | if presetOpts.MaxPieceLength != 0 && !cmd.Flags().Changed("max-piece-length") { 243 | maxPieceLen := presetOpts.MaxPieceLength 244 | createOpts.MaxPieceLength = &maxPieceLen 245 | } 246 | 247 | if !cmd.Flags().Changed("entropy") && presetOpts.Entropy != nil { 248 | createOpts.Entropy = *presetOpts.Entropy 249 | } 250 | 251 | if len(presetOpts.ExcludePatterns) > 0 { 252 | if !cmd.Flags().Changed("exclude") { 253 | createOpts.ExcludePatterns = slices.Clone(presetOpts.ExcludePatterns) 254 | } else { 255 | createOpts.ExcludePatterns = append(slices.Clone(presetOpts.ExcludePatterns), createOpts.ExcludePatterns...) 256 | } 257 | } 258 | 259 | if len(presetOpts.IncludePatterns) > 0 { 260 | if !cmd.Flags().Changed("include") { 261 | createOpts.IncludePatterns = slices.Clone(presetOpts.IncludePatterns) 262 | } else { 263 | createOpts.IncludePatterns = append(slices.Clone(presetOpts.IncludePatterns), createOpts.IncludePatterns...) 
264 | } 265 | } 266 | } 267 | 268 | // Check for tracker's default source only if no source is set by flag or preset 269 | if createOpts.Source == "" && !cmd.Flags().Changed("source") { 270 | if trackerSource, ok := trackers.GetTrackerDefaultSource(createOpts.TrackerURL); ok { 271 | createOpts.Source = trackerSource 272 | } 273 | } 274 | 275 | if opts.outputPath != "" { 276 | createOpts.OutputPath = opts.outputPath 277 | } 278 | 279 | return createOpts, nil 280 | } 281 | 282 | // createSingleTorrent handles creating a single torrent file 283 | func createSingleTorrent(cmd *cobra.Command, args []string, opts createOptions, version string, startTime time.Time) error { 284 | inputPath := args[0] 285 | 286 | createOpts, err := buildCreateOptions(cmd, inputPath, opts, version) 287 | if err != nil { 288 | return err 289 | } 290 | 291 | torrentInfo, err := torrent.Create(createOpts) 292 | if err != nil { 293 | return err 294 | } 295 | 296 | if opts.quiet { 297 | fmt.Println("Wrote:", torrentInfo.Path) 298 | } else { 299 | display := torrent.NewDisplay(torrent.NewFormatter(opts.verbose)) 300 | display.ShowOutputPathWithTime(torrentInfo.Path, time.Since(startTime)) 301 | } 302 | 303 | return nil 304 | } 305 | 306 | func runCreate(cmd *cobra.Command, args []string) error { 307 | cleanup, err := setupProfiling(cmd) 308 | if err != nil { 309 | return err 310 | } 311 | defer cleanup() 312 | 313 | start := time.Now() 314 | 315 | if options.batchFile != "" { 316 | return processBatchMode(options, version, start) 317 | } 318 | 319 | return createSingleTorrent(cmd, args, options, version, start) 320 | } 321 | -------------------------------------------------------------------------------- /cmd/inspect.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/anacrolix/torrent/bencode" 8 | "github.com/anacrolix/torrent/metainfo" 9 | "github.com/fatih/color" 10 | 
"github.com/spf13/cobra" 11 | 12 | "github.com/autobrr/mkbrr/internal/torrent" 13 | ) 14 | 15 | // inspectOptions encapsulates command-line flag values for the inspect command 16 | type inspectOptions struct { 17 | verbose bool 18 | } 19 | 20 | var ( 21 | inspectOpts = inspectOptions{} 22 | cyan = color.New(color.FgMagenta, color.Bold).SprintFunc() 23 | label = color.New(color.Bold, color.FgHiWhite).SprintFunc() 24 | ) 25 | 26 | var inspectCmd = &cobra.Command{ 27 | Use: "inspect ", 28 | Short: "Inspect a torrent file", 29 | Long: "Inspect a torrent file", 30 | Args: cobra.ExactArgs(1), 31 | RunE: runInspect, 32 | DisableFlagsInUseLine: true, 33 | SuggestionsMinimumDistance: 1, 34 | SilenceUsage: true, 35 | } 36 | 37 | func init() { 38 | inspectCmd.Flags().SortFlags = false 39 | inspectCmd.Flags().BoolVarP(&inspectOpts.verbose, "verbose", "v", false, "show all metadata fields") 40 | inspectCmd.SetUsageTemplate(`Usage: 41 | {{.CommandPath}} 42 | 43 | Flags: 44 | {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}} 45 | `) 46 | } 47 | 48 | // loadTorrentData reads the torrent file and extracts metainfo, info, and raw bytes 49 | func loadTorrentData(filePath string) (mi *metainfo.MetaInfo, info *metainfo.Info, rawBytes []byte, err error) { 50 | rawBytes, err = os.ReadFile(filePath) 51 | if err != nil { 52 | return nil, nil, nil, fmt.Errorf("error reading file: %w", err) 53 | } 54 | 55 | mi, err = metainfo.LoadFromFile(filePath) 56 | if err != nil { 57 | return nil, nil, rawBytes, fmt.Errorf("error loading torrent: %w", err) 58 | } 59 | 60 | parsedInfo, err := mi.UnmarshalInfo() 61 | if err != nil { 62 | return mi, nil, rawBytes, fmt.Errorf("error parsing info: %w", err) 63 | } 64 | 65 | return mi, &parsedInfo, rawBytes, nil 66 | } 67 | 68 | // displayStandardInfo shows the core information about the torrent 69 | func displayStandardInfo(display *torrent.Display, mi *metainfo.MetaInfo, info *metainfo.Info) { 70 | t := &torrent.Torrent{MetaInfo: mi} 71 | 
display.ShowTorrentInfo(t, info) 72 | } 73 | 74 | // displayVerboseInfo shows additional metadata fields found in the torrent file 75 | func displayVerboseInfo(rawBytes []byte, mi *metainfo.MetaInfo) { 76 | fmt.Printf("%s\n", cyan("Additional metadata:")) 77 | 78 | // Display extra root-level fields 79 | rootMap := make(map[string]interface{}) 80 | if err := bencode.Unmarshal(rawBytes, &rootMap); err == nil { 81 | standardRoot := map[string]bool{ 82 | "announce": true, "announce-list": true, "comment": true, 83 | "created by": true, "creation date": true, "info": true, 84 | "url-list": true, "nodes": true, 85 | } 86 | 87 | for k, v := range rootMap { 88 | if !standardRoot[k] { 89 | fmt.Printf(" %-13s %v\n", label(k+":"), v) 90 | } 91 | } 92 | } 93 | 94 | // Display extra info-dictionary fields 95 | infoMap := make(map[string]interface{}) 96 | if err := bencode.Unmarshal(mi.InfoBytes, &infoMap); err == nil { 97 | standardInfo := map[string]bool{ 98 | "name": true, "piece length": true, "pieces": true, 99 | "files": true, "length": true, "private": true, 100 | "source": true, "path": true, "paths": true, 101 | "md5sum": true, 102 | } 103 | 104 | for k, v := range infoMap { 105 | if !standardInfo[k] { 106 | fmt.Printf(" %-13s %v\n", label("info."+k+":"), v) 107 | } 108 | } 109 | } 110 | fmt.Println() 111 | } 112 | 113 | // displayFileTreeIfNeeded shows the file tree if the torrent contains multiple files 114 | func displayFileTreeIfNeeded(display *torrent.Display, info *metainfo.Info) { 115 | if info.IsDir() { 116 | display.ShowFileTree(info) 117 | } 118 | } 119 | 120 | func runInspect(cmd *cobra.Command, args []string) error { 121 | torrentPath := args[0] 122 | 123 | mi, info, rawBytes, err := loadTorrentData(torrentPath) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | display := torrent.NewDisplay(torrent.NewFormatter(inspectOpts.verbose)) 129 | displayStandardInfo(display, mi, info) 130 | 131 | if inspectOpts.verbose { 132 | displayVerboseInfo(rawBytes, 
mi) 133 | displayFileTreeIfNeeded(display, info) 134 | } 135 | 136 | return nil 137 | } 138 | -------------------------------------------------------------------------------- /cmd/modify.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/autobrr/mkbrr/internal/torrent" 10 | ) 11 | 12 | // modifyOptions encapsulates command-line flag values for the modify command 13 | type modifyOptions struct { 14 | PresetName string 15 | PresetFile string 16 | OutputDir string 17 | Output string 18 | Tracker string 19 | Comment string 20 | Source string 21 | WebSeeds []string 22 | DryRun bool 23 | NoDate bool 24 | NoCreator bool 25 | Verbose bool 26 | Quiet bool 27 | SkipPrefix bool 28 | Private bool 29 | Entropy bool 30 | } 31 | 32 | var modifyOpts = modifyOptions{ 33 | Private: true, 34 | } 35 | 36 | var modifyCmd = &cobra.Command{ 37 | Use: "modify [torrent files...]", 38 | Short: "Modify existing torrent files using a preset", 39 | Long: `Modify existing torrent files using a preset or flags. 40 | This allows batch modification of torrent files with new tracker URLs, source tags, etc. 41 | Original files are preserved and new files are created with the tracker domain (without TLD) as prefix, e.g. "example_filename.torrent". 42 | A custom output filename can also be specified via --output. 
43 | 44 | Note: All unnecessary metadata will be stripped.`, 45 | Args: cobra.MinimumNArgs(1), 46 | RunE: runModify, 47 | DisableFlagsInUseLine: true, 48 | SilenceUsage: true, 49 | } 50 | 51 | func init() { 52 | modifyCmd.Flags().SortFlags = false 53 | modifyCmd.Flags().StringVarP(&modifyOpts.PresetName, "preset", "P", "", "use preset from config") 54 | modifyCmd.Flags().StringVar(&modifyOpts.PresetFile, "preset-file", "", "preset config file (default: ~/.config/mkbrr/presets.yaml)") 55 | modifyCmd.Flags().StringVar(&modifyOpts.OutputDir, "output-dir", "", "output directory for modified files") 56 | modifyCmd.Flags().StringVarP(&modifyOpts.Output, "output", "o", "", "custom output filename (without extension)") 57 | modifyCmd.Flags().BoolVarP(&modifyOpts.NoDate, "no-date", "d", false, "don't update creation date") 58 | modifyCmd.Flags().BoolVarP(&modifyOpts.NoCreator, "no-creator", "", false, "don't write creator") 59 | modifyCmd.Flags().StringVarP(&modifyOpts.Tracker, "tracker", "t", "", "tracker URL") 60 | modifyCmd.Flags().StringArrayVarP(&modifyOpts.WebSeeds, "web-seed", "w", nil, "add web seed URLs") 61 | modifyCmd.Flags().BoolVarP(&modifyOpts.Private, "private", "p", true, "make torrent private (default: true)") 62 | modifyCmd.Flags().StringVarP(&modifyOpts.Comment, "comment", "c", "", "add comment") 63 | modifyCmd.Flags().StringVarP(&modifyOpts.Source, "source", "s", "", "add source string") 64 | modifyCmd.Flags().BoolVarP(&modifyOpts.Entropy, "entropy", "e", false, "randomize info hash by adding entropy field") 65 | modifyCmd.Flags().BoolVarP(&modifyOpts.Verbose, "verbose", "v", false, "be verbose") 66 | modifyCmd.Flags().BoolVar(&modifyOpts.Quiet, "quiet", false, "reduced output mode (prints only final torrent paths)") 67 | modifyCmd.Flags().BoolVarP(&modifyOpts.SkipPrefix, "skip-prefix", "", false, "don't add tracker domain prefix to output filename") 68 | modifyCmd.Flags().BoolVarP(&modifyOpts.DryRun, "dry-run", "n", false, "show what would be modified 
without making changes")

	modifyCmd.SetUsageTemplate(`Usage:
  {{.CommandPath}} [flags] [torrent files...]

Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}
`)
}

// buildTorrentOptions creates a torrent.Options struct from command-line flags.
// Most fields are copied one-to-one; IsPrivate is special-cased below.
func buildTorrentOptions(cmd *cobra.Command, opts modifyOptions) torrent.Options {
	torrentOpts := torrent.Options{
		PresetName:    opts.PresetName,
		PresetFile:    opts.PresetFile,
		OutputDir:     opts.OutputDir,
		OutputPattern: opts.Output,
		NoDate:        opts.NoDate,
		NoCreator:     opts.NoCreator,
		DryRun:        opts.DryRun,
		Verbose:       opts.Verbose,
		Quiet:         opts.Quiet,
		TrackerURL:    opts.Tracker,
		WebSeeds:      opts.WebSeeds,
		Comment:       opts.Comment,
		Source:        opts.Source,
		Version:       version,
		Entropy:       opts.Entropy,
		SkipPrefix:    opts.SkipPrefix,
	}

	// IsPrivate is a *bool so that "flag not given" (nil) can be
	// distinguished from an explicit --private=false; only set it when
	// the user actually passed the flag
	if cmd.Flags().Changed("private") {
		torrentOpts.IsPrivate = &opts.Private
	}

	return torrentOpts
}

// displayModifyResults handles showing the results of torrent modification.
// It returns the number of files that were actually (re)written; errored,
// unchanged, and dry-run entries are reported but not counted.
func displayModifyResults(results []*torrent.Result, opts modifyOptions, display *torrent.Display, startTime time.Time) int {
	successCount := 0

	for _, result := range results {
		// per-file error: report and keep processing the rest of the batch
		if result.Error != nil {
			display.ShowError(fmt.Sprintf("Error processing %s: %v", result.Path, result.Error))
			continue
		}

		// nothing to do for this torrent (requested values already present)
		if result.WasModified {
			display.ShowMessage(fmt.Sprintf("Skipping %s (no changes needed)", result.Path))
			continue
		}

		if opts.DryRun {
			display.ShowMessage(fmt.Sprintf("Would modify %s", result.Path))
			continue
		}

		if opts.Verbose {
			// Load the modified torrent to display its info; load/parse
			// failures here are intentionally ignored (display-only path)
			mi, err := torrent.LoadFromFile(result.OutputPath)
			if err == nil {
				info, err := mi.UnmarshalInfo()
				if err == nil {
					display.ShowTorrentInfo(mi,
&info) 133 | } 134 | } 135 | } 136 | 137 | if opts.Quiet { 138 | fmt.Println("Wrote:", result.OutputPath) 139 | } else { 140 | display.ShowOutputPathWithTime(result.OutputPath, time.Since(startTime)) 141 | } 142 | successCount++ 143 | } 144 | 145 | return successCount 146 | } 147 | 148 | func runModify(cmd *cobra.Command, args []string) error { 149 | start := time.Now() 150 | 151 | display := torrent.NewDisplay(torrent.NewFormatter(modifyOpts.Verbose)) 152 | display.SetQuiet(modifyOpts.Quiet) 153 | display.ShowMessage(fmt.Sprintf("Modifying %d torrent files...", len(args))) 154 | 155 | // Build torrent options from command-line flags 156 | torrentOpts := buildTorrentOptions(cmd, modifyOpts) 157 | 158 | // Process the torrent files 159 | results, err := torrent.ProcessTorrents(args, torrentOpts) 160 | if err != nil { 161 | return fmt.Errorf("could not process torrent files: %w", err) 162 | } 163 | 164 | // Display the results 165 | displayModifyResults(results, modifyOpts, display, start) 166 | 167 | return nil 168 | } 169 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | const banner = ` __ ___. 
8 | _____ | | _\_ |________________ 9 | / \| |/ /| __ \_ __ \_ __ \ 10 | | Y Y \ < | \_\ \ | \/| | \/ 11 | |__|_| /__|_ \|___ /__| |__| 12 | \/ \/ \/ ` 13 | 14 | var rootCmd = &cobra.Command{ 15 | Use: "mkbrr", 16 | Short: "A tool to inspect and create torrent files", 17 | Long: banner + "\n\nmkbrr is a tool to create and inspect torrent files.", 18 | } 19 | 20 | func init() { 21 | cobra.EnableCommandSorting = false 22 | rootCmd.AddCommand(createCmd) 23 | rootCmd.AddCommand(checkCmd) 24 | rootCmd.AddCommand(inspectCmd) 25 | rootCmd.AddCommand(modifyCmd) 26 | rootCmd.AddCommand(updateCmd) 27 | rootCmd.AddCommand(versionCmd) 28 | } 29 | 30 | func Execute() error { 31 | rootCmd.CompletionOptions.DisableDefaultCmd = true 32 | rootCmd.SilenceUsage = false 33 | 34 | rootCmd.SetUsageTemplate(`Usage: 35 | {{.CommandPath}} [command] 36 | 37 | Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} 38 | {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} 39 | 40 | Flags: 41 | {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}} 42 | 43 | Use "{{.CommandPath}} [command] --help" for more information about a command. 
44 | `) 45 | 46 | return rootCmd.Execute() 47 | } 48 | -------------------------------------------------------------------------------- /cmd/update.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/blang/semver" 7 | "github.com/creativeprojects/go-selfupdate" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var updateCmd = &cobra.Command{ 12 | Use: "update", 13 | Short: "Update mkbrr", 14 | Long: `Update mkbrr to latest version.`, 15 | RunE: runUpdate, 16 | DisableFlagsInUseLine: true, 17 | } 18 | 19 | func init() { 20 | updateCmd.SetUsageTemplate(`Usage: 21 | {{.CommandPath}} 22 | 23 | Flags: 24 | {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}} 25 | `) 26 | } 27 | 28 | func runUpdate(cmd *cobra.Command, args []string) error { 29 | _, err := semver.ParseTolerant(version) 30 | if err != nil { 31 | return fmt.Errorf("could not parse version: %w", err) 32 | } 33 | 34 | latest, found, err := selfupdate.DetectLatest(cmd.Context(), selfupdate.ParseSlug("autobrr/mkbrr")) 35 | if err != nil { 36 | return fmt.Errorf("error occurred while detecting version: %w", err) 37 | } 38 | if !found { 39 | return fmt.Errorf("latest version for %s/%s could not be found from github repository", "autobrr/mkbrr", version) 40 | } 41 | 42 | if latest.LessOrEqual(version) { 43 | fmt.Printf("Current binary is the latest version: %s\n", version) 44 | return nil 45 | } 46 | 47 | exe, err := selfupdate.ExecutablePath() 48 | if err != nil { 49 | return fmt.Errorf("could not locate executable path: %w", err) 50 | } 51 | 52 | if err := selfupdate.UpdateTo(cmd.Context(), latest.AssetURL, latest.AssetName, exe); err != nil { 53 | return fmt.Errorf("error occurred while updating binary: %w", err) 54 | } 55 | 56 | fmt.Printf("Successfully updated to version: %s\n", latest.Version()) 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /cmd/version.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "runtime/debug" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var ( 11 | version string 12 | buildTime string 13 | ) 14 | 15 | var versionCmd = &cobra.Command{ 16 | Use: "version", 17 | Short: "Print version information", 18 | Run: func(cmd *cobra.Command, args []string) { 19 | fmt.Printf("mkbrr version: %s\n", version) 20 | if buildTime != "unknown" { 21 | fmt.Printf("Build Time: %s\n", buildTime) 22 | } 23 | }, 24 | DisableFlagsInUseLine: true, 25 | } 26 | 27 | func SetVersion(v, bt string) { 28 | if v == "dev" { 29 | if info, ok := debug.ReadBuildInfo(); ok { 30 | v = info.Main.Version 31 | } 32 | } 33 | version = v 34 | buildTime = bt 35 | } 36 | 37 | func init() { 38 | versionCmd.SetUsageTemplate(`Usage: 39 | {{.CommandPath}} 40 | 41 | Prints the version and build time information for mkbrr. 42 | `) 43 | } 44 | -------------------------------------------------------------------------------- /docs/benchmarks/benchmark_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autobrr/mkbrr/18b58846c7d408a291efa85e2c52c0f4cba644c2/docs/benchmarks/benchmark_comparison.png -------------------------------------------------------------------------------- /docs/benchmarks/benchmark_plots.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import pandas as pd 4 | import seaborn as sns 5 | 6 | # Set style 7 | plt.style.use('bmh') 8 | sns.set_theme(style="whitegrid") 9 | 10 | # Data 11 | hardware = ['Leaseweb (SSD)', 'Hetzner (HDD)', 'Macbook (NVME)'] 12 | test_size = [21, 14, 30] # GiB 13 | 14 | # Times in seconds 15 | mkbrr = [7.24, 41.02, 9.71] 16 | mktorrent = [45.41, 68.17, 10.90] 17 | torrenttools = [9.07, 47.97, np.nan] 18 | torf = [8.85, 58.19, 9.78] 19 | 20 | # Create 
DataFrame
# NOTE(review): df is built here but never referenced afterwards — the plots
# below read the raw lists directly. Presumably kept for interactive use or a
# planned table export; confirm before removing.
df = pd.DataFrame({
    'Hardware': hardware,
    'Test Size (GiB)': test_size,
    'mkbrr': mkbrr,
    'mktorrent': mktorrent,
    'torrenttools': torrenttools,
    'torf': torf
})

plt.rcParams['figure.figsize'] = (10, 6)
plt.rcParams['font.size'] = 10

# Create horizontal bar plot: one group of 4 bars (one per tool) per hardware
# platform, offset around the group's y position by multiples of `width`
fig, ax = plt.subplots()
y = np.arange(len(hardware))
width = 0.2

# np.nan entries (missing measurements) simply render as absent bars
ax.barh(y - width*1.5, mkbrr, width, label='mkbrr', color='#2ecc71')
ax.barh(y - width/2, mktorrent, width, label='mktorrent', color='#e74c3c')
ax.barh(y + width/2, torrenttools, width, label='torrenttools', color='#3498db')
ax.barh(y + width*1.5, torf, width, label='torf', color='#f1c40f')

# Customize plot
ax.set_xlabel('Time (seconds)')
ax.set_title('Torrent Creation Performance Comparison')
ax.set_yticks(y)
ax.set_yticklabels(hardware)
# legend placed outside the axes on the right
ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left')

# Add test size labels near the left edge of each bar group
# (x=1 is a hard-coded data coordinate, not an axes fraction)
for i, size in enumerate(test_size):
    ax.text(1, i, f'{size} GiB', ha='left', va='center')

# Adjust layout
plt.tight_layout()

# Save plot
plt.savefig('benchmark_comparison.png', dpi=300, bbox_inches='tight')

# Create speed comparison plot: how many times faster mkbrr is than each
# competing tool, per platform (values >1 mean mkbrr wins)
speed_vs_mktorrent = [6.3, 1.7, 1.1]
speed_vs_torrenttools = [1.3, 1.2, np.nan]
speed_vs_torf = [1.2, 1.4, 1.0]

fig, ax = plt.subplots()
y = np.arange(len(hardware))
width = 0.25

ax.barh(y - width, speed_vs_mktorrent, width, label='vs mktorrent', color='#e74c3c')
ax.barh(y, speed_vs_torrenttools, width, label='vs torrenttools', color='#3498db')
ax.barh(y + width, speed_vs_torf, width, label='vs torf', color='#f1c40f')

# Customize plot
ax.set_xlabel('Speed Multiplier (×)')
ax.set_title('mkbrr Speed Comparison')
ax.set_yticks(y)
ax.set_yticklabels(hardware)
ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left')

# Add vertical line at 1.0 (parity: same speed as the compared tool)
ax.axvline(x=1.0, color='black', linestyle='--', alpha=0.3)

# Adjust layout
plt.tight_layout()

# Save plot
plt.savefig('speed_comparison.png', dpi=300, bbox_inches='tight')

# Create consistency plot (standard deviation percentages across runs;
# lower = more consistent run-to-run timing)
std_mkbrr = [0.25, 2.39, 3.66]
std_mktorrent = [0.36, 39.10, 6.43]
std_torrenttools = [1.02, 22.00, np.nan]
std_torf = [0.87, 9.95, 7.66]

fig, ax = plt.subplots()
y = np.arange(len(hardware))
width = 0.2

ax.barh(y - width*1.5, std_mkbrr, width, label='mkbrr', color='#2ecc71')
ax.barh(y - width/2, std_mktorrent, width, label='mktorrent', color='#e74c3c')
ax.barh(y + width/2, std_torrenttools, width, label='torrenttools', color='#3498db')
ax.barh(y + width*1.5, std_torf, width, label='torf', color='#f1c40f')

# Customize plot
ax.set_xlabel('Standard Deviation (%)')
ax.set_title('Performance Consistency Comparison')
ax.set_yticks(y)
ax.set_yticklabels(hardware)
ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left')

# Adjust layout
plt.tight_layout()

# Save plot
plt.savefig('consistency_comparison.png', dpi=300, bbox_inches='tight')
--------------------------------------------------------------------------------
/docs/benchmarks/consistency_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autobrr/mkbrr/18b58846c7d408a291efa85e2c52c0f4cba644c2/docs/benchmarks/consistency_comparison.png
--------------------------------------------------------------------------------
/docs/benchmarks/requirements.txt:
--------------------------------------------------------------------------------
matplotlib>=3.7.0
numpy>=1.24.0
pandas>=2.0.0
seaborn>=0.12.0
--------------------------------------------------------------------------------
/docs/benchmarks/speed_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autobrr/mkbrr/18b58846c7d408a291efa85e2c52c0f4cba644c2/docs/benchmarks/speed_comparison.png -------------------------------------------------------------------------------- /examples/batch.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/autobrr/mkbrr/main/schema/batch.json 2 | version: 1 3 | jobs: 4 | - output: randomtracker_random_movie.torrent 5 | path: /Users/user/Downloads/Random.Movie.Title.2023.1080p.WEB-DL.mkv 6 | trackers: 7 | - https://tracker.randomtracker.org/announce 8 | comment: "Random Movie Title - A thrilling adventure" 9 | private: false 10 | 11 | - output: anothertracker_random_release.torrent 12 | path: '/Users/user/Downloads/Random Album - Best Hits (2025)' 13 | trackers: 14 | - https://tracker.anothertracker.com/announce 15 | private: true 16 | source: "anothertracker" 17 | no_date: true 18 | exclude_patterns: # Example: exclude NFO files and samples 19 | - "*.nfo" 20 | - "*sample*" 21 | include_patterns: # Example: include only video files 22 | - "*.mkv" 23 | - "*.mp4" 24 | - "*.avi" 25 | -------------------------------------------------------------------------------- /examples/presets.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/autobrr/mkbrr/main/schema/presets.json 2 | version: 1 3 | 4 | # defaults that always apply to all presets unless overridden 5 | default: 6 | private: true 7 | no_date: true 8 | no_creator: false 9 | skip_prefix: false 10 | output_dir: "/full/path/to/torrents" 11 | # workers: 2 # override built-in calculation 12 | # comment: "Default comment for all torrents" # Torrent comment 13 | # source: "DEFAULT" # Source tag 14 | # exclude_patterns: # Default 
list of glob patterns to exclude files 15 | # - "*.bak" 16 | # - "temp.*" 17 | # include_patterns: # Default list of glob patterns to include files (only these will be included) 18 | # - "*.mkv" 19 | # - "*.mp4" 20 | 21 | presets: 22 | ptp: 23 | source: "PTP" 24 | output_dir: "/full/path/to/torrents/ptp" 25 | trackers: 26 | - "https://please.passthe.tea/announce" 27 | exclude_patterns: # Example: exclude NFO files and samples 28 | - "*.nfo" 29 | - "*sample*" 30 | include_patterns: # Example: include only video files 31 | - "*.mkv" 32 | - "*.mp4" 33 | # entropy: true # randomize the hash, useful for cross-seeded torrents 34 | #workers: 1 # override built-in calculation 35 | 36 | # Public tracker preset with all options shown 37 | public: 38 | output_dir: "/full/path/to/torrents/public" 39 | private: false # overrides default preset 40 | no_date: false 41 | no_creator: true 42 | trackers: 43 | - "udp://tracker.opentrackr.org:1337/announce" 44 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/autobrr/mkbrr 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/anacrolix/torrent v1.58.1 7 | github.com/blang/semver v3.5.1+incompatible 8 | github.com/creativeprojects/go-selfupdate v1.5.0 9 | github.com/dustin/go-humanize v1.0.1 10 | github.com/fatih/color v1.18.0 11 | github.com/schollz/progressbar/v3 v3.18.0 12 | github.com/spf13/cobra v1.9.1 13 | gopkg.in/yaml.v3 v3.0.1 14 | ) 15 | 16 | require ( 17 | code.gitea.io/sdk/gitea v0.21.0 // indirect 18 | github.com/42wim/httpsig v1.2.3 // indirect 19 | github.com/Masterminds/semver/v3 v3.3.1 // indirect 20 | github.com/anacrolix/generics v0.0.3-0.20240902042256-7fb2702ef0ca // indirect 21 | github.com/anacrolix/missinggo v1.3.0 // indirect 22 | github.com/anacrolix/missinggo/v2 v2.8.0 // indirect 23 | github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect 24 | 
github.com/davidmz/go-pageant v1.0.2 // indirect 25 | github.com/go-fed/httpsig v1.1.0 // indirect 26 | github.com/google/go-github/v30 v30.1.0 // indirect 27 | github.com/google/go-querystring v1.1.0 // indirect 28 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 29 | github.com/hashicorp/go-retryablehttp v0.7.7 // indirect 30 | github.com/hashicorp/go-version v1.7.0 // indirect 31 | github.com/huandu/xstrings v1.5.0 // indirect 32 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 33 | github.com/klauspost/cpuid/v2 v2.2.10 // indirect 34 | github.com/mattn/go-colorable v0.1.14 // indirect 35 | github.com/mattn/go-isatty v0.0.20 // indirect 36 | github.com/minio/sha256-simd v1.0.1 // indirect 37 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect 38 | github.com/mr-tron/base58 v1.2.0 // indirect 39 | github.com/multiformats/go-multihash v0.2.3 // indirect 40 | github.com/multiformats/go-varint v0.0.7 // indirect 41 | github.com/rivo/uniseg v0.4.7 // indirect 42 | github.com/spaolacci/murmur3 v1.1.0 // indirect 43 | github.com/spf13/pflag v1.0.6 // indirect 44 | github.com/ulikunitz/xz v0.5.12 // indirect 45 | github.com/xanzy/go-gitlab v0.115.0 // indirect 46 | golang.org/x/crypto v0.37.0 // indirect 47 | golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect 48 | golang.org/x/oauth2 v0.29.0 // indirect 49 | golang.org/x/sys v0.32.0 // indirect 50 | golang.org/x/term v0.31.0 // indirect 51 | golang.org/x/time v0.11.0 // indirect 52 | lukechampine.com/blake3 v1.4.0 // indirect 53 | ) 54 | -------------------------------------------------------------------------------- /internal/preset/preset.go: -------------------------------------------------------------------------------- 1 | package preset 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "time" 9 | 10 | "github.com/anacrolix/torrent/bencode" 11 | "github.com/anacrolix/torrent/metainfo" 12 | "gopkg.in/yaml.v3" 13 | ) 14 | 15 | // Config 
represents the YAML configuration for torrent creation presets 16 | type Config struct { 17 | Default *Options `yaml:"default"` 18 | Presets map[string]Options `yaml:"presets"` 19 | Version int `yaml:"version"` 20 | } 21 | 22 | // Options represents the options for a single preset 23 | type Options struct { 24 | Private *bool `yaml:"private"` 25 | NoDate *bool `yaml:"no_date"` 26 | NoCreator *bool `yaml:"no_creator"` 27 | SkipPrefix *bool `yaml:"skip_prefix"` 28 | Entropy *bool `yaml:"entropy"` 29 | Comment string `yaml:"comment"` 30 | Source string `yaml:"source"` 31 | OutputDir string `yaml:"output_dir"` 32 | Version string // used for creator string 33 | Trackers []string `yaml:"trackers"` 34 | WebSeeds []string `yaml:"webseeds"` 35 | ExcludePatterns []string `yaml:"exclude_patterns"` 36 | IncludePatterns []string `yaml:"include_patterns"` 37 | PieceLength uint `yaml:"piece_length"` 38 | MaxPieceLength uint `yaml:"max_piece_length"` 39 | Workers int `yaml:"workers"` 40 | } 41 | 42 | // FindPresetFile searches for a preset file in known locations 43 | func FindPresetFile(explicitPath string) (string, error) { 44 | // check known locations in order 45 | locations := []string{ 46 | explicitPath, // explicitly specified file 47 | "presets.yaml", // current directory 48 | } 49 | 50 | // add user home directory locations 51 | if home, err := os.UserHomeDir(); err == nil { 52 | locations = append(locations, 53 | filepath.Join(home, ".config", "mkbrr", "presets.yaml"), // ~/.config/mkbrr/ 54 | filepath.Join(home, ".mkbrr", "presets.yaml"), // ~/.mkbrr/ 55 | ) 56 | } 57 | 58 | // find first existing preset file 59 | for _, loc := range locations { 60 | if _, err := os.Stat(loc); err == nil { 61 | return loc, nil 62 | } 63 | } 64 | 65 | return "", fmt.Errorf("could not find preset file in known locations") 66 | } 67 | 68 | // Load loads presets from a config file 69 | func Load(configPath string) (*Config, error) { 70 | data, err := os.ReadFile(configPath) 71 | if err != 
nil { 72 | return nil, fmt.Errorf("could not read preset config: %w", err) 73 | } 74 | 75 | var config Config 76 | if err := yaml.Unmarshal(data, &config); err != nil { 77 | return nil, fmt.Errorf("could not parse preset config: %w", err) 78 | } 79 | 80 | if config.Version != 1 { 81 | return nil, fmt.Errorf("unsupported preset config version: %d", config.Version) 82 | } 83 | 84 | if len(config.Presets) == 0 { 85 | return nil, fmt.Errorf("no presets defined in config") 86 | } 87 | 88 | return &config, nil 89 | } 90 | 91 | // GetPreset returns a preset by name, merged with default settings 92 | func (c *Config) GetPreset(name string) (*Options, error) { 93 | preset, ok := c.Presets[name] 94 | if !ok { 95 | return nil, fmt.Errorf("preset %q not found", name) 96 | } 97 | 98 | // create a copy with hardcoded defaults 99 | defaultPrivate := true 100 | defaultNoDate := false 101 | defaultNoCreator := false 102 | defaultSkipPrefix := false 103 | defaultWorkers := 0 // auto 104 | 105 | merged := Options{ 106 | Private: &defaultPrivate, 107 | NoDate: &defaultNoDate, 108 | NoCreator: &defaultNoCreator, 109 | SkipPrefix: &defaultSkipPrefix, 110 | Workers: defaultWorkers, 111 | } 112 | 113 | // if we have defaults in config, use those instead 114 | if c.Default != nil { 115 | if c.Default.Private != nil { 116 | merged.Private = c.Default.Private 117 | } 118 | if c.Default.NoDate != nil { 119 | merged.NoDate = c.Default.NoDate 120 | } 121 | if c.Default.NoCreator != nil { 122 | merged.NoCreator = c.Default.NoCreator 123 | } 124 | if c.Default.SkipPrefix != nil { 125 | merged.SkipPrefix = c.Default.SkipPrefix 126 | } 127 | merged.Trackers = c.Default.Trackers 128 | merged.WebSeeds = c.Default.WebSeeds 129 | merged.Comment = c.Default.Comment 130 | merged.Source = c.Default.Source 131 | merged.OutputDir = c.Default.OutputDir 132 | merged.PieceLength = c.Default.PieceLength 133 | merged.MaxPieceLength = c.Default.MaxPieceLength 134 | merged.Workers = c.Default.Workers 135 | if 
len(c.Default.ExcludePatterns) > 0 { 136 | merged.ExcludePatterns = c.Default.ExcludePatterns 137 | } 138 | if len(c.Default.IncludePatterns) > 0 { 139 | merged.IncludePatterns = c.Default.IncludePatterns 140 | } 141 | if c.Default.Entropy != nil { 142 | merged.Entropy = c.Default.Entropy 143 | } 144 | } 145 | 146 | // override with preset values if they are set 147 | if len(preset.Trackers) > 0 { 148 | merged.Trackers = preset.Trackers 149 | } 150 | if len(preset.WebSeeds) > 0 { 151 | merged.WebSeeds = preset.WebSeeds 152 | } 153 | if preset.Comment != "" { 154 | merged.Comment = preset.Comment 155 | } 156 | if preset.Source != "" { 157 | merged.Source = preset.Source 158 | } 159 | if preset.OutputDir != "" { 160 | merged.OutputDir = preset.OutputDir 161 | } 162 | if preset.PieceLength != 0 { 163 | merged.PieceLength = preset.PieceLength 164 | } 165 | if preset.MaxPieceLength != 0 { 166 | merged.MaxPieceLength = preset.MaxPieceLength 167 | } 168 | if preset.Private != nil { 169 | merged.Private = preset.Private 170 | } 171 | if preset.NoDate != nil { 172 | merged.NoDate = preset.NoDate 173 | } 174 | if preset.NoCreator != nil { 175 | merged.NoCreator = preset.NoCreator 176 | } 177 | if preset.SkipPrefix != nil { 178 | merged.SkipPrefix = preset.SkipPrefix 179 | } 180 | if len(preset.ExcludePatterns) > 0 { 181 | merged.ExcludePatterns = preset.ExcludePatterns 182 | } 183 | if len(preset.IncludePatterns) > 0 { 184 | merged.IncludePatterns = preset.IncludePatterns 185 | } 186 | if preset.Entropy != nil { 187 | merged.Entropy = preset.Entropy 188 | } 189 | if preset.Workers != 0 { 190 | merged.Workers = preset.Workers 191 | } 192 | 193 | return &merged, nil 194 | } 195 | 196 | // ApplyToMetaInfo applies preset options to a MetaInfo object 197 | func (o *Options) ApplyToMetaInfo(mi *metainfo.MetaInfo) (bool, error) { 198 | wasModified := false 199 | 200 | info, err := mi.UnmarshalInfo() 201 | if err != nil { 202 | return false, fmt.Errorf("could not unmarshal info: 
%w", err) 203 | } 204 | 205 | // Only modify values that are explicitly set in the preset 206 | if len(o.Trackers) > 0 { 207 | mi.Announce = o.Trackers[0] 208 | mi.AnnounceList = [][]string{o.Trackers} 209 | wasModified = true 210 | } 211 | 212 | if len(o.WebSeeds) > 0 { 213 | mi.UrlList = o.WebSeeds 214 | wasModified = true 215 | } 216 | 217 | if o.Source != "" { 218 | info.Source = o.Source 219 | wasModified = true 220 | } 221 | 222 | if o.Comment != "" { 223 | mi.Comment = o.Comment 224 | wasModified = true 225 | } 226 | 227 | if o.Private != nil { 228 | if info.Private == nil { 229 | info.Private = new(bool) 230 | } 231 | *info.Private = *o.Private 232 | wasModified = true 233 | } 234 | 235 | if o.NoCreator != nil { 236 | if *o.NoCreator { 237 | mi.CreatedBy = "" 238 | } else { 239 | mi.CreatedBy = fmt.Sprintf("mkbrr/%s", o.Version) 240 | } 241 | wasModified = true 242 | } 243 | 244 | if o.NoDate != nil { 245 | if *o.NoDate { 246 | mi.CreationDate = 0 247 | } else { 248 | mi.CreationDate = time.Now().Unix() 249 | } 250 | wasModified = true 251 | } 252 | 253 | // re-marshal the modified info if needed 254 | if wasModified { 255 | if infoBytes, err := bencode.Marshal(info); err == nil { 256 | mi.InfoBytes = infoBytes 257 | } 258 | } 259 | 260 | return wasModified, nil 261 | } 262 | 263 | // GetDomainPrefix extracts a clean domain name from a tracker URL to use as a filename prefix 264 | func GetDomainPrefix(trackerURL string) string { 265 | if trackerURL == "" { 266 | return "modified" 267 | } 268 | 269 | cleanURL := strings.TrimSpace(trackerURL) 270 | 271 | domain := cleanURL 272 | 273 | if strings.Contains(domain, "://") { 274 | parts := strings.SplitN(domain, "://", 2) 275 | if len(parts) == 2 { 276 | domain = parts[1] 277 | } 278 | } 279 | 280 | if strings.Contains(domain, "/") { 281 | domain = strings.SplitN(domain, "/", 2)[0] 282 | } 283 | 284 | if strings.Contains(domain, ":") { 285 | domain = strings.SplitN(domain, ":", 2)[0] 286 | } 287 | 288 | domain = 
strings.TrimPrefix(domain, "www.") 289 | 290 | if domain != "" { 291 | parts := strings.Split(domain, ".") 292 | 293 | if len(parts) > 1 { 294 | // take only the domain name without TLD 295 | // for example, from "tracker.example.com", get "example" 296 | if len(parts) > 2 { 297 | // for subdomains, use the second-to-last part 298 | domain = parts[len(parts)-2] 299 | } else { 300 | // for simple domains like example.com, use the first part 301 | domain = parts[0] 302 | } 303 | } 304 | 305 | return sanitizeFilename(domain) 306 | } 307 | 308 | return "modified" 309 | } 310 | 311 | // GenerateOutputPath generates an output path for a modified torrent file 312 | func GenerateOutputPath(originalPath, outputDir, presetName string, outputPattern string, trackerURL string, metaInfoName string, skipPrefix bool) string { 313 | dir := filepath.Dir(originalPath) 314 | if outputDir != "" { 315 | dir = outputDir 316 | } 317 | 318 | base := filepath.Base(originalPath) 319 | ext := filepath.Ext(base) 320 | 321 | name := strings.TrimSuffix(base, ext) 322 | 323 | // if custom output pattern is provided, use it 324 | if outputPattern != "" { 325 | return filepath.Join(dir, outputPattern+ext) 326 | } 327 | 328 | // if skip-prefix is true, just return the original filename 329 | if skipPrefix { 330 | return filepath.Join(dir, base) 331 | } 332 | 333 | // prioritize preset name over tracker URL 334 | var prefix string 335 | if presetName != "" { 336 | prefix = sanitizeFilename(presetName) 337 | } else { 338 | prefix = GetDomainPrefix(trackerURL) 339 | } 340 | 341 | return filepath.Join(dir, prefix+"_"+name+ext) 342 | } 343 | 344 | // LoadPresetOptions loads and returns preset options from a file by name. 345 | // It handles the full process of loading the presets file and resolving the named preset, 346 | // including applying any default settings. 
347 | func LoadPresetOptions(presetFilePath string, presetName string) (*Options, error) { 348 | // Load the presets from the file 349 | config, err := Load(presetFilePath) 350 | if err != nil { 351 | return nil, fmt.Errorf("could not load presets: %w", err) 352 | } 353 | 354 | // Get the specific preset with default settings applied 355 | presetOpts, err := config.GetPreset(presetName) 356 | if err != nil { 357 | return nil, fmt.Errorf("could not get preset: %w", err) 358 | } 359 | 360 | return presetOpts, nil 361 | } 362 | 363 | // sanitizeFilename removes characters that are invalid in filenames 364 | func sanitizeFilename(input string) string { 365 | // replace characters that are problematic in filenames 366 | replacer := strings.NewReplacer( 367 | "/", "_", 368 | "\\", "_", 369 | ":", "_", 370 | "*", "_", 371 | "?", "_", 372 | "\"", "_", 373 | "<", "_", 374 | ">", "_", 375 | "|", "_", 376 | " ", "_", 377 | ) 378 | return replacer.Replace(input) 379 | } 380 | -------------------------------------------------------------------------------- /internal/preset/preset_test.go: -------------------------------------------------------------------------------- 1 | package preset 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestOutputDirMerging(t *testing.T) { 9 | // Create a temporary file for test config 10 | tmpFile, err := os.CreateTemp("", "presets-*.yaml") 11 | if err != nil { 12 | t.Fatalf("Failed to create temp file: %v", err) 13 | } 14 | defer os.Remove(tmpFile.Name()) 15 | 16 | // Write test presets config 17 | testConfig := `version: 1 18 | default: 19 | output_dir: "/default/output/dir" 20 | private: true 21 | 22 | presets: 23 | with_output_dir: 24 | output_dir: "/preset/output/dir" 25 | source: "TEST" 26 | 27 | without_output_dir: 28 | source: "TEST2" 29 | ` 30 | if err := os.WriteFile(tmpFile.Name(), []byte(testConfig), 0644); err != nil { 31 | t.Fatalf("Failed to write test config: %v", err) 32 | } 33 | 34 | // Load the config 35 | config, 
err := Load(tmpFile.Name()) 36 | if err != nil { 37 | t.Fatalf("Failed to load test config: %v", err) 38 | } 39 | 40 | // Test 1: Preset with its own output_dir should override default 41 | presetWithDir, err := config.GetPreset("with_output_dir") 42 | if err != nil { 43 | t.Fatalf("Failed to get preset: %v", err) 44 | } 45 | 46 | if presetWithDir.OutputDir != "/preset/output/dir" { 47 | t.Errorf("Expected preset output_dir to be '/preset/output/dir', got '%s'", presetWithDir.OutputDir) 48 | } 49 | 50 | // Test 2: Preset without output_dir should inherit from default 51 | presetWithoutDir, err := config.GetPreset("without_output_dir") 52 | if err != nil { 53 | t.Fatalf("Failed to get preset: %v", err) 54 | } 55 | 56 | if presetWithoutDir.OutputDir != "/default/output/dir" { 57 | t.Errorf("Expected preset to inherit default output_dir '/default/output/dir', got '%s'", presetWithoutDir.OutputDir) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /internal/torrent/batch.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "sync" 8 | 9 | "github.com/autobrr/mkbrr/internal/preset" 10 | "gopkg.in/yaml.v3" 11 | ) 12 | 13 | // BatchConfig represents the YAML configuration for batch torrent creation 14 | type BatchConfig struct { 15 | Jobs []BatchJob `yaml:"jobs"` 16 | Version int `yaml:"version"` 17 | } 18 | 19 | // BatchJob represents a single torrent creation job within a batch 20 | type BatchJob struct { 21 | Output string `yaml:"output"` 22 | Path string `yaml:"path"` 23 | Name string `yaml:"-"` 24 | Comment string `yaml:"comment"` 25 | Source string `yaml:"source"` 26 | Trackers []string `yaml:"trackers"` 27 | WebSeeds []string `yaml:"webseeds"` 28 | ExcludePatterns []string `yaml:"exclude_patterns"` 29 | IncludePatterns []string `yaml:"include_patterns"` 30 | PieceLength uint `yaml:"piece_length"` 31 | 
Private bool `yaml:"private"` 32 | NoDate bool `yaml:"no_date"` 33 | SkipPrefix bool `yaml:"skip_prefix"` 34 | } 35 | 36 | // ToCreateOptions converts a BatchJob to CreateTorrentOptions 37 | func (j *BatchJob) ToCreateOptions(verbose bool, quiet bool, version string) CreateTorrentOptions { 38 | var tracker string 39 | if len(j.Trackers) > 0 { 40 | tracker = j.Trackers[0] 41 | } 42 | 43 | opts := CreateTorrentOptions{ 44 | Path: j.Path, 45 | Name: j.Name, 46 | TrackerURL: tracker, 47 | WebSeeds: j.WebSeeds, 48 | IsPrivate: j.Private, 49 | Comment: j.Comment, 50 | Source: j.Source, 51 | NoDate: j.NoDate, 52 | Verbose: verbose, 53 | Quiet: quiet, 54 | Version: version, 55 | SkipPrefix: j.SkipPrefix, 56 | ExcludePatterns: j.ExcludePatterns, 57 | IncludePatterns: j.IncludePatterns, 58 | } 59 | 60 | if j.PieceLength != 0 { 61 | pieceLen := j.PieceLength 62 | opts.PieceLengthExp = &pieceLen 63 | } 64 | 65 | return opts 66 | } 67 | 68 | // BatchResult represents the result of a single job in the batch 69 | type BatchResult struct { 70 | Error error 71 | Info *TorrentInfo 72 | Trackers []string 73 | Job BatchJob 74 | Success bool 75 | } 76 | 77 | // ProcessBatch processes a batch configuration file and creates multiple torrents 78 | func ProcessBatch(configPath string, verbose bool, quiet bool, version string) ([]BatchResult, error) { 79 | data, err := os.ReadFile(configPath) 80 | if err != nil { 81 | return nil, fmt.Errorf("failed to read batch config: %w", err) 82 | } 83 | 84 | var config BatchConfig 85 | if err := yaml.Unmarshal(data, &config); err != nil { 86 | return nil, fmt.Errorf("failed to parse batch config: %w", err) 87 | } 88 | 89 | if config.Version != 1 { 90 | return nil, fmt.Errorf("unsupported batch config version: %d", config.Version) 91 | } 92 | 93 | if len(config.Jobs) == 0 { 94 | return nil, fmt.Errorf("no jobs defined in batch config") 95 | } 96 | 97 | // validate all jobs before processing 98 | for _, job := range config.Jobs { 99 | if err := 
validateJob(job); err != nil { 100 | return nil, fmt.Errorf("invalid job configuration: %w", err) 101 | } 102 | } 103 | 104 | results := make([]BatchResult, len(config.Jobs)) 105 | var wg sync.WaitGroup 106 | 107 | // process jobs in parallel with a worker pool 108 | workers := minInt(len(config.Jobs), 4) // limit concurrent jobs 109 | jobs := make(chan int, len(config.Jobs)) 110 | 111 | // start workers 112 | for i := 0; i < workers; i++ { 113 | wg.Add(1) 114 | go func() { 115 | defer wg.Done() 116 | for idx := range jobs { 117 | results[idx] = processJob(config.Jobs[idx], verbose, quiet, version) 118 | } 119 | }() 120 | } 121 | 122 | // send jobs to workers 123 | for i := range config.Jobs { 124 | jobs <- i 125 | } 126 | close(jobs) 127 | 128 | wg.Wait() 129 | return results, nil 130 | } 131 | 132 | func validateJob(job BatchJob) error { 133 | if job.Path == "" { 134 | return fmt.Errorf("path is required") 135 | } 136 | 137 | if _, err := os.Stat(job.Path); err != nil { 138 | return fmt.Errorf("invalid path %q: %w", job.Path, err) 139 | } 140 | 141 | if job.Output == "" { 142 | return fmt.Errorf("output is required") 143 | } 144 | 145 | if job.PieceLength != 0 && (job.PieceLength < 14 || job.PieceLength > 24) { 146 | return fmt.Errorf("piece length must be between 14 and 24") 147 | } 148 | 149 | return nil 150 | } 151 | 152 | func processJob(job BatchJob, verbose bool, quiet bool, version string) BatchResult { 153 | result := BatchResult{ 154 | Job: job, 155 | Trackers: job.Trackers, 156 | } 157 | 158 | var trackerURL string 159 | if len(job.Trackers) > 0 { 160 | trackerURL = job.Trackers[0] 161 | } 162 | 163 | output := job.Output 164 | if output == "" { 165 | baseName := filepath.Base(filepath.Clean(job.Path)) 166 | 167 | if trackerURL != "" && !job.SkipPrefix { 168 | prefix := preset.GetDomainPrefix(trackerURL) 169 | baseName = prefix + "_" + baseName 170 | } 171 | 172 | output = baseName 173 | } 174 | 175 | // ensure output has .torrent extension 176 | if 
filepath.Ext(output) != ".torrent" { 177 | output += ".torrent" 178 | } 179 | 180 | // convert job to CreateTorrentOptions 181 | opts := job.ToCreateOptions(verbose, quiet, version) 182 | 183 | // create the torrent 184 | mi, err := CreateTorrent(opts) 185 | if err != nil { 186 | result.Error = fmt.Errorf("failed to create torrent: %w", err) 187 | return result 188 | } 189 | 190 | // write the torrent file 191 | f, err := os.Create(output) 192 | if err != nil { 193 | result.Error = fmt.Errorf("failed to create output file: %w", err) 194 | return result 195 | } 196 | defer f.Close() 197 | 198 | if err := mi.Write(f); err != nil { 199 | result.Error = fmt.Errorf("failed to write torrent file: %w", err) 200 | return result 201 | } 202 | 203 | // collect torrent info 204 | info := mi.GetInfo() 205 | result.Success = true 206 | result.Info = &TorrentInfo{ 207 | Path: output, 208 | Size: info.TotalLength(), 209 | InfoHash: mi.HashInfoBytes().String(), 210 | Files: len(info.Files), 211 | } 212 | 213 | return result 214 | } 215 | -------------------------------------------------------------------------------- /internal/torrent/batch_test.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | ) 9 | 10 | func TestProcessBatch(t *testing.T) { 11 | // create a temporary directory for test files 12 | tmpDir, err := os.MkdirTemp("", "mkbrr-batch-test") 13 | if err != nil { 14 | t.Fatalf("Failed to create temp dir: %v", err) 15 | } 16 | defer os.RemoveAll(tmpDir) 17 | 18 | // create test files and directories 19 | testFiles := []struct { 20 | path string 21 | content string 22 | }{ 23 | { 24 | path: "file1.txt", 25 | content: "test file 1 content", 26 | }, 27 | { 28 | path: "dir1/file2.txt", 29 | content: "test file 2 content", 30 | }, 31 | { 32 | path: "dir1/file3.txt", 33 | content: "test file 3 content", 34 | }, 35 | } 36 | 37 | for _, tf := range 
testFiles { 38 | path := filepath.Join(tmpDir, tf.path) 39 | if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 40 | t.Fatalf("Failed to create directory: %v", err) 41 | } 42 | if err := os.WriteFile(path, []byte(tf.content), 0644); err != nil { 43 | t.Fatalf("Failed to write test file: %v", err) 44 | } 45 | } 46 | 47 | // create batch config file 48 | configPath := filepath.Join(tmpDir, "batch.yaml") 49 | configContent := []byte(fmt.Sprintf(`version: 1 50 | jobs: 51 | - output: %s 52 | path: %s 53 | name: "Test File 1" 54 | trackers: 55 | - udp://tracker.example.com:1337/announce 56 | private: true 57 | piece_length: 16 58 | - output: %s 59 | path: %s 60 | name: "Test Directory" 61 | trackers: 62 | - udp://tracker.example.com:1337/announce 63 | webseeds: 64 | - https://example.com/files/ 65 | comment: "Test batch torrent" 66 | `, 67 | filepath.Join(tmpDir, "file1.torrent"), 68 | filepath.Join(tmpDir, "file1.txt"), 69 | filepath.Join(tmpDir, "dir1.torrent"), 70 | filepath.Join(tmpDir, "dir1"))) 71 | 72 | if err := os.WriteFile(configPath, configContent, 0644); err != nil { 73 | t.Fatalf("Failed to write config file: %v", err) 74 | } 75 | 76 | // process batch 77 | results, err := ProcessBatch(configPath, true, false, "test-version") 78 | if err != nil { 79 | t.Fatalf("ProcessBatch failed: %v", err) 80 | } 81 | 82 | // verify results 83 | if len(results) != 2 { 84 | t.Errorf("Expected 2 results, got %d", len(results)) 85 | } 86 | 87 | for i, result := range results { 88 | if !result.Success { 89 | t.Errorf("Job %d failed: %v", i, result.Error) 90 | continue 91 | } 92 | 93 | if result.Info == nil { 94 | t.Errorf("Job %d missing info", i) 95 | continue 96 | } 97 | 98 | // verify torrent files were created 99 | if _, err := os.Stat(result.Info.Path); err != nil { 100 | t.Errorf("Job %d torrent file not created: %v", i, err) 101 | } 102 | 103 | // basic validation of torrent info 104 | if result.Info.InfoHash == "" { 105 | t.Errorf("Job %d missing info hash", 
i) 106 | } 107 | 108 | if result.Info.Size == 0 { 109 | t.Errorf("Job %d has zero size", i) 110 | } 111 | 112 | // check specific job details 113 | switch i { 114 | case 0: // file1.txt 115 | if result.Info.Files != 0 { 116 | t.Errorf("Expected single file torrent, got %d files", result.Info.Files) 117 | } 118 | case 1: // dir1 119 | if result.Info.Files != 2 { 120 | t.Errorf("Expected 2 files in directory torrent, got %d", result.Info.Files) 121 | } 122 | } 123 | } 124 | } 125 | 126 | func TestBatchValidation(t *testing.T) { 127 | tests := []struct { 128 | name string 129 | config string 130 | expectError bool 131 | }{ 132 | { 133 | name: "invalid version", 134 | config: `version: 2 135 | jobs: 136 | - output: test.torrent 137 | path: test.txt`, 138 | expectError: true, 139 | }, 140 | { 141 | name: "missing path", 142 | config: `version: 1 143 | jobs: 144 | - output: test.torrent`, 145 | expectError: true, 146 | }, 147 | { 148 | name: "missing output", 149 | config: `version: 1 150 | jobs: 151 | - path: test.txt`, 152 | expectError: true, 153 | }, 154 | { 155 | name: "invalid piece length", 156 | config: `version: 1 157 | jobs: 158 | - output: test.torrent 159 | path: test.txt 160 | piece_length: 25`, 161 | expectError: true, 162 | }, 163 | { 164 | name: "empty jobs", 165 | config: `version: 1 166 | jobs: []`, 167 | expectError: true, 168 | }, 169 | } 170 | 171 | for _, tt := range tests { 172 | t.Run(tt.name, func(t *testing.T) { 173 | tmpDir, err := os.MkdirTemp("", "mkbrr-batch-validation") 174 | if err != nil { 175 | t.Fatalf("Failed to create temp dir: %v", err) 176 | } 177 | defer os.RemoveAll(tmpDir) 178 | 179 | configPath := filepath.Join(tmpDir, "batch.yaml") 180 | if err := os.WriteFile(configPath, []byte(tt.config), 0644); err != nil { 181 | t.Fatalf("Failed to write config file: %v", err) 182 | } 183 | 184 | _, err = ProcessBatch(configPath, false, false, "test-version") 185 | if tt.expectError && err == nil { 186 | t.Error("Expected error but got 
nil") 187 | } 188 | if !tt.expectError && err != nil { 189 | t.Errorf("Unexpected error: %v", err) 190 | } 191 | }) 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /internal/torrent/create.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "sort" 9 | "strings" 10 | "time" 11 | 12 | "github.com/anacrolix/torrent/bencode" 13 | "github.com/anacrolix/torrent/metainfo" 14 | 15 | "github.com/autobrr/mkbrr/internal/preset" 16 | "github.com/autobrr/mkbrr/internal/trackers" 17 | ) 18 | 19 | // max returns the larger of x or y 20 | func max(x, y int64) int64 { 21 | if x > y { 22 | return x 23 | } 24 | return y 25 | } 26 | 27 | // formatPieceSize returns a human readable piece size, using KiB for sizes < 1024 KiB and MiB for larger sizes 28 | func formatPieceSize(exp uint) string { 29 | size := uint64(1) << (exp - 10) // convert to KiB 30 | if size >= 1024 { 31 | return fmt.Sprintf("%d MiB", size/1024) 32 | } 33 | return fmt.Sprintf("%d KiB", size) 34 | } 35 | 36 | // calculatePieceLength calculates the optimal piece length based on total size. 
37 | // The min/max bounds (2^16 to 2^24) take precedence over other constraints 38 | func calculatePieceLength(totalSize int64, maxPieceLength *uint, trackerURL string, verbose bool) uint { 39 | minExp := uint(16) 40 | maxExp := uint(24) // default max 16 MiB for automatic calculation, can be overridden up to 2^27 41 | 42 | // check if tracker has a maximum piece length constraint 43 | if trackerURL != "" { 44 | if trackerMaxExp, ok := trackers.GetTrackerMaxPieceLength(trackerURL); ok { 45 | maxExp = trackerMaxExp 46 | } 47 | 48 | // check if tracker has specific piece size ranges 49 | if exp, ok := trackers.GetTrackerPieceSizeExp(trackerURL, uint64(totalSize)); ok { 50 | // ensure we stay within bounds 51 | if exp < minExp { 52 | exp = minExp 53 | } 54 | if exp > maxExp { 55 | exp = maxExp 56 | } 57 | if verbose { 58 | display := NewDisplay(NewFormatter(verbose)) 59 | display.ShowMessage(fmt.Sprintf("using tracker-specific range for content size: %d MiB (recommended: %s pieces)", 60 | totalSize>>20, formatPieceSize(exp))) 61 | } 62 | return exp 63 | } 64 | } 65 | 66 | // validate maxPieceLength - if it's below minimum, use minimum 67 | if maxPieceLength != nil { 68 | if *maxPieceLength < minExp { 69 | return minExp 70 | } 71 | if *maxPieceLength > 27 { 72 | maxExp = 27 73 | } else { 74 | maxExp = *maxPieceLength 75 | } 76 | } 77 | 78 | // default calculation for automatic piece length 79 | // ensure minimum of 1 byte for calculation 80 | size := max(totalSize, 1) 81 | 82 | var exp uint 83 | switch { 84 | case size <= 64<<20: // 0 to 64 MB: 32 KiB pieces (2^15) 85 | exp = 15 86 | case size <= 128<<20: // 64-128 MB: 64 KiB pieces (2^16) 87 | exp = 16 88 | case size <= 256<<20: // 128-256 MB: 128 KiB pieces (2^17) 89 | exp = 17 90 | case size <= 512<<20: // 256-512 MB: 256 KiB pieces (2^18) 91 | exp = 18 92 | case size <= 1024<<20: // 512 MB-1 GB: 512 KiB pieces (2^19) 93 | exp = 19 94 | case size <= 2048<<20: // 1-2 GB: 1 MiB pieces (2^20) 95 | exp = 20 96 | case 
size <= 4096<<20: // 2-4 GB: 2 MiB pieces (2^21) 97 | exp = 21 98 | case size <= 8192<<20: // 4-8 GB: 4 MiB pieces (2^22) 99 | exp = 22 100 | case size <= 16384<<20: // 8-16 GB: 8 MiB pieces (2^23) 101 | exp = 23 102 | case size <= 32768<<20: // 16-32 GB: 16 MiB pieces (2^24) 103 | exp = 24 104 | case size <= 65536<<20: // 32-64 GB: 32 MiB pieces (2^25) 105 | exp = 25 106 | case size <= 131072<<20: // 64-128 GB: 64 MiB pieces (2^26) 107 | exp = 26 108 | default: // above 128 GB: 128 MiB pieces (2^27) 109 | exp = 27 110 | } 111 | 112 | // if no manual piece length was specified, cap at 2^24 113 | if maxPieceLength == nil && exp > 24 { 114 | exp = 24 115 | } 116 | 117 | // ensure we stay within bounds 118 | if exp > maxExp { 119 | exp = maxExp 120 | } 121 | 122 | return exp 123 | } 124 | 125 | func (t *Torrent) GetInfo() *metainfo.Info { 126 | info := &metainfo.Info{} 127 | _ = bencode.Unmarshal(t.InfoBytes, info) 128 | return info 129 | } 130 | 131 | func generateRandomString() (string, error) { 132 | b := make([]byte, 32) 133 | if _, err := rand.Read(b); err != nil { 134 | return "", err 135 | } 136 | return fmt.Sprintf("%x", b), nil 137 | } 138 | 139 | func CreateTorrent(opts CreateTorrentOptions) (*Torrent, error) { 140 | path := filepath.ToSlash(opts.Path) 141 | name := opts.Name 142 | if name == "" { 143 | // preserve the folder name even for single-file torrents 144 | name = filepath.Base(filepath.Clean(path)) 145 | } 146 | 147 | mi := &metainfo.MetaInfo{ 148 | Announce: opts.TrackerURL, 149 | Comment: opts.Comment, 150 | } 151 | 152 | if !opts.NoCreator { 153 | mi.CreatedBy = fmt.Sprintf("mkbrr/%s (https://github.com/autobrr/mkbrr)", opts.Version) 154 | } 155 | 156 | if !opts.NoDate { 157 | mi.CreationDate = time.Now().Unix() 158 | } 159 | 160 | files := make([]fileEntry, 0, 1) 161 | var totalSize int64 162 | var baseDir string 163 | originalPaths := make(map[string]string) // map resolved path -> original path for metainfo 164 | 165 | err := 
filepath.Walk(path, func(currentPath string, walkInfo os.FileInfo, walkErr error) error { 166 | if walkErr != nil { 167 | // check if the error is due to a broken symlink during walk 168 | // if lstat works but stat fails, it's likely a broken link we might handle later 169 | if _, lerr := os.Lstat(currentPath); lerr == nil { 170 | // we can lstat it, maybe it's a broken link we can ignore? 171 | // for now, let's return the original error to maintain behavior. 172 | // consider adding verbose logging here if needed. 173 | } 174 | return walkErr 175 | } 176 | 177 | lstatInfo, err := os.Lstat(currentPath) 178 | if err != nil { 179 | fmt.Fprintf(os.Stderr, "Warning: could not lstat %q: %v\n", currentPath, err) 180 | return nil 181 | } 182 | 183 | resolvedPath := currentPath 184 | resolvedInfo := lstatInfo 185 | 186 | // check if it's a symlink 187 | if lstatInfo.Mode()&os.ModeSymlink != 0 { 188 | linkTarget, err := os.Readlink(currentPath) 189 | if err != nil { 190 | fmt.Fprintf(os.Stderr, "Warning: could not readlink %q: %v\n", currentPath, err) 191 | return nil 192 | } 193 | // if link is relative, resolve it based on the link's directory 194 | if !filepath.IsAbs(linkTarget) { 195 | linkTarget = filepath.Join(filepath.Dir(currentPath), linkTarget) 196 | } 197 | resolvedPath = filepath.Clean(linkTarget) 198 | 199 | // stat target 200 | statInfo, err := os.Stat(resolvedPath) 201 | if err != nil { 202 | fmt.Fprintf(os.Stderr, "Warning: could not stat symlink target %q for link %q: %v\n", resolvedPath, currentPath, err) 203 | return nil // skip broken link or inaccessible target 204 | } 205 | resolvedInfo = statInfo 206 | } 207 | 208 | if resolvedInfo.IsDir() { 209 | if baseDir == "" && currentPath == path { // only set baseDir for the initial path if it's a dir 210 | baseDir = currentPath 211 | } 212 | return nil 213 | } 214 | 215 | // it's a file (or a link pointing to one) 216 | shouldIgnore, err := shouldIgnoreFile(currentPath, opts.ExcludePatterns, 
opts.IncludePatterns) // ignore based on original path 217 | if err != nil { 218 | return fmt.Errorf("error processing file patterns for %q: %w", currentPath, err) 219 | } 220 | if shouldIgnore { 221 | return nil 222 | } 223 | 224 | // add the file using the resolved path for hashing, but store the original path for metainfo 225 | files = append(files, fileEntry{ 226 | path: resolvedPath, // use the actual content path for hashing 227 | length: resolvedInfo.Size(), 228 | offset: totalSize, 229 | }) 230 | originalPaths[resolvedPath] = currentPath 231 | totalSize += resolvedInfo.Size() 232 | return nil 233 | }) 234 | if err != nil { 235 | return nil, fmt.Errorf("error walking path: %w", err) 236 | } 237 | 238 | // sort files to ensure consistent order 239 | sort.Slice(files, func(i, j int) bool { 240 | return files[i].path < files[j].path 241 | }) 242 | 243 | // recalculate offsets based on the sorted file order 244 | // context: https://github.com/autobrr/mkbrr/issues/64 245 | var currentOffset int64 = 0 246 | for i := range files { 247 | files[i].offset = currentOffset 248 | currentOffset += files[i].length 249 | } 250 | 251 | if totalSize == 0 { 252 | return nil, fmt.Errorf("input path %q contains no files or only empty files, cannot create torrent", path) 253 | } 254 | 255 | // Function to create torrent with given piece length 256 | createWithPieceLength := func(pieceLength uint) (*Torrent, error) { 257 | pieceLenInt := int64(1) << pieceLength 258 | numPieces := (totalSize + pieceLenInt - 1) / pieceLenInt 259 | 260 | display := NewDisplay(NewFormatter(opts.Verbose)) 261 | display.SetQuiet(opts.Quiet) 262 | 263 | var pieceHashes [][]byte 264 | hasher := NewPieceHasher(files, pieceLenInt, int(numPieces), display) 265 | // Pass the specified or default worker count from opts 266 | if err := hasher.hashPieces(opts.Workers); err != nil { 267 | return nil, fmt.Errorf("error hashing pieces: %w", err) 268 | } 269 | pieceHashes = hasher.pieces 270 | 271 | info := 
&metainfo.Info{ 272 | Name: name, 273 | PieceLength: pieceLenInt, 274 | Private: &opts.IsPrivate, 275 | } 276 | 277 | if opts.Source != "" { 278 | info.Source = opts.Source 279 | } 280 | 281 | info.Pieces = make([]byte, len(pieceHashes)*20) 282 | for i, piece := range pieceHashes { 283 | copy(info.Pieces[i*20:], piece) 284 | } 285 | 286 | if len(files) == 1 { 287 | // check if the input path is a directory 288 | pathInfo, err := os.Stat(path) 289 | if err != nil { 290 | return nil, fmt.Errorf("error checking path: %w", err) 291 | } 292 | 293 | if pathInfo.IsDir() { 294 | // if it's a directory, use the folder structure even for single files 295 | info.Files = make([]metainfo.FileInfo, 1) 296 | // Use the original path for calculating relative path in metainfo 297 | originalFilepath := originalPaths[files[0].path] 298 | if originalFilepath == "" { 299 | originalFilepath = files[0].path // Fallback if mapping missing 300 | } 301 | relPath, _ := filepath.Rel(baseDir, originalFilepath) 302 | pathComponents := strings.Split(filepath.ToSlash(relPath), "/") // Ensure forward slashes 303 | info.Files[0] = metainfo.FileInfo{ 304 | Path: pathComponents, 305 | Length: files[0].length, // Length comes from resolved file 306 | } 307 | } else { 308 | // if it's a single file directly, use the simple format 309 | info.Length = files[0].length 310 | } 311 | } else { 312 | info.Files = make([]metainfo.FileInfo, len(files)) 313 | for i, f := range files { 314 | // Use the original path for calculating relative path in metainfo 315 | originalFilepath := originalPaths[f.path] 316 | if originalFilepath == "" { 317 | originalFilepath = f.path // Fallback if mapping missing 318 | } 319 | relPath, _ := filepath.Rel(baseDir, originalFilepath) 320 | pathComponents := strings.Split(filepath.ToSlash(relPath), "/") // Ensure forward slashes 321 | info.Files[i] = metainfo.FileInfo{ 322 | Path: pathComponents, 323 | Length: f.length, // Length comes from resolved file 324 | } 325 | } 326 | } 327 
| 328 | infoBytes, err := bencode.Marshal(info) 329 | if err != nil { 330 | return nil, fmt.Errorf("error encoding info: %w", err) 331 | } 332 | 333 | // add random entropy field for cross-seeding if enabled 334 | if opts.Entropy { 335 | infoMap := make(map[string]interface{}) 336 | if err := bencode.Unmarshal(infoBytes, &infoMap); err == nil { 337 | if entropy, err := generateRandomString(); err == nil { 338 | infoMap["entropy"] = entropy 339 | if infoBytes, err = bencode.Marshal(infoMap); err == nil { 340 | mi.InfoBytes = infoBytes 341 | } 342 | } 343 | } 344 | } else { 345 | mi.InfoBytes = infoBytes 346 | } 347 | 348 | if len(opts.WebSeeds) > 0 { 349 | mi.UrlList = opts.WebSeeds 350 | } 351 | 352 | return &Torrent{mi}, nil 353 | } 354 | 355 | var pieceLength uint 356 | if opts.PieceLengthExp == nil { 357 | if opts.MaxPieceLength != nil { 358 | // Get tracker's max piece length if available 359 | maxExp := uint(27) // absolute max 128 MiB 360 | if trackerMaxExp, ok := trackers.GetTrackerMaxPieceLength(opts.TrackerURL); ok { 361 | maxExp = trackerMaxExp 362 | } 363 | 364 | if *opts.MaxPieceLength < 14 || *opts.MaxPieceLength > maxExp { 365 | return nil, fmt.Errorf("max piece length exponent must be between 14 (16 KiB) and %d (%d MiB), got: %d", 366 | maxExp, 1<<(maxExp-20), *opts.MaxPieceLength) 367 | } 368 | } 369 | pieceLength = calculatePieceLength(totalSize, opts.MaxPieceLength, opts.TrackerURL, opts.Verbose) 370 | } else { 371 | pieceLength = *opts.PieceLengthExp 372 | 373 | // Get tracker's max piece length if available 374 | maxExp := uint(27) // absolute max 128 MiB 375 | if trackerMaxExp, ok := trackers.GetTrackerMaxPieceLength(opts.TrackerURL); ok { 376 | maxExp = trackerMaxExp 377 | } 378 | 379 | if pieceLength < 16 || pieceLength > maxExp { 380 | if opts.TrackerURL != "" { 381 | return nil, fmt.Errorf("piece length exponent must be between 16 (64 KiB) and %d (%d MiB) for %s, got: %d", 382 | maxExp, 1<<(maxExp-20), opts.TrackerURL, pieceLength) 383 | } 
384 | return nil, fmt.Errorf("piece length exponent must be between 16 (64 KiB) and %d (%d MiB), got: %d", 385 | maxExp, 1<<(maxExp-20), pieceLength) 386 | } 387 | 388 | // If we have a tracker with specific ranges, show that we're using them and check if piece length matches 389 | if exp, ok := trackers.GetTrackerPieceSizeExp(opts.TrackerURL, uint64(totalSize)); ok { 390 | if opts.Verbose { 391 | display := NewDisplay(NewFormatter(opts.Verbose)) 392 | display.SetQuiet(opts.Quiet) 393 | display.ShowMessage(fmt.Sprintf("using tracker-specific range for content size: %d MiB (recommended: %s pieces)", 394 | totalSize>>20, formatPieceSize(exp))) 395 | fmt.Fprintln(display.output) 396 | if pieceLength != exp { 397 | display.ShowWarning(fmt.Sprintf("custom piece length %s differs from recommendation", 398 | formatPieceSize(pieceLength))) 399 | } 400 | } 401 | } 402 | } 403 | 404 | // Check for tracker size limits and adjust piece length if needed 405 | if maxSize, ok := trackers.GetTrackerMaxTorrentSize(opts.TrackerURL); ok { 406 | // Try creating the torrent with initial piece length 407 | t, err := createWithPieceLength(pieceLength) 408 | if err != nil { 409 | return nil, err 410 | } 411 | 412 | // Check if it exceeds size limit 413 | torrentData, err := bencode.Marshal(t.MetaInfo) 414 | if err != nil { 415 | return nil, fmt.Errorf("error marshaling torrent data: %w", err) 416 | } 417 | 418 | // If it exceeds limit, try increasing piece length until it fits or we hit max 419 | for uint64(len(torrentData)) > maxSize && pieceLength < 24 { 420 | if opts.Verbose { 421 | display := NewDisplay(NewFormatter(opts.Verbose)) 422 | display.SetQuiet(opts.Quiet) 423 | display.ShowWarning(fmt.Sprintf("increasing piece length to reduce torrent size (current: %.1f KiB, limit: %.1f KiB)", 424 | float64(len(torrentData))/(1<<10), float64(maxSize)/(1<<10))) 425 | } 426 | 427 | pieceLength++ 428 | t, err = createWithPieceLength(pieceLength) 429 | if err != nil { 430 | return nil, err 431 
| } 432 | 433 | torrentData, err = bencode.Marshal(t.MetaInfo) 434 | if err != nil { 435 | return nil, fmt.Errorf("error marshaling torrent data: %w", err) 436 | } 437 | } 438 | 439 | if uint64(len(torrentData)) > maxSize { 440 | return nil, fmt.Errorf("unable to create torrent under size limit (%.1f KiB) even with maximum piece length", 441 | float64(maxSize)/(1<<10)) 442 | } 443 | 444 | return t, nil 445 | } 446 | 447 | // No size limit, just create with original piece length 448 | return createWithPieceLength(pieceLength) 449 | } 450 | 451 | // Create creates a new torrent file with the given options 452 | func Create(opts CreateTorrentOptions) (*TorrentInfo, error) { 453 | // validate input path 454 | if _, err := os.Stat(opts.Path); err != nil { 455 | return nil, fmt.Errorf("invalid path %q: %w", opts.Path, err) 456 | } 457 | 458 | // set name if not provided 459 | if opts.Name == "" { 460 | opts.Name = filepath.Base(filepath.Clean(opts.Path)) 461 | } 462 | 463 | fileName := opts.Name 464 | if opts.TrackerURL != "" && !opts.SkipPrefix { 465 | fileName = preset.GetDomainPrefix(opts.TrackerURL) + "_" + opts.Name 466 | } 467 | 468 | if opts.OutputDir != "" { 469 | opts.OutputPath = filepath.Join(opts.OutputDir, fileName+".torrent") 470 | } else if opts.OutputPath == "" { 471 | opts.OutputPath = fileName + ".torrent" 472 | } else if !strings.HasSuffix(opts.OutputPath, ".torrent") { 473 | opts.OutputPath = opts.OutputPath + ".torrent" 474 | } 475 | 476 | if opts.OutputDir != "" { 477 | if err := os.MkdirAll(opts.OutputDir, 0755); err != nil { 478 | return nil, fmt.Errorf("error creating output directory %q: %w", opts.OutputDir, err) 479 | } 480 | } 481 | 482 | // create torrent 483 | t, err := CreateTorrent(opts) 484 | if err != nil { 485 | return nil, err 486 | } 487 | 488 | // create output file 489 | f, err := os.Create(opts.OutputPath) 490 | if err != nil { 491 | return nil, fmt.Errorf("error creating output file: %w", err) 492 | } 493 | defer f.Close() 494 | 
495 | // write torrent file 496 | if err := t.Write(f); err != nil { 497 | return nil, fmt.Errorf("error writing torrent file: %w", err) 498 | } 499 | 500 | // get info for display 501 | info := t.GetInfo() 502 | 503 | // create torrent info for return 504 | torrentInfo := &TorrentInfo{ 505 | Path: opts.OutputPath, 506 | Size: info.Length, 507 | InfoHash: t.MetaInfo.HashInfoBytes().String(), 508 | Files: len(info.Files), 509 | Announce: opts.TrackerURL, 510 | } 511 | 512 | // display info if verbose 513 | if opts.Verbose { 514 | display := NewDisplay(NewFormatter(opts.Verbose)) 515 | display.ShowTorrentInfo(t, info) 516 | //if len(info.Files) > 0 { 517 | //display.ShowFileTree(info) 518 | //} 519 | } 520 | 521 | return torrentInfo, nil 522 | } 523 | -------------------------------------------------------------------------------- /internal/torrent/create_test.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "os" 7 | "path/filepath" 8 | "reflect" 9 | "runtime" 10 | "testing" 11 | 12 | "github.com/anacrolix/torrent/metainfo" 13 | 14 | "github.com/autobrr/mkbrr/internal/preset" 15 | ) 16 | 17 | func Test_calculatePieceLength(t *testing.T) { 18 | tests := []struct { 19 | name string 20 | totalSize int64 21 | maxPieceLength *uint 22 | trackerURL string 23 | want uint 24 | wantPieces *uint // expected number of pieces (approximate) 25 | }{ 26 | { 27 | name: "small file should use minimum piece length", 28 | totalSize: 1 << 10, // 1 KiB 29 | want: 15, // 32 KiB pieces 30 | }, 31 | { 32 | name: "63MB file should use 32KiB pieces", 33 | totalSize: 63 << 20, 34 | want: 15, // 32 KiB pieces 35 | }, 36 | { 37 | name: "65MB file should use 64KiB pieces", 38 | totalSize: 65 << 20, 39 | want: 16, // 64 KiB pieces 40 | }, 41 | { 42 | name: "129MB file should use 128KiB pieces", 43 | totalSize: 129 << 20, 44 | want: 17, // 128 KiB pieces 45 | }, 46 | { 47 | name: "257MB file 
should use 256KiB pieces", 48 | totalSize: 257 << 20, 49 | want: 18, // 256 KiB pieces 50 | }, 51 | { 52 | name: "513MB file should use 512KiB pieces", 53 | totalSize: 513 << 20, 54 | want: 19, // 512 KiB pieces 55 | }, 56 | { 57 | name: "1.1GB file should use 1MiB pieces", 58 | totalSize: 1100 << 20, 59 | want: 20, // 1 MiB pieces 60 | }, 61 | { 62 | name: "2.1GB file should use 2MiB pieces", 63 | totalSize: 2100 << 20, 64 | want: 21, // 2 MiB pieces 65 | }, 66 | { 67 | name: "4.1GB file should use 4MiB pieces", 68 | totalSize: 4100 << 20, 69 | want: 22, // 4 MiB pieces 70 | }, 71 | { 72 | name: "8.1GB file should use 8MiB pieces", 73 | totalSize: 8200 << 20, 74 | want: 23, // 8 MiB pieces 75 | }, 76 | { 77 | name: "16.1GB file should use 16MiB pieces", 78 | totalSize: 16500 << 20, 79 | want: 24, // 16 MiB pieces 80 | }, 81 | { 82 | name: "256.1GB file should use 16MiB pieces by default", 83 | totalSize: 256100 << 20, // 256.1 GB 84 | want: 24, // 16 MiB pieces 85 | }, 86 | { 87 | name: "emp should respect max piece length of 2^23", 88 | totalSize: 100 << 30, // 100 GiB 89 | trackerURL: "https://empornium.sx/announce?passkey=123", 90 | want: 23, // limited to 8 MiB pieces 91 | }, 92 | { 93 | name: "unknown tracker should use default calculation", 94 | totalSize: 10 << 30, // 10 GiB 95 | trackerURL: "https://unknown.tracker.com/announce", 96 | want: 23, // 8 MiB pieces 97 | }, 98 | } 99 | 100 | for _, tt := range tests { 101 | t.Run(tt.name, func(t *testing.T) { 102 | got := calculatePieceLength(tt.totalSize, tt.maxPieceLength, tt.trackerURL, false) 103 | if got != tt.want { 104 | t.Errorf("calculatePieceLength() = %v, want %v", got, tt.want) 105 | } 106 | 107 | // verify the piece count is within reasonable bounds when targeting pieces 108 | if tt.wantPieces != nil { 109 | pieceLen := int64(1) << got 110 | pieces := (tt.totalSize + pieceLen - 1) / pieceLen 111 | 112 | // verify we're within 10% of expected piece count 113 | ratio := float64(pieces) / 
float64(*tt.wantPieces) 114 | if ratio < 0.9 || ratio > 1.1 { 115 | t.Errorf("pieces count too far from expected: got %v pieces, expected %v (ratio %.2f)", 116 | pieces, *tt.wantPieces, ratio) 117 | } 118 | } 119 | }) 120 | } 121 | } 122 | 123 | func TestCreateTorrent_Symlink(t *testing.T) { 124 | // Skip symlink tests on Windows as it requires special privileges 125 | if runtime.GOOS == "windows" { 126 | t.Skip("Skipping symlink test on Windows") 127 | } 128 | 129 | // 1. Setup temporary directory structure 130 | tmpDir, err := os.MkdirTemp("", "mkbrr-symlink-test") 131 | if err != nil { 132 | t.Fatalf("Failed to create temp dir: %v", err) 133 | } 134 | defer os.RemoveAll(tmpDir) 135 | 136 | // Create real content directory and file 137 | realContentDir := filepath.Join(tmpDir, "real_content") 138 | if err := os.Mkdir(realContentDir, 0755); err != nil { 139 | t.Fatalf("Failed to create real_content dir: %v", err) 140 | } 141 | realFilePath := filepath.Join(realContentDir, "file.txt") 142 | fileContent := []byte("This is the actual content of the file.") 143 | if err := os.WriteFile(realFilePath, fileContent, 0644); err != nil { 144 | t.Fatalf("Failed to write real file: %v", err) 145 | } 146 | realFileInfo, _ := os.Stat(realFilePath) // Get real file size 147 | 148 | // Create directory to contain the symlink 149 | linkDir := filepath.Join(tmpDir, "link_dir") 150 | if err := os.Mkdir(linkDir, 0755); err != nil { 151 | t.Fatalf("Failed to create link_dir: %v", err) 152 | } 153 | 154 | // Create the symlink pointing to the real file (relative path) 155 | linkPath := filepath.Join(linkDir, "link_to_file.txt") 156 | linkTarget := "../real_content/file.txt" 157 | if err := os.Symlink(linkTarget, linkPath); err != nil { 158 | t.Fatalf("Failed to create symlink: %v", err) 159 | } 160 | 161 | // 2. 
Create Torrent Options 162 | pieceLenExp := uint(16) // 64 KiB pieces 163 | opts := CreateTorrentOptions{ 164 | Path: linkDir, // Create torrent from the directory containing the link 165 | OutputPath: filepath.Join(tmpDir, "symlink_test.torrent"), 166 | IsPrivate: true, 167 | NoCreator: true, 168 | NoDate: true, 169 | PieceLengthExp: &pieceLenExp, 170 | } 171 | 172 | // 3. Create the torrent 173 | createdTorrentInfo, err := Create(opts) 174 | if err != nil { 175 | t.Fatalf("Create() failed: %v", err) 176 | } 177 | 178 | // 4. Verification 179 | // Load the created torrent file 180 | mi, err := metainfo.LoadFromFile(createdTorrentInfo.Path) 181 | if err != nil { 182 | t.Fatalf("Failed to load created torrent file %q: %v", createdTorrentInfo.Path, err) 183 | } 184 | 185 | info, err := mi.UnmarshalInfo() 186 | if err != nil { 187 | t.Fatalf("Failed to unmarshal info from created torrent: %v", err) 188 | } 189 | 190 | // Verify torrent structure (should contain the link name, not the target name) 191 | if len(info.Files) != 1 { 192 | t.Fatalf("Expected 1 file in torrent info, got %d", len(info.Files)) 193 | } 194 | expectedPathInTorrent := []string{"link_to_file.txt"} 195 | if !reflect.DeepEqual(info.Files[0].Path, expectedPathInTorrent) { 196 | t.Errorf("Expected file path in torrent %v, got %v", expectedPathInTorrent, info.Files[0].Path) 197 | } 198 | 199 | // Verify file length matches the *target* file's length 200 | if info.Files[0].Length != realFileInfo.Size() { 201 | t.Errorf("Expected file length %d (target size), got %d", realFileInfo.Size(), info.Files[0].Length) 202 | } 203 | 204 | // Verify piece hash matches the *target* file's content 205 | pieceLen := int64(1 << pieceLenExp) 206 | numPieces := (realFileInfo.Size() + pieceLen - 1) / pieceLen 207 | if int(numPieces) != len(info.Pieces)/20 { 208 | t.Fatalf("Piece count mismatch: expected %d, got %d pieces in torrent", numPieces, len(info.Pieces)/20) 209 | } 210 | 211 | // Since the content is smaller than 
the piece size, the hash is just the hash of the content 212 | // padded with zeros to the piece length if necessary (though CreateTorrent handles this internally). 213 | // For simplicity here, we hash just the content as it fits in one piece. 214 | hasher := sha1.New() 215 | hasher.Write(fileContent) 216 | expectedHash := hasher.Sum(nil) 217 | 218 | // The actual piece hash in the torrent might be padded if piece length > content length. 219 | // We need to compare against the actual hash stored. 220 | actualPieceHash := info.Pieces[:20] // Get the first (and only) piece hash 221 | 222 | if !bytes.Equal(actualPieceHash, expectedHash) { 223 | t.Errorf("Piece hash mismatch:\nExpected: %x\nGot: %x", expectedHash, actualPieceHash) 224 | } 225 | 226 | t.Logf("Symlink test successful: Torrent created from %q, correctly referencing content from %q", linkDir, realFilePath) 227 | } 228 | 229 | func TestCreateTorrent_OutputDirPriority(t *testing.T) { 230 | // Setup temporary directories for test 231 | tmpDir, err := os.MkdirTemp("", "mkbrr-create-test") 232 | if err != nil { 233 | t.Fatalf("Failed to create temp dir: %v", err) 234 | } 235 | defer os.RemoveAll(tmpDir) 236 | 237 | // Create a non-empty file in the temp directory for the torrent content 238 | dummyFilePath := filepath.Join(tmpDir, "dummy.txt") 239 | if err := os.WriteFile(dummyFilePath, []byte("test content"), 0644); err != nil { 240 | t.Fatalf("Failed to create dummy file: %v", err) 241 | } 242 | 243 | // Create preset file 244 | presetDir := filepath.Join(tmpDir, "presets") 245 | if err := os.Mkdir(presetDir, 0755); err != nil { 246 | t.Fatalf("Failed to create presets dir: %v", err) 247 | } 248 | presetPath := filepath.Join(presetDir, "presets.yaml") 249 | presetConfig := `version: 1 250 | presets: 251 | test: 252 | output_dir: "` + filepath.ToSlash(filepath.Join(tmpDir, "preset_output")) + `" 253 | private: true 254 | source: "TEST" 255 | ` 256 | if err := os.WriteFile(presetPath, []byte(presetConfig), 
0644); err != nil { 257 | t.Fatalf("Failed to write preset config: %v", err) 258 | } 259 | 260 | // Create the output directories 261 | cmdLineOutputDir := filepath.Join(tmpDir, "cmdline_output") 262 | presetOutputDir := filepath.Join(tmpDir, "preset_output") 263 | if err := os.Mkdir(cmdLineOutputDir, 0755); err != nil { 264 | t.Fatalf("Failed to create cmdline output dir: %v", err) 265 | } 266 | if err := os.Mkdir(presetOutputDir, 0755); err != nil { 267 | t.Fatalf("Failed to create preset output dir: %v", err) 268 | } 269 | 270 | // Test cases 271 | tests := []struct { 272 | name string 273 | opts CreateTorrentOptions 274 | expectedOutDir string 275 | }{ 276 | { 277 | name: "Command-line OutputDir should take precedence", 278 | opts: CreateTorrentOptions{ 279 | Path: tmpDir, 280 | OutputDir: cmdLineOutputDir, 281 | IsPrivate: true, 282 | NoDate: true, 283 | NoCreator: true, 284 | }, 285 | expectedOutDir: cmdLineOutputDir, 286 | }, 287 | { 288 | name: "Preset OutputDir should be used when no command-line OutputDir", 289 | opts: CreateTorrentOptions{ 290 | Path: tmpDir, 291 | OutputDir: "", // empty to simulate preset usage 292 | IsPrivate: true, 293 | NoDate: true, 294 | NoCreator: true, 295 | }, 296 | expectedOutDir: presetOutputDir, 297 | }, 298 | } 299 | 300 | for _, tt := range tests { 301 | t.Run(tt.name, func(t *testing.T) { 302 | // For the preset test case, we need to simulate the preset loading 303 | if tt.name == "Preset OutputDir should be used when no command-line OutputDir" { 304 | // Load preset options and apply them 305 | presetOpts, err := preset.LoadPresetOptions(presetPath, "test") 306 | if err != nil { 307 | t.Fatalf("Failed to load preset options: %v", err) 308 | } 309 | 310 | // Apply preset OutputDir if command-line OutputDir is empty 311 | if tt.opts.OutputDir == "" && presetOpts.OutputDir != "" { 312 | tt.opts.OutputDir = presetOpts.OutputDir 313 | } 314 | } 315 | 316 | result, err := Create(tt.opts) 317 | if err != nil { 318 | 
t.Fatalf("Create failed: %v", err) 319 | } 320 | 321 | // Verify the output path contains the expected directory 322 | dir := filepath.Dir(result.Path) 323 | if dir != tt.expectedOutDir { 324 | t.Errorf("Expected output directory %q, got %q", tt.expectedOutDir, dir) 325 | } 326 | 327 | // Verify the file was actually created in the expected directory 328 | if _, err := os.Stat(result.Path); os.IsNotExist(err) { 329 | t.Errorf("Output file wasn't created at expected path: %s", result.Path) 330 | } 331 | }) 332 | } 333 | } 334 | -------------------------------------------------------------------------------- /internal/torrent/display.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "log" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | "time" 11 | 12 | "github.com/anacrolix/torrent/metainfo" 13 | humanize "github.com/dustin/go-humanize" 14 | "github.com/fatih/color" 15 | progressbar "github.com/schollz/progressbar/v3" 16 | ) 17 | 18 | type Display struct { 19 | output io.Writer 20 | formatter *Formatter 21 | bar *progressbar.ProgressBar 22 | isBatch bool 23 | quiet bool 24 | } 25 | 26 | func NewDisplay(formatter *Formatter) *Display { 27 | return &Display{ 28 | formatter: formatter, 29 | quiet: false, 30 | output: os.Stdout, 31 | } 32 | } 33 | 34 | // SetQuiet enables/disables quiet mode (output redirected to io.Discard) 35 | func (d *Display) SetQuiet(quiet bool) { 36 | d.quiet = quiet 37 | if quiet { 38 | d.output = io.Discard 39 | } else { 40 | d.output = os.Stdout 41 | } 42 | } 43 | 44 | func (d *Display) ShowProgress(total int) { 45 | // Progress bar needs explicit quiet check because it writes directly to the terminal, 46 | // bypassing our d.output writer 47 | if d.quiet { 48 | return 49 | } 50 | fmt.Fprintln(d.output) 51 | d.bar = progressbar.NewOptions(total, 52 | progressbar.OptionEnableColorCodes(true), 53 | progressbar.OptionSetDescription("[cyan][bold]Hashing 
pieces...[reset]"), 54 | progressbar.OptionSetTheme(progressbar.Theme{ 55 | Saucer: "[green]=[reset]", 56 | SaucerHead: "[green]>[reset]", 57 | SaucerPadding: " ", 58 | BarStart: "[", 59 | BarEnd: "]", 60 | }), 61 | ) 62 | } 63 | 64 | func (d *Display) UpdateProgress(completed int, hashrate float64) { 65 | // Progress bar needs explicit quiet check because it writes directly to the terminal, 66 | // bypassing our d.output writer 67 | if d.isBatch || d.quiet { 68 | return 69 | } 70 | if d.bar != nil { 71 | if err := d.bar.Set(completed); err != nil { 72 | log.Printf("failed to update progress bar: %v", err) 73 | } 74 | 75 | if hashrate > 0 { 76 | hrStr := d.formatter.FormatBytes(int64(hashrate)) 77 | description := fmt.Sprintf("[cyan][bold]Hashing pieces...[reset] [%s/s]", hrStr) 78 | d.bar.Describe(description) 79 | } 80 | } 81 | } 82 | 83 | // ShowFiles displays the list of files being processed and the number of workers used. 84 | func (d *Display) ShowFiles(files []fileEntry, numWorkers int) { 85 | if d.quiet { 86 | return 87 | } 88 | 89 | workerMsg := fmt.Sprintf("Using %d worker(s)", numWorkers) 90 | if numWorkers == 0 { 91 | workerMsg = "Using automatic worker count" 92 | } 93 | fmt.Fprintf(d.output, "\n%s %s\n", label("Concurrency:"), workerMsg) 94 | 95 | if !d.formatter.verbose && len(files) > 20 { 96 | fmt.Fprintf(d.output, "%s suppressed file output (limit 20, found %d), use --verbose to show all\n", yellow("Note:"), len(files)) 97 | fmt.Fprintf(d.output, "%s\n", magenta("Files being processed:")) 98 | return 99 | } 100 | fmt.Fprintf(d.output, "\n%s\n", magenta("Files being hashed:")) 101 | if len(files) > 0 { 102 | topDir := filepath.Dir(files[0].path) 103 | fmt.Fprintf(d.output, "%s %s\n", "└─", success(filepath.Base(topDir))) 104 | } 105 | for i, file := range files { 106 | prefix := " ├─" 107 | if i == len(files)-1 { 108 | prefix = " └─" 109 | } 110 | fmt.Fprintf(d.output, "%s %s (%s)\n", 111 | prefix, 112 | success(filepath.Base(file.path)), 113 | 
label(d.formatter.FormatBytes(file.length))) 114 | } 115 | } 116 | 117 | func (d *Display) FinishProgress() { 118 | // Progress bar needs explicit quiet check because it writes directly to the terminal, 119 | // bypassing our d.output writer 120 | if d.quiet { 121 | return 122 | } 123 | if d.bar != nil { 124 | if err := d.bar.Finish(); err != nil { 125 | log.Printf("failed to finish progress bar: %v", err) 126 | } 127 | fmt.Fprintln(d.output) 128 | } 129 | } 130 | 131 | func (d *Display) IsBatch() bool { 132 | return d.isBatch 133 | } 134 | 135 | func (d *Display) SetBatch(isBatch bool) { 136 | d.isBatch = isBatch 137 | } 138 | 139 | var ( 140 | magenta = color.New(color.FgMagenta).SprintFunc() 141 | //green = color.New(color.FgGreen).SprintFunc() 142 | yellow = color.New(color.FgYellow).SprintFunc() 143 | success = color.New(color.FgGreen).SprintFunc() 144 | label = color.New(color.FgCyan).SprintFunc() 145 | highlight = color.New(color.FgHiWhite).SprintFunc() 146 | errorColor = color.New(color.FgRed).SprintFunc() 147 | white = fmt.Sprint 148 | ) 149 | 150 | func (d *Display) ShowMessage(msg string) { 151 | fmt.Fprintf(d.output, "%s %s\n", success("\nInfo:"), msg) 152 | } 153 | 154 | func (d *Display) ShowError(msg string) { 155 | fmt.Fprintln(d.output, errorColor(msg)) 156 | } 157 | 158 | func (d *Display) ShowWarning(msg string) { 159 | fmt.Fprintf(d.output, "%s %s\n", yellow("Warning:"), msg) 160 | } 161 | 162 | func (d *Display) ShowTorrentInfo(t *Torrent, info *metainfo.Info) { 163 | fmt.Fprintf(d.output, "\n%s\n", magenta("Torrent info:")) 164 | fmt.Fprintf(d.output, " %-13s %s\n", label("Name:"), info.Name) 165 | fmt.Fprintf(d.output, " %-13s %s\n", label("Hash:"), t.HashInfoBytes()) 166 | fmt.Fprintf(d.output, " %-13s %s\n", label("Size:"), d.formatter.FormatBytes(info.TotalLength())) 167 | fmt.Fprintf(d.output, " %-13s %s\n", label("Piece length:"), d.formatter.FormatBytes(info.PieceLength)) 168 | fmt.Fprintf(d.output, " %-13s %d\n", label("Pieces:"), 
len(info.Pieces)/20) 169 | 170 | if t.AnnounceList != nil { 171 | fmt.Fprintf(d.output, " %-13s\n", label("Trackers:")) 172 | for _, tier := range t.AnnounceList { 173 | for _, tracker := range tier { 174 | fmt.Fprintf(d.output, " %s\n", success(tracker)) 175 | } 176 | } 177 | } else if t.Announce != "" { 178 | fmt.Fprintf(d.output, " %-13s %s\n", label("Tracker:"), success(t.Announce)) 179 | } 180 | 181 | if len(t.UrlList) > 0 { 182 | fmt.Fprintf(d.output, " %-13s\n", label("Web seeds:")) 183 | for _, seed := range t.UrlList { 184 | fmt.Fprintf(d.output, " %s\n", highlight(seed)) 185 | } 186 | } 187 | 188 | if info.Private != nil && *info.Private { 189 | fmt.Fprintf(d.output, " %-13s %s\n", label("Private:"), "yes") 190 | } 191 | 192 | if info.Source != "" { 193 | fmt.Fprintf(d.output, " %-13s %s\n", label("Source:"), info.Source) 194 | } 195 | 196 | if t.Comment != "" { 197 | fmt.Fprintf(d.output, " %-13s %s\n", label("Comment:"), t.Comment) 198 | } 199 | 200 | if t.CreatedBy != "" { 201 | fmt.Fprintf(d.output, " %-13s %s\n", label("Created by:"), t.CreatedBy) 202 | } 203 | 204 | if t.CreationDate != 0 { 205 | creationTime := time.Unix(t.CreationDate, 0) 206 | fmt.Fprintf(d.output, " %-13s %s\n", label("Created on:"), creationTime.Format("2006-01-02 15:04:05 MST")) 207 | } 208 | 209 | if len(info.Files) > 0 { 210 | fmt.Fprintf(d.output, " %-13s %d\n", label("Files:"), len(info.Files)) 211 | } 212 | 213 | fmt.Fprintln(d.output) 214 | 215 | } 216 | 217 | // ShowFileTree displays the file structure of a multi-file torrent 218 | // The decision to show the tree is now handled in cmd/inspect.go 219 | func (d *Display) ShowFileTree(info *metainfo.Info) { 220 | fmt.Fprintf(d.output, "%s\n", magenta("File tree:")) 221 | fmt.Fprintf(d.output, "%s %s\n", "└─", success(info.Name)) 222 | for i, file := range info.Files { 223 | prefix := " ├─" 224 | if i == len(info.Files)-1 { 225 | prefix = " └─" 226 | } 227 | fmt.Fprintf(d.output, "%s %s (%s)\n", 228 | prefix, 229 | 
success(filepath.Join(file.Path...)), 230 | label(d.formatter.FormatBytes(file.Length))) 231 | } 232 | fmt.Fprintln(d.output) 233 | } 234 | 235 | func (d *Display) ShowOutputPathWithTime(path string, duration time.Duration) { 236 | if !d.formatter.verbose { 237 | fmt.Fprintln(d.output) 238 | } 239 | if duration < time.Second { 240 | fmt.Fprintf(d.output, "%s %s (%s)\n", 241 | success("Wrote"), 242 | white(path), 243 | magenta(fmt.Sprintf("elapsed %dms", duration.Milliseconds()))) 244 | } else { 245 | fmt.Fprintf(d.output, "%s %s (%s)\n", 246 | success("Wrote"), 247 | white(path), 248 | magenta(fmt.Sprintf("elapsed %.2fs", duration.Seconds()))) 249 | } 250 | } 251 | 252 | func (d *Display) ShowBatchResults(results []BatchResult, duration time.Duration) { 253 | fmt.Fprintf(d.output, "\n%s\n", magenta("Batch processing results:")) 254 | 255 | successful := 0 256 | failed := 0 257 | totalSize := int64(0) 258 | 259 | for _, result := range results { 260 | if result.Success { 261 | successful++ 262 | if result.Info != nil { 263 | totalSize += result.Info.Size 264 | } 265 | } else { 266 | failed++ 267 | } 268 | } 269 | 270 | fmt.Fprintf(d.output, " %-15s %d\n", label("Total jobs:"), len(results)) 271 | fmt.Fprintf(d.output, " %-15s %s\n", label("Successful:"), success(successful)) 272 | fmt.Fprintf(d.output, " %-15s %s\n", label("Failed:"), errorColor(failed)) 273 | fmt.Fprintf(d.output, " %-15s %s\n", label("Total size:"), d.formatter.FormatBytes(totalSize)) 274 | fmt.Fprintf(d.output, " %-15s %s\n", label("Processing time:"), d.formatter.FormatDuration(duration)) 275 | 276 | if d.formatter.verbose { 277 | fmt.Fprintf(d.output, "\n%s\n", magenta("Detailed results:")) 278 | for i, result := range results { 279 | fmt.Fprintf(d.output, "\n%s %d:\n", label("Job"), i+1) 280 | if result.Success { 281 | fmt.Fprintf(d.output, " %-11s %s\n", label("Status:"), success("Success")) 282 | fmt.Fprintf(d.output, " %-11s %s\n", label("Output:"), result.Info.Path) 283 | 
fmt.Fprintf(d.output, " %-11s %s\n", label("Size:"), d.formatter.FormatBytes(result.Info.Size)) 284 | fmt.Fprintf(d.output, " %-11s %s\n", label("Info hash:"), result.Info.InfoHash) 285 | fmt.Fprintf(d.output, " %-11s %s\n", label("Trackers:"), strings.Join(result.Trackers, ", ")) 286 | if result.Info.Files > 0 { 287 | fmt.Fprintf(d.output, " %-11s %d\n", label("Files:"), result.Info.Files) 288 | } 289 | } else { 290 | fmt.Fprintf(d.output, " %-11s %s\n", label("Status:"), errorColor("Failed")) 291 | fmt.Fprintf(d.output, " %-11s %v\n", label("Error:"), result.Error) 292 | fmt.Fprintf(d.output, " %-11s %s\n", label("Input:"), result.Job.Path) 293 | } 294 | } 295 | } 296 | } 297 | 298 | type Formatter struct { 299 | verbose bool 300 | } 301 | 302 | func NewFormatter(verbose bool) *Formatter { 303 | return &Formatter{verbose: verbose} 304 | } 305 | 306 | func (f *Formatter) FormatBytes(bytes int64) string { 307 | return humanize.IBytes(uint64(bytes)) 308 | } 309 | 310 | func (f *Formatter) FormatDuration(dur time.Duration) string { 311 | if dur < time.Second { 312 | return fmt.Sprintf("%dms", dur.Milliseconds()) 313 | } 314 | return humanize.RelTime(time.Now().Add(-dur), time.Now(), "", "") 315 | } 316 | 317 | func (d *Display) ShowSeasonPackWarnings(info *SeasonPackInfo) { 318 | if !info.IsSeasonPack { 319 | return 320 | } 321 | 322 | if len(info.MissingEpisodes) > 0 { 323 | fmt.Fprintf(d.output, "\n%s %s\n", yellow("Warning:"), "Possible incomplete season pack detected") 324 | fmt.Fprintf(d.output, " %-13s %d\n", label("Season number:"), info.Season) 325 | fmt.Fprintf(d.output, " %-13s %d\n", label("Highest episode number found:"), info.MaxEpisode) 326 | fmt.Fprintf(d.output, " %-13s %d\n", label("Episodes found:"), len(info.Episodes)) 327 | 328 | missingStrs := make([]string, len(info.MissingEpisodes)) 329 | for i, ep := range info.MissingEpisodes { 330 | missingStrs[i] = fmt.Sprintf("episode %d", ep) 331 | } 332 | fmt.Fprintf(d.output, " %-13s %s\n", 
label("Missing:"), strings.Join(missingStrs, ", ")) 333 | 334 | fmt.Fprintln(d.output, yellow("\nThis may be an incomplete season pack. Check files before uploading.")) 335 | } 336 | } 337 | 338 | // ShowVerificationResult displays the results of a torrent verification check 339 | func (d *Display) ShowVerificationResult(result *VerificationResult, duration time.Duration) { 340 | fmt.Fprintf(d.output, "\n%s\n", magenta("Verification results:")) 341 | 342 | completionStr := fmt.Sprintf("%.2f%%", result.Completion) 343 | fmt.Fprintf(d.output, " %-15s %s (%d/%d pieces)\n", label("Completion:"), success(completionStr), result.GoodPieces, result.TotalPieces) 344 | 345 | if result.BadPieces > 0 { 346 | fmt.Fprintf(d.output, " %-15s %s\n", label("Bad pieces:"), errorColor(result.BadPieces)) 347 | if d.formatter.verbose && len(result.BadPieceIndices) > 0 { 348 | maxIndicesToShow := 20 349 | indicesStr := make([]string, 0, len(result.BadPieceIndices)) 350 | for i, idx := range result.BadPieceIndices { 351 | if i >= maxIndicesToShow { 352 | indicesStr = append(indicesStr, "...") 353 | break 354 | } 355 | indicesStr = append(indicesStr, fmt.Sprintf("%d", idx)) 356 | } 357 | fmt.Fprintf(d.output, " %s %s\n", label("Indices:"), strings.Join(indicesStr, ", ")) 358 | } 359 | } 360 | 361 | if len(result.MissingFiles) > 0 { 362 | fmt.Fprintf(d.output, " %-15s %s\n", label("Missing files:"), errorColor(len(result.MissingFiles))) 363 | if d.formatter.verbose { 364 | maxFilesToShow := 10 365 | for i, file := range result.MissingFiles { 366 | if i >= maxFilesToShow { 367 | fmt.Fprintf(d.output, " %s ...and %d more\n", errorColor("└─"), len(result.MissingFiles)-maxFilesToShow) 368 | break 369 | } 370 | prefix := " ├─" 371 | if i == len(result.MissingFiles)-1 || i == maxFilesToShow-1 { 372 | prefix = " └─" 373 | } 374 | fmt.Fprintf(d.output, " %s %s\n", errorColor(prefix), file) 375 | } 376 | } 377 | } 378 | 379 | fmt.Fprintf(d.output, " %-15s %s\n", label("Check time:"), 
d.formatter.FormatDuration(duration)) 380 | } 381 | -------------------------------------------------------------------------------- /internal/torrent/hasher.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "crypto/sha1" 5 | "fmt" 6 | "io" 7 | "os" 8 | "runtime" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | type pieceHasher struct { 15 | startTime time.Time 16 | lastUpdate time.Time 17 | display Displayer 18 | bufferPool *sync.Pool 19 | pieces [][]byte 20 | files []fileEntry 21 | pieceLen int64 22 | numPieces int 23 | readSize int 24 | 25 | bytesProcessed int64 26 | mutex sync.RWMutex 27 | } 28 | 29 | // optimizeForWorkload determines optimal read buffer size and number of worker goroutines 30 | // based on the characteristics of input files (size and count). It considers: 31 | // - single vs multiple files 32 | // - average file size 33 | // - system CPU count 34 | // returns readSize (buffer size for reading) and numWorkers (concurrent goroutines) 35 | func (h *pieceHasher) optimizeForWorkload() (int, int) { 36 | if len(h.files) == 0 { 37 | return 0, 0 38 | } 39 | 40 | // calculate total and maximum file sizes for optimization decisions 41 | var totalSize int64 42 | maxFileSize := int64(0) 43 | for _, f := range h.files { 44 | totalSize += f.length 45 | if f.length > maxFileSize { 46 | maxFileSize = f.length 47 | } 48 | } 49 | avgFileSize := totalSize / int64(len(h.files)) 50 | 51 | var readSize, numWorkers int 52 | 53 | // optimize buffer size and worker count based on file characteristics 54 | switch { 55 | case len(h.files) == 1: 56 | if totalSize < 1<<20 { 57 | readSize = 64 << 10 // 64 KiB for very small files 58 | numWorkers = 1 59 | } else if totalSize < 1<<30 { // < 1 GiB 60 | readSize = 4 << 20 // 4 MiB 61 | numWorkers = runtime.NumCPU() 62 | } else { 63 | readSize = 8 << 20 // 8 MiB for large files 64 | numWorkers = runtime.NumCPU() * 2 // over-subscription for 
better I/O utilization 65 | } 66 | case avgFileSize < 1<<20: // avg < 1 MiB 67 | readSize = 256 << 10 // 256 KiB 68 | numWorkers = runtime.NumCPU() 69 | case avgFileSize < 10<<20: // avg < 10 MiB 70 | readSize = 1 << 20 // 1 MiB 71 | numWorkers = runtime.NumCPU() 72 | case avgFileSize < 1<<30: // avg < 1 GiB 73 | readSize = 4 << 20 // 4 MiB 74 | numWorkers = runtime.NumCPU() * 2 75 | default: // avg >= 1 GiB 76 | readSize = 8 << 20 // 8 MiB 77 | numWorkers = runtime.NumCPU() * 2 78 | } 79 | 80 | // ensure we don't create more workers than pieces to process 81 | if numWorkers > h.numPieces { 82 | numWorkers = h.numPieces 83 | } 84 | return readSize, numWorkers 85 | } 86 | 87 | // hashPieces coordinates the parallel hashing of all pieces in the torrent. 88 | // It initializes a buffer pool, creates worker goroutines, and manages progress tracking. 89 | // The pieces are distributed evenly across the specified number of workers. 90 | // Returns an error if any worker encounters issues during hashing. 91 | func (h *pieceHasher) hashPieces(numWorkers int) error { 92 | // Determine readSize and numWorkers. Use optimizeForWorkload if numWorkers isn't specified. 
93 | if numWorkers <= 0 { 94 | h.readSize, numWorkers = h.optimizeForWorkload() 95 | } else { 96 | // If workers are specified, still need to determine readSize 97 | h.readSize, _ = h.optimizeForWorkload() // Only need readSize here 98 | // Ensure specified workers don't exceed pieces or minimum of 1 99 | if numWorkers > h.numPieces { 100 | numWorkers = h.numPieces 101 | } 102 | // Ensure at least 1 worker if pieces exist, even if user specified 0 somehow 103 | if h.numPieces > 0 && numWorkers <= 0 { 104 | numWorkers = 1 105 | } 106 | } 107 | 108 | // Final safeguard: Ensure at least one worker if there are pieces 109 | if h.numPieces > 0 && numWorkers <= 0 { 110 | numWorkers = 1 111 | } 112 | 113 | if numWorkers == 0 { 114 | // no workers needed, possibly no pieces to hash 115 | h.display.ShowProgress(0) 116 | h.display.FinishProgress() 117 | return nil 118 | } 119 | 120 | // initialize buffer pool 121 | h.bufferPool = &sync.Pool{ 122 | New: func() interface{} { 123 | buf := make([]byte, h.readSize) 124 | return buf 125 | }, 126 | } 127 | 128 | h.mutex.Lock() 129 | h.startTime = time.Now() 130 | h.lastUpdate = h.startTime 131 | h.mutex.Unlock() 132 | h.bytesProcessed = 0 133 | 134 | h.display.ShowFiles(h.files, numWorkers) 135 | 136 | seasonInfo := AnalyzeSeasonPack(h.files) 137 | 138 | h.display.ShowSeasonPackWarnings(seasonInfo) 139 | 140 | var completedPieces uint64 141 | piecesPerWorker := (h.numPieces + numWorkers - 1) / numWorkers 142 | errorsCh := make(chan error, numWorkers) 143 | 144 | h.display.ShowProgress(h.numPieces) 145 | 146 | // spawn worker goroutines to process piece ranges in parallel 147 | var wg sync.WaitGroup 148 | for i := 0; i < numWorkers; i++ { 149 | start := i * piecesPerWorker 150 | end := start + piecesPerWorker 151 | if end > h.numPieces { 152 | end = h.numPieces 153 | } 154 | 155 | wg.Add(1) 156 | go func(startPiece, endPiece int) { 157 | defer wg.Done() 158 | if err := h.hashPieceRange(startPiece, endPiece, &completedPieces); err != 
nil { 159 | errorsCh <- err 160 | } 161 | }(start, end) 162 | } 163 | 164 | // monitor and update progress bar in separate goroutine 165 | go func() { 166 | for { 167 | completed := atomic.LoadUint64(&completedPieces) 168 | if completed >= uint64(h.numPieces) { 169 | break 170 | } 171 | 172 | bytesProcessed := atomic.LoadInt64(&h.bytesProcessed) 173 | h.mutex.RLock() 174 | elapsed := time.Since(h.startTime).Seconds() 175 | h.mutex.RUnlock() 176 | var hashrate float64 177 | if elapsed > 0 { 178 | hashrate = float64(bytesProcessed) / elapsed 179 | } 180 | 181 | h.display.UpdateProgress(int(completed), hashrate) 182 | time.Sleep(200 * time.Millisecond) 183 | } 184 | }() 185 | 186 | wg.Wait() 187 | close(errorsCh) 188 | 189 | for err := range errorsCh { 190 | if err != nil { 191 | return err 192 | } 193 | } 194 | 195 | h.display.FinishProgress() 196 | return nil 197 | } 198 | 199 | // hashPieceRange processes and hashes a specific range of pieces assigned to a worker. 200 | // It handles: 201 | // - reading from multiple files that may span piece boundaries 202 | // - maintaining file positions and readers 203 | // - calculating SHA1 hashes for each piece 204 | // - updating progress through the completedPieces counter 205 | // Parameters: 206 | // 207 | // startPiece: first piece index to process 208 | // endPiece: last piece index to process (exclusive) 209 | // completedPieces: atomic counter for progress tracking 210 | func (h *pieceHasher) hashPieceRange(startPiece, endPiece int, completedPieces *uint64) error { 211 | // reuse buffer from pool to minimize allocations 212 | buf := h.bufferPool.Get().([]byte) 213 | defer h.bufferPool.Put(buf) 214 | 215 | hasher := sha1.New() 216 | // track open file handles to avoid reopening the same file 217 | readers := make(map[string]*fileReader) 218 | defer func() { 219 | for _, r := range readers { 220 | r.file.Close() 221 | } 222 | }() 223 | 224 | for pieceIndex := startPiece; pieceIndex < endPiece; pieceIndex++ { 225 | 
pieceOffset := int64(pieceIndex) * h.pieceLen 226 | pieceLength := h.pieceLen 227 | 228 | // handle last piece which may be shorter than others 229 | if pieceIndex == h.numPieces-1 { 230 | var totalLength int64 231 | for _, f := range h.files { 232 | totalLength += f.length 233 | } 234 | remaining := totalLength - pieceOffset 235 | if remaining < pieceLength { 236 | pieceLength = remaining 237 | } 238 | } 239 | 240 | hasher.Reset() 241 | remainingPiece := pieceLength 242 | 243 | for _, file := range h.files { 244 | // skip files that don't contain data for this piece 245 | if pieceOffset >= file.offset+file.length { 246 | continue 247 | } 248 | if remainingPiece <= 0 { 249 | break 250 | } 251 | 252 | // calculate read boundaries within the current file 253 | readStart := pieceOffset - file.offset 254 | if readStart < 0 { 255 | readStart = 0 256 | } 257 | 258 | readLength := file.length - readStart 259 | if readLength > remainingPiece { 260 | readLength = remainingPiece 261 | } 262 | 263 | // reuse or create new file reader 264 | reader, ok := readers[file.path] 265 | if !ok { 266 | f, err := os.OpenFile(file.path, os.O_RDONLY, 0) 267 | if err != nil { 268 | return fmt.Errorf("failed to open file %s: %w", file.path, err) 269 | } 270 | reader = &fileReader{ 271 | file: f, 272 | position: 0, 273 | length: file.length, 274 | } 275 | readers[file.path] = reader 276 | } 277 | 278 | // ensure correct file position before reading 279 | if reader.position != readStart { 280 | if _, err := reader.file.Seek(readStart, 0); err != nil { 281 | return fmt.Errorf("failed to seek in file %s: %w", file.path, err) 282 | } 283 | reader.position = readStart 284 | } 285 | 286 | // read file data in chunks to avoid large memory allocations 287 | remaining := readLength 288 | for remaining > 0 { 289 | n := int(remaining) 290 | if n > len(buf) { 291 | n = len(buf) 292 | } 293 | 294 | read, err := io.ReadFull(reader.file, buf[:n]) 295 | if err != nil && err != io.ErrUnexpectedEOF { 296 | 
return fmt.Errorf("failed to read file %s: %w", file.path, err) 297 | } 298 | 299 | hasher.Write(buf[:read]) 300 | remaining -= int64(read) 301 | remainingPiece -= int64(read) 302 | reader.position += int64(read) 303 | pieceOffset += int64(read) 304 | 305 | atomic.AddInt64(&h.bytesProcessed, int64(read)) 306 | } 307 | } 308 | 309 | // store piece hash and update progress 310 | h.pieces[pieceIndex] = hasher.Sum(nil) 311 | atomic.AddUint64(completedPieces, 1) 312 | } 313 | 314 | return nil 315 | } 316 | 317 | func NewPieceHasher(files []fileEntry, pieceLen int64, numPieces int, display Displayer) *pieceHasher { 318 | bufferPool := &sync.Pool{ 319 | New: func() interface{} { 320 | buf := make([]byte, pieceLen) 321 | return buf 322 | }, 323 | } 324 | return &pieceHasher{ 325 | pieces: make([][]byte, numPieces), 326 | pieceLen: pieceLen, 327 | numPieces: numPieces, 328 | files: files, 329 | display: display, 330 | bufferPool: bufferPool, 331 | } 332 | } 333 | 334 | // minInt returns the smaller of two integers 335 | func minInt(a, b int) int { 336 | if a < b { 337 | return a 338 | } 339 | return b 340 | } 341 | -------------------------------------------------------------------------------- /internal/torrent/hasher_large_test.go: -------------------------------------------------------------------------------- 1 | //go:build large_tests 2 | // +build large_tests 3 | 4 | package torrent 5 | 6 | import ( 7 | "fmt" 8 | "runtime" 9 | "testing" 10 | ) 11 | 12 | // TestPieceHasher_LargeFiles tests the hasher with large file scenarios. 13 | // These tests are skipped by default and in CI due to their resource requirements. 
14 | // Run with: go test -v -tags=large_tests 15 | func TestPieceHasher_LargeFiles(t *testing.T) { 16 | if runtime.GOOS == "windows" { 17 | t.Skip("skipping large file tests on Windows") 18 | } 19 | 20 | if testing.Short() { 21 | t.Skip("skipping large file tests") 22 | } 23 | 24 | tests := []struct { 25 | name string 26 | numFiles int 27 | fileSize int64 28 | pieceLen int64 29 | numPieces int 30 | }{ 31 | { 32 | name: "4k movie remux", 33 | numFiles: 1, 34 | fileSize: 64 << 30, // 64GB 35 | pieceLen: 1 << 24, // 16MB - PTP recommendation for large remuxes 36 | numPieces: 4096, 37 | }, 38 | { 39 | name: "1080p season pack", 40 | numFiles: 10, // 10 episodes 41 | fileSize: 4 << 30, // 4GB per episode (~40GB total) 42 | pieceLen: 1 << 23, // 8MB - better for multi-file packs 43 | numPieces: 5120, 44 | }, 45 | { 46 | name: "game distribution", 47 | numFiles: 100, // Many small files 48 | fileSize: 1 << 28, // 256MB per file (~25GB total) 49 | pieceLen: 1 << 21, // 2MB - better for smaller individual files 50 | numPieces: 12800, 51 | }, 52 | } 53 | 54 | for _, tt := range tests { 55 | t.Run(tt.name, func(t *testing.T) { 56 | files, expectedHashes := createTestFilesFast(t, tt.numFiles, tt.fileSize, tt.pieceLen) 57 | hasher := NewPieceHasher(files, tt.pieceLen, tt.numPieces, &mockDisplay{}) 58 | 59 | // test with different worker counts 60 | workerCounts := []int{1, 2, 4, 8} 61 | for _, workers := range workerCounts { 62 | t.Run(fmt.Sprintf("workers_%d", workers), func(t *testing.T) { 63 | if err := hasher.hashPieces(workers); err != nil { 64 | t.Fatalf("hashPieces failed with %d workers: %v", workers, err) 65 | } 66 | verifyHashes(t, hasher.pieces, expectedHashes) 67 | }) 68 | } 69 | }) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /internal/torrent/ignore.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 
"strings" 7 | ) 8 | 9 | // file patterns to ignore in source directory (case insensitive) - These are always ignored. 10 | var ignoredPatterns = []string{ 11 | ".torrent", 12 | ".ds_store", 13 | "thumbs.db", 14 | "desktop.ini", 15 | "zone.identifier", // https://superuser.com/questions/1692240/auto-generated-zone-identity-files-can-should-i-delete 16 | } 17 | 18 | // shouldIgnoreFile checks if a file should be ignored based on predefined patterns, 19 | // user-defined include patterns, and user-defined exclude patterns (glob matching). 20 | // Logic: 21 | // 1. Check built-in ignored patterns (always ignored). 22 | // 2. If include patterns are provided: 23 | // - Check if the file matches any include pattern. If yes, KEEP the file (return false). 24 | // - If it does not match any include pattern, IGNORE the file (return true). 25 | // 26 | // 3. If NO include patterns are provided: 27 | // - Check if the file matches any exclude pattern. If yes, IGNORE the file (return true). 28 | // 29 | // 4. If none of the above conditions cause the file to be ignored, KEEP the file (return false). 30 | func shouldIgnoreFile(path string, excludePatterns []string, includePatterns []string) (bool, error) { 31 | // 1. Check built-in patterns (always ignored) 32 | lowerPath := strings.ToLower(path) 33 | for _, pattern := range ignoredPatterns { 34 | if strings.HasSuffix(lowerPath, pattern) { 35 | return true, nil 36 | } 37 | } 38 | 39 | filename := filepath.Base(path) 40 | lowerFilename := strings.ToLower(filename) 41 | 42 | // 2. 
Check include patterns if provided 43 | if len(includePatterns) > 0 { 44 | matchesInclude := false 45 | for _, patternGroup := range includePatterns { 46 | for _, pattern := range strings.Split(patternGroup, ",") { 47 | pattern = strings.TrimSpace(pattern) 48 | if pattern == "" { 49 | continue 50 | } 51 | match, err := filepath.Match(strings.ToLower(pattern), lowerFilename) 52 | if err != nil { 53 | return false, fmt.Errorf("invalid include pattern %q: %w", pattern, err) 54 | } 55 | if match { 56 | matchesInclude = true 57 | break 58 | } 59 | } 60 | if matchesInclude { 61 | break 62 | } 63 | } 64 | 65 | if matchesInclude { 66 | return false, nil // Keep the file because it matches an include pattern 67 | } else { 68 | return true, nil // Ignore the file because include patterns were given, but none matched 69 | } 70 | } 71 | 72 | // 3. If NO include patterns were provided, check exclude patterns 73 | if len(excludePatterns) > 0 { 74 | for _, patternGroup := range excludePatterns { 75 | for _, pattern := range strings.Split(patternGroup, ",") { 76 | pattern = strings.TrimSpace(pattern) 77 | if pattern == "" { 78 | continue 79 | } 80 | match, err := filepath.Match(strings.ToLower(pattern), lowerFilename) 81 | if err != nil { 82 | return false, fmt.Errorf("invalid exclude pattern %q: %w", pattern, err) 83 | } 84 | if match { 85 | return true, nil // Ignore if it matches an exclude pattern (and no include patterns were specified) 86 | } 87 | } 88 | } 89 | } 90 | 91 | // 4. 
Keep the file if no ignore conditions were met 92 | return false, nil 93 | } 94 | -------------------------------------------------------------------------------- /internal/torrent/modify.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/anacrolix/torrent/bencode" 9 | "github.com/anacrolix/torrent/metainfo" 10 | 11 | "github.com/autobrr/mkbrr/internal/preset" 12 | ) 13 | 14 | // Options represents the options for modifying a torrent, 15 | // including both preset-related options and flag-based overrides. 16 | type Options struct { 17 | IsPrivate *bool 18 | PieceLengthExp *uint 19 | MaxPieceLength *uint 20 | PresetName string 21 | PresetFile string 22 | OutputDir string 23 | OutputPattern string 24 | TrackerURL string 25 | Comment string 26 | Source string 27 | Version string 28 | WebSeeds []string 29 | NoDate bool 30 | NoCreator bool 31 | DryRun bool 32 | Verbose bool 33 | Quiet bool 34 | Entropy bool 35 | SkipPrefix bool 36 | } 37 | 38 | // Result represents the result of modifying a torrent 39 | type Result struct { 40 | Error error 41 | Path string 42 | OutputPath string 43 | WasModified bool 44 | } 45 | 46 | // LoadFromFile loads a torrent file and returns a Torrent 47 | func LoadFromFile(path string) (*Torrent, error) { 48 | mi, err := metainfo.LoadFromFile(path) 49 | if err != nil { 50 | return nil, fmt.Errorf("could not load torrent: %w", err) 51 | } 52 | return &Torrent{MetaInfo: mi}, nil 53 | } 54 | 55 | // ModifyTorrent modifies a single torrent file according to the given options 56 | func ModifyTorrent(path string, opts Options) (*Result, error) { 57 | result := &Result{ 58 | Path: path, 59 | } 60 | 61 | // load torrent file 62 | mi, err := metainfo.LoadFromFile(path) 63 | if err != nil { 64 | result.Error = fmt.Errorf("could not load torrent: %w", err) 65 | return result, result.Error 66 | } 67 | 68 | // load preset if specified 69 
| var presetOpts *preset.Options 70 | if opts.PresetName != "" { 71 | presetPath, err := preset.FindPresetFile(opts.PresetFile) 72 | if err != nil { 73 | result.Error = fmt.Errorf("could not find preset file: %w", err) 74 | return result, result.Error 75 | } 76 | 77 | presets, err := preset.Load(presetPath) 78 | if err != nil { 79 | result.Error = fmt.Errorf("could not load presets: %w", err) 80 | return result, result.Error 81 | } 82 | 83 | presetOpts, err = presets.GetPreset(opts.PresetName) 84 | if err != nil { 85 | result.Error = fmt.Errorf("could not get preset: %w", err) 86 | return result, result.Error 87 | } 88 | 89 | presetOpts.Version = opts.Version 90 | } 91 | 92 | // apply preset modifications if any 93 | wasModified := false 94 | if presetOpts != nil { 95 | wasModified, err = presetOpts.ApplyToMetaInfo(mi) 96 | if err != nil { 97 | result.Error = fmt.Errorf("could not apply preset: %w", err) 98 | return result, result.Error 99 | } 100 | } 101 | 102 | // apply flag-based overrides: 103 | // update tracker if flag provided 104 | if opts.TrackerURL != "" { 105 | mi.Announce = opts.TrackerURL 106 | mi.AnnounceList = [][]string{{opts.TrackerURL}} 107 | wasModified = true 108 | } 109 | 110 | // update web seeds if provided via flag 111 | if len(opts.WebSeeds) > 0 { 112 | mi.UrlList = opts.WebSeeds 113 | wasModified = true 114 | } 115 | 116 | // update comment if provided via flag 117 | if opts.Comment != "" && mi.Comment != opts.Comment { 118 | mi.Comment = opts.Comment 119 | wasModified = true 120 | } 121 | 122 | // update private flag if provided via flag 123 | if opts.IsPrivate != nil { 124 | info, err := mi.UnmarshalInfo() 125 | if err == nil { 126 | // update only if different 127 | if info.Private == nil || *info.Private != *opts.IsPrivate { 128 | info.Private = opts.IsPrivate 129 | if infoBytes, err := bencode.Marshal(info); err == nil { 130 | mi.InfoBytes = infoBytes 131 | } 132 | wasModified = true 133 | } 134 | } 135 | } 136 | 137 | // update 
source if provided via flag 138 | if opts.Source != "" { 139 | info, err := mi.UnmarshalInfo() 140 | if err == nil { 141 | if info.Source != opts.Source { 142 | info.Source = opts.Source 143 | if infoBytes, err := bencode.Marshal(info); err == nil { 144 | mi.InfoBytes = infoBytes 145 | } 146 | wasModified = true 147 | } 148 | } 149 | } 150 | 151 | // add random entropy field for cross-seeding if enabled 152 | if opts.Entropy { 153 | infoMap := make(map[string]interface{}) 154 | if err := bencode.Unmarshal(mi.InfoBytes, &infoMap); err == nil { 155 | if entropy, err := generateRandomString(); err == nil { 156 | infoMap["entropy"] = entropy 157 | if infoBytes, err := bencode.Marshal(infoMap); err == nil { 158 | mi.InfoBytes = infoBytes 159 | wasModified = true 160 | } 161 | } 162 | } 163 | } 164 | 165 | // handle creator 166 | if presetOpts != nil && presetOpts.NoCreator != nil && *presetOpts.NoCreator || opts.NoCreator { 167 | mi.CreatedBy = "" 168 | wasModified = true 169 | } 170 | 171 | // update creation date based on preset and command line options 172 | if presetOpts != nil && presetOpts.NoDate != nil && *presetOpts.NoDate || opts.NoDate { 173 | mi.CreationDate = 0 174 | wasModified = true 175 | } else { 176 | mi.CreationDate = time.Now().Unix() 177 | wasModified = true 178 | } 179 | 180 | if !wasModified { 181 | return result, nil 182 | } 183 | 184 | if opts.DryRun { 185 | result.WasModified = true 186 | return result, nil 187 | } 188 | 189 | var metaInfoName string 190 | info, err := mi.UnmarshalInfo() 191 | if err == nil { 192 | metaInfoName = info.Name 193 | } 194 | 195 | basePath := path 196 | if opts.OutputPattern == "" && metaInfoName != "" { 197 | basePath = metaInfoName + ".torrent" 198 | } 199 | 200 | // determine output directory: command-line flag takes precedence over preset 201 | outputDir := opts.OutputDir 202 | if outputDir == "" && presetOpts != nil && presetOpts.OutputDir != "" { 203 | outputDir = presetOpts.OutputDir 204 | } 205 | 206 | // 
generate output path using the preset generating helper 207 | outPath := preset.GenerateOutputPath(basePath, outputDir, opts.PresetName, opts.OutputPattern, opts.TrackerURL, metaInfoName, opts.SkipPrefix) 208 | result.OutputPath = outPath 209 | 210 | // ensure output directory exists if specified 211 | if outputDir != "" { 212 | if err := os.MkdirAll(outputDir, 0755); err != nil { 213 | result.Error = fmt.Errorf("could not create output directory: %w", err) 214 | return result, result.Error 215 | } 216 | } 217 | 218 | // save modified torrent file 219 | f, err := os.Create(outPath) 220 | if err != nil { 221 | result.Error = fmt.Errorf("could not create output file: %w", err) 222 | return result, result.Error 223 | } 224 | defer f.Close() 225 | 226 | if err := mi.Write(f); err != nil { 227 | result.Error = fmt.Errorf("could not write output file: %w", err) 228 | return result, result.Error 229 | } 230 | 231 | result.WasModified = true 232 | return result, nil 233 | } 234 | 235 | // ProcessTorrents modifies multiple torrent files according to the given options 236 | func ProcessTorrents(paths []string, opts Options) ([]*Result, error) { 237 | if len(paths) == 0 { 238 | return nil, fmt.Errorf("no torrent files specified") 239 | } 240 | 241 | results := make([]*Result, 0, len(paths)) 242 | for _, path := range paths { 243 | result, err := ModifyTorrent(path, opts) 244 | if err != nil { 245 | // continue processing other files even if one fails 246 | result.Error = err 247 | } 248 | results = append(results, result) 249 | } 250 | 251 | return results, nil 252 | } 253 | -------------------------------------------------------------------------------- /internal/torrent/modify_test.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | ) 8 | 9 | func TestModifyTorrent_OutputDirPriority(t *testing.T) { 10 | // Setup temporary directories for test 11 | tmpDir, err 
:= os.MkdirTemp("", "mkbrr-modify-test") 12 | if err != nil { 13 | t.Fatalf("Failed to create temp dir: %v", err) 14 | } 15 | defer os.RemoveAll(tmpDir) 16 | 17 | // Create a non-empty file in the temp directory for the torrent content 18 | dummyFilePath := filepath.Join(tmpDir, "dummy.txt") 19 | if err := os.WriteFile(dummyFilePath, []byte("test content"), 0644); err != nil { 20 | t.Fatalf("Failed to create dummy file: %v", err) 21 | } 22 | 23 | // Create test torrent file (minimal content for test) 24 | torrentPath := filepath.Join(tmpDir, "test.torrent") 25 | torrent, err := Create(CreateTorrentOptions{ 26 | Path: tmpDir, 27 | OutputPath: torrentPath, 28 | IsPrivate: true, 29 | NoDate: true, 30 | }) 31 | if err != nil { 32 | t.Fatalf("Failed to create test torrent: %v", err) 33 | } 34 | 35 | // Create preset file 36 | presetDir := filepath.Join(tmpDir, "presets") 37 | if err := os.Mkdir(presetDir, 0755); err != nil { 38 | t.Fatalf("Failed to create presets dir: %v", err) 39 | } 40 | presetPath := filepath.Join(presetDir, "presets.yaml") 41 | presetConfig := `version: 1 42 | presets: 43 | test: 44 | output_dir: "` + filepath.ToSlash(filepath.Join(tmpDir, "preset_output")) + `" 45 | private: true 46 | source: "TEST" 47 | ` 48 | if err := os.WriteFile(presetPath, []byte(presetConfig), 0644); err != nil { 49 | t.Fatalf("Failed to write preset config: %v", err) 50 | } 51 | 52 | // Create the output directories 53 | cmdLineOutputDir := filepath.Join(tmpDir, "cmdline_output") 54 | presetOutputDir := filepath.Join(tmpDir, "preset_output") 55 | if err := os.Mkdir(cmdLineOutputDir, 0755); err != nil { 56 | t.Fatalf("Failed to create cmdline output dir: %v", err) 57 | } 58 | if err := os.Mkdir(presetOutputDir, 0755); err != nil { 59 | t.Fatalf("Failed to create preset output dir: %v", err) 60 | } 61 | 62 | // Test cases 63 | tests := []struct { 64 | name string 65 | opts Options 66 | expectedOutDir string 67 | }{ 68 | { 69 | name: "Command-line OutputDir should take 
precedence", 70 | opts: Options{ 71 | PresetName: "test", 72 | PresetFile: presetPath, 73 | OutputDir: cmdLineOutputDir, 74 | Version: "test", 75 | }, 76 | expectedOutDir: cmdLineOutputDir, 77 | }, 78 | { 79 | name: "Preset OutputDir should be used when no command-line OutputDir", 80 | opts: Options{ 81 | PresetName: "test", 82 | PresetFile: presetPath, 83 | OutputDir: "", // empty to use preset 84 | Version: "test", 85 | }, 86 | expectedOutDir: presetOutputDir, 87 | }, 88 | } 89 | 90 | for _, tt := range tests { 91 | t.Run(tt.name, func(t *testing.T) { 92 | result, err := ModifyTorrent(torrent.Path, tt.opts) 93 | if err != nil { 94 | t.Fatalf("ModifyTorrent failed: %v", err) 95 | } 96 | 97 | // Verify the output path contains the expected directory 98 | dir := filepath.Dir(result.OutputPath) 99 | if dir != tt.expectedOutDir { 100 | t.Errorf("Expected output directory %q, got %q", tt.expectedOutDir, dir) 101 | } 102 | 103 | // Verify the file was actually created in the expected directory 104 | if _, err := os.Stat(result.OutputPath); os.IsNotExist(err) { 105 | t.Errorf("Output file wasn't created at expected path: %s", result.OutputPath) 106 | } 107 | }) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /internal/torrent/progress.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | // Displayer defines the interface for displaying progress during torrent creation 4 | type Displayer interface { 5 | ShowProgress(total int) 6 | UpdateProgress(completed int, hashrate float64) 7 | ShowFiles(files []fileEntry, numWorkers int) 8 | ShowSeasonPackWarnings(info *SeasonPackInfo) 9 | FinishProgress() 10 | IsBatch() bool 11 | } 12 | -------------------------------------------------------------------------------- /internal/torrent/seasonfinder.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | 
"path/filepath" 5 | "regexp" 6 | "sort" 7 | "strconv" 8 | "strings" 9 | ) 10 | 11 | type SeasonPackInfo struct { 12 | Episodes []int 13 | MissingEpisodes []int 14 | Season int 15 | MaxEpisode int 16 | VideoFileCount int 17 | IsSeasonPack bool 18 | IsSuspicious bool 19 | } 20 | 21 | var seasonPackPatterns = []*regexp.Regexp{ 22 | regexp.MustCompile(`(?i)\.S(\d{1,2})(?:\.|-|_|\s)Complete`), 23 | regexp.MustCompile(`(?i)\.Season\.(\d{1,2})\.`), 24 | regexp.MustCompile(`(?i)\.S(\d{1,2})(?:\.|-|_|\s)*$`), 25 | regexp.MustCompile(`(?i)[-_\s]S(\d{1,2})[-_\s]`), 26 | regexp.MustCompile(`(?i)[/\\]Season\s*(\d{1,2})[/\\]`), 27 | regexp.MustCompile(`(?i)[/\\]S(\d{1,2})[/\\]`), 28 | regexp.MustCompile(`(?i)\.S(\d{1,2})\.(?:\d+p|Complete|COMPLETE)`), 29 | regexp.MustCompile(`(?i)Season\s*(\d{1,2})(?:[/\\]|$)`), 30 | regexp.MustCompile(`(?i)\.S(\d{1,2})$`), 31 | } 32 | 33 | var episodePattern = regexp.MustCompile(`(?i)S\d{1,2}E(\d{1,3})`) 34 | var multiEpisodePattern = regexp.MustCompile(`(?i)S\d{1,2}E(\d{1,3})(?:-E?|E)(\d{1,3})`) 35 | 36 | var videoExtensions = map[string]bool{ 37 | ".mkv": true, 38 | ".mp4": true, 39 | } 40 | 41 | func AnalyzeSeasonPack(files []fileEntry) *SeasonPackInfo { 42 | if len(files) == 0 { 43 | return &SeasonPackInfo{IsSeasonPack: false} 44 | } 45 | 46 | dirPath := filepath.Dir(files[0].path) 47 | season := detectSeasonNumber(dirPath) 48 | 49 | if season == 0 && len(files) > 1 { 50 | for i := 0; i < minInt(5, len(files)); i++ { 51 | if s, _ := extractSeasonEpisode(filepath.Base(files[i].path)); s > 0 { 52 | season = s 53 | break 54 | } 55 | } 56 | } 57 | 58 | if season == 0 { 59 | return &SeasonPackInfo{IsSeasonPack: false} 60 | } 61 | 62 | info := &SeasonPackInfo{ 63 | IsSeasonPack: true, 64 | Season: season, 65 | Episodes: make([]int, 0), 66 | } 67 | 68 | episodeMap := make(map[int]bool) 69 | for _, file := range files { 70 | ext := strings.ToLower(filepath.Ext(file.path)) 71 | if videoExtensions[ext] { 72 | info.VideoFileCount++ 73 | 74 | // check 
for multi-episodes first 75 | multiEps := extractMultiEpisodes(filepath.Base(file.path)) 76 | if len(multiEps) > 0 { 77 | for _, ep := range multiEps { 78 | if ep > 0 { 79 | episodeMap[ep] = true 80 | if ep > info.MaxEpisode { 81 | info.MaxEpisode = ep 82 | } 83 | } 84 | } 85 | } else { 86 | _, episode := extractSeasonEpisode(filepath.Base(file.path)) 87 | if episode > 0 { 88 | episodeMap[episode] = true 89 | if episode > info.MaxEpisode { 90 | info.MaxEpisode = episode 91 | } 92 | } 93 | } 94 | } 95 | } 96 | 97 | for ep := range episodeMap { 98 | if ep > 0 { 99 | info.Episodes = append(info.Episodes, ep) 100 | } 101 | } 102 | sort.Ints(info.Episodes) 103 | 104 | if info.MaxEpisode > 0 { 105 | episodeCount := len(info.Episodes) 106 | 107 | expectedEpisodes := info.MaxEpisode 108 | 109 | info.MissingEpisodes = []int{} 110 | for i := 1; i <= info.MaxEpisode; i++ { 111 | if !episodeMap[i] { 112 | info.MissingEpisodes = append(info.MissingEpisodes, i) 113 | } 114 | } 115 | 116 | if episodeCount < expectedEpisodes { 117 | missingCount := expectedEpisodes - episodeCount 118 | percentMissing := float64(missingCount) / float64(expectedEpisodes) * 100 119 | 120 | if (missingCount >= 3 && info.MaxEpisode >= 7) || percentMissing > 50 { 121 | info.IsSuspicious = true 122 | } 123 | } 124 | } 125 | 126 | return info 127 | } 128 | 129 | func detectSeasonNumber(path string) int { 130 | for _, pattern := range seasonPackPatterns { 131 | matches := pattern.FindStringSubmatch(path) 132 | if len(matches) > 1 { 133 | if season, err := strconv.Atoi(matches[1]); err == nil { 134 | return season 135 | } 136 | } 137 | } 138 | return 0 139 | } 140 | 141 | func extractSeasonEpisode(filename string) (season, episode int) { 142 | epMatches := episodePattern.FindStringSubmatch(filename) 143 | if len(epMatches) > 1 { 144 | episode, _ = strconv.Atoi(epMatches[1]) 145 | } 146 | 147 | seasonPattern := regexp.MustCompile(`(?i)S(\d{1,2})`) 148 | sMatches := seasonPattern.FindStringSubmatch(filename) 
149 | if len(sMatches) > 1 { 150 | season, _ = strconv.Atoi(sMatches[1]) 151 | } 152 | 153 | return season, episode 154 | } 155 | 156 | func extractMultiEpisodes(filename string) []int { 157 | episodes := []int{} 158 | 159 | matches := multiEpisodePattern.FindStringSubmatch(filename) 160 | 161 | if len(matches) > 2 { 162 | start, err1 := strconv.Atoi(matches[1]) 163 | end, err2 := strconv.Atoi(matches[2]) 164 | if err1 == nil && err2 == nil && end >= start { 165 | for i := start; i <= end; i++ { 166 | episodes = append(episodes, i) 167 | } 168 | } 169 | } 170 | 171 | return episodes 172 | } 173 | -------------------------------------------------------------------------------- /internal/torrent/seasonfinder_test.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "path/filepath" 5 | "testing" 6 | ) 7 | 8 | func TestDetectSeasonNumber(t *testing.T) { 9 | tests := []struct { 10 | path string 11 | expected int 12 | }{ 13 | {filepath.Join("/test", "Dexter.Original.Sin.S01.1080p"), 1}, 14 | {filepath.Join("/test", "Show.Name.S02.Complete"), 2}, 15 | {filepath.Join("/test", "Some.Show.Season.03.1080p"), 3}, 16 | {filepath.Join("/test", "My.Show.S04"), 4}, 17 | {filepath.Join("/test", "Season 05"), 5}, 18 | {filepath.Join("/test", "Regular.Movie.2024"), 0}, 19 | } 20 | 21 | for _, tc := range tests { 22 | season := detectSeasonNumber(tc.path) 23 | if season != tc.expected { 24 | t.Errorf("Expected season %d for path %s, got %d", tc.expected, tc.path, season) 25 | } 26 | } 27 | } 28 | 29 | func TestAnalyzeSeasonPack_MultiEpisode(t *testing.T) { 30 | files := []fileEntry{ 31 | {path: filepath.Join("/test", "Show.S02E01E02.mkv")}, 32 | {path: filepath.Join("/test", "Show.S02E03.mkv")}, 33 | {path: filepath.Join("/test", "Show.S02E04.mkv")}, 34 | {path: filepath.Join("/test", "Show.S02E05.mkv")}, 35 | {path: filepath.Join("/test", "Show.S02E06.mkv")}, 36 | {path: filepath.Join("/test", 
"Show.S02E07.mkv")},
		{path: filepath.Join("/test", "Show.S02E08.mkv")},
		{path: filepath.Join("/test", "Show.S02E09.mkv")},
		{path: filepath.Join("/test", "Show.S02E10E11.mkv")},
		{path: filepath.Join("/test", "Show.S02E12.mkv")},
	}

	info := AnalyzeSeasonPack(files)

	if !info.IsSeasonPack {
		t.Error("Expected IsSeasonPack to be true")
	}
	if info.Season != 2 {
		t.Errorf("Expected Season 2, got %d", info.Season)
	}
	if info.VideoFileCount != 10 {
		t.Errorf("Expected 10 video files, got %d", info.VideoFileCount)
	}
	if info.MaxEpisode != 12 {
		t.Errorf("Expected MaxEpisode 12, got %d", info.MaxEpisode)
	}
	if len(info.Episodes) != 12 {
		t.Errorf("Expected 12 unique episodes, got %d", len(info.Episodes))
	}
	if len(info.MissingEpisodes) != 0 {
		t.Errorf("Expected no missing episodes, got %v", info.MissingEpisodes)
	}
	if info.IsSuspicious {
		t.Error("Expected IsSuspicious to be false for complete season pack with multi-episode file")
	}

	expectedEpisodes := make([]int, 12)
	for i := range expectedEpisodes {
		expectedEpisodes[i] = i + 1
	}
	// Handle the short-slice case in its own branch: the previous combined
	// check still indexed info.Episodes[i] in its error message, which panics
	// with index-out-of-range instead of reporting a normal test failure.
	for i, ep := range expectedEpisodes {
		if i >= len(info.Episodes) {
			t.Errorf("Expected episode %d at position %d, but Episodes has only %d entries", ep, i, len(info.Episodes))
		} else if info.Episodes[i] != ep {
			t.Errorf("Expected episode %d at position %d, got %d", ep, i, info.Episodes[i])
		}
	}
}

func TestExtractSeasonEpisode(t *testing.T) {
	tests := []struct {
		filename      string
		expectSeason  int
		expectEpisode int
	}{
		{"Show.S01E01.Name.mkv", 1, 1},
		{"S02E05.Episode.Name.mp4", 2, 5},
		{"My.Show.S03E10.1080p.mkv", 3, 10},
		{"Movie.2024.mkv", 0, 0}, // Not an episode
	}

	for _, tc := range tests {
		season, episode := extractSeasonEpisode(tc.filename)
		if season != tc.expectSeason || episode != tc.expectEpisode {
			t.Errorf("For %s expected S%02dE%02d, got S%02dE%02d",
				tc.filename,
tc.expectSeason, tc.expectEpisode, season, episode) 95 | } 96 | } 97 | } 98 | 99 | func TestMultipleEpisodes(t *testing.T) { 100 | tests := []struct { 101 | filename string 102 | expectedEpisodes []int 103 | }{ 104 | {"Show.S01E01E02.mkv", []int{1, 2}}, 105 | {"Show.S01E05-E07.mkv", []int{5, 6, 7}}, 106 | } 107 | 108 | for _, tc := range tests { 109 | episodes := extractMultiEpisodes(tc.filename) 110 | 111 | if len(episodes) != len(tc.expectedEpisodes) { 112 | t.Errorf("For %s expected %v episodes, got %v", tc.filename, tc.expectedEpisodes, episodes) 113 | continue 114 | } 115 | 116 | for i, ep := range episodes { 117 | if i < len(tc.expectedEpisodes) && ep != tc.expectedEpisodes[i] { 118 | t.Errorf("For %s expected episode %d at position %d, got %d", 119 | tc.filename, tc.expectedEpisodes[i], i, ep) 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /internal/torrent/types.go: -------------------------------------------------------------------------------- 1 | package torrent 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/anacrolix/torrent/metainfo" 7 | ) 8 | 9 | // CreateTorrentOptions contains all options for creating a torrent 10 | type CreateTorrentOptions struct { 11 | PieceLengthExp *uint 12 | MaxPieceLength *uint 13 | Path string 14 | Name string 15 | TrackerURL string 16 | Comment string 17 | Source string 18 | Version string 19 | OutputPath string 20 | OutputDir string 21 | WebSeeds []string 22 | ExcludePatterns []string 23 | IncludePatterns []string 24 | Workers int 25 | IsPrivate bool 26 | NoDate bool 27 | NoCreator bool 28 | Verbose bool 29 | Entropy bool 30 | Quiet bool 31 | SkipPrefix bool 32 | } 33 | 34 | // Torrent represents a torrent file with additional functionality 35 | type Torrent struct { 36 | *metainfo.MetaInfo 37 | } 38 | 39 | // FileEntry represents a file in the torrent 40 | type FileEntry struct { 41 | Name string 42 | Path string 43 | Size int64 44 | } 45 | 46 | // 
internal file entry for processing
// fileEntry describes one on-disk file contributing bytes to the torrent data
// stream: its path, length, and byte offset within the concatenated piece space.
type fileEntry struct {
	path   string
	length int64
	offset int64 // running byte offset of this file in the overall stream
}

// internal file reader for processing
// fileReader caches an open handle plus the current read position so piece
// verification can skip redundant Seek calls on sequential reads.
type fileReader struct {
	file     *os.File
	position int64 // current file offset; -1 until first positioned read
	length   int64
}

// TorrentInfo contains summary information about the created torrent
type TorrentInfo struct {
	MetaInfo *metainfo.MetaInfo
	Path     string
	InfoHash string
	Announce string
	Size     int64
	Files    int
}

// VerificationResult holds the outcome of a torrent data verification check
type VerificationResult struct {
	BadPieceIndices []int    // indices of pieces whose SHA-1 did not match
	MissingFiles    []string // files absent on disk or flagged "(size mismatch)"
	TotalPieces     int
	GoodPieces      int
	BadPieces       int
	MissingPieces   int     // pieces overlapping missing/mismatched files (skipped, not hashed)
	Completion      float64 // percent of checkable (non-missing) pieces that hashed good
}
--------------------------------------------------------------------------------
/internal/torrent/verify.go:
--------------------------------------------------------------------------------
package torrent

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/anacrolix/torrent/metainfo"
)

// VerifyOptions holds options for the verification process
type VerifyOptions struct {
	TorrentPath string
	ContentPath string
	Verbose     bool
	Quiet       bool
	Workers     int // Number of worker goroutines for verification
}

// pieceVerifier carries the state for one verification run: torrent metadata,
// the files mapped on disk, progress counters, and worker/buffer tuning.
type pieceVerifier struct {
	startTime   time.Time
	lastUpdate  time.Time
	torrentInfo *metainfo.Info
	display     *Display // Changed to concrete type
	bufferPool  *sync.Pool
	contentPath string
	files       []fileEntry // Mapped files based on contentPath

	badPieceIndices []int
	missingFiles    []string
	missingRanges   [][2]int64 // Byte ranges [start, end) of missing/mismatched files

	pieceLen  int64
	numPieces int
	readSize  int
45 | 46 | goodPieces uint64 47 | badPieces uint64 48 | missingPieces uint64 // Pieces belonging to missing files 49 | 50 | bytesVerified int64 51 | mutex sync.RWMutex 52 | } 53 | 54 | // VerifyData checks the integrity of content files against a torrent file. 55 | func VerifyData(opts VerifyOptions) (*VerificationResult, error) { 56 | mi, err := metainfo.LoadFromFile(opts.TorrentPath) 57 | if err != nil { 58 | return nil, fmt.Errorf("could not load torrent file %q: %w", opts.TorrentPath, err) 59 | } 60 | 61 | info, err := mi.UnmarshalInfo() 62 | if err != nil { 63 | return nil, fmt.Errorf("could not unmarshal info dictionary from %q: %w", opts.TorrentPath, err) 64 | } 65 | 66 | mappedFiles := make([]fileEntry, 0) 67 | var totalSize int64 68 | var missingFiles []string 69 | baseContentPath := filepath.Clean(opts.ContentPath) 70 | 71 | if info.IsDir() { 72 | // Multi-file torrent 73 | expectedFiles := make(map[string]int64) // Map relative path (using '/') to expected size 74 | for _, f := range info.Files { 75 | // Ensure the key uses forward slashes, consistent with torrent format 76 | relPathKey := filepath.ToSlash(filepath.Join(f.Path...)) 77 | expectedFiles[relPathKey] = f.Length 78 | } 79 | 80 | // Walk the content directory provided by the user 81 | err = filepath.Walk(baseContentPath, func(currentPath string, fileInfo os.FileInfo, walkErr error) error { 82 | if walkErr != nil { 83 | fmt.Fprintf(os.Stderr, "Warning: error walking path %q: %v\n", currentPath, walkErr) 84 | return nil 85 | } 86 | if fileInfo.IsDir() { 87 | if currentPath == baseContentPath { 88 | return nil 89 | } 90 | return nil 91 | } 92 | 93 | relPath, err := filepath.Rel(baseContentPath, currentPath) 94 | if err != nil { 95 | return fmt.Errorf("failed to get relative path for %q: %w", currentPath, err) 96 | } 97 | relPath = filepath.ToSlash(relPath) // Ensure consistent slashes 98 | 99 | if expectedSize, ok := expectedFiles[relPath]; ok { 100 | if fileInfo.Size() != expectedSize { 101 | 
missingFiles = append(missingFiles, relPath+" (size mismatch)") 102 | delete(expectedFiles, relPath) 103 | return nil 104 | } 105 | 106 | mappedFiles = append(mappedFiles, fileEntry{ 107 | path: currentPath, 108 | length: fileInfo.Size(), 109 | offset: totalSize, 110 | }) 111 | totalSize += fileInfo.Size() 112 | delete(expectedFiles, relPath) 113 | } 114 | return nil 115 | }) 116 | 117 | if err != nil { 118 | return nil, fmt.Errorf("error walking content path %q: %w", baseContentPath, err) 119 | } 120 | 121 | for relPathKey := range expectedFiles { 122 | missingFiles = append(missingFiles, relPathKey) 123 | } 124 | 125 | } else { 126 | // Single-file torrent 127 | contentFileInfo, err := os.Stat(baseContentPath) 128 | if err != nil { 129 | if os.IsNotExist(err) { 130 | missingFiles = append(missingFiles, info.Name) 131 | } else { 132 | return nil, fmt.Errorf("could not stat content file %q: %w", baseContentPath, err) 133 | } 134 | } else { 135 | if contentFileInfo.IsDir() { 136 | filePathInDir := filepath.Join(baseContentPath, info.Name) 137 | contentFileInfo, err = os.Stat(filePathInDir) 138 | if err != nil { 139 | if os.IsNotExist(err) { 140 | missingFiles = append(missingFiles, info.Name) 141 | } else { 142 | return nil, fmt.Errorf("could not stat content file %q: %w", filePathInDir, err) 143 | } 144 | } else if contentFileInfo.IsDir() { 145 | return nil, fmt.Errorf("expected content file %q, but found a directory", filePathInDir) 146 | } else if contentFileInfo.Size() != info.Length { 147 | missingFiles = append(missingFiles, info.Name+" (size mismatch)") 148 | } else { 149 | mappedFiles = append(mappedFiles, fileEntry{ 150 | path: filePathInDir, 151 | length: contentFileInfo.Size(), 152 | offset: 0, 153 | }) 154 | totalSize = contentFileInfo.Size() 155 | } 156 | } else { 157 | if contentFileInfo.Size() != info.Length { 158 | missingFiles = append(missingFiles, info.Name+" (size mismatch)") 159 | } else { 160 | mappedFiles = append(mappedFiles, fileEntry{ 161 | 
path: baseContentPath, 162 | length: contentFileInfo.Size(), 163 | offset: 0, 164 | }) 165 | totalSize = contentFileInfo.Size() 166 | } 167 | } 168 | } 169 | } 170 | 171 | // Sort mapped files based on original torrent order before recalculating offsets 172 | if info.IsDir() && len(info.Files) > 0 && len(mappedFiles) > 1 { 173 | originalOrder := make(map[string]int) 174 | for i, f := range info.Files { 175 | originalOrder[filepath.ToSlash(filepath.Join(f.Path...))] = i 176 | } 177 | sort.SliceStable(mappedFiles, func(i, j int) bool { 178 | relPathI, _ := filepath.Rel(baseContentPath, mappedFiles[i].path) 179 | relPathJ, _ := filepath.Rel(baseContentPath, mappedFiles[j].path) 180 | return originalOrder[filepath.ToSlash(relPathI)] < originalOrder[filepath.ToSlash(relPathJ)] 181 | }) 182 | } 183 | 184 | // Recalculate offsets after sorting 185 | currentOffset := int64(0) 186 | for i := range mappedFiles { 187 | mappedFiles[i].offset = currentOffset 188 | currentOffset += mappedFiles[i].length 189 | } 190 | 191 | // 4. 
Initialize Verifier 192 | numPieces := len(info.Pieces) / 20 193 | verifier := &pieceVerifier{ 194 | torrentInfo: &info, 195 | contentPath: opts.ContentPath, 196 | pieceLen: info.PieceLength, 197 | numPieces: numPieces, 198 | files: mappedFiles, 199 | display: NewDisplay(NewFormatter(opts.Verbose)), 200 | missingFiles: missingFiles, 201 | } 202 | verifier.display.SetQuiet(opts.Quiet) 203 | 204 | // Calculate missing ranges *before* verification starts 205 | if len(verifier.missingFiles) > 0 { 206 | missingFileSet := make(map[string]bool) 207 | for _, mf := range verifier.missingFiles { 208 | basePath := strings.TrimSuffix(mf, " (size mismatch)") 209 | missingFileSet[basePath] = true 210 | } 211 | 212 | currentOffset := int64(0) 213 | if info.IsDir() { 214 | for _, f := range info.Files { 215 | relPath := filepath.ToSlash(filepath.Join(f.Path...)) 216 | fileEndOffset := currentOffset + f.Length 217 | if missingFileSet[relPath] { 218 | verifier.missingRanges = append(verifier.missingRanges, [2]int64{currentOffset, fileEndOffset}) 219 | } 220 | currentOffset = fileEndOffset 221 | } 222 | } else if len(verifier.missingFiles) > 0 { // Handle single missing file 223 | verifier.missingRanges = append(verifier.missingRanges, [2]int64{0, info.Length}) 224 | } 225 | } 226 | 227 | // 5. Perform Verification (Hashing and Comparison) 228 | // Pass opts.Workers to verifyPieces 229 | err = verifier.verifyPieces(opts.Workers) // Pass workers from options 230 | if err != nil { 231 | return nil, fmt.Errorf("verification failed: %w", err) 232 | } 233 | 234 | // 6. 
Compile and Return Results 235 | result := &VerificationResult{ 236 | TotalPieces: verifier.numPieces, 237 | GoodPieces: int(verifier.goodPieces), 238 | BadPieces: int(verifier.badPieces), 239 | MissingPieces: int(verifier.missingPieces), // This is now correctly counted atomically 240 | Completion: 0.0, // Will be calculated below 241 | BadPieceIndices: verifier.badPieceIndices, 242 | MissingFiles: verifier.missingFiles, 243 | } 244 | 245 | // Final calculation of completion percentage based on pieces that could be checked 246 | checkablePieces := result.TotalPieces - result.MissingPieces 247 | if checkablePieces > 0 { 248 | // Base completion on pieces that were actually checked (good / checkable) 249 | result.Completion = (float64(result.GoodPieces) / float64(checkablePieces)) * 100.0 250 | } else if result.TotalPieces > 0 { 251 | // All pieces were missing or part of missing files 252 | result.Completion = 0.0 253 | } else { 254 | // 0 total pieces (empty torrent) 255 | result.Completion = 0.0 // Verification of nothing is 0% complete 256 | } 257 | 258 | return result, nil 259 | } 260 | 261 | // optimizeForWorkload determines optimal read buffer size and number of worker goroutines 262 | func (v *pieceVerifier) optimizeForWorkload() (int, int) { 263 | if len(v.files) == 0 { 264 | return 0, 0 265 | } 266 | 267 | var totalSize int64 268 | for _, f := range v.files { 269 | totalSize += f.length 270 | } 271 | avgFileSize := int64(0) 272 | if len(v.files) > 0 { 273 | avgFileSize = totalSize / int64(len(v.files)) 274 | } 275 | 276 | var readSize, numWorkers int 277 | 278 | switch { 279 | case len(v.files) == 1: 280 | if totalSize < 1<<20 { 281 | readSize = 64 << 10 282 | numWorkers = 1 283 | } else if totalSize < 1<<30 { 284 | readSize = 4 << 20 285 | numWorkers = runtime.NumCPU() 286 | } else { 287 | readSize = 8 << 20 288 | numWorkers = runtime.NumCPU() * 2 289 | } 290 | case avgFileSize < 1<<20: 291 | readSize = 256 << 10 292 | numWorkers = runtime.NumCPU() 293 | 
case avgFileSize < 10<<20: 294 | readSize = 1 << 20 295 | numWorkers = runtime.NumCPU() 296 | case avgFileSize < 1<<30: 297 | readSize = 4 << 20 298 | numWorkers = runtime.NumCPU() * 2 299 | default: 300 | readSize = 8 << 20 301 | numWorkers = runtime.NumCPU() * 2 302 | } 303 | 304 | if numWorkers > v.numPieces { 305 | numWorkers = v.numPieces 306 | } 307 | if v.numPieces > 0 && numWorkers == 0 { 308 | numWorkers = 1 309 | } 310 | 311 | return readSize, numWorkers 312 | } 313 | 314 | // verifyPieces coordinates the parallel verification of pieces. 315 | // Accepts numWorkersOverride: if > 0, uses this value; otherwise, optimizes automatically. 316 | func (v *pieceVerifier) verifyPieces(numWorkersOverride int) error { 317 | if v.numPieces == 0 { 318 | // Don't show progress for 0 pieces 319 | return nil 320 | } 321 | 322 | var numWorkers int 323 | // Use override if provided, otherwise optimize 324 | if numWorkersOverride > 0 { 325 | numWorkers = numWorkersOverride 326 | // Still need readSize if workers are specified 327 | v.readSize, _ = v.optimizeForWorkload() // Only need readSize 328 | // Ensure specified workers don't exceed pieces or minimum of 1 329 | if numWorkers > v.numPieces { 330 | numWorkers = v.numPieces 331 | } 332 | if v.numPieces > 0 && numWorkers <= 0 { // Safety check 333 | numWorkers = 1 334 | } 335 | } else { 336 | v.readSize, numWorkers = v.optimizeForWorkload() // Optimize both 337 | } 338 | 339 | // Final safeguard: Ensure at least one worker if there are pieces 340 | if v.numPieces > 0 && numWorkers <= 0 { 341 | numWorkers = 1 342 | } 343 | 344 | v.bufferPool = &sync.Pool{ 345 | New: func() interface{} { 346 | allocSize := v.readSize 347 | if allocSize < 64<<10 { 348 | allocSize = 64 << 10 349 | } 350 | buf := make([]byte, allocSize) 351 | return buf 352 | }, 353 | } 354 | 355 | v.mutex.Lock() 356 | v.startTime = time.Now() 357 | v.lastUpdate = v.startTime 358 | v.mutex.Unlock() 359 | v.bytesVerified = 0 360 | 361 | 
v.display.ShowFiles(v.files, numWorkers) 362 | 363 | var completedPieces uint64 364 | piecesPerWorker := (v.numPieces + numWorkers - 1) / numWorkers 365 | errorsCh := make(chan error, numWorkers) 366 | 367 | v.display.ShowProgress(v.numPieces) // Show progress bar only if numPieces > 0 368 | 369 | var wg sync.WaitGroup 370 | for i := 0; i < numWorkers; i++ { 371 | start := i * piecesPerWorker 372 | end := start + piecesPerWorker 373 | if end > v.numPieces { 374 | end = v.numPieces 375 | } 376 | 377 | wg.Add(1) 378 | go func(startPiece, endPiece int) { 379 | defer wg.Done() 380 | if err := v.verifyPieceRange(startPiece, endPiece, &completedPieces); err != nil { 381 | errorsCh <- err 382 | } 383 | }(start, end) 384 | } 385 | 386 | // Progress monitoring goroutine 387 | go func() { 388 | ticker := time.NewTicker(200 * time.Millisecond) 389 | defer ticker.Stop() 390 | for range ticker.C { 391 | completed := atomic.LoadUint64(&completedPieces) 392 | // Update display 393 | // We might need to adjust UpdateProgress or pass different values 394 | // For now, let's pass the overall completed count (good+bad+missing) 395 | v.mutex.RLock() 396 | elapsed := time.Since(v.startTime).Seconds() 397 | v.mutex.RUnlock() 398 | var rate float64 399 | if elapsed > 0 { 400 | bytesVerified := atomic.LoadInt64(&v.bytesVerified) 401 | rate = float64(bytesVerified) / elapsed 402 | } 403 | // Pass total completed count and rate to UpdateProgress 404 | // Note: UpdateProgress might need adjustment if it expects percentage instead of count 405 | v.display.UpdateProgress(int(completed), rate) 406 | 407 | if completed >= uint64(v.numPieces) { 408 | return // Exit goroutine when all pieces are processed 409 | } 410 | } 411 | }() 412 | 413 | wg.Wait() 414 | close(errorsCh) 415 | 416 | for err := range errorsCh { 417 | if err != nil { 418 | v.display.FinishProgress() 419 | return err 420 | } 421 | } 422 | 423 | v.display.FinishProgress() 424 | return nil 425 | } 426 | 427 | // verifyPieceRange 
processes and verifies a specific range of pieces. 428 | func (v *pieceVerifier) verifyPieceRange(startPiece, endPiece int, completedPieces *uint64) error { 429 | buf := v.bufferPool.Get().([]byte) 430 | defer v.bufferPool.Put(buf) 431 | 432 | hasher := sha1.New() 433 | readers := make(map[string]*fileReader) 434 | defer func() { 435 | for _, r := range readers { 436 | if r.file != nil { 437 | r.file.Close() 438 | } 439 | } 440 | }() 441 | 442 | currentFileIndex := 0 443 | 444 | for pieceIndex := startPiece; pieceIndex < endPiece; pieceIndex++ { 445 | var expectedHash []byte 446 | var actualHash []byte 447 | 448 | pieceOffset := int64(pieceIndex) * v.pieceLen 449 | pieceEndOffset := pieceOffset + v.pieceLen 450 | 451 | // Check if this piece falls within a known missing range 452 | isMissing := false 453 | for _, r := range v.missingRanges { 454 | if pieceOffset < r[1] && pieceEndOffset > r[0] { 455 | isMissing = true 456 | break 457 | } 458 | } 459 | 460 | if isMissing { 461 | atomic.AddUint64(&v.missingPieces, 1) 462 | atomic.AddUint64(completedPieces, 1) 463 | continue // Skip hashing/comparison for missing pieces 464 | } 465 | 466 | // If not missing, proceed to hash and compare 467 | hasher.Reset() 468 | bytesHashedThisPiece := int64(0) 469 | 470 | foundStartFile := false 471 | for fIdx := currentFileIndex; fIdx < len(v.files); fIdx++ { 472 | file := v.files[fIdx] 473 | if pieceOffset < file.offset+file.length { 474 | currentFileIndex = fIdx 475 | foundStartFile = true 476 | break 477 | } 478 | } 479 | if !foundStartFile { 480 | // Should not happen if missingRanges logic is correct and piece is not missing 481 | atomic.AddUint64(&v.badPieces, 1) 482 | v.mutex.Lock() 483 | v.badPieceIndices = append(v.badPieceIndices, pieceIndex) 484 | v.mutex.Unlock() 485 | atomic.AddUint64(completedPieces, 1) 486 | continue 487 | } 488 | 489 | for fIdx := currentFileIndex; fIdx < len(v.files); fIdx++ { 490 | file := v.files[fIdx] 491 | if file.offset >= pieceEndOffset { 492 
| break 493 | } 494 | 495 | readStartInFile := int64(0) 496 | if pieceOffset > file.offset { 497 | readStartInFile = pieceOffset - file.offset 498 | } 499 | readEndInFile := file.length 500 | if pieceEndOffset < file.offset+file.length { 501 | readEndInFile = pieceEndOffset - file.offset 502 | } 503 | readLength := readEndInFile - readStartInFile 504 | if readLength <= 0 { 505 | continue 506 | } 507 | 508 | reader, ok := readers[file.path] 509 | if !ok { 510 | f, err := os.OpenFile(file.path, os.O_RDONLY, 0) 511 | if err != nil { 512 | // File became unreadable after initial check? Mark as bad. 513 | atomic.AddUint64(&v.badPieces, 1) 514 | v.mutex.Lock() 515 | v.badPieceIndices = append(v.badPieceIndices, pieceIndex) 516 | v.mutex.Unlock() 517 | goto nextPiece // Use goto to ensure completedPieces is incremented 518 | } 519 | reader = &fileReader{file: f, position: -1, length: file.length} 520 | readers[file.path] = reader 521 | } 522 | 523 | if reader.position != readStartInFile { 524 | _, err := reader.file.Seek(readStartInFile, io.SeekStart) 525 | if err != nil { 526 | atomic.AddUint64(&v.badPieces, 1) 527 | v.mutex.Lock() 528 | v.badPieceIndices = append(v.badPieceIndices, pieceIndex) 529 | v.mutex.Unlock() 530 | goto nextPiece 531 | } 532 | reader.position = readStartInFile 533 | } 534 | 535 | bytesToRead := readLength 536 | for bytesToRead > 0 { 537 | readSize := int64(len(buf)) 538 | if bytesToRead < readSize { 539 | readSize = bytesToRead 540 | } 541 | n, err := reader.file.Read(buf[:readSize]) 542 | if err != nil && err != io.EOF { 543 | atomic.AddUint64(&v.badPieces, 1) 544 | v.mutex.Lock() 545 | v.badPieceIndices = append(v.badPieceIndices, pieceIndex) 546 | v.mutex.Unlock() 547 | goto nextPiece 548 | } 549 | if n == 0 && err == io.EOF { 550 | break 551 | } 552 | hasher.Write(buf[:n]) 553 | bytesHashedThisPiece += int64(n) 554 | reader.position += int64(n) 555 | bytesToRead -= int64(n) 556 | atomic.AddInt64(&v.bytesVerified, int64(n)) 557 | } 558 | 
pieceOffset += readLength 559 | } 560 | 561 | expectedHash = v.torrentInfo.Pieces[pieceIndex*20 : (pieceIndex+1)*20] 562 | actualHash = hasher.Sum(nil) 563 | 564 | if bytes.Equal(actualHash, expectedHash) { 565 | atomic.AddUint64(&v.goodPieces, 1) 566 | } else { 567 | atomic.AddUint64(&v.badPieces, 1) 568 | v.mutex.Lock() 569 | v.badPieceIndices = append(v.badPieceIndices, pieceIndex) 570 | v.mutex.Unlock() 571 | } 572 | 573 | nextPiece: 574 | atomic.AddUint64(completedPieces, 1) 575 | } 576 | 577 | return nil 578 | } 579 | -------------------------------------------------------------------------------- /internal/trackers/trackers.go: -------------------------------------------------------------------------------- 1 | package trackers 2 | 3 | import "strings" 4 | 5 | // TrackerConfig holds tracker-specific configuration 6 | type TrackerConfig struct { 7 | DefaultSource string // default source to use for this tracker 8 | URLs []string // list of tracker URLs that share this config 9 | PieceSizeRanges []PieceSizeRange // custom piece size ranges for specific content sizes 10 | MaxPieceLength uint // maximum piece length exponent (2^n) 11 | MaxTorrentSize uint64 // maximum .torrent file size in bytes (0 means no limit) 12 | UseDefaultRanges bool // whether to use default piece size ranges when content size is outside custom ranges 13 | } 14 | 15 | // PieceSizeRange defines a range of content sizes and their corresponding piece size exponent 16 | type PieceSizeRange struct { 17 | MaxSize uint64 // maximum content size in bytes for this range 18 | PieceExp uint // piece size exponent (2^n) 19 | } 20 | 21 | // trackerConfigs maps known tracker base URLs to their configurations 22 | var trackerConfigs = []TrackerConfig{ 23 | { 24 | URLs: []string{ 25 | "anthelion.me", 26 | }, 27 | MaxTorrentSize: 250 << 10, // 250 KiB torrent file size limit 28 | }, 29 | { 30 | URLs: []string{ 31 | "hdbits.org", 32 | "beyond-hd.me", 33 | "superbits.org", 34 | "sptracker.cc", 35 | }, 36 | 
MaxPieceLength: 24, // max 16 MiB pieces (2^24) 37 | UseDefaultRanges: true, 38 | }, 39 | { 40 | URLs: []string{ 41 | "passthepopcorn.me", 42 | }, // https://ptp/upload.php?action=piecesize 43 | MaxPieceLength: 24, // max 16 MiB pieces (2^24) 44 | PieceSizeRanges: []PieceSizeRange{ 45 | {MaxSize: 58 << 20, PieceExp: 16}, // 64 KiB for <= 58 MiB 46 | {MaxSize: 122 << 20, PieceExp: 17}, // 128 KiB for 58-122 MiB 47 | {MaxSize: 213 << 20, PieceExp: 18}, // 256 KiB for 122-213 MiB 48 | {MaxSize: 444 << 20, PieceExp: 19}, // 512 KiB for 213-444 MiB 49 | {MaxSize: 922 << 20, PieceExp: 20}, // 1 MiB for 444-922 MiB 50 | {MaxSize: 3977 << 20, PieceExp: 21}, // 2 MiB for 922 MiB-3.88 GiB 51 | {MaxSize: 6861 << 20, PieceExp: 22}, // 4 MiB for 3.88-6.70 GiB 52 | {MaxSize: 14234 << 20, PieceExp: 23}, // 8 MiB for 6.70-13.90 GiB 53 | {MaxSize: ^uint64(0), PieceExp: 24}, // 16 MiB for > 13.90 GiB 54 | }, 55 | UseDefaultRanges: false, 56 | }, 57 | { 58 | URLs: []string{ 59 | "empornium.sx", 60 | "morethantv.me", // https://mtv/forum/thread/3237?postid=74725#post74725 61 | }, 62 | MaxPieceLength: 23, // max 8 MiB pieces (2^23) 63 | UseDefaultRanges: true, 64 | }, 65 | { 66 | URLs: []string{ 67 | "gazellegames.net", 68 | }, 69 | MaxPieceLength: 26, // max 64 MiB pieces (2^26) 70 | PieceSizeRanges: []PieceSizeRange{ // https://ggn/wiki.php?action=article&id=300 71 | {MaxSize: 64 << 20, PieceExp: 15}, // 32 KiB for < 64 MB 72 | {MaxSize: 128 << 20, PieceExp: 16}, // 64 KiB for 64-128 MB 73 | {MaxSize: 256 << 20, PieceExp: 17}, // 128 KiB for 128-256 MB 74 | {MaxSize: 512 << 20, PieceExp: 18}, // 256 KiB for 256-512 MB 75 | {MaxSize: 1024 << 20, PieceExp: 19}, // 512 KiB for 512 MB-1 GB 76 | {MaxSize: 2048 << 20, PieceExp: 20}, // 1 MiB for 1-2 GB 77 | {MaxSize: 4096 << 20, PieceExp: 21}, // 2 MiB for 2-4 GB 78 | {MaxSize: 8192 << 20, PieceExp: 22}, // 4 MiB for 4-8 GB 79 | {MaxSize: 16384 << 20, PieceExp: 23}, // 8 MiB for 8-16 GB 80 | {MaxSize: 32768 << 20, PieceExp: 24}, // 16 MiB 
for 16-32 GB 81 | {MaxSize: 65536 << 20, PieceExp: 25}, // 32 MiB for 32-64 GB 82 | {MaxSize: ^uint64(0), PieceExp: 26}, // 64 MiB for > 64 GB 83 | }, 84 | UseDefaultRanges: false, 85 | MaxTorrentSize: 1 << 20, // 1 MB torrent file size limit 86 | DefaultSource: "GGn", 87 | }, 88 | { 89 | URLs: []string{ 90 | "tracker.alpharatio.cc", 91 | }, 92 | MaxPieceLength: 26, // max 64 MiB pieces (2^26) 93 | PieceSizeRanges: []PieceSizeRange{ 94 | {MaxSize: 64 << 20, PieceExp: 15}, // 32 KiB for < 64 MB 95 | {MaxSize: 128 << 20, PieceExp: 16}, // 64 KiB for 64-128 MB 96 | {MaxSize: 256 << 20, PieceExp: 17}, // 128 KiB for 128-256 MB 97 | {MaxSize: 512 << 20, PieceExp: 18}, // 256 KiB for 256-512 MB 98 | {MaxSize: 1024 << 20, PieceExp: 19}, // 512 KiB for 512 MB-1 GB 99 | {MaxSize: 2048 << 20, PieceExp: 20}, // 1 MiB for 1-2 GB 100 | {MaxSize: 4096 << 20, PieceExp: 21}, // 2 MiB for 2-4 GB 101 | {MaxSize: 8192 << 20, PieceExp: 22}, // 4 MiB for 4-8 GB 102 | {MaxSize: 16384 << 20, PieceExp: 23}, // 8 MiB for 8-16 GB 103 | {MaxSize: 32768 << 20, PieceExp: 24}, // 16 MiB for 16-32 GB 104 | {MaxSize: 65536 << 20, PieceExp: 25}, // 32 MiB for 32-64 GB 105 | {MaxSize: ^uint64(0), PieceExp: 26}, // 64 MiB for > 64 GB 106 | }, 107 | UseDefaultRanges: false, 108 | MaxTorrentSize: 2 << 20, // 2 MB torrent file size limit 109 | DefaultSource: "AlphaRatio", 110 | }, 111 | { 112 | URLs: []string{ 113 | "seedpool.org", 114 | }, 115 | MaxPieceLength: 27, // max 128 MiB pieces (2^27) 116 | PieceSizeRanges: []PieceSizeRange{ // Mirror default calculation logic from create.go 117 | {MaxSize: 64 << 20, PieceExp: 15}, // 32 KiB for <= 64 MB 118 | {MaxSize: 128 << 20, PieceExp: 16}, // 64 KiB for 64-128 MB 119 | {MaxSize: 256 << 20, PieceExp: 17}, // 128 KiB for 128-256 MB 120 | {MaxSize: 512 << 20, PieceExp: 18}, // 256 KiB for 256-512 MB 121 | {MaxSize: 1024 << 20, PieceExp: 19}, // 512 KiB for 512 MB-1 GB 122 | {MaxSize: 2048 << 20, PieceExp: 20}, // 1 MiB for 1-2 GB 123 | {MaxSize: 4096 << 
20, PieceExp: 21}, // 2 MiB for 2-4 GB 124 | {MaxSize: 8192 << 20, PieceExp: 22}, // 4 MiB for 4-8 GB 125 | {MaxSize: 16384 << 20, PieceExp: 23}, // 8 MiB for 8-16 GB 126 | {MaxSize: 32768 << 20, PieceExp: 24}, // 16 MiB for 16-32 GB 127 | {MaxSize: 65536 << 20, PieceExp: 25}, // 32 MiB for 32-64 GB 128 | {MaxSize: 131072 << 20, PieceExp: 26}, // 64 MiB for 64-128 GB 129 | {MaxSize: ^uint64(0), PieceExp: 27}, // 128 MiB for > 128 GB 130 | }, 131 | UseDefaultRanges: false, 132 | }, 133 | { 134 | URLs: []string{ 135 | "norbits.net", 136 | }, 137 | PieceSizeRanges: []PieceSizeRange{ // https://nb/ulguide.php 138 | {MaxSize: 250 << 20, PieceExp: 18}, // 256 KiB for < 250 MB 139 | {MaxSize: 1024 << 20, PieceExp: 20}, // 1 MiB for 250-1024 MB 140 | {MaxSize: 5120 << 20, PieceExp: 21}, // 2 MiB for 1-5 GB 141 | {MaxSize: 20480 << 20, PieceExp: 22}, // 4 MiB for 5-20 GB 142 | {MaxSize: 40960 << 20, PieceExp: 23}, // 8 MiB for 20-40 GB 143 | {MaxSize: ^uint64(0), PieceExp: 24}, // 16 MiB for > 40 GB 144 | }, 145 | MaxPieceLength: 24, // max 16 MiB pieces (2^24) 146 | UseDefaultRanges: false, 147 | }, 148 | { 149 | URLs: []string{ 150 | "landof.tv", 151 | }, 152 | PieceSizeRanges: []PieceSizeRange{ // https://btn/forums.php?action=viewthread&threadid=18301 153 | {MaxSize: 32 << 20, PieceExp: 15}, // 32 KiB for <= 32 MiB 154 | {MaxSize: 62 << 20, PieceExp: 16}, // 64 KiB for 32-62 MiB 155 | {MaxSize: 125 << 20, PieceExp: 17}, // 128 KiB for 62-125 MiB 156 | {MaxSize: 250 << 20, PieceExp: 18}, // 256 KiB for 125-250 MiB 157 | {MaxSize: 500 << 20, PieceExp: 19}, // 512 KiB for 250-500 MiB 158 | {MaxSize: 1000 << 20, PieceExp: 20}, // 1 MiB for 500-1000 MiB 159 | {MaxSize: 1945 << 20, PieceExp: 21}, // 2 MiB for 1000 MiB-1.95 GiB 160 | {MaxSize: 3906 << 20, PieceExp: 22}, // 4 MiB for 1.95-3.906 GiB 161 | {MaxSize: 7810 << 20, PieceExp: 23}, // 8 MiB for 3.906-7.81 GiB 162 | {MaxSize: ^uint64(0), PieceExp: 24}, // 16 MiB for > 7.81 GiB 163 | }, 164 | MaxPieceLength: 24, // max 
16 MiB pieces (2^24) 165 | UseDefaultRanges: false, 166 | }, 167 | { 168 | URLs: []string{ 169 | "torrent-syndikat.org", 170 | "tee-stube.org", 171 | }, 172 | MaxPieceLength: 24, // max 16 MiB pieces (2^24) 173 | PieceSizeRanges: []PieceSizeRange{ 174 | {MaxSize: 250 << 20, PieceExp: 20}, // 1 MiB for < 250 MB 175 | {MaxSize: 1024 << 20, PieceExp: 20}, // 1 MiB for 250 MB-1 GB 176 | {MaxSize: 5120 << 20, PieceExp: 20}, // 1 MiB for 1-5 GB 177 | {MaxSize: 20480 << 20, PieceExp: 22}, // 4 MiB for 5-20 GB 178 | {MaxSize: 51200 << 20, PieceExp: 23}, // 8 MiB for 20-50 GB 179 | {MaxSize: ^uint64(0), PieceExp: 24}, // 16 MiB for > 50 GB 180 | }, 181 | UseDefaultRanges: false, 182 | }, 183 | } 184 | 185 | // findTrackerConfig returns the config for a given tracker URL 186 | func findTrackerConfig(trackerURL string) *TrackerConfig { 187 | for i := range trackerConfigs { 188 | for _, url := range trackerConfigs[i].URLs { 189 | if strings.Contains(trackerURL, url) { 190 | return &trackerConfigs[i] 191 | } 192 | } 193 | } 194 | return nil 195 | } 196 | 197 | // GetTrackerMaxPieceLength returns the maximum piece length exponent for a tracker if known. 198 | // This is a hard limit that will not be exceeded. 
199 | func GetTrackerMaxPieceLength(trackerURL string) (uint, bool) { 200 | if config := findTrackerConfig(trackerURL); config != nil { 201 | return config.MaxPieceLength, config.MaxPieceLength > 0 202 | } 203 | return 0, false 204 | } 205 | 206 | // GetTrackerPieceSizeExp returns the recommended piece size exponent for a given content size and tracker 207 | func GetTrackerPieceSizeExp(trackerURL string, contentSize uint64) (uint, bool) { 208 | if config := findTrackerConfig(trackerURL); config != nil { 209 | if len(config.PieceSizeRanges) > 0 { 210 | for _, r := range config.PieceSizeRanges { 211 | if contentSize <= r.MaxSize { 212 | return r.PieceExp, true 213 | } 214 | } 215 | // if we have ranges but didn't find a match, and UseDefaultRanges is false, 216 | // use the highest defined piece size 217 | if !config.UseDefaultRanges { 218 | return config.PieceSizeRanges[len(config.PieceSizeRanges)-1].PieceExp, true 219 | } 220 | } 221 | } 222 | return 0, false 223 | } 224 | 225 | // GetTrackerMaxTorrentSize returns the maximum allowed .torrent file size for a tracker if known 226 | func GetTrackerMaxTorrentSize(trackerURL string) (uint64, bool) { 227 | if config := findTrackerConfig(trackerURL); config != nil { 228 | return config.MaxTorrentSize, config.MaxTorrentSize > 0 229 | } 230 | return 0, false 231 | } 232 | 233 | // GetTrackerDefaultSource returns the default source for a tracker if defined 234 | func GetTrackerDefaultSource(trackerURL string) (string, bool) { 235 | if config := findTrackerConfig(trackerURL); config != nil && config.DefaultSource != "" { 236 | return config.DefaultSource, true 237 | } 238 | return "", false 239 | } 240 | -------------------------------------------------------------------------------- /internal/trackers/trackers_test.go: -------------------------------------------------------------------------------- 1 | package trackers 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_GetTrackerPieceSizeExp(t *testing.T) { 8 | tests := 
[]struct { 9 | name string 10 | trackerURL string 11 | contentSize uint64 12 | wantExp uint 13 | wantFound bool 14 | }{ 15 | { 16 | name: "ggn small file should use 32 KiB pieces", 17 | trackerURL: "https://gazellegames.net/announce?passkey=123", 18 | contentSize: 32 << 20, // 32 MB 19 | wantExp: 15, // 32 KiB pieces 20 | wantFound: true, 21 | }, 22 | { 23 | name: "ggn medium file should use 1 MiB pieces", 24 | trackerURL: "https://gazellegames.net/announce?passkey=123", 25 | contentSize: (3 << 29), // 1.5 GB (3 * 512MB) 26 | wantExp: 20, // 1 MiB pieces 27 | wantFound: true, 28 | }, 29 | { 30 | name: "ggn huge file should use 64 MiB pieces", 31 | trackerURL: "https://gazellegames.net/announce?passkey=123", 32 | contentSize: 100 << 30, // 100 GB 33 | wantExp: 26, // 64 MiB pieces 34 | wantFound: true, 35 | }, 36 | { 37 | name: "torrent-syndikat small file should use 1 MiB pieces", 38 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 39 | contentSize: 200 << 20, // 200 MB 40 | wantExp: 20, // 1 MiB pieces 41 | wantFound: true, 42 | }, 43 | { 44 | name: "torrent-syndikat medium file should use 1 MiB pieces", 45 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 46 | contentSize: 3 << 30, // 3 GB 47 | wantExp: 20, // 1 MiB pieces 48 | wantFound: true, 49 | }, 50 | { 51 | name: "torrent-syndikat large file should use 4 MiB pieces", 52 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 53 | contentSize: 10 << 30, // 10 GB 54 | wantExp: 22, // 4 MiB pieces 55 | wantFound: true, 56 | }, 57 | { 58 | name: "torrent-syndikat very large file should use 8 MiB pieces", 59 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 60 | contentSize: 40 << 30, // 40 GB 61 | wantExp: 23, // 8 MiB pieces 62 | wantFound: true, 63 | }, 64 | { 65 | name: "torrent-syndikat huge file should use 16 MiB pieces", 66 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 67 | contentSize: 60 << 
30, // 60 GB 68 | wantExp: 24, // 16 MiB pieces 69 | wantFound: true, 70 | }, 71 | { 72 | name: "torrent-syndikat alternate domain should use correct piece size", 73 | trackerURL: "https://ulo.tee-stube.org/ts_ann.php?passkey=123", 74 | contentSize: 60 << 30, // 60 GB 75 | wantExp: 24, // 16 MiB pieces 76 | wantFound: true, 77 | }, 78 | { 79 | name: "unknown tracker should not return piece size recommendations", 80 | trackerURL: "https://unknown.tracker/announce", 81 | contentSize: 1 << 30, 82 | wantExp: 0, 83 | wantFound: false, 84 | }, 85 | } 86 | 87 | for _, tt := range tests { 88 | t.Run(tt.name, func(t *testing.T) { 89 | gotExp, gotFound := GetTrackerPieceSizeExp(tt.trackerURL, tt.contentSize) 90 | if gotFound != tt.wantFound { 91 | t.Errorf("GetTrackerPieceSizeExp() found = %v, want %v", gotFound, tt.wantFound) 92 | } 93 | if gotExp != tt.wantExp { 94 | t.Errorf("GetTrackerPieceSizeExp() exp = %v, want %v", gotExp, tt.wantExp) 95 | } 96 | }) 97 | } 98 | } 99 | 100 | func Test_GetTrackerMaxPieceLength(t *testing.T) { 101 | tests := []struct { 102 | name string 103 | trackerURL string 104 | wantExp uint 105 | wantFound bool 106 | }{ 107 | { 108 | name: "ggn should allow up to 64 MiB pieces", 109 | trackerURL: "https://gazellegames.net/announce?passkey=123", 110 | wantExp: 26, // 64 MiB pieces 111 | wantFound: true, 112 | }, 113 | { 114 | name: "ptp should allow up to 16 MiB pieces", 115 | trackerURL: "https://passthepopcorn.me/announce?passkey=123", 116 | wantExp: 24, // 16 MiB pieces 117 | wantFound: true, 118 | }, 119 | { 120 | name: "hdb should allow up to 16 MiB pieces", 121 | trackerURL: "https://hdbits.org/announce?passkey=123", 122 | wantExp: 24, // 16 MiB pieces 123 | wantFound: true, 124 | }, 125 | { 126 | name: "emp should allow up to 8 MiB pieces", 127 | trackerURL: "https://empornium.sx/announce?passkey=123", 128 | wantExp: 23, // 8 MiB pieces 129 | wantFound: true, 130 | }, 131 | { 132 | name: "mtv should allow up to 8 MiB pieces", 133 | 
trackerURL: "https://morethantv.me/announce?passkey=123", 134 | wantExp: 23, // 8 MiB pieces 135 | wantFound: true, 136 | }, 137 | { 138 | name: "torrent-syndikat should allow up to 16 MiB pieces", 139 | trackerURL: "https://ulo.torrent-syndikat.org/ts_ann.php?passkey=123", 140 | wantExp: 24, // 16 MiB pieces 141 | wantFound: true, 142 | }, 143 | { 144 | name: "torrent-syndikat alternate domain should allow up to 16 MiB pieces", 145 | trackerURL: "https://ulo.tee-stube.org/ts_ann.php?passkey=123", 146 | wantExp: 24, // 16 MiB pieces 147 | wantFound: true, 148 | }, 149 | { 150 | name: "unknown tracker should not return max piece length", 151 | trackerURL: "https://unknown.tracker/announce", 152 | wantExp: 0, 153 | wantFound: false, 154 | }, 155 | } 156 | 157 | for _, tt := range tests { 158 | t.Run(tt.name, func(t *testing.T) { 159 | gotExp, gotFound := GetTrackerMaxPieceLength(tt.trackerURL) 160 | if gotFound != tt.wantFound { 161 | t.Errorf("GetTrackerMaxPieceLength() found = %v, want %v", gotFound, tt.wantFound) 162 | } 163 | if gotExp != tt.wantExp { 164 | t.Errorf("GetTrackerMaxPieceLength() exp = %v, want %v", gotExp, tt.wantExp) 165 | } 166 | }) 167 | } 168 | } 169 | 170 | func Test_GetTrackerMaxTorrentSize(t *testing.T) { 171 | tests := []struct { 172 | name string 173 | trackerURL string 174 | wantSize uint64 175 | wantFound bool 176 | }{ 177 | { 178 | name: "ggn should have 1 MB torrent size limit", 179 | trackerURL: "https://gazellegames.net/announce?passkey=123", 180 | wantSize: 1 << 20, // 1 MB 181 | wantFound: true, 182 | }, 183 | { 184 | name: "anthelion should have 250 KiB torrent size limit", 185 | trackerURL: "https://anthelion.me/announce?passkey=123", 186 | wantSize: 250 << 10, // 250 KiB torrent file size limit 187 | wantFound: true, 188 | }, 189 | { 190 | name: "ptp should not have torrent size limit", 191 | trackerURL: "https://passthepopcorn.me/announce?passkey=123", 192 | wantSize: 0, 193 | wantFound: false, 194 | }, 195 | { 196 | name: "hdb 
should not have torrent size limit", 197 | trackerURL: "https://hdbits.org/announce?passkey=123", 198 | wantSize: 0, 199 | wantFound: false, 200 | }, 201 | { 202 | name: "unknown tracker should not have torrent size limit", 203 | trackerURL: "https://unknown.tracker/announce", 204 | wantSize: 0, 205 | wantFound: false, 206 | }, 207 | } 208 | 209 | for _, tt := range tests { 210 | t.Run(tt.name, func(t *testing.T) { 211 | gotSize, gotFound := GetTrackerMaxTorrentSize(tt.trackerURL) 212 | if gotFound != tt.wantFound { 213 | t.Errorf("GetTrackerMaxTorrentSize() found = %v, want %v", gotFound, tt.wantFound) 214 | } 215 | if gotSize != tt.wantSize { 216 | t.Errorf("GetTrackerMaxTorrentSize() size = %v, want %v", gotSize, tt.wantSize) 217 | } 218 | }) 219 | } 220 | } 221 | 222 | func Test_trackerConfigConsistency(t *testing.T) { 223 | for _, config := range trackerConfigs { 224 | // Skip empty configs 225 | if len(config.URLs) == 0 { 226 | t.Error("found tracker config with no URLs") 227 | continue 228 | } 229 | 230 | // Verify piece size ranges are in ascending order 231 | for i := 1; i < len(config.PieceSizeRanges); i++ { 232 | if config.PieceSizeRanges[i].MaxSize <= config.PieceSizeRanges[i-1].MaxSize { 233 | t.Errorf("tracker %v: piece size range %d (max size %d) is not greater than range %d (max size %d)", 234 | config.URLs, i, config.PieceSizeRanges[i].MaxSize, i-1, config.PieceSizeRanges[i-1].MaxSize) 235 | } 236 | } 237 | 238 | // Verify piece size exponents are within bounds 239 | for i, r := range config.PieceSizeRanges { 240 | if r.PieceExp > config.MaxPieceLength { 241 | t.Errorf("tracker %v: piece size range %d has exponent %d exceeding max piece length %d", 242 | config.URLs, i, r.PieceExp, config.MaxPieceLength) 243 | } 244 | } 245 | 246 | // Verify piece size ranges don't have gaps 247 | if len(config.PieceSizeRanges) > 0 { 248 | for i := 1; i < len(config.PieceSizeRanges); i++ { 249 | prev := config.PieceSizeRanges[i-1] 250 | curr := 
config.PieceSizeRanges[i] 251 | 252 | // skip check if current range is the "infinity" range 253 | if curr.MaxSize == ^uint64(0) { 254 | continue 255 | } 256 | 257 | // verify current range starts where previous range ends 258 | if curr.MaxSize <= prev.MaxSize { 259 | t.Errorf("tracker %v: piece size range %d (max size %d) must be greater than range %d (max size %d)", 260 | config.URLs, i, curr.MaxSize, i-1, prev.MaxSize) 261 | } 262 | } 263 | } 264 | } 265 | } 266 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/autobrr/mkbrr/cmd" 8 | ) 9 | 10 | var ( 11 | version = "dev" 12 | buildTime = "unknown" 13 | ) 14 | 15 | func main() { 16 | cmd.SetVersion(version, buildTime) 17 | if err := cmd.Execute(); err != nil { 18 | fmt.Println(err) 19 | os.Exit(1) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /schema/batch.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "title": "mkbrr Batch Configuration", 4 | "description": "Schema for mkbrr batch torrent creation configuration", 5 | "type": "object", 6 | "required": ["version", "jobs"], 7 | "properties": { 8 | "version": { 9 | "type": "integer", 10 | "enum": [1], 11 | "description": "Schema version, must be 1" 12 | }, 13 | "jobs": { 14 | "type": "array", 15 | "description": "List of torrent creation jobs", 16 | "items": { 17 | "type": "object", 18 | "required": ["output", "path"], 19 | "properties": { 20 | "output": { 21 | "type": "string", 22 | "description": "Output path for .torrent file" 23 | }, 24 | "path": { 25 | "type": "string", 26 | "description": "Path to source file/directory" 27 | }, 28 | "trackers": { 29 | "type": "array", 30 | "description": "List of tracker 
URLs", 31 | "items": { 32 | "type": "string", 33 | "format": "uri" 34 | } 35 | }, 36 | "webseeds": { 37 | "type": "array", 38 | "description": "List of webseed URLs", 39 | "items": { 40 | "type": "string", 41 | "format": "uri" 42 | } 43 | }, 44 | "private": { 45 | "type": "boolean", 46 | "description": "Make torrent private", 47 | "default": false 48 | }, 49 | "piece_length": { 50 | "type": "integer", 51 | "description": "Piece length exponent (2^n bytes)", 52 | "minimum": 14, 53 | "maximum": 27 54 | }, 55 | "comment": { 56 | "type": "string", 57 | "description": "Torrent comment" 58 | }, 59 | "source": { 60 | "type": "string", 61 | "description": "Source tag" 62 | }, 63 | "no_date": { 64 | "type": "boolean", 65 | "description": "Don't write creation date", 66 | "default": false 67 | }, 68 | "exclude_patterns": { 69 | "type": "array", 70 | "description": "List of glob patterns to exclude files (e.g., \"*.nfo\", \"*sample*\")", 71 | "items": { 72 | "type": "string" 73 | } 74 | }, 75 | "include_patterns": { 76 | "type": "array", 77 | "description": "List of glob patterns to include files (e.g., \"*.mkv\", \"*video*\")", 78 | "items": { 79 | "type": "string" 80 | } 81 | } 82 | } 83 | } 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /schema/presets.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "title": "Presets Configuration", 4 | "description": "Configuration file for torrent creation presets", 5 | "type": "object", 6 | "required": ["version", "presets"], 7 | "properties": { 8 | "version": { 9 | "type": "integer", 10 | "description": "Schema version", 11 | "enum": [1] 12 | }, 13 | "default": { 14 | "type": "object", 15 | "description": "Default settings that apply to all presets unless overridden", 16 | "properties": { 17 | "trackers": { 18 | "type": "array", 19 | "description": "List of tracker URLs", 
20 | "items": { 21 | "type": "string", 22 | "format": "uri" 23 | } 24 | }, 25 | "webseeds": { 26 | "type": "array", 27 | "description": "List of webseed URLs", 28 | "items": { 29 | "type": "string", 30 | "format": "uri" 31 | } 32 | }, 33 | "private": { 34 | "type": "boolean", 35 | "description": "Whether the torrent is private" 36 | }, 37 | "piece_length": { 38 | "type": "integer", 39 | "description": "Piece length as 2^n bytes (16-27)", 40 | "minimum": 16, 41 | "maximum": 27 42 | }, 43 | "comment": { 44 | "type": "string", 45 | "description": "Torrent comment" 46 | }, 47 | "source": { 48 | "type": "string", 49 | "description": "Source tag" 50 | }, 51 | "no_date": { 52 | "type": "boolean", 53 | "description": "Don't write creation date" 54 | }, 55 | "exclude_patterns": { 56 | "type": "array", 57 | "description": "List of glob patterns to exclude files (e.g., \"*.nfo\", \"*sample*\")", 58 | "items": { 59 | "type": "string" 60 | } 61 | }, 62 | "include_patterns": { 63 | "type": "array", 64 | "description": "List of glob patterns to include files (e.g., \"*.mkv\", \"*video*\")", 65 | "items": { 66 | "type": "string" 67 | } 68 | } 69 | } 70 | }, 71 | "presets": { 72 | "type": "object", 73 | "description": "Map of preset names to their configurations", 74 | "additionalProperties": { 75 | "type": "object", 76 | "description": "Preset configuration", 77 | "properties": { 78 | "trackers": { 79 | "type": "array", 80 | "description": "List of tracker URLs", 81 | "items": { 82 | "type": "string", 83 | "format": "uri" 84 | } 85 | }, 86 | "webseeds": { 87 | "type": "array", 88 | "description": "List of webseed URLs", 89 | "items": { 90 | "type": "string", 91 | "format": "uri" 92 | } 93 | }, 94 | "private": { 95 | "type": "boolean", 96 | "description": "Whether the torrent is private" 97 | }, 98 | "piece_length": { 99 | "type": "integer", 100 | "description": "Piece length as 2^n bytes (16-27)", 101 | "minimum": 16, 102 | "maximum": 27 103 | }, 104 | "comment": { 105 | "type": 
"string", 106 | "description": "Torrent comment" 107 | }, 108 | "source": { 109 | "type": "string", 110 | "description": "Source tag" 111 | }, 112 | "no_date": { 113 | "type": "boolean", 114 | "description": "Don't write creation date" 115 | }, 116 | "max_piece_length": { 117 | "type": "integer", 118 | "description": "Maximum piece length as 2^n bytes (16-27)", 119 | "minimum": 16, 120 | "maximum": 27 121 | }, 122 | "exclude_patterns": { 123 | "type": "array", 124 | "description": "List of glob patterns to exclude files (e.g., \"*.nfo\", \"*sample*\")", 125 | "items": { 126 | "type": "string" 127 | } 128 | }, 129 | "include_patterns": { 130 | "type": "array", 131 | "description": "List of glob patterns to include files (e.g., \"*.mkv\", \"*video*\")", 132 | "items": { 133 | "type": "string" 134 | } 135 | } 136 | } 137 | } 138 | } 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /utils/brr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to benchmark mkbrr with different number of workers 4 | 5 | if [ $# -eq 0 ]; then 6 | echo "Error: No file path provided" 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | FILE_PATH="$1" 12 | 13 | if [ ! -d "$FILE_PATH" ]; then 14 | echo "Error: Directory '$FILE_PATH' does not exist" 15 | exit 1 16 | fi 17 | 18 | WORKER_COUNTS=(0 4 8 16 32 64) # 0 means auto 19 | 20 | HYPERFINE_CMD="hyperfine --warmup 1 --runs 10" 21 | HYPERFINE_CMD+=" --setup 'sudo sync && sudo sh -c \"echo 3 > /proc/sys/vm/drop_caches\"'" 22 | HYPERFINE_CMD+=" --prepare 'sudo sync && sudo sh -c \"echo 3 > /proc/sys/vm/drop_caches\"'" 23 | 24 | for WORKERS in "${WORKER_COUNTS[@]}"; do 25 | HYPERFINE_CMD+=" 'mkbrr create \"$FILE_PATH\" --workers $WORKERS'" 26 | done 27 | 28 | eval "$HYPERFINE_CMD" 29 | 30 | echo "Benchmarking complete." --------------------------------------------------------------------------------