├── .Dockerignore ├── .editorconfig ├── .github └── workflows │ └── test.yaml ├── .gitignore ├── .golangci.yml ├── LICENSE ├── Makefile ├── client ├── client.go ├── client_test.go ├── metrics.go ├── openfile_darwin.go ├── openfile_linux.go └── testdata │ ├── dummy-stream │ ├── master.m3u8 │ ├── stream_0.m3u8 │ ├── stream_1.m3u8 │ ├── stream_2.m3u8 │ └── stream_3.m3u8 │ └── known-stream │ ├── master.m3u8 │ ├── v0.m3u8 │ ├── v1.m3u8 │ ├── v2.m3u8 │ └── v3.m3u8 ├── conductor.ex.yml ├── docker-compose.yml ├── docker ├── Dockerfile-conductor ├── Dockerfile-cworker ├── Dockerfile-ffmpeg ├── Dockerfile-ffprobe └── Dockerfile-tccli ├── encoder ├── encoder.go ├── encoder_test.go ├── logger.go ├── pool.go ├── pool_test.go └── spritegen.go ├── go.mod ├── go.sum ├── internal ├── config │ └── config.go ├── metrics │ └── metrics.go ├── testservices │ └── testservices.go └── version │ └── version.go ├── ladder ├── arguments.go ├── const.go ├── defaults.go ├── defaults.yml ├── ladder.go ├── ladder_test.go ├── logger.go └── metadata.go ├── library ├── db │ ├── db.go │ ├── migrations.go │ ├── migrations │ │ ├── 0001_initial.sql │ │ ├── 0002_add_manifest.sql │ │ ├── 0003_default_accessed_at.sql │ │ └── 0004_add_released_at.sql │ ├── models.go │ ├── queries.sql │ └── queries.sql.go ├── library.go ├── library_test.go ├── logger.go ├── maintenance.go ├── maintenance_test.go ├── metrics.go ├── stream.go ├── stream_test.go ├── testdata │ └── dummy-stream │ │ ├── .manifest │ │ ├── master.m3u8 │ │ ├── stream_0.m3u8 │ │ ├── stream_1.m3u8 │ │ ├── stream_2.m3u8 │ │ └── stream_3.m3u8 ├── testing.go ├── validator.go └── walker.go ├── manager ├── channels.go ├── http.go ├── http_test.go ├── logger.go ├── manager.go ├── manager_test.go ├── metrics.go ├── pool.go └── pool_test.go ├── openapi.yaml ├── pkg ├── conductor │ ├── cmd │ │ └── main.go │ ├── conductor.go │ ├── metrics │ │ └── metrics.go │ └── tasks │ │ ├── messages.go │ │ └── tasks.go ├── dispatcher │ ├── dispatcher.go │ ├── 
dispatcher_test.go │ ├── logger.go │ ├── metrics.go │ ├── wait.go │ └── wait_test.go ├── logging │ ├── logging.go │ └── zapadapter │ │ ├── adapter_zap.go │ │ └── test_adapter_zap.go ├── mfr │ ├── logger.go │ ├── mfr.go │ └── mfr_test.go ├── migrator │ ├── db.go │ ├── logger.go │ ├── migrator.go │ └── testing.go ├── resolve │ ├── errors.go │ ├── logger.go │ ├── resolve.go │ └── resolve_test.go ├── retriever │ ├── retriever.go │ └── retriever_test.go └── timer │ └── timer.go ├── readme.md ├── sqlc.yaml ├── storage ├── local.go ├── logger.go ├── s3.go ├── s3_test.go ├── storage.go └── testdata │ └── dummy-stream │ ├── .manifest │ ├── master.m3u8 │ ├── stream_0.m3u8 │ ├── stream_1.m3u8 │ ├── stream_2.m3u8 │ └── stream_3.m3u8 ├── tccli └── main.go ├── tower.ex.yml └── worker.ex.yml /.Dockerignore: -------------------------------------------------------------------------------- 1 | ** 2 | !dist 3 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | end_of_line = lf 11 | charset = utf-8 12 | 13 | # Use 2 spaces for the HTML files 14 | [*.html] 15 | indent_size = 2 16 | 17 | # The JSON files contain newlines inconsistently 18 | [*.json] 19 | indent_size = 2 20 | insert_final_newline = ignore 21 | 22 | [*.yml] 23 | indent_size = 2 24 | 25 | [*.yaml] 26 | indent_size = 2 27 | 28 | [**/admin/js/vendor/**] 29 | indent_style = ignore 30 | indent_size = ignore 31 | 32 | # Minified JavaScript files shouldn't be changed 33 | [**.min.js] 34 | indent_style = ignore 35 | insert_final_newline = ignore 36 | 37 | # Makefiles always use tabs for indentation 38 | [Makefile] 39 | indent_style = tab 40 | 41 | [docs/**.txt] 42 | max_line_length = 79 43 | 44 | # Go lang uses tabs by default 
45 | [*.go] 46 | indent_style = tab 47 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - '**' 10 | 11 | jobs: 12 | test: 13 | name: test 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | with: 18 | fetch-depth: 0 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version: "1.24" 22 | - uses: FedericoCarboni/setup-ffmpeg@v3 23 | id: setup-ffmpeg 24 | with: 25 | ffmpeg-version: release 26 | - run: | 27 | make test_prepare 28 | - name: Run tests 29 | run: go test -covermode=count -coverprofile=coverage.out ./... 30 | lint: 31 | name: lint 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | - uses: actions/setup-go@v5 36 | with: 37 | go-version: "1.24" 38 | cache: false 39 | - name: golangci-lint 40 | uses: golangci/golangci-lint-action@v7 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/dist/ 2 | coverage.* 3 | *.sqlite* 4 | ./transcoder 5 | data/ 6 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | linters: 3 | default: none 4 | enable: 5 | - dupl 6 | - goconst 7 | - gocritic 8 | - gocyclo 9 | - gosec 10 | - govet 11 | - ineffassign 12 | - misspell 13 | - sqlclosecheck 14 | - staticcheck 15 | - unused 16 | settings: 17 | errcheck: 18 | check-type-assertions: true 19 | goconst: 20 | min-len: 2 21 | min-occurrences: 3 22 | exclusions: 23 | generated: lax 24 | presets: 25 | - comments 26 | - common-false-positives 27 | - legacy 28 | - std-error-handling 29 | paths: 30 | - third_party$ 31 | - 
builtin$ 32 | - examples$ 33 | formatters: 34 | enable: 35 | - gci 36 | - gofmt 37 | - goimports 38 | settings: 39 | gci: 40 | sections: 41 | - standard 42 | - prefix(github.com/OdyseeTeam) 43 | - prefix(github.com/lbryio) 44 | - default 45 | custom-order: true 46 | no-lex-order: true 47 | exclusions: 48 | generated: lax 49 | paths: 50 | - third_party$ 51 | - builtin$ 52 | - examples$ 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 LBRY Inc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 6 | "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the 8 | following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 11 | 12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 13 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 14 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | LOCAL_ARCH ?= $(shell uname) 2 | VERSION ?= $(shell git describe --tags --match 'v*'|sed -e 's/v//') 3 | TRANSCODER_VERSION ?= $(shell git describe --tags --match 'transcoder-v*'|sed 's/transcoder-v\([0-9.]*\).*/\1/') 4 | BUILD_DIR ?= dist 5 | GOOS ?= linux 6 | GOARCH ?= amd64 7 | GO_BUILD ?= go build 8 | 9 | transcoder: 10 | GOARCH=$(GOARCH) GOOS=$(GOOS) CGO_ENABLED=0 \ 11 | $(GO_BUILD) -o $(BUILD_DIR)/$(GOOS)_$(GOARCH)/transcoder \ 12 | -ldflags "-s -w -X github.com/OdyseeTeam/transcoder/internal/version.Version=$(TRANSCODER_VERSION)" \ 13 | ./pkg/conductor/cmd/ 14 | 15 | conductor_image: $(BUILD_DIR)/$(GOOS)_$(GOARCH)/transcoder 16 | docker buildx build -f docker/Dockerfile-conductor -t odyseeteam/transcoder-conductor:$(TRANSCODER_VERSION) --platform linux/amd64 . 17 | docker tag odyseeteam/transcoder-conductor:$(TRANSCODER_VERSION) odyseeteam/transcoder-conductor:latest 18 | 19 | cworker_image: $(BUILD_DIR)/$(GOOS)_$(GOARCH)/transcoder 20 | docker buildx build -f docker/Dockerfile-cworker -t odyseeteam/transcoder-cworker:$(TRANSCODER_VERSION) --platform linux/amd64 . 21 | docker tag odyseeteam/transcoder-cworker:$(TRANSCODER_VERSION) odyseeteam/transcoder-cworker:latest 22 | 23 | ffmpeg_image: 24 | docker buildx build -f docker/Dockerfile-ffmpeg -t odyseeteam/transcoder-ffmpeg:git --platform linux/amd64 . 25 | 26 | tccli_image: 27 | docker buildx build -f docker/Dockerfile-tccli -t odyseeteam/transcoder-tccli:latest --platform linux/amd64 . 
28 | 29 | test_clean: 30 | docker-compose down 31 | docker volume rm -f transcoder_minio-data 32 | docker volume rm -f transcoder_redis-data 33 | docker volume rm -f transcoder_db-data 34 | 35 | test_prepare: 36 | make transcoder 37 | make conductor_image 38 | make cworker_image 39 | docker compose -p transcoder up -d minio db redis 40 | docker compose -p transcoder up -d cworker conductor 41 | docker compose -p transcoder up minio-prepare 42 | 43 | test: test_prepare 44 | go test -covermode=count -coverprofile=coverage.out ./... 45 | 46 | towerz: 47 | docker run --rm -v "$(PWD)":/usr/src/transcoder -w /usr/src/transcoder --platform linux/amd64 golang:1.16.10 make tower 48 | 49 | .PHONY: tccli 50 | tccli: 51 | GOARCH=$(GOARCH) GOOS=$(GOOS) CGO_ENABLED=0 \ 52 | $(GO_BUILD) -o $(BUILD_DIR)/$(GOOS)_$(GOARCH)/tccli \ 53 | -ldflags "-s -w -X github.com/OdyseeTeam/transcoder/internal/version.Version=$(TRANSCODER_VERSION)" \ 54 | ./tccli/ 55 | 56 | tccli_mac: 57 | CGO_ENABLED=0 go build -o dist/arm64_darwin/tccli ./tccli 58 | 59 | clean: 60 | rm -rf $(BUILD_DIR)/* 61 | -------------------------------------------------------------------------------- /client/metrics.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | ) 8 | 9 | const ( 10 | failureForbidden = "forbidden" 11 | failureNotFound = "not_found" 12 | failureTransport = "transport_error" 13 | failureServerError = "server_error" 14 | 15 | resultUnderway = "underway" 16 | resultFound = "found" 17 | 18 | fetchSourceRemote = "remote" 19 | fetchSourceLocal = "local" 20 | ) 21 | 22 | var ( 23 | once = sync.Once{} 24 | 25 | TranscodedCacheSizeBytes = prometheus.NewGauge(prometheus.GaugeOpts{ 26 | Name: "transcoded_cache_size_bytes", 27 | }) 28 | TranscodedCacheItemsCount = prometheus.NewGauge(prometheus.GaugeOpts{ 29 | Name: "transcoded_cache_items_count", 30 | }) 31 | 
TranscodedResult = prometheus.NewCounterVec(prometheus.CounterOpts{ 32 | Name: "transcoded_request_result", 33 | }, []string{"type"}) 34 | 35 | TranscodedCacheQueryCount = prometheus.NewCounter(prometheus.CounterOpts{ 36 | Name: "transcoded_cache_query_count", 37 | }) 38 | TranscodedCacheMiss = prometheus.NewCounter(prometheus.CounterOpts{ 39 | Name: "transcoded_cache_miss", 40 | }) 41 | TranscodedCacheRetry = prometheus.NewCounter(prometheus.CounterOpts{ 42 | Name: "transcoded_cache_retry", 43 | }) 44 | 45 | FetchSizeBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ 46 | Name: "fetch_size_bytes", 47 | }, []string{"source"}) 48 | FetchDurationSeconds = prometheus.NewCounterVec(prometheus.CounterOpts{ 49 | Name: "fetch_duration_seconds", 50 | }, []string{"source"}) 51 | FetchCount = prometheus.NewCounterVec(prometheus.CounterOpts{ 52 | Name: "fetch_count", 53 | }, []string{"source"}) 54 | FetchFailureCount = prometheus.NewCounterVec(prometheus.CounterOpts{ 55 | Name: "fetch_failure_count", 56 | }, []string{"source", "type"}) 57 | ) 58 | 59 | func RegisterMetrics() { 60 | once.Do(func() { 61 | prometheus.MustRegister( 62 | TranscodedCacheSizeBytes, TranscodedCacheItemsCount, TranscodedResult, 63 | TranscodedCacheQueryCount, TranscodedCacheMiss, TranscodedCacheRetry, 64 | FetchSizeBytes, FetchDurationSeconds, 65 | FetchCount, FetchFailureCount, 66 | ) 67 | }) 68 | } 69 | -------------------------------------------------------------------------------- /client/openfile_darwin.go: -------------------------------------------------------------------------------- 1 | //go:build darwin 2 | // +build darwin 3 | 4 | package client 5 | 6 | import ( 7 | "io" 8 | "os" 9 | ) 10 | 11 | func directCopy(dst string, from io.Reader) (int64, error) { 12 | f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) 13 | if err != nil { 14 | return 0, err 15 | } 16 | 17 | defer f.Close() 18 | return io.Copy(f, from) 19 | } 20 | 
-------------------------------------------------------------------------------- /client/openfile_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package client 5 | 6 | import ( 7 | "io" 8 | "os" 9 | "syscall" 10 | 11 | "github.com/brk0v/directio" 12 | ) 13 | 14 | func directCopy(dst string, from io.Reader) (int64, error) { 15 | f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|syscall.O_DIRECT, 0666) 16 | if err != nil { 17 | return 0, err 18 | } 19 | 20 | df, err := directio.New(f) 21 | if err != nil { 22 | return 0, err 23 | } 24 | n, err := io.Copy(df, from) 25 | df.Flush() 26 | f.Close() 27 | 28 | return n, err 29 | } 30 | -------------------------------------------------------------------------------- /client/testdata/dummy-stream/master.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-STREAM-INF:BANDWIDTH=3990800,RESOLUTION=1920x1080,CODECS="avc1.42c02a,mp4a.40.2" 4 | stream_0.m3u8 5 | 6 | #EXT-X-STREAM-INF:BANDWIDTH=2560800,RESOLUTION=1280x720,CODECS="avc1.42c020,mp4a.40.2" 7 | stream_1.m3u8 8 | 9 | #EXT-X-STREAM-INF:BANDWIDTH=2010800,RESOLUTION=854x480,CODECS="avc1.42c01f,mp4a.40.2" 10 | stream_2.m3u8 11 | 12 | #EXT-X-STREAM-INF:BANDWIDTH=1020800,RESOLUTION=640x360,CODECS="avc1.42c01f,mp4a.40.2" 13 | stream_3.m3u8 14 | -------------------------------------------------------------------------------- /client/testdata/dummy-stream/stream_0.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s0_000000.ts 9 | #EXTINF:10.010000, 10 | s0_000001.ts 11 | #EXTINF:10.010000, 12 | s0_000002.ts 13 | #EXTINF:10.010000, 14 | s0_000003.ts 15 | #EXTINF:10.010000, 16 | s0_000004.ts 
17 | #EXTINF:10.010000, 18 | s0_000005.ts 19 | #EXTINF:10.010000, 20 | s0_000006.ts 21 | #EXTINF:10.010000, 22 | s0_000007.ts 23 | #EXTINF:10.010000, 24 | s0_000008.ts 25 | #EXTINF:10.010000, 26 | s0_000009.ts 27 | #EXTINF:10.010000, 28 | s0_000010.ts 29 | #EXTINF:10.010000, 30 | s0_000011.ts 31 | #EXTINF:10.010000, 32 | s0_000012.ts 33 | #EXTINF:10.010000, 34 | s0_000013.ts 35 | #EXTINF:10.010000, 36 | s0_000014.ts 37 | #EXTINF:10.010000, 38 | s0_000015.ts 39 | #EXTINF:10.010000, 40 | s0_000016.ts 41 | #EXTINF:10.010000, 42 | s0_000017.ts 43 | #EXTINF:10.010000, 44 | s0_000018.ts 45 | #EXTINF:10.010000, 46 | s0_000019.ts 47 | #EXTINF:10.010000, 48 | s0_000020.ts 49 | #EXTINF:10.010000, 50 | s0_000021.ts 51 | #EXTINF:10.010000, 52 | s0_000022.ts 53 | #EXTINF:10.010000, 54 | s0_000023.ts 55 | #EXTINF:10.010000, 56 | s0_000024.ts 57 | #EXTINF:10.010000, 58 | s0_000025.ts 59 | #EXTINF:10.010000, 60 | s0_000026.ts 61 | #EXTINF:10.010000, 62 | s0_000027.ts 63 | #EXTINF:10.010000, 64 | s0_000028.ts 65 | #EXTINF:10.010000, 66 | s0_000029.ts 67 | #EXTINF:10.010000, 68 | s0_000030.ts 69 | #EXTINF:10.010000, 70 | s0_000031.ts 71 | #EXTINF:10.010000, 72 | s0_000032.ts 73 | #EXTINF:10.010000, 74 | s0_000033.ts 75 | #EXTINF:10.010000, 76 | s0_000034.ts 77 | #EXTINF:10.010000, 78 | s0_000035.ts 79 | #EXTINF:10.010000, 80 | s0_000036.ts 81 | #EXTINF:10.010000, 82 | s0_000037.ts 83 | #EXTINF:10.010000, 84 | s0_000038.ts 85 | #EXTINF:10.010000, 86 | s0_000039.ts 87 | #EXTINF:10.010000, 88 | s0_000040.ts 89 | #EXTINF:10.010000, 90 | s0_000041.ts 91 | #EXTINF:10.010000, 92 | s0_000042.ts 93 | #EXTINF:10.010000, 94 | s0_000043.ts 95 | #EXTINF:10.010000, 96 | s0_000044.ts 97 | #EXTINF:10.010000, 98 | s0_000045.ts 99 | #EXTINF:10.010000, 100 | s0_000046.ts 101 | #EXTINF:10.010000, 102 | s0_000047.ts 103 | #EXTINF:10.010000, 104 | s0_000048.ts 105 | #EXTINF:10.010000, 106 | s0_000049.ts 107 | #EXTINF:10.010000, 108 | s0_000050.ts 109 | #EXTINF:10.010000, 110 | s0_000051.ts 111 | 
#EXTINF:10.010000, 112 | s0_000052.ts 113 | #EXTINF:10.010000, 114 | s0_000053.ts 115 | #EXTINF:10.010000, 116 | s0_000054.ts 117 | #EXTINF:10.010000, 118 | s0_000055.ts 119 | #EXTINF:10.010000, 120 | s0_000056.ts 121 | #EXTINF:10.010000, 122 | s0_000057.ts 123 | #EXTINF:10.010000, 124 | s0_000058.ts 125 | #EXTINF:10.010000, 126 | s0_000059.ts 127 | #EXTINF:10.010000, 128 | s0_000060.ts 129 | #EXTINF:10.010000, 130 | s0_000061.ts 131 | #EXTINF:10.010000, 132 | s0_000062.ts 133 | #EXTINF:10.010000, 134 | s0_000063.ts 135 | #EXTINF:10.010000, 136 | s0_000064.ts 137 | #EXTINF:10.010000, 138 | s0_000065.ts 139 | #EXTINF:10.010000, 140 | s0_000066.ts 141 | #EXTINF:10.010000, 142 | s0_000067.ts 143 | #EXTINF:10.010000, 144 | s0_000068.ts 145 | #EXTINF:10.010000, 146 | s0_000069.ts 147 | #EXTINF:10.010000, 148 | s0_000070.ts 149 | #EXTINF:10.010000, 150 | s0_000071.ts 151 | #EXTINF:10.010000, 152 | s0_000072.ts 153 | #EXTINF:10.010000, 154 | s0_000073.ts 155 | #EXTINF:10.010000, 156 | s0_000074.ts 157 | #EXTINF:10.010000, 158 | s0_000075.ts 159 | #EXTINF:10.010000, 160 | s0_000076.ts 161 | #EXTINF:9.479289, 162 | s0_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /client/testdata/dummy-stream/stream_1.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s1_000000.ts 9 | #EXTINF:10.010000, 10 | s1_000001.ts 11 | #EXTINF:10.010000, 12 | s1_000002.ts 13 | #EXTINF:10.010000, 14 | s1_000003.ts 15 | #EXTINF:10.010000, 16 | s1_000004.ts 17 | #EXTINF:10.010000, 18 | s1_000005.ts 19 | #EXTINF:10.010000, 20 | s1_000006.ts 21 | #EXTINF:10.010000, 22 | s1_000007.ts 23 | #EXTINF:10.010000, 24 | s1_000008.ts 25 | #EXTINF:10.010000, 26 | s1_000009.ts 27 | #EXTINF:10.010000, 28 | s1_000010.ts 29 | 
#EXTINF:10.010000, 30 | s1_000011.ts 31 | #EXTINF:10.010000, 32 | s1_000012.ts 33 | #EXTINF:10.010000, 34 | s1_000013.ts 35 | #EXTINF:10.010000, 36 | s1_000014.ts 37 | #EXTINF:10.010000, 38 | s1_000015.ts 39 | #EXTINF:10.010000, 40 | s1_000016.ts 41 | #EXTINF:10.010000, 42 | s1_000017.ts 43 | #EXTINF:10.010000, 44 | s1_000018.ts 45 | #EXTINF:10.010000, 46 | s1_000019.ts 47 | #EXTINF:10.010000, 48 | s1_000020.ts 49 | #EXTINF:10.010000, 50 | s1_000021.ts 51 | #EXTINF:10.010000, 52 | s1_000022.ts 53 | #EXTINF:10.010000, 54 | s1_000023.ts 55 | #EXTINF:10.010000, 56 | s1_000024.ts 57 | #EXTINF:10.010000, 58 | s1_000025.ts 59 | #EXTINF:10.010000, 60 | s1_000026.ts 61 | #EXTINF:10.010000, 62 | s1_000027.ts 63 | #EXTINF:10.010000, 64 | s1_000028.ts 65 | #EXTINF:10.010000, 66 | s1_000029.ts 67 | #EXTINF:10.010000, 68 | s1_000030.ts 69 | #EXTINF:10.010000, 70 | s1_000031.ts 71 | #EXTINF:10.010000, 72 | s1_000032.ts 73 | #EXTINF:10.010000, 74 | s1_000033.ts 75 | #EXTINF:10.010000, 76 | s1_000034.ts 77 | #EXTINF:10.010000, 78 | s1_000035.ts 79 | #EXTINF:10.010000, 80 | s1_000036.ts 81 | #EXTINF:10.010000, 82 | s1_000037.ts 83 | #EXTINF:10.010000, 84 | s1_000038.ts 85 | #EXTINF:10.010000, 86 | s1_000039.ts 87 | #EXTINF:10.010000, 88 | s1_000040.ts 89 | #EXTINF:10.010000, 90 | s1_000041.ts 91 | #EXTINF:10.010000, 92 | s1_000042.ts 93 | #EXTINF:10.010000, 94 | s1_000043.ts 95 | #EXTINF:10.010000, 96 | s1_000044.ts 97 | #EXTINF:10.010000, 98 | s1_000045.ts 99 | #EXTINF:10.010000, 100 | s1_000046.ts 101 | #EXTINF:10.010000, 102 | s1_000047.ts 103 | #EXTINF:10.010000, 104 | s1_000048.ts 105 | #EXTINF:10.010000, 106 | s1_000049.ts 107 | #EXTINF:10.010000, 108 | s1_000050.ts 109 | #EXTINF:10.010000, 110 | s1_000051.ts 111 | #EXTINF:10.010000, 112 | s1_000052.ts 113 | #EXTINF:10.010000, 114 | s1_000053.ts 115 | #EXTINF:10.010000, 116 | s1_000054.ts 117 | #EXTINF:10.010000, 118 | s1_000055.ts 119 | #EXTINF:10.010000, 120 | s1_000056.ts 121 | #EXTINF:10.010000, 122 | s1_000057.ts 123 | 
#EXTINF:10.010000, 124 | s1_000058.ts 125 | #EXTINF:10.010000, 126 | s1_000059.ts 127 | #EXTINF:10.010000, 128 | s1_000060.ts 129 | #EXTINF:10.010000, 130 | s1_000061.ts 131 | #EXTINF:10.010000, 132 | s1_000062.ts 133 | #EXTINF:10.010000, 134 | s1_000063.ts 135 | #EXTINF:10.010000, 136 | s1_000064.ts 137 | #EXTINF:10.010000, 138 | s1_000065.ts 139 | #EXTINF:10.010000, 140 | s1_000066.ts 141 | #EXTINF:10.010000, 142 | s1_000067.ts 143 | #EXTINF:10.010000, 144 | s1_000068.ts 145 | #EXTINF:10.010000, 146 | s1_000069.ts 147 | #EXTINF:10.010000, 148 | s1_000070.ts 149 | #EXTINF:10.010000, 150 | s1_000071.ts 151 | #EXTINF:10.010000, 152 | s1_000072.ts 153 | #EXTINF:10.010000, 154 | s1_000073.ts 155 | #EXTINF:10.010000, 156 | s1_000074.ts 157 | #EXTINF:10.010000, 158 | s1_000075.ts 159 | #EXTINF:10.010000, 160 | s1_000076.ts 161 | #EXTINF:9.479289, 162 | s1_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /client/testdata/dummy-stream/stream_2.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s2_000000.ts 9 | #EXTINF:10.010000, 10 | s2_000001.ts 11 | #EXTINF:10.010000, 12 | s2_000002.ts 13 | #EXTINF:10.010000, 14 | s2_000003.ts 15 | #EXTINF:10.010000, 16 | s2_000004.ts 17 | #EXTINF:10.010000, 18 | s2_000005.ts 19 | #EXTINF:10.010000, 20 | s2_000006.ts 21 | #EXTINF:10.010000, 22 | s2_000007.ts 23 | #EXTINF:10.010000, 24 | s2_000008.ts 25 | #EXTINF:10.010000, 26 | s2_000009.ts 27 | #EXTINF:10.010000, 28 | s2_000010.ts 29 | #EXTINF:10.010000, 30 | s2_000011.ts 31 | #EXTINF:10.010000, 32 | s2_000012.ts 33 | #EXTINF:10.010000, 34 | s2_000013.ts 35 | #EXTINF:10.010000, 36 | s2_000014.ts 37 | #EXTINF:10.010000, 38 | s2_000015.ts 39 | #EXTINF:10.010000, 40 | s2_000016.ts 41 | #EXTINF:10.010000, 42 | 
s2_000017.ts 43 | #EXTINF:10.010000, 44 | s2_000018.ts 45 | #EXTINF:10.010000, 46 | s2_000019.ts 47 | #EXTINF:10.010000, 48 | s2_000020.ts 49 | #EXTINF:10.010000, 50 | s2_000021.ts 51 | #EXTINF:10.010000, 52 | s2_000022.ts 53 | #EXTINF:10.010000, 54 | s2_000023.ts 55 | #EXTINF:10.010000, 56 | s2_000024.ts 57 | #EXTINF:10.010000, 58 | s2_000025.ts 59 | #EXTINF:10.010000, 60 | s2_000026.ts 61 | #EXTINF:10.010000, 62 | s2_000027.ts 63 | #EXTINF:10.010000, 64 | s2_000028.ts 65 | #EXTINF:10.010000, 66 | s2_000029.ts 67 | #EXTINF:10.010000, 68 | s2_000030.ts 69 | #EXTINF:10.010000, 70 | s2_000031.ts 71 | #EXTINF:10.010000, 72 | s2_000032.ts 73 | #EXTINF:10.010000, 74 | s2_000033.ts 75 | #EXTINF:10.010000, 76 | s2_000034.ts 77 | #EXTINF:10.010000, 78 | s2_000035.ts 79 | #EXTINF:10.010000, 80 | s2_000036.ts 81 | #EXTINF:10.010000, 82 | s2_000037.ts 83 | #EXTINF:10.010000, 84 | s2_000038.ts 85 | #EXTINF:10.010000, 86 | s2_000039.ts 87 | #EXTINF:10.010000, 88 | s2_000040.ts 89 | #EXTINF:10.010000, 90 | s2_000041.ts 91 | #EXTINF:10.010000, 92 | s2_000042.ts 93 | #EXTINF:10.010000, 94 | s2_000043.ts 95 | #EXTINF:10.010000, 96 | s2_000044.ts 97 | #EXTINF:10.010000, 98 | s2_000045.ts 99 | #EXTINF:10.010000, 100 | s2_000046.ts 101 | #EXTINF:10.010000, 102 | s2_000047.ts 103 | #EXTINF:10.010000, 104 | s2_000048.ts 105 | #EXTINF:10.010000, 106 | s2_000049.ts 107 | #EXTINF:10.010000, 108 | s2_000050.ts 109 | #EXTINF:10.010000, 110 | s2_000051.ts 111 | #EXTINF:10.010000, 112 | s2_000052.ts 113 | #EXTINF:10.010000, 114 | s2_000053.ts 115 | #EXTINF:10.010000, 116 | s2_000054.ts 117 | #EXTINF:10.010000, 118 | s2_000055.ts 119 | #EXTINF:10.010000, 120 | s2_000056.ts 121 | #EXTINF:10.010000, 122 | s2_000057.ts 123 | #EXTINF:10.010000, 124 | s2_000058.ts 125 | #EXTINF:10.010000, 126 | s2_000059.ts 127 | #EXTINF:10.010000, 128 | s2_000060.ts 129 | #EXTINF:10.010000, 130 | s2_000061.ts 131 | #EXTINF:10.010000, 132 | s2_000062.ts 133 | #EXTINF:10.010000, 134 | s2_000063.ts 135 | 
#EXTINF:10.010000, 136 | s2_000064.ts 137 | #EXTINF:10.010000, 138 | s2_000065.ts 139 | #EXTINF:10.010000, 140 | s2_000066.ts 141 | #EXTINF:10.010000, 142 | s2_000067.ts 143 | #EXTINF:10.010000, 144 | s2_000068.ts 145 | #EXTINF:10.010000, 146 | s2_000069.ts 147 | #EXTINF:10.010000, 148 | s2_000070.ts 149 | #EXTINF:10.010000, 150 | s2_000071.ts 151 | #EXTINF:10.010000, 152 | s2_000072.ts 153 | #EXTINF:10.010000, 154 | s2_000073.ts 155 | #EXTINF:10.010000, 156 | s2_000074.ts 157 | #EXTINF:10.010000, 158 | s2_000075.ts 159 | #EXTINF:10.010000, 160 | s2_000076.ts 161 | #EXTINF:9.479289, 162 | s2_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /client/testdata/dummy-stream/stream_3.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s3_000000.ts 9 | #EXTINF:10.010000, 10 | s3_000001.ts 11 | #EXTINF:10.010000, 12 | s3_000002.ts 13 | #EXTINF:10.010000, 14 | s3_000003.ts 15 | #EXTINF:10.010000, 16 | s3_000004.ts 17 | #EXTINF:10.010000, 18 | s3_000005.ts 19 | #EXTINF:10.010000, 20 | s3_000006.ts 21 | #EXTINF:10.010000, 22 | s3_000007.ts 23 | #EXTINF:10.010000, 24 | s3_000008.ts 25 | #EXTINF:10.010000, 26 | s3_000009.ts 27 | #EXTINF:10.010000, 28 | s3_000010.ts 29 | #EXTINF:10.010000, 30 | s3_000011.ts 31 | #EXTINF:10.010000, 32 | s3_000012.ts 33 | #EXTINF:10.010000, 34 | s3_000013.ts 35 | #EXTINF:10.010000, 36 | s3_000014.ts 37 | #EXTINF:10.010000, 38 | s3_000015.ts 39 | #EXTINF:10.010000, 40 | s3_000016.ts 41 | #EXTINF:10.010000, 42 | s3_000017.ts 43 | #EXTINF:10.010000, 44 | s3_000018.ts 45 | #EXTINF:10.010000, 46 | s3_000019.ts 47 | #EXTINF:10.010000, 48 | s3_000020.ts 49 | #EXTINF:10.010000, 50 | s3_000021.ts 51 | #EXTINF:10.010000, 52 | s3_000022.ts 53 | #EXTINF:10.010000, 54 | s3_000023.ts 
55 | #EXTINF:10.010000, 56 | s3_000024.ts 57 | #EXTINF:10.010000, 58 | s3_000025.ts 59 | #EXTINF:10.010000, 60 | s3_000026.ts 61 | #EXTINF:10.010000, 62 | s3_000027.ts 63 | #EXTINF:10.010000, 64 | s3_000028.ts 65 | #EXTINF:10.010000, 66 | s3_000029.ts 67 | #EXTINF:10.010000, 68 | s3_000030.ts 69 | #EXTINF:10.010000, 70 | s3_000031.ts 71 | #EXTINF:10.010000, 72 | s3_000032.ts 73 | #EXTINF:10.010000, 74 | s3_000033.ts 75 | #EXTINF:10.010000, 76 | s3_000034.ts 77 | #EXTINF:10.010000, 78 | s3_000035.ts 79 | #EXTINF:10.010000, 80 | s3_000036.ts 81 | #EXTINF:10.010000, 82 | s3_000037.ts 83 | #EXTINF:10.010000, 84 | s3_000038.ts 85 | #EXTINF:10.010000, 86 | s3_000039.ts 87 | #EXTINF:10.010000, 88 | s3_000040.ts 89 | #EXTINF:10.010000, 90 | s3_000041.ts 91 | #EXTINF:10.010000, 92 | s3_000042.ts 93 | #EXTINF:10.010000, 94 | s3_000043.ts 95 | #EXTINF:10.010000, 96 | s3_000044.ts 97 | #EXTINF:10.010000, 98 | s3_000045.ts 99 | #EXTINF:10.010000, 100 | s3_000046.ts 101 | #EXTINF:10.010000, 102 | s3_000047.ts 103 | #EXTINF:10.010000, 104 | s3_000048.ts 105 | #EXTINF:10.010000, 106 | s3_000049.ts 107 | #EXTINF:10.010000, 108 | s3_000050.ts 109 | #EXTINF:10.010000, 110 | s3_000051.ts 111 | #EXTINF:10.010000, 112 | s3_000052.ts 113 | #EXTINF:10.010000, 114 | s3_000053.ts 115 | #EXTINF:10.010000, 116 | s3_000054.ts 117 | #EXTINF:10.010000, 118 | s3_000055.ts 119 | #EXTINF:10.010000, 120 | s3_000056.ts 121 | #EXTINF:10.010000, 122 | s3_000057.ts 123 | #EXTINF:10.010000, 124 | s3_000058.ts 125 | #EXTINF:10.010000, 126 | s3_000059.ts 127 | #EXTINF:10.010000, 128 | s3_000060.ts 129 | #EXTINF:10.010000, 130 | s3_000061.ts 131 | #EXTINF:10.010000, 132 | s3_000062.ts 133 | #EXTINF:10.010000, 134 | s3_000063.ts 135 | #EXTINF:10.010000, 136 | s3_000064.ts 137 | #EXTINF:10.010000, 138 | s3_000065.ts 139 | #EXTINF:10.010000, 140 | s3_000066.ts 141 | #EXTINF:10.010000, 142 | s3_000067.ts 143 | #EXTINF:10.010000, 144 | s3_000068.ts 145 | #EXTINF:10.010000, 146 | s3_000069.ts 147 | 
#EXTINF:10.010000, 148 | s3_000070.ts 149 | #EXTINF:10.010000, 150 | s3_000071.ts 151 | #EXTINF:10.010000, 152 | s3_000072.ts 153 | #EXTINF:10.010000, 154 | s3_000073.ts 155 | #EXTINF:10.010000, 156 | s3_000074.ts 157 | #EXTINF:10.010000, 158 | s3_000075.ts 159 | #EXTINF:10.010000, 160 | s3_000076.ts 161 | #EXTINF:9.479289, 162 | s3_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /client/testdata/known-stream/master.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-STREAM-INF:BANDWIDTH=4026000,RESOLUTION=1920x1080,CODECS="avc1.4d4028,mp4a.40.2" 4 | v0.m3u8 5 | 6 | #EXT-X-STREAM-INF:BANDWIDTH=2890800,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2" 7 | v1.m3u8 8 | 9 | #EXT-X-STREAM-INF:BANDWIDTH=655600,RESOLUTION=640x360,CODECS="avc1.4d401e,mp4a.40.2" 10 | v2.m3u8 11 | 12 | #EXT-X-STREAM-INF:BANDWIDTH=215600,RESOLUTION=256x144,CODECS="avc1.4d400b,mp4a.40.2" 13 | v3.m3u8 14 | 15 | -------------------------------------------------------------------------------- /client/testdata/known-stream/v0.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:6 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:5.772433, 8 | v0_s000000.ts 9 | #EXT-X-ENDLIST 10 | -------------------------------------------------------------------------------- /client/testdata/known-stream/v1.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:6 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:5.772433, 8 | v1_s000000.ts 9 | #EXT-X-ENDLIST 10 | -------------------------------------------------------------------------------- /client/testdata/known-stream/v2.m3u8: 
-------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:6 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:5.772433, 8 | v2_s000000.ts 9 | #EXT-X-ENDLIST 10 | -------------------------------------------------------------------------------- /client/testdata/known-stream/v3.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:6 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:5.866667, 8 | v3_s000000.ts 9 | #EXT-X-ENDLIST 10 | -------------------------------------------------------------------------------- /conductor.ex.yml: -------------------------------------------------------------------------------- 1 | Storages: 2 | - Name: local 3 | Type: S3 4 | Endpoint: http://minio:9000 5 | Region: us-east-1 6 | Bucket: transcoded 7 | Key: ody 8 | Secret: odyseetes3 9 | MaxSize: 1TB 10 | CreateBucket: true 11 | - Name: remote 12 | Type: S3 13 | Endpoint: https://s3.wasabisys.com 14 | Region: us-east-1 15 | Bucket: production-videos 16 | Key: AKIAXXXXXXXXXXXXXXXX 17 | Secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 18 | MaxSize: 5TB 19 | CreateBucket: false 20 | 21 | AdaptiveQueue: 22 | MinHits: 1 23 | 24 | Library: 25 | DSN: postgres://postgres:odyseeteam@db 26 | ManagerToken: managertoken123 27 | 28 | Redis: redis://:odyredis@redis:6379/1 29 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | conductor: 3 | image: odyseeteam/transcoder-conductor:latest 4 | platform: linux/amd64 5 | container_name: conductor 6 | command: 7 | - ./transcoder 8 | - conductor 9 | - --debug 10 | volumes: 11 | - ${PWD}/conductor.ex.yml:/app/conductor.yml 12 | ports: 13 | 
- "8080:8080" 14 | # build: 15 | # context: . 16 | # dockerfile: docker/Dockerfile-tower 17 | depends_on: 18 | - redis 19 | - db 20 | restart: unless-stopped 21 | cworker: 22 | image: odyseeteam/transcoder-cworker:latest 23 | platform: linux/amd64 24 | container_name: cworker 25 | command: 26 | - ./transcoder 27 | - worker 28 | - --blob-server=blobcache-eu.lbry.com 29 | # build: 30 | # context: . 31 | # dockerfile: docker/Dockerfile-worker 32 | depends_on: 33 | - redis 34 | - minio 35 | volumes: 36 | - ${PWD}/worker.ex.yml:/app/worker.yml 37 | restart: unless-stopped 38 | # asynqmon: 39 | # image: hibiken/asynqmon:latest 40 | # container_name: asynqmon 41 | # environment: 42 | # - REDIS_URL=redis://:odyredis@redis:6379/1 43 | # ports: 44 | # - '9090:8080' 45 | redis: 46 | image: redis:7.0 47 | container_name: redis 48 | ports: 49 | - '6379:6379' 50 | volumes: 51 | - redis-data:/data 52 | command: > 53 | --requirepass odyredis --appendonly yes 54 | labels: 55 | com.centurylinklabs.watchtower.enable: false 56 | restart: unless-stopped 57 | db: 58 | image: postgres:14 59 | container_name: db 60 | ports: 61 | - "5432:5432" 62 | environment: 63 | POSTGRES_PASSWORD: odyseeteam 64 | volumes: 65 | - db-data:/var/lib/postgresql/data 66 | command: 67 | - -clogging_collector=on 68 | - -clog_statement=all 69 | - -clog_destination=stderr 70 | healthcheck: 71 | test: ["CMD-SHELL", "pg_isready -U postgres"] 72 | interval: 5s 73 | timeout: 5s 74 | retries: 5 75 | minio: 76 | image: minio/minio 77 | container_name: minio 78 | ports: 79 | - "9000:9000" 80 | - "38861:38861" 81 | - "41949:41949" 82 | volumes: 83 | - minio-data:/data 84 | environment: 85 | MINIO_ROOT_USER: ody 86 | MINIO_ROOT_PASSWORD: odyseetes3 87 | command: server --address 0.0.0.0:9000 /data 88 | minio-prepare: 89 | image: minio/mc 90 | volumes: 91 | - minio-data:/data 92 | entrypoint: > 93 | /bin/sh -c " 94 | sleep 3; 95 | /usr/bin/mc config host add myminio http://minio:9000 ody odyseetes3; 96 | /usr/bin/mc mb 
myminio/transcoded; 97 | /usr/bin/mc anonymous set download myminio/transcoded; 98 | /usr/bin/mc anonymous set public myminio/transcoded; 99 | exit 0; 100 | " 101 | depends_on: ["minio"] 102 | 103 | volumes: 104 | db-data: 105 | minio-data: 106 | redis-data: 107 | -------------------------------------------------------------------------------- /docker/Dockerfile-conductor: -------------------------------------------------------------------------------- 1 | FROM alpine:3.21 2 | EXPOSE 8080 3 | 4 | RUN apk add --no-cache libc6-compat 5 | 6 | WORKDIR /app 7 | COPY ./dist/linux_amd64/transcoder . 8 | 9 | CMD ["./transcoder", "conductor"] 10 | -------------------------------------------------------------------------------- /docker/Dockerfile-cworker: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM odyseeteam/transcoder-ffmpeg:git AS ffmpeg 4 | FROM odyseeteam/transcoder-gensprite:22.10.0 AS spritegen 5 | FROM odyseeteam/transcoder-gensprite:22.10.0 6 | 7 | EXPOSE 8080 8 | 9 | RUN apk add --no-cache libc6-compat 10 | COPY --from=ffmpeg /build/ffmpeg /build/ffprobe /usr/local/bin/ 11 | COPY --from=spritegen /usr/src/app /usr/src/spritegen 12 | 13 | WORKDIR /app 14 | 15 | COPY ./dist/linux_amd64/transcoder . 
16 | 17 | CMD ["./transcoder", "worker"] 18 | -------------------------------------------------------------------------------- /docker/Dockerfile-ffmpeg: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3.21 4 | 5 | WORKDIR /build 6 | 7 | ADD https://johnvansickle.com/ffmpeg/builds/ffmpeg-git-amd64-static.tar.xz ./ 8 | RUN tar -xf ffmpeg-git-amd64-static.tar.xz && mv ffmpeg-*-static/ffprobe ffmpeg-*-static/ffmpeg ./ 9 | 10 | RUN chmod a+x ffmpeg ffprobe 11 | -------------------------------------------------------------------------------- /docker/Dockerfile-ffprobe: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM odyseeteam/transcoder-ffmpeg:git AS ffmpeg 4 | FROM alpine:3.21 5 | 6 | COPY --from=ffmpeg /build/ffprobe /usr/local/bin/ 7 | 8 | ENTRYPOINT ["/usr/local/bin/ffprobe"] 9 | -------------------------------------------------------------------------------- /docker/Dockerfile-tccli: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # FROM odyseeteam/transcoder-gensprite:latest AS spritegen 4 | 5 | FROM alpine:3.21 AS gather 6 | 7 | WORKDIR /build 8 | 9 | ADD https://johnvansickle.com/ffmpeg/builds/ffmpeg-git-arm64-static.tar.xz ./ 10 | RUN tar -xf ffmpeg-git-arm64-static.tar.xz && mv ffmpeg-*-static/ffmpeg ffmpeg-*-static/ffprobe ./ 11 | 12 | RUN chmod a+x ffmpeg ffprobe 13 | 14 | FROM alpine:3.21 AS build 15 | 16 | EXPOSE 8080 17 | 18 | RUN apk add --no-cache libc6-compat 19 | COPY --from=gather /build/ffmpeg /build/ffprobe /usr/local/bin/ 20 | # COPY --from=spritegen /usr/src/app /usr/src/spritegen 21 | 22 | WORKDIR /app 23 | 24 | COPY ./dist/linux_amd64/tccli . 
25 | COPY ./conductor.ex.yml ./conductor.yml 26 | 27 | ENTRYPOINT ["/app/tccli"] 28 | -------------------------------------------------------------------------------- /encoder/encoder_test.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/OdyseeTeam/transcoder/ladder" 12 | "github.com/OdyseeTeam/transcoder/pkg/logging/zapadapter" 13 | "github.com/OdyseeTeam/transcoder/pkg/resolve" 14 | 15 | "github.com/shopspring/decimal" 16 | "github.com/stretchr/testify/assert" 17 | "github.com/stretchr/testify/require" 18 | "github.com/stretchr/testify/suite" 19 | ) 20 | 21 | var ( 22 | FPS0 = decimal.NewFromInt(0) 23 | FPS15 = decimal.NewFromInt(15) 24 | ) 25 | 26 | type encoderSuite struct { 27 | suite.Suite 28 | file *os.File 29 | in, out string 30 | } 31 | 32 | func TestEncoderSuite(t *testing.T) { 33 | suite.Run(t, new(encoderSuite)) 34 | } 35 | 36 | func (s *encoderSuite) SetupSuite() { 37 | s.out = path.Join(os.TempDir(), "encoderSuite_out") 38 | s.in = path.Join(os.TempDir(), "encoderSuite_in") 39 | 40 | url := "@specialoperationstest#3/fear-of-death-inspirational#a" 41 | c, err := resolve.ResolveStream(url) 42 | if err != nil { 43 | panic(err) 44 | } 45 | s.file, _, err = c.Download(s.in) 46 | s.Require().NoError(err) 47 | s.file.Close() 48 | } 49 | 50 | func (s *encoderSuite) TearDownSuite() { 51 | os.Remove(s.file.Name()) 52 | os.RemoveAll(s.out) 53 | } 54 | 55 | func (s *encoderSuite) TestCheckFastStart() { 56 | absPath, _ := filepath.Abs(s.file.Name()) 57 | e, err := NewEncoder(Configure().Log(zapadapter.NewKV(nil)).Ladder(ladder.Default).SpritegenPath("")) 58 | s.Require().NoError(err) 59 | m, err := e.GetMetadata(absPath) 60 | s.Require().NoError(err) 61 | s.True(m.FastStart) 62 | } 63 | 64 | func (s *encoderSuite) TestEncode() { 65 | absPath, _ := filepath.Abs(s.file.Name()) 66 | cfg := 
Configure().Log(zapadapter.NewKV(nil)).Ladder(ladder.Default).SpritegenPath("") 67 | e, err := NewEncoder(cfg) 68 | s.Require().NoError(err) 69 | 70 | res, err := e.Encode(absPath, s.out) 71 | s.Require().NoError(err) 72 | 73 | vs := res.OrigMeta.VideoStream 74 | s.Equal(1920, vs.GetWidth()) 75 | s.Equal(1080, vs.GetHeight()) 76 | 77 | progress := 0.0 78 | for p := range res.Progress { 79 | progress = p.GetProgress() 80 | } 81 | 82 | s.Require().GreaterOrEqual(progress, 98.5) 83 | 84 | matchTranscodedOutput(s.T(), s.out, res) 85 | } 86 | 87 | func TestTweakRealStreams(t *testing.T) { 88 | t.Skip() 89 | encoder, err := NewEncoder(Configure().Log(zapadapter.NewKV(nil)).Ladder(ladder.Default).SpritegenPath("")) 90 | require.NoError(t, err) 91 | 92 | testCases := []struct { 93 | url string 94 | expectedTiers []ladder.Tier 95 | }{ 96 | { 97 | // "hot-tub-streamers-are-furious-at#06e0bc43f55fec0bd946a3cb18fc2ff9bc1cb2aa", 98 | "hot-tub-streamers-are-furious.mp4", 99 | []ladder.Tier{ 100 | {Width: 1920, Height: 1080, VideoBitrate: 3500_000, AudioBitrate: "160k", Framerate: FPS0}, 101 | {Width: 1280, Height: 720, VideoBitrate: 2500_000, AudioBitrate: "128k", Framerate: FPS0}, 102 | {Width: 640, Height: 360, VideoBitrate: 500_000, AudioBitrate: "96k", Framerate: FPS0}, 103 | {Width: 256, Height: 144, VideoBitrate: 100_000, AudioBitrate: "96k", Framerate: FPS15}, 104 | }, 105 | }, 106 | { 107 | // "hot-tub-streamers-are-furious-at#06e0bc43f55fec0bd946a3cb18fc2ff9bc1cb2aa", 108 | "why-mountain-biking-here-will.mp4", 109 | []ladder.Tier{ 110 | {Width: 1920, Height: 1080, VideoBitrate: 3500_000, AudioBitrate: "160k", Framerate: FPS0}, 111 | {Width: 1280, Height: 720, VideoBitrate: 2500_000, AudioBitrate: "128k", Framerate: FPS0}, 112 | {Width: 640, Height: 360, VideoBitrate: 500_000, AudioBitrate: "96k", Framerate: FPS0}, 113 | {Width: 256, Height: 144, VideoBitrate: 100_000, AudioBitrate: "96k", Framerate: FPS15}, 114 | }, 115 | }, 116 | } 117 | 118 | for _, tc := range 
testCases { 119 | absPath, err := filepath.Abs(filepath.Join("./testdata", tc.url)) 120 | require.NoError(t, err) 121 | lmeta, err := encoder.GetMetadata(absPath) 122 | require.NoError(t, err) 123 | 124 | stream := ladder.GetVideoStream(lmeta.FMeta) 125 | testName := fmt.Sprintf( 126 | "%s (%vx%v@%vbps)", 127 | tc.url, 128 | stream.GetWidth(), 129 | stream.GetHeight(), 130 | lmeta.FMeta.GetFormat().GetBitRate(), 131 | ) 132 | t.Run(testName, func(t *testing.T) { 133 | assert.NoError(t, err) 134 | targetLadder, err := ladder.Default.Tweak(lmeta) 135 | assert.NoError(t, err) 136 | assert.NoError(t, err) 137 | require.Equal(t, len(tc.expectedTiers), len(targetLadder.Tiers), targetLadder.Tiers) 138 | for i, tier := range targetLadder.Tiers { 139 | assert.Equal(t, tc.expectedTiers[i].Width, tier.Width, tier) 140 | assert.Equal(t, tc.expectedTiers[i].Height, tier.Height, tier) 141 | assert.Equal(t, tc.expectedTiers[i].VideoBitrate, tier.VideoBitrate, tier) 142 | assert.Equal(t, tc.expectedTiers[i].AudioBitrate, tier.AudioBitrate, tier) 143 | assert.Equal(t, tc.expectedTiers[i].Framerate, tier.Framerate, tier) 144 | } 145 | }) 146 | } 147 | } 148 | 149 | func matchTranscodedOutput(t *testing.T, streamPath string, res *Result) { 150 | assert.Equal(t, 1080, res.Ladder.Tiers[0].Height) 151 | assert.Equal(t, 720, res.Ladder.Tiers[1].Height) 152 | assert.Equal(t, 360, res.Ladder.Tiers[2].Height) 153 | assert.Equal(t, 144, res.Ladder.Tiers[3].Height) 154 | 155 | outFiles := map[string]string{ 156 | "master.m3u8": ` 157 | #EXTM3U 158 | #EXT-X-VERSION:6 159 | #EXT-X-STREAM-INF:BANDWIDTH=\d+,(?:AVERAGE-BANDWIDTH=\d+,)?RESOLUTION=1920x1080,CODECS="avc1.\w+,mp4a.40.2" 160 | v0.m3u8 161 | 162 | #EXT-X-STREAM-INF:BANDWIDTH=\d+,(?:AVERAGE-BANDWIDTH=\d+,)?RESOLUTION=1280x720,CODECS="avc1.\w+,mp4a.40.2" 163 | v1.m3u8 164 | 165 | #EXT-X-STREAM-INF:BANDWIDTH=\d+,(?:AVERAGE-BANDWIDTH=\d+,)?RESOLUTION=640x360,CODECS="avc1.\w+,mp4a.40.2" 166 | v2.m3u8 167 | 168 | 
#EXT-X-STREAM-INF:BANDWIDTH=\d+,(AVERAGE-BANDWIDTH=\d+,)?RESOLUTION=256x144,CODECS="avc1.\w+,mp4a.40.2" 169 | v3.m3u8`, 170 | "v0.m3u8": "v0_s000000.ts", 171 | "v1.m3u8": "v1_s000000.ts", 172 | "v2.m3u8": "v2_s000000.ts", 173 | "v3.m3u8": "v3_s000000.ts", 174 | "v0_s000000.ts": "", 175 | "v1_s000000.ts": "", 176 | "v2_s000000.ts": "", 177 | "v3_s000000.ts": "", 178 | } 179 | for f, str := range outFiles { 180 | cont, err := os.ReadFile(path.Join(streamPath, f)) 181 | require.NoError(t, err) 182 | assert.Regexp(t, strings.TrimSpace(str), string(cont)) 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /encoder/logger.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("encoder", logging.Dev) // nolint:unused 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /encoder/pool.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import "github.com/OdyseeTeam/transcoder/pkg/dispatcher" 4 | 5 | type encodeTask [2]string 6 | 7 | type pool struct { 8 | dispatcher.Dispatcher 9 | } 10 | 11 | type worker struct { 12 | encoder Encoder 13 | } 14 | 15 | func (w worker) Work(t dispatcher.Task) error { 16 | et := t.Payload.(encodeTask) 17 | res, err := w.encoder.Encode(et[0], et[1]) 18 | t.SetResult(res) 19 | return err 20 | } 21 | 22 | // NewPool will create a pool of encoders that you can throw work at. 23 | func NewPool(encoder Encoder, parallel int) pool { 24 | d := dispatcher.Start(parallel, worker{encoder}, 0) 25 | return pool{d} 26 | } 27 | 28 | // Encode throws encoding task into a pool of workers. 
29 | // It works slightly different from encoder.Encode but the result should eventually be the same. 30 | // For how to obtain encoding progress, see poolSuite.TestEncode. 31 | func (p pool) Encode(in, out string) *dispatcher.Result { 32 | return p.Dispatch(encodeTask{in, out}) 33 | } 34 | -------------------------------------------------------------------------------- /encoder/pool_test.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "os" 5 | "path" 6 | "path/filepath" 7 | "testing" 8 | 9 | "github.com/OdyseeTeam/transcoder/ladder" 10 | "github.com/OdyseeTeam/transcoder/pkg/logging/zapadapter" 11 | "github.com/OdyseeTeam/transcoder/pkg/resolve" 12 | 13 | "github.com/stretchr/testify/suite" 14 | ) 15 | 16 | type poolSuite struct { 17 | suite.Suite 18 | file *os.File 19 | out string 20 | } 21 | 22 | func TestPoolSuite(t *testing.T) { 23 | suite.Run(t, new(poolSuite)) 24 | } 25 | 26 | func (s *poolSuite) SetupSuite() { 27 | s.out = path.Join(os.TempDir(), "poolSuite_out") 28 | 29 | url := "@specialoperationstest#3/fear-of-death-inspirational#a" 30 | c, err := resolve.ResolveStream(url) 31 | if err != nil { 32 | panic(err) 33 | } 34 | s.file, _, err = c.Download(path.Join(os.TempDir(), "poolSuite_in")) 35 | s.Require().NoError(err) 36 | s.file.Close() 37 | } 38 | 39 | func (s *poolSuite) TearDownSuite() { 40 | os.Remove(s.file.Name()) 41 | os.RemoveAll(s.out) 42 | } 43 | 44 | func (s *poolSuite) TestEncode() { 45 | absPath, _ := filepath.Abs(s.file.Name()) 46 | enc, err := NewEncoder(Configure().Log(zapadapter.NewKV(nil)).Ladder(ladder.Default).SpritegenPath("")) 47 | s.Require().NoError(err) 48 | p := NewPool(enc, 10) 49 | 50 | res := (<-p.Encode(absPath, s.out).Value()).(*Result) 51 | s.Require().NotNil(res, "result shouldn't be nil") 52 | vs := res.OrigMeta.VideoStream 53 | s.Equal(1920, vs.GetWidth()) 54 | s.Equal(1080, vs.GetHeight()) 55 | 56 | progress := 0.0 57 | for p := range 
res.Progress { 58 | progress = p.GetProgress() 59 | } 60 | 61 | s.Require().GreaterOrEqual(progress, 98.5) 62 | 63 | matchTranscodedOutput(s.T(), s.out, res) 64 | } 65 | -------------------------------------------------------------------------------- /encoder/spritegen.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "fmt" 5 | "os/exec" 6 | "strings" 7 | 8 | "github.com/OdyseeTeam/transcoder/pkg/logging" 9 | ) 10 | 11 | type SpriteGenerator struct { 12 | cmdPath string 13 | args []string 14 | log logging.KVLogger 15 | } 16 | 17 | // defaultArgs contains arguments to nodejs plus script args. 18 | var defaultArgs = []string{ 19 | "/usr/src/spritegen/cli.js", 20 | "--interval", "2", 21 | "--filename", "stream", 22 | } 23 | 24 | func NewSpriteGenerator(cmdPath string, log logging.KVLogger) (*SpriteGenerator, error) { 25 | cmd := exec.Command(cmdPath, "-h") 26 | if err := cmd.Run(); err != nil { 27 | return nil, fmt.Errorf("unable to execute generator: %w", err) 28 | } 29 | return &SpriteGenerator{cmdPath, defaultArgs, log}, nil 30 | } 31 | 32 | func (g SpriteGenerator) Generate(input, output string) error { 33 | args := g.args 34 | args = append(args, "--input", input, "--outputFolder", output) 35 | g.log.Info("starting spritegen", 36 | "cmd", g.cmdPath, 37 | "args", strings.Join(args, " "), 38 | ) 39 | _, err := exec.Command(g.cmdPath, args...).Output() // #nosec G204 40 | if err != nil { 41 | return err 42 | } 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | type Storages []S3Config 12 | 13 | type ConductorConfig struct { 14 | Storages Storages 15 | Redis string 16 | AdaptiveQueue AdaptiveQueue 17 | 
Library Library 18 | } 19 | 20 | type WorkerConfig struct { 21 | Storage S3Config 22 | Redis string 23 | EdgeToken string 24 | } 25 | 26 | type S3Config struct { 27 | Type string 28 | Name string 29 | Endpoint string 30 | Region string 31 | Bucket string 32 | Key string 33 | Secret string 34 | MaxSize string 35 | CreateBucket bool 36 | } 37 | 38 | type AdaptiveQueue struct { 39 | MinHits int 40 | } 41 | 42 | type Library struct { 43 | DSN string 44 | ManagerToken string 45 | } 46 | 47 | func ProjectRoot() (string, error) { 48 | ex, err := os.Executable() 49 | if err != nil { 50 | return "", err 51 | } 52 | return filepath.Dir(ex), nil 53 | } 54 | 55 | func Read(name string, cfg any) error { 56 | v := viper.New() 57 | v.SetConfigName(name) 58 | 59 | pp, err := ProjectRoot() 60 | if err != nil { 61 | return err 62 | } 63 | v.AddConfigPath(pp) 64 | v.AddConfigPath(".") 65 | 66 | if err := v.ReadInConfig(); err != nil { 67 | return fmt.Errorf("fatal error reading config file: %w", err) 68 | } 69 | 70 | if err := v.Unmarshal(cfg); err != nil { 71 | return fmt.Errorf("unable to decode into struct: %w", err) 72 | } 73 | 74 | return nil 75 | } 76 | 77 | func ReadConductorConfig() (*ConductorConfig, error) { 78 | cfg := &ConductorConfig{} 79 | return cfg, Read("conductor", cfg) 80 | } 81 | 82 | func ReadWorkerConfig() (*WorkerConfig, error) { 83 | cfg := &WorkerConfig{} 84 | return cfg, Read("worker", cfg) 85 | } 86 | 87 | func (s Storages) Endpoints() []string { 88 | endpoints := []string{} 89 | for _, v := range s { 90 | endpoints = append(endpoints, v.Endpoint) 91 | } 92 | return endpoints 93 | } 94 | -------------------------------------------------------------------------------- /internal/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | ) 8 | 9 | const ( 10 | StorageLocal = "local" 11 | StorageRemote = "remote" 
12 | ) 13 | 14 | var ( 15 | once = sync.Once{} 16 | 17 | DownloadedSizeMB = prometheus.NewCounter(prometheus.CounterOpts{ 18 | Name: "downloaded_size_mb", 19 | }) 20 | S3UploadedSizeMB = prometheus.NewCounter(prometheus.CounterOpts{ 21 | Name: "s3_uploaded_size_mb", 22 | }) 23 | 24 | EncodedDurationSeconds = prometheus.NewCounter(prometheus.CounterOpts{ 25 | Name: "encoded_duration_seconds", 26 | }) 27 | EncodedBitrateMbit = prometheus.NewHistogramVec( 28 | prometheus.HistogramOpts{ 29 | Name: "encoded_bitrate_mbit", 30 | Buckets: []float64{0.5, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8, 10, 15, 20, 25, 30}, 31 | }, 32 | []string{"resolution"}, 33 | ) 34 | 35 | StreamsRequestedCount = prometheus.NewCounterVec(prometheus.CounterOpts{ 36 | Name: "streams_requested_count", 37 | }, []string{"storage"}) 38 | 39 | HTTPAPIRequests = prometheus.NewHistogramVec( 40 | prometheus.HistogramOpts{ 41 | Name: "http_api_requests", 42 | Help: "Method call latency distributions", 43 | Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.4, 1, 2, 5, 10}, 44 | }, 45 | []string{"status_code"}, 46 | ) 47 | ) 48 | 49 | func RegisterMetrics() { 50 | once.Do(func() { 51 | prometheus.MustRegister( 52 | DownloadedSizeMB, S3UploadedSizeMB, EncodedDurationSeconds, EncodedBitrateMbit, 53 | StreamsRequestedCount, HTTPAPIRequests, 54 | ) 55 | }) 56 | 57 | } 58 | -------------------------------------------------------------------------------- /internal/testservices/testservices.go: -------------------------------------------------------------------------------- 1 | package testservices 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | 8 | dockertest "github.com/ory/dockertest/v3" 9 | "github.com/ory/dockertest/v3/docker" 10 | redis "github.com/redis/go-redis/v9" 11 | ) 12 | 13 | type S3Options struct { 14 | AccessKey string 15 | SecretKey string 16 | Endpoint string 17 | } 18 | 19 | type Teardown func() error 20 | 21 | // Redis will spin up a redis container and return a connection options 22 | // 
plus a tear down function that needs to be called to spin the container down. 23 | func Redis() (*redis.Options, Teardown, error) { 24 | var err error 25 | pool, err := dockertest.NewPool("") 26 | if err != nil { 27 | return nil, nil, fmt.Errorf("could not connect to docker: %w", err) 28 | } 29 | 30 | resource, err := pool.Run("redis", "7", nil) 31 | if err != nil { 32 | return nil, nil, fmt.Errorf("could not start resource: %w", err) 33 | } 34 | 35 | redisOpts := &redis.Options{ 36 | Addr: fmt.Sprintf("localhost:%s", resource.GetPort("6379/tcp")), 37 | } 38 | 39 | if err = pool.Retry(func() error { 40 | db := redis.NewClient(redisOpts) 41 | err := db.Ping(context.Background()).Err() 42 | return err 43 | }); err != nil { 44 | return nil, nil, fmt.Errorf("could not connect to redis: %w", err) 45 | } 46 | 47 | return redisOpts, func() error { 48 | if err = pool.Purge(resource); err != nil { 49 | return fmt.Errorf("could not purge resource: %w", err) 50 | } 51 | return nil 52 | }, nil 53 | } 54 | 55 | // Minio will spin up a Minio container and return a connection options 56 | // plus a tear down function that needs to be called to spin the container down. 
57 | func Minio() (*S3Options, Teardown, error) { 58 | pool, err := dockertest.NewPool("") 59 | if err != nil { 60 | return nil, nil, err 61 | } 62 | 63 | options := &dockertest.RunOptions{ 64 | Repository: "minio/minio", 65 | Tag: "latest", 66 | Cmd: []string{"server", "/data"}, 67 | // PortBindings: map[dc.Port][]dc.PortBinding{ 68 | // "9000/tcp": []dc.PortBinding{{HostPort: "9000"}}, 69 | // }, 70 | Env: []string{"MINIO_ACCESS_KEY=MYACCESSKEY", "MINIO_SECRET_KEY=MYSECRETKEY"}, 71 | } 72 | 73 | resource, err := pool.RunWithOptions( 74 | options, 75 | func(config *docker.HostConfig) { 76 | // set AutoRemove to true so that stopped container goes away by itself 77 | config.AutoRemove = true 78 | // config.RestartPolicy = docker.RestartPolicy{Name: "no"} 79 | }, 80 | ) 81 | if err != nil { 82 | return nil, nil, fmt.Errorf("could not start resource: %w", err) 83 | } 84 | 85 | endpoint := fmt.Sprintf("localhost:%s", resource.GetPort("9000/tcp")) 86 | // or you could use the following, because we mapped the port 9000 to the port 9000 on the host 87 | // endpoint := "localhost:9000" 88 | 89 | // exponential backoff-retry, because the application in the container might not be ready to accept connections yet 90 | // the minio client does not do service discovery for you (i.e. 
it does not check if connection can be established), so we have to use the health check 91 | if err := pool.Retry(func() error { 92 | url := fmt.Sprintf("http://%s/minio/health/live", endpoint) 93 | resp, err := http.Get(url) // #nosec G107 94 | if err != nil { 95 | return err 96 | } 97 | if resp.StatusCode != http.StatusOK { 98 | return fmt.Errorf("status code not OK") 99 | } 100 | return nil 101 | }); err != nil { 102 | return nil, nil, fmt.Errorf("could not connect to docker: %w", err) 103 | } 104 | 105 | opts := &S3Options{ 106 | AccessKey: "MYACCESSKEY", 107 | SecretKey: "MYSECRETKEY", 108 | Endpoint: endpoint, 109 | } 110 | return opts, func() error { 111 | return pool.Purge(resource) 112 | }, nil 113 | } 114 | -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var Version = "unknown" 4 | -------------------------------------------------------------------------------- /ladder/arguments.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/shopspring/decimal" 8 | ) 9 | 10 | const ( 11 | MasterPlaylist = "master.m3u8" 12 | preset = "veryfast" 13 | videoCodec = "libx264" 14 | constantRateFactor = "26" 15 | hlsTime = "10" 16 | ) 17 | 18 | const ( 19 | argVarStreamMap = "var_stream_map" 20 | ) 21 | 22 | type ArgumentSet struct { 23 | Output string 24 | Ladder Ladder 25 | Arguments map[string]string 26 | Metadata *Metadata 27 | } 28 | 29 | var hlsDefaultArguments = map[string]string{ 30 | "threads": "2", 31 | "preset": preset, 32 | "sc_threshold": "0", 33 | "c:v": "libx264", 34 | "pix_fmt": "yuv420p", 35 | "c:a": "aac", 36 | "ac": "2", 37 | "ar": "44100", 38 | "f": "hls", 39 | "hls_time": hlsTime, 40 | "hls_playlist_type": "vod", 41 | "hls_flags": "independent_segments", 42 | 
"master_pl_name": MasterPlaylist, 43 | "strftime_mkdir": "1", 44 | "hls_segment_filename": "v%v_s%06d.ts", 45 | } 46 | 47 | // GetStrArguments serializes ffmpeg arguments in a format sutable for `ffmpeg.Transcoder.Start“. 48 | func (a *ArgumentSet) GetStrArguments() []string { 49 | strArgs := []string{} 50 | 51 | args := a.Arguments 52 | ladArgs := []string{} 53 | args[argVarStreamMap] = "" 54 | 55 | for k, v := range a.Ladder.Args { 56 | args[k] = v 57 | } 58 | 59 | for n, tier := range a.Ladder.Tiers { 60 | s := strconv.Itoa(n) 61 | args[argVarStreamMap] += fmt.Sprintf("v:%s,a:%s ", s, s) 62 | vRate := strconv.Itoa(tier.VideoBitrate) 63 | ladArgs = append(ladArgs, 64 | "-map", "v:0", 65 | "-filter:v:"+s, "scale=-2:"+strconv.Itoa(tier.Height), 66 | "-crf:v:"+s, strconv.Itoa(tier.CRF), 67 | "-b:v:"+s, vRate, 68 | "-maxrate:v:"+s, vRate, 69 | "-bufsize:v:"+s, vRate, 70 | ) 71 | 72 | switch { 73 | case tier.KeepFramerate: 74 | ladArgs = append(ladArgs, 75 | "-g:v:"+s, strconv.Itoa(a.Metadata.FPS.Int()*2)) // nolint:goconst 76 | case !tier.Framerate.IsZero(): 77 | ladArgs = append(ladArgs, 78 | "-r:v:"+s, tier.Framerate.String(), 79 | "-g:v:"+s, (tier.Framerate.Mul(decimal.NewFromInt(2)).String())) // nolint:goconst 80 | default: 81 | ladArgs = append(ladArgs, 82 | "-r:v:"+s, a.Metadata.FPS.String(), 83 | "-g:v:"+s, strconv.Itoa(a.Metadata.FPS.Int()*2)) // nolint:goconst 84 | } 85 | ladArgs = append(ladArgs, "-map", "a:0", "-b:a:"+s, tier.AudioBitrate) 86 | } 87 | 88 | for k, v := range args { 89 | strArgs = append(strArgs, fmt.Sprintf("-%v", k), v) 90 | } 91 | strArgs = append(strArgs, ladArgs...) 
92 | return strArgs 93 | } 94 | -------------------------------------------------------------------------------- /ladder/const.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | const ( 4 | D1080p Definition = "1080p" 5 | D720p Definition = "720p" 6 | D144p Definition = "144p" 7 | 8 | nsRateFactor = 0.37 9 | ) 10 | -------------------------------------------------------------------------------- /ladder/defaults.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | import _ "embed" 4 | 5 | //go:embed defaults.yml 6 | var defaultLadderYaml []byte 7 | 8 | const DefaultCRF = 24 9 | 10 | var Default, _ = Load(defaultLadderYaml) 11 | -------------------------------------------------------------------------------- /ladder/defaults.yml: -------------------------------------------------------------------------------- 1 | args: 2 | sws_flags: bilinear 3 | profile:v: main 4 | refs: 1 5 | preset: veryfast 6 | force_key_frames: "expr:gte(t,n_forced*2)" 7 | hls_time: 10 8 | 9 | tiers: 10 | - definition: 1080p 11 | bitrate: 3500_000 12 | # bitrate_cutoff: 6000_000 13 | audio_bitrate: 160k 14 | width: 1920 15 | height: 1080 16 | crf: 23 17 | - definition: 720p 18 | bitrate: 2500_000 19 | audio_bitrate: 128k 20 | width: 1280 21 | height: 720 22 | crf: 24 23 | - definition: 360p 24 | bitrate: 500_000 25 | audio_bitrate: 96k 26 | width: 640 27 | height: 360 28 | crf: 25 29 | - definition: 144p 30 | width: 256 31 | height: 144 32 | bitrate: 100_000 33 | audio_bitrate: 96k 34 | framerate: 15 35 | crf: 26 36 | -------------------------------------------------------------------------------- /ladder/ladder.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | import ( 4 | "math" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/shopspring/decimal" 9 | yaml "gopkg.in/yaml.v3" 10 | ) 11 | 12 | type Definition string 13 
| 14 | type Ladder struct { 15 | Args map[string]string 16 | Metadata *Metadata 17 | Tiers []Tier `yaml:",flow"` 18 | } 19 | 20 | type Tier struct { 21 | Definition Definition 22 | Height int 23 | Width int 24 | VideoBitrate int `yaml:"bitrate"` 25 | AudioBitrate string `yaml:"audio_bitrate"` 26 | Framerate decimal.Decimal `yaml:",omitempty"` 27 | KeepFramerate bool `yaml:"keep_framerate"` 28 | BitrateCutoff int `yaml:"bitrate_cutoff"` 29 | CRF int 30 | } 31 | 32 | func Load(yamlLadder []byte) (Ladder, error) { 33 | l := Ladder{} 34 | err := yaml.Unmarshal(yamlLadder, &l) 35 | return l, err 36 | } 37 | 38 | // Tweak generates encoding parameters from the ladder for provided video metadata. 39 | func (x Ladder) Tweak(md *Metadata) (Ladder, error) { 40 | newLadder := Ladder{ 41 | Args: x.Args, 42 | Tiers: []Tier{}, 43 | Metadata: md, 44 | } 45 | vrate, _ := strconv.Atoi(md.VideoStream.GetBitRate()) 46 | var vert, origResSeen bool 47 | w := md.VideoStream.GetWidth() 48 | h := md.VideoStream.GetHeight() 49 | if h > w { 50 | vert = true 51 | } 52 | for _, t := range x.Tiers { 53 | if t.BitrateCutoff >= vrate { 54 | logger.Debugw("video bitrate lower than the cut-off", "bitrate", vrate, "cutoff", t.BitrateCutoff) 55 | if t.Height == h { 56 | origResSeen = true 57 | } 58 | continue 59 | } 60 | if vert { 61 | t.Width, t.Height = t.Height, t.Width 62 | } 63 | if t.Height > h { 64 | logger.Debugw("tier definition higher than stream", "tier", t.Height, "height", h) 65 | continue 66 | } 67 | if t.Height == h { 68 | origResSeen = true 69 | } 70 | if t.CRF == 0 { 71 | t.CRF = DefaultCRF 72 | } 73 | newLadder.Tiers = append(newLadder.Tiers, t) 74 | } 75 | 76 | if !origResSeen && x.Tiers[0].Height >= h && len(newLadder.Tiers) > 0 { 77 | newLadder.Tiers = append([]Tier{{ 78 | Height: h, 79 | Width: w, 80 | VideoBitrate: nsRate(w, h), 81 | AudioBitrate: "128k", 82 | CRF: DefaultCRF, 83 | }}, newLadder.Tiers...) 
84 | } 85 | 86 | logger.Debugw("ladder built", "tiers", newLadder.Tiers) 87 | return newLadder, nil 88 | } 89 | 90 | func (x Ladder) ArgumentSet(out string) *ArgumentSet { 91 | d := map[string]string{} 92 | for k, v := range hlsDefaultArguments { 93 | d[k] = v 94 | } 95 | return &ArgumentSet{ 96 | Output: out, 97 | Arguments: d, 98 | Ladder: x, 99 | Metadata: x.Metadata, 100 | } 101 | } 102 | 103 | func (x Ladder) String() string { 104 | return strings.Join(x.ArgumentSet("...").GetStrArguments(), " ") 105 | } 106 | 107 | func nsRate(w, h int) int { 108 | return int(math.Ceil(float64(800*600) / nsRateFactor)) 109 | } 110 | -------------------------------------------------------------------------------- /ladder/logger.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("formats", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /ladder/metadata.go: -------------------------------------------------------------------------------- 1 | package ladder 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "regexp" 7 | "strconv" 8 | 9 | "github.com/floostack/transcoder" 10 | "github.com/floostack/transcoder/ffmpeg" 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | type Metadata struct { 15 | FMeta *ffmpeg.Metadata 16 | FPS *FPS 17 | FastStart bool 18 | VideoStream transcoder.Streams 19 | AudioStream transcoder.Streams 20 | } 21 | 22 | type FPS struct { 23 | Ratio string 24 | Float float64 25 | } 26 | 27 | var fpsPattern = regexp.MustCompile(`^(\d+)/(\d+)$`) 28 | 29 | func WrapMeta(fmeta *ffmpeg.Metadata) (*Metadata, error) { 30 | m := &Metadata{ 31 | FMeta: fmeta, 32 | } 33 | vs := m.videoStream() 34 | if vs == nil { 35 | return nil, errors.New("no video stream found") 36 | } 37 | 
m.VideoStream = vs 38 | as := m.videoStream() 39 | if as == nil { 40 | return nil, errors.New("no audio stream found") 41 | } 42 | m.AudioStream = as 43 | 44 | f, err := m.determineFramerate() 45 | if err != nil { 46 | return nil, errors.Wrap(err, "cannot determine framerate") 47 | } 48 | m.FPS = f 49 | 50 | return m, nil 51 | } 52 | 53 | func (m *Metadata) videoStream() transcoder.Streams { 54 | return GetVideoStream(m.FMeta) 55 | } 56 | 57 | func (m *Metadata) audioStream() transcoder.Streams { // nolint:unused 58 | for _, s := range m.FMeta.GetStreams() { 59 | if s.GetCodecType() == "audio" { 60 | return s 61 | } 62 | } 63 | return nil 64 | } 65 | 66 | func (m *Metadata) determineFramerate() (*FPS, error) { 67 | fr := m.VideoStream.GetAvgFrameRate() 68 | fpsm := fpsPattern.FindStringSubmatch(fr) 69 | if len(fpsm) < 2 { 70 | return nil, fmt.Errorf("no match found in %s", fr) 71 | } 72 | fpsdd, err := strconv.Atoi(fpsm[1]) 73 | if err != nil { 74 | return nil, err 75 | } 76 | fpsds, err := strconv.Atoi(fpsm[2]) 77 | if err != nil { 78 | return nil, err 79 | } 80 | if fpsds == 0 { 81 | return nil, errors.New("divisor cannot be zero") 82 | } 83 | return &FPS{Ratio: fr, Float: float64(fpsdd) / float64(fpsds)}, nil 84 | } 85 | 86 | func (f FPS) Int() int { 87 | return int(math.Ceil(f.Float)) 88 | } 89 | 90 | func (f FPS) String() string { 91 | return f.Ratio 92 | } 93 | func GetVideoStream(meta *ffmpeg.Metadata) transcoder.Streams { 94 | for _, s := range meta.GetStreams() { 95 | if s.GetCodecType() == "video" { 96 | return s 97 | } 98 | } 99 | return nil 100 | } 101 | -------------------------------------------------------------------------------- /library/db/db.go: -------------------------------------------------------------------------------- 1 | // Code generated by sqlc. DO NOT EDIT. 
2 | // versions: 3 | // sqlc v1.27.0 4 | 5 | package db 6 | 7 | import ( 8 | "context" 9 | "database/sql" 10 | ) 11 | 12 | type DBTX interface { 13 | ExecContext(context.Context, string, ...interface{}) (sql.Result, error) 14 | PrepareContext(context.Context, string) (*sql.Stmt, error) 15 | QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) 16 | QueryRowContext(context.Context, string, ...interface{}) *sql.Row 17 | } 18 | 19 | func New(db DBTX) *Queries { 20 | return &Queries{db: db} 21 | } 22 | 23 | type Queries struct { 24 | db DBTX 25 | } 26 | 27 | func (q *Queries) WithTx(tx *sql.Tx) *Queries { 28 | return &Queries{ 29 | db: tx, 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /library/db/migrations.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "embed" 4 | 5 | //go:embed migrations/*.sql 6 | var MigrationsFS embed.FS 7 | -------------------------------------------------------------------------------- /library/db/migrations/0001_initial.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | CREATE TYPE channel_priority AS ENUM ( 4 | 'high', 5 | 'normal', 6 | 'low', 7 | 'disabled' 8 | ); 9 | 10 | CREATE TABLE videos ( 11 | id SERIAL NOT NULL PRIMARY KEY, 12 | 13 | created_at TIMESTAMP NOT NULL DEFAULT NOW(), 14 | updated_at TIMESTAMP, 15 | accessed_at TIMESTAMP, 16 | access_count integer DEFAULT 0, 17 | 18 | tid text NOT NULL UNIQUE CHECK (tid <> ''), 19 | 20 | url text NOT NULL CHECK (url <> ''), 21 | sd_hash text NOT NULL UNIQUE CHECK (sd_hash <> ''), 22 | channel text NOT NULL CHECK (channel <> ''), 23 | 24 | storage text NOT NULL CHECK (storage <> ''), 25 | path text NOT NULL CHECK (path <> ''), 26 | size bigint NOT NULL CHECK (size > 0), 27 | 28 | checksum text 29 | ); 30 | 31 | CREATE TABLE channels ( 32 | id SERIAL NOT NULL PRIMARY KEY, 33 | 34 | created_at 
TIMESTAMP NOT NULL DEFAULT NOW(), 35 | 36 | url text NOT NULL UNIQUE CHECK (url <> ''), 37 | claim_id text NOT NULL UNIQUE CHECK (claim_id <> ''), 38 | priority channel_priority NOT NULL 39 | ); 40 | 41 | -- +migrate Down 42 | DROP TABLE videos; 43 | DROP TABLE channels; 44 | DROP TYPE channel_priority; 45 | -------------------------------------------------------------------------------- /library/db/migrations/0002_add_manifest.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | ALTER TABLE videos 4 | ADD COLUMN manifest jsonb; 5 | 6 | -- +migrate Down 7 | ALTER TABLE videos 8 | DROP COLUMN manifest; 9 | -------------------------------------------------------------------------------- /library/db/migrations/0003_default_accessed_at.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | -- +migrate StatementBegin 3 | ALTER TABLE videos ALTER COLUMN accessed_at SET NOT NULL; 4 | ALTER TABLE videos ALTER COLUMN accessed_at SET DEFAULT NOW(); 5 | -- +migrate StatementEnd 6 | 7 | -- +migrate Down 8 | -- +migrate StatementBegin 9 | ALTER TABLE videos ALTER COLUMN accessed_at DROP NOT NULL; 10 | ALTER TABLE videos ALTER COLUMN accessed_at DROP DEFAULT; 11 | -- +migrate StatementEnd 12 | -------------------------------------------------------------------------------- /library/db/migrations/0004_add_released_at.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | 3 | ALTER TABLE videos 4 | ADD COLUMN released_at TIMESTAMP; 5 | 6 | -- +migrate Down 7 | ALTER TABLE videos 8 | DROP COLUMN released_at; 9 | -------------------------------------------------------------------------------- /library/db/models.go: -------------------------------------------------------------------------------- 1 | // Code generated by sqlc. DO NOT EDIT. 
2 | // versions: 3 | // sqlc v1.27.0 4 | 5 | package db 6 | 7 | import ( 8 | "database/sql" 9 | "database/sql/driver" 10 | "fmt" 11 | "time" 12 | 13 | "github.com/sqlc-dev/pqtype" 14 | ) 15 | 16 | type ChannelPriority string 17 | 18 | const ( 19 | ChannelPriorityHigh ChannelPriority = "high" 20 | ChannelPriorityNormal ChannelPriority = "normal" 21 | ChannelPriorityLow ChannelPriority = "low" 22 | ChannelPriorityDisabled ChannelPriority = "disabled" 23 | ) 24 | 25 | func (e *ChannelPriority) Scan(src interface{}) error { 26 | switch s := src.(type) { 27 | case []byte: 28 | *e = ChannelPriority(s) 29 | case string: 30 | *e = ChannelPriority(s) 31 | default: 32 | return fmt.Errorf("unsupported scan type for ChannelPriority: %T", src) 33 | } 34 | return nil 35 | } 36 | 37 | type NullChannelPriority struct { 38 | ChannelPriority ChannelPriority 39 | Valid bool // Valid is true if ChannelPriority is not NULL 40 | } 41 | 42 | // Scan implements the Scanner interface. 43 | func (ns *NullChannelPriority) Scan(value interface{}) error { 44 | if value == nil { 45 | ns.ChannelPriority, ns.Valid = "", false 46 | return nil 47 | } 48 | ns.Valid = true 49 | return ns.ChannelPriority.Scan(value) 50 | } 51 | 52 | // Value implements the driver Valuer interface. 
53 | func (ns NullChannelPriority) Value() (driver.Value, error) { 54 | if !ns.Valid { 55 | return nil, nil 56 | } 57 | return string(ns.ChannelPriority), nil 58 | } 59 | 60 | type Channel struct { 61 | ID int32 62 | CreatedAt time.Time 63 | URL string 64 | ClaimID string 65 | Priority ChannelPriority 66 | } 67 | 68 | type Video struct { 69 | ID int32 70 | CreatedAt time.Time 71 | UpdatedAt sql.NullTime 72 | AccessedAt time.Time 73 | AccessCount sql.NullInt32 74 | TID string 75 | URL string 76 | SDHash string 77 | Channel string 78 | Storage string 79 | Path string 80 | Size int64 81 | Checksum sql.NullString 82 | Manifest pqtype.NullRawMessage 83 | ReleasedAt sql.NullTime 84 | } 85 | -------------------------------------------------------------------------------- /library/db/queries.sql: -------------------------------------------------------------------------------- 1 | -- name: AddVideo :one 2 | INSERT INTO videos ( 3 | tid, sd_hash, url, released_at, channel, storage, path, size, checksum, manifest 4 | ) VALUES ( 5 | $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 6 | ) 7 | RETURNING *; 8 | 9 | -- name: GetAllVideos :many 10 | SELECT * FROM videos; 11 | 12 | -- name: GetAllVideosForStorage :many 13 | SELECT * FROM videos 14 | WHERE storage = $1; 15 | 16 | -- name: GetAllVideosForStorageLimit :many 17 | SELECT * FROM videos 18 | WHERE storage = $1 19 | ORDER BY id ASC 20 | LIMIT $2 OFFSET $3; 21 | 22 | -- name: GetVideo :one 23 | SELECT * FROM videos 24 | WHERE sd_hash = $1 LIMIT 1; 25 | 26 | -- name: RecordVideoAccess :exec 27 | UPDATE videos 28 | SET accessed_at = NOW(), access_count = access_count + 1 29 | WHERE sd_hash = $1; 30 | 31 | -- name: DeleteVideo :exec 32 | DELETE from videos 33 | WHERE tid = $1; 34 | 35 | -- name: AddChannel :one 36 | INSERT into channels ( 37 | url, claim_id, priority 38 | ) VALUES ( 39 | $1, $2, $3 40 | ) 41 | RETURNING *; 42 | 43 | -- name: GetChannel :one 44 | SELECT * from channels 45 | WHERE claim_id = $1; 46 | 47 | -- name: 
GetAllChannels :many 48 | SELECT * from channels; 49 | -------------------------------------------------------------------------------- /library/library_test.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/OdyseeTeam/transcoder/library/db" 10 | "github.com/OdyseeTeam/transcoder/pkg/logging/zapadapter" 11 | 12 | "github.com/stretchr/testify/suite" 13 | ) 14 | 15 | type librarySuite struct { 16 | suite.Suite 17 | LibraryTestHelper 18 | } 19 | 20 | func TestLibrarySuite(t *testing.T) { 21 | suite.Run(t, new(librarySuite)) 22 | } 23 | 24 | func (s *librarySuite) SetupTest() { 25 | s.Require().NoError(s.SetupLibraryDB()) 26 | } 27 | 28 | func (s *librarySuite) TearDownTest() { 29 | s.Require().NoError(s.TearDownLibraryDB()) 30 | } 31 | 32 | func (s *librarySuite) TestAddChannel() { 33 | lib := New(Config{DB: s.DB, Log: zapadapter.NewKV(nil)}) 34 | c, err := lib.AddChannel("lbry://@specialoperationstest#3", "") 35 | s.Require().NoError(err) 36 | s.Equal("395b0f23dcd07212c3e956b697ba5ba89578ca54", c.ClaimID) 37 | s.Equal("lbry://@specialoperationstest#3", c.URL) 38 | s.Equal(db.ChannelPriorityNormal, c.Priority) 39 | } 40 | 41 | func (s *librarySuite) TestAddGetVideo() { 42 | var err error 43 | 44 | dummyStorage := NewDummyStorage("dummy1", "https://storage.host") 45 | lib := New(Config{DB: s.DB, Storages: map[string]Storage{dummyStorage.Name(): dummyStorage}, Log: zapadapter.NewKV(nil)}) 46 | newStream := GenerateDummyStream(dummyStorage) 47 | 48 | url, err := lib.GetVideoURL(newStream.SDHash()) 49 | s.ErrorIs(err, ErrStreamNotFound) 50 | s.Empty(url) 51 | 52 | err = lib.AddRemoteStream(*newStream) 53 | s.Require().NoError(err) 54 | 55 | url, err = lib.GetVideoURL(newStream.SDHash()) 56 | s.Require().NoError(err) 57 | s.Equal(fmt.Sprintf("remote://%s/%s/", newStream.RemoteStorage, newStream.Manifest.TID), url) 58 | 59 | v, 
err := lib.GetVideo(newStream.SDHash()) 60 | s.Require().NoError(err) 61 | s.EqualValues(1, v.AccessCount.Int32) 62 | s.GreaterOrEqual(2, int(time.Since(v.AccessedAt).Seconds())) 63 | m := &Manifest{} 64 | err = json.Unmarshal(v.Manifest.RawMessage, m) 65 | s.Require().NoError(err) 66 | m.TranscodedAt = time.Time{} 67 | newStream.Manifest.TranscodedAt = time.Time{} 68 | s.EqualValues(m, newStream.Manifest) 69 | } 70 | -------------------------------------------------------------------------------- /library/logger.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("library", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /library/maintenance.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "time" 7 | 8 | "github.com/OdyseeTeam/transcoder/library/db" 9 | 10 | "github.com/c2h5oh/datasize" 11 | ) 12 | 13 | func SpawnLibraryCleaning(lib *Library, storageName string, maxSize uint64) chan struct{} { 14 | stopChan := make(chan struct{}) 15 | logger.Infow( 16 | "starting remote library maintenance", 17 | "max_remote_size", toGB(maxSize), 18 | ) 19 | 20 | running := make(chan struct{}, 1) 21 | ticker := time.NewTicker(1 * time.Hour) 22 | 23 | running <- struct{}{} 24 | go func() { 25 | defer func() { <-running }() 26 | retireVideos(lib, storageName, maxSize) 27 | }() 28 | 29 | go func() { 30 | for range ticker.C { 31 | select { 32 | case running <- struct{}{}: 33 | go func() { 34 | defer func() { <-running }() 35 | retireVideos(lib, storageName, maxSize) 36 | }() 37 | case <-stopChan: 38 | logger.Info("stopping library maintenance") 39 | return 40 | default: 41 | } 42 | 
} 43 | }() 44 | 45 | return stopChan 46 | } 47 | 48 | func toGB(s uint64) string { 49 | return fmt.Sprintf("%.2fGB", datasize.ByteSize(s).GBytes()) 50 | } 51 | 52 | func retireVideos(lib *Library, storageName string, maxSize uint64) { 53 | logger.Infow("starting library retirement procedure", "max_remote_size", toGB(maxSize)) 54 | totalSize, retiredSize, err := lib.RetireVideos(storageName, maxSize) 55 | ll := logger.With("total_gb", toGB(totalSize), "retired_gb", toGB(retiredSize)) 56 | LibraryBytes.Set(float64(totalSize)) 57 | switch { 58 | case err != nil: 59 | ll.Infow("error retiring videos", "err", err) 60 | case retiredSize > 0: 61 | ll.Infow("retired some videos") 62 | default: 63 | ll.Infow("failed to retire any videos") 64 | } 65 | } 66 | 67 | func tailVideos(videos []db.Video, maxSize uint64, call func(v db.Video) error) (totalSize uint64, furloughedSize uint64, err error) { 68 | for _, v := range videos { 69 | if v.Size < 0 { 70 | logger.Warnw("invalid video size", "tid", v.TID, "size", v.Size) 71 | continue 72 | } 73 | totalSize += uint64(v.Size) 74 | } 75 | if maxSize >= totalSize { 76 | return 77 | } 78 | 79 | weight := func(v db.Video) int64 { return v.AccessedAt.Unix() } 80 | sort.Slice(videos, func(i, j int) bool { return weight(videos[i]) < weight(videos[j]) }) 81 | allStart := time.Now() 82 | for _, v := range videos { 83 | if v.Size < 0 { 84 | continue 85 | } 86 | 87 | start := time.Now() 88 | err := call(v) 89 | if err != nil { 90 | logger.Warnw("failed to execute function for video", "sd_hash", v.SDHash, "err", err) 91 | continue 92 | } 93 | 94 | LibraryRetiredGB.Add(float64(v.Size) / float64(1<<30)) 95 | LibraryRetiredDuration.Add(float64(time.Since(start).Seconds())) 96 | 97 | furloughedSize += uint64(v.Size) 98 | remainingSize := totalSize - maxSize - furloughedSize 99 | 100 | furloughedGB := float64(furloughedSize) / float64(1<<30) 101 | remainingGB := float64(remainingSize) / float64(1<<30) 102 | speed := furloughedGB / 
time.Since(allStart).Seconds() * 60 * 60 103 | remainingHours := remainingGB / (furloughedGB / time.Since(allStart).Seconds()) / 60 / 60 104 | donePct := float64(furloughedSize) / float64(totalSize-maxSize) * float64(100) 105 | logger.Infof( 106 | "maintenance: %.1f h, %.4f%% , %.2f GB/h, %.2f GB, remaining: %.2f GB, %.1f h", 107 | time.Since(allStart).Hours(), donePct, speed, furloughedGB, remainingGB, remainingHours, 108 | ) 109 | 110 | if maxSize >= totalSize-furloughedSize { 111 | break 112 | } 113 | } 114 | 115 | return 116 | } 117 | -------------------------------------------------------------------------------- /library/maintenance_test.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | 9 | "github.com/OdyseeTeam/transcoder/library/db" 10 | "github.com/OdyseeTeam/transcoder/pkg/logging/zapadapter" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "github.com/stretchr/testify/suite" 15 | ) 16 | 17 | type maintenanceSuite struct { 18 | suite.Suite 19 | LibraryTestHelper 20 | } 21 | 22 | func (s *maintenanceSuite) SetupTest() { 23 | s.Require().NoError(s.SetupLibraryDB()) 24 | } 25 | 26 | func (s *maintenanceSuite) TearDownTest() { 27 | s.Require().NoError(s.TearDownLibraryDB()) 28 | } 29 | 30 | func TestTailSizeables(t *testing.T) { 31 | vs := []db.Video{ 32 | {Size: 10000, AccessedAt: time.Now().Add(-25 * time.Hour)}, 33 | {Size: 20000, AccessedAt: time.Now().Add(-24 * time.Hour)}, 34 | {Size: 50000, AccessedAt: time.Now().Add(-1 * time.Hour)}, 35 | {Size: 30000, AccessedAt: time.Now().Add(-30 * time.Hour)}, 36 | {Size: 20000, AccessedAt: time.Now().Add(-23 * time.Hour)}, 37 | } 38 | vsog := make([]db.Video, 5) 39 | copy(vsog, vs) 40 | 41 | removed := []db.Video{} 42 | 43 | totalSize, furloughedSize, err := tailVideos(vs, 75000, func(v db.Video) error { removed = append(removed, v); 
return nil }) 44 | require.NoError(t, err) 45 | assert.EqualValues(t, 130000, totalSize) 46 | assert.EqualValues(t, 60000, furloughedSize) 47 | assert.Equal(t, vsog[3], removed[0]) 48 | assert.Equal(t, vsog[0], removed[1]) 49 | assert.Equal(t, vsog[1], removed[2]) 50 | } 51 | 52 | func TestMaintenanceSuite(t *testing.T) { 53 | suite.Run(t, new(maintenanceSuite)) 54 | } 55 | 56 | func (s *maintenanceSuite) TestRetireVideos() { 57 | var totalSize, sizeToKeep, sizeRemote uint64 58 | var initialCount, afterCount int64 59 | 60 | dummyStorage := NewDummyStorage("dummy1", "") 61 | lib := New(Config{DB: s.DB, Storages: map[string]Storage{dummyStorage.Name(): dummyStorage}, Log: zapadapter.NewKV(nil)}) 62 | 63 | for i := range [100]int{} { 64 | stream := GenerateDummyStream(dummyStorage) 65 | err := lib.AddRemoteStream(*stream) 66 | s.Require().NoError(err) 67 | 68 | if i%3 == 0 { 69 | _, err = s.DB.ExecContext( 70 | context.Background(), 71 | "UPDATE videos SET storage = $2 where tid = $1", 72 | "storage2", 73 | stream.TID(), 74 | ) 75 | s.Require().NoError(err) 76 | } 77 | _, err = s.DB.ExecContext( 78 | context.Background(), 79 | "UPDATE videos SET accessed_at = $2 where tid = $1", 80 | stream.TID(), 81 | time.Now().AddDate(0, 0, -rand.Intn(30)), // #nosec G404 82 | ) 83 | s.Require().NoError(err) 84 | } 85 | 86 | r := s.DB.QueryRowContext(context.Background(), `select sum(size) from videos`) 87 | err := r.Scan(&totalSize) 88 | s.Require().NoError(err) 89 | 90 | r = s.DB.QueryRowContext(context.Background(), `select count(*) from videos`) 91 | err = r.Scan(&initialCount) 92 | s.Require().NoError(err) 93 | 94 | sizeToKeep = uint64(rand.Int63n(1000000 * 50)) // #nosec G404 G115 95 | totalSizeAfterRetire, retiredSize, err := lib.RetireVideos(dummyStorage.Name(), sizeToKeep) 96 | s.NoError(err) 97 | s.Equal(totalSize, totalSizeAfterRetire) 98 | s.InDelta(sizeToKeep, totalSizeAfterRetire-retiredSize, 5000000) 99 | 100 | r = s.DB.QueryRowContext(context.Background(), `select 
sum(size) from videos`) 101 | err = r.Scan(&sizeRemote) 102 | s.Require().NoError(err) 103 | s.InDelta(sizeToKeep, sizeRemote, 5000000) 104 | 105 | r = s.DB.QueryRowContext(context.Background(), `select count(*) from videos`) 106 | err = r.Scan(&afterCount) 107 | s.Require().NoError(err) 108 | 109 | s.GreaterOrEqual(len(dummyStorage.Ops), 10) 110 | s.EqualValues(initialCount-afterCount, len(dummyStorage.Ops)) 111 | for _, op := range dummyStorage.Ops { 112 | s.Len(op.Args, len(PopulatedHLSPlaylistFiles)) 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /library/metrics.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | ) 6 | 7 | var ( 8 | LibraryBytes = prometheus.NewGauge(prometheus.GaugeOpts{ 9 | Name: "library_total_bytes", 10 | }) 11 | LibraryRetiredGB = prometheus.NewCounter(prometheus.CounterOpts{ 12 | Name: "library_retired_gb", 13 | }) 14 | LibraryRetiredDuration = prometheus.NewCounter(prometheus.CounterOpts{ 15 | Name: "library_retired_duration_seconds", 16 | }) 17 | ) 18 | 19 | func RegisterMetrics() { 20 | prometheus.MustRegister( 21 | LibraryBytes, LibraryRetiredGB, LibraryRetiredDuration, 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /library/stream.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "crypto/sha256" 5 | "crypto/sha512" 6 | "encoding/hex" 7 | "fmt" 8 | "hash" 9 | "io" 10 | "io/fs" 11 | "os" 12 | "path" 13 | "regexp" 14 | "sort" 15 | "time" 16 | 17 | "github.com/karrick/godirwalk" 18 | "github.com/pkg/errors" 19 | "gopkg.in/yaml.v3" 20 | ) 21 | 22 | const ( 23 | MasterPlaylistName = "master.m3u8" 24 | PlaylistExt = ".m3u8" 25 | FragmentExt = ".ts" 26 | ManifestName = ".manifest" 27 | PlaylistContentType = "application/x-mpegurl" 28 
| FragmentContentType = "video/mp2t" 29 | 30 | SkipChecksum = "SkipChecksumForThisStream" 31 | 32 | tidTimestampFormat = "2006-01-02T15:04" 33 | ) 34 | 35 | var SDHashRe = regexp.MustCompile(`/([A-Za-z0-9]{96})/`) 36 | 37 | type Stream struct { 38 | LocalPath string `json:"local_path,omitempty"` 39 | RemoteStorage string `json:"remote_storage,omitempty"` 40 | Manifest *Manifest 41 | } 42 | 43 | type Manifest struct { 44 | URL string 45 | ReleasedAt time.Time 46 | ChannelURL string `yaml:",omitempty" json:"channel_url"` 47 | SDHash string 48 | 49 | // Meta attributes 50 | TranscodedBy string `yaml:"transcoded_by,omitempty" json:"transcoded_by"` 51 | TranscodedAt time.Time `yaml:"transcoded_at,omitempty" json:"transcoded_at"` 52 | Version string `yaml:",omitempty"` 53 | 54 | // Auto-filled attributes 55 | TID string `yaml:",omitempty"` 56 | Size int64 `yaml:",omitempty"` 57 | Checksum string `yaml:",omitempty"` 58 | 59 | FfmpegArgs string `yaml:"ffmpeg_args,omitempty"` 60 | Files []string `yaml:",omitempty"` 61 | } 62 | 63 | type StreamWalker func(fi fs.FileInfo, fullPath, name string) error 64 | 65 | func WithTranscodedAt(ts time.Time) func(*Manifest) { 66 | return func(m *Manifest) { 67 | m.TranscodedAt = ts 68 | } 69 | } 70 | 71 | func WithReleasedAt(ts time.Time) func(*Manifest) { 72 | return func(m *Manifest) { 73 | m.ReleasedAt = ts 74 | } 75 | } 76 | 77 | func WithWorkerName(n string) func(*Manifest) { 78 | return func(m *Manifest) { 79 | m.TranscodedBy = n 80 | } 81 | } 82 | 83 | func WithVersion(v string) func(*Manifest) { 84 | return func(m *Manifest) { 85 | m.Version = v 86 | } 87 | } 88 | 89 | func GetStreamHasher() hash.Hash { 90 | return sha512.New512_224() 91 | } 92 | 93 | func InitStream(dir string, remoteStorage string) *Stream { 94 | s := Stream{LocalPath: dir, RemoteStorage: remoteStorage} 95 | return &s 96 | } 97 | 98 | func (s *Stream) generateTID() string { 99 | h := sha256.New() 100 | h.Write([]byte(s.SDHash())) 101 | return 
hex.EncodeToString(h.Sum([]byte(s.Manifest.TranscodedAt.Format(tidTimestampFormat)))) 102 | } 103 | 104 | // GenerateManifest needs to be called for newly initialized (transcoded) streams. 105 | func (s *Stream) GenerateManifest(url, channel, sdHash string, manifestFuncs ...func(*Manifest)) error { 106 | var err error 107 | m := &Manifest{ 108 | URL: url, 109 | ChannelURL: channel, 110 | SDHash: sdHash, 111 | } 112 | 113 | for _, f := range manifestFuncs { 114 | f(m) 115 | } 116 | 117 | s.Manifest = m 118 | s.Manifest.TID = s.generateTID() 119 | 120 | m.Files, m.Size, err = s.getFileList() 121 | if err != nil { 122 | return errors.Wrap(err, "cannot calculate size") 123 | } 124 | m.Checksum, err = s.generateChecksum() 125 | if err != nil { 126 | return errors.Wrap(err, "cannot calculate checksum") 127 | } 128 | 129 | d, err := yaml.Marshal(s.Manifest) 130 | if err != nil { 131 | return err 132 | } 133 | return os.WriteFile(path.Join(s.LocalPath, ManifestName), d, 0644) // #nosec G306, we need the file to be publicly readable 134 | } 135 | 136 | func (s *Stream) Checksum() string { 137 | if s.Manifest == nil { 138 | return "" 139 | } 140 | return s.Manifest.Checksum 141 | } 142 | 143 | func (s *Stream) URL() string { 144 | if s.Manifest == nil { 145 | return "" 146 | } 147 | return s.Manifest.URL 148 | } 149 | 150 | func (s *Stream) TID() string { 151 | if s.Manifest == nil { 152 | return "" 153 | } 154 | return s.Manifest.TID 155 | } 156 | 157 | func (s *Stream) generateChecksum() (string, error) { 158 | hash := GetStreamHasher() 159 | err := WalkStream( 160 | s.LocalPath, 161 | openFile, 162 | func(_ string, r io.ReadCloser) error { 163 | _, err := io.Copy(hash, r) 164 | if err != nil { 165 | return err 166 | } 167 | return nil 168 | }, 169 | ) 170 | if err != nil { 171 | return "", err 172 | } 173 | return hex.EncodeToString(hash.Sum(nil)), nil 174 | } 175 | 176 | func (s *Stream) getFileList() ([]string, int64, error) { 177 | var size int64 178 | fl := []string{} 
179 | err := s.Walk(func(fi fs.FileInfo, _, name string) error { 180 | if name == ManifestName { 181 | return nil 182 | } 183 | size += fi.Size() 184 | fl = append(fl, name) 185 | return nil 186 | }) 187 | sort.Strings(fl) 188 | return fl, size, err 189 | } 190 | 191 | func (s *Stream) Walk(walker StreamWalker) error { 192 | return godirwalk.Walk(s.LocalPath, &godirwalk.Options{ 193 | Callback: func(fullPath string, de *godirwalk.Dirent) error { 194 | if de.IsDir() { 195 | if fullPath != s.LocalPath { 196 | return fmt.Errorf("%v is a directory while only files are expected here", fullPath) 197 | } 198 | return nil 199 | } 200 | fi, err := os.Stat(fullPath) 201 | if err != nil { 202 | return err 203 | } 204 | return walker(fi, fullPath, de.Name()) 205 | }, 206 | }) 207 | } 208 | 209 | func (s *Stream) SDHash() string { 210 | if s.Manifest == nil { 211 | return "" 212 | } 213 | return s.Manifest.SDHash 214 | } 215 | 216 | func (s *Stream) Size() int64 { 217 | if s.Manifest == nil { 218 | return 0 219 | } 220 | return s.Manifest.Size 221 | } 222 | 223 | func (s *Stream) ChecksumValid(checksum string) bool { 224 | return checksum == s.Checksum() 225 | } 226 | 227 | func (s *Stream) ReadManifest() error { 228 | m := &Manifest{} 229 | d, err := os.ReadFile(path.Join(s.LocalPath, ManifestName)) 230 | if err != nil { 231 | return errors.Wrap(err, "cannot read manifest file") 232 | } 233 | err = yaml.Unmarshal(d, m) 234 | if err != nil { 235 | return errors.Wrap(err, "cannot unmarshal manifest") 236 | } 237 | s.Manifest = m 238 | return nil 239 | } 240 | 241 | func openFile(rootPath ...string) (io.ReadCloser, error) { 242 | return os.Open(path.Join(rootPath...)) 243 | } 244 | -------------------------------------------------------------------------------- /library/stream_test.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "path" 5 | "sort" 6 | "testing" 7 | "time" 8 | 9 | 
"github.com/Pallinder/go-randomdata" 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestStream(t *testing.T) { 15 | dir := t.TempDir() 16 | sdHash := randomdata.Alphanumeric(96) 17 | PopulateHLSPlaylist(t, dir, sdHash) 18 | 19 | t.Run("InitStream", func(t *testing.T) { 20 | t.Parallel() 21 | 22 | stream := InitStream(path.Join(dir, sdHash), "") 23 | err := stream.GenerateManifest(randomdata.SillyName(), randomdata.SillyName(), sdHash) 24 | require.NoError(t, err) 25 | 26 | assert.Greater(t, stream.Manifest.Size, int64(1000)) 27 | assert.NotEmpty(t, stream.Manifest.Checksum) 28 | sort.Strings(PopulatedHLSPlaylistFiles) 29 | assert.Equal(t, PopulatedHLSPlaylistFiles, stream.Manifest.Files) 30 | }) 31 | 32 | t.Run("GenerateTID", func(t *testing.T) { 33 | t.Parallel() 34 | ts := time.Now() 35 | 36 | stream1 := InitStream(path.Join(dir, sdHash), "") 37 | require.NoError(t, 38 | stream1.GenerateManifest(randomdata.SillyName(), randomdata.SillyName(), sdHash, WithTranscodedAt(ts)), 39 | ) 40 | 41 | stream2 := InitStream(path.Join(dir, sdHash), "") 42 | require.NoError(t, 43 | stream2.GenerateManifest(stream1.URL(), stream1.Manifest.ChannelURL, stream1.SDHash(), WithTranscodedAt(ts)), 44 | ) 45 | err := stream1.GenerateManifest(randomdata.SillyName(), randomdata.SillyName(), sdHash, WithTranscodedAt(ts)) 46 | require.NoError(t, err) 47 | 48 | assert.Equal(t, stream1.Manifest.TID, stream2.Manifest.TID) 49 | }) 50 | 51 | t.Run("WithManifestOptions", func(t *testing.T) { 52 | t.Parallel() 53 | ts := time.Now() 54 | 55 | workerName := randomdata.SillyName() 56 | version := randomdata.BoundedDigits(5, 0, 99999) 57 | url := randomdata.SillyName() 58 | channelURL := randomdata.SillyName() 59 | 60 | stream := InitStream(path.Join(dir, sdHash), "") 61 | require.NoError(t, 62 | stream.GenerateManifest( 63 | url, channelURL, sdHash, 64 | WithTranscodedAt(ts), 65 | WithVersion(version), 66 | WithWorkerName(workerName), 67 | 
), 68 | ) 69 | 70 | assert.Equal(t, ts, stream.Manifest.TranscodedAt) 71 | assert.Equal(t, workerName, stream.Manifest.TranscodedBy) 72 | assert.Equal(t, version, stream.Manifest.Version) 73 | assert.Equal(t, url, stream.Manifest.URL) 74 | assert.Equal(t, channelURL, stream.Manifest.ChannelURL) 75 | assert.Equal(t, sdHash, stream.Manifest.SDHash) 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/.manifest: -------------------------------------------------------------------------------- 1 | url: "" 2 | sdhash: "" 3 | size: 11915 4 | checksum: SkipChecksumForThisStream 5 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/master.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-STREAM-INF:BANDWIDTH=3990800,RESOLUTION=1920x1080,CODECS="avc1.42c02a,mp4a.40.2" 4 | stream_0.m3u8 5 | 6 | #EXT-X-STREAM-INF:BANDWIDTH=2560800,RESOLUTION=1280x720,CODECS="avc1.42c020,mp4a.40.2" 7 | stream_1.m3u8 8 | 9 | #EXT-X-STREAM-INF:BANDWIDTH=2010800,RESOLUTION=854x480,CODECS="avc1.42c01f,mp4a.40.2" 10 | stream_2.m3u8 11 | 12 | #EXT-X-STREAM-INF:BANDWIDTH=1020800,RESOLUTION=640x360,CODECS="avc1.42c01f,mp4a.40.2" 13 | stream_3.m3u8 14 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/stream_0.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s0_000000.ts 9 | #EXTINF:10.010000, 10 | s0_000001.ts 11 | #EXTINF:10.010000, 12 | s0_000002.ts 13 | #EXTINF:10.010000, 14 | s0_000003.ts 15 | #EXTINF:10.010000, 16 | s0_000004.ts 17 | #EXTINF:10.010000, 18 | s0_000005.ts 19 | #EXTINF:10.010000, 20 | 
s0_000006.ts 21 | #EXTINF:10.010000, 22 | s0_000007.ts 23 | #EXTINF:10.010000, 24 | s0_000008.ts 25 | #EXTINF:10.010000, 26 | s0_000009.ts 27 | #EXTINF:10.010000, 28 | s0_000010.ts 29 | #EXTINF:10.010000, 30 | s0_000011.ts 31 | #EXTINF:10.010000, 32 | s0_000012.ts 33 | #EXTINF:10.010000, 34 | s0_000013.ts 35 | #EXTINF:10.010000, 36 | s0_000014.ts 37 | #EXTINF:10.010000, 38 | s0_000015.ts 39 | #EXTINF:10.010000, 40 | s0_000016.ts 41 | #EXTINF:10.010000, 42 | s0_000017.ts 43 | #EXTINF:10.010000, 44 | s0_000018.ts 45 | #EXTINF:10.010000, 46 | s0_000019.ts 47 | #EXTINF:10.010000, 48 | s0_000020.ts 49 | #EXTINF:10.010000, 50 | s0_000021.ts 51 | #EXTINF:10.010000, 52 | s0_000022.ts 53 | #EXTINF:10.010000, 54 | s0_000023.ts 55 | #EXTINF:10.010000, 56 | s0_000024.ts 57 | #EXTINF:10.010000, 58 | s0_000025.ts 59 | #EXTINF:10.010000, 60 | s0_000026.ts 61 | #EXTINF:10.010000, 62 | s0_000027.ts 63 | #EXTINF:10.010000, 64 | s0_000028.ts 65 | #EXTINF:10.010000, 66 | s0_000029.ts 67 | #EXTINF:10.010000, 68 | s0_000030.ts 69 | #EXTINF:10.010000, 70 | s0_000031.ts 71 | #EXTINF:10.010000, 72 | s0_000032.ts 73 | #EXTINF:10.010000, 74 | s0_000033.ts 75 | #EXTINF:10.010000, 76 | s0_000034.ts 77 | #EXTINF:10.010000, 78 | s0_000035.ts 79 | #EXTINF:10.010000, 80 | s0_000036.ts 81 | #EXTINF:10.010000, 82 | s0_000037.ts 83 | #EXTINF:10.010000, 84 | s0_000038.ts 85 | #EXTINF:10.010000, 86 | s0_000039.ts 87 | #EXTINF:10.010000, 88 | s0_000040.ts 89 | #EXTINF:10.010000, 90 | s0_000041.ts 91 | #EXTINF:10.010000, 92 | s0_000042.ts 93 | #EXTINF:10.010000, 94 | s0_000043.ts 95 | #EXTINF:10.010000, 96 | s0_000044.ts 97 | #EXTINF:10.010000, 98 | s0_000045.ts 99 | #EXTINF:10.010000, 100 | s0_000046.ts 101 | #EXTINF:10.010000, 102 | s0_000047.ts 103 | #EXTINF:10.010000, 104 | s0_000048.ts 105 | #EXTINF:10.010000, 106 | s0_000049.ts 107 | #EXTINF:10.010000, 108 | s0_000050.ts 109 | #EXTINF:10.010000, 110 | s0_000051.ts 111 | #EXTINF:10.010000, 112 | s0_000052.ts 113 | #EXTINF:10.010000, 114 | 
s0_000053.ts 115 | #EXTINF:10.010000, 116 | s0_000054.ts 117 | #EXTINF:10.010000, 118 | s0_000055.ts 119 | #EXTINF:10.010000, 120 | s0_000056.ts 121 | #EXTINF:10.010000, 122 | s0_000057.ts 123 | #EXTINF:10.010000, 124 | s0_000058.ts 125 | #EXTINF:10.010000, 126 | s0_000059.ts 127 | #EXTINF:10.010000, 128 | s0_000060.ts 129 | #EXTINF:10.010000, 130 | s0_000061.ts 131 | #EXTINF:10.010000, 132 | s0_000062.ts 133 | #EXTINF:10.010000, 134 | s0_000063.ts 135 | #EXTINF:10.010000, 136 | s0_000064.ts 137 | #EXTINF:10.010000, 138 | s0_000065.ts 139 | #EXTINF:10.010000, 140 | s0_000066.ts 141 | #EXTINF:10.010000, 142 | s0_000067.ts 143 | #EXTINF:10.010000, 144 | s0_000068.ts 145 | #EXTINF:10.010000, 146 | s0_000069.ts 147 | #EXTINF:10.010000, 148 | s0_000070.ts 149 | #EXTINF:10.010000, 150 | s0_000071.ts 151 | #EXTINF:10.010000, 152 | s0_000072.ts 153 | #EXTINF:10.010000, 154 | s0_000073.ts 155 | #EXTINF:10.010000, 156 | s0_000074.ts 157 | #EXTINF:10.010000, 158 | s0_000075.ts 159 | #EXTINF:10.010000, 160 | s0_000076.ts 161 | #EXTINF:9.479289, 162 | s0_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/stream_1.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s1_000000.ts 9 | #EXTINF:10.010000, 10 | s1_000001.ts 11 | #EXTINF:10.010000, 12 | s1_000002.ts 13 | #EXTINF:10.010000, 14 | s1_000003.ts 15 | #EXTINF:10.010000, 16 | s1_000004.ts 17 | #EXTINF:10.010000, 18 | s1_000005.ts 19 | #EXTINF:10.010000, 20 | s1_000006.ts 21 | #EXTINF:10.010000, 22 | s1_000007.ts 23 | #EXTINF:10.010000, 24 | s1_000008.ts 25 | #EXTINF:10.010000, 26 | s1_000009.ts 27 | #EXTINF:10.010000, 28 | s1_000010.ts 29 | #EXTINF:10.010000, 30 | s1_000011.ts 31 | #EXTINF:10.010000, 32 | s1_000012.ts 33 
| #EXTINF:10.010000, 34 | s1_000013.ts 35 | #EXTINF:10.010000, 36 | s1_000014.ts 37 | #EXTINF:10.010000, 38 | s1_000015.ts 39 | #EXTINF:10.010000, 40 | s1_000016.ts 41 | #EXTINF:10.010000, 42 | s1_000017.ts 43 | #EXTINF:10.010000, 44 | s1_000018.ts 45 | #EXTINF:10.010000, 46 | s1_000019.ts 47 | #EXTINF:10.010000, 48 | s1_000020.ts 49 | #EXTINF:10.010000, 50 | s1_000021.ts 51 | #EXTINF:10.010000, 52 | s1_000022.ts 53 | #EXTINF:10.010000, 54 | s1_000023.ts 55 | #EXTINF:10.010000, 56 | s1_000024.ts 57 | #EXTINF:10.010000, 58 | s1_000025.ts 59 | #EXTINF:10.010000, 60 | s1_000026.ts 61 | #EXTINF:10.010000, 62 | s1_000027.ts 63 | #EXTINF:10.010000, 64 | s1_000028.ts 65 | #EXTINF:10.010000, 66 | s1_000029.ts 67 | #EXTINF:10.010000, 68 | s1_000030.ts 69 | #EXTINF:10.010000, 70 | s1_000031.ts 71 | #EXTINF:10.010000, 72 | s1_000032.ts 73 | #EXTINF:10.010000, 74 | s1_000033.ts 75 | #EXTINF:10.010000, 76 | s1_000034.ts 77 | #EXTINF:10.010000, 78 | s1_000035.ts 79 | #EXTINF:10.010000, 80 | s1_000036.ts 81 | #EXTINF:10.010000, 82 | s1_000037.ts 83 | #EXTINF:10.010000, 84 | s1_000038.ts 85 | #EXTINF:10.010000, 86 | s1_000039.ts 87 | #EXTINF:10.010000, 88 | s1_000040.ts 89 | #EXTINF:10.010000, 90 | s1_000041.ts 91 | #EXTINF:10.010000, 92 | s1_000042.ts 93 | #EXTINF:10.010000, 94 | s1_000043.ts 95 | #EXTINF:10.010000, 96 | s1_000044.ts 97 | #EXTINF:10.010000, 98 | s1_000045.ts 99 | #EXTINF:10.010000, 100 | s1_000046.ts 101 | #EXTINF:10.010000, 102 | s1_000047.ts 103 | #EXTINF:10.010000, 104 | s1_000048.ts 105 | #EXTINF:10.010000, 106 | s1_000049.ts 107 | #EXTINF:10.010000, 108 | s1_000050.ts 109 | #EXTINF:10.010000, 110 | s1_000051.ts 111 | #EXTINF:10.010000, 112 | s1_000052.ts 113 | #EXTINF:10.010000, 114 | s1_000053.ts 115 | #EXTINF:10.010000, 116 | s1_000054.ts 117 | #EXTINF:10.010000, 118 | s1_000055.ts 119 | #EXTINF:10.010000, 120 | s1_000056.ts 121 | #EXTINF:10.010000, 122 | s1_000057.ts 123 | #EXTINF:10.010000, 124 | s1_000058.ts 125 | #EXTINF:10.010000, 126 | s1_000059.ts 
127 | #EXTINF:10.010000, 128 | s1_000060.ts 129 | #EXTINF:10.010000, 130 | s1_000061.ts 131 | #EXTINF:10.010000, 132 | s1_000062.ts 133 | #EXTINF:10.010000, 134 | s1_000063.ts 135 | #EXTINF:10.010000, 136 | s1_000064.ts 137 | #EXTINF:10.010000, 138 | s1_000065.ts 139 | #EXTINF:10.010000, 140 | s1_000066.ts 141 | #EXTINF:10.010000, 142 | s1_000067.ts 143 | #EXTINF:10.010000, 144 | s1_000068.ts 145 | #EXTINF:10.010000, 146 | s1_000069.ts 147 | #EXTINF:10.010000, 148 | s1_000070.ts 149 | #EXTINF:10.010000, 150 | s1_000071.ts 151 | #EXTINF:10.010000, 152 | s1_000072.ts 153 | #EXTINF:10.010000, 154 | s1_000073.ts 155 | #EXTINF:10.010000, 156 | s1_000074.ts 157 | #EXTINF:10.010000, 158 | s1_000075.ts 159 | #EXTINF:10.010000, 160 | s1_000076.ts 161 | #EXTINF:9.479289, 162 | s1_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/stream_2.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s2_000000.ts 9 | #EXTINF:10.010000, 10 | s2_000001.ts 11 | #EXTINF:10.010000, 12 | s2_000002.ts 13 | #EXTINF:10.010000, 14 | s2_000003.ts 15 | #EXTINF:10.010000, 16 | s2_000004.ts 17 | #EXTINF:10.010000, 18 | s2_000005.ts 19 | #EXTINF:10.010000, 20 | s2_000006.ts 21 | #EXTINF:10.010000, 22 | s2_000007.ts 23 | #EXTINF:10.010000, 24 | s2_000008.ts 25 | #EXTINF:10.010000, 26 | s2_000009.ts 27 | #EXTINF:10.010000, 28 | s2_000010.ts 29 | #EXTINF:10.010000, 30 | s2_000011.ts 31 | #EXTINF:10.010000, 32 | s2_000012.ts 33 | #EXTINF:10.010000, 34 | s2_000013.ts 35 | #EXTINF:10.010000, 36 | s2_000014.ts 37 | #EXTINF:10.010000, 38 | s2_000015.ts 39 | #EXTINF:10.010000, 40 | s2_000016.ts 41 | #EXTINF:10.010000, 42 | s2_000017.ts 43 | #EXTINF:10.010000, 44 | s2_000018.ts 45 | #EXTINF:10.010000, 46 
| s2_000019.ts 47 | #EXTINF:10.010000, 48 | s2_000020.ts 49 | #EXTINF:10.010000, 50 | s2_000021.ts 51 | #EXTINF:10.010000, 52 | s2_000022.ts 53 | #EXTINF:10.010000, 54 | s2_000023.ts 55 | #EXTINF:10.010000, 56 | s2_000024.ts 57 | #EXTINF:10.010000, 58 | s2_000025.ts 59 | #EXTINF:10.010000, 60 | s2_000026.ts 61 | #EXTINF:10.010000, 62 | s2_000027.ts 63 | #EXTINF:10.010000, 64 | s2_000028.ts 65 | #EXTINF:10.010000, 66 | s2_000029.ts 67 | #EXTINF:10.010000, 68 | s2_000030.ts 69 | #EXTINF:10.010000, 70 | s2_000031.ts 71 | #EXTINF:10.010000, 72 | s2_000032.ts 73 | #EXTINF:10.010000, 74 | s2_000033.ts 75 | #EXTINF:10.010000, 76 | s2_000034.ts 77 | #EXTINF:10.010000, 78 | s2_000035.ts 79 | #EXTINF:10.010000, 80 | s2_000036.ts 81 | #EXTINF:10.010000, 82 | s2_000037.ts 83 | #EXTINF:10.010000, 84 | s2_000038.ts 85 | #EXTINF:10.010000, 86 | s2_000039.ts 87 | #EXTINF:10.010000, 88 | s2_000040.ts 89 | #EXTINF:10.010000, 90 | s2_000041.ts 91 | #EXTINF:10.010000, 92 | s2_000042.ts 93 | #EXTINF:10.010000, 94 | s2_000043.ts 95 | #EXTINF:10.010000, 96 | s2_000044.ts 97 | #EXTINF:10.010000, 98 | s2_000045.ts 99 | #EXTINF:10.010000, 100 | s2_000046.ts 101 | #EXTINF:10.010000, 102 | s2_000047.ts 103 | #EXTINF:10.010000, 104 | s2_000048.ts 105 | #EXTINF:10.010000, 106 | s2_000049.ts 107 | #EXTINF:10.010000, 108 | s2_000050.ts 109 | #EXTINF:10.010000, 110 | s2_000051.ts 111 | #EXTINF:10.010000, 112 | s2_000052.ts 113 | #EXTINF:10.010000, 114 | s2_000053.ts 115 | #EXTINF:10.010000, 116 | s2_000054.ts 117 | #EXTINF:10.010000, 118 | s2_000055.ts 119 | #EXTINF:10.010000, 120 | s2_000056.ts 121 | #EXTINF:10.010000, 122 | s2_000057.ts 123 | #EXTINF:10.010000, 124 | s2_000058.ts 125 | #EXTINF:10.010000, 126 | s2_000059.ts 127 | #EXTINF:10.010000, 128 | s2_000060.ts 129 | #EXTINF:10.010000, 130 | s2_000061.ts 131 | #EXTINF:10.010000, 132 | s2_000062.ts 133 | #EXTINF:10.010000, 134 | s2_000063.ts 135 | #EXTINF:10.010000, 136 | s2_000064.ts 137 | #EXTINF:10.010000, 138 | s2_000065.ts 139 | 
#EXTINF:10.010000, 140 | s2_000066.ts 141 | #EXTINF:10.010000, 142 | s2_000067.ts 143 | #EXTINF:10.010000, 144 | s2_000068.ts 145 | #EXTINF:10.010000, 146 | s2_000069.ts 147 | #EXTINF:10.010000, 148 | s2_000070.ts 149 | #EXTINF:10.010000, 150 | s2_000071.ts 151 | #EXTINF:10.010000, 152 | s2_000072.ts 153 | #EXTINF:10.010000, 154 | s2_000073.ts 155 | #EXTINF:10.010000, 156 | s2_000074.ts 157 | #EXTINF:10.010000, 158 | s2_000075.ts 159 | #EXTINF:10.010000, 160 | s2_000076.ts 161 | #EXTINF:9.479289, 162 | s2_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /library/testdata/dummy-stream/stream_3.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s3_000000.ts 9 | #EXTINF:10.010000, 10 | s3_000001.ts 11 | #EXTINF:10.010000, 12 | s3_000002.ts 13 | #EXTINF:10.010000, 14 | s3_000003.ts 15 | #EXTINF:10.010000, 16 | s3_000004.ts 17 | #EXTINF:10.010000, 18 | s3_000005.ts 19 | #EXTINF:10.010000, 20 | s3_000006.ts 21 | #EXTINF:10.010000, 22 | s3_000007.ts 23 | #EXTINF:10.010000, 24 | s3_000008.ts 25 | #EXTINF:10.010000, 26 | s3_000009.ts 27 | #EXTINF:10.010000, 28 | s3_000010.ts 29 | #EXTINF:10.010000, 30 | s3_000011.ts 31 | #EXTINF:10.010000, 32 | s3_000012.ts 33 | #EXTINF:10.010000, 34 | s3_000013.ts 35 | #EXTINF:10.010000, 36 | s3_000014.ts 37 | #EXTINF:10.010000, 38 | s3_000015.ts 39 | #EXTINF:10.010000, 40 | s3_000016.ts 41 | #EXTINF:10.010000, 42 | s3_000017.ts 43 | #EXTINF:10.010000, 44 | s3_000018.ts 45 | #EXTINF:10.010000, 46 | s3_000019.ts 47 | #EXTINF:10.010000, 48 | s3_000020.ts 49 | #EXTINF:10.010000, 50 | s3_000021.ts 51 | #EXTINF:10.010000, 52 | s3_000022.ts 53 | #EXTINF:10.010000, 54 | s3_000023.ts 55 | #EXTINF:10.010000, 56 | s3_000024.ts 57 | #EXTINF:10.010000, 58 | s3_000025.ts 59 
| #EXTINF:10.010000, 60 | s3_000026.ts 61 | #EXTINF:10.010000, 62 | s3_000027.ts 63 | #EXTINF:10.010000, 64 | s3_000028.ts 65 | #EXTINF:10.010000, 66 | s3_000029.ts 67 | #EXTINF:10.010000, 68 | s3_000030.ts 69 | #EXTINF:10.010000, 70 | s3_000031.ts 71 | #EXTINF:10.010000, 72 | s3_000032.ts 73 | #EXTINF:10.010000, 74 | s3_000033.ts 75 | #EXTINF:10.010000, 76 | s3_000034.ts 77 | #EXTINF:10.010000, 78 | s3_000035.ts 79 | #EXTINF:10.010000, 80 | s3_000036.ts 81 | #EXTINF:10.010000, 82 | s3_000037.ts 83 | #EXTINF:10.010000, 84 | s3_000038.ts 85 | #EXTINF:10.010000, 86 | s3_000039.ts 87 | #EXTINF:10.010000, 88 | s3_000040.ts 89 | #EXTINF:10.010000, 90 | s3_000041.ts 91 | #EXTINF:10.010000, 92 | s3_000042.ts 93 | #EXTINF:10.010000, 94 | s3_000043.ts 95 | #EXTINF:10.010000, 96 | s3_000044.ts 97 | #EXTINF:10.010000, 98 | s3_000045.ts 99 | #EXTINF:10.010000, 100 | s3_000046.ts 101 | #EXTINF:10.010000, 102 | s3_000047.ts 103 | #EXTINF:10.010000, 104 | s3_000048.ts 105 | #EXTINF:10.010000, 106 | s3_000049.ts 107 | #EXTINF:10.010000, 108 | s3_000050.ts 109 | #EXTINF:10.010000, 110 | s3_000051.ts 111 | #EXTINF:10.010000, 112 | s3_000052.ts 113 | #EXTINF:10.010000, 114 | s3_000053.ts 115 | #EXTINF:10.010000, 116 | s3_000054.ts 117 | #EXTINF:10.010000, 118 | s3_000055.ts 119 | #EXTINF:10.010000, 120 | s3_000056.ts 121 | #EXTINF:10.010000, 122 | s3_000057.ts 123 | #EXTINF:10.010000, 124 | s3_000058.ts 125 | #EXTINF:10.010000, 126 | s3_000059.ts 127 | #EXTINF:10.010000, 128 | s3_000060.ts 129 | #EXTINF:10.010000, 130 | s3_000061.ts 131 | #EXTINF:10.010000, 132 | s3_000062.ts 133 | #EXTINF:10.010000, 134 | s3_000063.ts 135 | #EXTINF:10.010000, 136 | s3_000064.ts 137 | #EXTINF:10.010000, 138 | s3_000065.ts 139 | #EXTINF:10.010000, 140 | s3_000066.ts 141 | #EXTINF:10.010000, 142 | s3_000067.ts 143 | #EXTINF:10.010000, 144 | s3_000068.ts 145 | #EXTINF:10.010000, 146 | s3_000069.ts 147 | #EXTINF:10.010000, 148 | s3_000070.ts 149 | #EXTINF:10.010000, 150 | s3_000071.ts 151 | 
#EXTINF:10.010000, 152 | s3_000072.ts 153 | #EXTINF:10.010000, 154 | s3_000073.ts 155 | #EXTINF:10.010000, 156 | s3_000074.ts 157 | #EXTINF:10.010000, 158 | s3_000075.ts 159 | #EXTINF:10.010000, 160 | s3_000076.ts 161 | #EXTINF:9.479289, 162 | s3_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /library/validator.go: -------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "path" 7 | "strings" 8 | 9 | "github.com/pkg/errors" 10 | ) 11 | 12 | type ValidationResult struct { 13 | URL string 14 | Present, Missing []string 15 | } 16 | 17 | func ValidateStream(baseURL string, failFast bool, skipSegments bool) (*ValidationResult, error) { 18 | vr := &ValidationResult{ 19 | URL: baseURL, 20 | Missing: []string{}, 21 | Present: []string{}, 22 | } 23 | baseURL = strings.TrimSuffix(baseURL, "/") 24 | 25 | err := WalkStream(baseURL, 26 | func(p ...string) (io.ReadCloser, error) { 27 | var ( 28 | r *http.Response 29 | err error 30 | ) 31 | url := strings.Join(p, "/") 32 | if path.Ext(p[len(p)-1]) == ".ts" { 33 | if skipSegments { 34 | return nil, ErrSkipSegment 35 | } 36 | r, err = http.Head(url) // #nosec G107 37 | } else { 38 | r, err = http.Get(url) // #nosec G107 39 | } 40 | logger.Debugf("checked %s [%v]", p, r.StatusCode) 41 | if err != nil { 42 | return nil, err 43 | } 44 | if r.StatusCode != http.StatusOK { 45 | return nil, nil 46 | } 47 | return r.Body, nil 48 | }, 49 | func(fgName string, r io.ReadCloser) error { 50 | if r == nil { 51 | logger.Debugf("missing: %s", fgName) 52 | vr.Missing = append(vr.Missing, fgName) 53 | if failFast { 54 | return errors.New("broken stream") 55 | } 56 | } else { 57 | vr.Present = append(vr.Present, fgName) 58 | } 59 | return nil 60 | }, 61 | ) 62 | return vr, err 63 | } 64 | -------------------------------------------------------------------------------- /library/walker.go: 
-------------------------------------------------------------------------------- 1 | package library 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "io" 8 | 9 | "github.com/grafov/m3u8" 10 | ) 11 | 12 | type StreamGetter func(path ...string) (io.ReadCloser, error) 13 | type StreamProcessor func(fgName string, r io.ReadCloser) error 14 | 15 | var ErrSkipSegment = errors.New("skip fragment") 16 | 17 | // WalkStream parses an HLS playlist, calling `getFn` to load and `processFn` 18 | // for the master playlist located in `baseURI`, subplaylists and all segments contained within. 19 | func WalkStream(baseURI string, getFn StreamGetter, processFn StreamProcessor) error { 20 | parsePlaylist := func(name string) (m3u8.Playlist, error) { 21 | r, err := getFn(baseURI, name) 22 | if err != nil { 23 | return nil, fmt.Errorf("error getting stream item %v: %w", name, err) 24 | } 25 | if r == nil { 26 | processFn(name, r) 27 | return nil, errors.New("empty playlist") 28 | } 29 | dr, err := read(r) 30 | if err != nil { 31 | return nil, fmt.Errorf("error reading stream item %v: %w", name, err) 32 | } 33 | 34 | err = processFn(name, io.NopCloser(dr)) 35 | if err != nil { 36 | return nil, fmt.Errorf("error processing stream item %v: %w", name, err) 37 | } 38 | _, err = dr.Seek(0, io.SeekStart) 39 | if err != nil { 40 | return nil, fmt.Errorf("error seeking in item %v: %w", name, err) 41 | } 42 | 43 | p, _, err := m3u8.DecodeFrom(dr, true) 44 | if err != nil { 45 | return nil, fmt.Errorf("error decoding stream item %v: %w", name, err) 46 | } 47 | return p, nil 48 | } 49 | 50 | pl, err := parsePlaylist(MasterPlaylistName) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | masterpl := pl.(*m3u8.MasterPlaylist) 56 | for _, varpl := range masterpl.Variants { 57 | p, err := parsePlaylist(varpl.URI) 58 | if err != nil { 59 | return err 60 | } 61 | mediapl := p.(*m3u8.MediaPlaylist) 62 | 63 | for _, seg := range mediapl.Segments { 64 | if seg == nil { 65 | continue 66 | } 67 | 
r, err := getFn(baseURI, seg.URI) 68 | if errors.Is(err, ErrSkipSegment) { 69 | continue 70 | } 71 | if err != nil { 72 | return fmt.Errorf("error getting stream item %v: %w", seg.URI, err) 73 | } 74 | err = processFn(seg.URI, r) 75 | if r != nil { 76 | r.Close() 77 | } 78 | if err != nil { 79 | return fmt.Errorf("error processing stream item %v: %w", varpl.URI, err) 80 | } 81 | } 82 | } 83 | return nil 84 | } 85 | 86 | func read(r io.ReadCloser) (io.ReadSeeker, error) { 87 | d, err := io.ReadAll(r) 88 | r.Close() 89 | if err != nil { 90 | return nil, err 91 | } 92 | dr := bytes.NewReader(d) 93 | return dr, nil 94 | } 95 | -------------------------------------------------------------------------------- /manager/channels.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/OdyseeTeam/transcoder/library" 8 | db "github.com/OdyseeTeam/transcoder/library/db" 9 | ) 10 | 11 | type channelList struct { 12 | sync.Mutex 13 | items map[string]db.ChannelPriority 14 | } 15 | 16 | func newChannelList() *channelList { 17 | return &channelList{ 18 | Mutex: sync.Mutex{}, 19 | items: map[string]db.ChannelPriority{}, 20 | } 21 | } 22 | 23 | func (c *channelList) StartLoadingChannels(lib *library.Library) { 24 | for range time.Tick(5 * time.Second) { 25 | channels, err := lib.GetAllChannels() 26 | if err != nil { 27 | logger.Error("error loading channels", "err", err) 28 | } 29 | c.Load(channels) 30 | } 31 | } 32 | 33 | func (c *channelList) Load(channels []db.Channel) { 34 | c.Lock() 35 | defer c.Unlock() 36 | for _, ch := range channels { 37 | c.items[ch.ClaimID] = ch.Priority 38 | } 39 | } 40 | 41 | func (c *channelList) GetPriority(r *TranscodingRequest) db.ChannelPriority { 42 | c.Lock() 43 | defer c.Unlock() 44 | if ch, ok := c.items[r.ChannelClaimID]; !ok { 45 | return db.ChannelPriorityLow 46 | } else { 47 | return ch 48 | } 49 | } 50 | 
-------------------------------------------------------------------------------- /manager/http.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | "strings" 8 | 9 | "github.com/OdyseeTeam/transcoder/internal/metrics" 10 | "github.com/OdyseeTeam/transcoder/library/db" 11 | "github.com/OdyseeTeam/transcoder/pkg/dispatcher" 12 | "github.com/OdyseeTeam/transcoder/pkg/logging" 13 | "github.com/OdyseeTeam/transcoder/pkg/resolve" 14 | "github.com/OdyseeTeam/transcoder/pkg/timer" 15 | 16 | "github.com/fasthttp/router" 17 | "github.com/prometheus/client_golang/prometheus/promhttp" 18 | "github.com/valyala/fasthttp" 19 | "github.com/valyala/fasthttp/fasthttpadaptor" 20 | ) 21 | 22 | const ( 23 | TokenCtxField = "token" 24 | AuthHeader = "Authorization" 25 | AuthTokenPrefix = "Bearer " 26 | AdminChannelField = "channel" 27 | ) 28 | 29 | type AuthCallback func(*fasthttp.RequestCtx) bool 30 | 31 | type httpVideoHandler struct { 32 | manager *VideoManager 33 | log logging.KVLogger 34 | authCallback AuthCallback 35 | } 36 | 37 | // CreateRoutes creates a set of HTTP entrypoints that will route requests into video library. 
38 | func CreateRoutes(r *router.Router, manager *VideoManager, log logging.KVLogger, cb AuthCallback) { 39 | h := httpVideoHandler{ 40 | log: log, 41 | manager: manager, 42 | authCallback: cb, 43 | } 44 | 45 | r.GET("/api/v1/video/{kind:hls}/{url}", h.handleVideo) 46 | r.GET("/api/v2/video/{url}", h.handleVideo) 47 | r.GET("/api/v3/video", h.handleVideo) // accepts URL as a query param 48 | 49 | r.POST("/api/v1/channel", h.handleChannel) 50 | 51 | metrics.RegisterMetrics() 52 | dispatcher.RegisterMetrics() 53 | RegisterMetrics() 54 | r.GET("/metrics", fasthttpadaptor.NewFastHTTPHandler(promhttp.Handler())) 55 | } 56 | 57 | func (h httpVideoHandler) handleVideo(ctx *fasthttp.RequestCtx) { 58 | var path, videoURL string 59 | var err error 60 | urlQ, _ := ctx.UserValue("url").(string) 61 | 62 | if urlQ != "" { 63 | path = string(ctx.Path()) 64 | 65 | videoURL, err = url.PathUnescape(urlQ) 66 | if err != nil { 67 | logger.Errorw("url parsing error", "url", urlQ, "error", err) 68 | ctx.SetStatusCode(http.StatusBadRequest) 69 | fmt.Fprint(ctx, err.Error()) 70 | return 71 | } 72 | } else { 73 | videoURL = string(ctx.FormValue("url")) 74 | } 75 | 76 | if videoURL == "" { 77 | logger.Info("no url supplied") 78 | ctx.SetStatusCode(http.StatusBadRequest) 79 | fmt.Fprint(ctx, "no url supplied") 80 | return 81 | } 82 | 83 | ll := logger.With( 84 | "url", videoURL, 85 | "path", path, 86 | ) 87 | 88 | location, err := h.manager.GetVideoURL(videoURL) 89 | 90 | if err != nil { 91 | var ( 92 | statusCode int 93 | statusMessage string 94 | ) 95 | switch err { 96 | case resolve.ErrTranscodingForbidden: 97 | statusCode = http.StatusForbidden 98 | case resolve.ErrChannelNotEnabled: 99 | statusCode = http.StatusForbidden 100 | case resolve.ErrNoSigningChannel: 101 | statusCode = http.StatusForbidden 102 | case resolve.ErrTranscodingQueued: 103 | statusCode = http.StatusAccepted 104 | case resolve.ErrTranscodingUnderway: 105 | statusCode = http.StatusAccepted 106 | case 
resolve.ErrClaimNotFound: 107 | statusCode = http.StatusNotFound 108 | ll.Info("claim not found") 109 | default: 110 | statusCode = http.StatusInternalServerError 111 | ll.Errorw("internal error", "err", err) 112 | } 113 | 114 | ll.Debug(err.Error()) 115 | ctx.SetStatusCode(statusCode) 116 | if statusMessage == "" { 117 | statusMessage = err.Error() 118 | } 119 | ctx.SetBodyString(statusMessage) 120 | return 121 | } 122 | 123 | ctx.Response.StatusCode() 124 | metrics.StreamsRequestedCount.WithLabelValues(metrics.StorageRemote).Inc() 125 | ll.Debugw("stream found", "location", location) 126 | ctx.Redirect(location, http.StatusSeeOther) 127 | } 128 | 129 | func (h httpVideoHandler) handleChannel(ctx *fasthttp.RequestCtx) { 130 | if h.authCallback == nil { 131 | h.log.Error("management endpoint called but authenticator function not set") 132 | ctx.SetStatusCode(http.StatusForbidden) 133 | ctx.SetBodyString("authorization failed") 134 | return 135 | } 136 | token := strings.Replace(string(ctx.Request.Header.Peek(AuthHeader)), AuthTokenPrefix, "", 1) 137 | ctx.SetUserValue(TokenCtxField, token) 138 | 139 | if !h.authCallback(ctx) { 140 | h.log.Info("authorization failed") 141 | ctx.SetStatusCode(http.StatusForbidden) 142 | ctx.SetBodyString("authorization failed") 143 | return 144 | } 145 | 146 | channel := string(ctx.FormValue(AdminChannelField)) 147 | if channel == "" { 148 | ctx.SetStatusCode(http.StatusBadRequest) 149 | fmt.Fprint(ctx, "channel missing") 150 | return 151 | } 152 | var priority db.ChannelPriority 153 | priority.Scan(ctx.FormValue("priority")) 154 | c, err := h.manager.lib.AddChannel(channel, priority) 155 | if err != nil { 156 | ctx.SetStatusCode(http.StatusBadRequest) 157 | fmt.Fprint(ctx, err.Error()) 158 | return 159 | } 160 | ctx.SetStatusCode(http.StatusCreated) 161 | fmt.Fprintf(ctx, "channel %s (%s) added with priority %s", c.URL, c.ClaimID, c.Priority) 162 | } 163 | 164 | func CORSMiddleware(h fasthttp.RequestHandler) fasthttp.RequestHandler 
{ 165 | return func(ctx *fasthttp.RequestCtx) { 166 | ctx.Response.Header.Set("Access-Control-Allow-Origin", "*") 167 | h(ctx) 168 | } 169 | } 170 | 171 | func MetricsMiddleware(h fasthttp.RequestHandler) fasthttp.RequestHandler { 172 | return func(ctx *fasthttp.RequestCtx) { 173 | t := timer.Start() 174 | h(ctx) 175 | metrics.HTTPAPIRequests.WithLabelValues(fmt.Sprintf("%v", ctx.Response.StatusCode())).Observe(t.Duration()) 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /manager/logger.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("manager", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /manager/manager_test.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "math/rand" 5 | "sort" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/OdyseeTeam/transcoder/library" 11 | "github.com/OdyseeTeam/transcoder/library/db" 12 | "github.com/OdyseeTeam/transcoder/pkg/logging" 13 | "github.com/OdyseeTeam/transcoder/pkg/logging/zapadapter" 14 | "github.com/OdyseeTeam/transcoder/pkg/mfr" 15 | "github.com/OdyseeTeam/transcoder/pkg/resolve" 16 | 17 | "github.com/Pallinder/go-randomdata" 18 | "github.com/stretchr/testify/suite" 19 | ) 20 | 21 | type managerSuite struct { 22 | suite.Suite 23 | library.LibraryTestHelper 24 | } 25 | 26 | func isLevel5(_ string) bool { 27 | return rand.Intn(2) == 0 // #nosec G404 28 | } 29 | 30 | func isChannelEnabled(_ string) bool { 31 | return rand.Intn(2) == 0 // #nosec G404 32 | } 33 | 34 | func TestManagerSuite(t *testing.T) { 35 | suite.Run(t, new(managerSuite)) 36 | } 37 | 38 | func (s 
*managerSuite) SetupSuite() { 39 | logger = logging.Create("manager", logging.Dev) 40 | s.Require().NoError(s.SetupLibraryDB()) 41 | } 42 | 43 | func (s *managerSuite) TearDownSuite() { 44 | s.Require().NoError(s.TearDownLibraryDB()) 45 | } 46 | 47 | func (s *managerSuite) TestVideo() { 48 | var err error 49 | lib := library.New(library.Config{DB: s.DB, Log: zapadapter.NewKV(nil)}) 50 | 51 | _, err = lib.AddChannel("@BretWeinstein#f", db.ChannelPriorityHigh) 52 | s.Require().NoError(err) 53 | _, err = lib.AddChannel("@veritasium:f", "") 54 | s.Require().NoError(err) 55 | _, err = lib.AddChannel("@specialoperationstest#3", "") 56 | s.Require().NoError(err) 57 | _, err = lib.AddChannel("@TheVoiceofReason#a", db.ChannelPriorityDisabled) 58 | s.Require().NoError(err) 59 | 60 | mgr := NewManager(lib, 0) 61 | 62 | urlsPriority := []string{ 63 | "@BretWeinstein#f/EvoLens87#1", 64 | } 65 | urlsEnabled := []string{ 66 | "@veritasium#f/on-these-questions,-smarter-people-do#e", 67 | "@specialoperationstest#3/fear-of-death-inspirational#a", 68 | } 69 | urlsLevel5 := []string{ 70 | "@samtime#1/airpods-max-parody-ehh-pods-max#7", 71 | } 72 | urlsNotEnabled := []string{ 73 | "@TRUTH#2/what-do-you-know-what-do-you-believe#2", 74 | } 75 | urlsNoChannel := []string{ 76 | "what#1", 77 | } 78 | urlsDisabled := []string{ 79 | "lbry://@TheVoiceofReason#a/PaypalSucks#5", 80 | } 81 | urlsNotFound := []string{ 82 | randomdata.SillyName() + "#" + randomdata.SillyName(), 83 | randomdata.Alphanumeric(96), 84 | } 85 | 86 | for _, u := range urlsPriority { 87 | v, err := mgr.GetVideoURL(u) 88 | s.Empty(v) 89 | s.Equal(resolve.ErrTranscodingQueued, err, u) 90 | } 91 | 92 | for _, u := range urlsEnabled { 93 | v, err := mgr.GetVideoURL(u) 94 | s.Empty(v) 95 | s.Equal(resolve.ErrTranscodingQueued, err, u) 96 | } 97 | 98 | for _, u := range urlsLevel5 { 99 | v, err := mgr.GetVideoURL(u) 100 | s.Empty(v) 101 | s.Equal(resolve.ErrTranscodingQueued, err, u) 102 | } 103 | 104 | for _, u := range 
urlsNotEnabled { 105 | v, err := mgr.GetVideoURL(u) 106 | s.Empty(v) 107 | s.Equal(resolve.ErrTranscodingForbidden, err, u) 108 | } 109 | 110 | for _, u := range urlsDisabled { 111 | v, err := mgr.GetVideoURL(u) 112 | s.Empty(v) 113 | s.Equal(resolve.ErrTranscodingForbidden, err, u) 114 | } 115 | 116 | for _, u := range urlsNoChannel { 117 | v, err := mgr.GetVideoURL(u) 118 | s.Empty(v) 119 | s.Equal(resolve.ErrNoSigningChannel, err, u) 120 | } 121 | 122 | for _, u := range urlsNotFound { 123 | v, err := mgr.GetVideoURL(u) 124 | s.Empty(v) 125 | s.Equal(resolve.ErrClaimNotFound, err) 126 | } 127 | 128 | expectedUrls := []string{urlsPriority[0], urlsEnabled[0], urlsLevel5[0], urlsNotEnabled[0], urlsEnabled[1]} 129 | receivedUrls := func() []string { 130 | requestsChan := mgr.Requests() 131 | timeout := time.After(15 * time.Second) 132 | defer mgr.pool.Stop() 133 | urls := []string{} 134 | for len(urls) <= 5 { 135 | select { 136 | case r := <-requestsChan: 137 | urls = append(urls, strings.TrimPrefix(r.URI, "lbry://")) 138 | case <-timeout: 139 | return urls 140 | } 141 | } 142 | return urls 143 | }() 144 | 145 | sort.Strings(expectedUrls) 146 | sort.Strings(receivedUrls) 147 | s.Equal(expectedUrls, receivedUrls) 148 | 149 | } 150 | 151 | func (s *managerSuite) TestRequests() { 152 | var r1, r2 *TranscodingRequest 153 | 154 | lib := library.New(library.Config{DB: s.DB, Log: zapadapter.NewKV(nil)}) 155 | mgr := NewManager(lib, 0) 156 | 157 | mgr.GetVideoURL("@specialoperationstest#3/fear-of-death-inspirational#a") 158 | out := mgr.Requests() 159 | r1 = <-out 160 | 161 | s.Equal(mfr.StatusActive, mgr.RequestStatus(r1.SDHash)) 162 | select { 163 | case r2 = <-out: 164 | s.Failf("got output from Requests channel", "%v", r2) 165 | default: 166 | } 167 | 168 | s.NotNil(r1) 169 | } 170 | -------------------------------------------------------------------------------- /manager/metrics.go: -------------------------------------------------------------------------------- 1 | 
package manager

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// once guards registration so RegisterMetrics is safe to call repeatedly.
	once = sync.Once{}

	// QueueLength tracks the current number of items per transcoding queue.
	QueueLength = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "transcoding_queue_length",
		Help: "Video queue length",
	}, []string{"queue"})

	// QueueHits counts admissions and repeat hits per queue.
	QueueHits = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "transcoding_queue_hits",
		Help: "Video queue hits",
	}, []string{"queue"})

	// QueueItemAge observes how long items waited before being popped.
	QueueItemAge = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "transcoding_queue_item_age_seconds",
		Help: "Age of queue items before they get processed",
	}, []string{"queue"})
)

// RegisterMetrics registers this package's collectors with the default
// Prometheus registry. Idempotent: repeated calls are no-ops.
func RegisterMetrics() {
	once.Do(func() {
		prometheus.MustRegister(QueueLength, QueueHits, QueueItemAge)
	})
}
// Gatekeeper defines a function that checks if supplied queue item and its value should be admitted to the queue.
type Gatekeeper func(key string, value interface{}, queue *mfr.Queue) bool

// NewPool creates an empty pool. Add queues with AddQueue before Start.
func NewPool() *Pool {
	pool := &Pool{
		levels: []*level{},
		out:    make(chan *mfr.Item),
		// Buffered by one so Stop can signal without blocking.
		stopChan: make(chan interface{}, 1),
	}
	return pool
}

// AddQueue adds a queue and its gatekeeper function to the pool.
func (p *Pool) AddQueue(name string, minHits uint, k Gatekeeper) {
	p.levels = append(p.levels, &level{name: name, queue: mfr.NewQueue(), keeper: k, minHits: minHits})
}

// Admit retries to put item into the first queue that would accept it.
// Queues are traversed in the same order they are added.
// If gatekeeper returns an error, admission stops and the error is returned to the caller.
//
// Return values by outcome: a fresh admission returns ErrTranscodingQueued
// (or ErrTranscodingForbidden when the item only fits the last level), a
// repeat hit returns ErrTranscodingQueued/ErrTranscodingUnderway depending
// on the item's status, and ErrChannelNotEnabled means no level accepted it.
func (p *Pool) Admit(key string, value interface{}) error {
	ll := logger.With("key", key)
	for i, level := range p.levels {
		ll.Debugw("checking level", "level", level.name)
		q := level.queue
		// Status of this key in the current level's queue.
		_, s := level.queue.Get(key)

		mql := QueueLength.With(prometheus.Labels{"queue": level.name})
		mqh := QueueHits.With(prometheus.Labels{"queue": level.name})
		switch s {
		case mfr.StatusNone:
			// Not seen at this level yet; ask the gatekeeper.
			if level.keeper(key, value, level.queue) {
				mql.Inc()
				mqh.Inc()
				// The last level admits the item but reports it forbidden
				// to the caller (see manager tests relying on this).
				if i == len(p.levels)-1 {
					return resolve.ErrTranscodingForbidden
				}
				return resolve.ErrTranscodingQueued
			}
		case mfr.StatusActive:
			mqh.Inc()
			q.Hit(key, value)
			return resolve.ErrTranscodingUnderway
		case mfr.StatusQueued:
			mqh.Inc()
			q.Hit(key, value)
			return resolve.ErrTranscodingQueued
		case mfr.StatusDone:
			mqh.Inc()
			q.Hit(key, value)
			// This is to prevent race conditions when the item has been transcoded already
			// while the request is still in flight.
			return resolve.ErrTranscodingUnderway
		}
	}
	ll.Debug("suitable level not found")
	return resolve.ErrChannelNotEnabled
}

// Start will launch the cycle of retrieving items out of queues. Should be called after at least one `AddQueue` call.
// Queues are pooled sequentially.
func (p *Pool) Start() {
	// Build a ring over the levels for round-robin polling.
	r := ring.New(len(p.levels))
	for i := 0; i < r.Len(); i++ {
		r.Value = p.levels[i]
		r = r.Next()
	}
	for {
		r = r.Next()
		// Check for a stop signal before each poll; closing out tells
		// consumers the pool is finished.
		select {
		case <-p.stopChan:
			close(p.out)
			return
		default:
		}

		l := r.Value.(*level)
		item := l.queue.MinPop(l.minHits)
		if item == nil {
			// Non-stop polling will cause excessive CPU load.
			time.Sleep(pollTimeout)
			continue
		}
		logger.Named("pool").Debugf("popping item %v", item.Value)
		QueueLength.With(prometheus.Labels{"queue": l.name}).Dec()
		QueueItemAge.With(prometheus.Labels{"queue": l.name}).Observe(float64(item.Age()))
		p.out <- item
	}
}

// Out returns the channel on which popped queue items are delivered.
func (p *Pool) Out() <-chan *mfr.Item {
	return p.out
}

// Next returns the next item in the queue almost in a non-blocking way.
func (p *Pool) Next() *mfr.Item {
	select {
	case e := <-p.out:
		return e
	case <-time.After(pollTimeout + 50*time.Millisecond):
		return nil
	}
}

// Stop stops the queue polling routine.
func (p *Pool) Stop() {
	// stopChan is buffered (capacity 1, see NewPool), so this send never
	// blocks; the Start loop drains it on its next iteration and closes
	// the output channel.
	p.stopChan <- true
}
// TestPoolMinHits checks that a queue with minHits=10 withholds an item
// until it has accumulated ten hits, releases it exactly once, and then
// requires the threshold to be crossed again.
func (s *poolSuite) TestPoolMinHits() {
	pool := NewPool()

	pool.AddQueue("common", 10, func(k string, v interface{}, q *mfr.Queue) bool {
		q.Hit(k, v)
		return true
	})

	go pool.Start()
	s.Nil(pool.Next())

	c := &element{randomdata.Alphanumeric(96), randomdata.Alphanumeric(25)}
	// One hit: below the threshold, nothing pops.
	pool.Admit(c.url, c)
	s.Nil(pool.Next())

	// Nine hits total: still below the threshold of ten.
	for range [8]int{} {
		pool.Admit(c.url, c)
	}
	s.Nil(pool.Next())

	// Tenth hit crosses the threshold; the item becomes available.
	pool.Admit(c.url, c)

	e := pool.Next()
	s.Require().NotNil(e)
	s.Equal(c, e.Value.(*element))

	// After popping, a single fresh hit must not release the item again.
	pool.Admit(c.url, c)
	s.Nil(pool.Next())
}
return http/transcoding status, 47 | omitting video data even if present 48 | schema: 49 | type: boolean 50 | default: false 51 | 52 | components: 53 | schemas: 54 | URL: 55 | description: LBRY content URL 56 | type: string 57 | pattern: ^lbry://.+ 58 | TranscodingProgress: 59 | type: object 60 | properties: 61 | progress: 62 | type: integer 63 | minimum: 0 64 | maximum: 100 65 | speed: 66 | type: number 67 | minimum: 0 68 | started: 69 | type: string 70 | format: date-time 71 | TranscodingTask: 72 | type: object 73 | required: 74 | - claim_id 75 | - signed_channel_address 76 | properties: 77 | url: 78 | $ref: "#/components/schemas/URL" 79 | claim_id: 80 | type: string 81 | format: byte 82 | maxLength: 41 83 | progress: 84 | $ref: "#/components/schemas/TranscodingProgress" 85 | encoding_parameters: 86 | nullable: true 87 | type: object 88 | properties: 89 | resolution: 90 | type: array 91 | enum: 92 | - 1080p 93 | - 720p 94 | - 360p 95 | created: 96 | type: string 97 | format: date-time 98 | status: 99 | type: string 100 | enum: 101 | - waiting 102 | - abandoned 103 | - encoding 104 | - done 105 | signed_channel_address: 106 | type: string 107 | format: byte 108 | description: | 109 | Signed channel address should be a channel address signed 110 | by channel's key owner. 111 | Transcoder will validate this signature against channel's public key 112 | to prevent unauthorized requests. 
-------------------------------------------------------------------------------- /pkg/conductor/conductor.go: -------------------------------------------------------------------------------- 1 | package conductor 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/OdyseeTeam/transcoder/library" 10 | "github.com/OdyseeTeam/transcoder/manager" 11 | "github.com/OdyseeTeam/transcoder/pkg/conductor/metrics" 12 | "github.com/OdyseeTeam/transcoder/pkg/conductor/tasks" 13 | "github.com/OdyseeTeam/transcoder/pkg/logging" 14 | 15 | "github.com/hibiken/asynq" 16 | redis "github.com/redis/go-redis/v9" 17 | ) 18 | 19 | type Conductor struct { 20 | rdb *redis.Client 21 | asynqClient *asynq.Client 22 | asynqInspector *asynq.Inspector 23 | library *library.Library 24 | incoming <-chan *manager.TranscodingRequest 25 | stopChan chan struct{} 26 | options *ConductorOptions 27 | } 28 | 29 | type ConductorOptions struct { 30 | Logger logging.KVLogger 31 | } 32 | 33 | func WithLogger(logger logging.KVLogger) func(options *ConductorOptions) { 34 | return func(options *ConductorOptions) { 35 | options.Logger = logger 36 | } 37 | } 38 | 39 | func NewConductor( 40 | redisOpts asynq.RedisConnOpt, incoming <-chan *manager.TranscodingRequest, library *library.Library, 41 | optionFuncs ...func(*ConductorOptions), 42 | ) (*Conductor, error) { 43 | options := &ConductorOptions{ 44 | Logger: logging.NoopKVLogger{}, 45 | } 46 | for _, optionFunc := range optionFuncs { 47 | optionFunc(options) 48 | } 49 | c := &Conductor{ 50 | asynqClient: asynq.NewClient(redisOpts), 51 | asynqInspector: asynq.NewInspector(redisOpts), 52 | rdb: redisOpts.MakeRedisClient().(*redis.Client), 53 | stopChan: make(chan struct{}), 54 | options: options, 55 | incoming: incoming, 56 | library: library, 57 | } 58 | return c, nil 59 | } 60 | 61 | func StartWorker(redisOpts asynq.RedisConnOpt, concurrency int, runner *tasks.EncoderRunner, log logging.Logger) { 62 | srv := asynq.NewServer( 63 | 
redisOpts, 64 | asynq.Config{ 65 | Concurrency: concurrency, 66 | // Optionally specify multiple queues with different priority. 67 | // Queues: map[string]int{ 68 | // "critical": 6, 69 | // "default": 3, 70 | // "low": 1, 71 | // }, 72 | Logger: log, 73 | RetryDelayFunc: runner.RetryDelay, 74 | }, 75 | ) 76 | 77 | // mux maps a type to a handler 78 | mux := asynq.NewServeMux() 79 | mux.HandleFunc(tasks.TypeTranscodingRequest, runner.Run) 80 | 81 | if err := srv.Run(mux); err != nil { 82 | log.Fatal("could not run server: %v", err) 83 | } 84 | } 85 | 86 | func (c *Conductor) Start() { 87 | go func() { 88 | t := time.NewTicker(500 * time.Millisecond) 89 | for { 90 | select { 91 | case <-t.C: 92 | err := c.PutLoad() 93 | if err != nil { 94 | c.options.Logger.Error("work cycle failed", "err", err) 95 | } 96 | case <-c.stopChan: 97 | return 98 | } 99 | } 100 | }() 101 | go func() { 102 | for { 103 | select { 104 | case <-c.stopChan: 105 | return 106 | default: 107 | err := c.ProcessNextResult() 108 | if err != nil { 109 | c.options.Logger.Error("result cycle failed", "err", err) 110 | } 111 | } 112 | } 113 | }() 114 | } 115 | 116 | func (c *Conductor) Stop() { 117 | close(c.stopChan) 118 | c.rdb.Close() 119 | c.asynqClient.Close() 120 | c.asynqInspector.Close() 121 | } 122 | 123 | func (c *Conductor) PutLoad() error { 124 | servers, err := c.asynqInspector.Servers() 125 | if err != nil { 126 | return err 127 | } 128 | spares := 0 129 | for _, s := range servers { 130 | active := len(s.ActiveWorkers) 131 | c.options.Logger.Debug("inspecting worker", "wid", s.Host, "concurrency", s.Concurrency, "active", len(s.ActiveWorkers)) 132 | metrics.Capacity.WithLabelValues(s.Host).Set(float64(s.Concurrency)) 133 | metrics.Running.WithLabelValues(s.Host).Set(float64(active)) 134 | spares += s.Concurrency - active 135 | } 136 | for i := 0; i < spares; i++ { 137 | err := c.DispatchNextTask() 138 | if err != nil { 139 | return err 140 | } else { 141 | metrics.RequestsPublished.Inc() 
142 | } 143 | } 144 | return nil 145 | } 146 | 147 | func (c *Conductor) DispatchNextTask() error { 148 | req := &tasks.TranscodingRequest{} 149 | trReq := <-c.incoming 150 | req.URL = trReq.URI 151 | req.SDHash = trReq.SDHash 152 | logger := c.options.Logger.With("url", req.URL, "sd_hash", req.SDHash) 153 | t, err := tasks.NewTranscodingTask(*req) 154 | if err != nil { 155 | return fmt.Errorf("task creation error: %w", err) 156 | } 157 | info, err := c.asynqClient.Enqueue( 158 | t, 159 | asynq.Unique(24*time.Hour), 160 | asynq.Timeout(24*time.Hour), 161 | asynq.Retention(72*time.Hour), 162 | // asynq.Queue("critical"), 163 | ) 164 | if errors.Is(err, asynq.ErrDuplicateTask) { 165 | logger.Info("task deemed duplicate, skipping") 166 | return c.DispatchNextTask() 167 | } 168 | if err != nil { 169 | return fmt.Errorf("task enqueue error: %w", err) 170 | } 171 | logger.Info("enqueued task", "tid", info.ID, "queue", info.Queue) 172 | return nil 173 | } 174 | 175 | func (c *Conductor) ProcessNextResult() error { 176 | res := &tasks.TranscodingResult{} 177 | r, err := c.rdb.BLPop(context.Background(), 0, tasks.QueueTranscodingResults).Result() 178 | if err != nil { 179 | return fmt.Errorf("message reading error: %w", err) 180 | } 181 | c.options.Logger.Debug("result message received", "body", r[1]) 182 | err = res.FromString(r[1]) 183 | if err != nil { 184 | return fmt.Errorf("message parsing error: %w", err) 185 | } 186 | logger := c.options.Logger.With("url", res.Stream.URL(), "sd_hash", res.Stream.SDHash()) 187 | if err := c.library.AddRemoteStream(*res.Stream); err != nil { 188 | logger.Info("error adding remote stream", "err", err) 189 | metrics.ErrorsCount.WithLabelValues(metrics.StageLibraryAdd).Inc() 190 | return fmt.Errorf("failed to add remote stream: %w", err) 191 | } 192 | metrics.RequestsCompleted.WithLabelValues(res.Stream.Manifest.TranscodedBy).Inc() 193 | logger.Info("remote stream added", "tid", res.Stream.TID()) 194 | return nil 195 | } 196 | 
-------------------------------------------------------------------------------- /pkg/conductor/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | ) 8 | 9 | const ( 10 | LabelWorkerName string = "worker_name" 11 | LabelStage string = "stage" 12 | StageAccepted string = "accepted" 13 | StageDownloading string = "downloading" 14 | StageEncoding string = "encoding" 15 | StageUploading string = "uploading" 16 | StageMetadataFill string = "metadata_fill" 17 | StageLibraryAdd string = "library_add" 18 | ) 19 | 20 | var ( 21 | once = sync.Once{} 22 | 23 | RequestsPublished = prometheus.NewCounter(prometheus.CounterOpts{ 24 | Name: "requests_published", 25 | }) 26 | RequestsCompleted = prometheus.NewCounterVec(prometheus.CounterOpts{ 27 | Name: "requests_completed", 28 | }, []string{LabelWorkerName}) 29 | Capacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 30 | Name: "capacity", 31 | }, []string{LabelWorkerName}) 32 | Running = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 33 | Name: "running", 34 | }, []string{LabelWorkerName}) 35 | 36 | TranscodedSeconds = prometheus.NewCounter(prometheus.CounterOpts{ 37 | Name: "transcoded_seconds", 38 | }) 39 | TranscodedCount = prometheus.NewCounter(prometheus.CounterOpts{ 40 | Name: "transcoded_count", 41 | }) 42 | SpentSeconds = prometheus.NewCounterVec(prometheus.CounterOpts{ 43 | Name: "spent_seconds", 44 | }, []string{LabelStage}) 45 | StageRunning = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 46 | Name: "stage_running", 47 | }, []string{LabelStage}) 48 | 49 | InputBytes = prometheus.NewCounter(prometheus.CounterOpts{ 50 | Name: "input_bytes", 51 | }) 52 | OutputBytes = prometheus.NewCounter(prometheus.CounterOpts{ 53 | Name: "output_bytes", 54 | }) 55 | 56 | ErrorsCount = prometheus.NewCounterVec(prometheus.CounterOpts{ 57 | Name: "errors_count", 58 | }, 
[]string{LabelStage}) 59 | ) 60 | 61 | func RegisterConductorMetrics() { 62 | once.Do(func() { 63 | prometheus.MustRegister( 64 | RequestsPublished, RequestsCompleted, Capacity, Running) 65 | }) 66 | } 67 | 68 | func RegisterWorkerMetrics() { 69 | once.Do(func() { 70 | prometheus.MustRegister( 71 | TranscodedSeconds, TranscodedCount, 72 | SpentSeconds, StageRunning, 73 | InputBytes, OutputBytes, 74 | ErrorsCount, 75 | ) 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /pkg/conductor/tasks/messages.go: -------------------------------------------------------------------------------- 1 | package tasks 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/OdyseeTeam/transcoder/library" 7 | ) 8 | 9 | type TranscodingRequest struct { 10 | URL string `json:"url"` 11 | SDHash string `json:"sd_hash"` 12 | } 13 | 14 | type TranscodingResult struct { 15 | Stream *library.Stream `json:"stream"` 16 | } 17 | 18 | func (m TranscodingRequest) String() string { 19 | out, _ := json.Marshal(m) 20 | return string(out) 21 | } 22 | 23 | func (m *TranscodingRequest) FromString(s string) error { 24 | return json.Unmarshal([]byte(s), m) 25 | } 26 | 27 | func (m TranscodingResult) String() string { 28 | out, _ := json.Marshal(m) 29 | return string(out) 30 | } 31 | 32 | func (m *TranscodingResult) FromString(s string) error { 33 | return json.Unmarshal([]byte(s), m) 34 | } 35 | -------------------------------------------------------------------------------- /pkg/dispatcher/dispatcher.go: -------------------------------------------------------------------------------- 1 | // Package dispatcher provides a convenient interface for parallelizing long tasks and keeping them at bay. 
package dispatcher

import (
	"errors"
	"fmt"
	"sync"
)

// Task lifecycle states stored in Result.Status.
const (
	TaskFailed = iota
	TaskDone
	TaskActive
	TaskPending
	TaskDropped
)

// ErrInvalidPayload is returned by workers that received a payload of an unexpected type.
var ErrInvalidPayload = errors.New("invalid payload")

// Worker can be any object that is capable to do `Work()`.
type Worker interface {
	Work(Task) error
}

// Task represents a unit of work.
// Each worker should accept it as an argument.
// Example:
//
//  func (w encoderWorker) Work(t dispatcher.Task) error {
//    r := t.Payload.(*resolve.TranscodingRequest)
//    ...
type Task struct {
	Payload    interface{}
	Dispatcher *Dispatcher
	result     *Result
}

// Result is a result of Task execution.
// TODO: setting/returning this needs to be implemented better using channels.
type Result struct {
	Status int
	Error  error
	value  chan interface{}
}

// Internal control signals sent to agents and the dispatch loop.
// NOTE(review): sigDoAndStop appears unused in this file — confirm before removing.
const (
	sigStop = iota
	sigDoAndStop
)

// agent owns one Worker and pulls tasks from the shared pool.
// NOTE(review): the per-agent `wait` group appears unused here; gwait is the
// group actually awaited by Dispatcher.Stop.
type agent struct {
	id      string
	tasks   chan Task
	pool    chan chan Task
	sigChan chan int
	worker  Worker
	gwait   *sync.WaitGroup
	wait    *sync.WaitGroup
}

// Dispatcher fans incoming tasks out to a pool of agents.
type Dispatcher struct {
	agentPool     chan chan Task
	agents        []*agent
	incomingTasks chan Task
	sigChan       chan int
	gwait         *sync.WaitGroup
}

// Start spawns a pool of workers.
// tasksBuffer sets how many tasks should be pre-emptively put into each worker's
// incoming queue. Set to 0 to prevent greedy tasks assignment (this will make `Dispatch` blocking).
72 | func Start(parallel int, worker Worker, tasksBuffer int) Dispatcher { 73 | d := Dispatcher{ 74 | agentPool: make(chan chan Task, 1000), 75 | incomingTasks: make(chan Task, tasksBuffer), 76 | sigChan: make(chan int, 1), 77 | gwait: &sync.WaitGroup{}, 78 | } 79 | 80 | for i := 0; i < parallel; i++ { 81 | a := newAgent(i, d.agentPool, worker, d.gwait) 82 | d.agents = append(d.agents, &a) 83 | a.Start() 84 | } 85 | 86 | go func() { 87 | for { 88 | select { 89 | case task := <-d.incomingTasks: 90 | DispatcherQueueLength.Dec() 91 | logger.Debugw("dispatching incoming task", "task", fmt.Sprintf("%+v", task)) 92 | agentQueue := <-d.agentPool 93 | agentQueue <- task 94 | case sig := <-d.sigChan: 95 | if sig == sigStop { 96 | for _, a := range d.agents { 97 | a.Stop() 98 | } 99 | return 100 | } 101 | } 102 | } 103 | }() 104 | 105 | return d 106 | } 107 | 108 | func newAgent(id int, agentPool chan chan Task, worker Worker, gwait *sync.WaitGroup) agent { 109 | return agent{ 110 | id: fmt.Sprintf("%T#%v", worker, id), 111 | tasks: make(chan Task), 112 | pool: agentPool, 113 | sigChan: make(chan int), 114 | worker: worker, 115 | gwait: gwait, 116 | wait: &sync.WaitGroup{}, 117 | } 118 | } 119 | 120 | func newResult() *Result { 121 | return &Result{Status: TaskPending, value: make(chan interface{})} 122 | } 123 | 124 | func (t Task) SetResult(v interface{}) { 125 | go func() { t.result.value <- v }() 126 | } 127 | 128 | func (r Result) Failed() bool { 129 | return r.Status == TaskFailed 130 | } 131 | 132 | func (r Result) Done() bool { 133 | return r.Status == TaskDone 134 | } 135 | 136 | func (r Result) Value() <-chan interface{} { 137 | return r.value 138 | } 139 | 140 | // Start starts reading from tasks channel 141 | func (a *agent) Start() { 142 | logger.Infof("spawned dispatch agent %v", a.id) 143 | a.gwait.Add(1) 144 | go func() { 145 | for { 146 | a.pool <- a.tasks 147 | 148 | select { 149 | case t := <-a.tasks: 150 | t.result.Status = TaskActive 151 | ll := 
logger.With("wid", a.id, "task", fmt.Sprintf("%+v", t)) 152 | ll.Debugw("agent got a task") 153 | DispatcherTasksActive.Inc() 154 | err := a.worker.Work(t) 155 | DispatcherTasksActive.Dec() 156 | if err != nil { 157 | t.result.Status = TaskFailed 158 | t.result.Error = err 159 | DispatcherTasksFailed.WithLabelValues(a.id).Inc() 160 | ll.Errorw("workload failed", "err", err) 161 | } else { 162 | DispatcherTasksDone.WithLabelValues(a.id).Inc() 163 | ll.Debugw("agent done a task") 164 | } 165 | t.result.Status = TaskDone 166 | case sig := <-a.sigChan: 167 | if sig == sigStop { 168 | close(a.tasks) 169 | a.gwait.Done() 170 | logger.Infof("stopped dispatch agent %v", a.id) 171 | return 172 | } 173 | } 174 | } 175 | }() 176 | } 177 | 178 | // Stop stops the worker invocation cycle (it will finish the current worker). 179 | func (a *agent) Stop() { 180 | a.sigChan <- sigStop 181 | } 182 | 183 | // Dispatch takes `payload`, wraps it into a `Task` and dispatches to the first available `Worker`. 184 | func (d *Dispatcher) Dispatch(payload interface{}) *Result { 185 | r := newResult() 186 | d.incomingTasks <- Task{Payload: payload, Dispatcher: d, result: r} 187 | DispatcherQueueLength.Inc() 188 | DispatcherTasksQueued.Inc() 189 | return r 190 | } 191 | 192 | func (d Dispatcher) Stop() { 193 | d.sigChan <- sigStop 194 | d.gwait.Wait() 195 | logger.Infof("all %v agents are stopped", len(d.agents)) 196 | } 197 | -------------------------------------------------------------------------------- /pkg/dispatcher/dispatcher_test.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "runtime" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/OdyseeTeam/transcoder/pkg/logging" 10 | 11 | "github.com/Pallinder/go-randomdata" 12 | "github.com/stretchr/testify/suite" 13 | "go.uber.org/goleak" 14 | ) 15 | 16 | type DispatcherSuite struct { 17 | suite.Suite 18 | } 19 | 20 | type testWorker struct { 21 | 
sync.Mutex 22 | seenTasks []string 23 | } 24 | 25 | func (w *testWorker) Work(t Task) error { 26 | w.Lock() 27 | defer w.Unlock() 28 | pl := t.Payload.(struct{ URL, SDHash string }) 29 | w.seenTasks = append(w.seenTasks, pl.URL+pl.SDHash) 30 | t.SetResult(pl.URL + pl.SDHash) 31 | return nil 32 | } 33 | 34 | func TestDispatcherSuite(t *testing.T) { 35 | suite.Run(t, new(DispatcherSuite)) 36 | } 37 | 38 | func (s *DispatcherSuite) SetupTest() { 39 | } 40 | 41 | func (s *DispatcherSuite) TestDispatcher() { 42 | defer goleak.VerifyNone(s.T()) 43 | 44 | worker := testWorker{seenTasks: []string{}} 45 | d := Start(20, &worker, 1000) 46 | 47 | SetLogger(logging.Create("dispatcher", logging.Prod)) 48 | results := []*Result{} 49 | 50 | for range [500]bool{} { 51 | r := d.Dispatch(struct{ URL, SDHash string }{URL: randomdata.Alphanumeric(25), SDHash: randomdata.Alphanumeric(96)}) 52 | results = append(results, r) 53 | } 54 | 55 | time.Sleep(3000 * time.Millisecond) 56 | 57 | for _, r := range results { 58 | v := <-r.Value() 59 | s.Require().Equal(25+96, len(v.(string))) 60 | s.Require().True(r.Done()) 61 | } 62 | s.Equal(500, len(worker.seenTasks)) 63 | 64 | d.Stop() 65 | } 66 | 67 | func (s *DispatcherSuite) TestBlockingDispatch() { 68 | defer goleak.VerifyNone(s.T()) 69 | 70 | worker := testWorker{seenTasks: []string{}} 71 | d := Start(5, &worker, 0) 72 | 73 | results := []*Result{} 74 | 75 | for range [20]bool{} { 76 | r := d.Dispatch(struct{ URL, SDHash string }{URL: randomdata.Alphanumeric(25), SDHash: randomdata.Alphanumeric(96)}) 77 | results = append(results, r) 78 | } 79 | 80 | for _, r := range results { 81 | v := <-r.Value() 82 | s.Require().Equal(25+96, len(v.(string))) 83 | s.Require().True(r.Done()) 84 | } 85 | s.Equal(20, len(worker.seenTasks)) 86 | 87 | d.Stop() 88 | } 89 | 90 | func (s *DispatcherSuite) TestDispatcherLeaks() { 91 | worker := testWorker{seenTasks: []string{}} 92 | results := [10000]*Result{} 93 | d := Start(20, &worker, 1000) 94 | grCount := 
// logger is the package-wide logger, defaulting to a development config.
var logger = logging.Create("dispatcher", logging.Dev)

// SetLogger replaces the package-wide logger; used by consumers and tests
// to switch to a production or silenced logger.
func SetLogger(l *zap.SugaredLogger) {
	logger = l
}
DispatcherTasksFailed = prometheus.NewCounterVec(prometheus.CounterOpts{ 28 | Name: "dispatcher_tasks_failed", 29 | }, []string{"agent_id"}) 30 | ) 31 | 32 | func RegisterMetrics() { 33 | once.Do(func() { 34 | prometheus.MustRegister( 35 | DispatcherQueueLength, DispatcherTasksActive, DispatcherTasksQueued, 36 | DispatcherTasksDropped, DispatcherTasksDone, DispatcherTasksFailed, 37 | ) 38 | }) 39 | } 40 | -------------------------------------------------------------------------------- /pkg/dispatcher/wait.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | func WaitUntilTrue(ctx context.Context, between time.Duration, f func() bool) error { 10 | for { 11 | select { 12 | case <-ctx.Done(): 13 | return fmt.Errorf("timed out") 14 | default: 15 | if f() { 16 | return nil 17 | } 18 | time.Sleep(between) 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /pkg/dispatcher/wait_test.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "go.uber.org/goleak" 10 | ) 11 | 12 | func TestWaitUntilTrue(t *testing.T) { 13 | defer goleak.VerifyNone(t) 14 | var i, x int 15 | 16 | ctx, cancel1 := context.WithTimeout(context.Background(), 100*time.Millisecond) 17 | err := WaitUntilTrue(ctx, 10*time.Millisecond, func() bool { 18 | if i > 5 { 19 | return true 20 | } 21 | i++ 22 | return false 23 | }) 24 | assert.NoError(t, err) 25 | cancel1() 26 | 27 | ctx, cancel2 := context.WithTimeout(context.Background(), 50*time.Millisecond) 28 | err = WaitUntilTrue(ctx, 10*time.Millisecond, func() bool { 29 | if x > 5 { 30 | return true 31 | } 32 | x++ 33 | return false 34 | }) 35 | cancel2() 36 | 37 | assert.EqualError(t, err, "timed out") 38 | } 39 | 
-------------------------------------------------------------------------------- /pkg/logging/logging.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "go.uber.org/zap" 5 | "go.uber.org/zap/zapcore" 6 | "logur.dev/logur" 7 | ) 8 | 9 | var ( 10 | EnvDebug = "debug" 11 | EnvProd = "prod" 12 | ) 13 | 14 | var Prod = zap.NewProductionConfig() 15 | var Dev = zap.NewDevelopmentConfig() 16 | 17 | func init() { 18 | Prod.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder 19 | zap.ReplaceGlobals(Create("", Dev).Desugar()) 20 | } 21 | 22 | func Create(name string, cfg zap.Config) *zap.SugaredLogger { 23 | l, _ := cfg.Build() 24 | return l.Named(name).Sugar() 25 | } 26 | 27 | type Logger interface { 28 | Debug(args ...interface{}) 29 | Info(args ...interface{}) 30 | Warn(args ...interface{}) 31 | Error(args ...interface{}) 32 | Fatal(args ...interface{}) 33 | With(keyvals ...interface{}) Logger 34 | } 35 | 36 | type KVLogger interface { 37 | Debug(msg string, keyvals ...interface{}) 38 | Info(msg string, keyvals ...interface{}) 39 | Warn(msg string, keyvals ...interface{}) 40 | Error(msg string, keyvals ...interface{}) 41 | Fatal(msg string, keyvals ...interface{}) 42 | With(keyvals ...interface{}) KVLogger 43 | } 44 | 45 | type NoopKVLogger struct { 46 | logur.NoopKVLogger 47 | } 48 | 49 | type NoopLogger struct{} 50 | 51 | func (NoopLogger) Debug(args ...interface{}) {} 52 | func (NoopLogger) Info(args ...interface{}) {} 53 | func (NoopLogger) Warn(args ...interface{}) {} 54 | func (NoopLogger) Error(args ...interface{}) {} 55 | func (NoopLogger) Fatal(args ...interface{}) {} 56 | 57 | func (l NoopLogger) With(args ...interface{}) Logger { 58 | return l 59 | } 60 | 61 | func (l NoopKVLogger) Fatal(msg string, keyvals ...interface{}) {} 62 | 63 | func (l NoopKVLogger) With(keyvals ...interface{}) KVLogger { 64 | return l 65 | } 66 | 67 | func AddLogRef(l KVLogger, sdHash string) KVLogger { 68 | 
if len(sdHash) >= 8 { 69 | return l.With("ref", sdHash[:8]) 70 | } 71 | return l.With("ref?", sdHash) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/logging/zapadapter/adapter_zap.go: -------------------------------------------------------------------------------- 1 | package zapadapter 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/OdyseeTeam/transcoder/pkg/logging" 7 | 8 | "go.uber.org/zap" 9 | "go.uber.org/zap/zapcore" 10 | "logur.dev/logur" 11 | ) 12 | 13 | // logger is a Logur adapter for Uber's Zap. 14 | type logger struct { 15 | logger *zap.SugaredLogger 16 | core zapcore.Core 17 | } 18 | 19 | // kvLogger is a Logur adapter for Uber's Zap. 20 | type kvLogger struct { 21 | logger *zap.SugaredLogger 22 | core zapcore.Core 23 | } 24 | 25 | // NewKV returns a new Logur kvLogger. 26 | // If kvLogger is nil, a default instance is created. 27 | func New(zlogger *zap.Logger) *logger { 28 | if zlogger == nil { 29 | zlogger = zap.L() 30 | } 31 | zlogger = zlogger.WithOptions(zap.AddCallerSkip(1)) 32 | 33 | return &logger{ 34 | logger: zlogger.Sugar(), 35 | core: zlogger.Core(), 36 | } 37 | } 38 | 39 | // NewKV returns a new Logur kvLogger. 40 | // If kvLogger is nil, a default instance is created. 41 | func NewKV(logger *zap.Logger) *kvLogger { 42 | if logger == nil { 43 | logger = zap.L() 44 | } 45 | logger = logger.WithOptions(zap.AddCallerSkip(1)) 46 | 47 | return &kvLogger{ 48 | logger: logger.Sugar(), 49 | core: logger.Core(), 50 | } 51 | } 52 | 53 | // Trace implements the Logur logger interface. 54 | func (l *logger) Trace(args ...interface{}) { 55 | // Fall back to Debug 56 | l.logger.Debug(args...) 57 | } 58 | 59 | // Debug implements the Logur logger interface. 60 | func (l *logger) Debug(args ...interface{}) { 61 | if !l.core.Enabled(zap.DebugLevel) { 62 | return 63 | } 64 | l.logger.Debug(args...) 65 | } 66 | 67 | // Info implements the Logur logger interface. 
68 | func (l *logger) Info(args ...interface{}) { 69 | if !l.core.Enabled(zap.InfoLevel) { 70 | return 71 | } 72 | l.logger.Info(args...) 73 | } 74 | 75 | // Warn implements the Logur logger interface. 76 | func (l *logger) Warn(args ...interface{}) { 77 | if !l.core.Enabled(zap.WarnLevel) { 78 | return 79 | } 80 | l.logger.Warn(args...) 81 | } 82 | 83 | // Error implements the Logur logger interface. 84 | func (l *logger) Error(args ...interface{}) { 85 | if !l.core.Enabled(zap.ErrorLevel) { 86 | return 87 | } 88 | l.logger.Error(args...) 89 | } 90 | 91 | // Error implements the Logur logger interface. 92 | func (l *logger) Fatal(args ...interface{}) { 93 | l.logger.Fatal(args...) 94 | } 95 | 96 | // ... 97 | func (l *logger) With(keyvals ...interface{}) logging.Logger { 98 | newLogger := l.logger.With(keyvals...) 99 | return &logger{ 100 | logger: newLogger, 101 | core: newLogger.Desugar().Core(), 102 | } 103 | } 104 | 105 | // LevelEnabled implements the Logur LevelEnabler interface. 106 | func (l *logger) LevelEnabled(level logur.Level) bool { 107 | switch level { 108 | case logur.Trace: 109 | return l.core.Enabled(zap.DebugLevel) 110 | case logur.Debug: 111 | return l.core.Enabled(zap.DebugLevel) 112 | case logur.Info: 113 | return l.core.Enabled(zap.InfoLevel) 114 | case logur.Warn: 115 | return l.core.Enabled(zap.WarnLevel) 116 | case logur.Error: 117 | return l.core.Enabled(zap.ErrorLevel) 118 | } 119 | 120 | return true 121 | } 122 | 123 | // Trace implements the Logur kvLogger interface. 124 | func (l *kvLogger) Trace(msg string, keyvals ...interface{}) { 125 | // Fall back to Debug 126 | l.logger.Debugw(msg, keyvals...) 127 | } 128 | 129 | // Debug implements the Logur kvLogger interface. 130 | func (l *kvLogger) Debug(msg string, keyvals ...interface{}) { 131 | if !l.core.Enabled(zap.DebugLevel) { 132 | return 133 | } 134 | l.logger.Debugw(msg, keyvals...) 135 | } 136 | 137 | // Info implements the Logur kvLogger interface. 
138 | func (l *kvLogger) Info(msg string, keyvals ...interface{}) { 139 | if !l.core.Enabled(zap.InfoLevel) { 140 | return 141 | } 142 | l.logger.Infow(msg, keyvals...) 143 | } 144 | 145 | // Warn implements the Logur kvLogger interface. 146 | func (l *kvLogger) Warn(msg string, keyvals ...interface{}) { 147 | if !l.core.Enabled(zap.WarnLevel) { 148 | return 149 | } 150 | l.logger.Warnw(msg, keyvals...) 151 | } 152 | 153 | // Error implements the Logur kvLogger interface. 154 | func (l *kvLogger) Error(msg string, keyvals ...interface{}) { 155 | if !l.core.Enabled(zap.ErrorLevel) { 156 | return 157 | } 158 | l.logger.Errorw(msg, keyvals...) 159 | } 160 | 161 | // Error implements the Logur kvLogger interface. 162 | func (l *kvLogger) Fatal(msg string, keyvals ...interface{}) { 163 | l.logger.Fatalw(msg, keyvals...) 164 | } 165 | 166 | func (l *kvLogger) TraceContext(_ context.Context, msg string, keyvals ...interface{}) { 167 | l.Trace(msg, keyvals...) 168 | } 169 | 170 | func (l *kvLogger) DebugContext(_ context.Context, msg string, keyvals ...interface{}) { 171 | l.Debug(msg, keyvals...) 172 | } 173 | 174 | func (l *kvLogger) InfoContext(_ context.Context, msg string, keyvals ...interface{}) { 175 | l.Info(msg, keyvals...) 176 | } 177 | 178 | func (l *kvLogger) WarnContext(_ context.Context, msg string, keyvals ...interface{}) { 179 | l.Warn(msg, keyvals...) 180 | } 181 | 182 | func (l *kvLogger) ErrorContext(_ context.Context, msg string, keyvals ...interface{}) { 183 | l.Error(msg, keyvals...) 184 | } 185 | 186 | // ... 187 | func (l *kvLogger) With(keyvals ...interface{}) logging.KVLogger { 188 | newLogger := l.logger.With(keyvals...) 189 | return &kvLogger{ 190 | logger: newLogger, 191 | core: newLogger.Desugar().Core(), 192 | } 193 | } 194 | 195 | // LevelEnabled implements the Logur LevelEnabler interface. 
196 | func (l *kvLogger) LevelEnabled(level logur.Level) bool { 197 | switch level { 198 | case logur.Trace: 199 | return l.core.Enabled(zap.DebugLevel) 200 | case logur.Debug: 201 | return l.core.Enabled(zap.DebugLevel) 202 | case logur.Info: 203 | return l.core.Enabled(zap.InfoLevel) 204 | case logur.Warn: 205 | return l.core.Enabled(zap.WarnLevel) 206 | case logur.Error: 207 | return l.core.Enabled(zap.ErrorLevel) 208 | } 209 | 210 | return true 211 | } 212 | -------------------------------------------------------------------------------- /pkg/logging/zapadapter/test_adapter_zap.go: -------------------------------------------------------------------------------- 1 | package zapadapter 2 | 3 | import ( 4 | "testing" 5 | 6 | "logur.dev/logur" 7 | "logur.dev/logur/logtesting" 8 | ) 9 | 10 | func TestKVLogger(t *testing.T) { 11 | logger := &logur.TestLogger{} 12 | log := NewKV(nil) 13 | log.Info("hello log", "abc", 123, "foo", "bar") 14 | 15 | logEvent := logur.LogEvent{ 16 | Line: "hello log", 17 | Level: logur.Info, 18 | Fields: map[string]interface{}{ 19 | "foo": "bar", 20 | "abc": "123", 21 | }, 22 | } 23 | 24 | logtesting.AssertLogEventsEqual(t, logEvent, *(logger.LastEvent())) 25 | } 26 | -------------------------------------------------------------------------------- /pkg/mfr/logger.go: -------------------------------------------------------------------------------- 1 | package mfr 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("mfr", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /pkg/mfr/mfr.go: -------------------------------------------------------------------------------- 1 | package mfr 2 | 3 | import ( 4 | "container/list" 5 | "fmt" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | const ( 11 | StatusNone = iota 12 | StatusQueued // waiting to get to the 
top 13 | StatusActive // being processed 14 | StatusDone // done processing 15 | ) 16 | 17 | // Item is a queue storage unit. 18 | type Item struct { 19 | key string 20 | Value interface{} 21 | queue *Queue 22 | posParent *list.Element 23 | created time.Time 24 | } 25 | 26 | type Position struct { 27 | entries map[*Item]int 28 | freq uint 29 | } 30 | 31 | // Queue stores a priority queue with Items with most Hits being at the top. 32 | type Queue struct { 33 | entries map[string]*Item 34 | positions *list.List 35 | size uint 36 | hits uint 37 | mu sync.RWMutex 38 | } 39 | 40 | var now = time.Now 41 | 42 | // NewQueue initializes an empty priority queue suitable for registering Hits right away. 43 | func NewQueue() *Queue { 44 | queue := &Queue{ 45 | positions: list.New(), 46 | entries: map[string]*Item{}, 47 | mu: sync.RWMutex{}, 48 | } 49 | queue.positions.PushFront(&Position{freq: 1, entries: map[*Item]int{}}) 50 | return queue 51 | } 52 | 53 | // Hit puts Item stoStatusActive at `key` higher up in the queue, or inserts it to the bottom of the pile if the item is not present. 54 | func (q *Queue) Hit(key string, value interface{}) { 55 | q.mu.Lock() 56 | defer q.mu.Unlock() 57 | if item, ok := q.entries[key]; ok { 58 | q.increment(item) 59 | logger.Debugw("increment", "key", key, "pointer", fmt.Sprintf("%p", value), "hits", item.Hits()) 60 | } else { 61 | logger.Debugw("insert", "key", key, "pointer", fmt.Sprintf("%p", value)) 62 | q.insert(key, value) 63 | } 64 | } 65 | 66 | // Get retrieves item by key along with its processing status. 67 | func (q *Queue) Get(key string) (*Item, int) { 68 | q.mu.Lock() 69 | defer q.mu.Unlock() 70 | if e, ok := q.entries[key]; ok { 71 | 72 | return e, e.posParent.Value.(*Position).entries[e] 73 | } 74 | return nil, StatusNone 75 | } 76 | 77 | // Peek returns the top-most item of the queue without marking it as being processed. 
78 | func (q *Queue) Peek() *Item { 79 | return q.pop(false, 0) 80 | } 81 | 82 | // Pop returns the top-most item of the queue and marks it as being processed so consecutive calls will return subsequent items. 83 | func (q *Queue) Pop() *Item { 84 | return q.pop(true, 0) 85 | } 86 | 87 | // MinPeek returns the top-most item of the queue if it has a required minimum of hits, without marking it as being processed. 88 | func (q *Queue) MinPeek(minHits uint) *Item { 89 | return q.pop(false, minHits) 90 | } 91 | 92 | // MinPop returns the top-most item of the queue if it has a required minimum of hits 93 | // and marks it as being processed so consecutive calls will return subsequent items. 94 | func (q *Queue) MinPop(minHits uint) *Item { 95 | return q.pop(true, minHits) 96 | } 97 | 98 | func (q *Queue) pop(lockItem bool, minHits uint) *Item { 99 | var ( 100 | i, it *Item 101 | status int 102 | ) 103 | top := q.positions.Back() 104 | 105 | for top != nil && i == nil { 106 | pos := top.Value.(*Position) 107 | q.mu.Lock() 108 | for it, status = range pos.entries { 109 | if it.Hits() < minHits { 110 | q.mu.Unlock() 111 | return nil 112 | } 113 | if status == StatusActive || status == StatusDone { 114 | continue 115 | } 116 | i = it 117 | logger.Debugw("pop candidate", "key", i.key, "status", pos.entries[i], "neighbors", fmt.Sprintf("%v", pos.entries), "q", fmt.Sprintf("%p", q)) 118 | if lockItem { 119 | pos.entries[i] = StatusActive 120 | } 121 | break 122 | } 123 | q.mu.Unlock() 124 | top = top.Prev() 125 | } 126 | if i != nil { 127 | logger.Debugw("pop", "key", i.key, "pointer", fmt.Sprintf("%p", i.Value), "hits", i.Hits()) 128 | } 129 | return i 130 | } 131 | 132 | // Release returns the item back into the queue for future possibility to be `Pop`ped again. 133 | func (q *Queue) Release(key string) { 134 | q.setStatus(key, StatusQueued) 135 | } 136 | 137 | // Done marks the queue item as fully processed. 
138 | func (q *Queue) Done(key string) { 139 | q.setStatus(key, StatusDone) 140 | } 141 | 142 | func (q *Queue) Hits() uint { 143 | return q.hits 144 | } 145 | 146 | func (q *Queue) setStatus(key string, status int) { 147 | item := q.entries[key] 148 | if item == nil { 149 | return 150 | } 151 | q.mu.Lock() 152 | item.posParent.Value.(*Position).entries[item] = status 153 | q.mu.Unlock() 154 | } 155 | 156 | func (q *Queue) insert(key string, value interface{}) { 157 | posParent := q.positions.Front() 158 | item := &Item{ 159 | key: key, 160 | Value: value, 161 | queue: q, 162 | posParent: posParent, 163 | created: now(), 164 | } 165 | posParent.Value.(*Position).entries[item] = StatusQueued 166 | q.entries[key] = item 167 | q.size++ 168 | q.hits++ 169 | } 170 | 171 | func (q *Queue) increment(item *Item) { 172 | pos := item.posParent.Value.(*Position) 173 | status := pos.entries[item] 174 | nextFreq := pos.freq + 1 175 | delete(pos.entries, item) 176 | 177 | nextPosParent := item.posParent.Next() 178 | if nextPosParent == nil || nextPosParent.Value.(*Position).freq > nextFreq { 179 | nextPosParent = q.positions.InsertAfter(&Position{freq: nextFreq, entries: map[*Item]int{}}, item.posParent) 180 | } 181 | nextPosParent.Value.(*Position).entries[item] = status 182 | item.posParent = nextPosParent 183 | q.hits++ 184 | } 185 | 186 | func (q *Queue) Size() uint { 187 | return q.size 188 | } 189 | 190 | // Hits returns the number of hits for the item. 191 | func (i *Item) Hits() uint { 192 | return i.posParent.Value.(*Position).freq 193 | } 194 | 195 | // Release returns the item back into the queue for future possibility to be `Pop`ped again (it won't stop registering hits). 196 | func (i *Item) Release() { 197 | logger.Debugw("release", "key", i.key) 198 | i.queue.Release(i.key) 199 | } 200 | 201 | // Done marks the item as fully processed (it won't stop registering hits). 
202 | func (i *Item) Done() { 203 | logger.Debugw("done", "key", i.key) 204 | i.queue.Done(i.key) 205 | } 206 | 207 | // Age returns how many seconds have passed since the item was created. 208 | func (i *Item) Age() int { 209 | return int(time.Since(i.created).Seconds()) 210 | } 211 | -------------------------------------------------------------------------------- /pkg/mfr/mfr_test.go: -------------------------------------------------------------------------------- 1 | package mfr 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/OdyseeTeam/transcoder/pkg/logging" 10 | 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type mfrSuite struct { 15 | suite.Suite 16 | popClaim1, 17 | popClaim2, 18 | popClaim3 *claim 19 | q *Queue 20 | } 21 | 22 | type claim struct { 23 | sdHash, url string 24 | } 25 | 26 | func TestMFRSuite(t *testing.T) { 27 | suite.Run(t, new(mfrSuite)) 28 | } 29 | 30 | func (s *mfrSuite) SetupTest() { 31 | now = func() time.Time { return time.Now().Add(-30 * time.Second) } 32 | 33 | SetLogger(logging.Create("mfr", logging.Prod)) 34 | 35 | q := NewQueue() 36 | 37 | popClaim1 := &claim{randomString(25), randomString(96)} 38 | popClaim2 := &claim{randomString(25), randomString(96)} 39 | popClaim3 := &claim{randomString(25), randomString(96)} 40 | 41 | wg := &sync.WaitGroup{} 42 | wg.Add(4) 43 | go func() { 44 | defer wg.Done() 45 | for range [10000]byte{} { 46 | q.Hit(popClaim1.url, popClaim1) 47 | q.Peek() 48 | } 49 | }() 50 | go func() { 51 | defer wg.Done() 52 | for range [9999]byte{} { 53 | q.Hit(popClaim2.url, popClaim2) 54 | q.Peek() 55 | } 56 | }() 57 | go func() { 58 | defer wg.Done() 59 | for range [9000]byte{} { 60 | q.Hit(popClaim3.url, popClaim3) 61 | q.Peek() 62 | } 63 | }() 64 | go func() { 65 | defer wg.Done() 66 | for range [50000]byte{} { 67 | c := &claim{randomString(25), randomString(96)} 68 | q.Peek() 69 | q.Hit(c.url, c) 70 | } 71 | }() 72 | wg.Wait() 73 | s.popClaim1 = popClaim1 74 | 
s.popClaim2 = popClaim2 75 | s.popClaim3 = popClaim3 76 | s.q = q 77 | } 78 | 79 | func (s *mfrSuite) TestPop() { 80 | item1 := s.q.Pop() 81 | s.Require().NotNil(item1) 82 | s.Equal(s.popClaim1.url, item1.key) 83 | s.Equal(s.popClaim1, item1.Value.(*claim)) 84 | s.EqualValues(10000, item1.Hits()) 85 | 86 | s.q.Hit(item1.key, item1) 87 | _, status := s.q.Get(item1.key) 88 | s.Equal(StatusActive, status) 89 | 90 | item2 := s.q.Pop() 91 | s.Require().NotNil(item2) 92 | s.Equal(s.popClaim2.url, item2.key) 93 | s.Equal(s.popClaim2, item2.Value.(*claim)) 94 | s.EqualValues(9999, item2.Hits()) 95 | 96 | item3 := s.q.Pop() 97 | s.Require().NotNil(item3) 98 | s.Equal(s.popClaim3.url, item3.key) 99 | s.Equal(s.popClaim3, item3.Value.(*claim)) 100 | s.EqualValues(9000, item3.Hits()) 101 | 102 | s.EqualValues(79000, s.q.hits) 103 | } 104 | 105 | func (s *mfrSuite) TestRelease() { 106 | item := s.q.Pop() 107 | s.Require().NotNil(item) 108 | s.q.Release(item.key) 109 | 110 | item2 := s.q.Pop() 111 | s.Equal(item, item2) 112 | } 113 | 114 | func (s *mfrSuite) TestDone() { 115 | item := s.q.Pop() 116 | s.Require().NotNil(item) 117 | 118 | s.q.Done(item.key) 119 | item2 := s.q.Pop() 120 | s.NotEqual(item, item2) 121 | } 122 | 123 | func (s *mfrSuite) TestGet() { 124 | item, status := s.q.Get("none") 125 | s.Nil(item) 126 | s.Equal(StatusNone, status) 127 | 128 | item, status = s.q.Get(s.popClaim1.url) 129 | s.Equal(s.popClaim1, item.Value.(*claim)) 130 | s.Equal(StatusQueued, status) 131 | 132 | item = s.q.Pop() 133 | s.Equal(s.popClaim1, item.Value.(*claim)) 134 | 135 | item, status = s.q.Get(s.popClaim1.url) 136 | s.Equal(s.popClaim1, item.Value.(*claim)) 137 | s.Equal(StatusActive, status) 138 | 139 | s.q.Release(s.popClaim1.url) 140 | item, status = s.q.Get(s.popClaim1.url) 141 | s.Equal(s.popClaim1, item.Value.(*claim)) 142 | s.Equal(StatusQueued, status) 143 | 144 | s.q.Done(item.key) 145 | item, status = s.q.Get(s.popClaim1.url) 146 | s.Equal(s.popClaim1, 
item.Value.(*claim)) 147 | s.Equal(StatusDone, status) 148 | 149 | s.GreaterOrEqual(item.Age(), 30) 150 | } 151 | 152 | func randomString(n int) string { 153 | var letter = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") 154 | 155 | b := make([]rune, n) 156 | for i := range b { 157 | b[i] = letter[rand.Intn(len(letter))] // #nosec G404 158 | } 159 | return string(b) 160 | } 161 | -------------------------------------------------------------------------------- /pkg/migrator/db.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | "fmt" 7 | ) 8 | 9 | type DBConfig struct { 10 | appName, dsn, dbName, connOpts string 11 | migrate bool 12 | } 13 | 14 | func DefaultDBConfig() *DBConfig { 15 | return &DBConfig{ 16 | dsn: "postgres://postgres:odyseeteam@localhost", 17 | dbName: "postgres", 18 | connOpts: "sslmode=disable", 19 | migrate: true, 20 | } 21 | } 22 | 23 | func (c *DBConfig) DSN(dsn string) *DBConfig { 24 | c.dsn = dsn 25 | return c 26 | } 27 | 28 | func (c *DBConfig) Name(dbName string) *DBConfig { 29 | c.dbName = dbName 30 | return c 31 | } 32 | 33 | func (c *DBConfig) AppName(appName string) *DBConfig { 34 | c.appName = appName 35 | return c 36 | } 37 | 38 | func (c *DBConfig) ConnOpts(connOpts string) *DBConfig { 39 | c.connOpts = connOpts 40 | return c 41 | } 42 | 43 | func (c *DBConfig) NoMigration() *DBConfig { 44 | c.migrate = false 45 | return c 46 | } 47 | 48 | func (c *DBConfig) GetFullDSN() string { 49 | return fmt.Sprintf("%s/%s?%s", c.dsn, c.dbName, c.connOpts) 50 | } 51 | 52 | func ConnectDB(config *DBConfig, migrationsFS embed.FS) (*sql.DB, error) { 53 | var err error 54 | db, err := sql.Open("postgres", config.GetFullDSN()) 55 | if err != nil { 56 | return nil, err 57 | } 58 | if config.migrate { 59 | n, err := NewMigrator(db, migrationsFS, config.appName).MigrateUp() 60 | if err != nil { 61 | return nil, err 62 | } 63 
| logger.Infow("migrations applied", "count", n) 64 | } 65 | 66 | return db, nil 67 | } 68 | -------------------------------------------------------------------------------- /pkg/migrator/logger.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("migrator", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /pkg/migrator/migrator.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | "fmt" 7 | "strings" 8 | 9 | "github.com/lib/pq" 10 | migrate "github.com/rubenv/sql-migrate" 11 | ) 12 | 13 | const dialect = "postgres" 14 | 15 | type CLI struct { 16 | MigrateUp struct { 17 | } `cmd:"" help:"Apply database migrations"` 18 | MigrateDown struct { 19 | Max int `optional:"" help:"Max number of migrations to unapply" default:"0"` 20 | } `cmd:"" help:"Unapply database migrations"` 21 | } 22 | 23 | type Migrator struct { 24 | db *sql.DB 25 | ms migrate.MigrationSet 26 | source *migrate.EmbedFileSystemMigrationSource 27 | } 28 | 29 | func NewMigrator(db *sql.DB, fs embed.FS, migrTableName string) Migrator { 30 | return Migrator{ 31 | db, 32 | migrate.MigrationSet{TableName: migrTableName + "_gorp_migrations"}, 33 | &migrate.EmbedFileSystemMigrationSource{ 34 | FileSystem: fs, 35 | Root: "migrations", 36 | }, 37 | } 38 | } 39 | 40 | // MigrateUp executes forward migrations. 41 | func (m Migrator) MigrateUp() (int, error) { 42 | return m.ms.Exec(m.db, dialect, m.source, migrate.Up) 43 | } 44 | 45 | // MigrateDown undoes a specified number of migrations. 
46 | func (m Migrator) MigrateDown(max int) (int, error) { 47 | return m.ms.ExecMax(m.db, dialect, m.source, migrate.Down, max) 48 | } 49 | 50 | // Truncate purges records from the requested tables. 51 | func (m Migrator) Truncate(tables []string) error { 52 | _, err := m.db.Exec(fmt.Sprintf("TRUNCATE %s CASCADE;", strings.Join(tables, ", "))) 53 | return err 54 | } 55 | 56 | // CreateDB creates the requested database. 57 | func (m Migrator) CreateDB(dbName string) error { 58 | // fmt.Sprintf is used instead of query placeholders because postgres does not 59 | // handle them in schema-modifying queries. 60 | _, err := m.db.Exec(fmt.Sprintf("create database %s;", pq.QuoteIdentifier(dbName))) 61 | // c.logger.WithFields(logrus.Fields{"db_name": dbName}).Info("created the database") 62 | return err 63 | } 64 | 65 | // DropDB drops the requested database. 66 | func (m Migrator) DropDB(dbName string) error { 67 | _, err := m.db.Exec(fmt.Sprintf("drop database %s;", pq.QuoteIdentifier(dbName))) 68 | return err 69 | } 70 | -------------------------------------------------------------------------------- /pkg/migrator/testing.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | 7 | "github.com/Pallinder/go-randomdata" 8 | ) 9 | 10 | type TestDBCleanup func() error 11 | 12 | func CreateTestDB(mfs embed.FS) (*sql.DB, TestDBCleanup, error) { 13 | db, err := ConnectDB(DefaultDBConfig().NoMigration(), mfs) 14 | tdbn := "test-db-" + randomdata.Alphanumeric(12) 15 | if err != nil { 16 | return nil, nil, err 17 | } 18 | m := NewMigrator(db, mfs, "") 19 | m.CreateDB(tdbn) 20 | 21 | tdb, err := ConnectDB(DefaultDBConfig().Name(tdbn), mfs) 22 | if err != nil { 23 | return nil, nil, err 24 | } 25 | tm := NewMigrator(tdb, mfs, "") 26 | _, err = tm.MigrateUp() 27 | if err != nil { 28 | return nil, nil, err 29 | } 30 | return tdb, func() error { 31 | tdb.Close() 32 | err := 
m.DropDB(tdbn) 33 | db.Close() 34 | if err != nil { 35 | return err 36 | } 37 | return nil 38 | }, nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/resolve/errors.go: -------------------------------------------------------------------------------- 1 | package resolve 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrTranscodingUnderway = errors.New("transcoding is in progress") 7 | ErrTranscodingQueued = errors.New("transcoding queued") 8 | ErrTranscodingForbidden = errors.New("transcoding is disabled for this channel") 9 | ErrChannelNotEnabled = errors.New("transcoding is not enabled for this channel") 10 | 11 | ErrClaimNotFound = errors.New("could not resolve stream URI") 12 | ErrNoSigningChannel = errors.New("no signing channel for stream") 13 | ) 14 | -------------------------------------------------------------------------------- /pkg/resolve/logger.go: -------------------------------------------------------------------------------- 1 | package resolve 2 | 3 | import ( 4 | "github.com/OdyseeTeam/transcoder/pkg/logging" 5 | 6 | "go.uber.org/zap" 7 | ) 8 | 9 | var logger = logging.Create("resolve", logging.Dev) 10 | 11 | func SetLogger(l *zap.SugaredLogger) { 12 | logger = l 13 | } 14 | -------------------------------------------------------------------------------- /pkg/resolve/resolve.go: -------------------------------------------------------------------------------- 1 | package resolve 2 | 3 | import ( 4 | "encoding/hex" 5 | "errors" 6 | "fmt" 7 | "math" 8 | "os" 9 | "path" 10 | "regexp" 11 | "strconv" 12 | "strings" 13 | "time" 14 | 15 | "github.com/OdyseeTeam/transcoder/pkg/timer" 16 | 17 | ljsonrpc "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 18 | 19 | "github.com/nikooo777/lbry-blobs-downloader/downloader" 20 | "github.com/nikooo777/lbry-blobs-downloader/shared" 21 | ) 22 | 23 | const downloaderConcurrency = 3 24 | 25 | var ( 26 | odyseeAPI = "https://api.na-backend.odysee.com/api/v1/proxy" 27 | blobServer 
= "blobcache-eu.lbry.com" 28 | edgeToken = "UNSET" 29 | 30 | lbrytvClient = ljsonrpc.NewClient(odyseeAPI) 31 | 32 | ErrNotReflected = errors.New("stream not fully reflected") 33 | ErrNetwork = errors.New("network error") 34 | 35 | reClaimID = regexp.MustCompile("^[a-z0-9]{40}$") 36 | ) 37 | 38 | type WriteCounter struct { 39 | Loaded, Size uint64 40 | Started time.Time 41 | URL string 42 | progressLogged map[int]bool 43 | } 44 | 45 | type ResolvedStream struct { 46 | URI, Name, ClaimID, SDHash, ChannelURI, 47 | ChannelClaimID, NormalizedName string 48 | ChannelSupportAmount int64 49 | ReleaseTime time.Time 50 | } 51 | 52 | func (wc *WriteCounter) Write(p []byte) (int, error) { 53 | n := len(p) 54 | wc.Loaded += uint64(n) 55 | progress := int(float64(wc.Loaded) / float64(wc.Size) * 100) 56 | 57 | if progress > 0 && progress%25 == 0 && !wc.progressLogged[progress] { 58 | wc.progressLogged[progress] = true 59 | rate := int64(float64(wc.Loaded) / time.Since(wc.Started).Seconds()) 60 | logger.Debugw( 61 | "download progress", 62 | "url", wc.URL, 63 | "size", wc.Size, "progress", int(progress), "rate", rate) 64 | } 65 | return n, nil 66 | } 67 | 68 | func ResolveStream(uri string) (*ResolvedStream, error) { 69 | claim, err := Resolve(uri) 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | if claim.ShortURL == "" { 75 | return nil, ErrClaimNotFound 76 | } 77 | 78 | if claim.SigningChannel == nil { 79 | return nil, ErrNoSigningChannel 80 | } 81 | 82 | stream := claim.Value.GetStream() 83 | if stream == nil { 84 | return nil, errors.New("claim doesn't have a stream") 85 | } 86 | releaseTime := time.Unix(stream.GetReleaseTime(), 0) 87 | 88 | streamSource := stream.GetSource() 89 | if streamSource == nil { 90 | return nil, errors.New("stream doesn't have source data") 91 | } 92 | h := hex.EncodeToString(streamSource.SdHash) 93 | 94 | ch := strings.Replace(strings.ToLower(claim.SigningChannel.CanonicalURL), "#", ":", 1) 95 | sup, _ := 
strconv.ParseFloat(claim.SigningChannel.Meta.SupportAmount, 64) 96 | 97 | r := &ResolvedStream{ 98 | URI: claim.CanonicalURL, 99 | SDHash: h, 100 | Name: claim.Name, 101 | NormalizedName: claim.NormalizedName, 102 | ClaimID: claim.ClaimID, 103 | ChannelURI: ch, 104 | ChannelClaimID: claim.SigningChannel.ClaimID, 105 | ChannelSupportAmount: int64(math.Floor(sup)), 106 | ReleaseTime: releaseTime, 107 | } 108 | return r, nil 109 | } 110 | 111 | func Resolve(uri string) (*ljsonrpc.Claim, error) { 112 | lbrytvClient.SetRPCTimeout(10 * time.Second) 113 | 114 | if reClaimID.Match([]byte(uri)) { 115 | res, err := lbrytvClient.ClaimSearch(ljsonrpc.ClaimSearchArgs{ 116 | ClaimID: &uri, 117 | Page: 1, 118 | PageSize: 1, 119 | }) 120 | if err != nil { 121 | if strings.Contains(err.Error(), "rpc call claim_search()") { 122 | return nil, fmt.Errorf("%w: %s", ErrNetwork, err) 123 | } 124 | return nil, err 125 | } 126 | if len(res.Claims) == 0 { 127 | return nil, ErrClaimNotFound 128 | } 129 | return &res.Claims[0], nil 130 | } 131 | resolved, err := lbrytvClient.Resolve(uri) 132 | if err != nil { 133 | if strings.Contains(err.Error(), "rpc call resolve()") { 134 | return nil, fmt.Errorf("%w: %s", ErrNetwork, err) 135 | } 136 | return nil, err 137 | } 138 | 139 | c, ok := (*resolved)[uri] 140 | if !ok { 141 | return nil, ErrClaimNotFound 142 | } 143 | return &c, nil 144 | } 145 | 146 | // Download retrieves a stream from LBRY CDN and saves it into dstDir folder under original name. 
147 | func (c *ResolvedStream) Download(dstDir string) (*os.File, int64, error) { 148 | UDPPort := 5568 149 | TCPPort := 5567 150 | HTTPPort := 5569 151 | 152 | // TODO: Fix this 153 | shared.ReflectorPeerServer = fmt.Sprintf("%s:%d", blobServer, TCPPort) 154 | shared.ReflectorQuicServer = fmt.Sprintf("%s:%d", blobServer, UDPPort) 155 | shared.ReflectorHttpServer = fmt.Sprintf("%s:%d", blobServer, HTTPPort) 156 | shared.EdgeToken = edgeToken 157 | 158 | var readLen int64 159 | dstFile := path.Join(dstDir, c.streamFileName()) 160 | 161 | logger.Infow("downloading stream", "url", c.URI) 162 | t := timer.Start() 163 | 164 | if err := os.MkdirAll(dstDir, os.ModePerm); err != nil { 165 | return nil, 0, err 166 | } 167 | 168 | tmpBlobsPath := "tmp_" + c.SDHash 169 | sdBlob, err := downloader.DownloadStream(c.SDHash, false, downloader.HTTP, tmpBlobsPath, downloaderConcurrency) 170 | // This is needed to cleanup after downloader fails midway 171 | defer os.RemoveAll(path.Join(os.TempDir(), c.streamFileName()+".tmp")) 172 | if err != nil { 173 | return nil, 0, err 174 | } 175 | defer os.RemoveAll(tmpBlobsPath) 176 | 177 | if err := shared.BuildStream(sdBlob, c.streamFileName(), dstDir, tmpBlobsPath); err != nil { 178 | // This is needed to cleanup after BuildStream failing midway 179 | os.RemoveAll(path.Join(dstDir, c.streamFileName())) 180 | if strings.HasSuffix(err.Error(), "no such file or directory") { 181 | return nil, 0, ErrNotReflected 182 | } 183 | return nil, 0, err 184 | } 185 | t.Stop() 186 | 187 | fi, err := os.Stat(dstFile) 188 | if err != nil { 189 | return nil, 0, err 190 | } 191 | readLen = fi.Size() 192 | 193 | f, err := os.Open(dstFile) 194 | if err != nil { 195 | return nil, 0, err 196 | } 197 | 198 | rate := int64(float64(readLen) / t.Duration()) 199 | logger.Infow("stream downloaded", "url", c.URI, "rate", rate, "size", readLen, "seconds_spent", t.DurationInt()) 200 | return f, readLen, nil 201 | } 202 | 203 | func (c *ResolvedStream) streamFileName() 
string { 204 | return c.SDHash 205 | } 206 | 207 | func SetBlobServer(s string) { 208 | blobServer = s 209 | } 210 | 211 | func SetEdgeToken(s string) { 212 | edgeToken = s 213 | } 214 | -------------------------------------------------------------------------------- /pkg/resolve/resolve_test.go: -------------------------------------------------------------------------------- 1 | package resolve 2 | 3 | import ( 4 | "os" 5 | "path" 6 | "testing" 7 | 8 | ljsonrpc "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | const testStreamURL = "@specialoperationstest#3/fear-of-death-inspirational#a" 15 | 16 | func TestTranscodingRequestResolve(t *testing.T) { 17 | c, err := Resolve(testStreamURL) 18 | require.NoError(t, err) 19 | assert.Equal(t, "fear-of-death-inspirational", c.NormalizedName) 20 | } 21 | 22 | func TestTranscodingRequestResolveClaimID(t *testing.T) { 23 | claimID := "aa372cc164a4164ce9ea20741dd7331c28c0e044" 24 | c, err := Resolve(claimID) 25 | require.NoError(t, err) 26 | assert.Equal(t, "fear-of-death-inspirational", c.NormalizedName) 27 | } 28 | 29 | func TestTranscodingRequestResolveClaimID2(t *testing.T) { 30 | claimID := "11b6b88a7e31a6663c5b7734540f3784124e16f7" 31 | c, err := Resolve(claimID) 32 | require.NoError(t, err) 33 | assert.Equal(t, "weekly_webinar_april14", c.NormalizedName) 34 | } 35 | 36 | func TestTranscodingRequestResolveFailure(t *testing.T) { 37 | lbrytvClientOrig := lbrytvClient 38 | lbrytvClient = ljsonrpc.NewClient("http://localhost:2/") 39 | _, err := Resolve(testStreamURL) 40 | require.ErrorIs(t, err, ErrNetwork) 41 | lbrytvClient = lbrytvClientOrig 42 | } 43 | 44 | func TestTranscodingRequestDownload(t *testing.T) { 45 | dstPath := path.Join(os.TempDir(), "transcoder_test") 46 | c, err := Resolve(testStreamURL) 47 | require.NoError(t, err) 48 | 49 | r, err := ResolveStream(testStreamURL) 50 | require.NoError(t, err) 51 | 52 | 
assert.Equal(t, "395b0f23dcd07212c3e956b697ba5ba89578ca54", r.ChannelClaimID) 53 | assert.Equal(t, "lbry://@specialoperationstest:3", r.ChannelURI) 54 | 55 | f, n, err := r.Download(dstPath) 56 | f.Close() 57 | require.NoError(t, err) 58 | 59 | fi, err := os.Stat(f.Name()) 60 | require.NoError(t, err) 61 | assert.EqualValues(t, c.Value.GetStream().GetSource().Size, fi.Size()) 62 | assert.EqualValues(t, c.Value.GetStream().GetSource().Size, n) 63 | 64 | require.NoError(t, os.Remove(f.Name())) 65 | require.NoError(t, os.Remove(dstPath)) 66 | 67 | _, err = os.Stat(dstPath) 68 | require.Error(t, err) 69 | } 70 | -------------------------------------------------------------------------------- /pkg/retriever/retriever.go: -------------------------------------------------------------------------------- 1 | package retriever 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/OdyseeTeam/transcoder/pkg/dispatcher" 7 | "github.com/OdyseeTeam/transcoder/pkg/resolve" 8 | ) 9 | 10 | type downloadTask struct { 11 | url, output string 12 | } 13 | 14 | type DownloadResult struct { 15 | File *os.File 16 | Size int64 17 | Resolved *resolve.ResolvedStream 18 | } 19 | 20 | type pool struct { 21 | dispatcher.Dispatcher 22 | } 23 | 24 | type worker struct{} 25 | 26 | func Retrieve(url, out string) (*DownloadResult, error) { 27 | r, err := resolve.ResolveStream(url) 28 | if err != nil { 29 | return nil, err 30 | } 31 | f, n, err := r.Download(out) 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | return &DownloadResult{f, n, r}, nil 37 | } 38 | 39 | // NewPool will create a pool of retrievers that you can throw work at. 40 | func NewPool(parallel int) pool { 41 | d := dispatcher.Start(parallel, worker{}, 0) 42 | return pool{d} 43 | } 44 | 45 | // Retrieve throws download into a pool of workers. 46 | // It will block if all workers are busy. 47 | // Duplicate urls are not checked for. 
// Timer measures elapsed wall-clock time in seconds.
// Create instances via Start; the zero value starts from the zero time.
type Timer struct {
	Started  time.Time
	duration float64
}

// Start returns a Timer ticking from the current moment.
func Start() *Timer {
	return &Timer{Started: time.Now()}
}

// Stop freezes the elapsed time on its first call and returns it in
// seconds; subsequent calls return the same frozen value.
func (t *Timer) Stop() float64 {
	if t.duration != 0 {
		return t.duration
	}
	t.duration = time.Since(t.Started).Seconds()
	return t.duration
}

// Duration reports the frozen duration if the timer has been stopped,
// or the live elapsed seconds otherwise.
func (t *Timer) Duration() float64 {
	if t.duration != 0 {
		return t.duration
	}
	return time.Since(t.Started).Seconds()
}

// DurationInt is Duration truncated to whole seconds.
func (t *Timer) DurationInt() int64 {
	return int64(t.Duration())
}

// String renders the current duration with two decimal places.
func (t *Timer) String() string {
	return fmt.Sprintf("%.2f", t.Duration())
}
To update or rebuild it, see [its dockerfile](./docker/Dockerfile-ffmpeg) and run: 32 | 33 | ``` 34 | make ffmpeg_image 35 | ``` 36 | 37 | ## Versioning 38 | 39 | This project is using [SemVer](https://semver.org) YY.MM.MINOR[.MICRO] for the `client` package and [CalVer](https://calver.org) YY.MM.MINOR for `transcoder` releases since February 2024: 40 | 41 | ``` 42 | git tag transcoder-v24.2.0 43 | ``` 44 | 45 | ## Tools 46 | 47 | To download a regular stream and produce a transcoded copy locally: 48 | 49 | ``` 50 | docker run -v $(pwd):$(pwd) -w $(pwd) odyseeteam/transcoder-tccli transcode "lbry://@specialoperationstest#3/fear-of-dea 51 | th-inspirational#a" 52 | ``` 53 | 54 | Check `./tccli/main.go` for more commands. 55 | 56 | ## Contributing 57 | 58 | Please ensure that your code builds, passes `golangci-lint` and automated tests run successfully before pushing your branch. 59 | 60 | ## License 61 | 62 | This project is MIT licensed. For the full license, see [LICENSE](LICENSE). 63 | 64 | ## Security 65 | 66 | We take security seriously. Please contact security@odysee.com regarding any issues you may encounter. 67 | 68 | ## Contact 69 | 70 | The primary contact for this project is [@anbsky](https://github.com/anbsky) (andrey.beletsky@odysee.com). 
// LocalStorage is a minimal storage driver for transcoded streams kept on
// the local filesystem under a single root directory.
type LocalStorage struct {
	path string
}

// Local returns a LocalStorage rooted at path.
func Local(path string) LocalStorage {
	return LocalStorage{path}
}

// Delete removes the entire directory of the stream identified by sdHash.
func (s LocalStorage) Delete(sdHash string) error {
	return os.RemoveAll(path.Join(s.path, sdHash))
}

// Path returns the storage root directory.
func (s LocalStorage) Path() string {
	return s.path
}
| "github.com/aws/aws-sdk-go/aws/awserr" 14 | "github.com/docker/go-connections/nat" 15 | "github.com/stretchr/testify/suite" 16 | testcontainers "github.com/testcontainers/testcontainers-go" 17 | "github.com/testcontainers/testcontainers-go/wait" 18 | ) 19 | 20 | var ( 21 | minioAccessKey = "s3-test" 22 | minioSecretKey = randomdata.Alphanumeric(24) 23 | 24 | fragments = []string{library.MasterPlaylistName, "stream_0.m3u8", "stream_1.m3u8", "stream_2.m3u8", "stream_3.m3u8"} 25 | ) 26 | 27 | type s3Container struct { 28 | testcontainers.Container 29 | URI string 30 | } 31 | 32 | type s3suite struct { 33 | suite.Suite 34 | s3container *s3Container 35 | s3driver *S3Driver 36 | } 37 | 38 | func TestS3suite(t *testing.T) { 39 | suite.Run(t, new(s3suite)) 40 | } 41 | 42 | func (s *s3suite) SetupSuite() { 43 | var err error 44 | 45 | s.s3container, err = setupS3(context.Background()) 46 | s.Require().NoError(err) 47 | 48 | s3driver, err := InitS3Driver(config.S3Config{ 49 | Name: "test", 50 | Endpoint: s.s3container.URI, 51 | Region: "us-east-1", 52 | Key: minioAccessKey, 53 | Secret: minioSecretKey, 54 | Bucket: "storage-s3-test", 55 | CreateBucket: true, 56 | }) 57 | s.Require().NoError(err) 58 | s.s3driver = s3driver 59 | } 60 | 61 | func (s *s3suite) SetupTest() { 62 | 63 | } 64 | 65 | func (s *s3suite) TestPut() { 66 | s3drv := s.s3driver 67 | stream := s.putStream() 68 | 69 | sf, err := s3drv.GetFragment(stream.TID(), library.MasterPlaylistName) 70 | s.Require().NoError(err) 71 | s.Require().NotNil(sf) 72 | 73 | mf, err := s3drv.GetFragment(stream.TID(), library.ManifestName) 74 | s.Require().NoError(err) 75 | s.Require().NotNil(mf) 76 | 77 | err = s3drv.Put(stream, false) 78 | s.ErrorIs(err, ErrStreamExists) 79 | 80 | err = s3drv.Put(stream, true) 81 | s.NoError(err) 82 | } 83 | 84 | func (s *s3suite) TestDelete() { 85 | s3drv := s.s3driver 86 | stream := s.putStream() 87 | 88 | err := s3drv.Delete(stream.TID()) 89 | s.Require().NoError(err) 90 | 91 | for _, n := 
range fragments { 92 | p, err := s3drv.GetFragment(stream.TID(), n) 93 | s.NotNil(err) 94 | awsErr := err.(awserr.Error) 95 | s.Equal("NoSuchKey", awsErr.Code()) 96 | s.Nil(p) 97 | } 98 | } 99 | 100 | func (s *s3suite) TestDeleteFragments() { 101 | s3drv := s.s3driver 102 | stream := s.putStream() 103 | 104 | err := s3drv.DeleteFragments(stream.TID(), fragments) 105 | s.Require().NoError(err) 106 | 107 | for _, n := range fragments { 108 | p, err := s3drv.GetFragment(stream.TID(), n) 109 | s.NotNil(err) 110 | awsErr := err.(awserr.Error) 111 | s.Equal("NoSuchKey", awsErr.Code()) 112 | s.Nil(p) 113 | } 114 | } 115 | 116 | func (s *s3suite) putStream() *library.Stream { 117 | streamsPath := s.T().TempDir() 118 | sdHash := randomdata.Alphanumeric(96) 119 | library.PopulateHLSPlaylist(s.T(), streamsPath, sdHash) 120 | 121 | stream := library.InitStream(path.Join(streamsPath, sdHash), "") 122 | err := stream.GenerateManifest("url", "channel", sdHash) 123 | s.Require().NoError(err) 124 | 125 | err = s.s3driver.Put(stream, false) 126 | s.Require().NoError(err) 127 | return stream 128 | } 129 | 130 | type TestLogConsumer struct { 131 | Msgs []string 132 | } 133 | 134 | func (g *TestLogConsumer) Accept(l testcontainers.Log) { 135 | g.Msgs = append(g.Msgs, string(l.Content)) 136 | } 137 | 138 | func setupS3(ctx context.Context) (*s3Container, error) { 139 | p, err := nat.NewPort("tcp", "9000") 140 | if err != nil { 141 | return nil, err 142 | } 143 | req := testcontainers.ContainerRequest{ 144 | Image: "minio/minio:latest", 145 | ExposedPorts: []string{"9000/tcp"}, 146 | WaitingFor: wait.ForHTTP("/minio/health/ready").WithPort(p), 147 | Env: map[string]string{ 148 | "MINIO_ROOT_USER": minioAccessKey, 149 | "MINIO_ROOT_PASSWORD": minioSecretKey, 150 | }, 151 | Entrypoint: []string{"sh"}, 152 | Cmd: []string{"-c", fmt.Sprintf("mkdir -p /data/%s && /usr/bin/minio server /data", "")}, 153 | } 154 | 155 | container, err := testcontainers.GenericContainer(ctx, 
// StreamFragment is a single retrievable piece of a stored stream
// (e.g. a playlist or media segment) returned by storage drivers.
// Callers must Close it after reading.
type StreamFragment interface {
	io.ReadCloser
}
#EXT-X-STREAM-INF:BANDWIDTH=1020800,RESOLUTION=640x360,CODECS="avc1.42c01f,mp4a.40.2" 13 | stream_3.m3u8 14 | -------------------------------------------------------------------------------- /storage/testdata/dummy-stream/stream_0.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s0_000000.ts 9 | #EXTINF:10.010000, 10 | s0_000001.ts 11 | #EXTINF:10.010000, 12 | s0_000002.ts 13 | #EXTINF:10.010000, 14 | s0_000003.ts 15 | #EXTINF:10.010000, 16 | s0_000004.ts 17 | #EXTINF:10.010000, 18 | s0_000005.ts 19 | #EXTINF:10.010000, 20 | s0_000006.ts 21 | #EXTINF:10.010000, 22 | s0_000007.ts 23 | #EXTINF:10.010000, 24 | s0_000008.ts 25 | #EXTINF:10.010000, 26 | s0_000009.ts 27 | #EXTINF:10.010000, 28 | s0_000010.ts 29 | #EXTINF:10.010000, 30 | s0_000011.ts 31 | #EXTINF:10.010000, 32 | s0_000012.ts 33 | #EXTINF:10.010000, 34 | s0_000013.ts 35 | #EXTINF:10.010000, 36 | s0_000014.ts 37 | #EXTINF:10.010000, 38 | s0_000015.ts 39 | #EXTINF:10.010000, 40 | s0_000016.ts 41 | #EXTINF:10.010000, 42 | s0_000017.ts 43 | #EXTINF:10.010000, 44 | s0_000018.ts 45 | #EXTINF:10.010000, 46 | s0_000019.ts 47 | #EXTINF:10.010000, 48 | s0_000020.ts 49 | #EXTINF:10.010000, 50 | s0_000021.ts 51 | #EXTINF:10.010000, 52 | s0_000022.ts 53 | #EXTINF:10.010000, 54 | s0_000023.ts 55 | #EXTINF:10.010000, 56 | s0_000024.ts 57 | #EXTINF:10.010000, 58 | s0_000025.ts 59 | #EXTINF:10.010000, 60 | s0_000026.ts 61 | #EXTINF:10.010000, 62 | s0_000027.ts 63 | #EXTINF:10.010000, 64 | s0_000028.ts 65 | #EXTINF:10.010000, 66 | s0_000029.ts 67 | #EXTINF:10.010000, 68 | s0_000030.ts 69 | #EXTINF:10.010000, 70 | s0_000031.ts 71 | #EXTINF:10.010000, 72 | s0_000032.ts 73 | #EXTINF:10.010000, 74 | s0_000033.ts 75 | #EXTINF:10.010000, 76 | s0_000034.ts 77 | #EXTINF:10.010000, 78 | s0_000035.ts 79 | 
#EXTINF:10.010000, 80 | s0_000036.ts 81 | #EXTINF:10.010000, 82 | s0_000037.ts 83 | #EXTINF:10.010000, 84 | s0_000038.ts 85 | #EXTINF:10.010000, 86 | s0_000039.ts 87 | #EXTINF:10.010000, 88 | s0_000040.ts 89 | #EXTINF:10.010000, 90 | s0_000041.ts 91 | #EXTINF:10.010000, 92 | s0_000042.ts 93 | #EXTINF:10.010000, 94 | s0_000043.ts 95 | #EXTINF:10.010000, 96 | s0_000044.ts 97 | #EXTINF:10.010000, 98 | s0_000045.ts 99 | #EXTINF:10.010000, 100 | s0_000046.ts 101 | #EXTINF:10.010000, 102 | s0_000047.ts 103 | #EXTINF:10.010000, 104 | s0_000048.ts 105 | #EXTINF:10.010000, 106 | s0_000049.ts 107 | #EXTINF:10.010000, 108 | s0_000050.ts 109 | #EXTINF:10.010000, 110 | s0_000051.ts 111 | #EXTINF:10.010000, 112 | s0_000052.ts 113 | #EXTINF:10.010000, 114 | s0_000053.ts 115 | #EXTINF:10.010000, 116 | s0_000054.ts 117 | #EXTINF:10.010000, 118 | s0_000055.ts 119 | #EXTINF:10.010000, 120 | s0_000056.ts 121 | #EXTINF:10.010000, 122 | s0_000057.ts 123 | #EXTINF:10.010000, 124 | s0_000058.ts 125 | #EXTINF:10.010000, 126 | s0_000059.ts 127 | #EXTINF:10.010000, 128 | s0_000060.ts 129 | #EXTINF:10.010000, 130 | s0_000061.ts 131 | #EXTINF:10.010000, 132 | s0_000062.ts 133 | #EXTINF:10.010000, 134 | s0_000063.ts 135 | #EXTINF:10.010000, 136 | s0_000064.ts 137 | #EXTINF:10.010000, 138 | s0_000065.ts 139 | #EXTINF:10.010000, 140 | s0_000066.ts 141 | #EXTINF:10.010000, 142 | s0_000067.ts 143 | #EXTINF:10.010000, 144 | s0_000068.ts 145 | #EXTINF:10.010000, 146 | s0_000069.ts 147 | #EXTINF:10.010000, 148 | s0_000070.ts 149 | #EXTINF:10.010000, 150 | s0_000071.ts 151 | #EXTINF:10.010000, 152 | s0_000072.ts 153 | #EXTINF:10.010000, 154 | s0_000073.ts 155 | #EXTINF:10.010000, 156 | s0_000074.ts 157 | #EXTINF:10.010000, 158 | s0_000075.ts 159 | #EXTINF:10.010000, 160 | s0_000076.ts 161 | #EXTINF:9.479289, 162 | s0_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /storage/testdata/dummy-stream/stream_1.m3u8: 
-------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s1_000000.ts 9 | #EXTINF:10.010000, 10 | s1_000001.ts 11 | #EXTINF:10.010000, 12 | s1_000002.ts 13 | #EXTINF:10.010000, 14 | s1_000003.ts 15 | #EXTINF:10.010000, 16 | s1_000004.ts 17 | #EXTINF:10.010000, 18 | s1_000005.ts 19 | #EXTINF:10.010000, 20 | s1_000006.ts 21 | #EXTINF:10.010000, 22 | s1_000007.ts 23 | #EXTINF:10.010000, 24 | s1_000008.ts 25 | #EXTINF:10.010000, 26 | s1_000009.ts 27 | #EXTINF:10.010000, 28 | s1_000010.ts 29 | #EXTINF:10.010000, 30 | s1_000011.ts 31 | #EXTINF:10.010000, 32 | s1_000012.ts 33 | #EXTINF:10.010000, 34 | s1_000013.ts 35 | #EXTINF:10.010000, 36 | s1_000014.ts 37 | #EXTINF:10.010000, 38 | s1_000015.ts 39 | #EXTINF:10.010000, 40 | s1_000016.ts 41 | #EXTINF:10.010000, 42 | s1_000017.ts 43 | #EXTINF:10.010000, 44 | s1_000018.ts 45 | #EXTINF:10.010000, 46 | s1_000019.ts 47 | #EXTINF:10.010000, 48 | s1_000020.ts 49 | #EXTINF:10.010000, 50 | s1_000021.ts 51 | #EXTINF:10.010000, 52 | s1_000022.ts 53 | #EXTINF:10.010000, 54 | s1_000023.ts 55 | #EXTINF:10.010000, 56 | s1_000024.ts 57 | #EXTINF:10.010000, 58 | s1_000025.ts 59 | #EXTINF:10.010000, 60 | s1_000026.ts 61 | #EXTINF:10.010000, 62 | s1_000027.ts 63 | #EXTINF:10.010000, 64 | s1_000028.ts 65 | #EXTINF:10.010000, 66 | s1_000029.ts 67 | #EXTINF:10.010000, 68 | s1_000030.ts 69 | #EXTINF:10.010000, 70 | s1_000031.ts 71 | #EXTINF:10.010000, 72 | s1_000032.ts 73 | #EXTINF:10.010000, 74 | s1_000033.ts 75 | #EXTINF:10.010000, 76 | s1_000034.ts 77 | #EXTINF:10.010000, 78 | s1_000035.ts 79 | #EXTINF:10.010000, 80 | s1_000036.ts 81 | #EXTINF:10.010000, 82 | s1_000037.ts 83 | #EXTINF:10.010000, 84 | s1_000038.ts 85 | #EXTINF:10.010000, 86 | s1_000039.ts 87 | #EXTINF:10.010000, 88 | s1_000040.ts 89 | #EXTINF:10.010000, 90 | s1_000041.ts 91 | 
#EXTINF:10.010000, 92 | s1_000042.ts 93 | #EXTINF:10.010000, 94 | s1_000043.ts 95 | #EXTINF:10.010000, 96 | s1_000044.ts 97 | #EXTINF:10.010000, 98 | s1_000045.ts 99 | #EXTINF:10.010000, 100 | s1_000046.ts 101 | #EXTINF:10.010000, 102 | s1_000047.ts 103 | #EXTINF:10.010000, 104 | s1_000048.ts 105 | #EXTINF:10.010000, 106 | s1_000049.ts 107 | #EXTINF:10.010000, 108 | s1_000050.ts 109 | #EXTINF:10.010000, 110 | s1_000051.ts 111 | #EXTINF:10.010000, 112 | s1_000052.ts 113 | #EXTINF:10.010000, 114 | s1_000053.ts 115 | #EXTINF:10.010000, 116 | s1_000054.ts 117 | #EXTINF:10.010000, 118 | s1_000055.ts 119 | #EXTINF:10.010000, 120 | s1_000056.ts 121 | #EXTINF:10.010000, 122 | s1_000057.ts 123 | #EXTINF:10.010000, 124 | s1_000058.ts 125 | #EXTINF:10.010000, 126 | s1_000059.ts 127 | #EXTINF:10.010000, 128 | s1_000060.ts 129 | #EXTINF:10.010000, 130 | s1_000061.ts 131 | #EXTINF:10.010000, 132 | s1_000062.ts 133 | #EXTINF:10.010000, 134 | s1_000063.ts 135 | #EXTINF:10.010000, 136 | s1_000064.ts 137 | #EXTINF:10.010000, 138 | s1_000065.ts 139 | #EXTINF:10.010000, 140 | s1_000066.ts 141 | #EXTINF:10.010000, 142 | s1_000067.ts 143 | #EXTINF:10.010000, 144 | s1_000068.ts 145 | #EXTINF:10.010000, 146 | s1_000069.ts 147 | #EXTINF:10.010000, 148 | s1_000070.ts 149 | #EXTINF:10.010000, 150 | s1_000071.ts 151 | #EXTINF:10.010000, 152 | s1_000072.ts 153 | #EXTINF:10.010000, 154 | s1_000073.ts 155 | #EXTINF:10.010000, 156 | s1_000074.ts 157 | #EXTINF:10.010000, 158 | s1_000075.ts 159 | #EXTINF:10.010000, 160 | s1_000076.ts 161 | #EXTINF:9.479289, 162 | s1_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /storage/testdata/dummy-stream/stream_2.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s2_000000.ts 9 | 
#EXTINF:10.010000, 10 | s2_000001.ts 11 | #EXTINF:10.010000, 12 | s2_000002.ts 13 | #EXTINF:10.010000, 14 | s2_000003.ts 15 | #EXTINF:10.010000, 16 | s2_000004.ts 17 | #EXTINF:10.010000, 18 | s2_000005.ts 19 | #EXTINF:10.010000, 20 | s2_000006.ts 21 | #EXTINF:10.010000, 22 | s2_000007.ts 23 | #EXTINF:10.010000, 24 | s2_000008.ts 25 | #EXTINF:10.010000, 26 | s2_000009.ts 27 | #EXTINF:10.010000, 28 | s2_000010.ts 29 | #EXTINF:10.010000, 30 | s2_000011.ts 31 | #EXTINF:10.010000, 32 | s2_000012.ts 33 | #EXTINF:10.010000, 34 | s2_000013.ts 35 | #EXTINF:10.010000, 36 | s2_000014.ts 37 | #EXTINF:10.010000, 38 | s2_000015.ts 39 | #EXTINF:10.010000, 40 | s2_000016.ts 41 | #EXTINF:10.010000, 42 | s2_000017.ts 43 | #EXTINF:10.010000, 44 | s2_000018.ts 45 | #EXTINF:10.010000, 46 | s2_000019.ts 47 | #EXTINF:10.010000, 48 | s2_000020.ts 49 | #EXTINF:10.010000, 50 | s2_000021.ts 51 | #EXTINF:10.010000, 52 | s2_000022.ts 53 | #EXTINF:10.010000, 54 | s2_000023.ts 55 | #EXTINF:10.010000, 56 | s2_000024.ts 57 | #EXTINF:10.010000, 58 | s2_000025.ts 59 | #EXTINF:10.010000, 60 | s2_000026.ts 61 | #EXTINF:10.010000, 62 | s2_000027.ts 63 | #EXTINF:10.010000, 64 | s2_000028.ts 65 | #EXTINF:10.010000, 66 | s2_000029.ts 67 | #EXTINF:10.010000, 68 | s2_000030.ts 69 | #EXTINF:10.010000, 70 | s2_000031.ts 71 | #EXTINF:10.010000, 72 | s2_000032.ts 73 | #EXTINF:10.010000, 74 | s2_000033.ts 75 | #EXTINF:10.010000, 76 | s2_000034.ts 77 | #EXTINF:10.010000, 78 | s2_000035.ts 79 | #EXTINF:10.010000, 80 | s2_000036.ts 81 | #EXTINF:10.010000, 82 | s2_000037.ts 83 | #EXTINF:10.010000, 84 | s2_000038.ts 85 | #EXTINF:10.010000, 86 | s2_000039.ts 87 | #EXTINF:10.010000, 88 | s2_000040.ts 89 | #EXTINF:10.010000, 90 | s2_000041.ts 91 | #EXTINF:10.010000, 92 | s2_000042.ts 93 | #EXTINF:10.010000, 94 | s2_000043.ts 95 | #EXTINF:10.010000, 96 | s2_000044.ts 97 | #EXTINF:10.010000, 98 | s2_000045.ts 99 | #EXTINF:10.010000, 100 | s2_000046.ts 101 | #EXTINF:10.010000, 102 | s2_000047.ts 103 | #EXTINF:10.010000, 
104 | s2_000048.ts 105 | #EXTINF:10.010000, 106 | s2_000049.ts 107 | #EXTINF:10.010000, 108 | s2_000050.ts 109 | #EXTINF:10.010000, 110 | s2_000051.ts 111 | #EXTINF:10.010000, 112 | s2_000052.ts 113 | #EXTINF:10.010000, 114 | s2_000053.ts 115 | #EXTINF:10.010000, 116 | s2_000054.ts 117 | #EXTINF:10.010000, 118 | s2_000055.ts 119 | #EXTINF:10.010000, 120 | s2_000056.ts 121 | #EXTINF:10.010000, 122 | s2_000057.ts 123 | #EXTINF:10.010000, 124 | s2_000058.ts 125 | #EXTINF:10.010000, 126 | s2_000059.ts 127 | #EXTINF:10.010000, 128 | s2_000060.ts 129 | #EXTINF:10.010000, 130 | s2_000061.ts 131 | #EXTINF:10.010000, 132 | s2_000062.ts 133 | #EXTINF:10.010000, 134 | s2_000063.ts 135 | #EXTINF:10.010000, 136 | s2_000064.ts 137 | #EXTINF:10.010000, 138 | s2_000065.ts 139 | #EXTINF:10.010000, 140 | s2_000066.ts 141 | #EXTINF:10.010000, 142 | s2_000067.ts 143 | #EXTINF:10.010000, 144 | s2_000068.ts 145 | #EXTINF:10.010000, 146 | s2_000069.ts 147 | #EXTINF:10.010000, 148 | s2_000070.ts 149 | #EXTINF:10.010000, 150 | s2_000071.ts 151 | #EXTINF:10.010000, 152 | s2_000072.ts 153 | #EXTINF:10.010000, 154 | s2_000073.ts 155 | #EXTINF:10.010000, 156 | s2_000074.ts 157 | #EXTINF:10.010000, 158 | s2_000075.ts 159 | #EXTINF:10.010000, 160 | s2_000076.ts 161 | #EXTINF:9.479289, 162 | s2_000077.ts 163 | #EXT-X-ENDLIST 164 | -------------------------------------------------------------------------------- /storage/testdata/dummy-stream/stream_3.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:6 3 | #EXT-X-TARGETDURATION:10 4 | #EXT-X-MEDIA-SEQUENCE:0 5 | #EXT-X-PLAYLIST-TYPE:VOD 6 | #EXT-X-INDEPENDENT-SEGMENTS 7 | #EXTINF:10.010000, 8 | s3_000000.ts 9 | #EXTINF:10.010000, 10 | s3_000001.ts 11 | #EXTINF:10.010000, 12 | s3_000002.ts 13 | #EXTINF:10.010000, 14 | s3_000003.ts 15 | #EXTINF:10.010000, 16 | s3_000004.ts 17 | #EXTINF:10.010000, 18 | s3_000005.ts 19 | #EXTINF:10.010000, 20 | s3_000006.ts 21 | #EXTINF:10.010000, 22 | 
s3_000007.ts 23 | #EXTINF:10.010000, 24 | s3_000008.ts 25 | #EXTINF:10.010000, 26 | s3_000009.ts 27 | #EXTINF:10.010000, 28 | s3_000010.ts 29 | #EXTINF:10.010000, 30 | s3_000011.ts 31 | #EXTINF:10.010000, 32 | s3_000012.ts 33 | #EXTINF:10.010000, 34 | s3_000013.ts 35 | #EXTINF:10.010000, 36 | s3_000014.ts 37 | #EXTINF:10.010000, 38 | s3_000015.ts 39 | #EXTINF:10.010000, 40 | s3_000016.ts 41 | #EXTINF:10.010000, 42 | s3_000017.ts 43 | #EXTINF:10.010000, 44 | s3_000018.ts 45 | #EXTINF:10.010000, 46 | s3_000019.ts 47 | #EXTINF:10.010000, 48 | s3_000020.ts 49 | #EXTINF:10.010000, 50 | s3_000021.ts 51 | #EXTINF:10.010000, 52 | s3_000022.ts 53 | #EXTINF:10.010000, 54 | s3_000023.ts 55 | #EXTINF:10.010000, 56 | s3_000024.ts 57 | #EXTINF:10.010000, 58 | s3_000025.ts 59 | #EXTINF:10.010000, 60 | s3_000026.ts 61 | #EXTINF:10.010000, 62 | s3_000027.ts 63 | #EXTINF:10.010000, 64 | s3_000028.ts 65 | #EXTINF:10.010000, 66 | s3_000029.ts 67 | #EXTINF:10.010000, 68 | s3_000030.ts 69 | #EXTINF:10.010000, 70 | s3_000031.ts 71 | #EXTINF:10.010000, 72 | s3_000032.ts 73 | #EXTINF:10.010000, 74 | s3_000033.ts 75 | #EXTINF:10.010000, 76 | s3_000034.ts 77 | #EXTINF:10.010000, 78 | s3_000035.ts 79 | #EXTINF:10.010000, 80 | s3_000036.ts 81 | #EXTINF:10.010000, 82 | s3_000037.ts 83 | #EXTINF:10.010000, 84 | s3_000038.ts 85 | #EXTINF:10.010000, 86 | s3_000039.ts 87 | #EXTINF:10.010000, 88 | s3_000040.ts 89 | #EXTINF:10.010000, 90 | s3_000041.ts 91 | #EXTINF:10.010000, 92 | s3_000042.ts 93 | #EXTINF:10.010000, 94 | s3_000043.ts 95 | #EXTINF:10.010000, 96 | s3_000044.ts 97 | #EXTINF:10.010000, 98 | s3_000045.ts 99 | #EXTINF:10.010000, 100 | s3_000046.ts 101 | #EXTINF:10.010000, 102 | s3_000047.ts 103 | #EXTINF:10.010000, 104 | s3_000048.ts 105 | #EXTINF:10.010000, 106 | s3_000049.ts 107 | #EXTINF:10.010000, 108 | s3_000050.ts 109 | #EXTINF:10.010000, 110 | s3_000051.ts 111 | #EXTINF:10.010000, 112 | s3_000052.ts 113 | #EXTINF:10.010000, 114 | s3_000053.ts 115 | #EXTINF:10.010000, 116 | 
// CLI declares the tccli command tree parsed by kong.
// Each top-level field is a subcommand; `arg:""` fields are positional.
var CLI struct {
	GetFragmentUrl struct {
		Server string `optional:"" name:"server" help:"Transcoding server" default:"use-tower1.transcoder.odysee.com:8080"`
		URL    string `name:"url" help:"LBRY URL"`
		SDHash string `name:"sd-hash" help:"SD hash"`
		Name   string `optional:"" name:"name" help:"Fragment file name" default:"master.m3u8"`
	} `cmd:"" help:"Get fragment URL"`
	GetVideoUrl struct {
		Server string `optional:"" name:"server" help:"Transcoding server" default:"use-tower1.transcoder.odysee.com:8080"`
		URL    string `name:"url" help:"LBRY URL"`
	} `cmd:"" help:"Get video URL"`
	GenerateManifests struct {
		VideoDir string `help:"Directory containing videos"`
		DBPath   string `help:"Path to the SQLite DB file"`
	} `cmd:"" help:"Generate manifest files for videos"`
	Retire struct {
		VideoDir string `help:"Directory containing videos"`
		MaxSize  int    `help:"Max size of videos to keep in gigabytes"`
	} `cmd:"" help:"Retire videos, keeping the directory under the given max size"`
	Genstream struct {
		Path string `arg:"" help:"Path containing transcoded stream"`
		URL  string `arg:"" help:"Stream URL"`
	} `cmd:"" help:"Generate stream"`
	Transcode struct {
		URL string `arg:"" help:"LBRY URL"`
	} `cmd:"" help:"Download and transcode a specified video"`
	ValidateStream struct {
		URL string `arg:"" help:"HTTP URL for stream to verify"`
	} `cmd:"" help:"Verify a specified stream"`
}
68 | LogLevel(client.Dev), 69 | ) 70 | 71 | // fmt.Println(c.BuildURL(c.GetPlaybackPath(CLI.GetFragmentUrl.URL, CLI.GetFragmentUrl.SDHash))) 72 | case "get-video-url": 73 | fmt.Printf("http://%s/api/v2/video/%s\n", CLI.GetVideoUrl.Server, url.PathEscape(strings.TrimSpace(CLI.GetVideoUrl.URL))) 74 | case "transcode ": 75 | var inPath, outPath string 76 | var rr *resolve.ResolvedStream 77 | 78 | if strings.HasPrefix(CLI.Transcode.URL, "file://") { 79 | inPath = strings.TrimPrefix(CLI.Transcode.URL, "file://") 80 | outPath = inPath + "_out" 81 | } else { 82 | tmpDir, err := os.MkdirTemp("./", "") 83 | if err != nil { 84 | panic(err) 85 | } 86 | rr, err = resolve.ResolveStream(CLI.Transcode.URL) 87 | if err != nil { 88 | panic(err) 89 | } 90 | f, _, err := rr.Download(tmpDir) 91 | if err != nil { 92 | panic(err) 93 | } 94 | f.Close() 95 | inPath, _ = filepath.Abs(f.Name()) 96 | outPath = url.PathEscape(rr.Name) 97 | defer os.RemoveAll(tmpDir) 98 | } 99 | 100 | e, err := encoder.NewEncoder(encoder.Configure().Log(log).SpritegenPath("/dev/null")) 101 | if err != nil { 102 | panic(err) 103 | } 104 | t := time.Now() 105 | r, err := e.Encode(inPath, outPath) 106 | if err != nil { 107 | panic(err) 108 | } 109 | for p := range r.Progress { 110 | fmt.Printf("%.2f ", p.GetProgress()) 111 | } 112 | fmt.Printf("done in %.2f seconds\n", time.Since(t).Seconds()) 113 | case "genstream ": 114 | rr, err := resolve.ResolveStream(CLI.Genstream.URL) 115 | if err != nil { 116 | panic(err) 117 | } 118 | ls := library.InitStream(CLI.Genstream.Path, "wasabi") 119 | err = ls.GenerateManifest( 120 | rr.URI, rr.ChannelURI, rr.SDHash, 121 | library.WithTranscodedAt(time.Now()), 122 | library.WithWorkerName("manual"), 123 | ) 124 | if err != nil { 125 | panic(err) 126 | } 127 | 128 | cfg := viper.New() 129 | cfg.SetConfigName("conductor") 130 | cfg.AddConfigPath(".") 131 | err = cfg.ReadInConfig() 132 | if err != nil { 133 | panic(err) 134 | } 135 | 136 | libCfg := 
cfg.GetStringMapString("library") 137 | 138 | libDB, err := migrator.ConnectDB(migrator.DefaultDBConfig().DSN(libCfg["dsn"]).AppName("library"), ldb.MigrationsFS) 139 | if err != nil { 140 | panic(err) 141 | } 142 | lib := library.New(library.Config{ 143 | DB: libDB, 144 | Log: zapadapter.NewKV(nil), 145 | }) 146 | if err := lib.AddRemoteStream(*ls); err != nil { 147 | fmt.Println("error adding remote stream", "err", err) 148 | } 149 | case "validate-stream ": 150 | res, err := library.ValidateStream(CLI.ValidateStream.URL, false, false) 151 | 152 | if err != nil { 153 | fmt.Printf("error validating stream: %s\n", err) 154 | return 155 | } 156 | fmt.Printf("%v parts present, %v missing", len(res.Present), len(res.Missing)) 157 | case "validate-streams": 158 | wg := sync.WaitGroup{} 159 | results := make(chan *library.ValidationResult) 160 | 161 | go func() { 162 | for vr := range results { 163 | if len(vr.Missing) > 0 { 164 | fmt.Printf("%s broken\n", vr.URL) 165 | } 166 | } 167 | }() 168 | pipe := func(i interface{}) error { 169 | url := i.(string) 170 | vr, _ := library.ValidateStream(url, true, true) 171 | results <- vr 172 | return nil 173 | } 174 | 175 | scanner := bufio.NewScanner(os.Stdin) 176 | 177 | p, _ := ants.NewPoolWithFunc(10, func(i interface{}) { 178 | err := pipe(i) 179 | if err != nil { 180 | fmt.Println(err) 181 | } 182 | wg.Done() 183 | }) 184 | defer p.Release() 185 | for scanner.Scan() { 186 | u := strings.TrimSpace(scanner.Text()) 187 | if u == "" { 188 | break 189 | } 190 | wg.Add(1) 191 | _ = p.Invoke(u) 192 | } 193 | if err := scanner.Err(); err != nil { 194 | fmt.Fprintln(os.Stderr, "reading standard input:", err) 195 | } 196 | wg.Wait() 197 | default: 198 | panic(ctx.Command()) 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /tower.ex.yml: -------------------------------------------------------------------------------- 1 | S3: 2 | Name: local 3 | Endpoint: http://s3:9000 4 | Bucket: 
transcoded 5 | Key: ody 6 | Secret: odyseetes3 7 | MaxSize: 1TB 8 | 9 | AdaptiveQueue: 10 | MinHits: 1 11 | 12 | Library: 13 | DSN: postgres://postgres:odyseeteam@db 14 | ManagerToken: managertoken123 15 | 16 | Queue: 17 | DSN: postgres://postgres:odyseeteam@db 18 | 19 | Tower: 20 | WorkDir: /storage/tower 21 | 22 | 23 | EnabledChannels: 24 | - "@specialoperationstest#3" 25 | 26 | PriorityChannels: [] 27 | DisabledChannels: [] 28 | -------------------------------------------------------------------------------- /worker.ex.yml: -------------------------------------------------------------------------------- 1 | Storage: 2 | Name: local 3 | Type: S3 4 | Endpoint: http://minio:9000 5 | Region: us-east-1 6 | Bucket: transcoded 7 | Key: ody 8 | Secret: odyseetes3 9 | MaxSize: 1TB 10 | CreateBucket: true 11 | 12 | Redis: redis://:odyredis@redis:6379/1 13 | --------------------------------------------------------------------------------