├── .editorconfig ├── .github └── workflows │ └── pipeline.yml ├── .gitignore ├── Dockerfile ├── Makefile ├── cmd └── main.go ├── config.example.json ├── docker-compose.yml ├── firewall ├── firewall.go └── firewall_test.go ├── go.mod ├── go.sum ├── init.sql ├── internal ├── config │ └── config.go ├── iapi │ └── api.go ├── metrics │ └── metrics.go └── version │ └── version.go ├── main.go ├── pkg ├── app │ ├── app.go │ └── app_test.go ├── logger │ ├── logger.go │ ├── sentry.go │ ├── sentry_test.go │ └── testing.go ├── mime │ └── mime.go └── paid │ ├── handlers.go │ ├── handlers_test.go │ ├── paid.go │ ├── paid_test.go │ ├── validator.go │ └── validator_test.go ├── player ├── chunk.go ├── chunk_test.go ├── decrypted_cache.go ├── errors.go ├── hot_cache.go ├── hot_cache_test.go ├── http_handlers.go ├── http_handlers_test.go ├── http_routes.go ├── media_types.go ├── memory_leak_test.go ├── player.go ├── player_test.go ├── server.go ├── stream.go ├── testdata │ ├── new_stream.json │ └── old_mime.json └── v5_test.go ├── readme.md └── scripts └── wait_for_wallet.sh /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | end_of_line = lf 11 | charset = utf-8 12 | 13 | # Use 2 spaces for the HTML files 14 | [*.html] 15 | indent_size = 2 16 | 17 | # The JSON files contain newlines inconsistently 18 | [*.json] 19 | indent_size = 2 20 | insert_final_newline = ignore 21 | 22 | [{*.yml,*.yaml}] 23 | indent_size = 2 24 | 25 | [**/admin/js/vendor/**] 26 | indent_style = ignore 27 | indent_size = ignore 28 | 29 | # Minified JavaScript files shouldn't be changed 30 | [**.min.js] 31 | indent_style = ignore 32 | insert_final_newline = ignore 33 | 34 | # Makefiles always use tabs for indentation 35 | [Makefile] 36 | indent_style = tab 37 | 38 | [docs/**.txt] 39 | max_line_length = 79 40 | 41 | # Go lang uses tabs by default 42 | [*.go] 43 | indent_style = tab 44 | -------------------------------------------------------------------------------- /.github/workflows/pipeline.yml: -------------------------------------------------------------------------------- 1 | name: Pipeline 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | test: 13 | name: Test 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Prepare test 22 | run: make prepare_test 23 | 24 | - name: Check running containers 25 | run: docker ps -a 26 | 27 | - name: Set up Go 1.22 28 | uses: actions/setup-go@v5 29 | with: 30 | go-version: '1.22.x' 31 | id: go 32 | 33 | - name: Test 34 | run: make test_ci 35 | 36 | # - name: Submit coverage 37 | # run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }} 38 | 39 | - name: Submit coverage report 40 | uses: coverallsapp/github-action@v2 41 | with: 42 | file: coverage.lcov 43 | github-token: ${{ secrets.GITHUB_TOKEN }} 44 | 45 | - name: Make binary 46 | run: make linux 47 | 48 | - name: Login to DockerHub 49 | uses: docker/login-action@v3 50 | with: 51 | username: ${{ secrets.DOCKERHUB_USERNAME }} 52 | password: ${{ secrets.DOCKERHUB_TOKEN }} 53 | 54 | - name: Get release details 55 | run: | 56 | echo "head_version=$(git tag --points-at=HEAD|sed -e 's/v//')" >> $GITHUB_ENV 57 | echo "version=$(git 
describe --tags --abbrev=0|sed -e 's/v//')" >> $GITHUB_ENV 58 | 59 | - run: echo "::warning ::Release ${{ env.head_version }} ${{ env.version }}" 60 | 61 | - name: Build docker image 62 | if: ${{ github.ref != 'refs/heads/master' }} 63 | uses: docker/build-push-action@v6 64 | with: 65 | push: false 66 | tags: odyseeteam/player-server:${{ env.version }} 67 | context: . 68 | 69 | - name: Build and push docker image 70 | if: ${{ github.ref == 'refs/heads/master' }} 71 | uses: docker/build-push-action@v2 72 | with: 73 | push: true 74 | tags: odyseeteam/player-server:${{ env.version }},odyseeteam/player-server:latest 75 | context: . 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dist/ 2 | coverage.* 3 | 4 | config.json 5 | 6 | blacklist.json 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | EXPOSE 8080 3 | 4 | # For /etc/mime.types 5 | RUN apk add mailcap 6 | 7 | WORKDIR /app 8 | COPY dist/linux_amd64/odysee_player /app/ 9 | 10 | CMD ["./odysee_player"] 11 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | version := $(shell git describe --tags) 2 | 3 | .PHONY: prepare_test 4 | prepare_test: 5 | curl https://raw.githubusercontent.com/OdyseeTeam/gody-cdn/master/db-init.sql -o init.sql 6 | cp config.example.json config.json 7 | @if command -v docker-compose >/dev/null 2>&1; then \ 8 | docker-compose up -d mysql; \ 9 | else \ 10 | docker compose up -d mysql; \ 11 | fi 12 | # rm init.sql 13 | 14 | .PHONY: test 15 | test: 16 | go test -cover ./... 17 | 18 | .PHONY: test_ci 19 | test_ci: 20 | go install golang.org/x/tools/cmd/cover@latest 21 | go install github.com/mattn/goveralls@latest 22 | go install github.com/jandelgado/gcov2lcov@latest 23 | go test -covermode=count -coverprofile=coverage.out ./... 24 | gcov2lcov -infile=coverage.out -outfile=coverage.lcov 25 | 26 | linux: 27 | CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -o dist/linux_amd64/odysee_player -ldflags "-s -w -X github.com/OdyseeTeam/player-server/internal/version.version=$(version)" 28 | 29 | macos: 30 | CGO_ENABLED=0 GOARCH=amd64 GOOS=darwin go build -o dist/darwin_amd64/odysee_player -ldflags "-s -w -X github.com/OdyseeTeam/player-server/internal/version.version=$(version)" 31 | 32 | version := $(shell git describe --abbrev=0 --tags|sed 's/v//') 33 | cur_branch := $(shell git rev-parse --abbrev-ref HEAD) 34 | .PHONY: image 35 | image: 36 | docker buildx build -t odyseeteam/player-server:$(version) -t odyseeteam/player-server:latest -t odyseeteam/player-server:$(cur_branch) --platform linux/amd64 . 37 | 38 | version := $(shell git describe --abbrev=0 --tags|sed 's/v//') 39 | .PHONY: publish_image 40 | publish_image: 41 | docker push odyseeteam/player-server:$(version) 42 | docker tag odyseeteam/player-server:$(version) odyseeteam/player-server:latest odyseeteam/player-server:$(cur_branch) 43 | docker push odyseeteam/player-server:latest 44 | 45 | tag := $(shell git describe --abbrev=0 --tags) 46 | .PHONY: retag 47 | retag: 48 | @echo "Re-setting tag $(tag) to the current commit" 49 | git push origin :$(tag) 50 | git tag -d $(tag) 51 | git tag $(tag) 52 | 53 | 54 | .PHONY: dev hotdev 55 | dev: 56 | go run . 
--upstream-reflector=reflector.lbry.com:5568 --verbose --hot-cache-size=50M 57 | hotdev: 58 | reflex --decoration=none --start-service=true --regex='\.go$$' make dev 59 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "os" 8 | "time" 9 | 10 | "github.com/OdyseeTeam/player-server/internal/config" 11 | "github.com/OdyseeTeam/player-server/internal/metrics" 12 | "github.com/OdyseeTeam/player-server/internal/version" 13 | "github.com/OdyseeTeam/player-server/pkg/app" 14 | "github.com/OdyseeTeam/player-server/pkg/logger" 15 | "github.com/OdyseeTeam/player-server/pkg/paid" 16 | "github.com/OdyseeTeam/player-server/player" 17 | "github.com/lbryio/reflector.go/server/http3" 18 | 19 | tclient "github.com/OdyseeTeam/transcoder/client" 20 | "github.com/lbryio/lbry.go/v2/stream" 21 | "github.com/lbryio/reflector.go/server/peer" 22 | "github.com/lbryio/reflector.go/store" 23 | 24 | "github.com/c2h5oh/datasize" 25 | "github.com/sirupsen/logrus" 26 | "github.com/spf13/cobra" 27 | ) 28 | 29 | var Logger = logger.GetLogger() 30 | 31 | var ( 32 | bindAddress string 33 | enablePrefetch bool 34 | enableProfile bool 35 | verboseOutput bool 36 | allowDownloads bool 37 | lbrynetAddress string 38 | paidPubKey string 39 | 40 | upstreamReflector string 41 | upstreamProtocol string 42 | cloudFrontEndpoint string 43 | diskCacheDir string 44 | diskCacheSize string 45 | hotCacheSize string 46 | 47 | transcoderVideoPath string 48 | transcoderVideoSize string 49 | transcoderAddr string 50 | transcoderRemoteServer string 51 | 52 | edgeToken string 53 | 54 | rootCmd = &cobra.Command{ 55 | Use: "odysee_player", 56 | Short: "media server for odysee.com", 57 | Version: version.FullName(), 58 | Run: run, 59 | } 60 | ) 61 | 62 | func init() { 63 | rootCmd.Flags().StringVar(&bindAddress, "bind", "0.0.0.0:8080", "address to bind HTTP server to") 64 | rootCmd.Flags().StringVar(&lbrynetAddress, "lbrynet", "https://api.na-backend.odysee.com/api/v1/proxy", "lbrynet server URL") 65 | rootCmd.Flags().StringVar(&paidPubKey, "paid_pubkey", "https://api.na-backend.odysee.com/api/v1/paid/pubkey", "pubkey for playing paid content") 66 | 67 | rootCmd.Flags().UintVar(&player.StreamWriteTimeout, "http-stream-write-timeout", player.StreamWriteTimeout, "write timeout for stream http requests (seconds)") 68 | rootCmd.Flags().UintVar(&app.WriteTimeout, "http-write-timeout", app.WriteTimeout, "write timeout for http requests (seconds)") 69 | rootCmd.Flags().UintVar(&app.ReadTimeout, "http-read-timeout", app.ReadTimeout, "read timeout for http requests (seconds)") 70 | 71 | rootCmd.Flags().BoolVar(&enablePrefetch, "prefetch", false, "enable prefetch for blobs") 72 | rootCmd.Flags().BoolVar(&enableProfile, "profile", false, fmt.Sprintf("enable profiling server at %v", player.ProfileRoutePath)) 73 | rootCmd.Flags().BoolVar(&verboseOutput, "verbose", false, "enable verbose logging") 74 | rootCmd.Flags().BoolVar(&allowDownloads, "allow-downloads", true, "enable stream downloads") 75 | 76 | rootCmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from") 77 | rootCmd.Flags().StringVar(&upstreamProtocol, "upstream-protocol", "http", "protocol used to fetch blobs from another upstream reflector server (tcp/http3/http)") 78 | 79 | rootCmd.Flags().StringVar(&cloudFrontEndpoint, 
"cloudfront-endpoint", "", "CloudFront edge endpoint for standard HTTP retrieval") 80 | rootCmd.Flags().StringVar(&diskCacheDir, "disk-cache-dir", "", "enable disk cache, storing blobs in dir") 81 | rootCmd.Flags().StringVar(&diskCacheSize, "disk-cache-size", "100MB", "max size of disk cache: 16GB, 500MB, etc.") 82 | rootCmd.Flags().StringVar(&hotCacheSize, "hot-cache-size", "50MB", "max size for in-memory cache: 16GB, 500MB, etc") 83 | rootCmd.Flags().StringVar(&transcoderVideoPath, "transcoder-video-path", "", "path to store transcoded videos") 84 | rootCmd.Flags().StringVar(&transcoderVideoSize, "transcoder-video-size", "200GB", "max size of transcoder video storage") 85 | rootCmd.Flags().StringVar(&transcoderAddr, "transcoder-addr", "", "transcoder API address") 86 | rootCmd.Flags().StringVar(&transcoderRemoteServer, "transcoder-remote-server", "", "remote transcoder storage server URL") 87 | 88 | rootCmd.Flags().UintVar(&player.PrefetchCount, "prefetch-count", player.DefaultPrefetchLen, "how many blobs to retrieve from origin in advance") 89 | 90 | rootCmd.Flags().StringVar(&config.UserName, "config-username", "lbry", "Username to access the config endpoint with") 91 | rootCmd.Flags().StringVar(&config.Password, "config-password", "lbry", "Password to access the config endpoint with") 92 | rootCmd.Flags().Float64Var(&player.ThrottleScale, "throttle-scale", 1.5, "Throttle scale to rate limit in MB/s, only the 1.2 in 1.2MB/s") 93 | rootCmd.Flags().BoolVar(&player.ThrottleSwitch, "throttle-enabled", true, "Enables throttling") 94 | 95 | rootCmd.Flags().StringVar(&edgeToken, "edge-token", "", "Edge token for delivering purchased/rented streams") 96 | } 97 | 98 | func run(cmd *cobra.Command, args []string) { 99 | initLogger() 100 | defer logger.Flush() 101 | 102 | initPubkey() 103 | 104 | blobSource := getBlobSource() 105 | 106 | p := player.NewPlayer( 107 | initHotCache(blobSource), 108 | player.WithLbrynetServer(lbrynetAddress), 109 | player.WithDownloads(allowDownloads), 110 | player.WithPrefetch(enablePrefetch), 111 | player.WithEdgeToken(edgeToken), 112 | ) 113 | 114 | var tcsize datasize.ByteSize 115 | err := tcsize.UnmarshalText([]byte(transcoderVideoSize)) 116 | if err != nil { 117 | Logger.Fatal(err) 118 | } 119 | if transcoderVideoPath != "" && tcsize > 0 && transcoderAddr != "" { 120 | err := os.Mkdir(transcoderVideoPath, os.ModePerm) 121 | if err != nil && !os.IsExist(err) { 122 | Logger.Fatal(err) 123 | } 124 | 125 | tCfg := tclient.Configure(). 126 | VideoPath(transcoderVideoPath). 127 | Server(transcoderAddr). 128 | CacheSize(int64(tcsize)). 
129 | ItemsToPrune(10) 130 | if verboseOutput { 131 | tCfg = tCfg.LogLevel(tclient.Dev) 132 | } 133 | if transcoderRemoteServer != "" { 134 | tCfg = tCfg.RemoteServer(transcoderRemoteServer) 135 | } 136 | c := tclient.New(tCfg) 137 | //TODO: this can probably be in a separate go routine so that startup isn't blocked 138 | n, err := c.RestoreCache() 139 | if err != nil { 140 | Logger.Error(err) 141 | } else { 142 | Logger.Infof("restored %v items into transcoder cache", n) 143 | } 144 | 145 | p.AddTranscoderClient(&c, transcoderVideoPath) 146 | } 147 | 148 | a := app.New(app.Opts{Address: bindAddress, BlobStore: blobSource, EdgeToken: edgeToken}) 149 | 150 | metrics.InstallRoute(a.Router) 151 | player.InstallPlayerRoutes(a.Router, p) 152 | config.InstallConfigRoute(a.Router) 153 | if enableProfile { 154 | player.InstallProfilingRoutes(a.Router) 155 | } 156 | 157 | a.Start() 158 | a.ServeUntilShutdown() 159 | } 160 | 161 | func initHotCache(origin store.BlobStore) *player.HotCache { 162 | var hotCacheBytes datasize.ByteSize 163 | err := hotCacheBytes.UnmarshalText([]byte(hotCacheSize)) 164 | if err != nil { 165 | Logger.Fatal(err) 166 | } 167 | if hotCacheBytes <= 0 { 168 | Logger.Fatal("hot cache size must be greater than 0. if you want to disable hot cache, you'll have to do a bit of coding") 169 | } 170 | 171 | metrics.PlayerCacheInfo(hotCacheBytes.Bytes()) 172 | unencryptedCache := player.NewDecryptedCache(origin) 173 | return player.NewHotCache(*unencryptedCache, int64(hotCacheBytes.Bytes())) 174 | } 175 | 176 | func getBlobSource() store.BlobStore { 177 | var blobSource store.BlobStore 178 | 179 | if upstreamReflector != "" { 180 | switch upstreamProtocol { 181 | case "tcp": 182 | blobSource = peer.NewStore(peer.StoreOpts{ 183 | Address: upstreamReflector, 184 | Timeout: 30 * time.Second, 185 | }) 186 | case "http3": 187 | blobSource = http3.NewStore(http3.StoreOpts{ 188 | Address: upstreamReflector, 189 | Timeout: 30 * time.Second, 190 | }) 191 | case "http": 192 | blobSource = store.NewHttpStore(upstreamReflector, edgeToken) 193 | default: 194 | Logger.Fatalf("protocol is not recognized: %s", upstreamProtocol) 195 | } 196 | 197 | } else if cloudFrontEndpoint != "" { 198 | blobSource = store.NewCloudFrontROStore(cloudFrontEndpoint) 199 | } else { 200 | Logger.Fatal("one of [--upstream-reflector|--cloudfront-endpoint] is required") 201 | } 202 | 203 | diskCacheMaxSize, diskCachePath := diskCacheParams() //TODO: use reflector code instead of code duplication 204 | //we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte 205 | // so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup 206 | realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize) 207 | if diskCacheMaxSize > 0 { 208 | err := os.MkdirAll(diskCachePath, os.ModePerm) 209 | if err != nil { 210 | Logger.Fatal(err) 211 | } 212 | blobSource = store.NewCachingStore( 213 | "player", 214 | blobSource, 215 | store.NewGcacheStore("player", store.NewDiskStore(diskCachePath, 2), int(realCacheSize), store.LRU), 216 | ) 217 | } 218 | 219 | return blobSource 220 | } 221 | 222 | func diskCacheParams() (int, string) { 223 | l := Logger 224 | 225 | if diskCacheDir == "" { 226 | return 0, "" 227 | } 228 | 229 | path := diskCacheDir 230 | if len(path) == 0 || path[0] != '/' { 231 | l.Fatal("--disk-cache-dir must start with '/'") 232 | } 233 | 234 | var maxSize datasize.ByteSize 235 | err := 
maxSize.UnmarshalText([]byte(diskCacheSize)) 236 | if err != nil { 237 | l.Fatal(err) 238 | } 239 | if maxSize <= 0 { 240 | l.Fatal("--disk-cache-size must be more than 0") 241 | } 242 | 243 | return int(maxSize), path 244 | } 245 | 246 | func initLogger() { 247 | logLevel := logrus.InfoLevel 248 | if verboseOutput { 249 | logLevel = logrus.DebugLevel 250 | } 251 | logger.ConfigureDefaults(logLevel) 252 | Logger.Infof("initializing %v\n", version.FullName()) 253 | logger.ConfigureSentry(version.Version(), logger.EnvProd) 254 | } 255 | 256 | func initPubkey() { 257 | l := Logger 258 | 259 | r, err := http.Get(paidPubKey) 260 | if err != nil { 261 | l.Fatal(err) 262 | } 263 | rawKey, err := io.ReadAll(r.Body) 264 | if err != nil { 265 | l.Fatal(err) 266 | } 267 | err = paid.InitPubKey(rawKey) 268 | if err != nil { 269 | l.Fatal(err) 270 | } 271 | } 272 | 273 | func Execute() { 274 | if err := rootCmd.Execute(); err != nil { 275 | Logger.Fatalf("error: %v\n", err) 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "local_db": { 3 | "host": "localhost", 4 | "user": "godycdn", 5 | "database": "godycdn", 6 | "password": "godycdn" 7 | }, 8 | "disk_cache": { 9 | "path": "/tmp/objects/", 10 | "size": "1GB" 11 | }, 12 | "s3_origins": [], 13 | "cleanup_interval_seconds": 60 14 | } -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mysql: 3 | image: percona:ps-8.0 4 | platform: linux/amd64 5 | ports: 6 | - "127.0.0.1:3306:3306" 7 | volumes: 8 | #uncomment line below to persist storage when downing and upping the container 9 | #- /tmp/godycdn_db/mysql:/var/lib/mysql 10 | # get the script from the gody-cdn repo https://raw.githubusercontent.com/OdyseeTeam/gody-cdn/master/db-init.sql 11 | - ./init.sql:/docker-entrypoint-initdb.d/init.sql 12 | environment: 13 | - MYSQL_USER=godycdn 14 | - MYSQL_PASSWORD=godycdn 15 | - MYSQL_DATABASE=godycdn 16 | - MYSQL_ROOT_PASSWORD=godycdnrootpw 17 | player: 18 | image: 'odyseeteam/player-server:latest' 19 | logging: 20 | driver: "json-file" 21 | options: 22 | max-size: "1M" 23 | max-file: "3" 24 | labels: 25 | com.centurylinklabs.watchtower.enable: true 26 | ports: 27 | - "8080:8080" 28 | - "5567:5567" 29 | - "5569:5569" 30 | - "5568:5568/udp" 31 | - "5568:5568/tcp" 32 | volumes: 33 | - "/tmp/reflector_cache:/tmp/player_cache" 34 | - "/tmp/reflector_cache:/tmp/transcoded_cache" 35 | - "/tmp/objects_cache:/tmp/objects" 36 | - "./config.json:/app/config.json" 37 | entrypoint: > 38 | ./odysee_player 39 | --upstream-reflector=reflector.lbry.com:5569 40 | --upstream-protocol=http 41 | --bind=0.0.0.0:8080 42 | --prefetch=true 43 | --hot-cache-size=100MB 44 | --profile=true 45 | --config-username=test 46 | --config-password=test 47 | --throttle-scale=3.0 48 | --throttle-enabled=false 49 | --transcoder-video-path=/tmp/transcoded_cache 50 | # --disk-cache-dir="/tmp/player_cache" 51 | # --disk-cache-size=1GB 52 | environment: 53 | - SPACE_USE_DB=true 54 | - PLAYER_NAME=test-player 55 | - GOGC=60 56 | depends_on: 57 | - mysql 58 | -------------------------------------------------------------------------------- /firewall/firewall.go: -------------------------------------------------------------------------------- 1 | package firewall 2 | 3 | import ( 4 | "archive/tar" 5 | 
"compress/gzip" 6 | "encoding/json" 7 | "io" 8 | "net" 9 | "net/http" 10 | "net/netip" 11 | "os" 12 | "path/filepath" 13 | "sync" 14 | "time" 15 | 16 | "github.com/OdyseeTeam/player-server/internal/iapi" 17 | "github.com/OdyseeTeam/player-server/pkg/logger" 18 | "github.com/bluele/gcache" 19 | "github.com/gaissmai/bart" 20 | "github.com/lbryio/lbry.go/v2/extras/errors" 21 | "github.com/oschwald/maxminddb-golang" 22 | "github.com/puzpuzpuz/xsync/v3" 23 | ) 24 | 25 | type blacklist struct { 26 | BlacklistedAsn []int `json:"blacklisted_asn"` 27 | BlacklistedIPs []string `json:"blacklisted_ips"` 28 | } 29 | 30 | func init() { 31 | ReloadBlacklist() 32 | } 33 | 34 | func ReloadBlacklist() { 35 | f, err := os.ReadFile("blacklist.json") 36 | if err != nil { 37 | Logger.Warn("no blacklist file found, skipping blacklist") 38 | return 39 | } 40 | 41 | var bl blacklist 42 | err = json.Unmarshal(f, &bl) 43 | if err != nil { 44 | Logger.Errorf("failed to unmarshal blacklist: %v", err) 45 | return 46 | } 47 | blacklistedAsn.Clear() 48 | bannedIPs = &bart.Table[int]{} 49 | for _, v := range bl.BlacklistedAsn { 50 | blacklistedAsn.Store(v, true) 51 | } 52 | for _, v := range bl.BlacklistedIPs { 53 | parsedPrefix, err := netip.ParsePrefix(v) 54 | if err != nil { 55 | Logger.Warnf("Error parsing IP %s: %s", v, err) 56 | continue 57 | } 58 | bannedIPs.Insert(parsedPrefix, 1) 59 | } 60 | } 61 | 62 | var WindowSize = 120 * time.Second 63 | 64 | const MaxStringsPerIp = 4 65 | 66 | var resourcesForIPCache = gcache.New(1000).Simple().Build() 67 | var whitelist = map[string]bool{ 68 | "51.210.0.171": true, 69 | } 70 | 71 | var bannedIPs = &bart.Table[int]{} 72 | var blacklistedAsn = xsync.NewMapOf[int, bool]() 73 | var Logger = logger.GetLogger() 74 | 75 | func CheckBans(ip string) bool { 76 | parsedIp, err := netip.ParseAddr(ip) 77 | if err != nil { 78 | Logger.Warnf("Error parsing IP %s: %s", ip, err) 79 | return false 80 | } 81 | _, ok := bannedIPs.Lookup(parsedIp) 82 | if ok { 83 | Logger.Warnf("IP %s matches an entry in the banned list", ip) 84 | return true 85 | } 86 | org, asn, err := GetProviderForIP(ip) 87 | if err == nil { 88 | if _, found := blacklistedAsn.Load(asn); found { 89 | Logger.Warnf("IP %s matches abusive ANS-%d (%s)", ip, asn, org) 90 | return true 91 | } 92 | } 93 | return false 94 | } 95 | 96 | func CheckAndRateLimitIp(ip string, endpoint string) (bool, int) { 97 | if ip == "" { 98 | return false, 0 99 | } 100 | if whitelist[ip] { 101 | return false, 0 102 | } 103 | resources, err := resourcesForIPCache.Get(ip) 104 | if errors.Is(err, gcache.KeyNotFoundError) { 105 | tokensMap := &sync.Map{} 106 | tokensMap.Store(endpoint, time.Now()) 107 | err := resourcesForIPCache.SetWithExpire(ip, tokensMap, WindowSize*10) 108 | if err != nil { 109 | return false, 1 110 | } 111 | return false, 1 112 | } 113 | tokensForIP, _ := resources.(*sync.Map) 114 | currentTime := time.Now() 115 | tokensForIP.Store(endpoint, currentTime) 116 | resourcesCount := 0 117 | flagged := false 118 | tokensForIP.Range(func(k, v interface{}) bool { 119 | if currentTime.Sub(v.(time.Time)) > WindowSize { 120 | tokensForIP.Delete(k) 121 | return true 122 | } 123 | resourcesCount++ 124 | if !flagged && resourcesCount > MaxStringsPerIp { 125 | flagged = true 126 | } 127 | return true 128 | }) 129 | return flagged, resourcesCount 130 | } 131 | 132 | func IsStreamBlocked(claimId string, channelClaimId *string) bool { 133 | blocked, err := iapi.GetBlockedContent() 134 | if err == nil { 135 | if blocked[claimId] { 136 | return true 
137 | } 138 | } 139 | 140 | if channelClaimId != nil && blocked[*channelClaimId] { 141 | return true 142 | } 143 | return false 144 | } 145 | 146 | var geoIpDbLocation = filepath.Join(os.TempDir(), "GeoLite2-ASN.mmdb") 147 | var providerDB *maxminddb.Reader 148 | 149 | func GetProviderForIP(ipStr string) (string, int, error) { 150 | ip := net.ParseIP(ipStr) 151 | if providerDB == nil { 152 | p, err := initISPGeoIPDB() 153 | if err != nil { 154 | return "", 0, err 155 | } 156 | providerDB = p 157 | } 158 | var ASN struct { 159 | AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"` 160 | AutonomousSystemNumber int `maxminddb:"autonomous_system_number"` 161 | } 162 | 163 | err := providerDB.Lookup(ip, &ASN) 164 | if err != nil { 165 | return "", 0, errors.Err(err) 166 | } 167 | return ASN.AutonomousSystemOrganization, ASN.AutonomousSystemNumber, nil 168 | } 169 | 170 | func initISPGeoIPDB() (*maxminddb.Reader, error) { 171 | key := os.Getenv("MAXMIND_KEY") 172 | if key == "" { 173 | return nil, errors.Err("MAXMIND_KEY not set") 174 | } 175 | info, err := os.Stat(geoIpDbLocation) 176 | if os.IsNotExist(err) || (err == nil && info.IsDir()) { 177 | // Get the data 178 | resp, err := http.Get("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=" + key + "&suffix=tar.gz") 179 | if err != nil { 180 | return nil, errors.Err(err) 181 | } 182 | defer resp.Body.Close() 183 | 184 | err = extractGeoIPDB(resp.Body, "GeoLite2-ASN.mmdb") 185 | if err != nil { 186 | return nil, errors.Err(err) 187 | } 188 | } else if err != nil { 189 | return nil, errors.Err(err) 190 | } 191 | 192 | providerDB, err := maxminddb.Open(geoIpDbLocation) 193 | if err != nil { 194 | return nil, errors.Err(err) 195 | } 196 | return providerDB, nil 197 | } 198 | 199 | func extractGeoIPDB(r io.Reader, dbName string) error { 200 | gzr, err := gzip.NewReader(r) 201 | if err != nil { 202 | return errors.Err(err) 203 | } 204 | defer func() { _ = gzr.Close() }() 205 | 206 | tr := tar.NewReader(gzr) 207 | 208 | for { 209 | header, err := tr.Next() 210 | if err == io.EOF { 211 | return nil 212 | } 213 | if err != nil { 214 | return errors.Err(err) 215 | } 216 | 217 | if header == nil { 218 | continue 219 | } 220 | 221 | target := filepath.Join(os.TempDir(), filepath.Base(header.Name)) 222 | 223 | switch header.Typeflag { 224 | case tar.TypeReg: 225 | if filepath.Base(header.Name) == dbName { 226 | err := extractFile(tr, target, header) 227 | if err != nil { 228 | return errors.Err(err) 229 | } 230 | } 231 | 232 | case tar.TypeDir: 233 | err := os.MkdirAll(target, os.FileMode(header.Mode)) 234 | if err != nil { 235 | return errors.Err(err) 236 | } 237 | } 238 | } 239 | } 240 | 241 | func extractFile(tr *tar.Reader, target string, header *tar.Header) error { 242 | err := os.MkdirAll(filepath.Dir(target), os.ModePerm) 243 | if err != nil { 244 | return errors.Err(err) 245 | } 246 | 247 | f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) 248 | if err != nil { 249 | return errors.Err(err) 250 | } 251 | defer f.Close() 252 | 253 | _, err = io.Copy(f, tr) 254 | if err != nil { 255 | return errors.Err(err) 256 | } 257 | err = f.Sync() 258 | if err != nil { 259 | return errors.Err(err) 260 | } 261 | return nil 262 | } 263 | -------------------------------------------------------------------------------- /firewall/firewall_test.go: -------------------------------------------------------------------------------- 1 | package firewall 2 | 3 | import ( 4 | "fmt" 5 | 
"os" 6 | "strconv" 7 | "testing" 8 | "time" 9 | 10 | "github.com/lbryio/lbry.go/v2/extras/errors" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestCheckIPAccess(t *testing.T) { 15 | ip := "192.168.0.1" 16 | endpoint := "/api/v1/example" 17 | WindowSize = 7 * time.Second 18 | // Test the first five accesses for an IP don't exceed the limit 19 | for i := 1; i <= MaxStringsPerIp; i++ { 20 | result, _ := CheckAndRateLimitIp(ip, endpoint+strconv.Itoa(i)) 21 | assert.False(t, result, "Expected result to be false, got %v for endpoint %s", result, endpoint+strconv.Itoa(i)) 22 | } 23 | 24 | // Test the sixth access for an IP exceeds the limit 25 | result, _ := CheckAndRateLimitIp(ip, endpoint+"7") 26 | assert.True(t, result, "Expected result to be true, got %v for endpoint %s", result, endpoint+"7") 27 | 28 | // Wait for the window size to elapse 29 | time.Sleep(WindowSize) 30 | 31 | // Test the access for an IP after the window size elapses doesn't exceed the limit 32 | result, _ = CheckAndRateLimitIp(ip, endpoint+"7") 33 | assert.False(t, result, "Expected result to be false, got %v for endpoint %s", result, endpoint+"7") 34 | } 35 | 36 | func Test_initISPGeoIPDB(t *testing.T) { 37 | if os.Getenv("MAXMIND_KEY") == "" { 38 | t.Skip("Skipping test because MAXMIND_KEY is not set") 39 | } 40 | ip := "1.1.1.1" 41 | ASN, nr, err := GetProviderForIP(ip) 42 | if !assert.NoError(t, err) { 43 | fmt.Println(errors.FullTrace(err)) 44 | } 45 | assert.Equal(t, "CLOUDFLARENET", ASN) 46 | assert.Equal(t, 13335, nr) 47 | } 48 | 49 | func Test_checkBannedIp(t *testing.T) { 50 | if os.Getenv("MAXMIND_KEY") == "" { 51 | t.Skip("Skipping test because MAXMIND_KEY is not set") 52 | } 53 | ip := "207.182.29.47" 54 | ReloadBlacklist() 55 | if !assert.True(t, CheckBans(ip)) { 56 | return 57 | } 58 | ip = "1.1.1.1" 59 | assert.False(t, CheckBans(ip)) 60 | 61 | } 62 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/OdyseeTeam/player-server 2 | 3 | go 1.22.0 4 | 5 | toolchain go1.23.3 6 | 7 | require ( 8 | github.com/Depado/ginprom v1.8.1 9 | github.com/OdyseeTeam/gody-cdn v1.0.8 10 | github.com/OdyseeTeam/transcoder v0.19.2 11 | github.com/Pallinder/go-randomdata v1.2.0 12 | github.com/aybabtme/iocontrol v0.0.0-20240617100617-d92f77f10b49 13 | github.com/bluele/gcache v0.0.2 14 | github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 15 | github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db 16 | github.com/gaissmai/bart v0.13.1 17 | github.com/getsentry/sentry-go v0.31.1 18 | github.com/getsentry/sentry-go/gin v0.31.1 19 | github.com/gin-contrib/pprof v1.5.0 20 | github.com/gin-gonic/gin v1.10.0 21 | github.com/golang-jwt/jwt/v4 v4.5.1 22 | github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629 23 | github.com/lbryio/reflector.go v1.1.3-0.20240409180046-de736b068d75 24 | github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6 25 | github.com/oschwald/maxminddb-golang v1.13.1 26 | github.com/prometheus/client_golang v1.19.0 27 | github.com/puzpuzpuz/xsync/v3 v3.4.0 28 | github.com/sirupsen/logrus v1.9.3 29 | github.com/spf13/cobra v1.8.0 30 | github.com/stretchr/testify v1.9.0 31 | github.com/ybbus/jsonrpc v2.1.2+incompatible 32 | golang.org/x/sync v0.10.0 33 | ) 34 | 35 | require ( 36 | filippo.io/edwards25519 v1.1.0 // indirect 37 | github.com/aws/aws-sdk-go v1.51.19 // indirect 38 | github.com/benbjohnson/clock v1.3.5 // 
indirect 39 | github.com/beorn7/perks v1.0.1 // indirect 40 | github.com/bits-and-blooms/bitset v1.14.3 // indirect 41 | github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 // indirect 42 | github.com/btcsuite/btcd v0.23.4 // indirect 43 | github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect 44 | github.com/btcsuite/btcutil v1.0.2 // indirect 45 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect 46 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect 47 | github.com/bytedance/sonic v1.11.6 // indirect 48 | github.com/bytedance/sonic/loader v0.1.1 // indirect 49 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 50 | github.com/cloudwego/base64x v0.1.4 // indirect 51 | github.com/cloudwego/iasm v0.2.0 // indirect 52 | github.com/davecgh/go-spew v1.1.1 // indirect 53 | github.com/fatih/structs v1.1.0 // indirect 54 | github.com/friendsofgo/errors v0.9.2 // indirect 55 | github.com/fsnotify/fsnotify v1.6.0 // indirect 56 | github.com/gabriel-vasile/mimetype v1.4.3 // indirect 57 | github.com/ghodss/yaml v1.0.0 // indirect 58 | github.com/gin-contrib/sse v0.1.0 // indirect 59 | github.com/go-errors/errors v1.5.1 // indirect 60 | github.com/go-gorp/gorp/v3 v3.1.0 // indirect 61 | github.com/go-ini/ini v1.67.0 // indirect 62 | github.com/go-playground/locales v0.14.1 // indirect 63 | github.com/go-playground/universal-translator v0.18.1 // indirect 64 | github.com/go-playground/validator/v10 v10.20.0 // indirect 65 | github.com/go-sql-driver/mysql v1.8.1 // indirect 66 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect 67 | github.com/goccy/go-json v0.10.2 // indirect 68 | github.com/gofrs/uuid v4.4.0+incompatible // indirect 69 | github.com/golang/protobuf v1.5.3 // indirect 70 | github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect 71 | github.com/gorilla/mux v1.8.1 // indirect 72 | github.com/grafov/m3u8 v0.12.0 // indirect 73 | github.com/hashicorp/hcl v1.0.0 // indirect 74 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 75 | github.com/jmespath/go-jmespath v0.4.0 // indirect 76 | github.com/json-iterator/go v1.1.12 // indirect 77 | github.com/karlseguin/ccache/v2 v2.0.8 // indirect 78 | github.com/karrick/godirwalk v1.17.0 // indirect 79 | github.com/klauspost/cpuid/v2 v2.2.7 // indirect 80 | github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe // indirect 81 | github.com/leodido/go-urn v1.4.0 // indirect 82 | github.com/lib/pq v1.10.9 // indirect 83 | github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect 84 | github.com/magiconair/properties v1.8.7 // indirect 85 | github.com/mattn/go-isatty v0.0.20 // indirect 86 | github.com/mitchellh/mapstructure v1.5.0 // indirect 87 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 88 | github.com/modern-go/reflect2 v1.0.2 // indirect 89 | github.com/nikooo777/lbry-blobs-downloader v1.2.2 // indirect 90 | github.com/onsi/ginkgo/v2 v2.12.1 // indirect 91 | github.com/panjf2000/ants/v2 v2.8.2 // indirect 92 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect 93 | github.com/pkg/errors v0.9.1 // indirect 94 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 95 | github.com/prometheus/client_model v0.5.0 // indirect 96 | github.com/prometheus/common v0.48.0 // indirect 97 | github.com/prometheus/procfs v0.12.0 // indirect 98 | github.com/quic-go/qpack v0.4.0 // indirect 99 | github.com/quic-go/quic-go v0.42.0 // indirect 100 
| github.com/rubenv/sql-migrate v1.5.2 // indirect 101 | github.com/shopspring/decimal v1.3.1 // indirect 102 | github.com/spf13/afero v1.10.0 // indirect 103 | github.com/spf13/cast v1.6.0 // indirect 104 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 105 | github.com/spf13/pflag v1.0.5 // indirect 106 | github.com/spf13/viper v1.16.0 // indirect 107 | github.com/subosito/gotenv v1.6.0 // indirect 108 | github.com/tabbed/pqtype v0.1.1 // indirect 109 | github.com/tkanos/gonfig v0.0.0-20210106201359-53e13348de2f // indirect 110 | github.com/twitchyliquid64/golang-asm v0.15.1 // indirect 111 | github.com/ugorji/go/codec v1.2.12 // indirect 112 | github.com/volatiletech/inflect v0.0.1 // indirect 113 | github.com/volatiletech/null/v8 v8.1.2 // indirect 114 | github.com/volatiletech/randomize v0.0.1 // indirect 115 | github.com/volatiletech/strmangle v0.0.5 // indirect 116 | github.com/ybbus/jsonrpc/v2 v2.1.7 // indirect 117 | go.uber.org/atomic v1.11.0 // indirect 118 | go.uber.org/mock v0.4.0 // indirect 119 | go.uber.org/multierr v1.11.0 // indirect 120 | go.uber.org/zap v1.26.0 // indirect 121 | golang.org/x/arch v0.8.0 // indirect 122 | golang.org/x/crypto v0.31.0 // indirect 123 | golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect 124 | golang.org/x/mod v0.17.0 // indirect 125 | golang.org/x/net v0.33.0 // indirect 126 | golang.org/x/sys v0.28.0 // indirect 127 | golang.org/x/text v0.21.0 // indirect 128 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect 129 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 130 | google.golang.org/protobuf v1.34.1 // indirect 131 | gopkg.in/ini.v1 v1.67.0 // indirect 132 | gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect 133 | gopkg.in/yaml.v2 v2.4.0 // indirect 134 | gopkg.in/yaml.v3 v3.0.1 // indirect 135 | logur.dev/logur v0.17.0 // indirect 136 | ) 137 | 138 | replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19 139 | 140 | replace github.com/floostack/transcoder => github.com/anbsky/transcoder v1.2.0 141 | 142 | //replace github.com/OdyseeTeam/gody-cdn => /home/niko/work/repositories/gody-cdn/ 143 | //replace github.com/lbryio/reflector.go => /home/niko/go/src/github.com/lbryio/reflector.go 144 | //replace github.com/lbryio/transcoder => github.com/OdyseeTeam/transcoder master 145 | -------------------------------------------------------------------------------- /init.sql: -------------------------------------------------------------------------------- 1 | use godycdn; 2 | CREATE TABLE `object` 3 | ( 4 | `id` bigint unsigned NOT NULL AUTO_INCREMENT, 5 | `hash` char(64) COLLATE utf8_unicode_ci NOT NULL, 6 | `is_stored` tinyint(1) NOT NULL DEFAULT '0', 7 | `length` bigint unsigned DEFAULT NULL, 8 | `last_accessed_at` timestamp NULL DEFAULT NULL, 9 | PRIMARY KEY (`id`), 10 | UNIQUE KEY `id` (`id`), 11 | UNIQUE KEY `hash_idx` (`hash`), 12 | KEY `last_accessed_idx` (`last_accessed_at`), 13 | KEY `is_stored_idx` (`is_stored`) 14 | ); -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "net/http" 5 | "strconv" 6 | 7 | "github.com/OdyseeTeam/player-server/firewall" 8 | "github.com/OdyseeTeam/player-server/player" 9 | 10 | "github.com/gin-gonic/gin" 11 | ) 12 | 13 | var UserName string 14 | var Password string 15 | 16 | func InstallConfigRoute(r 
*gin.Engine) { 17 | authorized := r.Group("/config", gin.BasicAuth(gin.Accounts{ 18 | UserName: Password, 19 | })) 20 | authorized.POST("/throttle", throttle) 21 | authorized.POST("/blacklist", reloadBlacklist) 22 | } 23 | 24 | // throttle allows for live configuration of the throttle scalar for the player (MB/s) 25 | // http://localhost:8080/config/throttle?scale=1.2 //postman to add basic auth or temporarily remove basic auth 26 | func throttle(c *gin.Context) { 27 | enabledStr := c.PostForm("enabled") 28 | if enabledStr != "" { 29 | enabled, err := strconv.ParseBool(enabledStr) 30 | if err != nil { 31 | c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "failed to parse enabled " + enabledStr}) 32 | return 33 | } else { 34 | player.ThrottleSwitch = enabled 35 | } 36 | } 37 | scaleStr := c.PostForm("scale") 38 | if scaleStr != "" { 39 | scale, err := strconv.ParseFloat(scaleStr, 64) 40 | if err != nil { 41 | c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "failed to parse scale " + scaleStr}) 42 | } else { 43 | player.ThrottleScale = scale 44 | } 45 | } 46 | } 47 | 48 | // reloadBlacklist reloads the blacklist from the file 49 | func reloadBlacklist(c *gin.Context) { 50 | firewall.ReloadBlacklist() 51 | c.String(http.StatusOK, "blacklist reloaded") 52 | } 53 | -------------------------------------------------------------------------------- /internal/iapi/api.go: -------------------------------------------------------------------------------- 1 | package iapi 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/bluele/gcache" 10 | "github.com/lbryio/lbry.go/v2/extras/errors" 11 | ) 12 | 13 | type BlockedContent struct { 14 | ClaimID string `json:"claim_id"` 15 | //Outpoint string `json:"outpoint"` 16 | } 17 | 18 | var blockedCache = gcache.New(10).Expiration(2 * time.Minute).Build() 19 | 20 | func GetBlockedContent() (map[string]bool, error) { 21 | cachedVal, err := blockedCache.Get("blocked") 22 | if err == nil && cachedVal != nil { 23 | return cachedVal.(map[string]bool), nil 24 | } 25 | 26 | url := "https://api.odysee.com/file/list_blocked?with_claim_id=true" 27 | method := "GET" 28 | type APIResponse struct { 29 | Data []BlockedContent 30 | } 31 | 32 | client := &http.Client{} 33 | req, err := http.NewRequest(method, url, nil) 34 | 35 | if err != nil { 36 | return nil, errors.Err(err) 37 | } 38 | res, err := client.Do(req) 39 | if err != nil { 40 | return nil, errors.Err(err) 41 | } 42 | defer res.Body.Close() 43 | if res.StatusCode != 200 { 44 | return nil, errors.Err("unexpected status code %d", res.StatusCode) 45 | } 46 | body, err := io.ReadAll(res.Body) 47 | if err != nil { 48 | return nil, errors.Err(err) 49 | } 50 | var response APIResponse 51 | err = json.Unmarshal(body, &response) 52 | if err != nil { 53 | return nil, errors.Err(err) 54 | } 55 | blockedMap := make(map[string]bool, len(response.Data)) 56 | for _, bc := range response.Data { 57 | blockedMap[bc.ClaimID] = true 58 | } 59 | err = blockedCache.Set("blocked", blockedMap) 60 | if err != nil { 61 | return blockedMap, errors.Err(err) 62 | } 63 | return blockedMap, nil 64 | } 65 | -------------------------------------------------------------------------------- /internal/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/Depado/ginprom" 7 | "github.com/gin-gonic/gin" 8 | "github.com/prometheus/client_golang/prometheus" 9 | 
"github.com/prometheus/client_golang/prometheus/promauto" 10 | ) 11 | 12 | func InstallRoute(r *gin.Engine) { 13 | p := ginprom.New( 14 | ginprom.Engine(r), 15 | ginprom.Subsystem("gin"), 16 | ginprom.Path("/metrics"), 17 | ) 18 | r.Use(p.Instrument()) 19 | } 20 | 21 | const ( 22 | ns = "player" 23 | 24 | StreamOriginal = "original" 25 | StreamTranscoded = "transcoded" 26 | 27 | ResolveSource = "source" 28 | ResolveKind = "kind" 29 | 30 | ResolveSourceCache = "cache" 31 | ResolveSourceOApi = "oapi" 32 | ResolveFailureGeneral = "general" 33 | ResolveFailureClaimNotFound = "claim_not_found" 34 | ) 35 | 36 | var ( 37 | StreamsRunning = promauto.NewGaugeVec(prometheus.GaugeOpts{ 38 | Namespace: ns, 39 | Subsystem: "streams", 40 | Name: "running", 41 | Help: "Number of streams currently playing", 42 | }, []string{"variant"}) 43 | 44 | StreamsDelivered = promauto.NewCounterVec(prometheus.CounterOpts{ 45 | Namespace: ns, 46 | Subsystem: "streams", 47 | Name: "delivered", 48 | Help: "Total number streams delivered", 49 | }, []string{"variant"}) 50 | 51 | InBytes = promauto.NewCounter(prometheus.CounterOpts{ 52 | Namespace: ns, 53 | Name: "in_bytes", 54 | Help: "Total number of bytes downloaded", 55 | }) 56 | OutBytes = promauto.NewCounter(prometheus.CounterOpts{ 57 | Namespace: ns, 58 | Name: "out_bytes", 59 | Help: "Total number of bytes streamed out", 60 | }) 61 | TcOutBytes = promauto.NewCounter(prometheus.CounterOpts{ 62 | Namespace: ns, 63 | Name: "tc_out_bytes", 64 | Help: "Total number of bytes streamed out via transcoded content", 65 | }) 66 | HotCacheSize = promauto.NewGauge(prometheus.GaugeOpts{ 67 | Namespace: ns, 68 | Subsystem: "hotcache", 69 | Name: "items_size", 70 | Help: "Size of items in cache", 71 | }) 72 | HotCacheItems = promauto.NewGauge(prometheus.GaugeOpts{ 73 | Namespace: ns, 74 | Subsystem: "hotcache", 75 | Name: "items_count", 76 | Help: "Number of items in cache", 77 | }) 78 | HotCacheRequestCount = promauto.NewCounterVec(prometheus.CounterOpts{ 79 | Namespace: ns, 80 | Subsystem: "hotcache", 81 | Name: "request_total", 82 | Help: "Total number of blobs requested from hot cache", 83 | }, []string{"blob_type", "result"}) 84 | DecryptedCacheRequestCount = promauto.NewCounterVec(prometheus.CounterOpts{ 85 | Namespace: ns, 86 | Subsystem: "decryptedcache", 87 | Name: "request_total", 88 | Help: "Total number of objects requested from decrypted cache", 89 | }, []string{"object_type", "result"}) 90 | HotCacheEvictions = promauto.NewCounter(prometheus.CounterOpts{ 91 | Namespace: ns, 92 | Subsystem: "hotcache", 93 | Name: "evictions_total", 94 | Help: "Total number of items evicted from the cache", 95 | }) 96 | 97 | ResolveFailures = promauto.NewCounterVec(prometheus.CounterOpts{ 98 | Namespace: ns, 99 | Subsystem: "resolve", 100 | Name: "failures", 101 | Help: "Total number of failed SDK resolves", 102 | }, []string{ResolveSource, ResolveKind}) 103 | ResolveFailuresDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ 104 | Namespace: ns, 105 | Subsystem: "resolve", 106 | Name: "failures_duration", 107 | Help: "Failed resolves durations", 108 | }, []string{ResolveSource, ResolveKind}) 109 | 110 | ResolveSuccesses = promauto.NewCounterVec(prometheus.CounterOpts{ 111 | Namespace: ns, 112 | Subsystem: "resolve", 113 | Name: "successes", 114 | Help: "Total number of succeeded resolves", 115 | }, []string{ResolveSource}) 116 | ResolveSuccessesDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ 117 | Namespace: ns, 118 | Subsystem: "resolve", 119 | Name: 
"successes_duration", 120 | Help: "Successful resolves durations", 121 | }, []string{ResolveSource}) 122 | 123 | ResolveTimeMS = promauto.NewHistogram(prometheus.HistogramOpts{ 124 | Namespace: ns, 125 | Subsystem: "resolve", 126 | Name: "time", 127 | Help: "Resolve times", 128 | Buckets: []float64{1, 2, 5, 25, 50, 100, 250, 400, 1000}, 129 | }) 130 | 131 | playerInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{ 132 | Namespace: ns, 133 | Subsystem: "hotcache", 134 | Name: "info", 135 | Help: "Info about cache", 136 | }, []string{"max_size"}) 137 | ) 138 | 139 | func PlayerCacheInfo(cacheSize uint64) { 140 | playerInfo.With(prometheus.Labels{ 141 | "max_size": fmt.Sprintf("%d", cacheSize), 142 | }).Set(1.0) 143 | } 144 | -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import "fmt" 4 | 5 | var ( 6 | name = "lbrytv-player" 7 | version = "unknown" 8 | commit = "unknown" 9 | date = "unknown" 10 | ) 11 | 12 | // Name returns main application name 13 | func Name() string { 14 | return name 15 | } 16 | 17 | // Version returns current application version 18 | func Version() string { 19 | return version 20 | } 21 | 22 | // FullName returns current app version, commit and build time 23 | func FullName() string { 24 | return fmt.Sprintf("%v %v, commit %v, built at %v", Name(), Version(), commit, date) 25 | } 26 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/OdyseeTeam/player-server/cmd" 5 | ) 6 | 7 | func main() { 8 | cmd.Execute() 9 | } 10 | -------------------------------------------------------------------------------- /pkg/app/app.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "net/http" 7 | "os" 8 | "os/signal" 9 | "syscall" 10 | "time" 11 | 12 | "github.com/OdyseeTeam/player-server/pkg/logger" 13 | 14 | "github.com/lbryio/lbry.go/v2/extras/errors" 15 | reflectorHttp "github.com/lbryio/reflector.go/server/http" 16 | "github.com/lbryio/reflector.go/server/http3" 17 | "github.com/lbryio/reflector.go/server/peer" 18 | "github.com/lbryio/reflector.go/store" 19 | 20 | nice "github.com/ekyoung/gin-nice-recovery" 21 | "github.com/gin-gonic/gin" 22 | ) 23 | 24 | var ( 25 | Logger = logger.GetLogger() 26 | connContextKey = &contextKey{"conn"} 27 | ReadTimeout = uint(10) 28 | WriteTimeout = uint(15) 29 | ) 30 | 31 | type contextKey struct { 32 | key string 33 | } 34 | 35 | // App holds entities that can be used to control the web server 36 | type App struct { 37 | DefaultHeaders map[string]string 38 | Router *gin.Engine 39 | Address string 40 | BlobStore store.BlobStore 41 | 42 | stopChan chan os.Signal 43 | stopWait time.Duration 44 | server *http.Server 45 | peerServer *peer.Server 46 | http3Server *http3.Server 47 | httpServer *reflectorHttp.Server 48 | } 49 | 50 | // Opts holds basic web server settings. 51 | type Opts struct { 52 | Address string 53 | StopWaitSeconds int 54 | Listener *http.Server 55 | BlobStore store.BlobStore 56 | EdgeToken string 57 | } 58 | 59 | // New returns a new App HTTP server initialized with settings from supplied Opts. 
60 | func New(opts Opts) *App { 61 | a := &App{ 62 | stopChan: make(chan os.Signal), 63 | DefaultHeaders: map[string]string{ 64 | "Access-Control-Allow-Origin": "*", 65 | "Server": "Odysee media player", 66 | }, 67 | Address: opts.Address, 68 | BlobStore: opts.BlobStore, 69 | stopWait: 15 * time.Second, 70 | } 71 | 72 | if opts.StopWaitSeconds != 0 { 73 | a.stopWait = time.Second * time.Duration(opts.StopWaitSeconds) 74 | } 75 | 76 | a.Router = a.newRouter() 77 | a.server = a.newServer() 78 | 79 | if a.BlobStore != nil { 80 | a.peerServer = peer.NewServer(a.BlobStore) 81 | a.http3Server = http3.NewServer(a.BlobStore, 200) 82 | a.httpServer = reflectorHttp.NewServer(a.BlobStore, 200, opts.EdgeToken) 83 | } 84 | 85 | return a 86 | } 87 | 88 | func (a *App) newServer() *http.Server { 89 | return &http.Server{ 90 | Addr: a.Address, 91 | Handler: a.Router, 92 | // Can't have WriteTimeout set for streaming endpoints 93 | ReadTimeout: time.Duration(ReadTimeout) * time.Second, 94 | WriteTimeout: time.Duration(WriteTimeout) * time.Second, 95 | ReadHeaderTimeout: 10 * time.Second, 96 | IdleTimeout: 10 * time.Second, 97 | ConnContext: func(ctx context.Context, c net.Conn) context.Context { 98 | return context.WithValue(ctx, connContextKey, c) 99 | }, 100 | } 101 | } 102 | 103 | func (a *App) newRouter() *gin.Engine { 104 | gin.SetMode(gin.ReleaseMode) 105 | r := gin.New() 106 | r.Use(gin.Logger()) 107 | // Install nice.Recovery, passing the handler to call after recovery 108 | r.Use(nice.Recovery(func(c *gin.Context, err interface{}) { 109 | c.JSON(500, gin.H{ 110 | "title": "Error", 111 | "err": err, 112 | }) 113 | })) 114 | 115 | r.Use(a.defaultHeadersMiddleware()) 116 | r.Use(logger.SentryHandler) 117 | return r 118 | } 119 | 120 | func (a *App) defaultHeadersMiddleware() gin.HandlerFunc { 121 | return func(c *gin.Context) { 122 | for k, v := range a.DefaultHeaders { 123 | c.Header(k, v) 124 | } 125 | c.Next() 126 | } 127 | } 128 | 129 | // Start starts a HTTP server and returns immediately. 130 | func (a *App) Start() { 131 | go func() { 132 | Logger.Infof("starting app server on %v", a.Address) 133 | if err := a.server.ListenAndServe(); err != nil { 134 | if err.Error() != "http: Server closed" { 135 | Logger.Fatal(err) 136 | } 137 | } 138 | }() 139 | 140 | if a.peerServer != nil { 141 | err := a.peerServer.Start(":5567") 142 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 143 | Logger.Fatal(err) 144 | } 145 | } 146 | 147 | if a.http3Server != nil { 148 | err := a.http3Server.Start(":5568") 149 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 150 | Logger.Fatal(err) 151 | } 152 | } 153 | 154 | if a.httpServer != nil { 155 | err := a.httpServer.Start(":5569") 156 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 157 | Logger.Fatal(err) 158 | } 159 | } 160 | } 161 | 162 | // ServeUntilShutdown blocks until a shutdown signal is received, then shuts down the HTTP server. 
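// The call blocks on SIGINT/SIGTERM, then gives in-flight requests up to stopWait
// (15s by default, overridable via Opts.StopWaitSeconds) to finish before the peer,
// HTTP3 and reflector HTTP blob servers are shut down as well. Typical call order,
// as used in cmd/main.go:
//
//	a.Start()
//	a.ServeUntilShutdown()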
163 | func (a *App) ServeUntilShutdown() { 164 | signal.Notify(a.stopChan, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) 165 | sig := <-a.stopChan 166 | 167 | Logger.Printf("caught a signal (%v), shutting down http server...", sig) 168 | 169 | err := a.Shutdown() 170 | if err != nil { 171 | Logger.Error("error shutting down http server: ", err) 172 | } else { 173 | Logger.Info("http server shut down") 174 | } 175 | 176 | if a.peerServer != nil { 177 | a.peerServer.Shutdown() 178 | } 179 | 180 | if a.http3Server != nil { 181 | a.http3Server.Shutdown() 182 | } 183 | 184 | if a.httpServer != nil { 185 | a.httpServer.Shutdown() 186 | } 187 | } 188 | 189 | // Shutdown gracefully shuts down the HTTP server. 190 | func (a *App) Shutdown() error { 191 | ctx, cancel := context.WithTimeout(context.Background(), a.stopWait) 192 | defer cancel() 193 | err := a.server.Shutdown(ctx) 194 | return err 195 | } 196 | 197 | // GetConnection returns the connection for the request. 198 | func GetConnection(r *http.Request) (net.Conn, error) { 199 | c, ok := r.Context().Value(connContextKey).(net.Conn) 200 | if !ok { 201 | return nil, errors.Err("cannot retrieve connection from request") 202 | } 203 | return c, nil 204 | } 205 | -------------------------------------------------------------------------------- /pkg/app/app_test.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | 7 | "github.com/OdyseeTeam/player-server/pkg/logger" 8 | 9 | "github.com/getsentry/sentry-go" 10 | "github.com/gin-gonic/gin" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestSentryMiddleware(t *testing.T) { 16 | logger.ConfigureSentry("test", logger.EnvTest) 17 | a := New(Opts{ 18 | Address: ":4999", 19 | }) 20 | 21 | a.Router.GET("/error", func(c *gin.Context) { 22 | if hub := sentry.GetHubFromContext(c.Request.Context()); hub != nil { 23 | hub.Scope().SetExtra("errored", "completely") 24 | } 25 | panic("y tho") 26 | }) 27 | a.Start() 28 | 29 | resp, err := http.Get("http://localhost:4999/error") 30 | require.NoError(t, err) 31 | require.Equal(t, http.StatusInternalServerError, resp.StatusCode) 32 | require.NotNil(t, logger.TestSentryTransport.LastEvent) 33 | assert.Equal(t, "y tho", logger.TestSentryTransport.LastEvent.Message) 34 | //assert.Equal(t, "completely", logger.TestSentryTransport.LastEvent.Extra["errored"]) //TODO: idk why I can't get this last thing to work 35 | 36 | assert.NoError(t, a.Shutdown()) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | const ( 10 | EnvTest = "test" 11 | EnvProd = "prod" 12 | ) 13 | 14 | var ( 15 | JsonFormatter = &logrus.JSONFormatter{DisableTimestamp: true} 16 | TextFormatter = &logrus.TextFormatter{FullTimestamp: true, TimestampFormat: "15:04:05"} 17 | level = logrus.InfoLevel 18 | formatter = TextFormatter 19 | loggers []*logrus.Logger 20 | ) 21 | 22 | func ConfigureDefaults(logLevel logrus.Level) { 23 | level = logLevel 24 | for _, logger := range loggers { 25 | logger.SetLevel(level) 26 | } 27 | } 28 | 29 | func GetLogger() *logrus.Logger { 30 | logger := logrus.New() 31 | logger.SetLevel(level) 32 | logger.SetFormatter(JsonFormatter) 33 | loggers = append(loggers, logger) 34 | return logger 
35 | } 36 | 37 | // DisableLogger turns off logging output for this module logger 38 | func DisableLogger(l *logrus.Logger) { 39 | l.SetLevel(logrus.PanicLevel) 40 | l.SetOutput(io.Discard) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/logger/sentry.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "net/http" 5 | "os" 6 | "time" 7 | 8 | "github.com/getsentry/sentry-go" 9 | sentrygin "github.com/getsentry/sentry-go/gin" 10 | ) 11 | 12 | var IgnoredExceptions = []string{} 13 | var SentryHandler = sentrygin.New(sentrygin.Options{ 14 | Repanic: true, 15 | WaitForDelivery: true, 16 | }) 17 | 18 | func ConfigureSentry(release, env string) { 19 | l := GetLogger() 20 | dsn := os.Getenv("SENTRY_DSN") 21 | playerName := os.Getenv("PLAYER_NAME") 22 | opts := sentry.ClientOptions{ 23 | Dsn: dsn, 24 | Release: release, 25 | AttachStacktrace: true, 26 | BeforeSend: filterEvent, 27 | ServerName: playerName, 28 | } 29 | 30 | if env == EnvTest { 31 | opts.Transport = TestSentryTransport 32 | } else if dsn == "" { 33 | l.Info("sentry disabled") 34 | return 35 | } 36 | 37 | err := sentry.Init(opts) 38 | if err != nil { 39 | l.Fatalf("sentry initialization failed: %v", err) 40 | } else { 41 | l.Info("sentry initialized") 42 | } 43 | } 44 | 45 | // SendToSentry sends an error to Sentry with optional extra details. 46 | func SendToSentry(err error, request *http.Request, details ...string) *sentry.EventID { 47 | extra := map[string]string{} 48 | var eventID *sentry.EventID 49 | for i := 0; i < len(details); i += 2 { 50 | if i+1 > len(details)-1 { 51 | break 52 | } 53 | extra[details[i]] = details[i+1] 54 | } 55 | 56 | sentry.WithScope(func(scope *sentry.Scope) { 57 | for k, v := range extra { 58 | scope.SetExtra(k, v) 59 | } 60 | scope.SetRequest(request) 61 | sentry.CaptureException(err) 62 | }) 63 | return eventID 64 | } 65 | 66 | func filterEvent(event *sentry.Event, hint *sentry.EventHint) *sentry.Event { 67 | for _, exc := range event.Exception { 68 | for _, ignored := range IgnoredExceptions { 69 | if exc.Value == ignored { 70 | return nil 71 | } 72 | } 73 | } 74 | return event 75 | } 76 | 77 | func Flush() { 78 | sentry.Flush(2 * time.Second) 79 | sentry.Recover() 80 | } 81 | -------------------------------------------------------------------------------- /pkg/logger/sentry_test.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "testing" 7 | 8 | "github.com/getsentry/sentry-go" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestSentryMiddleware(t *testing.T) { 14 | var event *sentry.Event 15 | ConfigureSentry("test", EnvTest) 16 | request, err := http.NewRequest(http.MethodGet, "http://player.tv/v6/stream", nil) 17 | require.NoError(t, err) 18 | 19 | SendToSentry(errors.New("general error"), request) 20 | event = TestSentryTransport.LastEvent 21 | require.NotNil(t, event) 22 | assert.Equal(t, "general error", event.Exception[0].Value) 23 | assert.Equal(t, "http://player.tv/v6/stream", event.Request.URL) 24 | assert.Empty(t, event.Extra) 25 | 26 | SendToSentry(errors.New("general error"), request, "user", "someUser") 27 | event = TestSentryTransport.LastEvent 28 | require.NotNil(t, event) 29 | assert.Equal(t, "general error", event.Exception[0].Value) 30 | assert.Equal(t, "http://player.tv/v6/stream", event.Request.URL) 
31 | assert.Equal(t, map[string]any{"user": "someUser"}, event.Extra) 32 | 33 | SendToSentry(errors.New("general error"), request, "user", "someUser", "dangling param") 34 | event = TestSentryTransport.LastEvent 35 | require.NotNil(t, event) 36 | assert.Equal(t, "general error", event.Exception[0].Value) 37 | assert.Equal(t, "http://player.tv/v6/stream", event.Request.URL) 38 | assert.Equal(t, map[string]any{"user": "someUser"}, event.Extra) 39 | } 40 | -------------------------------------------------------------------------------- /pkg/logger/testing.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/getsentry/sentry-go" 7 | ) 8 | 9 | var TestSentryTransport = &TestTransport{} 10 | 11 | type TestTransport struct { 12 | LastEvent *sentry.Event 13 | } 14 | 15 | func (t *TestTransport) Flush(timeout time.Duration) bool { 16 | return true 17 | } 18 | 19 | func (t *TestTransport) Configure(options sentry.ClientOptions) { 20 | } 21 | 22 | func (t *TestTransport) SendEvent(event *sentry.Event) { 23 | t.LastEvent = event 24 | } 25 | 26 | func (t *TestTransport) Close() { 27 | } 28 | -------------------------------------------------------------------------------- /pkg/mime/mime.go: -------------------------------------------------------------------------------- 1 | package mime 2 | 3 | func GetExtensionByType(t string) string { 4 | return extensions[t] 5 | } 6 | 7 | var extensions = map[string]string{ 8 | "application/andrew-inset": "ez", 9 | "application/annodex": "anx", 10 | 11 | "application/atom+xml": "atom", 12 | "application/atomcat+xml": "atomcat", 13 | 14 | "application/atomserv+xml": "atomsrv", 15 | 16 | "application/bbolin": "lin", 17 | 18 | "application/cu-seeme": "cu", 19 | 20 | "application/davmount+xml": "davmount", 21 | 22 | "application/dicom": "dcm", 23 | "application/dsptype": "tsp", 24 | 25 | "application/ecmascript": "es", 26 | "application/font-sfnt": "otf", 27 | "application/font-tdpfr": "pfr", 28 | "application/font-woff": "woff", 29 | "application/futuresplash": "spl", 30 | 31 | "application/gzip": "gz", 32 | "application/hta": "hta", 33 | "application/java-archive": "jar", 34 | "application/java-serialized-object": "ser", 35 | "application/java-vm": "class", 36 | "application/javascript": "js", 37 | "application/json": "json", 38 | "application/m3g": "m3g", 39 | "application/mac-binhex40": "hqx", 40 | "application/mac-compactpro": "cpt", 41 | "application/mathematica": "nb", 42 | "application/mbox": "mbox", 43 | "application/msaccess": "mdb", 44 | "application/msword": "doc", 45 | "application/mxf": "mxf", 46 | "application/octet-stream": "bin", 47 | "application/oda": "oda", 48 | "application/oebps-package+xml": "opf", 49 | "application/ogg": "ogx", 50 | "application/onenote": "one", 51 | "application/pdf": "pdf", 52 | "application/pgp-encrypted": "pgp", 53 | "application/pgp-keys": "key", 54 | "application/pgp-signature": "sig", 55 | "application/pics-rules": "prf", 56 | "application/postscript": "ps", 57 | "application/rar": "rar", 58 | "application/rdf+xml": "rdf", 59 | "application/rtf": "rtf", 60 | "application/sla": "stl", 61 | "application/smil+xml": "smi", 62 | "application/xhtml+xml": "xhtml", 63 | "application/xml": "xml", 64 | "application/xslt+xml": "xsl", 65 | "application/xspf+xml": "xspf", 66 | "application/zip": "zip", 67 | "application/vnd.android.package-archive": "apk", 68 | "application/vnd.cinderella": "cdy", 69 | 
"application/vnd.debian.binary-package": "deb", 70 | "application/vnd.font-fontforge-sfd": "sfd", 71 | "application/vnd.google-earth.kml+xml": "kml", 72 | "application/vnd.google-earth.kmz": "kmz", 73 | "application/vnd.mozilla.xul+xml": "xul", 74 | 75 | "application/vnd.ms-excel": "xls", 76 | "application/vnd.ms-excel.addin.macroEnabled.12": "xlam", 77 | "application/vnd.ms-excel.sheet.binary.macroEnabled.12": "xlsb", 78 | "application/vnd.ms-excel.sheet.macroEnabled.12": "xlsm", 79 | "application/vnd.ms-excel.template.macroEnabled.12": "xltm", 80 | "application/vnd.ms-fontobject": "eot", 81 | 82 | "application/vnd.ms-officetheme": "thmx", 83 | "application/vnd.ms-pki.seccat": "cat", 84 | "application/vnd.ms-pki.stl": "stl", 85 | "application/vnd.ms-powerpoint": "ppt", 86 | "application/vnd.ms-powerpoint.addin.macroEnabled.12": "ppam", 87 | "application/vnd.ms-powerpoint.presentation.macroEnabled.12": "pptm", 88 | "application/vnd.ms-powerpoint.slide.macroEnabled.12": "sldm", 89 | "application/vnd.ms-powerpoint.slideshow.macroEnabled.12": "ppsm", 90 | "application/vnd.ms-powerpoint.template.macroEnabled.12": "potm", 91 | "application/vnd.ms-word.document.macroEnabled.12": "docm", 92 | "application/vnd.ms-word.template.macroEnabled.12": "dotm", 93 | "application/vnd.oasis.opendocument.chart": "odc", 94 | "application/vnd.oasis.opendocument.database": "odb", 95 | "application/vnd.oasis.opendocument.formula": "odf", 96 | "application/vnd.oasis.opendocument.graphics": "odg", 97 | "application/vnd.oasis.opendocument.graphics-template": "otg", 98 | "application/vnd.oasis.opendocument.image": "odi", 99 | "application/vnd.oasis.opendocument.presentation": "odp", 100 | "application/vnd.oasis.opendocument.presentation-template": "otp", 101 | "application/vnd.oasis.opendocument.spreadsheet": "ods", 102 | "application/vnd.oasis.opendocument.spreadsheet-template": "ots", 103 | "application/vnd.oasis.opendocument.text": "odt", 104 | "application/vnd.oasis.opendocument.text-master": "odm", 105 | "application/vnd.oasis.opendocument.text-template": "ott", 106 | "application/vnd.oasis.opendocument.text-web": "oth", 107 | "application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx", 108 | "application/vnd.openxmlformats-officedocument.presentationml.slide": "sldx", 109 | "application/vnd.openxmlformats-officedocument.presentationml.slideshow": "ppsx", 110 | "application/vnd.openxmlformats-officedocument.presentationml.template": "potx", 111 | "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx", 112 | "application/vnd.openxmlformats-officedocument.spreadsheetml.template": "xltx", 113 | "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx", 114 | "application/vnd.openxmlformats-officedocument.wordprocessingml.template": "dotx", 115 | 116 | "application/vnd.rim.cod": "cod", 117 | "application/vnd.smaf": "mmf", 118 | "application/vnd.stardivision.calc": "sdc", 119 | "application/vnd.stardivision.chart": "sds", 120 | "application/vnd.stardivision.draw": "sda", 121 | "application/vnd.stardivision.impress": "sdd", 122 | "application/vnd.stardivision.math": "sdf", 123 | "application/vnd.stardivision.writer": "sdw", 124 | "application/vnd.stardivision.writer-global": "sgl", 125 | 126 | "application/vnd.sun.xml.calc": "sxc", 127 | "application/vnd.sun.xml.calc.template": "stc", 128 | "application/vnd.sun.xml.draw": "sxd", 129 | "application/vnd.sun.xml.draw.template": "std", 130 | "application/vnd.sun.xml.impress": "sxi", 131 | 
"application/vnd.sun.xml.impress.template": "sti", 132 | "application/vnd.sun.xml.math": "sxm", 133 | "application/vnd.sun.xml.writer": "sxw", 134 | "application/vnd.sun.xml.writer.global": "sxg", 135 | "application/vnd.sun.xml.writer.template": "stw", 136 | "application/vnd.symbian.install": "sis", 137 | "application/vnd.tcpdump.pcap": "cap", 138 | "application/vnd.visio": "vsd", 139 | "application/vnd.wap.wbxml": "wbxml", 140 | "application/vnd.wap.wmlc": "wmlc", 141 | "application/vnd.wap.wmlscriptc": "wmlsc", 142 | 143 | "application/vnd.wordperfect": "wpd", 144 | "application/vnd.wordperfect5.1": "wp5", 145 | "application/x-123": "wk", 146 | "application/x-7z-compressed": "7z", 147 | "application/x-abiword": "abw", 148 | "application/x-apple-diskimage": "dmg", 149 | "application/x-bcpio": "bcpio", 150 | "application/x-bittorrent": "torrent", 151 | "application/x-cab": "cab", 152 | "application/x-cbr": "cbr", 153 | "application/x-cbz": "cbz", 154 | "application/x-cdf": "cdf", 155 | "application/x-cdlink": "vcd", 156 | "application/x-chess-pgn": "pgn", 157 | "application/x-comsol": "mph", 158 | 159 | "application/x-cpio": "cpio", 160 | "application/x-csh": "csh", 161 | "application/x-debian-package": "deb", 162 | "application/x-director": "dcr", 163 | "application/x-dms": "dms", 164 | "application/x-doom": "wad", 165 | "application/x-dvi": "dvi", 166 | 167 | "application/x-font": "pfa", 168 | "application/x-font-pcf": "pcf", 169 | "application/x-freemind": "mm", 170 | "application/x-futuresplash": "spl", 171 | "application/x-ganttproject": "gan", 172 | "application/x-gnumeric": "gnumeric", 173 | "application/x-go-sgf": "sgf", 174 | "application/x-graphing-calculator": "gcf", 175 | "application/x-gtar": "gtar", 176 | "application/x-gtar-compressed": "tgz", 177 | "application/x-hdf": "hdf", 178 | "application/x-httpd-eruby": "rhtml", 179 | "application/x-httpd-php": "phtml", 180 | "application/x-httpd-php-source": "phps", 181 | "application/x-httpd-php3": "php3", 182 | "application/x-httpd-php3-preprocessed": "php3p", 183 | "application/x-httpd-php4": "php4", 184 | "application/x-httpd-php5": "php5", 185 | "application/x-hwp": "hwp", 186 | "application/x-ica": "ica", 187 | "application/x-info": "info", 188 | "application/x-internet-signup": "ins", 189 | "application/x-iphone": "iii", 190 | "application/x-iso9660-image": "iso", 191 | "application/x-jam": "jam", 192 | "application/x-java-jnlp-file": "jnlp", 193 | "application/x-jmol": "jmz", 194 | "application/x-kchart": "chrt", 195 | 196 | "application/x-killustrator": "kil", 197 | "application/x-koan": "skp", 198 | "application/x-kpresenter": "kpr", 199 | "application/x-kspread": "ksp", 200 | "application/x-kword": "kwd", 201 | "application/x-latex": "latex", 202 | "application/x-lha": "lha", 203 | "application/x-lyx": "lyx", 204 | "application/x-lzh": "lzh", 205 | "application/x-lzx": "lzx", 206 | "application/x-maker": "frm", 207 | "application/x-mif": "mif", 208 | "application/x-mpegURL": "m3u8", 209 | "application/x-ms-application": "application", 210 | "application/x-ms-manifest": "manifest", 211 | "application/x-ms-wmd": "wmd", 212 | "application/x-ms-wmz": "wmz", 213 | "application/x-msdos-program": "com", 214 | "application/x-msi": "msi", 215 | "application/x-netcdf": "nc", 216 | "application/x-ns-proxy-autoconfig": "pac", 217 | "application/x-nwc": "nwc", 218 | "application/x-object": "o", 219 | "application/x-oz-application": "oza", 220 | "application/x-pkcs7-certreqresp": "p7r", 221 | "application/x-pkcs7-crl": "crl", 222 | 
"application/x-python-code": "pyc", 223 | "application/x-qgis": "qgs", 224 | "application/x-quicktimeplayer": "qtl", 225 | "application/x-rdp": "rdp", 226 | "application/x-redhat-package-manager": "rpm", 227 | "application/x-rss+xml": "rss", 228 | "application/x-ruby": "rb", 229 | "application/x-scilab": "sci", 230 | "application/x-scilab-xcos": "xcos", 231 | "application/x-sh": "sh", 232 | "application/x-shar": "shar", 233 | "application/x-shockwave-flash": "swf", 234 | "application/x-silverlight": "scr", 235 | "application/x-sql": "sql", 236 | "application/x-stuffit": "sit", 237 | "application/x-sv4cpio": "sv4cpio", 238 | "application/x-sv4crc": "sv4crc", 239 | "application/x-tar": "tar", 240 | "application/x-tcl": "tcl", 241 | "application/x-tex-gf": "gf", 242 | "application/x-tex-pk": "pk", 243 | "application/x-texinfo": "texinfo", 244 | "application/x-trash": "old", 245 | "application/x-troff": "t", 246 | "application/x-troff-man": "man", 247 | "application/x-troff-me": "me", 248 | "application/x-troff-ms": "ms", 249 | "application/x-ustar": "ustar", 250 | "application/x-wais-source": "src", 251 | "application/x-wingz": "wz", 252 | "application/x-x509-ca-cert": "crt", 253 | "application/x-xcf": "xcf", 254 | "application/x-xfig": "fig", 255 | "application/x-xpinstall": "xpi", 256 | "application/x-xz": "xz", 257 | "audio/amr": "amr", 258 | "audio/amr-wb": "awb", 259 | "audio/annodex": "axa", 260 | "audio/basic": "au", 261 | "audio/csound": "csd", 262 | "audio/flac": "flac", 263 | "audio/midi": "mid", 264 | "audio/mpeg": "mp3", 265 | "audio/mpegurl": "m3u", 266 | "audio/ogg": "oga", 267 | "audio/prs.sid": "sid", 268 | "audio/x-aiff": "aif", 269 | "audio/x-gsm": "gsm", 270 | "audio/x-mpegurl": "m3u", 271 | "audio/x-ms-wma": "wma", 272 | "audio/x-ms-wax": "wax", 273 | "audio/x-pn-realaudio": "ra", 274 | "audio/x-realaudio": "ra", 275 | "audio/x-scpls": "pls", 276 | "audio/x-sd2": "sd2", 277 | "audio/x-wav": "wav", 278 | "chemical/x-alchemy": "alc", 279 | "chemical/x-cache": "cac", 280 | "chemical/x-cache-csf": "csf", 281 | "chemical/x-cactvs-binary": "cbin", 282 | "chemical/x-cdx": "cdx", 283 | "chemical/x-cerius": "cer", 284 | "chemical/x-chem3d": "c3d", 285 | "chemical/x-chemdraw": "chm", 286 | "chemical/x-cif": "cif", 287 | "chemical/x-cmdf": "cmdf", 288 | "chemical/x-cml": "cml", 289 | "chemical/x-compass": "cpa", 290 | "chemical/x-crossfire": "bsd", 291 | "chemical/x-csml": "csml", 292 | "chemical/x-ctx": "ctx", 293 | "chemical/x-cxf": "cxf", 294 | "chemical/x-daylight-smiles": "smi", 295 | "chemical/x-embl-dl-nucleotide": "emb", 296 | "chemical/x-galactic-spc": "spc", 297 | "chemical/x-gamess-input": "inp", 298 | "chemical/x-gaussian-checkpoint": "fch", 299 | "chemical/x-gaussian-cube": "cub", 300 | "chemical/x-gaussian-input": "gau", 301 | "chemical/x-gaussian-log": "gal", 302 | "chemical/x-gcg8-sequence": "gcg", 303 | "chemical/x-genbank": "gen", 304 | "chemical/x-hin": "hin", 305 | "chemical/x-isostar": "istr", 306 | "chemical/x-jcamp-dx": "jdx", 307 | "chemical/x-kinemage": "kin", 308 | "chemical/x-macmolecule": "mcm", 309 | "chemical/x-macromodel-input": "mmd", 310 | "chemical/x-mdl-molfile": "mol", 311 | "chemical/x-mdl-rdfile": "rd", 312 | "chemical/x-mdl-rxnfile": "rxn", 313 | "chemical/x-mdl-sdfile": "sd", 314 | "chemical/x-mdl-tgf": "tgf", 315 | "chemical/x-mif": "mif", 316 | "chemical/x-mmcif": "mcif", 317 | "chemical/x-mol2": "mol2", 318 | "chemical/x-molconn-Z": "b", 319 | "chemical/x-mopac-graph": "gpt", 320 | "chemical/x-mopac-input": "mop", 321 | 
"chemical/x-mopac-out": "moo", 322 | "chemical/x-mopac-vib": "mvb", 323 | "chemical/x-ncbi-asn1": "asn", 324 | "chemical/x-ncbi-asn1-ascii": "prt", 325 | "chemical/x-ncbi-asn1-binary": "val", 326 | "chemical/x-ncbi-asn1-spec": "asn", 327 | "chemical/x-pdb": "pdb", 328 | "chemical/x-rosdal": "ros", 329 | "chemical/x-swissprot": "sw", 330 | "chemical/x-vamas-iso14976": "vms", 331 | "chemical/x-vmd": "vmd", 332 | "chemical/x-xtel": "xtel", 333 | "chemical/x-xyz": "xyz", 334 | "image/gif": "gif", 335 | "image/ief": "ief", 336 | "image/jp2": "jp2", 337 | "image/jpeg": "jpeg", 338 | "image/jpm": "jpm", 339 | "image/jpx": "jpx", 340 | "image/pcx": "pcx", 341 | "image/png": "png", 342 | "image/svg+xml": "svg", 343 | "image/tiff": "tiff", 344 | "image/vnd.djvu": "djvu", 345 | "image/vnd.microsoft.icon": "ico", 346 | "image/vnd.wap.wbmp": "wbmp", 347 | "image/x-canon-cr2": "cr2", 348 | "image/x-canon-crw": "crw", 349 | "image/x-cmu-raster": "ras", 350 | "image/x-coreldraw": "cdr", 351 | "image/x-coreldrawpattern": "pat", 352 | "image/x-coreldrawtemplate": "cdt", 353 | "image/x-corelphotopaint": "cpt", 354 | "image/x-epson-erf": "erf", 355 | "image/x-jg": "art", 356 | "image/x-jng": "jng", 357 | "image/x-ms-bmp": "bmp", 358 | "image/x-nikon-nef": "nef", 359 | "image/x-olympus-orf": "orf", 360 | "image/x-photoshop": "psd", 361 | "image/x-portable-anymap": "pnm", 362 | "image/x-portable-bitmap": "pbm", 363 | "image/x-portable-graymap": "pgm", 364 | "image/x-portable-pixmap": "ppm", 365 | "image/x-rgb": "rgb", 366 | "image/x-xbitmap": "xbm", 367 | "image/x-xpixmap": "xpm", 368 | "image/x-xwindowdump": "xwd", 369 | "message/rfc822": "eml", 370 | "model/iges": "igs", 371 | "model/mesh": "msh", 372 | "model/vrml": "wrl", 373 | "model/x3d+vrml": "x3dv", 374 | "model/x3d+xml": "x3d", 375 | "model/x3d+binary": "x3db", 376 | "text/cache-manifest": "appcache", 377 | "text/calendar": "ics", 378 | "text/css": "css", 379 | "text/csv": "csv", 380 | "text/h323": "323", 381 | "text/html": "html", 382 | "text/iuls": "uls", 383 | "text/mathml": "mml", 384 | "text/markdown": "md", 385 | "text/plain": "asc", 386 | "text/richtext": "rtx", 387 | "text/scriptlet": "sct", 388 | "text/texmacs": "tm", 389 | "text/tab-separated-values": "tsv", 390 | "text/turtle": "ttl", 391 | "text/vcard": "vcf", 392 | "text/vnd.sun.j2me.app-descriptor": "jad", 393 | "text/vnd.wap.wml": "wml", 394 | "text/vnd.wap.wmlscript": "wmls", 395 | "text/x-bibtex": "bib", 396 | "text/x-boo": "boo", 397 | "text/x-c++hdr": "hpp", 398 | "text/x-c++src": "cpp", 399 | "text/x-chdr": "h", 400 | "text/x-component": "htc", 401 | "text/x-csh": "csh", 402 | "text/x-csrc": "c", 403 | "text/x-dsrc": "d", 404 | "text/x-diff": "diff", 405 | "text/x-haskell": "hs", 406 | "text/x-java": "java", 407 | "text/x-lilypond": "ly", 408 | "text/x-literate-haskell": "lhs", 409 | "text/x-moc": "moc", 410 | "text/x-pascal": "p", 411 | "text/x-pcs-gcd": "gcd", 412 | "text/x-perl": "pl", 413 | "text/x-python": "py", 414 | "text/x-scala": "scala", 415 | "text/x-setext": "etx", 416 | "text/x-sfv": "sfv", 417 | "text/x-sh": "sh", 418 | "text/x-tcl": "tcl", 419 | "text/x-tex": "tex", 420 | "text/x-vcalendar": "vcs", 421 | "video/3gpp": "3gp", 422 | "video/annodex": "axv", 423 | "video/dl": "dl", 424 | "video/dv": "dif", 425 | "video/fli": "fli", 426 | "video/gl": "gl", 427 | "video/mpeg": "mpeg", 428 | "video/MP2T": "ts", 429 | "video/mp4": "mp4", 430 | "video/quicktime": "qt", 431 | "video/ogg": "ogv", 432 | "video/webm": "webm", 433 | "video/vnd.mpegurl": "mxu", 434 | "video/x-flv": 
"flv", 435 | "video/x-la-asf": "lsf", 436 | "video/x-mng": "mng", 437 | "video/x-ms-asf": "asf", 438 | "video/x-ms-wm": "wm", 439 | "video/x-ms-wmv": "wmv", 440 | "video/x-ms-wmx": "wmx", 441 | "video/x-ms-wvx": "wvx", 442 | "video/x-msvideo": "avi", 443 | "video/x-sgi-movie": "movie", 444 | "video/x-matroska": "mpv", 445 | "x-conference/x-cooltalk": "ice", 446 | "x-epoc/x-sisx-app": "sisx", 447 | "x-world/x-vrml": "vrml", 448 | } 449 | -------------------------------------------------------------------------------- /pkg/paid/handlers.go: -------------------------------------------------------------------------------- 1 | package paid 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | // HandlePublicKeyRequest delivers marshaled pubkey to remote agents for media token verification 8 | func HandlePublicKeyRequest(w http.ResponseWriter, r *http.Request) { 9 | w.Write(km.PublicKeyBytes()) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/paid/handlers_test.go: -------------------------------------------------------------------------------- 1 | package paid 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestHandlePublicKeyRequest(t *testing.T) { 14 | r, err := http.NewRequest(http.MethodGet, "", nil) 15 | require.NoError(t, err) 16 | 17 | rr := httptest.NewRecorder() 18 | HandlePublicKeyRequest(rr, r) 19 | 20 | response := rr.Result() 21 | key, err := io.ReadAll(response.Body) 22 | require.NoError(t, err) 23 | assert.Equal(t, http.StatusOK, response.StatusCode) 24 | assert.NotZero(t, key) 25 | assert.Equal(t, km.PublicKeyBytes(), key) 26 | } 27 | -------------------------------------------------------------------------------- /pkg/paid/paid.go: -------------------------------------------------------------------------------- 1 | package paid 2 | 3 | // Key files should be generated as follows: 4 | // $ openssl genrsa -traditional -out privateKey.pem 2048 5 | 6 | import ( 7 | "crypto/rand" 8 | "crypto/rsa" 9 | "crypto/x509" 10 | "encoding/pem" 11 | "errors" 12 | "fmt" 13 | "time" 14 | 15 | "github.com/OdyseeTeam/player-server/pkg/logger" 16 | 17 | "github.com/golang-jwt/jwt/v4" 18 | ) 19 | 20 | var Logger = logger.GetLogger() 21 | 22 | // Expfunc is a function type intended for CreateToken. 23 | // Should take stream size in bytes and return validity as Unix time 24 | type Expfunc func(uint64) int64 25 | 26 | // StreamToken contains stream ID and transaction id of a stream that has been purchased. 27 | // StreamID can be any string uniquely identifying a stream, has to be consistent 28 | // for both API server and player server 29 | type StreamToken struct { 30 | StreamID string `json:"sid"` 31 | TxID string `json:"txid"` 32 | jwt.StandardClaims 33 | } 34 | 35 | type keyManager struct { 36 | privKey *rsa.PrivateKey 37 | pubKeyMarshaled []byte 38 | } 39 | 40 | var km *keyManager 41 | 42 | // InitPrivateKey loads a private key from `[]bytes` for later token signing and derived pubkey distribution. 43 | func InitPrivateKey(rawKey []byte) error { 44 | km = &keyManager{} 45 | err := km.loadFromBytes(rawKey) 46 | if err != nil { 47 | return err 48 | } 49 | return nil 50 | } 51 | 52 | // CreateToken takes stream ID, purchase transaction id and stream size to generate a JWS. 
53 | // In addition it accepts expiry function that takes streamSize and returns token expiry date as Unix time 54 | func CreateToken(streamID string, txid string, streamSize uint64, expfunc Expfunc) (string, error) { 55 | return km.createToken(streamID, txid, streamSize, expfunc) 56 | } 57 | 58 | // GeneratePrivateKey generates an in-memory private key 59 | func GeneratePrivateKey() error { 60 | privKey, err := rsa.GenerateKey(rand.Reader, 2048) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | k := &keyManager{privKey: privKey} 66 | k.pubKeyMarshaled, err = k.marshalPublicKey() 67 | if err != nil { 68 | return err 69 | } 70 | Logger.Infof("generated an in-memory private key") 71 | 72 | km = k 73 | err = InitPubKey(k.PublicKeyBytes()) 74 | if err != nil { 75 | return err 76 | } 77 | return nil 78 | } 79 | 80 | func (k *keyManager) createToken(streamID string, txid string, streamSize uint64, expfunc Expfunc) (string, error) { 81 | if k.privKey == nil { 82 | return "", fmt.Errorf("cannot create a token, private key is not initialized (call InitPrivateKey)") 83 | } 84 | token := jwt.NewWithClaims(jwt.SigningMethodRS256, &StreamToken{ 85 | streamID, 86 | txid, 87 | jwt.StandardClaims{ 88 | ExpiresAt: expfunc(streamSize), 89 | IssuedAt: time.Now().UTC().Unix(), 90 | }, 91 | }) 92 | Logger.Debugf("created a token %v / %v", token.Header, token.Claims) 93 | return token.SignedString(k.privKey) 94 | } 95 | 96 | func (k *keyManager) loadFromBytes(b []byte) error { 97 | block, _ := pem.Decode(b) 98 | if block == nil { 99 | return errors.New("no PEM blob found") 100 | } 101 | key, err := x509.ParsePKCS1PrivateKey(block.Bytes) 102 | if err != nil { 103 | return err 104 | } 105 | k.privKey = key 106 | Logger.Infof("loaded a private RSA key (%v bytes)", k.privKey.Size()) 107 | 108 | k.pubKeyMarshaled, err = k.marshalPublicKey() 109 | if err != nil { 110 | return err 111 | } 112 | 113 | return nil 114 | } 115 | 116 | func (k *keyManager) PublicKeyBytes() []byte { 117 | return k.pubKeyMarshaled 118 | } 119 | 120 | func (k *keyManager) PublicKeyManager() *pubKeyManager { 121 | return &pubKeyManager{key: &k.privKey.PublicKey} 122 | } 123 | 124 | func (k *keyManager) marshalPublicKey() ([]byte, error) { 125 | pubKey, err := x509.MarshalPKIXPublicKey(&k.privKey.PublicKey) 126 | if err != nil { 127 | return nil, err 128 | } 129 | 130 | pubBytes := pem.EncodeToMemory(&pem.Block{ 131 | Type: "RSA PUBLIC KEY", 132 | Bytes: pubKey, 133 | }) 134 | 135 | return pubBytes, nil 136 | } 137 | 138 | // ExpTenSecPer100MB returns expiry date as calculated by 10 seconds times the stream size in MB. 139 | // Note: this does not actually accept entire uint64 range and will oveflow and return dates in the past 140 | // with multi-petabyte sizes. 
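// A minimal sketch of how the pieces in this package fit together (illustrative only:
// pemBytes, pubKeyBytes, streamID, txID and size are placeholders, and error handling is elided):
//
//	// Signing side: load or generate a private key, then issue tokens.
//	_ = InitPrivateKey(pemBytes) // or GeneratePrivateKey()
//	token, _ := CreateToken(streamID, txID, size, ExpTenSecPer100MB)
//
//	// Verifying side: load the marshaled public key (served by HandlePublicKeyRequest),
//	// then validate tokens presented by clients.
//	_ = InitPubKey(pubKeyBytes)
//	err := VerifyStreamAccess(streamID, token)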
141 | func ExpTenSecPer100MB(streamSize uint64) int64 {
142 | expiresAt := time.Now().UTC().Add(time.Duration(streamSize/1024/1024*10) * time.Second)
143 | return expiresAt.Unix()
144 | }
145 | 
--------------------------------------------------------------------------------
/pkg/paid/paid_test.go:
--------------------------------------------------------------------------------
1 | package paid
2 | 
3 | import (
4 | "encoding/pem"
5 | "fmt"
6 | mrand "math/rand"
7 | "os"
8 | "os/exec"
9 | "runtime"
10 | "testing"
11 | "time"
12 | 
13 | "github.com/stretchr/testify/assert"
14 | "github.com/stretchr/testify/require"
15 | )
16 | 
17 | var testStreamID = "bea4d30a1868a00e98297cfe8cdefc1be6c141b54bea3b7c95b34a66786c22ab4e9f35ae19aa453b3630e76afbd24fe2"
18 | var testTxID = "e5a6a9ef4433b8868cebb770d1d2244eed410eb7503a9ffa82dce40aa62bbae7"
19 | 
20 | func generateKeyFile() (string, error) {
21 | keyFilename := fmt.Sprintf("test_private_key_%v.pem", time.Now().Unix())
22 | var cmd string
23 | if runtime.GOOS == "darwin" {
24 | cmd = fmt.Sprintf("openssl genrsa -out %s 2048", keyFilename)
25 | } else {
26 | cmd = fmt.Sprintf("openssl genrsa -out %s -traditional 2048", keyFilename)
27 | }
28 | if out, err := exec.Command("sh", "-c", cmd).Output(); err != nil {
29 | return "", fmt.Errorf("command %s failed: %s (%s)", cmd, err, string(out))
30 | }
31 | return keyFilename, nil
32 | }
33 | 
34 | func TestMain(m *testing.M) {
35 | keyFile, err := generateKeyFile()
36 | if err != nil {
37 | panic(err)
38 | }
39 | 
40 | rawKey, err := os.ReadFile(keyFile)
41 | if err != nil {
42 | panic(err)
43 | }
44 | 
45 | err = InitPrivateKey(rawKey)
46 | if err != nil {
47 | panic(err)
48 | }
49 | 
50 | pubKM = &pubKeyManager{}
51 | err = pubKM.loadFromBytes(km.PublicKeyBytes())
52 | if err != nil {
53 | panic(err)
54 | }
55 | 
56 | code := m.Run()
57 | os.Remove(keyFile) // defers do not run after os.Exit()
58 | 
59 | os.Exit(code)
60 | }
61 | 
62 | func TestCreateToken(t *testing.T) {
63 | size := uint64(mrand.Int63n(1000_000_000))
64 | token, err := CreateToken(testStreamID, testTxID, size, ExpTenSecPer100MB)
65 | require.NoError(t, err)
66 | 
67 | streamToken, err := km.PublicKeyManager().ValidateToken(token)
68 | require.NoError(t, err)
69 | assert.Equal(t, streamToken.StreamID, testStreamID)
70 | assert.Equal(t, streamToken.TxID, testTxID)
71 | }
72 | 
73 | func TestPublicKeyBytes(t *testing.T) {
74 | b, r := pem.Decode(km.PublicKeyBytes())
75 | assert.NotNil(t, b)
76 | assert.Empty(t, r)
77 | }
78 | 
79 | func BenchmarkCreateToken(b *testing.B) {
80 | for i := 0; i < b.N; i++ {
81 | if _, err := CreateToken(testStreamID, testTxID, 100_000_000, ExpTenSecPer100MB); err != nil {
82 | b.Fatal(err)
83 | }
84 | }
85 | }
86 | 
--------------------------------------------------------------------------------
/pkg/paid/validator.go:
--------------------------------------------------------------------------------
1 | package paid
2 | 
3 | import (
4 | "crypto/rsa"
5 | "crypto/x509"
6 | "encoding/pem"
7 | "errors"
8 | "fmt"
9 | 
10 | "github.com/golang-jwt/jwt/v4"
11 | )
12 | 
13 | type pubKeyManager struct {
14 | key *rsa.PublicKey
15 | }
16 | 
17 | var pubKM *pubKeyManager
18 | 
19 | // InitPubKey should be called with the marshaled public key (PEM bytes) before VerifyStreamAccess can be called
20 | func InitPubKey(rawKey []byte) error {
21 | k := &pubKeyManager{}
22 | err := k.loadFromBytes(rawKey)
23 | pubKM = k
24 | return err
25 | }
26 | 
27 | // VerifyStreamAccess is the main entry point for players to validate paid media tokens
28 | func
VerifyStreamAccess(streamID string, stringToken string) error {
29 | t, err := pubKM.ValidateToken(stringToken)
30 | if err != nil {
31 | return err
32 | }
33 | if t.StreamID != streamID {
34 | return fmt.Errorf("stream mismatch: requested %v, token valid for %v", streamID, t.StreamID)
35 | }
36 | return nil
37 | }
38 | 
39 | func (k *pubKeyManager) loadFromBytes(b []byte) error {
40 | block, _ := pem.Decode(b)
41 | if block == nil {
42 | return errors.New("no PEM blob found")
43 | }
44 | pubKey, err := x509.ParsePKIXPublicKey(block.Bytes)
45 | if err != nil {
46 | return err
47 | }
48 | key := pubKey.(*rsa.PublicKey)
49 | k.key = key
50 | Logger.Infof("loaded a public RSA key (%v bytes)", key.Size())
51 | return nil
52 | }
53 | 
54 | // ValidateToken parses a serialized JWS stream token, verifies its signature and expiry date, and returns a StreamToken
55 | func (k *pubKeyManager) ValidateToken(stringToken string) (*StreamToken, error) {
56 | token, err := jwt.ParseWithClaims(stringToken, &StreamToken{}, func(token *jwt.Token) (interface{}, error) {
57 | return k.key, nil
58 | })
59 | if err != nil {
60 | Logger.Debugf("token is not valid: %v", err)
61 | return nil, err
62 | }
63 | if streamToken, ok := token.Claims.(*StreamToken); ok && token.Valid {
64 | Logger.Debugf("token validated")
65 | return streamToken, nil
66 | }
67 | return nil, errors.New("invalid stream token")
68 | }
69 | 
--------------------------------------------------------------------------------
/pkg/paid/validator_test.go:
--------------------------------------------------------------------------------
1 | package paid
2 | 
3 | import (
4 | "crypto/rand"
5 | "crypto/rsa"
6 | "testing"
7 | 
8 | "github.com/stretchr/testify/assert"
9 | "github.com/stretchr/testify/require"
10 | )
11 | 
12 | func TestVerifyStreamAccess(t *testing.T) {
13 | noError := func(t *testing.T, err error) { assert.NoError(t, err) }
14 | type tokenMaker func() (string, error)
15 | type errChecker func(*testing.T, error)
16 | 
17 | tests := []struct {
18 | name string
19 | makeToken tokenMaker
20 | checkError errChecker
21 | }{
22 | {
23 | name: "valid",
24 | makeToken: func() (string, error) {
25 | return CreateToken(testStreamID, testTxID, 120_000_000, ExpTenSecPer100MB)
26 | },
27 | checkError: noError,
28 | },
29 | {
30 | name: "expired",
31 | makeToken: func() (string, error) {
32 | expFunc := func(uint64) int64 { return 1 } // Returns the 1st second of Unix epoch
33 | return CreateToken(testStreamID, testTxID, 120_000_000, expFunc)
34 | },
35 | checkError: func(t *testing.T, err error) { assert.Regexp(t, "token is expired by \\d+h\\d+m\\d+s", err) },
36 | },
37 | {
38 | name: "missigned",
39 | makeToken: func() (string, error) {
40 | otherPkey, _ := rsa.GenerateKey(rand.Reader, 2048)
41 | otherKM := &keyManager{privKey: otherPkey}
42 | return otherKM.createToken(testStreamID, testTxID, 120_000_000, ExpTenSecPer100MB)
43 | },
44 | checkError: func(t *testing.T, err error) { assert.EqualError(t, err, "crypto/rsa: verification error") },
45 | },
46 | {
47 | name: "wrong_stream",
48 | makeToken: func() (string, error) {
49 | return CreateToken("wrOngsTream", testTxID, 120_000_000, ExpTenSecPer100MB)
50 | },
51 | checkError: func(t *testing.T, err error) {
52 | assert.EqualError(t, err, "stream mismatch: requested bea4d30a1868a00e98297cfe8cdefc1be6c141b54bea3b7c95b34a66786c22ab4e9f35ae19aa453b3630e76afbd24fe2, token valid for wrOngsTream")
53 | },
54 | },
55 | {
56 | name: "wrong_token",
57 | makeToken: func() (string, error) {
58 | return
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzaWQiOiJpT1MtMTMtQWRvYmVYRC85Y2QyZTkzYmZjNzUyZGQ2NTYwZTQzNjIzZjM2ZDBjMzUwNGRiY2E2IiwidHhpZCI6Ijg2NzhiY2Y0NzAxNGZhMDA0ZjU3ZjQ2NTE2NjdjYWM2MmRmMzk5MDA2NjIyOTg2YjI1YTNmNWY2YTAwYTViNTgiLCJleHAiOjE1ODk5MDg3ODUsImlhdCI6MTU4OTgyNTI3OX0.igcb5DBB_XDvPqJRFIg3rurHfCds6UoPyV37oMvxnDsGG5WOK6VAcQJXZPkEH2LUxvebRsI9exLhUJUlqqwYxduiBlM9VHrj-7SmC5FNOrkZ4iZ5gfOS-Tyc5b4GwslACC6JBtlx_tL4qOeCxXDyEfEpnLvZVy_SlKdiwBWASZA0E56NLDVW74wQYkEWy17mK_stXcySopmjkbJQ8hDhw93NtcGy4MYVgPJviq-c4YHm8boDKzxbp_CEK5hyUh5jxxC1F2yIMZiHWBS6Dy1YrhFW5jDUh-Y-09B6abAVxCm052Ut6RHPaM9rxRgLJ9QZXdb8iFq6NG32XIWffVn-Jz", nil 59 | }, 60 | checkError: func(t *testing.T, err error) { 61 | assert.EqualError(t, err, "crypto/rsa: verification error") 62 | }, 63 | }, 64 | { 65 | name: "partial_token", 66 | makeToken: func() (string, error) { 67 | return "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9", nil 68 | }, 69 | checkError: func(t *testing.T, err error) { 70 | assert.EqualError(t, err, "token contains an invalid number of segments") 71 | }, 72 | }, 73 | } 74 | 75 | for _, tt := range tests { 76 | t.Run(tt.name, func(t *testing.T) { 77 | token, err := tt.makeToken() 78 | require.NoError(t, err) 79 | 80 | err = VerifyStreamAccess(testStreamID, token) 81 | tt.checkError(t, err) 82 | }) 83 | } 84 | } 85 | 86 | func BenchmarkParseToken(b *testing.B) { 87 | token, err := CreateToken(testStreamID, testTxID, 100_000_000, ExpTenSecPer100MB) 88 | require.NoError(b, err) 89 | 90 | for i := 0; i < b.N; i++ { 91 | if err := VerifyStreamAccess(testStreamID, token); err != nil { 92 | b.Fatal(err) 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /player/chunk.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/lbryio/lbry.go/v2/stream" 7 | ) 8 | 9 | const ( 10 | // MaxChunkSize is the max size of decrypted blob. 11 | MaxChunkSize = stream.MaxBlobSize - 1 12 | 13 | // DefaultPrefetchLen is how many blobs we should prefetch ahead. 14 | // 3 should be enough to deliver 2 x 4 = 8MB/s streams. 15 | // however since we can't keep up, let's see if 2 works 16 | DefaultPrefetchLen = 2 17 | ) 18 | 19 | var PrefetchCount uint 20 | 21 | // ReadableChunk is a chunk object that Stream can Read() from. 22 | type ReadableChunk []byte 23 | 24 | // Read is called by stream.Read. 25 | func (b ReadableChunk) Read(offset, n int64, dest []byte) (int, error) { 26 | if offset+n > int64(len(b)) { 27 | n = int64(len(b)) - offset 28 | } 29 | 30 | read := copy(dest, b[offset:offset+n]) 31 | return read, nil 32 | } 33 | 34 | // Size returns the chunk size. Used by ccache to track size of cache 35 | // DISABLED FOR NOW. we're making some guesses in cmd/main.go instead 36 | //func (b ReadableChunk) Size() int64 { 37 | // return int64(len(b)) 38 | //} 39 | 40 | // streamRange provides handy blob calculations for a requested stream range. 41 | type streamRange struct { 42 | FirstChunkIdx int64 // index of chunk that contains start of range 43 | LastChunkIdx int64 // index of chunk that contains end of range 44 | FirstChunkOffset int64 // offset from start of first chunk to start of range 45 | LastChunkReadLen int64 // number of bytes read from the last chunk that's read 46 | LastChunkOffset int64 // offset from start of last chunk to end of range (only set if the whole range is in a single chunk) 47 | } 48 | 49 | // getRange returns a streamRange for the given start offset and reader buffer length. 
50 | func getRange(offset int64, readLen int) streamRange { 51 | r := streamRange{} 52 | 53 | rangeStartBytes := offset 54 | rangeEndBytes := offset + int64(readLen) 55 | 56 | r.FirstChunkIdx = rangeStartBytes / MaxChunkSize 57 | r.LastChunkIdx = rangeEndBytes / MaxChunkSize 58 | r.FirstChunkOffset = offset - r.FirstChunkIdx*MaxChunkSize 59 | if r.FirstChunkIdx == r.LastChunkIdx { 60 | r.LastChunkOffset = offset - r.LastChunkIdx*MaxChunkSize 61 | } 62 | r.LastChunkReadLen = rangeEndBytes - r.LastChunkIdx*MaxChunkSize - r.LastChunkOffset 63 | 64 | return r 65 | } 66 | 67 | func (r streamRange) String() string { 68 | return fmt.Sprintf("B%v[%v:]-B%v[%v:%v]", r.FirstChunkIdx, r.FirstChunkOffset, r.LastChunkIdx, r.LastChunkOffset, r.LastChunkReadLen) 69 | } 70 | 71 | func (r streamRange) ByteRangeForChunk(i int64) (int64, int64) { 72 | switch { 73 | case i == r.FirstChunkIdx: 74 | return r.FirstChunkOffset, MaxChunkSize - r.FirstChunkOffset 75 | case i == r.LastChunkIdx: 76 | return r.LastChunkOffset, r.LastChunkReadLen 77 | case r.FirstChunkIdx == r.LastChunkIdx: 78 | return r.FirstChunkOffset, r.LastChunkReadLen 79 | default: 80 | // Andrey says this never happens because apparently http.ServeContent never reads deeper 81 | // than 32KB from Stream.Read so the case when i is in the middle just doesn't happen 82 | return 0, 0 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /player/chunk_test.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestStreamRange(t *testing.T) { 11 | type testCase struct { 12 | offset int64 13 | readLen int 14 | expected streamRange 15 | } 16 | 17 | // size: 128791189, has blobs: 62 + padding, last blob index: 61 18 | testCases := []testCase{ 19 | // read 512 bytes from the start of a single chunk 20 | {0, 512, streamRange{0, 0, 0, 512, 0}}, 21 | // read 64KiB from the middle of the 2nd chunk 22 | {2450019, 64000, streamRange{1, 1, 352867 + 1, 64000, 352868}}, 23 | // read 99 bytes from the middle of the 61st chunk 24 | {128791089, 99, streamRange{61, 61, 864817 + 61, 99, 864878}}, 25 | // read across 61 chunks, ending mid-chunk. 
26 | {0, 128791189, streamRange{0, 61, 0, 864978, 0}}, 27 | // read 43 bytes across one chunk boundary 28 | {2097149, 43, streamRange{0, 1, 2097149, 41, 0}}, 29 | } 30 | 31 | for n, row := range testCases { 32 | t.Run(fmt.Sprintf("row:%v", n), func(t *testing.T) { 33 | bc := getRange(row.offset, row.readLen) 34 | assert.Equal(t, row.expected.FirstChunkIdx, bc.FirstChunkIdx) 35 | assert.Equal(t, row.expected.LastChunkIdx, bc.LastChunkIdx) 36 | assert.Equal(t, row.expected.FirstChunkOffset, bc.FirstChunkOffset) 37 | assert.Equal(t, row.expected.LastChunkReadLen, bc.LastChunkReadLen) 38 | assert.Equal(t, row.expected.LastChunkOffset, bc.LastChunkOffset) 39 | }) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /player/decrypted_cache.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/OdyseeTeam/gody-cdn/cleanup" 8 | "github.com/OdyseeTeam/gody-cdn/configs" 9 | objectStore "github.com/OdyseeTeam/gody-cdn/store" 10 | "github.com/OdyseeTeam/player-server/internal/metrics" 11 | "github.com/lbryio/lbry.go/v2/extras/errors" 12 | "github.com/lbryio/lbry.go/v2/extras/stop" 13 | "github.com/lbryio/lbry.go/v2/stream" 14 | "github.com/sirupsen/logrus" 15 | 16 | "github.com/lbryio/reflector.go/shared" 17 | "github.com/lbryio/reflector.go/store" 18 | 19 | "golang.org/x/sync/singleflight" 20 | ) 21 | 22 | // DecryptedCache Stores and retrieves unencrypted blobs on disk. 23 | type DecryptedCache struct { 24 | cache *objectStore.CachingStore 25 | sf *singleflight.Group 26 | stopper *stop.Group 27 | } 28 | 29 | type decryptionData struct { 30 | key, iv []byte 31 | } 32 | 33 | func NewDecryptedCache(origin store.BlobStore) *DecryptedCache { 34 | stopper := stop.New() 35 | 36 | // TODO: Improve config file discovery. 
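// The lookup below tries "config.json" in the current working directory and falls back to
// "../config.json", which is what lets `go test` runs started from package subdirectories
// find a config placed in the repository root.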
37 | err := configs.Init("config.json") 38 | if err != nil { 39 | err := configs.Init("../config.json") 40 | if err != nil { 41 | logrus.Fatalln(errors.FullTrace(err)) 42 | } 43 | } 44 | err = os.MkdirAll(configs.Configuration.DiskCache.Path, os.ModePerm) 45 | if err != nil { 46 | logrus.Fatal(errors.FullTrace(err)) 47 | } 48 | ds, err := objectStore.NewDiskStore(configs.Configuration.DiskCache.Path, 2) 49 | if err != nil { 50 | logrus.Fatal(errors.FullTrace(err)) 51 | } 52 | localDB := configs.Configuration.LocalDB 53 | localDsn := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s", localDB.User, localDB.Password, localDB.Host, localDB.Database) 54 | dbs := objectStore.NewDBBackedStore(ds, localDsn) 55 | go cleanup.SelfCleanup(dbs, dbs, stopper, configs.Configuration.DiskCache, configs.Configuration.GetCleanupInterval()) 56 | 57 | baseFuncs := objectStore.BaseFuncs{ 58 | GetFunc: func(hash string, extra interface{}) ([]byte, shared.BlobTrace, error) { 59 | //add miss metric logic here 60 | metrics.DecryptedCacheRequestCount.WithLabelValues("object", "miss").Inc() 61 | data, stack, err := origin.Get(hash) 62 | if extra != nil { 63 | dd := extra.(*decryptionData) 64 | chunk, err := stream.DecryptBlob(data, dd.key, dd.iv) 65 | return chunk, stack, err 66 | } 67 | 68 | return data, stack, err 69 | }, 70 | HasFunc: func(hash string, extra interface{}) (bool, error) { 71 | return origin.Has(hash) 72 | }, 73 | PutFunc: func(hash string, object []byte, extra interface{}) error { 74 | return errors.Err("not implemented") 75 | }, 76 | DelFunc: func(hash string, extra interface{}) error { 77 | return origin.Delete(hash) 78 | }, 79 | } 80 | finalStore := objectStore.NewCachingStoreV2("nvme-db-store", baseFuncs, dbs) 81 | 82 | h := &DecryptedCache{ 83 | cache: finalStore, 84 | sf: new(singleflight.Group), 85 | stopper: stopper, 86 | } 87 | 88 | return h 89 | } 90 | 91 | // GetSDBlob gets an sd blob. If it's not in the cache, it is fetched from the origin and cached. 92 | // store.ErrBlobNotFound is returned if blob is not found. 93 | func (h *DecryptedCache) GetSDBlob(hash string) (*stream.SDBlob, error) { 94 | metrics.DecryptedCacheRequestCount.WithLabelValues("sdblob", "total").Inc() 95 | cached, _, err := h.cache.Get(hash, nil) 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | var sd stream.SDBlob 101 | err = sd.FromBlob(cached) 102 | return &sd, errors.Err(err) 103 | } 104 | 105 | // GetChunk gets a decrypted stream chunk. If chunk is not cached, it is fetched from origin 106 | // and decrypted. 
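// Illustrative call sequence, mirroring hot_cache_test.go (placeholder names, error
// handling elided): the key and IV for a content blob come from the stream's SD blob,
// so a caller typically fetches the SD blob first and then addresses chunks through it:
//
//	sd, _ := cache.GetSDBlob(sdHash)
//	chunk, _ := cache.GetChunk(sd.BlobInfos[0].BlobHash, sd.Key, sd.BlobInfos[0].IV)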
107 | func (h *DecryptedCache) GetChunk(hash string, key, iv []byte) (ReadableChunk, error) { 108 | dd := decryptionData{ 109 | key: key, 110 | iv: iv, 111 | } 112 | metrics.DecryptedCacheRequestCount.WithLabelValues("blob", "total").Inc() 113 | item, _, err := h.cache.Get(hash, &dd) 114 | return item, err 115 | } 116 | 117 | func (h *DecryptedCache) IsCached(hash string) bool { 118 | has, _ := h.cache.Has(hash, nil) 119 | return has 120 | } 121 | 122 | func (h *DecryptedCache) Shutdown() { 123 | h.cache.Shutdown() 124 | h.stopper.StopAndWait() 125 | } 126 | -------------------------------------------------------------------------------- /player/errors.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | var ( 8 | ErrPaidStream = errors.New("paid stream") 9 | ErrEdgeAuthenticationMisconfigured = errors.New("edge authentication misconfigured") 10 | ErrEdgeAuthenticationFailed = errors.New("edge authentication failed") 11 | ErrEdgeCredentialsMissing = errors.New("edge credentials missing") 12 | ErrClaimNotFound = errors.New("could not resolve stream URI") 13 | 14 | ErrSeekBeforeStart = errors.New("seeking before the beginning of file") 15 | ErrSeekOutOfBounds = errors.New("seeking out of bounds") 16 | ErrStreamSizeZero = errors.New("stream size is zero") 17 | ) 18 | -------------------------------------------------------------------------------- /player/hot_cache.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "time" 5 | "unsafe" 6 | 7 | "github.com/OdyseeTeam/player-server/internal/metrics" 8 | "github.com/lbryio/lbry.go/v2/stream" 9 | 10 | "github.com/bluele/gcache" 11 | "golang.org/x/sync/singleflight" 12 | ) 13 | 14 | const longTTL = 365 * 24 * time.Hour 15 | 16 | // HotCache is basically an in-memory BlobStore but it stores the blobs decrypted 17 | // You have to know which blobs you expect to be sd blobs when using HotCache 18 | type HotCache struct { 19 | origin DecryptedCache 20 | cache gcache.Cache 21 | sf *singleflight.Group 22 | } 23 | 24 | func NewHotCache(origin DecryptedCache, maxSizeInBytes int64) *HotCache { 25 | h := &HotCache{ 26 | origin: origin, 27 | cache: gcache.New(int(maxSizeInBytes / stream.MaxBlobSize)).ARC().EvictedFunc(func(key interface{}, value interface{}) { 28 | metrics.HotCacheEvictions.Add(1) 29 | }).Build(), 30 | sf: new(singleflight.Group), 31 | } 32 | 33 | go func() { 34 | for { 35 | <-time.After(15 * time.Second) 36 | metrics.HotCacheSize.Set(float64(maxSizeInBytes)) 37 | metrics.HotCacheItems.Set(float64(h.cache.Len(false))) 38 | } 39 | }() 40 | 41 | return h 42 | } 43 | 44 | // GetSDBlob gets an sd blob. If it's not in the cache, it is fetched from the origin and cached. 45 | // store.ErrBlobNotFound is returned if blob is not found. 
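// Note on sizing: the ARC cache above is bounded by entry count (maxSizeInBytes divided by
// stream.MaxBlobSize), not by the bytes actually held, so maxSizeInBytes is an upper bound
// that assumes every cached item is close to a full blob. A minimal construction sketch
// (origin and the byte budget are placeholders):
//
//	ds := NewDecryptedCache(origin) // origin is any reflector store.BlobStore
//	hot := NewHotCache(*ds, 1<<30)  // budget for roughly 512 full-size blobs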
46 | func (h *HotCache) GetSDBlob(hash string) (*stream.SDBlob, error) { 47 | cached, err := h.cache.Get(hash) 48 | if err == nil && cached != nil { 49 | metrics.HotCacheRequestCount.WithLabelValues("sd", "hit").Inc() 50 | return cached.(sizedSD).sd, nil 51 | } 52 | 53 | metrics.HotCacheRequestCount.WithLabelValues("sd", "miss").Inc() 54 | return h.getSDFromOrigin(hash) 55 | } 56 | 57 | // getSDFromOrigin gets the blob from the origin, caches it, and returns it 58 | func (h *HotCache) getSDFromOrigin(hash string) (*stream.SDBlob, error) { 59 | blob, err, _ := h.sf.Do(hash, func() (interface{}, error) { 60 | sd, err := h.origin.GetSDBlob(hash) 61 | if err != nil { 62 | return nil, err 63 | } 64 | _ = h.cache.Set(hash, sizedSD{sd}) 65 | 66 | return sd, nil 67 | }) 68 | 69 | if err != nil || blob == nil { 70 | return nil, err 71 | } 72 | 73 | return blob.(*stream.SDBlob), nil 74 | } 75 | 76 | // GetChunk gets a decrypted stream chunk. If chunk is not cached, it is fetched from origin 77 | // and decrypted. 78 | func (h *HotCache) GetChunk(hash string, key, iv []byte) (ReadableChunk, error) { 79 | item, err := h.cache.Get(hash) 80 | if err == nil { 81 | metrics.HotCacheRequestCount.WithLabelValues("chunk", "hit").Inc() 82 | return ReadableChunk(item.(sizedSlice)[:]), nil 83 | } 84 | 85 | metrics.HotCacheRequestCount.WithLabelValues("chunk", "miss").Inc() 86 | return h.getChunkFromOrigin(hash, key, iv) 87 | } 88 | 89 | // getChunkFromOrigin gets the chunk from the origin, decrypts it, caches it, and returns it 90 | func (h *HotCache) getChunkFromOrigin(hash string, key, iv []byte) (ReadableChunk, error) { 91 | chunk, err, _ := h.sf.Do(hash, func() (interface{}, error) { 92 | chunk, err := h.origin.GetChunk(hash, key, iv) 93 | if err != nil { 94 | return nil, err 95 | } 96 | metrics.InBytes.Add(float64(len(chunk))) 97 | _ = h.cache.Set(hash, sizedSlice(chunk)) 98 | 99 | return chunk, nil 100 | }) 101 | 102 | if err != nil || chunk == nil { 103 | return nil, err 104 | } 105 | 106 | return chunk.(ReadableChunk)[:], nil 107 | } 108 | 109 | func (h *HotCache) IsCached(hash string) bool { 110 | return h.cache.Has(hash) 111 | } 112 | 113 | type sizedSlice []byte 114 | 115 | func (s sizedSlice) Size() int64 { return int64(len(s)) } 116 | 117 | type sizedSD struct { 118 | sd *stream.SDBlob 119 | } 120 | 121 | func (s sizedSD) Size() int64 { 122 | total := int64(unsafe.Sizeof(s)) + int64(unsafe.Sizeof(&(s.sd))) 123 | for _, bi := range s.sd.BlobInfos { 124 | total += int64(unsafe.Sizeof(bi)) + int64(len(bi.BlobHash)+len(bi.IV)) 125 | } 126 | return total + int64(len(s.sd.StreamName)+len(s.sd.StreamType)+len(s.sd.Key)+len(s.sd.SuggestedFileName)+len(s.sd.StreamHash)) 127 | } 128 | -------------------------------------------------------------------------------- /player/hot_cache_test.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/lbryio/lbry.go/v2/extras/errors" 8 | "github.com/lbryio/lbry.go/v2/stream" 9 | "github.com/lbryio/reflector.go/store" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestHotCache_BlobNotFound(t *testing.T) { 16 | origin := store.NewMemStore() 17 | ds := NewDecryptedCache(origin) 18 | hc := NewHotCache(*ds, 100000000) 19 | assert.NotNil(t, hc) 20 | 21 | _, err := hc.GetSDBlob("test") 22 | assert.True(t, errors.Is(err, store.ErrBlobNotFound)) 23 | } 24 | 25 | func TestHotCache_Stream(t *testing.T) { 26 
| origin := store.NewMemStore() 27 | ds := NewDecryptedCache(origin) 28 | 29 | data := randomString(MaxChunkSize * 3) 30 | s, err := stream.New(bytes.NewReader([]byte(data))) 31 | require.NoError(t, err) 32 | require.Equal(t, 4, len(s)) // make sure we got an sd blob plus 3 content blobs 33 | 34 | for _, b := range s { 35 | origin.Put(b.HashHex(), b) 36 | } 37 | 38 | hc := NewHotCache(*ds, 100000000) 39 | 40 | var streamSDBlob stream.SDBlob 41 | err = streamSDBlob.FromBlob(s[0]) 42 | require.NoError(t, err) 43 | 44 | storedSDBlob, err := hc.GetSDBlob(s[0].HashHex()) 45 | require.NoError(t, err) 46 | assert.EqualValues(t, streamSDBlob, *storedSDBlob) 47 | 48 | // check the first chunk matches the stream data 49 | chunkIdx := 0 50 | chunk, err := hc.GetChunk(s[chunkIdx+1].HashHex(), streamSDBlob.Key, streamSDBlob.BlobInfos[chunkIdx].IV) 51 | require.NoError(t, err) 52 | assert.EqualValues(t, data[:20], chunk[:20]) 53 | } 54 | 55 | // new LRU library has no size method 56 | //func TestHotCache_Size(t *testing.T) { 57 | // origin := store.NewMemStore() 58 | // dataLen := 444 59 | // data, err := stream.NewBlob([]byte(randomString(dataLen)), stream.NullIV(), stream.NullIV()) 60 | // require.NoError(t, err) 61 | // origin.Put("hash", data) 62 | // 63 | // hc := NewHotCache(origin, 100000000) 64 | // hc.GetChunk("hash", stream.NullIV(), stream.NullIV()) 65 | // 66 | // time.Sleep(10 * time.Millisecond) // give cache worker time to update cache size 67 | // 68 | // assert.EqualValues(t, dataLen, hc.cache.Size()) 69 | //} 70 | -------------------------------------------------------------------------------- /player/http_handlers.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "regexp" 11 | "strconv" 12 | "strings" 13 | "time" 14 | 15 | "github.com/OdyseeTeam/player-server/firewall" 16 | "github.com/OdyseeTeam/player-server/internal/iapi" 17 | "github.com/OdyseeTeam/player-server/internal/metrics" 18 | "github.com/OdyseeTeam/player-server/pkg/app" 19 | "github.com/OdyseeTeam/player-server/pkg/logger" 20 | tclient "github.com/OdyseeTeam/transcoder/client" 21 | 22 | "github.com/getsentry/sentry-go" 23 | "github.com/gin-gonic/gin" 24 | "github.com/sirupsen/logrus" 25 | ) 26 | 27 | // SpeechPrefix is root level prefix for speech URLs. 28 | const SpeechPrefix = "/speech/" 29 | 30 | const ( 31 | paramDownload = "download" 32 | paramHashHLS = "hash-hls" // Nested hash parameter for signed hls url to use with StackPath 33 | paramClientIP = "ip" // Nested client IP parameter for hls urls to use with StackPath 34 | paramHash77 = "hash77" // Nested hash parameter for signed url to use with CDN77 35 | ) 36 | 37 | var ( 38 | StreamWriteTimeout = uint(86400) 39 | playerName = "unknown-player" 40 | reV5StartEndpoint = regexp.MustCompile(`^/v5/streams/start/.+`) 41 | reV6StartEndpoint = regexp.MustCompile(`^/v6/streams/.+(\.mp4)?$`) 42 | ) 43 | 44 | // RequestHandler is a HTTP request handler for player package. 45 | type RequestHandler struct { 46 | player *Player 47 | } 48 | 49 | func init() { 50 | var err error 51 | 52 | playerName = os.Getenv("PLAYER_NAME") 53 | if playerName == "" { 54 | playerName, err = os.Hostname() 55 | if err != nil { 56 | playerName = "unknown-player" 57 | } 58 | } 59 | } 60 | 61 | // NewRequestHandler initializes an HTTP request handler with the provided Player instance. 
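// The actual route registration lives in http_routes.go; as a hedged sketch only (the path
// shown here is an assumption, not the real route), the wiring looks roughly like:
//
//	h := NewRequestHandler(p)
//	router.GET("/content/claims/:claim_name/:claim_id/:filename", h.Handle)
//
// Handle itself relies on the claim_name and claim_id route parameters (or a /speech/ URL) below.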
62 | func NewRequestHandler(p *Player) *RequestHandler { 63 | return &RequestHandler{p} 64 | } 65 | 66 | var allowedReferrers = map[string]bool{ 67 | "https://piped.kavin.rocks/": true, 68 | "https://piped.video/": true, 69 | "https://www.gstatic.com/": true, 70 | "http://localhost:9090/": true, 71 | } 72 | var allowedTldReferrers = map[string]bool{ 73 | "odysee.com": true, 74 | "odysee.tv": true, 75 | } 76 | var allowedOrigins = map[string]bool{ 77 | "https://odysee.com": true, 78 | "https://neko.odysee.tv": true, 79 | "https://salt.odysee.tv": true, 80 | "https://kp.odysee.tv": true, 81 | "https://inf.odysee.tv": true, 82 | "https://stashu.odysee.tv": true, 83 | "https://www.gstatic.com": true, 84 | "https://odysee.ap.ngrok.io": true, 85 | } 86 | var allowedUserAgents = []string{ 87 | "LBRY/", 88 | "Roku/", 89 | } 90 | var allowedSpecialHeaders = map[string]bool{"x-cf-lb-monitor": true} 91 | 92 | var allowedXRequestedWith = "com.odysee.app" 93 | 94 | // Handle is responsible for all HTTP media delivery via player module. 95 | func (h *RequestHandler) Handle(c *gin.Context) { 96 | addExtraResponseHeaders(c) 97 | c.Header("player-request-method", c.Request.Method) 98 | if c.Request.Method == http.MethodHead { 99 | c.Header("Cache-Control", "no-store, No-cache") 100 | } 101 | 102 | var uri string 103 | var isSpeech bool 104 | if strings.HasPrefix(c.Request.URL.String(), SpeechPrefix) { 105 | // Speech stuff 106 | uri = c.Request.URL.String()[len(SpeechPrefix):] 107 | extStart := strings.LastIndex(uri, ".") 108 | if extStart >= 0 { 109 | uri = uri[:extStart] 110 | } 111 | if uri == "" { 112 | c.AbortWithStatus(http.StatusNotFound) 113 | return 114 | } 115 | isSpeech = true 116 | // Speech stuff over 117 | } else if c.Param("claim_name") != "" { 118 | uri = c.Param("claim_name") + "#" + c.Param("claim_id") 119 | } else { 120 | uri = c.Param("claim_id") 121 | if len(uri) != 40 { 122 | c.AbortWithStatus(http.StatusNotFound) 123 | return 124 | } 125 | } 126 | 127 | magicTimestamp, exists := c.GetQuery("magic") 128 | magicPass := false 129 | if exists && magicTimestamp != "" { 130 | unixT, err := strconv.Atoi(magicTimestamp) 131 | if err == nil { 132 | genesisTime := time.Unix(int64(unixT), 0) 133 | if time.Since(genesisTime) < 5*time.Minute { 134 | magicPass = true 135 | } 136 | } 137 | } 138 | 139 | flagged := true 140 | for header, v := range c.Request.Header { 141 | hasHeaderToCheck := header != "User-Agent" && header != "Referer" && header != "Origin" && header != "X-Requested-With" 142 | if hasHeaderToCheck && !allowedSpecialHeaders[strings.ToLower(header)] { 143 | continue 144 | } 145 | if strings.ToLower(header) == "origin" && allowedOrigins[v[0]] { 146 | flagged = false 147 | break 148 | } 149 | if strings.ToLower(header) == "referer" { 150 | if allowedReferrers[v[0]] { 151 | flagged = false 152 | break 153 | } 154 | //check if the referrer is from an allowed tld (weak check) 155 | for tld := range allowedTldReferrers { 156 | if strings.Contains(v[0], tld) { 157 | flagged = false 158 | break 159 | } 160 | } 161 | } 162 | 163 | if strings.ToLower(header) == "user-agent" { 164 | for _, ua := range allowedUserAgents { 165 | if strings.HasPrefix(v[0], ua) { 166 | flagged = false 167 | break 168 | } 169 | } 170 | } 171 | if allowedSpecialHeaders[strings.ToLower(header)] { 172 | flagged = false 173 | break 174 | } 175 | if strings.ToLower(header) == "x-requested-with" && v[0] == allowedXRequestedWith { 176 | flagged = false 177 | break 178 | } 179 | } 180 | //if the request is flagged and the 
magic pass is not set then we will not serve the request 181 | //with an exception for /v3/ endpoints for now 182 | flagged = !magicPass && flagged && !strings.HasPrefix(c.Request.URL.String(), "/api/v3/") 183 | 184 | //this is here temporarily due to abuse. a better solution will be found 185 | ip := c.ClientIP() 186 | if firewall.CheckBans(ip) { 187 | c.AbortWithStatus(http.StatusTooManyRequests) 188 | return 189 | } 190 | 191 | if strings.Contains(c.Request.URL.String(), "Katmovie18") { 192 | c.String(http.StatusForbidden, "this content cannot be accessed") 193 | return 194 | } 195 | //end of abuse block 196 | 197 | blocked, err := iapi.GetBlockedContent() 198 | if err == nil { 199 | if blocked[c.Param("claim_id")] { 200 | c.String(http.StatusForbidden, "this content cannot be accessed") 201 | return 202 | } 203 | } 204 | isDownload, _ := strconv.ParseBool(c.Query(paramDownload)) 205 | 206 | if isDownload { 207 | // log all headers for download requests 208 | //encode headers in a json string 209 | headers, err := json.MarshalIndent(c.Request.Header, "", " ") 210 | if err == nil { 211 | logrus.Infof("download request for %s with IP %s and headers: %+v", uri, ip, string(headers)) 212 | } 213 | } 214 | //don't allow downloads if either flagged or disabled 215 | if isDownload && (!h.player.options.downloadsEnabled || flagged) { 216 | c.String(http.StatusForbidden, "downloads are currently disabled") 217 | return 218 | } 219 | 220 | stream, err := h.player.ResolveStream(uri) 221 | addBreadcrumb(c.Request, "sdk", fmt.Sprintf("resolve %v", uri)) 222 | if err != nil { 223 | processStreamError("resolve", c, uri, err) 224 | return 225 | } 226 | hasValidChannel := stream.Claim.SigningChannel != nil && stream.Claim.SigningChannel.ClaimID != "" 227 | var channelClaimId *string 228 | if hasValidChannel { 229 | channelClaimId = &stream.Claim.SigningChannel.ClaimID 230 | } 231 | if firewall.IsStreamBlocked(uri, channelClaimId) { 232 | c.String(http.StatusForbidden, "this content cannot be accessed") 233 | return 234 | } 235 | 236 | abusiveIP, abuseCount := firewall.CheckAndRateLimitIp(ip, stream.ClaimID) 237 | if abusiveIP { 238 | Logger.Warnf("IP %s is abusing resources (count: %d): %s - %s", ip, abuseCount, stream.ClaimID, stream.Claim.Name) 239 | if abuseCount > 10 { 240 | c.String(http.StatusTooManyRequests, "Try again later") 241 | return 242 | } 243 | } 244 | if isDownload && abuseCount > 2 { 245 | c.String(http.StatusTooManyRequests, "Try again later") 246 | return 247 | } 248 | 249 | err = h.player.VerifyAccess(stream, c) 250 | if err != nil { 251 | processStreamError("access", c, uri, err) 252 | return 253 | } 254 | if flagged && !isSpeech { 255 | c.String(http.StatusUnauthorized, "this content cannot be accessed at the moment") 256 | return 257 | } 258 | 259 | if !isDownload && fitForTranscoder(c, stream) && h.player.tclient != nil { 260 | tcPath := h.player.tclient.GetPlaybackPath(c.Param("claim_id"), stream.hash) 261 | if tcPath != "" { 262 | metrics.StreamsDelivered.WithLabelValues(metrics.StreamTranscoded).Inc() 263 | c.Redirect( 264 | http.StatusPermanentRedirect, 265 | getPlaylistURL(c.Request.URL.Path, c.Request.URL.Query(), tcPath, stream), 266 | ) 267 | return 268 | } 269 | } 270 | 271 | metrics.StreamsDelivered.WithLabelValues(metrics.StreamOriginal).Inc() 272 | 273 | err = stream.PrepareForReading() 274 | addBreadcrumb(c.Request, "sdk", fmt.Sprintf("retrieve %v", uri)) 275 | if err != nil { 276 | processStreamError("retrieval", c, uri, err) 277 | return 278 | } 279 | 280 | 
writeHeaders(c, stream) 281 | 282 | conn, err := app.GetConnection(c.Request) 283 | if err != nil { 284 | Logger.Warn("can't set write timeout: ", err) 285 | } else { 286 | err = conn.SetWriteDeadline(time.Now().Add(time.Duration(StreamWriteTimeout) * time.Second)) 287 | if err != nil { 288 | Logger.Error("can't set write timeout: ", err) 289 | } 290 | } 291 | 292 | switch c.Request.Method { 293 | case http.MethodHead: 294 | c.Status(http.StatusOK) 295 | case http.MethodGet: 296 | addBreadcrumb(c.Request, "player", fmt.Sprintf("play %v", uri)) 297 | err = h.player.Play(stream, c) 298 | if err != nil { 299 | processStreamError("playback", c, uri, err) 300 | return 301 | } 302 | } 303 | } 304 | 305 | func (h *RequestHandler) HandleTranscodedFragment(c *gin.Context) { 306 | uri := c.Param("claim_id") 307 | addExtraResponseHeaders(c) 308 | metrics.StreamsRunning.WithLabelValues(metrics.StreamTranscoded).Inc() 309 | defer metrics.StreamsRunning.WithLabelValues(metrics.StreamTranscoded).Dec() 310 | 311 | stream, err := h.player.ResolveStream(uri) 312 | addBreadcrumb(c.Request, "sdk", fmt.Sprintf("resolve %v", uri)) 313 | if err != nil { 314 | processStreamError("resolve", c, uri, err) 315 | return 316 | } 317 | hasValidChannel := stream.Claim.SigningChannel != nil && stream.Claim.SigningChannel.ClaimID != "" 318 | var channelClaimId *string 319 | if hasValidChannel { 320 | channelClaimId = &stream.Claim.SigningChannel.ClaimID 321 | } 322 | if firewall.IsStreamBlocked(uri, channelClaimId) { 323 | c.String(http.StatusForbidden, "this content cannot be accessed") 324 | return 325 | } 326 | err = h.player.VerifyAccess(stream, c) 327 | if err != nil { 328 | processStreamError("access", c, uri, err) 329 | return 330 | } 331 | size, err := h.player.tclient.PlayFragment(uri, c.Param("sd_hash"), c.Param("fragment"), c.Writer, c.Request) 332 | if err != nil { 333 | processStreamError("transcoder", c, uri, err, "sd_hash", c.Param("sd_hash"), "fragment", c.Param("fragment")) 334 | return 335 | } 336 | metrics.TcOutBytes.Add(float64(size)) 337 | } 338 | 339 | func writeHeaders(c *gin.Context, s *Stream) { 340 | c.Header("Content-Length", fmt.Sprintf("%v", s.Size)) 341 | c.Header("Content-Type", s.ContentType) 342 | c.Header("Last-Modified", s.Timestamp().UTC().Format(http.TimeFormat)) 343 | if c.Request.Method != http.MethodHead { 344 | c.Header("Cache-Control", "public, max-age=31536000") 345 | } 346 | 347 | isDownload, _ := strconv.ParseBool(c.Query(paramDownload)) 348 | if isDownload { 349 | filename := regexp.MustCompile(`[^\p{L}\d\-\._ ]+`).ReplaceAllString(s.Filename(), "") 350 | c.Header("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))) 351 | } 352 | } 353 | 354 | func processStreamError(errorType string, gctx *gin.Context, uri string, err error, extra ...string) { 355 | req := gctx.Request 356 | w := gctx.Writer 357 | if err == tclient.ErrChannelNotEnabled { 358 | return 359 | } 360 | 361 | if w == nil { 362 | Logger.Errorf("%s stream GET - %s error: %v", uri, errorType, err) 363 | return 364 | } 365 | 366 | Logger.Errorf("%s stream %v - %s error: %v", gctx.Request.Method, uri, errorType, err) 367 | 368 | if errors.Is(err, ErrPaidStream) { 369 | writeErrorResponse(w, http.StatusPaymentRequired, err.Error()) 370 | } else if errors.Is(err, ErrClaimNotFound) { 371 | writeErrorResponse(w, http.StatusNotFound, err.Error()) 372 | } else if errors.Is(err, ErrEdgeCredentialsMissing) { 373 | writeErrorResponse(w, http.StatusUnauthorized, 
err.Error()) 374 | } else if strings.Contains(err.Error(), "blob not found") { 375 | writeErrorResponse(w, http.StatusServiceUnavailable, err.Error()) 376 | } else if strings.Contains(err.Error(), "hash in response does not match") { 377 | logger.SendToSentry(err, req, "error_type", errorType) 378 | writeErrorResponse(w, http.StatusServiceUnavailable, err.Error()) 379 | } else if strings.Contains(err.Error(), "token contains an invalid number of segments") { 380 | logger.SendToSentry(err, req, "error_type", errorType) 381 | writeErrorResponse(w, http.StatusUnauthorized, err.Error()) 382 | } else if strings.Contains(err.Error(), "crypto/rsa: verification error") { 383 | logger.SendToSentry(err, req, "error_type", errorType) 384 | writeErrorResponse(w, http.StatusUnauthorized, err.Error()) 385 | } else if strings.Contains(err.Error(), "token is expired") { 386 | logger.SendToSentry(err, req, "error_type", errorType) 387 | writeErrorResponse(w, http.StatusGone, err.Error()) 388 | } else if errorType == "transcoder" { 389 | logger.SendToSentry(err, req, extra...) 390 | writeErrorResponse(w, http.StatusNotFound, err.Error()) 391 | } else { 392 | logger.SendToSentry(err, req, "error_type", errorType) 393 | writeErrorResponse(w, http.StatusInternalServerError, err.Error()) 394 | } 395 | } 396 | 397 | func writeErrorResponse(w http.ResponseWriter, statusCode int, msg string) { 398 | w.WriteHeader(statusCode) 399 | w.Write([]byte(msg)) 400 | } 401 | 402 | func addBreadcrumb(r *http.Request, category, message string) { 403 | if hub := sentry.GetHubFromContext(r.Context()); hub != nil { 404 | hub.Scope().AddBreadcrumb(&sentry.Breadcrumb{ 405 | Category: category, 406 | Message: message, 407 | }, 99) 408 | } 409 | } 410 | 411 | func addExtraResponseHeaders(c *gin.Context) { 412 | c.Header("Report-To", `{"group":"default","max_age":31536000,"endpoints":[{"url":"https://6fd448c230d0731192f779791c8e45c3.report-uri.com/a/d/g"}],"include_subdomains":true}`) 413 | c.Header("Content-Security-Policy", "script-src 'none'; report-uri https://6fd448c230d0731192f779791c8e45c3.report-uri.com/r/d/csp/enforce; report-to default") 414 | c.Header("Access-Control-Expose-Headers", "X-Powered-By, X-77-Cache") 415 | c.Header("X-Powered-By", playerName) 416 | } 417 | 418 | func getPlaylistURL(fullPath string, query url.Values, tcPath string, stream *Stream) string { 419 | if strings.HasPrefix(fullPath, "/v5/streams/start/") { 420 | qs := "" 421 | if query.Get(paramHashHLS) != "" { 422 | qs = fmt.Sprintf("?ip=%s&hash=%s", query.Get(paramClientIP), query.Get(paramHashHLS)) 423 | } 424 | return fmt.Sprintf("/v5/streams/hls/%s%s", tcPath, qs) 425 | } else if strings.HasPrefix(fullPath, "/v6/streams/") { 426 | path := fmt.Sprintf("/v6/streams/%s", tcPath) 427 | h := query.Get(paramHash77) 428 | if h != "" { 429 | path = "/" + h + path 430 | } 431 | return path 432 | } 433 | return fmt.Sprintf("/api/v4/streams/tc/%s/%s", stream.URL, tcPath) 434 | } 435 | 436 | func fitForTranscoder(c *gin.Context, s *Stream) bool { 437 | return (strings.HasPrefix(c.FullPath(), "/api/v4/") || 438 | ((reV5StartEndpoint.MatchString(c.FullPath()) || reV6StartEndpoint.MatchString(c.FullPath())) && c.Request.Method == http.MethodHead)) && 439 | strings.HasPrefix(s.ContentType, "video/") 440 | } 441 | -------------------------------------------------------------------------------- /player/http_handlers_test.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "encoding/hex" 5 | 
"fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "net/url" 10 | "strings" 11 | "testing" 12 | 13 | "github.com/OdyseeTeam/player-server/pkg/paid" 14 | "github.com/Pallinder/go-randomdata" 15 | 16 | "github.com/gin-gonic/gin" 17 | "github.com/stretchr/testify/assert" 18 | "github.com/stretchr/testify/require" 19 | ) 20 | 21 | type rangeHeader struct { 22 | start, end, knownLen int 23 | } 24 | 25 | func makeRequest(t *testing.T, router *gin.Engine, method, uri string, rng *rangeHeader) *http.Response { 26 | if router == nil { 27 | router = gin.New() 28 | InstallPlayerRoutes(router, getTestPlayer()) 29 | } 30 | 31 | r, err := http.NewRequest(method, uri, nil) 32 | require.NoError(t, err) 33 | r.Header.Add("Referer", "https://odysee.com") 34 | if rng != nil { 35 | if rng.start == 0 { 36 | r.Header.Add("Range", fmt.Sprintf("bytes=0-%v", rng.end)) 37 | } else if rng.end == 0 { 38 | r.Header.Add("Range", fmt.Sprintf("bytes=%v-", rng.start)) 39 | } else { 40 | r.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", rng.start, rng.end)) 41 | } 42 | } 43 | 44 | rr := httptest.NewRecorder() 45 | router.ServeHTTP(rr, r) 46 | return rr.Result() 47 | } 48 | 49 | func TestHandleGet(t *testing.T) { 50 | player := getTestPlayer() 51 | router := gin.New() 52 | router.Any("/content/claims/:claim_name/:claim_id/:filename", NewRequestHandler(player).Handle) 53 | 54 | type testInput struct { 55 | name, uri string 56 | rng *rangeHeader 57 | } 58 | type testCase struct { 59 | input testInput 60 | output string 61 | } 62 | testCases := []testCase{ 63 | { 64 | testInput{"MiddleBytes", "/content/claims/what/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/stream.mp4", &rangeHeader{start: 156, end: 259}}, 65 | "00000001D39A07E8D39A07E80000000100000000008977680000" + 66 | "0000000000000000000000000000000100000000000000000000" + 67 | "0000000000010000000000000000000000000000400000000780" + 68 | "00000438000000000024656474730000001C656C737400000000", 69 | }, 70 | { 71 | testInput{"FirstBytes", "/content/claims/what/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/stream.mp4", &rangeHeader{start: 0, end: 52}}, 72 | "00000018667479706D703432000000006D7034326D7034310000" + 73 | "C4EA6D6F6F760000006C6D76686400000000D39A07E8D39A07F200", 74 | }, 75 | { 76 | testInput{"BytesFromSecondBlob", "/content/claims/what/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/stream.mp4", &rangeHeader{start: 4000000, end: 4000104}}, 77 | "6E81C93A90DD3A322190C8D608E29AA929867407596665097B5AE780412" + 78 | "61638A51C10BC26770AFFEF1533715FBD1428DCADEDC7BEA5D7A9C7D170" + 79 | "B71EF38E7138D24B0C7E86D791695EDAE1B88EDBE54F95C98EF3DCFD91D" + 80 | "A025C284EE37D8FEEA2EA84B76B9A22D3", 81 | }, 82 | { 83 | testInput{"LastBytes", "/content/claims/known-size/0590f924bbee6627a2e79f7f2ff7dfb50bf2877c/stream", &rangeHeader{start: 128791089, knownLen: 100}}, 84 | "2505CA36CB47B0B14CA023203410E965657B6314F6005D51E992D073B8090419D49E28E99306C95CF2DDB9" + 85 | "51DA5FE6373AC542CC2D83EB129548FFA0B4FFE390EB56600AD72F0D517236140425E323FDFC649FDEB80F" + 86 | "A429227D149FD493FBCA2042141F", 87 | }, 88 | { 89 | testInput{"BetweenBlobs", "/content/claims/known-size/0590f924bbee6627a2e79f7f2ff7dfb50bf2877c/stream", 90 | &rangeHeader{start: 2097149, end: 2097191}}, 91 | "6BD50FA7383B3760C5CE5DFC2F73BD5EE7D3591C986758A5E43D8F3712A59861898F349BC0FA25CDED91DB", 92 | }, 93 | { 94 | testInput{"SecondBLob", "/content/claims/known-size/0590f924bbee6627a2e79f7f2ff7dfb50bf2877c/stream", 95 | &rangeHeader{start: 2097151, end: 2097191}}, 96 | 
"0FA7383B3760C5CE5DFC2F73BD5EE7D3591C986758A5E43D8F3712A59861898F349BC0FA25CDED91DB", 97 | }, 98 | } 99 | 100 | for _, row := range testCases { 101 | t.Run(row.input.name, func(t *testing.T) { 102 | var expectedLen int 103 | response := makeRequest(t, router, http.MethodGet, row.input.uri, row.input.rng) 104 | 105 | if row.input.rng.knownLen > 0 { 106 | expectedLen = row.input.rng.knownLen 107 | } else { 108 | expectedLen = row.input.rng.end - row.input.rng.start + 1 109 | } 110 | require.Equal(t, http.StatusPartialContent, response.StatusCode) 111 | assert.Equal(t, fmt.Sprintf("%v", expectedLen), response.Header.Get("Content-Length")) 112 | assert.Equal(t, "bytes", response.Header.Get("Accept-Ranges")) 113 | assert.Equal(t, "video/mp4", response.Header.Get("Content-Type")) 114 | assert.Equal(t, "", response.Header.Get("Content-Disposition")) 115 | assert.Equal(t, "public, max-age=31536000", response.Header.Get("Cache-Control")) 116 | 117 | responseStream := make([]byte, expectedLen) 118 | _, err := response.Body.Read(responseStream) 119 | require.NoError(t, err) 120 | assert.Equal(t, strings.ToLower(row.output), hex.EncodeToString(responseStream)) 121 | }) 122 | } 123 | } 124 | 125 | func TestHandleUnpaid(t *testing.T) { 126 | response := makeRequest(t, nil, http.MethodGet, "/content/claims/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/stream.mp4", nil) 127 | assert.Equal(t, http.StatusPaymentRequired, response.StatusCode) 128 | } 129 | 130 | func TestHandleHead(t *testing.T) { 131 | response := makeRequest(t, nil, http.MethodHead, "/content/claims/what/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/stream.mp4", nil) 132 | 133 | assert.Equal(t, http.StatusOK, response.StatusCode) 134 | assert.Equal(t, "video/mp4", response.Header.Get("Content-Type")) 135 | assert.Equal(t, "Fri, 17 Nov 2017 17:19:50 GMT", response.Header.Get("Last-Modified")) 136 | assert.Equal(t, "158433824", response.Header.Get("Content-Length")) 137 | } 138 | 139 | func TestHandleHeadErrors(t *testing.T) { 140 | r := makeRequest(t, nil, http.MethodHead, "/content/claims/completely/ef/stream", nil) 141 | require.Equal(t, http.StatusNotFound, r.StatusCode) 142 | } 143 | 144 | func TestHandleNotFound(t *testing.T) { 145 | r := makeRequest(t, nil, http.MethodGet, "/content/claims/completely/ef/stream", nil) 146 | require.Equal(t, http.StatusNotFound, r.StatusCode) 147 | } 148 | 149 | func TestHandleOutOfBounds(t *testing.T) { 150 | r := makeRequest(t, nil, http.MethodGet, "/content/claims/known-size/0590f924bbee6627a2e79f7f2ff7dfb50bf2877c/stream", &rangeHeader{start: 999999999}) 151 | 152 | require.Equal(t, http.StatusRequestedRangeNotSatisfiable, r.StatusCode) 153 | } 154 | 155 | func TestHandleDownloadableFile(t *testing.T) { 156 | checkRequest := func(r *http.Response) { 157 | assert.Equal(t, http.StatusOK, r.StatusCode) 158 | assert.Equal(t, `attachment; filename="861382668_228248581_tenor.gif"; filename*=UTF-8''861382668_228248581_tenor.gif`, r.Header.Get("Content-Disposition")) 159 | assert.Equal(t, "8722934", r.Header.Get("Content-Length")) 160 | } 161 | 162 | testCases := []struct { 163 | url, method string 164 | }{ 165 | { 166 | "/content/claims/scalable-test2/0a15a743ac078a83a02cc086fbb8b566e912b7c5/stream?download=true", 167 | http.MethodGet, 168 | }, 169 | { 170 | "/content/claims/scalable-test2/0a15a743ac078a83a02cc086fbb8b566e912b7c5/stream?download=true", 171 | http.MethodHead, 172 | }, 173 | { 174 | "/content/claims/scalable-test2/0a15a743ac078a83a02cc086fbb8b566e912b7c5/stream?download=1", 175 | 
http.MethodGet, 176 | }, 177 | { 178 | "/content/claims/scalable-test2/0a15a743ac078a83a02cc086fbb8b566e912b7c5/stream?download=1", 179 | http.MethodHead, 180 | }, 181 | } 182 | for _, c := range testCases { 183 | r := makeRequest(t, nil, c.method, c.url, nil) 184 | checkRequest(r) 185 | } 186 | } 187 | 188 | func TestUTF8Filename(t *testing.T) { 189 | _ = ` 【大苑子APP宣傳影片】分享新鮮_精彩生活-20181106.mp4` // original filename, just for reference 190 | r := makeRequest(t, nil, http.MethodHead, "/content/claims/"+url.PathEscape(`-【大苑子APP宣傳影片】分享新鮮_精彩生活-20181106`)+"/e9bbe7a0ffe8bb1070ffe41b342e93b054641b6c/stream?download=1", nil) 191 | assert.Equal(t, http.StatusOK, r.StatusCode) 192 | assert.Equal(t, `attachment; filename=" 大苑子APP宣傳影片分享新鮮精彩生活-20181106.mp4"; filename*=UTF-8''%20%E5%A4%A7%E8%8B%91%E5%AD%90APP%E5%AE%A3%E5%82%B3%E5%BD%B1%E7%89%87%E5%88%86%E4%BA%AB%E6%96%B0%E9%AE%AE%E7%B2%BE%E5%BD%A9%E7%94%9F%E6%B4%BB-20181106.mp4`, r.Header.Get("Content-Disposition")) 193 | assert.Equal(t, "294208625", r.Header.Get("Content-Length")) 194 | 195 | _ = `"Bitcoin je scam" - informujú média.mp4` // original filename, just for reference 196 | r = makeRequest(t, nil, http.MethodHead, "/content/claims/"+url.PathEscape(`-Bitcoin-je-scam----informujú-média`)+"/554c23406b0821c5e2a101ea0e865e35948b632c/stream?download=1", nil) 197 | assert.Equal(t, http.StatusOK, r.StatusCode) 198 | assert.Equal(t, `attachment; filename="Bitcoin je scam - informuju media.mp4"; filename*=UTF-8''Bitcoin%20je%20scam%20-%20informuju%20media.mp4`, r.Header.Get("Content-Disposition")) 199 | assert.Equal(t, "504872011", r.Header.Get("Content-Length")) 200 | } 201 | 202 | func TestHandleHeadStreamsV2(t *testing.T) { 203 | r, err := http.Get("https://api.na-backend.odysee.com/api/v1/paid/pubkey") 204 | require.NoError(t, err) 205 | rawKey, err := io.ReadAll(r.Body) 206 | require.NoError(t, err) 207 | err = paid.InitPubKey(rawKey) 208 | require.NoError(t, err) 209 | 210 | r = makeRequest(t, nil, http.MethodHead, "/api/v2/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9", nil) 211 | body, _ := io.ReadAll(r.Body) 212 | assert.Equal(t, http.StatusUnauthorized, r.StatusCode, string(body)) 213 | 214 | r = makeRequest(t, nil, http.MethodHead, "/api/v2/streams/free/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6", nil) 215 | body, _ = io.ReadAll(r.Body) 216 | assert.Equal(t, http.StatusPaymentRequired, r.StatusCode, string(body)) 217 | 218 | paid.GeneratePrivateKey() 219 | expiredToken, err := paid.CreateToken("iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6", "000", 120_000_000, func(uint64) int64 { return 1 }) 220 | require.NoError(t, err) 221 | 222 | r = makeRequest(t, nil, http.MethodHead, "/api/v2/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/"+expiredToken, nil) 223 | body, _ = io.ReadAll(r.Body) 224 | assert.Equal(t, http.StatusGone, r.StatusCode, string(body)) 225 | 226 | validToken, err := paid.CreateToken("iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6", "000", 120_000_000, paid.ExpTenSecPer100MB) 227 | require.NoError(t, err) 228 | 229 | r = makeRequest(t, nil, http.MethodHead, "/api/v2/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/"+validToken, nil) 230 | body, _ = io.ReadAll(r.Body) 231 | assert.Equal(t, http.StatusOK, r.StatusCode, string(body)) 232 | } 233 | 234 | func TestHandleHeadStreamsV3(t *testing.T) { 235 | r, err := http.Get("https://api.na-backend.odysee.com/api/v1/paid/pubkey") 236 | require.NoError(t, 
err) 237 | rawKey, err := io.ReadAll(r.Body) 238 | require.NoError(t, err) 239 | err = paid.InitPubKey(rawKey) 240 | require.NoError(t, err) 241 | 242 | r = makeRequest(t, nil, http.MethodHead, "/api/v3/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/abcdef/eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9", nil) 243 | body, _ := io.ReadAll(r.Body) 244 | assert.Equal(t, http.StatusUnauthorized, r.StatusCode, string(body)) 245 | 246 | r = makeRequest(t, nil, http.MethodHead, "/api/v3/streams/free/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/abcdef", nil) 247 | body, _ = io.ReadAll(r.Body) 248 | assert.Equal(t, http.StatusPaymentRequired, r.StatusCode, string(body)) 249 | 250 | paid.GeneratePrivateKey() 251 | expiredToken, err := paid.CreateToken("iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6", "000", 120_000_000, func(uint64) int64 { return 1 }) 252 | require.NoError(t, err) 253 | 254 | r = makeRequest(t, nil, http.MethodHead, "/api/v3/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/abcdef/"+expiredToken, nil) 255 | body, _ = io.ReadAll(r.Body) 256 | assert.Equal(t, http.StatusGone, r.StatusCode, string(body)) 257 | 258 | validToken, err := paid.CreateToken("iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6", "000", 120_000_000, paid.ExpTenSecPer100MB) 259 | require.NoError(t, err) 260 | 261 | r = makeRequest(t, nil, http.MethodHead, "/api/v3/streams/paid/iOS-13-AdobeXD/9cd2e93bfc752dd6560e43623f36d0c3504dbca6/abcdef/"+validToken, nil) 262 | body, _ = io.ReadAll(r.Body) 263 | assert.Equal(t, http.StatusOK, r.StatusCode, string(body)) 264 | } 265 | 266 | func TestHandleSpeech(t *testing.T) { 267 | r := makeRequest(t, nil, http.MethodGet, "/speech/8b2d4d78e9c8120e:f.jpg", nil) 268 | assert.Equal(t, http.StatusOK, r.StatusCode) 269 | assert.Equal(t, "image/jpeg", r.Header.Get("content-type")) 270 | } 271 | 272 | func Test_getPlaylistURL(t *testing.T) { 273 | stream := &Stream{URL: "lbryStreamURL"} 274 | // This is the pattern transcoder client should return. 
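// A rough summary of the mapping exercised by the subtests below, as implemented
// by getPlaylistURL in http_handlers.go (bracketed parts appear only when the
// corresponding query parameters are set):
//
//	/api/v4/streams/...     -> /api/v4/streams/tc/<stream URL>/<tcPath>
//	/v5/streams/start/...   -> /v5/streams/hls/<tcPath>[?ip=<ip>&hash=<hash>]
//	/v6/streams/.../start   -> [/<hash77>]/v6/streams/<tcPath>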
275 | tcURL := "claimID/SDhash/master.m3u8" 276 | 277 | t.Run("v4", func(t *testing.T) { 278 | assert.Equal(t, 279 | "/api/v4/streams/tc/lbryStreamURL/claimID/SDhash/master.m3u8", 280 | getPlaylistURL("/api/v4/streams/free/lbryStreamURL/claimID/SDhash/", url.Values{}, tcURL, stream), 281 | ) 282 | }) 283 | t.Run("v5", func(t *testing.T) { 284 | assert.Equal(t, 285 | "/v5/streams/hls/claimID/SDhash/master.m3u8", 286 | getPlaylistURL("/v5/streams/start/claimID/SDhash/", url.Values{}, tcURL, stream), 287 | ) 288 | }) 289 | t.Run("v5 signed", func(t *testing.T) { 290 | q := url.Values{} 291 | h := randomdata.Alphanumeric(32) 292 | ip := randomdata.IpV4Address() 293 | q.Add(paramHashHLS, h) 294 | q.Add(paramClientIP, ip) 295 | assert.Equal(t, 296 | fmt.Sprintf("/v5/streams/hls/claimID/SDhash/master.m3u8?ip=%s&hash=%s", ip, h), 297 | getPlaylistURL("/v5/streams/start/claimID/SDhash/", q, tcURL, stream), 298 | ) 299 | }) 300 | t.Run("v6", func(t *testing.T) { 301 | assert.Equal(t, 302 | "/v6/streams/claimID/SDhash/master.m3u8", 303 | getPlaylistURL("/v6/streams/claimID/SDhash/start", url.Values{}, tcURL, stream), 304 | ) 305 | }) 306 | t.Run("v6 with hash", func(t *testing.T) { 307 | q := url.Values{} 308 | h := "abc,89898" 309 | q.Add(paramHash77, h) 310 | assert.Equal(t, 311 | "/abc,89898/v6/streams/claimID/SDhash/master.m3u8", 312 | getPlaylistURL("/v6/streams/claimID/SDhash/start", q, tcURL, stream), 313 | ) 314 | }) 315 | } 316 | 317 | func Test_fitForTranscoder(t *testing.T) { 318 | gin.SetMode(gin.TestMode) 319 | var r *http.Request 320 | p := getTestPlayer() 321 | 322 | w := httptest.NewRecorder() 323 | c, e := gin.CreateTestContext(w) 324 | 325 | InstallPlayerRoutes(e, p) 326 | 327 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/api/v4/streams/free/claimname/abc/sdhash", nil) 328 | r.Header.Add("referer", "https://odysee.com/") 329 | c.Request = r 330 | e.HandleContext(c) 331 | 332 | s, err := p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 333 | require.NoError(t, err) 334 | assert.True(t, fitForTranscoder(c, s)) 335 | 336 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/api/v4/streams/free/claimname/abc/sdhash", nil) 337 | c.Request = r 338 | e.HandleContext(c) 339 | s, err = p.ResolveStream("9cd2e93bfc752dd6560e43623f36d0c3504dbca6") 340 | require.NoError(t, err) 341 | assert.False(t, fitForTranscoder(c, s)) 342 | 343 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/api/v4/streams/free/claimname/abc/sdhash", nil) 344 | c.Request = r 345 | e.HandleContext(c) 346 | r.Header.Add("Range", "bytes=12121-") 347 | s, err = p.ResolveStream("9cd2e93bfc752dd6560e43623f36d0c3504dbca6") 348 | require.NoError(t, err) 349 | assert.False(t, fitForTranscoder(c, s)) 350 | 351 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/api/v3/streams/free/claimname/abc/sdhash", nil) 352 | c.Request = r 353 | e.HandleContext(c) 354 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 355 | require.NoError(t, err) 356 | assert.False(t, fitForTranscoder(c, s)) 357 | 358 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/api/v5/streams/original/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 359 | c.Request = r 360 | e.HandleContext(c) 361 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 362 | require.NoError(t, err) 363 | assert.False(t, fitForTranscoder(c, s)) 364 | 365 | r, _ = http.NewRequest(http.MethodGet, 
"https://cdn.lbryplayer.xyz/v5/streams/start/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 366 | c.Request = r 367 | e.HandleContext(c) 368 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 369 | require.NoError(t, err) 370 | assert.False(t, fitForTranscoder(c, s)) 371 | 372 | r, _ = http.NewRequest(http.MethodHead, "https://cdn.lbryplayer.xyz/v5/streams/start/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 373 | c.Request = r 374 | e.HandleContext(c) 375 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 376 | require.NoError(t, err) 377 | assert.True(t, fitForTranscoder(c, s)) 378 | 379 | r, _ = http.NewRequest(http.MethodHead, "https://cdn.lbryplayer.xyz/v5/streams/start/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 380 | c.Request = r 381 | r.Header.Add("Range", "bytes=0-13337") 382 | e.HandleContext(c) 383 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 384 | require.NoError(t, err) 385 | assert.True(t, fitForTranscoder(c, s)) 386 | 387 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/v5/streams/original/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 388 | c.Request = r 389 | e.HandleContext(c) 390 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 391 | require.NoError(t, err) 392 | assert.False(t, fitForTranscoder(c, s)) 393 | 394 | r, _ = http.NewRequest(http.MethodHead, "https://cdn.lbryplayer.xyz/v6/streams/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 395 | c.Request = r 396 | e.HandleContext(c) 397 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 398 | require.NoError(t, err) 399 | assert.True(t, fitForTranscoder(c, s)) 400 | 401 | r, _ = http.NewRequest(http.MethodHead, "https://cdn.lbryplayer.xyz/v6/streams/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc.mp4", nil) 402 | c.Request = r 403 | e.HandleContext(c) 404 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 405 | require.NoError(t, err) 406 | assert.True(t, fitForTranscoder(c, s)) 407 | 408 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/v6/streams/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc", nil) 409 | c.Request = r 410 | e.HandleContext(c) 411 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 412 | require.NoError(t, err) 413 | assert.False(t, fitForTranscoder(c, s)) 414 | 415 | r, _ = http.NewRequest(http.MethodGet, "https://cdn.lbryplayer.xyz/v6/streams/6769855a9aa43b67086f9ff3c1a5bacb5698a27a/abcabc.mp4", nil) 416 | c.Request = r 417 | e.HandleContext(c) 418 | s, err = p.ResolveStream("6769855a9aa43b67086f9ff3c1a5bacb5698a27a") 419 | require.NoError(t, err) 420 | assert.False(t, fitForTranscoder(c, s)) 421 | } 422 | -------------------------------------------------------------------------------- /player/http_routes.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "github.com/gin-contrib/pprof" 5 | "github.com/gin-gonic/gin" 6 | ) 7 | 8 | const ProfileRoutePath = "/superdebug/pprof" 9 | 10 | func InstallPlayerRoutes(r *gin.Engine, p *Player) { 11 | playerHandler := NewRequestHandler(p) 12 | 13 | v1Router := r.Group("/content/claims/:claim_name/:claim_id/:filename") 14 | v1Router.HEAD("", playerHandler.Handle) 15 | v1Router.GET("", playerHandler.Handle) 16 | 17 | v2Router := r.Group("/api/v2/streams") 18 | v2Router.HEAD("/free/:claim_name/:claim_id", playerHandler.Handle) 19 | 
v2Router.GET("/free/:claim_name/:claim_id", playerHandler.Handle) 20 | v2Router.HEAD("/paid/:claim_name/:claim_id/:token", playerHandler.Handle) 21 | v2Router.GET("/paid/:claim_name/:claim_id/:token", playerHandler.Handle) 22 | 23 | v3Router := r.Group("/api/v3/streams") 24 | v3Router.HEAD("/free/:claim_name/:claim_id/:sd_hash", playerHandler.Handle) 25 | v3Router.GET("/free/:claim_name/:claim_id/:sd_hash", playerHandler.Handle) 26 | v3Router.HEAD("/paid/:claim_name/:claim_id/:sd_hash/:token", playerHandler.Handle) 27 | v3Router.GET("/paid/:claim_name/:claim_id/:sd_hash/:token", playerHandler.Handle) 28 | 29 | v4Router := r.Group("/api/v4/streams") 30 | v4Router.HEAD("/free/:claim_name/:claim_id/:sd_hash", playerHandler.Handle) 31 | v4Router.GET("/free/:claim_name/:claim_id/:sd_hash", playerHandler.Handle) 32 | 33 | v5Router := r.Group("/v5/streams") 34 | // HEAD will redirect to the transcoded version, if available; GET request will send a binary stream regardless 35 | v5Router.HEAD("/start/:claim_id/:sd_hash", playerHandler.Handle) 36 | v5Router.GET("/start/:claim_id/:sd_hash", playerHandler.Handle) 37 | v5Router.GET("/original/:claim_id/:sd_hash", playerHandler.Handle) 38 | 39 | v6Router := r.Group("/v6/streams") 40 | // HEAD will redirect to the transcoded version, if available; GET request will send a binary stream regardless 41 | v6Router.HEAD("/:claim_id/:sd_hash", playerHandler.Handle) 42 | v6Router.GET("/:claim_id/:sd_hash", playerHandler.Handle) 43 | 44 | if p.TCVideoPath != "" { 45 | v4Router.GET("/tc/:claim_name/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 46 | v4Router.HEAD("/tc/:claim_name/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 47 | 48 | v5Router.HEAD("/hls/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 49 | v5Router.GET("/hls/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 50 | 51 | v6Router.HEAD("/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 52 | v6Router.GET("/:claim_id/:sd_hash/:fragment", playerHandler.HandleTranscodedFragment) 53 | } 54 | 55 | r.HEAD(SpeechPrefix+"*whatever", playerHandler.Handle) 56 | r.GET(SpeechPrefix+"*whatever", playerHandler.Handle) 57 | } 58 | 59 | func InstallProfilingRoutes(r *gin.Engine) { 60 | pprof.Register(r, ProfileRoutePath) 61 | } 62 | -------------------------------------------------------------------------------- /player/media_types.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | func patchMediaType(t string) string { 4 | switch t { 5 | case "video/m4v", "video/webm": 6 | return "video/mp4" 7 | default: 8 | return t 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /player/memory_leak_test.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func BenchmarkMemoryLeak(b *testing.B) { 8 | b.ReportAllocs() 9 | 10 | } 11 | -------------------------------------------------------------------------------- /player/player.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "encoding/hex" 5 | "errors" 6 | "regexp" 7 | "strings" 8 | "time" 9 | 10 | "github.com/OdyseeTeam/player-server/internal/metrics" 11 | "github.com/OdyseeTeam/player-server/pkg/logger" 12 | "github.com/OdyseeTeam/player-server/pkg/paid" 13 | 
"github.com/prometheus/client_golang/prometheus" 14 | 15 | tclient "github.com/OdyseeTeam/transcoder/client" 16 | ljsonrpc "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 17 | 18 | "github.com/bluele/gcache" 19 | "github.com/gin-gonic/gin" 20 | ) 21 | 22 | const ( 23 | edgeTokenHeader = "Authorization" 24 | edgeTokenPrefix = "Token " 25 | resolveCacheDuration = 5 * time.Minute 26 | defaultSdkAddress = "https://api.na-backend.odysee.com/api/v1/proxy" 27 | ) 28 | 29 | var ( 30 | Logger = logger.GetLogger() 31 | reClaim = regexp.MustCompile("^[a-z0-9]{40}$") 32 | ) 33 | 34 | type PlayerOptions struct { 35 | edgeToken string 36 | lbrynetAddress string 37 | downloadsEnabled bool 38 | prefetch bool 39 | } 40 | 41 | // Player is an entry-point object to the new player package. 42 | type Player struct { 43 | lbrynetClient *ljsonrpc.Client 44 | blobSource *HotCache 45 | resolveCache gcache.Cache 46 | tclient *tclient.Client 47 | TCVideoPath string 48 | 49 | options PlayerOptions 50 | } 51 | 52 | func WithEdgeToken(token string) func(options *PlayerOptions) { 53 | return func(options *PlayerOptions) { 54 | options.edgeToken = token 55 | } 56 | } 57 | 58 | func WithLbrynetServer(address string) func(options *PlayerOptions) { 59 | return func(options *PlayerOptions) { 60 | options.lbrynetAddress = address 61 | } 62 | } 63 | 64 | func WithDownloads(allow bool) func(options *PlayerOptions) { 65 | return func(options *PlayerOptions) { 66 | options.downloadsEnabled = allow 67 | } 68 | } 69 | 70 | func WithPrefetch(enabled bool) func(options *PlayerOptions) { 71 | return func(options *PlayerOptions) { 72 | options.prefetch = enabled 73 | } 74 | } 75 | 76 | // NewPlayer initializes an instance with optional BlobStore. 77 | func NewPlayer(hotCache *HotCache, optionFuncs ...func(*PlayerOptions)) *Player { 78 | options := &PlayerOptions{ 79 | lbrynetAddress: defaultSdkAddress, 80 | downloadsEnabled: true, 81 | } 82 | 83 | for _, optionFunc := range optionFuncs { 84 | optionFunc(options) 85 | } 86 | 87 | lbrynetClient := ljsonrpc.NewClient(options.lbrynetAddress) 88 | lbrynetClient.SetRPCTimeout(10 * time.Second) 89 | return &Player{ 90 | lbrynetClient: lbrynetClient, 91 | blobSource: hotCache, 92 | resolveCache: gcache.New(10000).ARC().Build(), 93 | options: *options, 94 | } 95 | } 96 | 97 | func (p *Player) AddTranscoderClient(c *tclient.Client, path string) { 98 | p.tclient = c 99 | p.TCVideoPath = path 100 | } 101 | 102 | // Play delivers requested URI onto the supplied http.ResponseWriter. 103 | func (p *Player) Play(s *Stream, c *gin.Context) error { 104 | metrics.StreamsRunning.WithLabelValues(metrics.StreamOriginal).Inc() 105 | defer metrics.StreamsRunning.WithLabelValues(metrics.StreamOriginal).Dec() 106 | ServeStream(c, s) 107 | return nil 108 | } 109 | 110 | // ResolveStream resolves provided URI by calling the SDK. 
111 | func (p *Player) ResolveStream(claimId string) (*Stream, error) {
112 | start := time.Now()
113 | defer func(t time.Time) {
114 | metrics.ResolveTimeMS.Observe(float64(time.Since(t).Milliseconds()))
115 | }(start)
116 | 
117 | var claim *ljsonrpc.Claim
118 | 
119 | cachedClaim, cErr := p.resolveCache.Get(claimId)
120 | if cErr != nil {
121 | var err error
122 | claim, err = p.resolve(claimId)
123 | if err != nil {
124 | return nil, err
125 | }
126 | 
127 | maxDepth := 5
128 | for i := 0; i < maxDepth; i++ {
129 | repost := claim.Value.GetRepost()
130 | if repost == nil {
131 | break
132 | }
133 | if repost.ClaimHash == nil {
134 | return nil, errors.New("repost has no claim hash")
135 | }
136 | 
137 | claimID := hex.EncodeToString(rev(repost.ClaimHash))
138 | claim, err = p.resolve(claimID)
139 | if err != nil {
140 | if errors.Is(err, ErrClaimNotFound) {
141 | return nil, errors.New("reposted claim not found")
142 | }
143 | return nil, err
144 | }
145 | }
146 | metrics.ResolveSuccesses.WithLabelValues(metrics.ResolveSourceOApi).Inc()
147 | _ = p.resolveCache.SetWithExpire(claimId, claim, resolveCacheDuration)
148 | } else {
149 | metrics.ResolveSuccessesDuration.WithLabelValues(metrics.ResolveSourceCache).Observe(float64(time.Since(start)))
150 | metrics.ResolveSuccesses.WithLabelValues(metrics.ResolveSourceCache).Inc()
151 | claim = cachedClaim.(*ljsonrpc.Claim)
152 | }
153 | 
154 | if claim.Value.GetStream() == nil {
155 | return nil, errors.New("claim is not stream")
156 | }
157 | if claim.Value.GetStream().GetSource() == nil {
158 | return nil, errors.New("stream has no source")
159 | }
160 | 
161 | return NewStream(p, claim), nil
162 | }
163 | 
164 | // resolve the claim
165 | func (p *Player) resolve(claimID string) (*ljsonrpc.Claim, error) {
166 | generalFailureLabels := prometheus.Labels{
167 | metrics.ResolveSource: metrics.ResolveSourceOApi,
168 | metrics.ResolveKind: metrics.ResolveFailureGeneral,
169 | }
170 | notFoundFailureLabels := prometheus.Labels{
171 | metrics.ResolveSource: metrics.ResolveSourceOApi,
172 | metrics.ResolveKind: metrics.ResolveFailureClaimNotFound,
173 | }
174 | 
175 | start := time.Now()
176 | 
177 | // TODO: Get rid of the resolve call when ClaimSearchArgs acquires URI param
178 | if !reClaim.MatchString(claimID) {
179 | resolved, err := p.lbrynetClient.Resolve(claimID)
180 | if err != nil {
181 | metrics.ResolveFailuresDuration.With(generalFailureLabels).Observe(float64(time.Since(start)))
182 | metrics.ResolveFailures.With(generalFailureLabels).Inc()
183 | return nil, err
184 | }
185 | 
186 | claim := (*resolved)[claimID]
187 | if claim.CanonicalURL == "" {
188 | metrics.ResolveFailuresDuration.With(notFoundFailureLabels).Observe(float64(time.Since(start)))
189 | metrics.ResolveFailures.With(notFoundFailureLabels).Inc()
190 | return nil, ErrClaimNotFound
191 | }
192 | return &claim, nil
193 | }
194 | resp, err := p.lbrynetClient.ClaimSearch(ljsonrpc.ClaimSearchArgs{ClaimID: &claimID, PageSize: 1, Page: 1})
195 | if err != nil {
196 | metrics.ResolveFailuresDuration.With(prometheus.Labels{
197 | metrics.ResolveSource: metrics.ResolveSourceOApi,
198 | metrics.ResolveKind: metrics.ResolveFailureGeneral,
199 | }).Observe(float64(time.Since(start)))
200 | metrics.ResolveFailures.With(prometheus.Labels{
201 | metrics.ResolveSource: metrics.ResolveSourceOApi,
202 | metrics.ResolveKind: metrics.ResolveFailureGeneral,
203 | }).Inc()
204 | return nil, err
205 | }
206 | if len(resp.Claims) == 0 {
207 | metrics.ResolveFailuresDuration.With(prometheus.Labels{ 208 | 
metrics.ResolveSource: metrics.ResolveSourceOApi,
209 | metrics.ResolveKind: metrics.ResolveFailureClaimNotFound,
210 | }).Observe(float64(time.Since(start)))
211 | metrics.ResolveFailures.With(prometheus.Labels{
212 | metrics.ResolveSource: metrics.ResolveSourceOApi,
213 | metrics.ResolveKind: metrics.ResolveFailureClaimNotFound,
214 | }).Inc()
215 | return nil, ErrClaimNotFound
216 | }
217 | return &resp.Claims[0], nil
218 | }
219 | 
220 | // VerifyAccess checks if the stream is protected and the token supplied matches the stream
221 | func (p *Player) VerifyAccess(stream *Stream, ctx *gin.Context) error {
222 | protectedMap := map[string]bool{
223 | "c:members-only": true,
224 | "c:rental": true,
225 | "c:purchase": true,
226 | "c:unlisted": true,
227 | }
228 | protectedWithTimeMap := map[string]bool{
229 | "c:scheduled:show": true,
230 | "c:scheduled:hide": true,
231 | }
232 | for _, t := range stream.Claim.Value.Tags {
233 | if protectedMap[t] ||
234 | strings.HasPrefix(t, "purchase:") ||
235 | strings.HasPrefix(t, "rental:") ||
236 | (protectedWithTimeMap[t] && stream.Claim.Value.GetStream().ReleaseTime > time.Now().Unix()) {
237 | th := ctx.Request.Header.Get(edgeTokenHeader)
238 | if th == "" {
239 | return ErrEdgeCredentialsMissing
240 | }
241 | if p.options.edgeToken == "" {
242 | return ErrEdgeAuthenticationMisconfigured
243 | }
244 | if strings.TrimPrefix(th, edgeTokenPrefix) != p.options.edgeToken {
245 | return ErrEdgeAuthenticationFailed
246 | }
247 | return nil
248 | }
249 | }
250 | 
251 | token := ctx.Param("token")
252 | if stream.resolvedStream.Fee == nil || stream.resolvedStream.Fee.Amount <= 0 {
253 | return nil
254 | }
255 | 
256 | Logger.WithField("uri", stream.URI()).Info("paid stream requested")
257 | if token == "" {
258 | return ErrPaidStream
259 | }
260 | if err := paid.VerifyStreamAccess(strings.Replace(stream.URI(), "#", "/", 1), token); err != nil {
261 | return err
262 | }
263 | return nil
264 | }
265 | 
266 | func rev(b []byte) []byte {
267 | r := make([]byte, len(b))
268 | for left, right := 0, len(b)-1; left < right; left, right = left+1, right-1 {
269 | r[left], r[right] = b[right], b[left]
270 | }
271 | return r
272 | }
273 | 
-------------------------------------------------------------------------------- /player/player_test.go: --------------------------------------------------------------------------------
1 | package player
2 | 
3 | import (
4 | "encoding/hex"
5 | "encoding/json"
6 | "io"
7 | "math/rand"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "path/filepath"
12 | "testing"
13 | 
14 | ljsonrpc "github.com/lbryio/lbry.go/v2/extras/jsonrpc"
15 | "github.com/lbryio/reflector.go/store"
16 | 
17 | "github.com/gin-gonic/gin"
18 | "github.com/stretchr/testify/assert"
19 | "github.com/stretchr/testify/require"
20 | "github.com/ybbus/jsonrpc"
21 | )
22 | 
23 | // An MP4 file, size: 158433824 bytes, blobs: 77
24 | const claimID = "6769855a9aa43b67086f9ff3c1a5bacb5698a27a"
25 | 
26 | // An MP4 file, size: 128791189 bytes, blobs: 63
27 | const knownSizeClaimID = "0590f924bbee6627a2e79f7f2ff7dfb50bf2877c"
28 | 
29 | type knownStream struct {
30 | uri string
31 | size int64
32 | blobsNum int
33 | }
34 | 
35 | var knownStreams = []knownStream{
36 | {uri: claimID, size: 158433824, blobsNum: 77},
37 | {uri: knownSizeClaimID, size: 128791189, blobsNum: 63},
38 | }
39 | 
40 | func randomString(n int) string {
41 | var letter = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
42 | 
43 | b := make([]rune, n)
44 | for i := range b {
45 | b[i] = 
letter[rand.Intn(len(letter))] 46 | } 47 | return string(b) 48 | } 49 | 50 | func getTestPlayer() *Player { 51 | origin := store.NewHttpStore("source.odycdn.com:5569", "") 52 | ds := NewDecryptedCache(origin) 53 | return NewPlayer( 54 | NewHotCache(*ds, 100000000), 55 | WithDownloads(true), 56 | WithEdgeToken(testEdgeToken), 57 | ) 58 | } 59 | 60 | func loadResponseFixture(t *testing.T, f string) jsonrpc.RPCResponse { 61 | var r jsonrpc.RPCResponse 62 | 63 | absPath, _ := filepath.Abs(filepath.Join("./testdata", f)) 64 | rawJSON, err := os.ReadFile(absPath) 65 | require.NoError(t, err) 66 | err = json.Unmarshal(rawJSON, &r) 67 | require.NoError(t, err) 68 | return r 69 | } 70 | 71 | func TestPlayerResolveStream(t *testing.T) { 72 | p := getTestPlayer() 73 | s, err := p.ResolveStream("389ba57c9f76b859c2763c4b9a419bd78b1a8dd0") 74 | require.NoError(t, err) 75 | err = s.PrepareForReading() 76 | require.NoError(t, err) 77 | } 78 | 79 | func TestPlayerResolveStreamNotFound(t *testing.T) { 80 | p := getTestPlayer() 81 | s, err := p.ResolveStream(randomString(20)) 82 | assert.Equal(t, ErrClaimNotFound, err) 83 | assert.Nil(t, s) 84 | } 85 | 86 | func TestStreamSeek(t *testing.T) { 87 | p := getTestPlayer() 88 | 89 | for _, stream := range knownStreams { 90 | s, err := p.ResolveStream(stream.uri) 91 | require.NoError(t, err) 92 | err = s.PrepareForReading() 93 | require.NoError(t, err) 94 | 95 | // Seeking to the end 96 | n, err := s.Seek(0, io.SeekEnd) 97 | require.NoError(t, err) 98 | assert.EqualValues(t, stream.size, n) 99 | 100 | // Seeking to the middle of the stream 101 | n, err = s.Seek(stream.size/2, io.SeekStart) 102 | require.NoError(t, err) 103 | assert.EqualValues(t, stream.size/2, n) 104 | 105 | // Seeking back to the beginning of the stream 106 | n, err = s.Seek(-stream.size/2, io.SeekCurrent) 107 | require.NoError(t, err) 108 | assert.EqualValues(t, 0, n) 109 | 110 | n, err = s.Seek(0, io.SeekStart) 111 | require.NoError(t, err) 112 | assert.EqualValues(t, 0, n) 113 | 114 | s.Seek(0, io.SeekEnd) 115 | n, err = s.Seek(-999999999, io.SeekEnd) 116 | assert.EqualValues(t, 0, n) 117 | assert.Equal(t, ErrSeekOutOfBounds, err) 118 | 119 | n, err = s.Seek(-99, io.SeekStart) 120 | assert.EqualValues(t, 0, n) 121 | assert.Equal(t, ErrSeekBeforeStart, err) 122 | 123 | n, err = s.Seek(999999999, io.SeekStart) 124 | assert.EqualValues(t, 0, n) 125 | assert.Equal(t, ErrSeekOutOfBounds, err) 126 | } 127 | } 128 | 129 | func TestStreamRead(t *testing.T) { 130 | p := getTestPlayer() 131 | s, err := p.ResolveStream(claimID) 132 | require.NoError(t, err) 133 | 134 | err = s.PrepareForReading() 135 | require.NoError(t, err) 136 | 137 | n, err := s.Seek(4000000, io.SeekStart) 138 | require.NoError(t, err) 139 | require.EqualValues(t, 4000000, n) 140 | 141 | readData := make([]byte, 105) 142 | readNum, err := s.Read(readData) 143 | require.NoError(t, err) 144 | assert.Equal(t, 105, readNum) 145 | expectedData, err := hex.DecodeString( 146 | "6E81C93A90DD3A322190C8D608E29AA929867407596665097B5AE780412" + 147 | "61638A51C10BC26770AFFEF1533715FBD1428DCADEDC7BEA5D7A9C7D170" + 148 | "B71EF38E7138D24B0C7E86D791695EDAE1B88EDBE54F95C98EF3DCFD91D" + 149 | "A025C284EE37D8FEEA2EA84B76B9A22D3") 150 | require.NoError(t, err) 151 | assert.Equal(t, expectedData, readData) 152 | } 153 | 154 | func TestStreamFilenameOldMime(t *testing.T) { 155 | r := loadResponseFixture(t, "old_mime.json") 156 | res := &ljsonrpc.ResolveResponse{} 157 | ljsonrpc.Decode(r.Result, res) 158 | uri := 
"lbry://@Deterrence-Dispensed#2/Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed#1" 159 | claim := (*res)[uri] 160 | s := NewStream(&Player{}, &claim) 161 | assert.Equal(t, "ivans100diy30rdar-15magazinev10-deterrencedispensed.zip", s.Filename()) 162 | } 163 | 164 | func TestStreamFilenameNew(t *testing.T) { 165 | r := loadResponseFixture(t, "new_stream.json") 166 | res := &ljsonrpc.ResolveResponse{} 167 | ljsonrpc.Decode(r.Result, res) 168 | uri := "what" 169 | claim := (*res)[uri] 170 | s := NewStream(&Player{}, &claim) 171 | assert.Equal(t, "1 dog and chicken.mp4", s.Filename()) 172 | } 173 | 174 | func TestStreamReadHotCache(t *testing.T) { 175 | p := getTestPlayer() 176 | 177 | s1, err := p.ResolveStream(claimID) 178 | require.NoError(t, err) 179 | 180 | assert.EqualValues(t, 0, p.blobSource.cache.Len(false)) 181 | 182 | err = s1.PrepareForReading() 183 | require.NoError(t, err) 184 | 185 | assert.EqualValues(t, 2, p.blobSource.cache.Len(false)) // 2 because it gets the sd blob and the last blob when setting stream size 186 | 187 | // Warm up the cache 188 | n, err := s1.Seek(4000000, io.SeekStart) 189 | require.NoError(t, err) 190 | require.EqualValues(t, 4000000, n) 191 | 192 | readData := make([]byte, 105) 193 | readNum, err := s1.Read(readData) 194 | require.NoError(t, err) 195 | assert.Equal(t, 105, readNum) 196 | 197 | assert.EqualValues(t, 3, p.blobSource.cache.Len(false)) 198 | 199 | // Re-get the stream 200 | 201 | s2, err := p.ResolveStream(claimID) 202 | require.NoError(t, err) 203 | 204 | err = s2.PrepareForReading() 205 | require.NoError(t, err) 206 | 207 | assert.EqualValues(t, 3, p.blobSource.cache.Len(false)) 208 | 209 | for i := 0; i < 2; i++ { 210 | n, err := s2.Seek(4000000, io.SeekStart) 211 | require.NoError(t, err) 212 | require.EqualValues(t, 4000000, n) 213 | 214 | readData := make([]byte, 105) 215 | readNum, err := s2.Read(readData) 216 | require.NoError(t, err) 217 | assert.Equal(t, 105, readNum) 218 | expectedData, err := hex.DecodeString( 219 | "6E81C93A90DD3A322190C8D608E29AA929867407596665097B5AE780412" + 220 | "61638A51C10BC26770AFFEF1533715FBD1428DCADEDC7BEA5D7A9C7D170" + 221 | "B71EF38E7138D24B0C7E86D791695EDAE1B88EDBE54F95C98EF3DCFD91D" + 222 | "A025C284EE37D8FEEA2EA84B76B9A22D3") 223 | require.NoError(t, err) 224 | assert.Equal(t, expectedData, readData) 225 | } 226 | 227 | // no new blobs should have been fetched because they are all cached 228 | assert.EqualValues(t, 3, p.blobSource.cache.Len(false)) 229 | 230 | n, err = s2.Seek(2000000, io.SeekCurrent) 231 | require.NoError(t, err) 232 | require.EqualValues(t, 6000105, n) 233 | 234 | readData = make([]byte, 105) 235 | readNum, err = s2.Read(readData) 236 | require.NoError(t, err) 237 | assert.Equal(t, 105, readNum) 238 | require.NoError(t, err) 239 | 240 | assert.EqualValues(t, 4, p.blobSource.cache.Len(false)) 241 | } 242 | 243 | func TestStreamReadOutOfBounds(t *testing.T) { 244 | p := getTestPlayer() 245 | s, err := p.ResolveStream(claimID) 246 | require.NoError(t, err) 247 | 248 | err = s.PrepareForReading() 249 | require.NoError(t, err) 250 | 251 | n, err := s.Seek(4000000, io.SeekStart) 252 | require.NoError(t, err) 253 | require.EqualValues(t, 4000000, n) 254 | 255 | readData := make([]byte, 105) 256 | readNum, err := s.Read(readData) 257 | require.NoError(t, err) 258 | assert.Equal(t, 105, readNum) 259 | expectedData, err := hex.DecodeString( 260 | "6E81C93A90DD3A322190C8D608E29AA929867407596665097B5AE780412" + 261 | "61638A51C10BC26770AFFEF1533715FBD1428DCADEDC7BEA5D7A9C7D170" + 262 | 
"B71EF38E7138D24B0C7E86D791695EDAE1B88EDBE54F95C98EF3DCFD91D" + 263 | "A025C284EE37D8FEEA2EA84B76B9A22D3") 264 | require.NoError(t, err) 265 | assert.Equal(t, expectedData, readData) 266 | } 267 | 268 | func TestVerifyAccess(t *testing.T) { 269 | restrictedUrls := []string{ 270 | "lbry://@gifprofile#7/rental1#8", 271 | "lbry://@gifprofile#7/purchase1#2", 272 | "lbry://@gifprofile#7/members-only#7", 273 | } 274 | 275 | gin.SetMode(gin.TestMode) 276 | p := getTestPlayer() 277 | 278 | for _, url := range restrictedUrls { 279 | t.Run(url, func(t *testing.T) { 280 | ctx, e := gin.CreateTestContext(httptest.NewRecorder()) 281 | req, _ := http.NewRequest(http.MethodGet, "", nil) 282 | ctx.Request = req 283 | e.HandleContext(ctx) 284 | 285 | s, err := p.ResolveStream(url) 286 | require.NoError(t, err) 287 | err = p.VerifyAccess(s, ctx) 288 | require.ErrorIs(t, err, ErrEdgeCredentialsMissing) 289 | }) 290 | } 291 | 292 | for _, url := range restrictedUrls { 293 | t.Run(url, func(t *testing.T) { 294 | ctx, e := gin.CreateTestContext(httptest.NewRecorder()) 295 | req, _ := http.NewRequest(http.MethodGet, "", nil) 296 | req.Header.Set(edgeTokenHeader, testEdgeToken) 297 | ctx.Request = req 298 | e.HandleContext(ctx) 299 | 300 | s, err := p.ResolveStream(url) 301 | require.NoError(t, err) 302 | err = p.VerifyAccess(s, ctx) 303 | require.NoError(t, err) 304 | }) 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /player/server.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/textproto" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | "github.com/aybabtme/iocontrol" 14 | "github.com/gin-gonic/gin" 15 | ) 16 | 17 | var ThrottleScale float64 = 1.5 18 | var ThrottleSwitch = true 19 | 20 | // errNoOverlap is returned by serveContent's parseRange if first-byte-pos of 21 | // all of the byte-range-spec values is greater than the content size. 22 | var errNoOverlap = errors.New("invalid range: failed to overlap") 23 | 24 | // ServeStream replies to the request using the content in the 25 | // provided ReadSeeker. The main benefit of ServeStream over io.Copy 26 | // is that it handles Range requests properly, sets the MIME type, and 27 | // handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since, 28 | // and If-Range requests. 29 | // 30 | // The content's Seek method must work: ServeStream uses 31 | // a seek to the end of the content to determine its size. 32 | // 33 | // If the caller has set w's ETag header formatted per RFC 7232, section 2.3, 34 | // ServeStream uses it to handle requests using If-Match, If-None-Match, or If-Range. 35 | // 36 | // content must be seeked to the beginning of the file. 37 | func ServeStream(c *gin.Context, content *Stream) { 38 | code := http.StatusOK 39 | size := int64(content.Size) 40 | 41 | // handle Content-Range header. 42 | sendSize := size 43 | var sendContent io.Reader = content 44 | if size >= 0 { 45 | ranges, err := parseRange(c.GetHeader("Range"), size) 46 | if err != nil { 47 | if err == errNoOverlap { 48 | c.Header("Content-Range", fmt.Sprintf("bytes */%d", size)) 49 | } 50 | Error(c, err.Error(), http.StatusRequestedRangeNotSatisfiable) 51 | return 52 | } 53 | 54 | if sumRangesSize(ranges) > size { 55 | // The total number of bytes in all the ranges 56 | // is larger than the size of the file by 57 | // itself, so this is probably an attack, or a 58 | // dumb client. 
Ignore the range request. 59 | ranges = nil 60 | } 61 | 62 | if len(ranges) == 1 { 63 | // RFC 7233, Section 4.1: 64 | // "If a single part is being transferred, the server 65 | // generating the 206 response MUST generate a 66 | // Content-Range header field, describing what range 67 | // of the selected representation is enclosed, and a 68 | // payload consisting of the range. 69 | // ... 70 | // A server MUST NOT generate a multipart response to 71 | // a request for a single range, since a client that 72 | // does not request multiple parts might not support 73 | // multipart responses." 74 | ra := ranges[0] 75 | if _, err := content.Seek(ra.start, io.SeekStart); err != nil { 76 | Error(c, err.Error(), http.StatusRequestedRangeNotSatisfiable) 77 | return 78 | } 79 | 80 | if c.Request.Method != http.MethodHead { 81 | _, err = content.GetChunk(int(getRange(ra.start, 1).FirstChunkIdx)) 82 | if err != nil { 83 | Error(c, err.Error(), http.StatusRequestedRangeNotSatisfiable) 84 | return 85 | } 86 | } 87 | 88 | sendSize = ra.length 89 | code = http.StatusPartialContent 90 | c.Header("Content-Range", ra.contentRange(size)) 91 | } 92 | 93 | c.Header("Accept-Ranges", "bytes") 94 | if c.GetHeader("Content-Encoding") == "" { 95 | c.Header("Content-Length", strconv.FormatInt(sendSize, 10)) 96 | } 97 | } 98 | 99 | c.Status(code) 100 | 101 | if c.Request.Method != http.MethodHead { 102 | if ThrottleSwitch { 103 | throttledW := iocontrol.ThrottledWriter(c.Writer, int(ThrottleScale*iocontrol.MiB), 1*time.Second) 104 | io.CopyN(throttledW, sendContent, sendSize) 105 | } else { 106 | io.CopyN(c.Writer, sendContent, sendSize) 107 | } 108 | } 109 | } 110 | 111 | // Error replies to the request with the specified error message and HTTP code. 112 | // It does not otherwise end the request; the caller should ensure no further 113 | // writes are done to w. 114 | // The error message should be plain text. 115 | func Error(c *gin.Context, error string, code int) { 116 | c.Header("Content-Type", "text/plain; charset=utf-8") 117 | c.Header("X-Content-Type-Options", "nosniff") 118 | c.Status(code) 119 | fmt.Fprintln(c.Writer, error) 120 | } 121 | 122 | // parseRange parses a Range header string as per RFC 7233. 123 | // errNoOverlap is returned if none of the ranges overlap. 124 | func parseRange(s string, size int64) ([]httpRange, error) { 125 | if s == "" { 126 | return nil, nil // header not present 127 | } 128 | const b = "bytes=" 129 | if !strings.HasPrefix(s, b) { 130 | return nil, errors.New("invalid range") 131 | } 132 | var ranges []httpRange 133 | noOverlap := false 134 | for _, ra := range strings.Split(s[len(b):], ",") { 135 | ra = strings.TrimSpace(ra) 136 | if ra == "" { 137 | continue 138 | } 139 | i := strings.Index(ra, "-") 140 | if i < 0 { 141 | return nil, errors.New("invalid range") 142 | } 143 | start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) 144 | var r httpRange 145 | if start == "" { 146 | // If no start is specified, end specifies the 147 | // range start relative to the end of the file. 148 | i, err := strconv.ParseInt(end, 10, 64) 149 | if err != nil { 150 | return nil, errors.New("invalid range") 151 | } 152 | if i > size { 153 | i = size 154 | } 155 | r.start = size - i 156 | r.length = size - r.start 157 | } else { 158 | i, err := strconv.ParseInt(start, 10, 64) 159 | if err != nil || i < 0 { 160 | return nil, errors.New("invalid range") 161 | } 162 | if i >= size { 163 | // If the range begins after the size of the content, 164 | // then it does not overlap. 
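// (For example, with size=1000 a spec such as "bytes=1500-" is dropped here;
// if every spec in the header is dropped this way, parseRange returns
// errNoOverlap and ServeStream responds with 416 and "Content-Range: bytes */1000".)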
165 | noOverlap = true 166 | continue 167 | } 168 | r.start = i 169 | if end == "" { 170 | // If no end is specified, range extends to end of the file. 171 | r.length = size - r.start 172 | } else { 173 | i, err := strconv.ParseInt(end, 10, 64) 174 | if err != nil || r.start > i { 175 | return nil, errors.New("invalid range") 176 | } 177 | if i >= size { 178 | i = size - 1 179 | } 180 | r.length = i - r.start + 1 181 | } 182 | } 183 | ranges = append(ranges, r) 184 | } 185 | if noOverlap && len(ranges) == 0 { 186 | // The specified ranges did not overlap with the content. 187 | return nil, errNoOverlap 188 | } 189 | return ranges, nil 190 | } 191 | 192 | // httpRange specifies the byte range to be sent to the client. 193 | type httpRange struct { 194 | start, length int64 195 | } 196 | 197 | func sumRangesSize(ranges []httpRange) (size int64) { 198 | for _, ra := range ranges { 199 | size += ra.length 200 | } 201 | return 202 | } 203 | 204 | func (r httpRange) contentRange(size int64) string { 205 | return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size) 206 | } 207 | 208 | func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader { 209 | return textproto.MIMEHeader{ 210 | "Content-Range": {r.contentRange(size)}, 211 | "Content-Type": {contentType}, 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /player/stream.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "io" 7 | "math" 8 | "time" 9 | 10 | "github.com/OdyseeTeam/player-server/internal/metrics" 11 | "github.com/OdyseeTeam/player-server/pkg/mime" 12 | "github.com/lbryio/lbry.go/v2/extras/errors" 13 | 14 | ljsonrpc "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 15 | "github.com/lbryio/lbry.go/v2/stream" 16 | pb "github.com/lbryio/types/v2/go" 17 | ) 18 | 19 | // Stream provides an io.ReadSeeker interface to a stream of blobs to be used by standard http library for range requests, 20 | // as well as some stream metadata. 21 | type Stream struct { 22 | // URI string 23 | URL, ClaimID string 24 | Size uint64 25 | ContentType string 26 | hash string 27 | prefetchedChunks map[int]bool 28 | 29 | player *Player 30 | Claim *ljsonrpc.Claim 31 | source *pb.Source 32 | resolvedStream *pb.Stream 33 | sdBlob *stream.SDBlob 34 | seekOffset int64 35 | 36 | currentChunkHash string 37 | currentChunk *ReadableChunk 38 | } 39 | 40 | func NewStream(p *Player, claim *ljsonrpc.Claim) *Stream { 41 | stream := claim.Value.GetStream() 42 | source := stream.GetSource() 43 | return &Stream{ 44 | URL: claim.Name, 45 | ClaimID: claim.ClaimID, 46 | ContentType: patchMediaType(source.MediaType), 47 | Size: source.GetSize(), 48 | 49 | player: p, 50 | Claim: claim, 51 | source: source, 52 | resolvedStream: stream, 53 | hash: hex.EncodeToString(source.SdHash), 54 | prefetchedChunks: make(map[int]bool, 50), 55 | } 56 | } 57 | 58 | func (s *Stream) URI() string { 59 | return s.URL + "#" + s.ClaimID 60 | } 61 | 62 | // Filename detects name of the original file, suitable for saving under on the filesystem. 
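// For illustration (assuming the project's pkg/mime maps "video/mp4" to "mp4",
// which is not shown in this file): a source with no explicit file name, a
// normalized claim name of, say, "my-video", and a content type of "video/mp4"
// yields "my-video.mp4"; if no extension is known for the content type, the bare
// claim name is returned.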
63 | func (s *Stream) Filename() string { 64 | name := s.source.GetName() 65 | if name != "" { 66 | return name 67 | } 68 | name = s.Claim.NormalizedName 69 | // exts, err := mime.ExtensionsByType(s.ContentType) 70 | ext := mime.GetExtensionByType(s.ContentType) 71 | if ext == "" { 72 | return name 73 | } 74 | return fmt.Sprintf("%v.%v", name, ext) 75 | } 76 | 77 | // PrepareForReading downloads stream description from the reflector and tries to determine stream size 78 | // using several methods, including legacy ones for streams that do not have metadata. 79 | func (s *Stream) PrepareForReading() error { 80 | sdBlob, err := s.player.blobSource.GetSDBlob(s.hash) 81 | if err != nil { 82 | return err 83 | } 84 | 85 | s.sdBlob = sdBlob 86 | 87 | s.setSize() 88 | 89 | return nil 90 | } 91 | 92 | func (s *Stream) setSize() { 93 | if s.Size > 0 { 94 | return 95 | } 96 | 97 | if s.source.GetSize() > 0 { 98 | s.Size = s.source.GetSize() 99 | return 100 | } 101 | 102 | size, err := s.getStreamSizeFromLastBlobSize() 103 | if err == nil { 104 | s.Size = size 105 | return 106 | } 107 | 108 | Logger.Infof("couldn't figure out stream %v size from last chunk: %v", s.URI(), err) 109 | for _, blob := range s.sdBlob.BlobInfos { 110 | if blob.Length == stream.MaxBlobSize { 111 | size += MaxChunkSize 112 | } else { 113 | size += uint64(blob.Length - 1) 114 | } 115 | } 116 | // last padding is unguessable 117 | size -= 16 118 | 119 | s.Size = size 120 | } 121 | 122 | // Timestamp returns stream creation timestamp, used in HTTP response header. 123 | func (s *Stream) Timestamp() time.Time { 124 | return time.Unix(int64(s.Claim.Timestamp), 0) 125 | } 126 | 127 | // Seek implements io.ReadSeeker interface and is meant to be called by http.ServeContent. 128 | func (s *Stream) Seek(offset int64, whence int) (int64, error) { 129 | var newOffset int64 130 | 131 | if s.Size == 0 { 132 | return 0, ErrStreamSizeZero 133 | } else if uint64(math.Abs(float64(offset))) > s.Size { 134 | return 0, ErrSeekOutOfBounds 135 | } 136 | 137 | switch whence { 138 | case io.SeekStart: 139 | newOffset = offset 140 | case io.SeekCurrent: 141 | newOffset = s.seekOffset + offset 142 | case io.SeekEnd: 143 | newOffset = int64(s.Size) - offset 144 | default: 145 | return 0, errors.Err("invalid seek whence argument") 146 | } 147 | 148 | if newOffset < 0 { 149 | return 0, ErrSeekBeforeStart 150 | } 151 | 152 | s.seekOffset = newOffset 153 | return newOffset, nil 154 | } 155 | 156 | // Read implements io.ReadSeeker interface and is meant to be called by http.ServeContent. 157 | // Actual chunk retrieval and delivery happens in s.readFromChunks(). 
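// The chunk range to fetch is derived from the current seek offset via getRange, and the offset is advanced by the number of bytes actually read.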
158 | func (s *Stream) Read(dest []byte) (n int, err error) { 159 | n, err = s.readFromChunks(getRange(s.seekOffset, len(dest)), dest) 160 | s.seekOffset += int64(n) 161 | 162 | metrics.OutBytes.Add(float64(n)) 163 | 164 | if err != nil { 165 | Logger.Errorf("failed to read from stream %v at offset %v: %v", s.URI(), s.seekOffset, errors.FullTrace(err)) 166 | } 167 | 168 | if n == 0 && err == nil { 169 | err = errors.Err("read 0 bytes triggering an endless loop, exiting stream") 170 | Logger.Errorf("failed to read from stream %v at offset %v: %v", s.URI(), s.seekOffset, err) 171 | } 172 | 173 | return n, err 174 | } 175 | 176 | func (s *Stream) readFromChunks(sr streamRange, dest []byte) (int, error) { 177 | var read int 178 | var index int64 179 | var err error 180 | index, read, err = s.attemptReadFromChunks(sr, dest) 181 | if err != nil { 182 | return read, err 183 | } 184 | if read <= 0 { 185 | Logger.Warnf("Read 0 bytes for %s at blob index %d/%d at offset %d", s.URI(), int(index), len(s.sdBlob.BlobInfos), s.seekOffset) 186 | } 187 | 188 | return read, nil 189 | } 190 | 191 | func (s *Stream) attemptReadFromChunks(sr streamRange, dest []byte) (i int64, read int, err error) { 192 | i = sr.FirstChunkIdx 193 | for i < sr.LastChunkIdx+1 { 194 | offset, readLen := sr.ByteRangeForChunk(i) 195 | 196 | b, err := s.GetChunk(int(i)) 197 | if err != nil { 198 | return i, read, err 199 | } 200 | 201 | n, err := b.Read(offset, readLen, dest[read:]) 202 | read += n 203 | if err != nil { 204 | return i, read, err 205 | } 206 | 207 | i++ 208 | } 209 | return i, read, nil 210 | } 211 | 212 | // GetChunk returns the nth ReadableChunk of the stream. 213 | func (s *Stream) GetChunk(chunkIdx int) (ReadableChunk, error) { 214 | if chunkIdx >= len(s.sdBlob.BlobInfos) { 215 | return nil, errors.Err("blob index out of bounds") 216 | } 217 | 218 | bi := s.sdBlob.BlobInfos[chunkIdx] 219 | hash := hex.EncodeToString(bi.BlobHash) 220 | 221 | if s.currentChunkHash == hash { 222 | return *s.currentChunk, nil 223 | } 224 | 225 | chunk, err := s.player.blobSource.GetChunk(hash, s.sdBlob.Key, bi.IV) 226 | if err != nil || chunk == nil { 227 | return nil, err 228 | } 229 | 230 | chunkToPrefetch := chunkIdx + 1 231 | prefetched := s.prefetchedChunks[chunkToPrefetch] 232 | if s.player.options.prefetch && !prefetched { 233 | s.prefetchedChunks[chunkToPrefetch] = true 234 | go s.prefetchChunk(chunkToPrefetch) 235 | } 236 | 237 | s.currentChunk = &chunk 238 | s.currentChunkHash = hash 239 | return chunk, nil 240 | } 241 | 242 | func (s *Stream) prefetchChunk(chunkIdx int) { 243 | prefetchLen := int(PrefetchCount) 244 | chunksLeft := len(s.sdBlob.BlobInfos) - chunkIdx - 1 // Last blob is empty 245 | if chunksLeft < prefetchLen { 246 | prefetchLen = chunksLeft 247 | } 248 | if prefetchLen <= 0 { 249 | return 250 | } 251 | 252 | Logger.Debugf("prefetching %d chunks to local cache", prefetchLen) 253 | for _, bi := range s.sdBlob.BlobInfos[chunkIdx : chunkIdx+prefetchLen] { 254 | hash := hex.EncodeToString(bi.BlobHash) 255 | 256 | if s.player.blobSource.IsCached(hash) { 257 | Logger.Debugf("chunk %v found in cache, not prefetching", hash) 258 | continue 259 | } 260 | 261 | Logger.Debugf("prefetching chunk %s", hash) 262 | _, err := s.player.blobSource.GetChunk(hash, s.sdBlob.Key, bi.IV) 263 | if err != nil { 264 | Logger.Errorf("failed to prefetch chunk %s: %s", hash, err.Error()) 265 | return 266 | } 267 | } 268 | } 269 | 270 | // getStreamSizeFromLastBlobSize gets the exact size of a stream from the sd blob and the last chunk
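// by fetching the last content chunk and adding its length to MaxChunkSize multiplied by the number of preceding chunks.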
271 | func (s *Stream) getStreamSizeFromLastBlobSize() (uint64, error) { 272 | if s.Claim.Value.GetStream() == nil { 273 | return 0, errors.Err("claim is not a stream") 274 | } 275 | 276 | numChunks := len(s.sdBlob.BlobInfos) - 1 277 | if numChunks <= 0 { 278 | return 0, nil 279 | } 280 | 281 | lastBlobInfo := s.sdBlob.BlobInfos[numChunks-1] 282 | 283 | lastChunk, err := s.player.blobSource.GetChunk(hex.EncodeToString(lastBlobInfo.BlobHash), s.sdBlob.Key, lastBlobInfo.IV) 284 | if err != nil { 285 | return 0, err 286 | } 287 | 288 | return uint64(MaxChunkSize)*uint64(numChunks-1) + uint64(len(lastChunk)), nil 289 | } 290 | -------------------------------------------------------------------------------- /player/testdata/new_stream.json: -------------------------------------------------------------------------------- 1 | { 2 | "jsonrpc": "2.0", 3 | "result": { 4 | "what": { 5 | "address": "bYB1RNSVXr7X3sHGVa7D6s8jWH4hFip3vG", 6 | "amount": "0.009", 7 | "canonical_url": "lbry://what#74", 8 | "claim_id": "74f7d9a8748fec1754476c23840ce326d108f748", 9 | "claim_op": "create", 10 | "confirmations": 2924, 11 | "height": 858955, 12 | "is_my_output": false, 13 | "meta": { 14 | "activation_height": 861233, 15 | "creation_height": 858955, 16 | "creation_timestamp": 1604641226, 17 | "effective_amount": "28.51278745", 18 | "expiration_height": 2961355, 19 | "is_controlling": true, 20 | "reposted": 0, 21 | "support_amount": "28.50378745", 22 | "take_over_height": 861233, 23 | "trending_global": 0.0, 24 | "trending_group": 0, 25 | "trending_local": 0.0, 26 | "trending_mixed": 0.004628309514373541 27 | }, 28 | "name": "what", 29 | "normalized_name": "what", 30 | "nout": 0, 31 | "permanent_url": "lbry://what#74f7d9a8748fec1754476c23840ce326d108f748", 32 | "protobuf": "000aa3010a8a010a3026fe6587abef1ad3130d1f3c4f476dcbe1be2814d7f5058260f4542fe230a225dc4abdffb2da94e0e4d08b702389d01912153120646f6720616e6420636869636b656e2e6d703418a5ab1c2209766964656f2f6d70343230c8bb048ebdee70c542987cd5769b6043bbf3164fba98ab9bd792b4d952a3fd824a766fbb0b59edfaf81324c06e45ca4a1a044e6f6e65288cc393fd055a0808bc0310d005180842045768617452282a2668747470733a2f2f737065652e63682f392f626663363833316164633239343135352e6a706762020801", 33 | "short_url": "lbry://what#74", 34 | "timestamp": 1604641226, 35 | "txid": "001f9d09fb20849d38641421a6686ac4c7778e4c3042397bb319a592ad38f154", 36 | "type": "claim", 37 | "value": { 38 | "languages": [ 39 | "en" 40 | ], 41 | "license": "None", 42 | "release_time": "1604641164", 43 | "source": { 44 | "hash": "26fe6587abef1ad3130d1f3c4f476dcbe1be2814d7f5058260f4542fe230a225dc4abdffb2da94e0e4d08b702389d019", 45 | "media_type": "video/mp4", 46 | "name": "1 dog and chicken.mp4", 47 | "sd_hash": "c8bb048ebdee70c542987cd5769b6043bbf3164fba98ab9bd792b4d952a3fd824a766fbb0b59edfaf81324c06e45ca4a", 48 | "size": "464293" 49 | }, 50 | "stream_type": "video", 51 | "thumbnail": { 52 | "url": "https://spee.ch/9/bfc6831adc294155.jpg" 53 | }, 54 | "title": "What", 55 | "video": { 56 | "duration": 8, 57 | "height": 720, 58 | "width": 444 59 | } 60 | }, 61 | "value_type": "stream" 62 | } 63 | }, 64 | "id": 0 65 | } 66 | -------------------------------------------------------------------------------- /player/testdata/old_mime.json: -------------------------------------------------------------------------------- 1 | { 2 | "jsonrpc": "2.0", 3 | "result": { 4 | "lbry://@Deterrence-Dispensed#2/Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed#1": { 5 | "address": "bL5VaySGGboEDSydxTcfNtdiV25LKi4Tph", 6 | "amount": "0.0001", 7 | 
"canonical_url": "lbry://@Deterrence-Dispensed#2/Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed#1", 8 | "claim_id": "1587a0f5aad617b8a2a86677d446a275c087db75", 9 | "claim_op": "update", 10 | "confirmations": 246875, 11 | "height": 615004, 12 | "is_channel_signature_valid": true, 13 | "is_my_output": false, 14 | "meta": { 15 | "activation_height": 538565, 16 | "creation_height": 538565, 17 | "creation_timestamp": 1553229472, 18 | "effective_amount": "292.90047345", 19 | "expiration_height": 2640965, 20 | "is_controlling": true, 21 | "reposted": 1, 22 | "support_amount": "292.90037345", 23 | "take_over_height": 538565, 24 | "trending_global": 0.0, 25 | "trending_group": 0, 26 | "trending_local": 0.0, 27 | "trending_mixed": 0.042904097586870193 28 | }, 29 | "name": "Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed", 30 | "normalized_name": "ivans100diy30rdar-15magazinev10-deterrencedispensed", 31 | "nout": 0, 32 | "permanent_url": "lbry://Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed#1587a0f5aad617b8a2a86677d446a275c087db75", 33 | "protobuf": "01979d90ed629bbf932703b2be3226179270262022d96dd436e9de80f8c8dd68d5d86cf05273016ec74e6acdad298e35f4a7dbe320404b6e749a85db27ee6c2c39da322ba102acf7d574bd62bb9cd0be9cc3e935da0a5a0a43220f6170706c69636174696f6e2f7a6970323039a6077029e9b349b572ed8d187a08c4bbf85f4bdcf63d4b61ef9833ea07703cab72bdb9322a8f9fcb58f6bd5e8b238a1a0d5075626c696320446f6d61696e28feada6e505423231303025204449592041522d313520333020726f756e64204d6167617a696e65202d206279204976616e54686554726f6c6c4ab502446574657272656e63652044697370656e7365642070726573656e74733a0a0a4976616e7320313030252044495920333072642041522d3135206d6167617a696e65202d2052656c656173652056657273696f6e20312e300a0a57697468207468652068656c70206f662074686520636f6e74656e747320696e20746869732072656c6561736520796f75206172652061626c6520746f206d616b65206120333072642041522d3135206d6167617a696e6520636f6d706c6574656c792066726f6d20736372617463682e0a0a506f7374206578616d706c6573206f6620796f757220776f726b2c207175657374696f6e7320616e642069737375657320746f3a0a0a68747470733a2f2f6b6579626173652e696f2f7465616d2f6465745f646973700a0a4c6976652046726565206f722044696552302a2e68747470733a2f2f737065652e63682f662f6f4845766233417a474d6f65337144476d667033506a72332e6a706762020801", 34 | "short_url": "lbry://Ivans100DIY30rdAR-15MagazineV10-DeterrenceDispensed#1", 35 | "signing_channel": { 36 | "address": "bQbEbsV3nNPfg1YBQi9BYZ1jTbo4HXekaZ", 37 | "amount": "1.0001", 38 | "canonical_url": "lbry://@Deterrence-Dispensed#2", 39 | "claim_id": "2220267092172632beb2032793bf9b62ed909d97", 40 | "claim_op": "update", 41 | "confirmations": 232695, 42 | "has_signing_key": false, 43 | "height": 629184, 44 | "meta": { 45 | "activation_height": 537744, 46 | "claims_in_channel": 113, 47 | "creation_height": 537744, 48 | "creation_timestamp": 1553098369, 49 | "effective_amount": "5395.0901", 50 | "expiration_height": 2640144, 51 | "is_controlling": true, 52 | "reposted": 0, 53 | "support_amount": "5394.09", 54 | "take_over_height": 537744, 55 | "trending_global": 0.0, 56 | "trending_group": 0, 57 | "trending_local": 0.0, 58 | "trending_mixed": 0.0 59 | }, 60 | "name": "@Deterrence-Dispensed", 61 | "normalized_name": "@deterrence-dispensed", 62 | "nout": 0, 63 | "permanent_url": "lbry://@Deterrence-Dispensed#2220267092172632beb2032793bf9b62ed909d97", 64 | "protobuf": 
"0012e2010a583056301006072a8648ce3d020106052b8104000a0342000430223ea16b725b1c64bea292b5dd80375a7e4e0175aa20fd5d59de0c70dc2afbd4f71112de773b93623411bf46ead2d5316729064f2de3d180da6d07fb025fe5121b6976616e74686574726f6c6c4070726f746f6e6d61696c2e636f6d1a2b68747470733a2f2f6976616e74686574726f6c6c2e6b6579626173652e7075622f696e6465782e68746d6c223c2a3a68747470733a2f2f737065652e63682f392f64663364613166372d313865332d343733372d613939322d3339653631656136613362392e706e674214446574657272656e63652044697370656e7365644a5f5765206174204465745f4469737020776f726b20746f2073707265616420746563686e6963616c20696e666f726d6174696f6e2061626f75742074686520646566656e73652061727469636c657320776520636172652061626f75742e2020523c2a3a68747470733a2f2f737065652e63682f632f61656534653337352d623735362d343561312d623762662d6565323065346438396262642e706e67", 65 | "short_url": "lbry://@Deterrence-Dispensed#2", 66 | "timestamp": 1567779048, 67 | "txid": "c604c149c7625fa96b955ae53da7b90101fe7247433af4a8d3d729fb8c055ebb", 68 | "type": "claim", 69 | "value": { 70 | "cover": { 71 | "url": "https://spee.ch/9/df3da1f7-18e3-4737-a992-39e61ea6a3b9.png" 72 | }, 73 | "description": "We at Det_Disp work to spread technical information about the defense articles we care about. ", 74 | "email": "ivanthetroll@protonmail.com", 75 | "public_key": "3056301006072a8648ce3d020106052b8104000a0342000430223ea16b725b1c64bea292b5dd80375a7e4e0175aa20fd5d59de0c70dc2afbd4f71112de773b93623411bf46ead2d5316729064f2de3d180da6d07fb025fe5", 76 | "public_key_id": "bWAngrEkxeigFt521ERQ2xaKYbSpyikm9o", 77 | "thumbnail": { 78 | "url": "https://spee.ch/c/aee4e375-b756-45a1-b7bf-ee20e4d89bbd.png" 79 | }, 80 | "title": "Deterrence Dispensed", 81 | "website_url": "https://ivanthetroll.keybase.pub/index.html" 82 | }, 83 | "value_type": "channel" 84 | }, 85 | "timestamp": 1565498373, 86 | "txid": "8a879bfa785b589458ca5361f35272a97fa9a9a78b05812485fd3a138af0d46f", 87 | "type": "claim", 88 | "value": { 89 | "description": "Deterrence Dispensed presents:\n\nIvans 100% DIY 30rd AR-15 magazine - Release Version 1.0\n\nWith the help of the contents in this release you are able to make a 30rd AR-15 magazine completely from scratch.\n\nPost examples of your work, questions and issues to:\n\nhttps://keybase.io/team/det_disp\n\nLive Free or Die", 90 | "languages": [ 91 | "en" 92 | ], 93 | "license": "Public Domain", 94 | "release_time": "1554618110", 95 | "source": { 96 | "media_type": "application/zip", 97 | "sd_hash": "39a6077029e9b349b572ed8d187a08c4bbf85f4bdcf63d4b61ef9833ea07703cab72bdb9322a8f9fcb58f6bd5e8b238a" 98 | }, 99 | "stream_type": "binary", 100 | "thumbnail": { 101 | "url": "https://spee.ch/f/oHEvb3AzGMoe3qDGmfp3Pjr3.jpg" 102 | }, 103 | "title": "100% DIY AR-15 30 round Magazine - by IvanTheTroll" 104 | }, 105 | "value_type": "stream" 106 | } 107 | }, 108 | "id": 0 109 | } 110 | -------------------------------------------------------------------------------- /player/v5_test.go: -------------------------------------------------------------------------------- 1 | package player 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "os" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/lbryio/reflector.go/store" 12 | 13 | "github.com/Pallinder/go-randomdata" 14 | "github.com/gin-gonic/gin" 15 | "github.com/stretchr/testify/require" 16 | "github.com/stretchr/testify/suite" 17 | ) 18 | 19 | const ( 20 | rentalClaim = "81b1749f773bad5b9b53d21508051560f2746cdc" 21 | purchaseClaim = "2742f9e8eea0c4654ea8b51507dbb7f23f1f5235" 22 | ) 23 | 24 | var testEdgeToken = 
randomdata.Alphanumeric(32) 25 | 26 | type httpTest struct { 27 | Name string 28 | 29 | Method string 30 | URL string 31 | 32 | ReqBody io.Reader 33 | ReqHeader map[string]string 34 | 35 | Code int 36 | ResBody string 37 | ResHeader map[string]string 38 | ResContains string 39 | } 40 | 41 | type apiV5Suite struct { 42 | suite.Suite 43 | 44 | player *Player 45 | router *gin.Engine 46 | } 47 | 48 | func (s *apiV5Suite) SetupSuite() { 49 | et, ok := os.LookupEnv("TEST_EDGE_TOKEN") 50 | if !ok { 51 | s.T().Skip("TEST_EDGE_TOKEN not set, skipping") 52 | } 53 | origin := store.NewHttpStore("source.odycdn.com:5569", et) 54 | ds := NewDecryptedCache(origin) 55 | p := NewPlayer(NewHotCache(*ds, 100000000), WithDownloads(true), WithEdgeToken(testEdgeToken)) 56 | s.player = p 57 | 58 | s.router = gin.New() 59 | 60 | InstallPlayerRoutes(s.router, s.player) 61 | } 62 | 63 | func (s *apiV5Suite) TestMissingEdgeToken() { 64 | (&httpTest{ 65 | Method: http.MethodGet, 66 | URL: "/api/v3/streams/free/randomstring/2742f9e8eea0c4654ea8b51507dbb7f23f1f5235/abcabc", 67 | Code: http.StatusUnauthorized, 68 | ResContains: "edge credentials missing", 69 | }).Run(s.router, s.T()) 70 | (&httpTest{ 71 | Method: http.MethodGet, 72 | URL: "/api/v4/streams/free/randomstring/2742f9e8eea0c4654ea8b51507dbb7f23f1f5235/abcabc", 73 | Code: http.StatusUnauthorized, 74 | ResContains: "edge credentials missing", 75 | }).Run(s.router, s.T()) 76 | (&httpTest{ 77 | Method: http.MethodGet, 78 | URL: "/v5/streams/original/2742f9e8eea0c4654ea8b51507dbb7f23f1f5235/abcdef", 79 | Code: http.StatusUnauthorized, 80 | ResContains: "edge credentials missing", 81 | }).Run(s.router, s.T()) 82 | } 83 | 84 | func (s *apiV5Suite) TestValidEdgeToken() { 85 | (&httpTest{ 86 | Method: http.MethodGet, 87 | URL: "/v5/streams/start/2742f9e8eea0c4654ea8b51507dbb7f23f1f5235/abcdef", 88 | Code: http.StatusOK, 89 | ReqHeader: map[string]string{ 90 | "Authorization": "Token " + testEdgeToken, 91 | }, 92 | }).Run(s.router, s.T()) 93 | (&httpTest{ 94 | Method: http.MethodGet, 95 | URL: "/v5/streams/original/2742f9e8eea0c4654ea8b51507dbb7f23f1f5235/abcdef", 96 | Code: http.StatusOK, 97 | ReqHeader: map[string]string{ 98 | "Authorization": "Token " + testEdgeToken, 99 | }, 100 | }).Run(s.router, s.T()) 101 | } 102 | 103 | func (test *httpTest) Run(handler http.Handler, t *testing.T) *httptest.ResponseRecorder { 104 | t.Helper() 105 | req, err := http.NewRequest(test.Method, test.URL, test.ReqBody) 106 | require.NoError(t, err) 107 | // req.RequestURI = test.URL 108 | 109 | // Add headers 110 | for key, value := range test.ReqHeader { 111 | req.Header.Set(key, value) 112 | } 113 | 114 | req.Host = "odysee.com" 115 | w := httptest.NewRecorder() 116 | handler.ServeHTTP(w, req) 117 | 118 | if w.Code != test.Code { 119 | t.Errorf("Expected %v %s as status code (got %v %s)", test.Code, http.StatusText(test.Code), w.Code, http.StatusText(w.Code)) 120 | } 121 | 122 | for key, value := range test.ResHeader { 123 | header := w.Header().Get(key) 124 | 125 | if value != header { 126 | t.Errorf("Expected '%s' as '%s' (got '%s')", value, key, header) 127 | } 128 | } 129 | 130 | if test.ResBody != "" && w.Body.String() != test.ResBody { 131 | t.Errorf("Expected '%s' as body (got '%s'", test.ResBody, w.Body.String()) 132 | } 133 | 134 | if test.ResContains != "" && !strings.Contains(w.Body.String(), test.ResContains) { 135 | t.Errorf("Expected '%s' to be present in response (got '%s'", test.ResContains, w.Body.String()) 136 | } 137 | 138 | return w 139 | } 140 | 141 | func 
TestAPIV5Suite(t *testing.T) { 142 | suite.Run(t, new(apiV5Suite)) 143 | } 144 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Media server for Odysee 2 | 3 | ![Tests](https://github.com/OdyseeTeam/player-server/actions/workflows/pipeline.yml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/OdyseeTeam/player-server/badge.svg?branch=master)](https://coveralls.io/github/OdyseeTeam/player-server?branch=master) 4 | 5 | # Development 6 | 7 | To run tests: 8 | 9 | ``` 10 | make prepare_test 11 | make test 12 | ``` 13 | 14 | # Usage 15 | 16 | `player-server` requires the LBRY SDK and MySQL. 17 | 18 | ``` 19 | go run .\ 20 | --disk-cache-dir=/tmp/player_cache\ 21 | --disk-cache-size=800GB\ 22 | --upstream-reflector=eu-p2.lbryplayer.xyz:5569\ 23 | --upstream-protocol=http\ 24 | --bind=0.0.0.0:8081\ 25 | --prefetch=true\ 26 | --hot-cache-size=6GB\ 27 | --profile=true\ 28 | --config-username=lbry\ 29 | --config-password=beameriscool\ 30 | --throttle-scale=3.0\ 31 | --throttle-enabled=false\ 32 | --transcoder-video-path=/tmp/transcoded_cache\ 33 | --transcoder-video-size=800GB\ 34 | --transcoder-addr=https://root.transcoder.odysee.com \ 35 | --transcoder-remote-server=https://cache-us.transcoder.odysee.com/t-na 36 | ``` 37 | 38 | Run `odysee_player -h` to see the full list of flags and options. 39 | 40 | ### Some flags explained 41 | 42 | `cloudfront-endpoint` and `upstream-reflector` are mutually exclusive: the player can either pull blobs from an existing reflector or from a CDN endpoint. Only specify one. 43 | 44 | - example for `cloudfront-endpoint`: http://XXXXXXXXXX.cloudfront.net/ 45 | - example for `upstream-reflector`: reflector.lbry.com:5568 46 | 47 | `disk-cache-dir` and `disk-cache-size` set the location and size of the local cache where encrypted blobs are stored. Blobs are evicted using a Least Frequently Accessed (with Dynamic Aging) strategy. 48 | 49 | `hot-cache-size` sets the size of the in-memory cache where unencrypted blobs are stored. Blobs are evicted using an LRU strategy. 50 | 51 | `prefetch` and `prefetch-count` can help reduce buffering by downloading blobs to the player in advance, so they are ready by the time the client requests them. 52 | 53 | `throttle-enabled` and `throttle-scale` allow limiting the outbound bandwidth on a per-stream basis. This helps ensure that no single client can saturate the server's uplink. 54 | 55 | `transcoder-video-path` and `transcoder-video-size`: similarly to the disk cache flags, these regulate the location and size of the transcoded videos that are retrieved from `transcoder-addr`. 56 | 57 | ## Running with Docker 58 | 59 | The player server is primarily intended to run in a Docker environment managed by `docker-compose`. To launch it and start serving: 60 | 61 | ``` 62 | docker-compose up -d 63 | ``` 64 | 65 | # Releasing 66 | 67 | Releases are built and packed into Docker images automatically off the `master` branch. The approximate `Makefile` commands used are: 68 | 69 | ``` 70 | make linux 71 | make image 72 | docker push odyseeteam/player-server:21.3.4 73 | docker push odyseeteam/player-server:latest 74 | ``` 75 | 76 | You need to tag your commit with a proper CalVer tag. Example: 77 | 78 | ``` 79 | git tag v21.3.4 # March 2021, version 4 80 | ``` 81 | 82 | Check [Makefile](./Makefile) for more details.
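To smoke-test a running instance, a ranged request can be issued against one of the v5 stream routes exercised in `player/v5_test.go`. This is a sketch, not a definitive invocation: the claim ID, the trailing path segment and the token are placeholders, the bind address is taken from the usage example above, and the `Authorization` header is only needed when the server is configured to require an edge token.

```
curl -v \
  -H "Authorization: Token <edge-token>" \
  -H "Range: bytes=0-1023" \
  "http://localhost:8081/v5/streams/original/<claim-id>/<suffix>"
```

A healthy instance should answer with `206 Partial Content`, an `Accept-Ranges: bytes` header and a matching `Content-Range`, per the player's HTTP range handling.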
83 | 84 | ## License 85 | 86 | This project is MIT licensed. For the full license, see [LICENSE](LICENSE). 87 | 88 | ## Contact 89 | 90 | The primary contact for this project is [@anbsky](https://github.com/anbsky) (andrey.beletsky@odysee.com). 91 | -------------------------------------------------------------------------------- /scripts/wait_for_wallet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | lbrynet=http://localhost:5279/ 4 | echo "Waiting for wallet server '${lbrynet}'..." 5 | 6 | while [[ ! $(curl -sd '{"method": "status"}' ${lbrynet} |grep '"wallet": true') ]]; do 7 | sleep 1 8 | done 9 | 10 | 11 | echo "Wallet server is alive" 12 | --------------------------------------------------------------------------------
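The wait script above is intended to gate commands on a local lbrynet wallet being ready. A minimal usage sketch, assuming lbrynet is already being started by your local setup and is reachable on `localhost:5279` as the script expects:

```
./scripts/wait_for_wallet.sh && make test
```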