├── e2e ├── chainquery │ ├── docker │ │ ├── healthcheck.sh │ │ ├── build.sh │ │ ├── my.cnf │ │ ├── Dockerfile │ │ └── start.sh │ └── docker-compose.yml ├── init.sql ├── lbrynet │ ├── docker │ │ ├── start.sh │ │ ├── build.sh │ │ ├── checkmount.sh │ │ └── Dockerfile │ ├── settings │ │ └── daemon_settings.yml │ └── docker-compose.yml ├── lbrycrd │ ├── docker │ │ ├── advance_blocks.sh │ │ ├── build.sh │ │ ├── healthcheck.sh │ │ ├── fix-permissions.c │ │ ├── Dockerfile │ │ └── start.sh │ └── docker-compose.yml ├── daemon_settings.yml ├── supporty │ ├── Makefile │ └── supporty.go ├── walletserver │ └── docker-compose.yml ├── data_setup.sh ├── chainqueryconfig.toml ├── docker-compose.yml └── e2e.sh ├── scripts ├── release.sh └── deploy.sh ├── .gitignore ├── ytapi ├── ytapi_test.go └── ytapi.go ├── metrics └── metrics.go ├── shared ├── shared_test.go └── shared.go ├── ip_manager ├── throttle_test.go └── throttle.go ├── Makefile ├── .goreleaser.yml ├── config.json.example ├── sources ├── shared.go └── youtubeVideo_test.go ├── downloader ├── downloader_test.go ├── ytdl │ └── Video.go └── downloader.go ├── LICENSE ├── util ├── log_wrapper.go ├── archive.go └── util.go ├── namer ├── names_test.go └── names.go ├── .travis.yml ├── configs └── configs.go ├── blobs_reflector └── reflect.go ├── timing └── timing.go ├── tags_manager └── tags_mapping_test.go ├── thumbs └── uploader.go ├── README.md ├── 0001-lbry-patch.patch ├── main.go ├── manager ├── manager.go ├── s3_storage.go ├── transfer.go └── setup.go ├── go.mod └── sdk └── api.go /e2e/chainquery/docker/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | curl --fail http://localhost:6300/api/status || exit 1 -------------------------------------------------------------------------------- /e2e/init.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS chainquery; 2 | GRANT ALL PRIVILEGES ON 
chainquery.* TO 'lbry'@'%'; 3 | FLUSH PRIVILEGES; -------------------------------------------------------------------------------- /scripts/release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | GO111MODULE=off go get github.com/caarlos0/svu 4 | git tag `svu "$1"` 5 | git push --tags 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | e2e/persist 3 | e2e/supporty/supporty 4 | .env 5 | blobsfiles 6 | ytsync_docker 7 | 8 | e2e/config.json 9 | 10 | e2e/cookies.txt 11 | -------------------------------------------------------------------------------- /e2e/lbrynet/docker/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | lbrynet start \ 3 | --api="${API_BIND_IP:-0.0.0.0}":"${API_PORT:-5279}" \ 4 | --config="${CONFIG_PATH:-/etc/lbry/daemon_settings.yml}" -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/advance_blocks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | while true; do 3 | lbrycrd-cli -conf=/etc/lbry/lbrycrd.conf generate 100 >> /tmp/output.log 4 | sleep 2 5 | done -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ $# -eq 0 ] 3 | then 4 | echo "No docker tag argument supplied. Use './build.sh '" 5 | exit 1 6 | fi 7 | docker build --build-arg VERSION=$1 --tag lbry/lbrycrd:$1 . 
8 | docker push lbry/lbrycrd:$1 -------------------------------------------------------------------------------- /e2e/lbrynet/docker/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ $# -eq 0 ] 3 | then 4 | echo "No docker tag argument supplied. Use './build.sh '" 5 | exit 1 6 | fi 7 | docker build --build-arg VERSION=$1 --tag lbry/lbrynet:$1 . 8 | docker push lbry/lbrynet:$1 -------------------------------------------------------------------------------- /e2e/daemon_settings.yml: -------------------------------------------------------------------------------- 1 | blockchain_name: lbrycrd_regtest 2 | lbryum_servers: 3 | - walletserver:50001 4 | reflect_streams: false 5 | save_blobs: true 6 | save_files: false 7 | share_usage_data: false 8 | tcp_port: 3333 9 | udp_port: 4444 10 | use_upnp: false -------------------------------------------------------------------------------- /e2e/chainquery/docker/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ $# -eq 0 ] 3 | then 4 | echo "No docker tag argument supplied. Use './build.sh '" 5 | exit 1 6 | fi 7 | docker build --no-cache --build-arg VERSION=$1 --tag lbry/chainquery:$1 . 8 | docker push lbry/chainquery:$1 -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/healthcheck.sh: -------------------------------------------------------------------------------- 1 | ## TODO: Implement a healthcheck for lbrycrd. 
2 | curl --data-binary '{"jsonrpc":"1.0","id":"curltext","method":"getinfo","params":[]}' -H 'content-type:text/plain;' http://$RPC_USER:$RPC_PASSWORD@127.0.0.1:9246 3 | ## OR 4 | lbrycrd-cli getinfo -------------------------------------------------------------------------------- /ytapi/ytapi_test.go: -------------------------------------------------------------------------------- 1 | package ytapi 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestChannelInfo(t *testing.T) { 10 | info, err := ChannelInfo("UCNQfQvFMPnInwsU_iGYArJQ") 11 | assert.NoError(t, err) 12 | assert.NotNil(t, info) 13 | } 14 | -------------------------------------------------------------------------------- /e2e/chainquery/docker/my.cnf: -------------------------------------------------------------------------------- 1 | # Default Homebrew MySQL server config 2 | [mysqld] 3 | # Only allow connections from localhost 4 | innodb_log_file_size=5G 5 | key_buffer_size=1G 6 | innodb_flush_log_at_trx_commit = 0 7 | innodb_autoinc_lock_mode=2 8 | innodb_buffer_pool_size=1G 9 | innodb_log_buffer_size=1G -------------------------------------------------------------------------------- /e2e/supporty/Makefile: -------------------------------------------------------------------------------- 1 | BINARY=supporty 2 | 3 | DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd) 4 | BIN_DIR = ${DIR} 5 | 6 | .PHONY: build clean test lint 7 | .DEFAULT_GOAL: build 8 | 9 | 10 | build: 11 | mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} supporty.go 12 | chmod +x ${BIN_DIR}/${BINARY} -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/fix-permissions.c: -------------------------------------------------------------------------------- 1 | #include 2 | int main() { 3 | // This program needs to run with setuid == root 4 | // This needs to be in a compiled language because you 
cannot setuid bash scripts 5 | setuid(0); 6 | execle("/bin/bash", "bash", "-c", 7 | "/bin/chown -R lbrycrd:lbrycrd /data && /bin/chmod -R 755 /data/", 8 | (char*) NULL, (char*) NULL); 9 | } -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export IMPORTPATH="github.com/lbryio/ytsync" 4 | export VERSIONSHORT="${TRAVIS_COMMIT:-"$(git describe --tags --always --dirty)"}" 5 | export VERSIONLONG="${TRAVIS_COMMIT:-"$(git describe --tags --always --dirty --long)"}" 6 | export COMMITMSG="$(echo ${TRAVIS_COMMIT_MESSAGE:-"$(git show -s --format=%s)"} | tr -d '"' | head -n 1)" 7 | curl -sL https://git.io/goreleaser | bash -------------------------------------------------------------------------------- /e2e/lbrynet/settings/daemon_settings.yml: -------------------------------------------------------------------------------- 1 | #blockchain_name: lbrycrd_main 2 | #blockchain_name: lbrycrd_testnet 3 | blockchain_name: lbrycrd_regtest 4 | lbryum_servers: 5 | # - spv1.lbry.com:50001 #Production Wallet Server 6 | - walletserver:50001 7 | save_blobs: true 8 | save_files: false 9 | reflect_streams: false #for the love of god, don't upload regtest streams to reflector! 10 | share_usage_data: false 11 | tcp_port: 3333 12 | udp_port: 4444 13 | use_upnp: true -------------------------------------------------------------------------------- /e2e/lbrynet/docker/checkmount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## TODO: Make a bit more aware of the run mode of this appliance in case there is ever a test mode enabled in the start.sh 4 | mountpoint=/home/lbrynet 5 | 6 | if ! grep -qs ".* $mountpoint " /proc/mounts; then 7 | echo "$mountpoint not mounted, refusing to run." 
8 | ## TODO: We should have documentation that this error references directly with a URL as to why it won't run without a volume. 9 | exit 1 10 | else 11 | bash -c "$*" 12 | fi 13 | -------------------------------------------------------------------------------- /metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/lbryio/ytsync/v5/configs" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/promauto" 8 | ) 9 | 10 | var ( 11 | Durations = promauto.NewHistogramVec(prometheus.HistogramOpts{ 12 | Namespace: "ytsync", 13 | Subsystem: configs.Configuration.GetHostname(), 14 | Name: "duration", 15 | Help: "The durations of the individual modules", 16 | }, []string{"path"}) 17 | ) 18 | -------------------------------------------------------------------------------- /shared/shared_test.go: -------------------------------------------------------------------------------- 1 | package shared 2 | 3 | import ( 4 | "testing" 5 | 6 | "gotest.tools/assert" 7 | ) 8 | 9 | func TestSyncFlags_VideosToSync(t *testing.T) { 10 | f := SyncFlags{} 11 | assert.Equal(t, f.VideosToSync(0), 0) 12 | assert.Equal(t, f.VideosToSync(1), 10) 13 | assert.Equal(t, f.VideosToSync(5), 10) 14 | assert.Equal(t, f.VideosToSync(10), 10) 15 | assert.Equal(t, f.VideosToSync(101), 50) 16 | assert.Equal(t, f.VideosToSync(500), 80) 17 | assert.Equal(t, f.VideosToSync(21000), 1000) 18 | f.VideosLimit = 1337 19 | assert.Equal(t, f.VideosToSync(21), 1337) 20 | } 21 | -------------------------------------------------------------------------------- /e2e/lbrynet/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | networks: 3 | lbry-network: 4 | external: true 5 | 6 | services: 7 | ############# 8 | ## Lbrynet ## 9 | ############# 10 | lbrynet: 11 | image: lbry/lbrynet:v0.99.0 12 | restart: "no" 
13 | networks: 14 | lbry-network: 15 | ipv4_address: 10.6.1.3 16 | ports: 17 | - "15100:5279" 18 | - "15101:5280" 19 | environment: 20 | - LBRY_STREAMING_SERVER=0.0.0.0:5280 21 | volumes: 22 | - "../persist/data/.lbrynet:/home/lbrynet" 23 | - "./settings:/etc/lbry" #Put your daemon_settings.yml here 24 | -------------------------------------------------------------------------------- /e2e/lbrycrd/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | networks: 3 | lbry-network: 4 | external: true 5 | 6 | services: 7 | ############# 8 | ## Lbrycrd ## 9 | ############# 10 | lbrycrd: 11 | image: lbry/lbrycrd:v0.12.4.1 12 | restart: always 13 | networks: 14 | lbry-network: 15 | ipv4_address: 10.6.1.1 16 | ports: 17 | - "15201:29246" 18 | - "15200:29245" 19 | expose: 20 | - "29246" 21 | - "29245" 22 | ## host volumes for persistent data such as wallet private keys. 23 | volumes: 24 | - "../persist/data:/data" 25 | environment: 26 | - RUN_MODE=regtest 27 | -------------------------------------------------------------------------------- /ip_manager/throttle_test.go: -------------------------------------------------------------------------------- 1 | package ip_manager 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestAll(t *testing.T) { 8 | pool, err := GetIPPool() 9 | if err != nil { 10 | t.Fatal(err) 11 | } 12 | ip, err := pool.GetIP() 13 | if err != nil { 14 | t.Fatal(err) 15 | } 16 | t.Log(ip) 17 | pool.ReleaseIP(ip) 18 | ip2, err := pool.GetIP() 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | if ip == ip2 && len(pool.ips) > 1 { 23 | t.Fatalf("the same IP was returned twice! 
%s, %s", ip, ip2) 24 | } 25 | t.Log(ip2) 26 | pool.ReleaseIP(ip2) 27 | 28 | for range pool.ips { 29 | _, err = pool.GetIP() 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | } 34 | next, err := pool.nextIP() 35 | if err != nil { 36 | t.Logf("%s", err.Error()) 37 | } else { 38 | t.Fatal(next) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BINARY=ytsync 2 | 3 | DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd) 4 | BIN_DIR = ${DIR}/bin 5 | IMPORT_PATH = github.com/lbryio/ytsync 6 | 7 | VERSION = $(shell git --git-dir=${DIR}/.git describe --dirty --always --long --abbrev=7) 8 | LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/meta.Time=$(shell date +%s)" 9 | 10 | 11 | .PHONY: build clean test lint 12 | .DEFAULT_GOAL: build 13 | 14 | 15 | build: 16 | mkdir -p ${BIN_DIR} && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go 17 | 18 | clean: 19 | if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi 20 | 21 | test: 22 | go test ./... -v -cover 23 | 24 | lint: 25 | go get github.com/alecthomas/gometalinter && gometalinter --install && gometalinter ./... -------------------------------------------------------------------------------- /e2e/chainquery/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | services: 4 | ########### 5 | ## MYSQL ## 6 | ########### 7 | mysql: 8 | image: mysql:5.7.23 9 | restart: "no" 10 | ports: 11 | - 3306:3306 12 | volumes: 13 | - "../persist/chainquery/db:/var/lib/mysql" 14 | ## This one may need to be tweaked based on where you run this docker-compose from. 
15 | - "../stuff/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf" 16 | ################ 17 | ## Chainquery ## 18 | ################ 19 | chainquery: 20 | image: lbry/chainquery:v1.8.1 21 | restart: "no" 22 | ports: 23 | - 6300:6300 24 | depends_on: 25 | - mysql 26 | ## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations. 27 | volumes: 28 | - "../persist/chainquery/config/chainqueryconfig.toml:/etc/chainquery/chainqueryconfig.toml" -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # This is an example goreleaser.yaml file with some sane defaults. 2 | # Make sure to check the documentation at http://goreleaser.com 3 | builds: 4 | - env: 5 | - CGO_ENABLED=0 6 | goos: 7 | - linux 8 | goarch: 9 | - amd64 10 | ldflags: 11 | - -X "{{ .Env.IMPORTPATH }}/meta.semVersion={{ .Tag }}" -X "{{ .Env.IMPORTPATH }}/meta.version={{ .Env.VERSIONSHORT }}" -X "{{ .Env.IMPORTPATH }}/meta.versionLong={{ .Env.VERSIONLONG }}" -X "{{ .Env.IMPORTPATH }}/meta.commitMsg={{ .Env.COMMITMSG }}" 12 | archives: 13 | - id: zip 14 | name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}' 15 | replacements: 16 | linux: Linux 17 | amd64: x86_64 18 | format: zip 19 | checksum: 20 | name_template: 'checksums.txt' 21 | snapshot: 22 | name_template: "{{ .Tag }}-next" 23 | changelog: 24 | sort: asc 25 | filters: 26 | exclude: 27 | - '^docs:' 28 | - '^test:' -------------------------------------------------------------------------------- /config.json.example: -------------------------------------------------------------------------------- 1 | { 2 | "slack_token": "", 3 | "slack_channel": "ytsync-dev", 4 | "internal_apis_endpoint": "http://localhost:15400", 5 | "internal_apis_auth_token": "ytsyntoken", 6 | "lbrycrd_string": "tcp://lbry:lbry@localhost:15200", 7 | "wallet_s3_config": { 8 | "id": "", 9 | "secret": "", 10 | "region": 
"us-east-1", 11 | "bucket": "ytsync-wallets", 12 | "endpoint": "" 13 | }, 14 | "blockchaindb_s3_config": { 15 | "id": "", 16 | "secret": "", 17 | "region": "us-east-1", 18 | "bucket": "blockchaindbs", 19 | "endpoint": "" 20 | }, 21 | "thumbnails_s3_config": { 22 | "id": "", 23 | "secret": "", 24 | "region": "us-east-1", 25 | "bucket": "thumbnails.lbry.com", 26 | "endpoint": "" 27 | }, 28 | "aws_thumbnails_s3_config": { 29 | "id": "", 30 | "secret": "", 31 | "region": "us-east-1", 32 | "bucket": "thumbnails.lbry.com", 33 | "endpoint": "" 34 | } 35 | } -------------------------------------------------------------------------------- /sources/shared.go: -------------------------------------------------------------------------------- 1 | package sources 2 | 3 | import ( 4 | "strings" 5 | "sync" 6 | 7 | "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 8 | "github.com/lbryio/ytsync/v5/namer" 9 | ) 10 | 11 | type SyncSummary struct { 12 | ClaimID string 13 | ClaimName string 14 | } 15 | 16 | func publishAndRetryExistingNames(daemon *jsonrpc.Client, title, filename string, amount float64, options jsonrpc.StreamCreateOptions, namer *namer.Namer, walletLock *sync.RWMutex) (*SyncSummary, error) { 17 | walletLock.RLock() 18 | defer walletLock.RUnlock() 19 | for { 20 | name := namer.GetNextName(title) 21 | response, err := daemon.StreamCreate(name, filename, amount, options) 22 | if err != nil { 23 | if strings.Contains(err.Error(), "failed: Multiple claims (") { 24 | continue 25 | } 26 | return nil, err 27 | } 28 | PublishedClaim := response.Outputs[0] 29 | return &SyncSummary{ClaimID: PublishedClaim.ClaimID, ClaimName: name}, nil 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /e2e/walletserver/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | networks: 3 | lbry-network: 4 | external: true 5 | 6 | services: 7 | ################### 8 | ## Wallet Server ## 9 | 
################### 10 | walletserver: 11 | image: lbry/wallet-server:v0.73.1 12 | restart: always 13 | networks: 14 | lbry-network: 15 | ipv4_address: 10.6.1.2 16 | volumes: 17 | - "../persist/data/.walletserver/database:/database" 18 | environment: 19 | - DB_DIRECTORY=/database 20 | - MAX_SEND=1000000000000000000000 21 | - DAEMON_URL=http://lbry:lbry@lbrycrd:29245/ 22 | - MAX_SUBS=1000000000000 23 | - BANDWIDTH_LIMIT=80000000000 24 | - SESSION_TIMEOUT=10000000000000000000000000 25 | - TCP_PORT=50001 26 | #network_mode: host 27 | #network_mode: bridge 28 | ports: 29 | - "50001:50001" 30 | expose: 31 | - "50001" 32 | ulimits: 33 | nofile: 90000 34 | # command: lbry.wallet.server.coin.LBC 35 | command: lbry.wallet.server.coin.LBCRegTest -------------------------------------------------------------------------------- /downloader/downloader_test.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/lbryio/ytsync/v5/ip_manager" 7 | "github.com/lbryio/ytsync/v5/sdk" 8 | 9 | "github.com/lbryio/lbry.go/v2/extras/stop" 10 | 11 | "github.com/sirupsen/logrus" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestGetPlaylistVideoIDs(t *testing.T) { 16 | videoIDs, err := GetPlaylistVideoIDs("UCJ0-OtVpF0wOKEqT2Z1HEtA", 50, nil, nil) 17 | if err != nil { 18 | logrus.Error(err) 19 | } 20 | for _, id := range videoIDs { 21 | println(id) 22 | } 23 | } 24 | 25 | func TestGetVideoInformation(t *testing.T) { 26 | s := stop.New() 27 | ip, err := ip_manager.GetIPPool(s) 28 | assert.NoError(t, err) 29 | video, err := GetVideoInformation("kDGOHNpRjzc", s.Ch(), ip) 30 | assert.NoError(t, err) 31 | assert.NotNil(t, video) 32 | logrus.Info(video.ID) 33 | } 34 | 35 | func Test_getUploadTime(t *testing.T) { 36 | configs := sdk.APIConfig{} 37 | got, err := getUploadTime(&configs, "kDGOHNpRjzc", nil, "20060102") 38 | assert.NoError(t, err) 39 | t.Log(got) 40 | } 41 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017-2020 LBRY Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 6 | "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the 8 | following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 11 | 12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 13 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 14 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 | -------------------------------------------------------------------------------- /util/log_wrapper.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/lbryio/lbry.go/v2/extras/util" 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | // SendErrorToSlack Sends an error message to the default channel and to the process log. 11 | func SendErrorToSlack(format string, a ...interface{}) { 12 | message := format 13 | if len(a) > 0 { 14 | message = fmt.Sprintf(format, a...) 
15 | } 16 | log.Errorln(message) 17 | log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do... 18 | err := util.SendToSlack(":sos: ```" + message + "```") 19 | log.SetLevel(log.DebugLevel) 20 | if err != nil { 21 | log.Errorln(err) 22 | } 23 | } 24 | 25 | // SendInfoToSlack Sends an info message to the default channel and to the process log. 26 | func SendInfoToSlack(format string, a ...interface{}) { 27 | message := format 28 | if len(a) > 0 { 29 | message = fmt.Sprintf(format, a...) 30 | } 31 | log.Infoln(message) 32 | log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do... 33 | err := util.SendToSlack(":information_source: " + message) 34 | log.SetLevel(log.DebugLevel) 35 | if err != nil { 36 | log.Errorln(err) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /e2e/supporty/supporty.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/lbryio/ytsync/v5/util" 9 | 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func main() { 14 | if len(os.Args) != 6 { 15 | logrus.Info(strings.Join(os.Args, ",")) 16 | logrus.Fatal("Not enough arguments: name, claimID, address, blockchainName, claimAmount") 17 | } 18 | println("Supporty!") 19 | lbrycrd, err := util.GetLbrycrdClient(os.Getenv("LBRYCRD_STRING")) 20 | if err != nil { 21 | logrus.Fatal(err) 22 | } 23 | if lbrycrd == nil { 24 | logrus.Fatal("Lbrycrd Client is nil") 25 | } 26 | amount, err := strconv.ParseFloat(os.Args[5], 64) 27 | if err != nil { 28 | logrus.Error(err) 29 | } 30 | name := os.Args[1] 31 | claimid := os.Args[2] 32 | claimAddress := os.Args[3] 33 | blockChainName := os.Args[4] 34 | logrus.Infof("Supporting %s[%s] with %.2f LBC on chain %s at address %s", name, claimid, amount, blockChainName, claimAddress) 35 | hash, err := lbrycrd.SupportClaim(name, claimid, 
claimAddress, blockChainName, amount) 36 | if err != nil { 37 | logrus.Error(err) 38 | } 39 | if hash == nil { 40 | logrus.Fatal("Tx not created!") 41 | } 42 | logrus.Info("Tx: ", hash.String()) 43 | } 44 | -------------------------------------------------------------------------------- /namer/names_test.go: -------------------------------------------------------------------------------- 1 | package namer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func Test_getClaimNameFromTitle(t *testing.T) { 10 | name := getClaimNameFromTitle("СтопХам - \"В ожидании ответа\"", 0) 11 | assert.Equal(t, "стопхам-в-ожидании", name) 12 | name = getClaimNameFromTitle("SADB - \"A Weak Woman With a Strong Hood\"", 0) 13 | assert.Equal(t, "sadb-a-weak-woman-with-a-strong-hood", name) 14 | name = getClaimNameFromTitle("錢包整理術 5 Tips、哪種錢包最NG?|有錢人默默在做的「錢包整理術」 ft.@SHIN LI", 0) 15 | assert.Equal(t, "錢包整理術-5-tips-哪種錢包最ng", name) 16 | name = getClaimNameFromTitle("اسرع-طريقة-لتختيم", 0) 17 | assert.Equal(t, "اسرع-طريقة-لتختيم", name) 18 | name = getClaimNameFromTitle("شكرا على 380 مشترك😍😍😍😍 لي يريد دعم ادا وصلنا المقطع 40 لايك وراح ادعم قناتين", 0) 19 | assert.Equal(t, "شكرا-على-380-مشترك😍😍😍", name) 20 | name = getClaimNameFromTitle("test-@", 0) 21 | assert.Equal(t, "test", name) 22 | name = getClaimNameFromTitle("『あなたはただの空の殻でした』", 0) 23 | assert.Equal(t, "『あなたはただの空の殻でした』", name) 24 | name = getClaimNameFromTitle("精靈樂章-這樣的夥伴沒問題嗎 幽暗隕石坑(夢魘) 王有無敵狀態...要會閃不然會被秒(無課)", 2) 25 | assert.Equal(t, "精靈樂章-這樣的夥伴沒問題嗎-2", name) 26 | name = getClaimNameFromTitle("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 50) 27 | assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-50", name) 28 | } 29 | -------------------------------------------------------------------------------- /e2e/chainquery/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ## Get the latest source and extract it for the app 
container. 2 | ## Design choices, two RUN layers intended to keep builds faster, the zipped 3 | FROM ubuntu:18.04 as prep 4 | LABEL MAINTAINER="leopere [at] nixc [dot] us" 5 | RUN apt-get update && \ 6 | apt-get -y install unzip curl telnet wait-for-it && \ 7 | apt-get autoclean -y && \ 8 | rm -rf /var/lib/apt/lists/* 9 | WORKDIR / 10 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 11 | COPY ./start.sh start 12 | COPY ./healthcheck.sh healthcheck 13 | ARG VERSION="master" 14 | RUN curl -s -o /chainquery http://build.lbry.io/chainquery/branch-"${VERSION}"/chainquery && \ 15 | chmod +x /chainquery 16 | 17 | 18 | FROM ubuntu:18.04 as app 19 | RUN apt-get update && \ 20 | apt-get -y install telnet wait-for-it && \ 21 | apt-get autoclean -y && \ 22 | rm -rf /var/lib/apt/lists/* 23 | ARG VERSION="master" 24 | ADD https://raw.githubusercontent.com/lbryio/chainquery/"${VERSION}"/config/default/chainqueryconfig.toml /etc/lbry/chainqueryconfig.toml.orig 25 | RUN adduser chainquery --gecos GECOS --shell /bin/bash --disabled-password --home /home/chainquery && \ 26 | chown -R chainquery:chainquery /etc/lbry 27 | COPY --from=prep ./healthcheck /chainquery /start /usr/bin/ 28 | HEALTHCHECK --interval=1m --timeout=30s \ 29 | CMD healthcheck 30 | EXPOSE 6300 31 | USER chainquery 32 | STOPSIGNAL SIGINT 33 | CMD ["start"] -------------------------------------------------------------------------------- /e2e/lbrynet/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ## This base image is for running the latest lbrynet-daemon release. 
2 | FROM ubuntu:18.04 as prep 3 | LABEL MAINTAINER="leopere [at] nixc [dot] us" 4 | RUN apt-get update && apt-get -y install unzip curl telnet wait-for-it 5 | 6 | ## Add lbrynet 7 | ARG VERSION="latest" 8 | RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbry-sdk/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrynet-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrynet.linux.zip $URL 9 | 10 | COPY start.sh /usr/bin/start 11 | COPY checkmount.sh /usr/bin/checkmount 12 | RUN unzip /lbrynet.linux.zip -d /lbrynet/ && \ 13 | mv /lbrynet/lbrynet /usr/bin && \ 14 | chmod a+x /usr/bin/checkmount /usr/bin/start /usr/bin/lbrynet 15 | 16 | FROM ubuntu:18.04 as app 17 | COPY --from=prep /usr/bin/start /usr/bin/checkmount /usr/bin/lbrynet /usr/bin/ 18 | RUN adduser lbrynet --gecos GECOS --shell /bin/bash --disabled-password --home /home/lbrynet 19 | ## Daemon port [Intended for internal use] 20 | ## LBRYNET talks to peers on port 3333 [Intended for external use] this port is used to discover other lbrynet daemons with blobs. 
21 | ## Expose 5566 Reflector port to listen on 22 | ## Expose 5279 Port the daemon API will listen on 23 | ## the lbryumx aka Wallet port [Intended for internal use] 24 | #EXPOSE 4444 3333 5566 5279 50001 25 | USER lbrynet 26 | ENTRYPOINT ["/usr/bin/checkmount"] 27 | CMD ["start"] -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | os: linux 2 | dist: bionic 3 | language: go 4 | go: 5 | - 1.17.x 6 | 7 | install: true 8 | 9 | cache: 10 | directories: 11 | - $HOME/.cache/go-build 12 | - $HOME/gopath/pkg/mod 13 | 14 | services: 15 | - docker 16 | 17 | addons: 18 | apt: 19 | update: true 20 | packages: 21 | - ffmpeg 22 | - tree 23 | - python3-pip 24 | 25 | before_script: 26 | - sudo pip3 install -U yt-dlp 27 | - sudo add-apt-repository -y ppa:savoury1/ffmpeg4 28 | 29 | env: 30 | global: 31 | #GITHUB_TOKEN 32 | - secure: "Ps3KocRP5xnM3/uA99CeYhDTVxRIuW7fGyrtqBeRWZW0cXzeA4XCTKxqcFbrPUPw67XkrBVgE58JDdWoQEJ7tm67PjMm/ltp5Evhx/QAJDh+YSofXyGDVpG1mrTZFI66R3NVVJLkSGALMkuWWXvfYZeU//AworJbyRoaIK/CVt5OP23i5N4tdd5UXc5dfLuYqnKRynyMmCkz9c3yEIQMXoPhG2hx7l7L2BeMJvcKmVhkSN7nQayjnrbUXGm/IRqrb88lvkyBevN5E3IB2V5IKEieIPZjbD/N0IfcnAt89Z96tgDhtIbx3ZvXm92lsvHA8buqQpG9d2AmSi6GKs64lQcnGeM5o0wER2JHWl1OSa1Nr/UAo5Xb/PM65Yt3yZE8AuMKHBmbfDSBzdkTXx58AeDzFUd3kMXD/fFjeQQWyXFlOss3ygH9SObl827Txmz9OJqZaxabs5Q3AP6m3EjKjz7zfLfrgpcxJM2WBiU1bN0ZxUgZkImy/CHk5gCZ7vhcnaLiDO4HZnzY/aRJwKYQPE5i0O2nHpIfovqkc0DFBA7U/7Cjin7e1E0UZvF3meLOxMqkfc6X7QTxqQpt2Tej6jlpdxw4CTLwGUhGkAw9IAPkUB3L0EbZ1/ksGhNvGDvUeSTq8hYdMAPmA+k9jS6653V4SQ+qBMy5++tbr5AeZQI=" 33 | 34 | script: 35 | #- ./e2e/e2e.sh # Hold until we can resolve the /var/tmp issue - talk to beamer/niko 36 | - make 37 | 38 | deploy: 39 | provider: script 40 | skip_cleanup: true 41 | script: ./scripts/deploy.sh 42 | file: bin/ytsync 43 | on: 44 | repo: lbryio/ytsync 45 | tags: true 46 | 
-------------------------------------------------------------------------------- /e2e/data_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | #Add a ytsync user 6 | ADDYTSYNCUSER='INSERT INTO user (given_name) VALUE("ytsync user")' 7 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCUSER" 8 | #Insert an auth token for the user to be used by ytsync 9 | ADDYTSYNCAUTHTOKEN='INSERT INTO auth_token (user_id, value) VALUE(1,"ytsyntoken")' 10 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCAUTHTOKEN" 11 | #Give priveledges to ytsync user 12 | ASSIGNGROOP='INSERT INTO user_groop (user_id, groop_id) VALUE( 1,3)' 13 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ASSIGNGROOP" 14 | 15 | #Add youtuber to sync 16 | ADDYTSYNCER='INSERT INTO user (given_name) VALUE("youtuber")' 17 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCER" 18 | #Insert an auth token for the youtuber to be used by ytsync 19 | ADDYTSYNCAUTHTOKEN='INSERT INTO auth_token (user_id, value) VALUE(2,"youtubertoken")' 20 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCAUTHTOKEN" 21 | #Add their youtube channel to be synced 22 | ADDYTCHANNEL="INSERT INTO youtube_data (user_id, status_token,desired_lbry_channel,channel_id,channel_name,status,created_at,source,total_videos,total_subscribers,should_sync,redeemable,total_views,reviewed,last_uploaded_video,length_limit,size_limit,reward_amount,reward_expiration) 23 | VALUE(2,'3qzGyuVjQaf7t4pKKu2Er1NRW2LJkeWw','$1','$2','СтопХам','queued','2019-08-01 00:00:00','sync',1000,1000,1,1,10000,1,'$3',60,2048,0,'2019-08-01 00:00:00')" 24 | mysql -u lbry -plbry -D lbry -h "127.0.0.1" --default-character-set=utf8 -P 15500 -e "$ADDYTCHANNEL" 25 | -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 as prep 2 | LABEL MAINTAINER="leopere [at] nixc [dot] us" 3 | ## TODO: Implement version pinning. `apt-get install curl=` 4 | RUN apt-get update && \ 5 | apt-get -y install unzip curl build-essential && \ 6 | apt-get autoclean -y && \ 7 | rm -rf /var/lib/apt/lists/* 8 | WORKDIR / 9 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 10 | COPY ./start.sh start 11 | COPY ./healthcheck.sh healthcheck 12 | COPY ./advance_blocks.sh advance 13 | COPY ./fix-permissions.c fix-permissions.c 14 | 15 | ## Add lbrycrd - Change the version below to create an image for a different tag/version 16 | ARG VERSION="v0.12.4.1" 17 | RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbrycrd/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrycrd-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrycrd-linux.zip $URL 18 | 19 | RUN unzip ./lbrycrd-linux.zip && \ 20 | gcc fix-permissions.c -o fix-permissions && \ 21 | chmod +x ./lbrycrdd ./lbrycrd-cli ./lbrycrd-tx ./start ./healthcheck ./fix-permissions ./advance 22 | 23 | FROM ubuntu:18.04 as app 24 | COPY --from=prep /lbrycrdd /lbrycrd-cli /lbrycrd-tx /start /healthcheck /fix-permissions /advance /usr/bin/ 25 | RUN addgroup --gid 1000 lbrycrd && \ 26 | adduser lbrycrd --uid 1000 --gid 1000 --gecos GECOS --shell /bin/bash --disabled-password --home /data && \ 27 | mkdir /etc/lbry && \ 28 | chown lbrycrd /etc/lbry && \ 29 | chmod a+s /usr/bin/fix-permissions 30 | VOLUME ["/data"] 31 | WORKDIR /data 32 | ## TODO: Implement healthcheck. 
33 | # HEALTHCHECK ["healthcheck"] 34 | EXPOSE 9246 9245 11337 29245 35 | 36 | USER lbrycrd 37 | CMD ["start"] -------------------------------------------------------------------------------- /sources/youtubeVideo_test.go: -------------------------------------------------------------------------------- 1 | package sources 2 | 3 | import ( 4 | "regexp" 5 | "testing" 6 | 7 | "github.com/abadojack/whatlanggo" 8 | "github.com/sirupsen/logrus" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestLanguageDetection(t *testing.T) { 13 | description := `Om lättkränkta muslimer, och den bristande logiken i vad som anses vara att vanära profeten. Från Moderata riksdagspolitikern Hanif Balis podcast "God Ton", avsnitt 108, från oktober 2020, efter terrordådet där en fransk lärare fick huvudet avskuret efter att undervisat sin mångkulturella klass om frihet.` 14 | info := whatlanggo.Detect(description) 15 | logrus.Infof("confidence: %.2f", info.Confidence) 16 | assert.True(t, info.IsReliable()) 17 | assert.True(t, info.Lang.Iso6391() != "") 18 | assert.Equal(t, "sv", info.Lang.Iso6391()) 19 | 20 | description = `🥳週四直播 | 晚上來開個賽車🔰歡迎各位一起來玩! 
- PonPonLin蹦蹦林` 21 | info = whatlanggo.Detect(description) 22 | logrus.Infof("confidence: %.2f", info.Confidence) 23 | assert.True(t, info.IsReliable()) 24 | assert.True(t, info.Lang.Iso6391() != "") 25 | assert.Equal(t, "zh", info.Lang.Iso6391()) 26 | 27 | description = `成為這個頻道的會員並獲得獎勵: 28 | https://www.youtube.com/channel/UCOQFrooz-YGHjYb7s3-MrsQ/join 29 | _____________________________________________ 30 | 想聽我既音樂作品可以去下面LINK 31 | streetvoice 街聲: 32 | https://streetvoice.com/CTLam331/ 33 | _____________________________________________ 34 | 想學結他、鋼琴 35 | 有關音樂制作工作 36 | 都可以搵我~ 37 | 大家快D訂閱喇 38 | 不定期出片 39 | 40 | 41 | 42 | 43 | Website: http://ctlam331.wixsite.com/ctlamusic 44 | FB PAGE:https://www.facebook.com/ctlam331 45 | IG:ctlamusic` 46 | urlsRegex := regexp.MustCompile(`(?m) ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)`) 47 | descriptionSample := urlsRegex.ReplaceAllString(description, "") 48 | info = whatlanggo.Detect(descriptionSample) 49 | logrus.Infof("confidence: %.2f", info.Confidence) 50 | assert.True(t, info.IsReliable()) 51 | assert.True(t, info.Lang.Iso6391() != "") 52 | assert.Equal(t, "zh", info.Lang.Iso6391()) 53 | } 54 | -------------------------------------------------------------------------------- /configs/configs.go: -------------------------------------------------------------------------------- 1 | package configs 2 | 3 | import ( 4 | "os" 5 | "regexp" 6 | 7 | "github.com/lbryio/lbry.go/v2/extras/errors" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/aws/credentials" 11 | log "github.com/sirupsen/logrus" 12 | "github.com/tkanos/gonfig" 13 | ) 14 | 15 | type S3Configs struct { 16 | ID string `json:"id"` 17 | Secret string `json:"secret"` 18 | Region string `json:"region"` 19 | Bucket string `json:"bucket"` 20 | Endpoint string `json:"endpoint"` 21 | } 22 | type Configs struct { 23 | SlackToken string `json:"slack_token"` 24 | SlackChannel string `json:"slack_channel"` 25 | InternalApisEndpoint string `json:"internal_apis_endpoint"` 
26 | InternalApisAuthToken string `json:"internal_apis_auth_token"` 27 | LbrycrdString string `json:"lbrycrd_string"` 28 | WalletS3Config S3Configs `json:"wallet_s3_config"` 29 | BlockchaindbS3Config S3Configs `json:"blockchaindb_s3_config"` 30 | AWSThumbnailsS3Config S3Configs `json:"aws_thumbnails_s3_config"` 31 | ThumbnailsS3Config S3Configs `json:"thumbnails_s3_config"` 32 | } 33 | 34 | var Configuration *Configs 35 | 36 | func Init(configPath string) error { 37 | if Configuration != nil { 38 | return nil 39 | } 40 | c := Configs{} 41 | err := gonfig.GetConf(configPath, &c) 42 | if err != nil { 43 | return errors.Err(err) 44 | } 45 | Configuration = &c 46 | return nil 47 | } 48 | 49 | func (s *S3Configs) GetS3AWSConfig() *aws.Config { 50 | return &aws.Config{ 51 | Credentials: credentials.NewStaticCredentials(s.ID, s.Secret, ""), 52 | Region: &s.Region, 53 | Endpoint: &s.Endpoint, 54 | S3ForcePathStyle: aws.Bool(true), 55 | } 56 | } 57 | func (c *Configs) GetHostname() string { 58 | var hostname string 59 | 60 | var err error 61 | hostname, err = os.Hostname() 62 | if err != nil { 63 | log.Error("could not detect system hostname") 64 | hostname = "ytsync_unknown" 65 | } 66 | reg, err := regexp.Compile("[^a-zA-Z0-9_]+") 67 | if err == nil { 68 | hostname = reg.ReplaceAllString(hostname, "_") 69 | 70 | } 71 | if len(hostname) > 30 { 72 | hostname = hostname[0:30] 73 | } 74 | return hostname 75 | } 76 | -------------------------------------------------------------------------------- /e2e/chainquery/docker/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## Config setup 4 | 5 | ## Setup Values 6 | DEBUGMODE=$(echo "debugmode=$DEBUGMODE") 7 | LBRYCRDURL=$(echo "lbrycrdurl=\"rpc://$RPC_USER:$RPC_PASSWORD@10.5.1.2:9245\"") 8 | MYSQLDSN=$(echo "mysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\"") 9 | APIMYSQLDSN=$(echo 
"apimysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\"") 10 | 11 | ## Setup Defaults 12 | DEBUGMODE_DEFAULT='#DEFAULT-debugmode=false' 13 | LBRYCRDURL_DEFAULT='#DEFAULT-lbrycrdurl="rpc://lbry:lbry@localhost:9245"' 14 | MYSQLDSN_DEFAULT='#DEFAULT-mysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"' 15 | APIMYSQLDSN_DEFAULT='#DEFAULT-apimysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"' 16 | 17 | ## Add setup value variable name to this list to get processed on container start 18 | CONFIG_SETTINGS=( 19 | DEBUGMODE 20 | LBRYCRDURL 21 | MYSQLDSN 22 | APIMYSQLDSN 23 | ) 24 | 25 | function set_configs() { 26 | ## Set configs on container start if not already set. 27 | for i in "${!CONFIG_SETTINGS[@]}"; do 28 | ## Indirect references http://tldp.org/LDP/abs/html/ivr.html 29 | eval FROM_STRING=\$"${CONFIG_SETTINGS[$i]}_DEFAULT" 30 | eval TO_STRING=\$${CONFIG_SETTINGS[$i]} 31 | ## TODO: Add a bit more magic to make sure that you're only configuring things if not set by config mounts. 32 | sed -i "s~$FROM_STRING~"$TO_STRING"~g" /etc/lbry/chainqueryconfig.toml 33 | done 34 | echo "Reading config for debugging." 35 | cat /etc/lbry/chainqueryconfig.toml 36 | } 37 | 38 | if [[ ! -f /etc/lbry/chainqueryconfig.toml ]]; then 39 | echo "[INFO]: Did not find chainqueryconfig.toml" 40 | echo " Installing default and configuring with provided environment variables if any." 41 | ## Install fresh copy of config file. 42 | echo "cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml" 43 | cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml 44 | ls -lAh /etc/lbry/ 45 | set_configs 46 | else 47 | echo "[INFO]: Found a copy of chainqueryconfig.toml in /etc/lbry" 48 | fi 49 | 50 | ## For now keeping this simple. 
Potentially eventually add all command args as envvars for the Dockerfile or use safe way to add args via docker-compose.yml 51 | chainquery serve --configpath "/etc/lbry/" -------------------------------------------------------------------------------- /blobs_reflector/reflect.go: -------------------------------------------------------------------------------- 1 | package blobs_reflector 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "os" 7 | "os/user" 8 | "path/filepath" 9 | 10 | "github.com/lbryio/lbry.go/v2/extras/errors" 11 | "github.com/lbryio/reflector.go/cmd" 12 | "github.com/lbryio/reflector.go/db" 13 | "github.com/lbryio/reflector.go/reflector" 14 | "github.com/lbryio/reflector.go/store" 15 | "github.com/sirupsen/logrus" 16 | 17 | "github.com/lbryio/ytsync/v5/util" 18 | ) 19 | 20 | var dbHandle *db.SQL 21 | 22 | func ReflectAndClean() error { 23 | err := reflectBlobs() 24 | if err != nil { 25 | return err 26 | } 27 | return util.CleanupLbrynet() 28 | } 29 | 30 | func loadConfig(path string) (cmd.Config, error) { 31 | var c cmd.Config 32 | 33 | raw, err := ioutil.ReadFile(path) 34 | if err != nil { 35 | if os.IsNotExist(err) { 36 | return c, errors.Err("config file not found") 37 | } 38 | return c, err 39 | } 40 | 41 | err = json.Unmarshal(raw, &c) 42 | return c, err 43 | } 44 | 45 | func reflectBlobs() error { 46 | if util.IsBlobReflectionOff() { 47 | return nil 48 | } 49 | logrus.Infoln("reflecting blobs...") 50 | //make sure lbrynet is off 51 | running, err := util.IsLbrynetRunning() 52 | if err != nil { 53 | return err 54 | } 55 | if running { 56 | return errors.Prefix("cannot reflect blobs as the daemon is running", err) 57 | } 58 | logrus.SetLevel(logrus.InfoLevel) 59 | defer logrus.SetLevel(logrus.DebugLevel) 60 | ex, err := os.Executable() 61 | if err != nil { 62 | return errors.Err(err) 63 | } 64 | exPath := filepath.Dir(ex) 65 | config, err := loadConfig(exPath + "/prism_config.json") 66 | if err != nil { 67 | return errors.Err(err) 68 
| } 69 | if dbHandle == nil { 70 | dbHandle = new(db.SQL) 71 | err = dbHandle.Connect(config.DBConn) 72 | if err != nil { 73 | return errors.Err(err) 74 | } 75 | } 76 | st := store.NewDBBackedStore(store.NewS3Store(config.AwsID, config.AwsSecret, config.BucketRegion, config.BucketName), dbHandle, false) 77 | 78 | uploadWorkers := 10 79 | uploader := reflector.NewUploader(dbHandle, st, uploadWorkers, false, false) 80 | usr, err := user.Current() 81 | if err != nil { 82 | return errors.Err(err) 83 | } 84 | blobsDir := usr.HomeDir + "/.lbrynet/blobfiles/" 85 | err = uploader.Upload(blobsDir) 86 | if err != nil { 87 | return errors.Err(err) 88 | } 89 | if uploader.GetSummary().Err > 0 { 90 | return errors.Err("not al blobs were reflected. Errors: %d", uploader.GetSummary().Err) 91 | } 92 | return nil 93 | } 94 | -------------------------------------------------------------------------------- /namer/names.go: -------------------------------------------------------------------------------- 1 | package namer 2 | 3 | import ( 4 | "crypto/md5" 5 | "encoding/hex" 6 | "fmt" 7 | "regexp" 8 | "strconv" 9 | "strings" 10 | "sync" 11 | ) 12 | 13 | var claimNameRegexp = regexp.MustCompile(`[=&#:$@%??;、\\"/<>%{}||^~\x60[\]\s]`) 14 | 15 | type Namer struct { 16 | mu *sync.Mutex 17 | names map[string]bool 18 | } 19 | 20 | func NewNamer() *Namer { 21 | return &Namer{ 22 | mu: &sync.Mutex{}, 23 | names: make(map[string]bool), 24 | } 25 | } 26 | 27 | func (n *Namer) SetNames(names map[string]bool) { 28 | n.names = names 29 | } 30 | 31 | func (n *Namer) GetNextName(prefix string) string { 32 | n.mu.Lock() 33 | defer n.mu.Unlock() 34 | 35 | attempt := 1 36 | var name string 37 | for { 38 | name = getClaimNameFromTitle(prefix, attempt) 39 | if _, exists := n.names[name]; !exists { 40 | break 41 | } 42 | attempt++ 43 | } 44 | 45 | //if for some reasons the title can't be converted in a valid claim name (too short or not latin) then we use a hash 46 | attempt = 1 47 | if len(name) < 2 { 48 | 
// truncateUnicode strips a small set of blacklisted characters (&, >, <, /,
// :, newline, carriage return) from name and then truncates the result to at
// most limit runes (not bytes), so multi-byte UTF-8 characters are never cut
// in half.
func truncateUnicode(name string, limit int) string {
	// Simplified from `(&|>|<|\/|:|\n|\r)*`: that alternation also matched the
	// empty string at every position; a character class matches exactly the
	// blacklisted characters and produces the same replacement result.
	reNameBlacklist := regexp.MustCompile(`[&><:/\n\r]+`)
	name = reNameBlacklist.ReplaceAllString(name, "")
	result := name
	chars := 0
	for i := range name { // i is the byte offset of each rune start
		if chars >= limit {
			result = name[:i]
			break
		}
		chars++
	}
	return result
}
tarballFilePath, err.Error()) 17 | } 18 | defer file.Close() 19 | 20 | tarWriter := tar.NewWriter(file) 21 | defer tarWriter.Close() 22 | 23 | for _, filePath := range filePaths { 24 | err := addFileToTarWriter(filePath, tarWriter) 25 | if err != nil { 26 | return errors.Err("Could not add file '%s', to tarball, got error '%s'", filePath, err.Error()) 27 | } 28 | } 29 | 30 | return nil 31 | } 32 | 33 | func addFileToTarWriter(filePath string, tarWriter *tar.Writer) error { 34 | file, err := os.Open(filePath) 35 | if err != nil { 36 | return errors.Err("Could not open file '%s', got error '%s'", filePath, err.Error()) 37 | } 38 | defer file.Close() 39 | 40 | stat, err := file.Stat() 41 | if err != nil { 42 | return errors.Err("Could not get stat for file '%s', got error '%s'", filePath, err.Error()) 43 | } 44 | 45 | header := &tar.Header{ 46 | Name: stat.Name(), 47 | Size: stat.Size(), 48 | Mode: int64(stat.Mode()), 49 | ModTime: stat.ModTime(), 50 | } 51 | 52 | err = tarWriter.WriteHeader(header) 53 | if err != nil { 54 | return errors.Err("Could not write header for file '%s', got error '%s'", filePath, err.Error()) 55 | } 56 | 57 | _, err = io.Copy(tarWriter, file) 58 | if err != nil { 59 | return errors.Err("Could not copy the file '%s' data to the tarball, got error '%s'", filePath, err.Error()) 60 | } 61 | 62 | return nil 63 | } 64 | 65 | func Untar(tarball, target string) error { 66 | reader, err := os.Open(tarball) 67 | if err != nil { 68 | return errors.Err(err) 69 | } 70 | defer reader.Close() 71 | tarReader := tar.NewReader(reader) 72 | 73 | for { 74 | header, err := tarReader.Next() 75 | if err == io.EOF { 76 | break 77 | } else if err != nil { 78 | return errors.Err(err) 79 | } 80 | 81 | path := filepath.Join(target, header.Name) 82 | info := header.FileInfo() 83 | if info.IsDir() { 84 | if err = os.MkdirAll(path, info.Mode()); err != nil { 85 | return errors.Err(err) 86 | } 87 | continue 88 | } 89 | 90 | err = extractFile(path, info, tarReader) 91 | if 
err != nil { 92 | return err 93 | } 94 | } 95 | return nil 96 | } 97 | 98 | func extractFile(path string, info fs.FileInfo, tarReader *tar.Reader) error { 99 | file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) 100 | if err != nil { 101 | return errors.Err(err) 102 | } 103 | defer file.Close() 104 | _, err = io.Copy(file, tarReader) 105 | if err != nil { 106 | return errors.Err(err) 107 | } 108 | return nil 109 | } 110 | -------------------------------------------------------------------------------- /timing/timing.go: -------------------------------------------------------------------------------- 1 | package timing 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | "time" 7 | 8 | "github.com/lbryio/ytsync/v5/metrics" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | type Timing struct { 13 | component string 14 | milliseconds int64 15 | min int64 16 | max int64 17 | invocations int32 18 | } 19 | 20 | var timings *sync.Map 21 | 22 | func TimedComponent(component string) *Timing { 23 | if timings == nil { 24 | timings = &sync.Map{} 25 | } 26 | stored, _ := timings.LoadOrStore(component, &Timing{ 27 | component: component, 28 | milliseconds: 0, 29 | min: int64(99999999), 30 | }) 31 | t, _ := stored.(*Timing) 32 | return t 33 | } 34 | 35 | func ClearTimings() { 36 | if timings == nil { 37 | return 38 | } 39 | timings.Range(func(key interface{}, value interface{}) bool { 40 | timings.Delete(key) 41 | return true 42 | }) 43 | } 44 | 45 | func Report() { 46 | var totalTime time.Duration 47 | timings.Range(func(key interface{}, value interface{}) bool { 48 | totalTime += value.(*Timing).Get() 49 | return true 50 | }) 51 | timings.Range(func(key interface{}, value interface{}) bool { 52 | component := key 53 | componentRuntime := value.(*Timing).Get().String() 54 | percentTime := float64(value.(*Timing).Get()) / float64(totalTime) * 100 55 | invocations := value.(*Timing).Invocations() 56 | avgTime := 
// Add records one invocation of the timed component that took d.
// It also feeds the duration into the Prometheus histogram for the component.
// Safe for concurrent use: the running total, min, max and invocation count
// are all maintained with atomic operations, no mutex required.
func (t *Timing) Add(d time.Duration) {
	metrics.Durations.WithLabelValues(t.component).Observe(d.Seconds())
	atomic.AddInt64(&t.milliseconds, d.Milliseconds())
	// Lock-free update of the running minimum: retry the compare-and-swap
	// until either we store the smaller value or another goroutine has
	// already stored one that is <= ours (in which case there is nothing
	// left to do and we exit the loop).
	for {
		oldMin := atomic.LoadInt64(&t.min)
		if d.Milliseconds() < oldMin {
			if atomic.CompareAndSwapInt64(&t.min, oldMin, d.Milliseconds()) {
				break
			}
		} else {
			break
		}
	}
	// Same CAS retry pattern for the running maximum.
	for {
		oldMax := atomic.LoadInt64(&t.max)
		if d.Milliseconds() > oldMax {
			if atomic.CompareAndSwapInt64(&t.max, oldMax, d.Milliseconds()) {
				break
			}
		} else {
			break
		}
	}
	atomic.AddInt32(&t.invocations, 1)
}
) 7 | 8 | func TestSanitizeTags(t *testing.T) { 9 | got, err := SanitizeTags([]string{"this", "super", "expensive", "test", "has", "a lot of", "crypto", "currency", "in it", "trump", "will build the", "wall"}, "UCNQfQvFMPnInwsU_iGYArJQ") 10 | if err != nil { 11 | t.Error(err) 12 | return 13 | } 14 | expectedTags := []string{ 15 | "blockchain", 16 | "switzerland", 17 | "news", 18 | "science & technology", 19 | "economics", 20 | "experiments", 21 | "this", 22 | "in it", 23 | "will build the", 24 | "has", 25 | "crypto", 26 | "trump", 27 | "wall", 28 | "expensive", 29 | "currency", 30 | "a lot of", 31 | } 32 | if len(expectedTags) != len(got) { 33 | t.Error("number of tags differ") 34 | return 35 | } 36 | outer: 37 | for _, et := range expectedTags { 38 | for _, t := range got { 39 | if et == t { 40 | continue outer 41 | } 42 | } 43 | t.Error("tag not found") 44 | return 45 | } 46 | 47 | } 48 | func TestNormalizeTag(t *testing.T) { 49 | tags := []string{ 50 | "blockchain", 51 | "Switzerland", 52 | "news ", 53 | " science & Technology ", 54 | "economics", 55 | "experiments", 56 | "this", 57 | "in it", 58 | "will build the (WOOPS)", 59 | "~has", 60 | "crypto", 61 | "trump", 62 | "wall", 63 | "expensive", 64 | "!currency", 65 | " a lot of ", 66 | "#", 67 | "#whatever", 68 | "#123", 69 | "#123 Something else", 70 | "#123aaa", 71 | "!asdasd", 72 | "CASA BLANCA", 73 | "wwe 2k18 Elimination chamber!", 74 | "pero'", 75 | "però", 76 | "è proprio", 77 | "Ep 29", 78 | "sctest29 Keddr", 79 | "mortal kombat 11 shang tsung", 80 | "!asdasd!", 81 | } 82 | normalizedTags := make([]string, 0, len(tags)) 83 | for _, tag := range tags { 84 | got, err := normalizeTag(tag) 85 | if err != nil { 86 | t.Error(err) 87 | return 88 | } 89 | if got != "" { 90 | normalizedTags = append(normalizedTags, got) 91 | } 92 | fmt.Printf("Got tag: '%s'\n", got) 93 | } 94 | expected := []string{ 95 | "blockchain", 96 | "switzerland", 97 | "news", 98 | "science & technology", 99 | "economics", 100 | 
// Equal reports whether a and b contain the same strings in the same order.
// On mismatch it prints a diagnostic describing the first difference found
// (this is a test helper, so stdout output is intentional).
func Equal(a, b []string) bool {
	if len(a) != len(b) {
		// fix: added missing trailing newline to the diagnostic
		fmt.Printf("expected length %d but got %d\n", len(b), len(a))
		return false
	}
	for i, v := range a {
		if v != b[i] {
			// typo fix: "but bot" -> "but got"
			fmt.Printf("expected %s but got %s\n", b[i], v)
			return false
		}
	}
	return true
}
errors.Err(err) 35 | } 36 | defer img.Close() 37 | if strings.HasPrefix(u.originalUrl, "//") { 38 | u.originalUrl = "https:" + u.originalUrl 39 | } 40 | resp, err := http.Get(u.originalUrl) 41 | if err != nil { 42 | return errors.Err(err) 43 | } 44 | defer resp.Body.Close() 45 | 46 | _, err = io.Copy(img, resp.Body) 47 | if err != nil { 48 | return errors.Err(err) 49 | } 50 | return nil 51 | } 52 | 53 | func (u *thumbnailUploader) uploadThumbnail() error { 54 | key := &u.name 55 | thumb, err := os.Open("/tmp/ytsync_thumbnails/" + u.name) 56 | if err != nil { 57 | return errors.Err(err) 58 | } 59 | defer thumb.Close() 60 | 61 | s3Session, err := session.NewSession(&u.s3Config) 62 | if err != nil { 63 | return errors.Err(err) 64 | } 65 | 66 | uploader := s3manager.NewUploader(s3Session) 67 | 68 | _, err = uploader.Upload(&s3manager.UploadInput{ 69 | Bucket: aws.String("thumbnails.lbry.com"), 70 | Key: key, 71 | Body: thumb, 72 | ACL: aws.String("public-read"), 73 | ContentType: aws.String("image/jpeg"), 74 | CacheControl: aws.String("public, max-age=2592000"), 75 | }) 76 | 77 | u.mirroredUrl = ThumbnailEndpoint + u.name 78 | return errors.Err(err) 79 | } 80 | 81 | func (u *thumbnailUploader) deleteTmpFile() { 82 | err := os.Remove("/tmp/ytsync_thumbnails/" + u.name) 83 | if err != nil { 84 | log.Infof("failed to delete local thumbnail file: %s", err.Error()) 85 | } 86 | } 87 | func MirrorThumbnail(url string, name string) (string, error) { 88 | tu := thumbnailUploader{ 89 | originalUrl: url, 90 | name: name, 91 | s3Config: *configs.Configuration.AWSThumbnailsS3Config.GetS3AWSConfig(), 92 | } 93 | err := tu.downloadThumbnail() 94 | if err != nil { 95 | return "", err 96 | } 97 | defer tu.deleteTmpFile() 98 | 99 | err = tu.uploadThumbnail() 100 | if err != nil { 101 | return "", err 102 | } 103 | 104 | //this is our own S3 storage 105 | tu2 := thumbnailUploader{ 106 | originalUrl: url, 107 | name: name, 108 | s3Config: 
*configs.Configuration.ThumbnailsS3Config.GetS3AWSConfig(), 109 | } 110 | err = tu2.uploadThumbnail() 111 | if err != nil { 112 | return "", err 113 | } 114 | 115 | return tu.mirroredUrl, nil 116 | } 117 | 118 | func GetBestThumbnail(thumbnails []ytdl.Thumbnail) *ytdl.Thumbnail { 119 | var bestWidth ytdl.Thumbnail 120 | for _, thumbnail := range thumbnails { 121 | if bestWidth.Width < thumbnail.Width { 122 | bestWidth = thumbnail 123 | } 124 | } 125 | return &bestWidth 126 | } 127 | -------------------------------------------------------------------------------- /e2e/chainqueryconfig.toml: -------------------------------------------------------------------------------- 1 | #Debug mode outputs specific information to the console 2 | #DEFAULT: false 3 | #debugmode= 4 | 5 | #DebugQueryMode outputs SQL Boiler queries to the console. 6 | #DEFAULT: false 7 | #debugquerymode= 8 | 9 | #LBRYcrd URL is required for chainquery to query the blockchain 10 | #DEFAULT: "rpc://lbry:lbry@localhost:9245" 11 | lbrycrdurl="rpc://lbry:lbry@lbrycrd:29245" 12 | 13 | #MySQL DSN is required for chainquery to store information. 14 | #DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery" 15 | #SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery" 16 | mysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery" 17 | 18 | #API MySQL DSN is required for chainquery to expose a SQL query service 19 | #DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery" 20 | #SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery" 21 | apimysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery" 22 | 23 | #API Host and Port is required for the API Server to bind and listen on. 
24 | #DEFAULT: "0.0.0.0:6300" 25 | #apihostport= 26 | 27 | #Profile mode enables and disables the reporting of a profile for chainquery 28 | #DEFAULT: false 29 | #profilemode= 30 | 31 | #Daemon mode tells chainquery how hard it should work catch up processing the blockchain 32 | #deamonmode=0 #BeastMode it continuously process block after block until caughtup. 33 | #daemonmode=1 #SlowAndSteadyMode it will process block with a frequency of 1 block every 100ms 34 | #daemonmode=2 #DelayMode it will process a block with a configured delay frequency (set via 'processingdelay') 35 | #daemonmode=3 #DaemonMode it will process a block every iteration of the daemon. 36 | #DEFAULT: 0 37 | #deamonmode= 38 | 39 | #Default client timeout is for communication with the api of chainquery 40 | #DEFAULT: 20 #Measured in seconds 41 | #defaultclienttimeout= 42 | 43 | #Processing delay is used to determine how frequently chainquery should process a block 44 | # It is only used if Daemon mode is set to delay mode 45 | #DEFAULT: 100 #Measured in milliseconds 46 | #processingdelay= 47 | 48 | #Daemon delay is the frequency at which chainquery checks for work to do. 49 | #DEFAULT: 1 #Measured in seconds 50 | #daemondelay= 51 | 52 | #Profiling options - will output the time take for certain opertions related to the below category 53 | #DEFAULT: false (for all 3 params) 54 | #daemonprofile= 55 | #lbrycrdprofile= 56 | #mysqlprofile= 57 | 58 | #Slack Hook URL allows slack integration. All logging info level and above is posted to a slack channel. 59 | #DEFAULT: "" 60 | #slackhookurl= 61 | 62 | #Slack Channel is the channel that you want the messages to appear. Works together with the hook url. 63 | #DEFAULT: "" 64 | #slackchannel= 65 | 66 | #Slack Log Level tells chainquery what level of logging will be sent to the slack channel. It will log all levels below 67 | # it as well. 
Panic=0,Fatal=1,Error=2,Warning=3,Info=4,Debug=5 68 | #DEFAULT: 0 69 | #slackloglevel= 70 | 71 | #The command that should be executed to trigger a self update of the software. For linux, for example, `.sh` 72 | #DEFAULT: "" 73 | #autoupdatecommand= 74 | 75 | #Twilio service of chainquery to send specifically important information to key users of the Chainquery install. 76 | #DEFAULT: 77 | ##twiliosid="" 78 | ##twilioauthtoken="" 79 | ##smsrecipients=["",""] 80 | ##smsfromphonenumber="" 81 | #twiliosid= 82 | #twilioauthtoken= 83 | #smsrecipients= 84 | #smsfromphonenumber= 85 | 86 | #API Keys - Disallowed by default unless keys are entered. 87 | #DEFAULT: [] 88 | #apikeys= 89 | 90 | #Max Failures - Specifies the number of failures that can happen in processing a transaction. This is for parallel 91 | #transaction processing which puts a transaction to the back of the processing queue if it fails. It can fail say if its 92 | #source output to spend is not already processed. 93 | #DEFAULT: 1000 94 | #maxfailures= 95 | 96 | #Block Chain Name - Specifies the chain params for parsing blocks, transactions, claims, and addresses. valid choices are 97 | #lbrycrd_main, lbrycrd_testnet, and lbrycrd_regtest. 98 | #DEFAULT: "lbrycrd_main" 99 | blockchainname="lbrycrd_regtest" -------------------------------------------------------------------------------- /e2e/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | services: 3 | ############# 4 | ## Lbrycrd ## 5 | ############# 6 | lbrycrd: 7 | image: lbry/lbrycrd:v0.17.3.2-deprecatedrpc 8 | restart: "no" 9 | ports: 10 | - "15201:29246" 11 | - "15200:29245" 12 | expose: 13 | - "29246" 14 | - "29245" 15 | ## host volumes for persistent data such as wallet private keys. 
16 | volumes: 17 | - "./persist:/data" 18 | environment: 19 | - RUN_MODE=regtest 20 | ################### 21 | ## Wallet Server ## 22 | ################### 23 | walletserver: 24 | image: lbry/wallet-server:v0.101.1 25 | restart: always 26 | environment: 27 | - DB_DIRECTORY=/database 28 | - MAX_SEND=1000000000000000000000 29 | - DAEMON_URL=http://lbry:lbry@lbrycrd:29245 30 | - MAX_SUBS=1000000000000 31 | - BANDWIDTH_LIMIT=80000000000 32 | - SESSION_TIMEOUT=10000000000000000000000000 33 | - TCP_PORT=50001 34 | - ELASTIC_HOST=es01 35 | ports: 36 | - "15300:50001" 37 | expose: 38 | - "50001" 39 | depends_on: 40 | - lbrycrd 41 | - es01 42 | ulimits: 43 | nofile: 44 | soft: 90000 45 | hard: 90000 46 | #command: lbry.wallet.server.coin.LBC 47 | command: lbry.wallet.server.coin.LBCRegTest 48 | ############# 49 | ## elasticsearch ## 50 | ############# 51 | es01: 52 | image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0 53 | container_name: es01 54 | environment: 55 | - node.name=es01 56 | - discovery.type=single-node 57 | - indices.query.bool.max_clause_count=8196 58 | - bootstrap.memory_lock=true 59 | - "ES_JAVA_OPTS=-Xms4g -Xmx4g" 60 | ulimits: 61 | memlock: 62 | soft: -1 63 | hard: -1 64 | ports: 65 | - "9200:9200" 66 | expose: 67 | - "9200" 68 | ############# 69 | ## Lbrynet ## 70 | ############# 71 | lbrynet: 72 | image: lbry/lbrynet:v0.99.0 73 | restart: always 74 | ports: 75 | - "15100:5279" 76 | - "15101:5280" 77 | expose: 78 | - "5279" 79 | - "5280" 80 | depends_on: 81 | - walletserver 82 | environment: 83 | - LBRY_STREAMING_SERVER=0.0.0.0:5280 84 | - LBRY_FEE_PER_NAME_CHAR=0 85 | volumes: 86 | - "./persist/.lbrynet:/home/lbrynet" 87 | - ".:/etc/lbry" #Put your daemon_settings.yml here 88 | # /private/var/tmp for OSX and /var/tmp for Linux 89 | - "${LOCAL_TMP_DIR}" 90 | ########### 91 | ## MySQL ## 92 | ########### 93 | mysql: 94 | image: mysql/mysql-server:5.7.33 95 | restart: "no" 96 | ports: 97 | - "15500:3306" 98 | expose: 99 | - "3306" 100 | 
environment: 101 | - MYSQL_ALLOW_EMPTY_PASSWORD=true 102 | - MYSQL_DATABASE=lbry 103 | - MYSQL_USER=lbry 104 | - MYSQL_PASSWORD=lbry 105 | - MYSQL_LOG_CONSOLE=true 106 | volumes: 107 | - "./init.sql:/docker-entrypoint-initdb.d/init.sql" 108 | - "./chainquery/docker/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf" 109 | ################### 110 | ## Internal APIs ## 111 | ################### 112 | internalapis: 113 | image: odyseeteam/internal-apis:master 114 | restart: "no" 115 | ports: 116 | - "15400:8080" 117 | expose: 118 | - "8080" 119 | depends_on: 120 | - mysql 121 | - lbrycrd 122 | environment: 123 | - MYSQL_DSN=lbry:lbry@tcp(mysql:3306)/lbry 124 | - LBRYCRD_CONNECT=rpc://lbry:lbry@lbrycrd:29245 125 | - REPLICA_DSN=lbry:lbry@tcp(mysql:3306)/lbry 126 | entrypoint: wait-for-it -t 0 chainquery:6300 -- wait-for-it -t 0 lbrycrd:29245 -- ./latest serve 127 | ################ 128 | ## Chainquery ## 129 | ################ 130 | chainquery: 131 | image: odyseeteam/chainquery:master 132 | restart: "no" 133 | ports: 134 | - 6300:6300 135 | depends_on: 136 | - lbrycrd 137 | - mysql 138 | ## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations. 139 | volumes: 140 | - ./chainqueryconfig.toml:/etc/lbry/chainqueryconfig.toml 141 | entrypoint: wait-for-it -t 0 lbrycrd:29245 -- wait-for-it -t 0 mysql:3306 -- start -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # YTSync Tool 2 | [![Build Status](https://travis-ci.com/lbryio/ytsync.svg?branch=master)](https://travis-ci.com/lbryio/ytsync) 3 | 4 | This tool serves LBRY by parsing youtube channels that want their content mirrored on LBRY. 5 | 6 | The tool downloads the entire set of public videos from a given channel, publishes them to LBRY and populates our private database in order to keep track of what's published. 
7 | With the support of said database, the tool is also able to keep all the channels updated. 8 | 9 | 10 | # Requirements 11 | - lbrynet SDK https://github.com/lbryio/lbry-sdk/releases (We strive to keep the latest release of ytsync compatible with the latest major release of the SDK) 12 | - a lbrycrd node running (localhost or on a remote machine) with credits in it 13 | - internal-apis (you cannot run this one yourself) 14 | - python3-pip 15 | - yt-dlp (`pip3 install -U yt-dlp`) 16 | - ffmpeg (latest) 17 | 18 | # Setup 19 | - make sure daemon is stopped and can be controlled through `systemctl` (find example below) 20 | - extract the ytsync binary anywhere 21 | - create and fill `config.json` using [this example](config.json.example) 22 | 23 | ## systemd script example 24 | `/etc/systemd/system/lbrynet.service` 25 | ``` 26 | [Unit] 27 | Description="LBRYnet daemon" 28 | After=network.target 29 | 30 | [Service] 31 | Environment="HOME=/home/lbry" 32 | ExecStart=/opt/lbry/lbrynet start 33 | User=lbry 34 | Group=lbry 35 | Restart=on-failure 36 | KillMode=process 37 | 38 | [Install] 39 | WantedBy=multi-user.target 40 | ``` 41 | 42 | # Instructions 43 | 44 | ``` 45 | Publish youtube channels into LBRY network automatically. 46 | 47 | Usage: 48 | ytsync [flags] 49 | 50 | Flags: 51 | --after int Specify from when to pull jobs [Unix time](Default: 0) 52 | --before int Specify until when to pull jobs [Unix time](Default: current Unix time) (default 1669311891) 53 | --channelID string If specified, only this channel will be synced. 
54 | --concurrent-jobs int how many jobs to process concurrently (default 1) 55 | -h, --help help for ytsync 56 | --limit int limit the amount of channels to sync 57 | --max-length int Maximum video length to process (in hours) (default 2) 58 | --max-size int Maximum video size to process (in MB) (default 2048) 59 | --max-tries int Number of times to try a publish that fails (default 3) 60 | --no-transfers Skips the transferring process of videos, channels and supports 61 | --quick Look up only the last 50 videos from youtube 62 | --remove-db-unpublished Remove videos from the database that are marked as published but aren't really published 63 | --run-once Whether the process should be stopped after one cycle or not 64 | --skip-space-check Do not perform free space check on startup 65 | --status string Specify which queue to pull from. Overrides --update 66 | --status2 string Specify which secondary queue to pull from. 67 | --takeover-existing-channel If channel exists and we don't own it, take over the channel 68 | --update Update previously synced channels instead of syncing new ones 69 | --upgrade-metadata Upgrade videos if they're on the old metadata version 70 | --videos-limit int how many videos to process per channel (leave 0 for automatic detection) 71 | 72 | ``` 73 | 74 | ## Running from Source 75 | 76 | Clone the repository and run `make` 77 | 78 | ## License 79 | 80 | This project is MIT licensed. For the full license, see [LICENSE](LICENSE). 81 | 82 | ## Contributing 83 | 84 | Contributions to this project are welcome, encouraged, and compensated. For more details, see [CONTRIBUTING](https://lbry.tech/contribute). 85 | 86 | ## Security 87 | 88 | We take security seriously. Please contact [security@lbry.io](mailto:security@lbry.io) regarding any security issues. Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it. 
89 | 90 | ## Contact 91 | 92 | The primary contact for this project is [Niko Storni](https://github.com/nikooo777) (niko@lbry.com). 93 | 94 | ## Additional Info and Links 95 | 96 | - [https://lbry.com](https://lbry.com) - The live LBRY website 97 | - [Discord Chat](https://chat.lbry.com) - A chat room for the LBRYians 98 | - [Email us](mailto:hello@lbry.com) - LBRY Support email 99 | - [Twitter](https://twitter.com/@lbryio) - LBRY Twitter page 100 | - [Facebook](https://www.facebook.com/lbryio/) - LBRY Facebook page 101 | - [Reddit](https://reddit.com/r/lbry) - LBRY Reddit page 102 | - [Telegram](https://t.me/lbryofficial) - Telegram group 103 | -------------------------------------------------------------------------------- /0001-lbry-patch.patch: -------------------------------------------------------------------------------- 1 | From 30380338ba9af01696c94b61f0597131638eaec1 Mon Sep 17 00:00:00 2001 2 | From: Niko Storni 3 | Date: Mon, 16 Dec 2019 00:13:36 +0100 4 | Subject: [PATCH] lbry-patch 5 | 6 | --- 7 | youtube_dl/extractor/youtube.py | 45 +++++++++++++++++++++++++-------- 8 | 1 file changed, 35 insertions(+), 10 deletions(-) 9 | 10 | diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py 11 | index b913d07a6..cd66a5b01 100644 12 | --- a/youtube_dl/extractor/youtube.py 13 | +++ b/youtube_dl/extractor/youtube.py 14 | @@ -10,6 +10,7 @@ import random 15 | import re 16 | import time 17 | import traceback 18 | +import subprocess 19 | 20 | from .common import InfoExtractor, SearchInfoExtractor 21 | from ..jsinterp import JSInterpreter 22 | @@ -536,6 +537,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 23 | 24 | _GEO_BYPASS = False 25 | 26 | + _WGET_429_RATE_LIMIT = 8191 27 | + _WGET_BINARY = "wget" 28 | + 29 | IE_NAME = 'youtube' 30 | _TESTS = [ 31 | { 32 | @@ -1254,6 +1258,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 33 | """ Return a string representation of a signature """ 34 | return '.'.join(compat_str(len(part)) for part in 
example_sig.split('.')) 35 | 36 | + def _rate_limit_download(self, url, video_id, note=None): 37 | + if note is None: 38 | + self.report_download_webpage(video_id) 39 | + elif note is not False: 40 | + if video_id is None: 41 | + self.to_screen('%s' % (note,)) 42 | + else: 43 | + self.to_screen('%s: %s' % (video_id, note)) 44 | + source_address = self._downloader.params.get('source_address') 45 | + return subprocess.run([self._WGET_BINARY, '-q', '--limit-rate', str(self._WGET_429_RATE_LIMIT), '--bind-address', source_address, '-O', '-', url], check=True, stdout=subprocess.PIPE).stdout.decode(encoding='UTF-8') 46 | + 47 | def _extract_signature_function(self, video_id, player_url, example_sig): 48 | id_m = re.match( 49 | r'.*?-(?P[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P[a-z]+)$', 50 | @@ -1678,7 +1693,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 51 | 52 | # Get video webpage 53 | url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id 54 | - video_webpage = self._download_webpage(url, video_id) 55 | + video_webpage = self._rate_limit_download(url, video_id) 56 | 57 | # Attempt to extract SWF player URL 58 | mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) 59 | @@ -1736,10 +1751,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 60 | r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''), 61 | }) 62 | video_info_url = proto + '://www.youtube.com/get_video_info?' 
+ data 63 | - video_info_webpage = self._download_webpage( 64 | + video_info_webpage = self._rate_limit_download( 65 | video_info_url, video_id, 66 | - note='Refetching age-gated info webpage', 67 | - errnote='unable to download video info webpage') 68 | + note='Refetching age-gated info webpage') 69 | video_info = compat_parse_qs(video_info_webpage) 70 | pl_response = video_info.get('player_response', [None])[0] 71 | player_response = extract_player_response(pl_response, video_id) 72 | @@ -1777,7 +1791,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 73 | # The general idea is to take a union of itags of both DASH manifests (for example 74 | # video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093) 75 | self.report_video_info_webpage_download(video_id) 76 | - for el in ('embedded', 'detailpage', 'vevo', ''): 77 | + for el in ('', 'embedded', 'detailpage', 'vevo'): 78 | query = { 79 | 'video_id': video_id, 80 | 'ps': 'default', 81 | @@ -1789,11 +1803,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 82 | query['el'] = el 83 | if sts: 84 | query['sts'] = sts 85 | - video_info_webpage = self._download_webpage( 86 | - '%s://www.youtube.com/get_video_info' % proto, 87 | - video_id, note=False, 88 | - errnote='unable to download video info webpage', 89 | - fatal=False, query=query) 90 | + 91 | + if el == '': 92 | + base_url = 'https://youtube.com/get_video_info?video_id={}'.format(video_id) 93 | + else: 94 | + base_url = 'https://youtube.com/get_video_info' 95 | + 96 | + for q in query: 97 | + if q is None or q is "": 98 | + continue 99 | + if query[q] is None or query[q] is "": 100 | + continue 101 | + 102 | + base_url = base_url + "?{}={}".format(q, query[q]) 103 | + 104 | + video_info_webpage = self._rate_limit_download(base_url, video_id) 105 | + 106 | if not video_info_webpage: 107 | continue 108 | get_video_info = compat_parse_qs(video_info_webpage) 109 | -- 110 | 2.17.1 111 | 112 | 
-------------------------------------------------------------------------------- /e2e/e2e.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | #Always compile ytsync 6 | make 7 | #Always compile supporty 8 | cd e2e/supporty && make && cd ../.. 9 | 10 | #OVERRIDE this in your .env file if running from mac. Check docker-compose.yml for details 11 | export LOCAL_TMP_DIR="/var/tmp:/var/tmp" 12 | 13 | #Private Variables Set in local installations: SLACK_TOKEN,YOUTUBE_API_KEY,AWS_S3_ID,AWS_S3_SECRET,AWS_S3_REGION,AWS_S3_BUCKET 14 | touch -a .env && set -o allexport; source ./.env; set +o allexport 15 | echo "LOCAL_TMP_DIR=$LOCAL_TMP_DIR" 16 | # Compose settings - docker only 17 | export LBRYNET_ADDRESS="http://localhost:15100" 18 | export LBRYCRD_STRING="tcp://lbry:lbry@localhost:15200" #required for supporty 19 | export LBRYNET_USE_DOCKER=true 20 | export REFLECT_BLOBS=false 21 | export CLEAN_ON_STARTUP=true 22 | export REGTEST=true 23 | # Local settings 24 | export BLOBS_DIRECTORY="$(pwd)/e2e/blobsfiles" 25 | export LBRYNET_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbrynet/" 26 | export LBRYUM_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbryum" 27 | export TMP_DIR="/var/tmp" 28 | export CHAINNAME="lbrycrd_regtest" 29 | export UID 30 | 31 | cd ./e2e 32 | docker-compose stop 33 | docker-compose rm -f 34 | echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin 35 | docker-compose pull 36 | if [[ -d persist ]]; then rm -rf persist; fi 37 | mkdir -m 0777 -p ./persist 38 | mkdir -m 777 -p ./persist/.walletserver 39 | mkdir -m 777 -p ./persist/.lbrynet 40 | #sudo chown -Rv 999:999 ./persist/.walletserver 41 | #sudo chown -Rv 1000:1000 ./persist/.lbrynet 42 | docker-compose up -d 43 | printf 'waiting for internal apis' 44 | until curl --output /dev/null --silent --head --fail http://localhost:15400; do 45 | printf '.' 
46 | sleep 1 47 | done 48 | echo "successfully started..." 49 | 50 | channelToSync="UCMn-zv1SE-2y6vyewscfFqw" 51 | channelName=@whatever"$(date +%s)" 52 | latestVideoID="yPJgjiMbmX0" 53 | 54 | #Data Setup for test 55 | ./data_setup.sh "$channelName" "$channelToSync" "$latestVideoID" 56 | 57 | # Execute the sync test! 58 | ./../bin/ytsync --channelID "$channelToSync" --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container 59 | status=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM youtube_data WHERE id=1') 60 | videoStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM synced_video WHERE id=1') 61 | videoClaimID1=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=1') 62 | videoClaimID2=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=2') 63 | videoClaimAddress1=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=2') 64 | videoClaimAddress2=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=3') 65 | # Create Supports for published claim 66 | ./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 1.0 67 | ./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 2.0 68 | ./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 3.0 69 | ./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 3.0 70 | curl --data-binary '{"jsonrpc":"1.0","id":"curltext","method":"generate","params":[1]}' -H 
'content-type:text/plain;' --user lbry:lbry http://localhost:15200 71 | # Reset status for transfer test 72 | mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e "UPDATE youtube_data SET status = 'queued' WHERE id = 1" 73 | # Trigger transfer api 74 | curl -i -H 'Accept: application/json' -H 'Content-Type: application/json' 'http://localhost:15400/yt/transfer?auth_token=youtubertoken&address=n4eYeXAYmHo4YRUDEfsEhucy8y5LKRMcHg&public_key=tpubDA9GDAntyJu4hD3wU7175p7CuV6DWbYXfyb2HedBA3yuBp9HZ4n3QE4Ex6RHCSiEuVp2nKAL1Lzf2ZLo9ApaFgNaJjG6Xo1wB3iEeVbrDZp' 75 | # Execute the transfer test! 76 | ./../bin/ytsync --channelID $channelToSync --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container 77 | # Check that the channel and the video are marked as transferred and that all supports are spent 78 | channelTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transfer_state FROM youtube_data') 79 | videoTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transferred FROM synced_video') 80 | nrUnspentSupports=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT COUNT(*) FROM chainquery.support INNER JOIN output ON output.transaction_hash = support.transaction_hash_id AND output.vout = support.vout WHERE output.is_spent = 0') 81 | if [[ $status != "synced" || $videoStatus != "published" || $channelTransferStatus != "2" || $videoTransferStatus != "1" || $nrUnspentSupports != "1" ]]; then 82 | echo "~~!!!~~~FAILED~~~!!!~~" 83 | echo "Channel Status: $status" 84 | echo "Video Status: $videoStatus" 85 | echo "Channel Transfer Status: $channelTransferStatus" 86 | echo "Video Transfer Status: $videoTransferStatus" 87 | echo "Nr Unspent Supports: $nrUnspentSupports" 88 | #docker-compose logs --tail="all" lbrycrd 89 | #docker-compose logs --tail="all" walletserver 90 | #docker-compose logs --tail="all" 
lbrynet 91 | #docker-compose logs --tail="all" internalapis 92 | exit 1; 93 | else 94 | echo "SUCCESSSSSSSSSSSSS!" 95 | fi; 96 | docker-compose down -------------------------------------------------------------------------------- /e2e/lbrycrd/docker/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | CONFIG_PATH=/etc/lbry/lbrycrd.conf 3 | 4 | function override_config_option() { 5 | # Remove existing config line from a config file 6 | # and replace with environment fed value. 7 | # Does nothing if the variable does not exist. 8 | # var Name of ENV variable 9 | # option Name of config option 10 | # config Path of config file 11 | local var=$1 option=$2 config=$3 12 | if [[ -v $var ]]; then 13 | # Remove the existing config option: 14 | sed -i "/^$option\W*=/d" "$config" 15 | # Add the value from the environment: 16 | echo "$option=${!var}" >> "$config" 17 | fi 18 | } 19 | 20 | function set_config() { 21 | if [ -d "$CONFIG_PATH" ]; then 22 | echo "$CONFIG_PATH is a directory when it should be a file." 23 | exit 1 24 | elif [ -f "$CONFIG_PATH" ]; then 25 | echo "Merging the mounted config file with environment variables." 26 | local MERGED_CONFIG=/tmp/lbrycrd_merged.conf 27 | cat $CONFIG_PATH > $MERGED_CONFIG 28 | echo "" >> $MERGED_CONFIG 29 | override_config_option PORT port $MERGED_CONFIG 30 | override_config_option RPC_USER rpcuser $MERGED_CONFIG 31 | override_config_option RPC_PASSWORD rpcpassword $MERGED_CONFIG 32 | override_config_option RPC_ALLOW_IP rpcallowip $MERGED_CONFIG 33 | override_config_option RPC_PORT rpcport $MERGED_CONFIG 34 | override_config_option RPC_BIND rpcbind $MERGED_CONFIG 35 | # Make the new merged config file the new CONFIG_PATH 36 | # This ensures that the original file the user mounted remains unmodified 37 | CONFIG_PATH=$MERGED_CONFIG 38 | else 39 | echo "Creating a fresh config file from environment variables." 
40 | ## Set config params 41 | { 42 | echo "port=${PORT=9246}" 43 | echo "rpcuser=${RPC_USER=lbry}" 44 | echo "rpcpassword=${RPC_PASSWORD=lbry}" 45 | echo "rpcallowip=${RPC_ALLOW_IP=127.0.0.1/24}" 46 | echo "rpcport=${RPC_PORT=9245}" 47 | echo "rpcbind=${RPC_BIND=0.0.0.0}" 48 | echo "deprecatedrpc=accounts" 49 | echo "deprecatedrpc=validateaddress" 50 | echo "deprecatedrpc=signrawtransaction" 51 | } >> $CONFIG_PATH 52 | fi 53 | echo "Config: " 54 | cat $CONFIG_PATH 55 | } 56 | 57 | ## Ensure perms are correct prior to running main binary 58 | /usr/bin/fix-permissions 59 | 60 | ## You can optionally specify a run mode if you want to use lbry defined presets for compatibility. 61 | case $RUN_MODE in 62 | default ) 63 | set_config 64 | lbrycrdd -server -conf=$CONFIG_PATH -printtoconsole 65 | ;; 66 | ## If it's a first run you need to do a full index including all transactions 67 | ## tx index creates an index of every single transaction in the block history if 68 | ## not specified it will only create an index for transactions that are related to the wallet or have unspent outputs. 69 | ## This is generally specific to chainquery. 70 | reindex ) 71 | ## Apply this RUN_MODE in the case you need to update a dataset. NOTE: you do not need to use `RUN_MODE reindex` for more than one complete run. 72 | set_config 73 | lbrycrdd -server -txindex -reindex -conf=$CONFIG_PATH -printtoconsole 74 | ;; 75 | chainquery ) 76 | ## If your only goal is to run Chainquery against this instance of lbrycrd and you're starting a 77 | ## fresh local dataset use this run mode. 78 | set_config 79 | lbrycrdd -server -txindex -conf=$CONFIG_PATH -printtoconsole 80 | ;; 81 | regtest ) 82 | ## Set config params 83 | ## TODO: Make this more automagic in the future. 
84 | mkdir -p "$(dirname $CONFIG_PATH)" 85 | echo "rpcuser=lbry" > $CONFIG_PATH 86 | echo "rpcpassword=lbry" >> $CONFIG_PATH 87 | echo "rpcport=29245" >> $CONFIG_PATH 88 | echo "rpcbind=0.0.0.0" >> $CONFIG_PATH 89 | echo "rpcallowip=0.0.0.0/0" >> $CONFIG_PATH 90 | echo "regtest=1" >> $CONFIG_PATH 91 | echo "txindex=1" >> $CONFIG_PATH 92 | echo "server=1" >> $CONFIG_PATH 93 | echo "printtoconsole=1" >> $CONFIG_PATH 94 | echo "deprecatedrpc=accounts" >> $CONFIG_PATH 95 | echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH 96 | echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH 97 | echo "vbparams=segwit:0:999999999999" >> $CONFIG_PATH 98 | echo "addresstype=legacy" >> $CONFIG_PATH 99 | 100 | #nohup advance &>/dev/null & 101 | lbrycrdd -conf=$CONFIG_PATH $1 102 | ;; 103 | testnet ) 104 | ## Set config params 105 | ## TODO: Make this more automagic in the future. 106 | mkdir -p "$(dirname $CONFIG_PATH)" 107 | echo "rpcuser=lbry" > $CONFIG_PATH 108 | echo "rpcpassword=lbry" >> $CONFIG_PATH 109 | echo "rpcport=29245" >> $CONFIG_PATH 110 | echo "rpcbind=0.0.0.0" >> $CONFIG_PATH 111 | echo "rpcallowip=0.0.0.0/0" >> $CONFIG_PATH 112 | echo "testnet=1" >> $CONFIG_PATH 113 | echo "txindex=1" >> $CONFIG_PATH 114 | echo "server=1" >> $CONFIG_PATH 115 | echo "printtoconsole=1" >> $CONFIG_PATH 116 | echo "deprecatedrpc=accounts" >> $CONFIG_PATH 117 | echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH 118 | echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH 119 | 120 | #nohup advance &>/dev/null & 121 | lbrycrdd -conf=$CONFIG_PATH $1 122 | ;; 123 | * ) 124 | echo "Error, you must define a RUN_MODE environment variable." 
125 | echo "Available options are testnet, regtest, chainquery, default, and reindex" 126 | ;; 127 | esac -------------------------------------------------------------------------------- /downloader/ytdl/Video.go: -------------------------------------------------------------------------------- 1 | package ytdl 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/lbryio/ytsync/v5/sdk" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type YtdlVideo struct { 11 | ID string `json:"id"` 12 | Title string `json:"title"` 13 | Thumbnails []Thumbnail `json:"thumbnails"` 14 | Description string `json:"description"` 15 | ChannelID string `json:"channel_id"` 16 | Duration int `json:"duration"` 17 | Categories []string `json:"categories"` 18 | Tags []string `json:"tags"` 19 | IsLive bool `json:"is_live"` 20 | LiveStatus string `json:"live_status"` 21 | ReleaseTimestamp *int64 `json:"release_timestamp"` 22 | uploadDateForReal *time.Time 23 | Availability string `json:"availability"` 24 | ReleaseDate string `json:"release_date"` 25 | UploadDate string `json:"upload_date"` 26 | 27 | //WasLive bool `json:"was_live"` 28 | //Formats interface{} `json:"formats"` 29 | //Thumbnail string `json:"thumbnail"` 30 | //Uploader string `json:"uploader"` 31 | //UploaderID string `json:"uploader_id"` 32 | //UploaderURL string `json:"uploader_url"` 33 | //ChannelURL string `json:"channel_url"` 34 | //ViewCount int `json:"view_count"` 35 | //AverageRating interface{} `json:"average_rating"` 36 | //AgeLimit int `json:"age_limit"` 37 | //WebpageURL string `json:"webpage_url"` 38 | //PlayableInEmbed bool `json:"playable_in_embed"` 39 | //AutomaticCaptions interface{} `json:"automatic_captions"` 40 | //Subtitles interface{} `json:"subtitles"` 41 | //Chapters interface{} `json:"chapters"` 42 | //LikeCount int `json:"like_count"` 43 | //Channel string `json:"channel"` 44 | //ChannelFollowerCount int `json:"channel_follower_count"` 45 | //OriginalURL string `json:"original_url"` 46 | //WebpageURLBasename 
string `json:"webpage_url_basename"` 47 | //WebpageURLDomain string `json:"webpage_url_domain"` 48 | //Extractor string `json:"extractor"` 49 | //ExtractorKey string `json:"extractor_key"` 50 | //Playlist interface{} `json:"playlist"` 51 | //PlaylistIndex interface{} `json:"playlist_index"` 52 | //DisplayID string `json:"display_id"` 53 | //Fulltitle string `json:"fulltitle"` 54 | //DurationString string `json:"duration_string"` 55 | //RequestedSubtitles interface{} `json:"requested_subtitles"` 56 | //HasDrm bool `json:"__has_drm"` 57 | //RequestedFormats interface{} `json:"requested_formats"` 58 | //Format string `json:"format"` 59 | //FormatID string `json:"format_id"` 60 | //Ext string `json:"ext"` 61 | //Protocol string `json:"protocol"` 62 | //Language interface{} `json:"language"` 63 | //FormatNote string `json:"format_note"` 64 | //FilesizeApprox int `json:"filesize_approx"` 65 | //Tbr float64 `json:"tbr"` 66 | //Width int `json:"width"` 67 | //Height int `json:"height"` 68 | //Resolution string `json:"resolution"` 69 | //Fps int `json:"fps"` 70 | //DynamicRange string `json:"dynamic_range"` 71 | //Vcodec string `json:"vcodec"` 72 | //Vbr float64 `json:"vbr"` 73 | //StretchedRatio interface{} `json:"stretched_ratio"` 74 | //Acodec string `json:"acodec"` 75 | //Abr float64 `json:"abr"` 76 | //Asr int `json:"asr"` 77 | //Epoch int `json:"epoch"` 78 | //Filename string `json:"filename"` 79 | //Urls string `json:"urls"` 80 | //Type string `json:"_type"` 81 | } 82 | 83 | type Thumbnail struct { 84 | URL string `json:"url"` 85 | Preference int `json:"preference"` 86 | ID string `json:"id"` 87 | Height int `json:"height,omitempty"` 88 | Width int `json:"width,omitempty"` 89 | Resolution string `json:"resolution,omitempty"` 90 | } 91 | 92 | func (v *YtdlVideo) GetUploadTime() time.Time { 93 | //priority list: 94 | // release timestamp from yt 95 | // release timestamp from morty 96 | // release date from yt 97 | // upload date from yt 98 | if v.uploadDateForReal != 
nil { 99 | return *v.uploadDateForReal 100 | } 101 | 102 | var ytdlReleaseTimestamp time.Time 103 | if v.ReleaseTimestamp != nil && *v.ReleaseTimestamp > 0 { 104 | ytdlReleaseTimestamp = time.Unix(*v.ReleaseTimestamp, 0).UTC() 105 | } 106 | //get morty timestamp 107 | var mortyReleaseTimestamp time.Time 108 | mortyRelease, err := sdk.GetAPIsConfigs().GetReleasedDate(v.ID) 109 | if err != nil { 110 | logrus.Error(err) 111 | } else if mortyRelease != nil { 112 | mortyReleaseTimestamp, err = time.ParseInLocation(time.RFC3339, mortyRelease.ReleaseTime, time.UTC) 113 | if err != nil { 114 | logrus.Error(err) 115 | } 116 | } 117 | 118 | ytdlReleaseDate, err := time.Parse("20060102", v.ReleaseDate) 119 | if err != nil { 120 | logrus.Error(err) 121 | } 122 | ytdlUploadDate, err := time.Parse("20060102", v.UploadDate) 123 | if err != nil { 124 | logrus.Error(err) 125 | } 126 | if !ytdlReleaseTimestamp.IsZero() { 127 | v.uploadDateForReal = &ytdlReleaseTimestamp 128 | } else if !mortyReleaseTimestamp.IsZero() { 129 | v.uploadDateForReal = &mortyReleaseTimestamp 130 | } else if !ytdlReleaseDate.IsZero() { 131 | v.uploadDateForReal = &ytdlReleaseDate 132 | } else { 133 | v.uploadDateForReal = &ytdlUploadDate 134 | } 135 | 136 | return *v.uploadDateForReal 137 | } 138 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "net/http" 7 | "os" 8 | "time" 9 | 10 | "github.com/lbryio/ytsync/v5/configs" 11 | "github.com/lbryio/ytsync/v5/manager" 12 | "github.com/lbryio/ytsync/v5/shared" 13 | ytUtils "github.com/lbryio/ytsync/v5/util" 14 | 15 | "github.com/lbryio/lbry.go/v2/extras/errors" 16 | "github.com/lbryio/lbry.go/v2/extras/util" 17 | 18 | "github.com/prometheus/client_golang/prometheus/promhttp" 19 | log "github.com/sirupsen/logrus" 20 | "github.com/spf13/cobra" 21 | ) 22 | 23 | var Version string 
24 | 25 | const defaultMaxTries = 3 26 | 27 | var ( 28 | cliFlags shared.SyncFlags 29 | maxVideoLength int 30 | ) 31 | 32 | func main() { 33 | rand.Seed(time.Now().UnixNano()) 34 | log.SetLevel(log.DebugLevel) 35 | customFormatter := new(log.TextFormatter) 36 | customFormatter.TimestampFormat = "2006-01-02 15:04:05" 37 | customFormatter.FullTimestamp = true 38 | log.SetFormatter(customFormatter) 39 | http.Handle("/metrics", promhttp.Handler()) 40 | go func() { 41 | log.Error(http.ListenAndServe(":2112", nil)) 42 | }() 43 | cmd := &cobra.Command{ 44 | Use: "ytsync", 45 | Short: "Publish youtube channels into LBRY network automatically.", 46 | Run: ytSync, 47 | Args: cobra.RangeArgs(0, 0), 48 | } 49 | 50 | cmd.Flags().IntVar(&cliFlags.MaxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails") 51 | cmd.Flags().BoolVar(&cliFlags.TakeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel") 52 | cmd.Flags().IntVar(&cliFlags.Limit, "limit", 0, "limit the amount of channels to sync") 53 | cmd.Flags().BoolVar(&cliFlags.SkipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup") 54 | cmd.Flags().BoolVar(&cliFlags.SyncUpdate, "update", false, "Update previously synced channels instead of syncing new ones") 55 | cmd.Flags().BoolVar(&cliFlags.SingleRun, "run-once", false, "Whether the process should be stopped after one cycle or not") 56 | cmd.Flags().BoolVar(&cliFlags.RemoveDBUnpublished, "remove-db-unpublished", false, "Remove videos from the database that are marked as published but aren't really published") 57 | cmd.Flags().BoolVar(&cliFlags.UpgradeMetadata, "upgrade-metadata", false, "Upgrade videos if they're on the old metadata version") 58 | cmd.Flags().BoolVar(&cliFlags.DisableTransfers, "no-transfers", false, "Skips the transferring process of videos, channels and supports") 59 | cmd.Flags().BoolVar(&cliFlags.QuickSync, "quick", false, "Look up only 
the last 50 videos from youtube") 60 | cmd.Flags().StringVar(&cliFlags.Status, "status", "", "Specify which queue to pull from. Overrides --update") 61 | cmd.Flags().StringVar(&cliFlags.SecondaryStatus, "status2", "", "Specify which secondary queue to pull from.") 62 | cmd.Flags().StringVar(&cliFlags.ChannelID, "channelID", "", "If specified, only this channel will be synced.") 63 | cmd.Flags().Int64Var(&cliFlags.SyncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)") 64 | cmd.Flags().Int64Var(&cliFlags.SyncUntil, "before", time.Now().AddDate(1, 0, 0).Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)") 65 | cmd.Flags().IntVar(&cliFlags.ConcurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently") 66 | cmd.Flags().IntVar(&cliFlags.VideosLimit, "videos-limit", 0, "how many videos to process per channel (leave 0 for automatic detection)") 67 | cmd.Flags().IntVar(&cliFlags.MaxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)") 68 | cmd.Flags().IntVar(&maxVideoLength, "max-length", 2, "Maximum video length to process (in hours)") 69 | 70 | if err := cmd.Execute(); err != nil { 71 | fmt.Println(err) 72 | os.Exit(1) 73 | } 74 | } 75 | 76 | func ytSync(cmd *cobra.Command, args []string) { 77 | err := configs.Init("./config.json") 78 | if err != nil { 79 | log.Fatalf("could not parse configuration file: %s", errors.FullTrace(err)) 80 | } 81 | 82 | if configs.Configuration.SlackToken == "" { 83 | log.Error("A slack token was not present in the config! 
Slack messages disabled!") 84 | } else { 85 | util.InitSlack(configs.Configuration.SlackToken, configs.Configuration.SlackChannel, configs.Configuration.GetHostname()) 86 | } 87 | 88 | if cliFlags.Status != "" && !util.InSlice(cliFlags.Status, shared.SyncStatuses) { 89 | log.Errorf("status must be one of the following: %v\n", shared.SyncStatuses) 90 | return 91 | } 92 | 93 | if cliFlags.MaxTries < 1 { 94 | log.Errorln("setting --max-tries less than 1 doesn't make sense") 95 | return 96 | } 97 | 98 | if cliFlags.Limit < 0 { 99 | log.Errorln("setting --limit less than 0 (unlimited) doesn't make sense") 100 | return 101 | } 102 | cliFlags.MaxVideoLength = time.Duration(maxVideoLength) * time.Hour 103 | 104 | if configs.Configuration.InternalApisEndpoint == "" { 105 | log.Errorln("An Internal APIs Endpoint was not defined") 106 | return 107 | } 108 | if configs.Configuration.InternalApisAuthToken == "" { 109 | log.Errorln("An Internal APIs auth token was not defined") 110 | return 111 | } 112 | if configs.Configuration.WalletS3Config.ID == "" || configs.Configuration.WalletS3Config.Region == "" || configs.Configuration.WalletS3Config.Bucket == "" || configs.Configuration.WalletS3Config.Secret == "" || configs.Configuration.WalletS3Config.Endpoint == "" { 113 | log.Errorln("Wallet S3 configuration is incomplete") 114 | return 115 | } 116 | if configs.Configuration.BlockchaindbS3Config.ID == "" || configs.Configuration.BlockchaindbS3Config.Region == "" || configs.Configuration.BlockchaindbS3Config.Bucket == "" || configs.Configuration.BlockchaindbS3Config.Secret == "" || configs.Configuration.BlockchaindbS3Config.Endpoint == "" { 117 | log.Errorln("Blockchain DBs S3 configuration is incomplete") 118 | return 119 | } 120 | if configs.Configuration.LbrycrdString == "" { 121 | log.Infoln("Using default (local) lbrycrd instance. 
Set lbrycrd_string if you want to use something else") 122 | } 123 | 124 | blobsDir := ytUtils.GetBlobsDir() 125 | 126 | sm := manager.NewSyncManager( 127 | cliFlags, 128 | blobsDir, 129 | ) 130 | err = sm.Start() 131 | if err != nil { 132 | ytUtils.SendErrorToSlack(errors.FullTrace(err)) 133 | } 134 | ytUtils.SendInfoToSlack("Syncing process terminated!") 135 | } 136 | -------------------------------------------------------------------------------- /ip_manager/throttle.go: -------------------------------------------------------------------------------- 1 | package ip_manager 2 | 3 | import ( 4 | "net" 5 | "sort" 6 | "sync" 7 | "time" 8 | 9 | "github.com/asaskevich/govalidator" 10 | "github.com/lbryio/lbry.go/v2/extras/errors" 11 | "github.com/lbryio/lbry.go/v2/extras/stop" 12 | "github.com/lbryio/ytsync/v5/util" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | const IPCooldownPeriod = 20 * time.Second 17 | const unbanTimeout = 48 * time.Hour 18 | 19 | var stopper = stop.New() 20 | 21 | type IPPool struct { 22 | ips []throttledIP 23 | lock *sync.RWMutex 24 | stopGrp *stop.Group 25 | } 26 | 27 | type throttledIP struct { 28 | IP string 29 | UsedForVideo string 30 | LastUse time.Time 31 | Throttled bool 32 | InUse bool 33 | } 34 | 35 | var ipPoolInstance *IPPool 36 | 37 | func GetIPPool(stopGrp *stop.Group) (*IPPool, error) { 38 | if ipPoolInstance != nil { 39 | return ipPoolInstance, nil 40 | } 41 | addrs, err := net.InterfaceAddrs() 42 | if err != nil { 43 | return nil, errors.Err(err) 44 | } 45 | var pool []throttledIP 46 | for _, address := range addrs { 47 | if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() { 48 | if ipnet.IP.To16() != nil && govalidator.IsIPv6(ipnet.IP.String()) { 49 | pool = append(pool, throttledIP{ 50 | IP: ipnet.IP.String(), 51 | LastUse: time.Now().Add(-5 * time.Minute), 52 | }) 53 | } else if ipnet.IP.To4() != nil && govalidator.IsIPv4(ipnet.IP.String()) { 54 | pool = append(pool, throttledIP{ 55 | IP: 
ipnet.IP.String(), 56 | LastUse: time.Now().Add(-5 * time.Minute), 57 | }) 58 | } 59 | } 60 | } 61 | ipPoolInstance = &IPPool{ 62 | ips: pool, 63 | lock: &sync.RWMutex{}, 64 | stopGrp: stopGrp, 65 | } 66 | //ticker := time.NewTicker(10 * time.Second) 67 | //go func() { 68 | // for { 69 | // select { 70 | // case <-stopGrp.Ch(): 71 | // return 72 | // case <-ticker.C: 73 | // ipPoolInstance.lock.RLock() 74 | // for _, ip := range ipPoolInstance.ips { 75 | // log.Debugf("IP: %s\tInUse: %t\tVideoID: %s\tThrottled: %t\tLastUse: %.1f", ip.IP, ip.InUse, ip.UsedForVideo, ip.Throttled, time.Since(ip.LastUse).Seconds()) 76 | // } 77 | // ipPoolInstance.lock.RUnlock() 78 | // } 79 | // } 80 | //}() 81 | return ipPoolInstance, nil 82 | } 83 | 84 | // AllThrottled checks whether the IPs provided are all throttled. 85 | // returns false if at least one IP is not throttled 86 | // Not thread safe, should use locking when called 87 | func AllThrottled(ips []throttledIP) bool { 88 | for _, i := range ips { 89 | if !i.Throttled { 90 | return false 91 | } 92 | } 93 | return true 94 | } 95 | 96 | // AllInUse checks whether the IPs provided are all currently in use. 
97 | // returns false if at least one IP is not in use AND is not throttled 98 | // Not thread safe, should use locking when called 99 | func AllInUse(ips []throttledIP) bool { 100 | for _, i := range ips { 101 | if !i.InUse && !i.Throttled { 102 | return false 103 | } 104 | } 105 | return true 106 | } 107 | 108 | func (i *IPPool) ReleaseIP(ip string) { 109 | i.lock.Lock() 110 | defer i.lock.Unlock() 111 | for j := range i.ips { 112 | localIP := &i.ips[j] 113 | if localIP.IP == ip { 114 | localIP.InUse = false 115 | localIP.LastUse = time.Now() 116 | return 117 | } 118 | } 119 | util.SendErrorToSlack("something went wrong while releasing the IP %s as we reached the end of the function", ip) 120 | } 121 | 122 | func (i *IPPool) ReleaseAll() { 123 | i.lock.Lock() 124 | defer i.lock.Unlock() 125 | for j := range i.ips { 126 | if i.ips[j].Throttled { 127 | continue 128 | } 129 | localIP := &i.ips[j] 130 | localIP.InUse = false 131 | } 132 | } 133 | 134 | func (i *IPPool) SetThrottled(ip string) { 135 | i.lock.Lock() 136 | defer i.lock.Unlock() 137 | var tIP *throttledIP 138 | for j, _ := range i.ips { 139 | localIP := &i.ips[j] 140 | if localIP.IP == ip { 141 | if localIP.Throttled { 142 | return 143 | } 144 | localIP.Throttled = true 145 | tIP = localIP 146 | break 147 | } 148 | } 149 | util.SendErrorToSlack("%s set to throttled", ip) 150 | 151 | stopper.Add(1) 152 | go func(tIP *throttledIP) { 153 | defer stopper.Done() 154 | unbanTimer := time.NewTimer(unbanTimeout) 155 | select { 156 | case <-unbanTimer.C: 157 | i.lock.Lock() 158 | tIP.Throttled = false 159 | i.lock.Unlock() 160 | util.SendInfoToSlack("%s set back to not throttled", ip) 161 | case <-i.stopGrp.Ch(): 162 | unbanTimer.Stop() 163 | } 164 | }(tIP) 165 | } 166 | 167 | var ErrAllInUse = errors.Base("all IPs are in use, try again") 168 | var ErrAllThrottled = errors.Base("all IPs are throttled") 169 | var ErrResourceLock = errors.Base("error getting next ip, did you forget to lock on the resource?") 170 | 
var ErrInterruptedByUser = errors.Base("interrupted by user") 171 | 172 | func (i *IPPool) nextIP(forVideo string) (*throttledIP, error) { 173 | i.lock.Lock() 174 | defer i.lock.Unlock() 175 | 176 | sort.Slice(i.ips, func(j, k int) bool { 177 | return i.ips[j].LastUse.Before(i.ips[k].LastUse) 178 | }) 179 | 180 | if !AllThrottled(i.ips) { 181 | if AllInUse(i.ips) { 182 | return nil, errors.Err(ErrAllInUse) 183 | } 184 | 185 | var nextIP *throttledIP 186 | for j := range i.ips { 187 | ip := &i.ips[j] 188 | if ip.InUse || ip.Throttled { 189 | continue 190 | } 191 | nextIP = ip 192 | break 193 | } 194 | if nextIP == nil { 195 | return nil, errors.Err(ErrResourceLock) 196 | } 197 | nextIP.InUse = true 198 | nextIP.UsedForVideo = forVideo 199 | return nextIP, nil 200 | } 201 | return nil, errors.Err(ErrAllThrottled) 202 | } 203 | 204 | func (i *IPPool) GetIP(forVideo string) (string, error) { 205 | for { 206 | ip, err := i.nextIP(forVideo) 207 | if err != nil { 208 | if errors.Is(err, ErrAllInUse) { 209 | select { 210 | case <-i.stopGrp.Ch(): 211 | return "", errors.Err(ErrInterruptedByUser) 212 | default: 213 | time.Sleep(5 * time.Second) 214 | continue 215 | } 216 | } else if errors.Is(err, ErrAllThrottled) { 217 | return "throttled", err 218 | } 219 | return "", err 220 | } 221 | if time.Since(ip.LastUse) < IPCooldownPeriod { 222 | log.Debugf("The IP %s is too hot, waiting for %.1f seconds before continuing", ip.IP, (IPCooldownPeriod - time.Since(ip.LastUse)).Seconds()) 223 | time.Sleep(IPCooldownPeriod - time.Since(ip.LastUse)) 224 | } 225 | return ip.IP, nil 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /manager/manager.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "sync" 7 | "syscall" 8 | "time" 9 | 10 | "github.com/lbryio/ytsync/v5/blobs_reflector" 11 | "github.com/lbryio/ytsync/v5/configs" 12 | 
"github.com/lbryio/ytsync/v5/ip_manager" 13 | "github.com/lbryio/ytsync/v5/namer" 14 | "github.com/lbryio/ytsync/v5/sdk" 15 | "github.com/lbryio/ytsync/v5/shared" 16 | logUtils "github.com/lbryio/ytsync/v5/util" 17 | 18 | "github.com/lbryio/lbry.go/v2/extras/errors" 19 | "github.com/lbryio/lbry.go/v2/extras/util" 20 | 21 | log "github.com/sirupsen/logrus" 22 | ) 23 | 24 | type SyncManager struct { 25 | CliFlags shared.SyncFlags 26 | ApiConfig *sdk.APIConfig 27 | LbrycrdDsn string 28 | 29 | blobsDir string 30 | channelsToSync []Sync 31 | } 32 | 33 | func NewSyncManager(cliFlags shared.SyncFlags, blobsDir string) *SyncManager { 34 | return &SyncManager{ 35 | CliFlags: cliFlags, 36 | blobsDir: blobsDir, 37 | LbrycrdDsn: configs.Configuration.LbrycrdString, 38 | ApiConfig: sdk.GetAPIsConfigs(), 39 | } 40 | } 41 | func (s *SyncManager) enqueueChannel(channel *shared.YoutubeChannel) { 42 | s.channelsToSync = append(s.channelsToSync, Sync{ 43 | DbChannelData: channel, 44 | Manager: s, 45 | namer: namer.NewNamer(), 46 | hardVideoFailure: hardVideoFailure{ 47 | lock: &sync.Mutex{}, 48 | }, 49 | }) 50 | } 51 | 52 | func (s *SyncManager) Start() error { 53 | if logUtils.ShouldCleanOnStartup() { 54 | err := logUtils.CleanForStartup() 55 | if err != nil { 56 | return err 57 | } 58 | } 59 | 60 | var lastChannelProcessed string 61 | var secondLastChannelProcessed string 62 | syncCount := 0 63 | for { 64 | s.channelsToSync = make([]Sync, 0, 10) // reset sync queue 65 | err := s.checkUsedSpace() 66 | if err != nil { 67 | return errors.Err(err) 68 | } 69 | shouldInterruptLoop := false 70 | 71 | if s.CliFlags.IsSingleChannelSync() { 72 | channels, err := s.ApiConfig.FetchChannels("", &s.CliFlags) 73 | if err != nil { 74 | return errors.Err(err) 75 | } 76 | if len(channels) != 1 { 77 | return errors.Err("Expected 1 channel, %d returned", len(channels)) 78 | } 79 | s.enqueueChannel(&channels[0]) 80 | shouldInterruptLoop = true 81 | } else { 82 | var queuesToSync []string 83 | if 
s.CliFlags.Status != "" { 84 | queuesToSync = append(queuesToSync, shared.StatusSyncing, s.CliFlags.Status) 85 | } else if s.CliFlags.SyncUpdate { 86 | queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusSynced) 87 | } else { 88 | queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusQueued) 89 | } 90 | if s.CliFlags.SecondaryStatus != "" { 91 | queuesToSync = append(queuesToSync, s.CliFlags.SecondaryStatus) 92 | } 93 | queues: 94 | for _, q := range queuesToSync { 95 | channels, err := s.ApiConfig.FetchChannels(q, &s.CliFlags) 96 | if err != nil { 97 | return err 98 | } 99 | log.Infof("Currently processing the \"%s\" queue with %d channels", q, len(channels)) 100 | for _, c := range channels { 101 | s.enqueueChannel(&c) 102 | queueAll := q == shared.StatusFailed || q == shared.StatusSyncing 103 | if !queueAll { 104 | break queues 105 | } 106 | } 107 | log.Infof("Drained the \"%s\" queue", q) 108 | } 109 | } 110 | if len(s.channelsToSync) == 0 { 111 | log.Infoln("No channels to sync. Pausing 5 minutes!") 112 | time.Sleep(5 * time.Minute) 113 | } 114 | for _, sync := range s.channelsToSync { 115 | if lastChannelProcessed == sync.DbChannelData.ChannelId && secondLastChannelProcessed == lastChannelProcessed { 116 | util.SendToSlack("We just killed a sync for %s to stop looping! (%s)", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId) 117 | stopTheLoops := errors.Err("Found channel %s running 3 times, set it to failed, and reprocess later", sync.DbChannelData.DesiredChannelName) 118 | sync.setChannelTerminationStatus(&stopTheLoops) 119 | continue 120 | } 121 | secondLastChannelProcessed = lastChannelProcessed 122 | lastChannelProcessed = sync.DbChannelData.ChannelId 123 | shouldNotCount := false 124 | logUtils.SendInfoToSlack("Syncing %s (%s) to LBRY! 
total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1) 125 | err := sync.FullCycle() 126 | //TODO: THIS IS A TEMPORARY WORK AROUND FOR THE STUPID IP LOCKUP BUG 127 | ipPool, _ := ip_manager.GetIPPool(sync.grp) 128 | if ipPool != nil { 129 | ipPool.ReleaseAll() 130 | } 131 | 132 | if err != nil { 133 | if strings.Contains(err.Error(), "quotaExceeded") { 134 | logUtils.SleepUntilQuotaReset() 135 | } 136 | fatalErrors := []string{ 137 | "default_wallet already exists", 138 | "WALLET HAS NOT BEEN MOVED TO THE WALLET BACKUP DIR", 139 | "NotEnoughFunds", 140 | "no space left on device", 141 | "there was a problem uploading the wallet", 142 | "the channel in the wallet is different than the channel in the database", 143 | "this channel does not belong to this wallet!", 144 | "You already have a stream claim published under the name", 145 | } 146 | 147 | if util.SubstringInSlice(err.Error(), fatalErrors) { 148 | return errors.Prefix("@Nikooo777 this requires manual intervention! Exiting...", err) 149 | } 150 | shouldNotCount = strings.Contains(err.Error(), "this youtube channel is being managed by another server") 151 | if !shouldNotCount { 152 | logUtils.SendInfoToSlack("A non fatal error was reported by the sync process.\n%s", errors.FullTrace(err)) 153 | } 154 | } 155 | err = logUtils.CleanupMetadata() 156 | if err != nil { 157 | log.Errorf("something went wrong while trying to clear out the video metadata directory: %s", errors.FullTrace(err)) 158 | } 159 | err = blobs_reflector.ReflectAndClean() 160 | if err != nil { 161 | return errors.Prefix("@Nikooo777 something went wrong while reflecting blobs", err) 162 | } 163 | logUtils.SendInfoToSlack("%s (%s) reached an end. 
Total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1) 164 | if !shouldNotCount { 165 | syncCount++ 166 | } 167 | if sync.IsInterrupted() || (s.CliFlags.Limit != 0 && syncCount >= s.CliFlags.Limit) { 168 | shouldInterruptLoop = true 169 | break 170 | } 171 | } 172 | if shouldInterruptLoop || s.CliFlags.SingleRun { 173 | break 174 | } 175 | } 176 | return nil 177 | } 178 | 179 | func (s *SyncManager) checkUsedSpace() error { 180 | usedPctile, err := GetUsedSpace(logUtils.GetBlobsDir()) 181 | if err != nil { 182 | return errors.Err(err) 183 | } 184 | if usedPctile >= 0.90 && !s.CliFlags.SkipSpaceCheck { 185 | return errors.Err(fmt.Sprintf("more than 90%% of the space has been used. use --skip-space-check to ignore. Used: %.1f%%", usedPctile*100)) 186 | } 187 | log.Infof("disk usage: %.1f%%", usedPctile*100) 188 | return nil 189 | } 190 | 191 | // GetUsedSpace returns a value between 0 and 1, with 0 being completely empty and 1 being full, for the disk that holds the provided path 192 | func GetUsedSpace(path string) (float32, error) { 193 | var stat syscall.Statfs_t 194 | err := syscall.Statfs(path, &stat) 195 | if err != nil { 196 | return 0, err 197 | } 198 | // Available blocks * size per block = available space in bytes 199 | all := stat.Blocks * uint64(stat.Bsize) 200 | free := stat.Bfree * uint64(stat.Bsize) 201 | used := all - free 202 | 203 | return float32(used) / float32(all), nil 204 | } 205 | -------------------------------------------------------------------------------- /shared/shared.go: -------------------------------------------------------------------------------- 1 | package shared 2 | 3 | import ( 4 | "encoding/json" 5 | "time" 6 | 7 | "github.com/lbryio/lbry.go/v2/extras/errors" 8 | ) 9 | 10 | type Fee struct { 11 | Amount string `json:"amount"` 12 | Address string `json:"address"` 13 | Currency string `json:"currency"` 14 | } 15 | type YoutubeChannel struct { 16 | ChannelId 
string `json:"channel_id"` 17 | TotalVideos uint `json:"total_videos"` 18 | TotalSubscribers uint `json:"total_subscribers"` 19 | DesiredChannelName string `json:"desired_channel_name"` 20 | Fee *Fee `json:"fee"` 21 | ChannelClaimID string `json:"channel_claim_id"` 22 | TransferState int `json:"transfer_state"` 23 | PublishAddress PublishAddress `json:"publish_address"` 24 | PublicKey string `json:"public_key"` 25 | LengthLimit int `json:"length_limit"` 26 | SizeLimit int `json:"size_limit"` 27 | LastUploadedVideo string `json:"last_uploaded_video"` 28 | WipeDB bool `json:"wipe_db"` 29 | Language string `json:"language"` 30 | } 31 | 32 | type PublishAddress struct { 33 | Address string `json:"address"` 34 | IsMine bool `json:"is_mine"` 35 | } 36 | 37 | func (p *PublishAddress) UnmarshalJSON(data []byte) error { 38 | var s string 39 | if err := json.Unmarshal(data, &s); err != nil { 40 | return errors.Err(err) 41 | } 42 | p.Address = s 43 | p.IsMine = false 44 | return nil 45 | } 46 | 47 | var FatalErrors = []string{ 48 | ":5279: read: connection reset by peer", 49 | "no space left on device", 50 | "NotEnoughFunds", 51 | "Cannot publish using channel", 52 | "cannot concatenate 'str' and 'NoneType' objects", 53 | "more than 90% of the space has been used.", 54 | "Couldn't find private key for id", 55 | "You already have a stream claim published under the name", 56 | "Missing inputs", 57 | } 58 | var ErrorsNoRetry = []string{ 59 | "Requested format is not available", 60 | "non 200 status code received", 61 | "This video contains content from", 62 | "dont know which claim to update", 63 | "uploader has not made this video available in your country", 64 | "download error: AccessDenied: Access Denied", 65 | "Playback on other websites has been disabled by the video owner", 66 | "Error in daemon: Cannot publish empty file", 67 | "Error extracting sts from embedded url response", 68 | "Unable to extract signature tokens", 69 | "Client.Timeout exceeded while awaiting 
headers", 70 | "the video is too big to sync, skipping for now", 71 | "video is too long to process", 72 | "video is too short to process", 73 | "no compatible format available for this video", 74 | "Watch this video on YouTube.", 75 | "have blocked it on copyright grounds", 76 | "the video must be republished as we can't get the right size", 77 | "HTTP Error 403", 78 | "giving up after 0 fragment retries", 79 | "Sorry about that", 80 | "This video is not available", 81 | "Video unavailable", 82 | "requested format not available", 83 | "interrupted by user", 84 | "Sign in to confirm your age", 85 | "This video is unavailable", 86 | "video is a live stream and hasn't completed yet", 87 | "Premieres in", 88 | "Private video", 89 | "This live event will begin in", 90 | "This video has been removed by the uploader", 91 | "Premiere will begin shortly", 92 | "cannot unmarshal number 0.0", 93 | "default youtube thumbnail found", 94 | "livestream is likely bugged", 95 | } 96 | var WalletErrors = []string{ 97 | "Not enough funds to cover this transaction", 98 | "failed: Not enough funds", 99 | "Error in daemon: Insufficient funds, please deposit additional LBC", 100 | //"Missing inputs", 101 | } 102 | var BlockchainErrors = []string{ 103 | "txn-mempool-conflict", 104 | "too-long-mempool-chain", 105 | } 106 | var NeverRetryFailures = []string{ 107 | "Error extracting sts from embedded url response", 108 | "Unable to extract signature tokens", 109 | "the video is too big to sync, skipping for now", 110 | "video is too long to process", 111 | "video is too short to process", 112 | "This video contains content from", 113 | "no compatible format available for this video", 114 | "Watch this video on YouTube.", 115 | "have blocked it on copyright grounds", 116 | "giving up after 0 fragment retries", 117 | "Sign in to confirm your age", 118 | "Playback on other websites has been disabled by the video owner", 119 | "uploader has not made this video available in your country", 120 | 
// SyncFlags carries every command line option that drives a sync run.
type SyncFlags struct {
	TakeOverExistingChannel bool
	SkipSpaceCheck          bool
	SyncUpdate              bool
	SingleRun               bool
	RemoveDBUnpublished     bool
	UpgradeMetadata         bool
	DisableTransfers        bool
	QuickSync               bool
	MaxTries                int
	Refill                  int
	Limit                   int
	Status                  string
	SecondaryStatus         string
	ChannelID               string
	SyncFrom                int64
	SyncUntil               int64
	ConcurrentJobs          int
	VideosLimit             int
	MaxVideoSize            int
	MaxVideoLength          time.Duration
}

// VideosToSync dynamically figures out how many videos should be synced for a given subs count if nothing was otherwise specified
func (f *SyncFlags) VideosToSync(totalSubscribers uint) int {
	if f.VideosLimit > 0 {
		return f.VideosLimit
	}
	// Thresholds in descending order: the first tier the channel clears
	// decides the quota. Quotas shrink together with the thresholds, so the
	// first match is also the largest applicable quota.
	tiers := []struct {
		minSubscribers int
		videos         int
	}{
		{10000, 1000},
		{5000, 500},
		{1000, 400},
		{800, 250},
		{600, 200},
		{200, 80},
		{100, 20},
		{1, 10},
	}
	for _, tier := range tiers {
		if int(totalSubscribers) >= tier.minSubscribers {
			return tier.videos
		}
	}
	// Zero subscribers: nothing to sync.
	return 0
}

// IsSingleChannelSync reports whether the run targets exactly one channel.
func (f *SyncFlags) IsSingleChannelSync() bool {
	return f.ChannelID != ""
}

// VideoStatus mirrors the per-video state reported back to the APIs.
type VideoStatus struct {
	ChannelID       string
	VideoID         string
	Status          string
	ClaimID         string
	ClaimName       string
	FailureReason   string
	Size            *int64
	MetaDataVersion uint
	IsTransferred   *bool
}

// Channel sync statuses as stored by the internal APIs.
const (
	StatusPending        = "pending"        // waiting for permission to sync
	StatusPendingEmail   = "pendingemail"   // permission granted but missing email
	StatusQueued         = "queued"         // in sync queue. will be synced soon
	StatusPendingUpgrade = "pendingupgrade" // in sync queue. will be synced soon
	StatusSyncing        = "syncing"        // syncing now
	StatusSynced         = "synced"         // done
	StatusWipeDb         = "pendingdbwipe"  // in sync queue. lbryum database will be pruned
	StatusFailed         = "failed"
	StatusFinalized      = "finalized"     // no more changes allowed
	StatusAbandoned      = "abandoned"     // deleted on youtube or banned
	StatusAgeRestricted  = "agerestricted" // one or more videos are age restricted and should be reprocessed with special keys
)

// SyncStatuses is the whitelist of valid channel statuses.
var SyncStatuses = []string{StatusPending, StatusPendingEmail, StatusPendingUpgrade, StatusQueued, StatusSyncing, StatusSynced, StatusFailed, StatusFinalized, StatusAbandoned, StatusWipeDb, StatusAgeRestricted}

// LatestMetadataVersion is the current claim metadata schema version.
const LatestMetadataVersion = 2

// Per-video sync statuses.
const (
	VideoStatusPublished      = "published"
	VideoStatusFailed         = "failed"
	VideoStatusUpgradeFailed  = "upgradefailed"
	VideoStatusUnpublished    = "unpublished"
	VideoStatusTransferFailed = "transferfailed"
)

// VideoSyncStatuses is the whitelist of valid per-video statuses.
var VideoSyncStatuses = []string{VideoStatusPublished, VideoStatusFailed, VideoStatusUpgradeFailed, VideoStatusUnpublished, VideoStatusTransferFailed}

// Channel transfer states.
const (
	TransferStateNotTouched = iota
	TransferStatePending
	TransferStateComplete
	TransferStateManual
)
github.com/aws/aws-sdk-go v1.44.6 14 | github.com/davecgh/go-spew v1.1.1 15 | github.com/docker/docker v20.10.17+incompatible 16 | github.com/lbryio/lbry.go/v2 v2.7.2-0.20220815204100-2adb8af5b68c 17 | github.com/lbryio/reflector.go v1.1.3-0.20220730181028-f5d30b1a6e79 18 | github.com/mitchellh/go-ps v1.0.0 19 | github.com/prometheus/client_golang v1.12.1 20 | github.com/shopspring/decimal v1.3.1 21 | github.com/sirupsen/logrus v1.9.0 22 | github.com/spf13/cobra v1.4.0 23 | github.com/stretchr/testify v1.7.1 24 | github.com/tkanos/gonfig v0.0.0-20210106201359-53e13348de2f 25 | github.com/vbauerster/mpb/v7 v7.4.1 26 | gopkg.in/vansante/go-ffprobe.v2 v2.0.3 27 | gotest.tools v2.2.0+incompatible 28 | ) 29 | 30 | require ( 31 | github.com/Microsoft/go-winio v0.5.1 // indirect 32 | github.com/VividCortex/ewma v1.2.0 // indirect 33 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect 34 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect 35 | github.com/beorn7/perks v1.0.1 // indirect 36 | github.com/bluele/gcache v0.0.2 // indirect 37 | github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 // indirect 38 | github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 // indirect 39 | github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect 40 | github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect 41 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect 42 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect 43 | github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 // indirect 44 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 45 | github.com/cheekybits/genny v1.0.0 // indirect 46 | github.com/docker/distribution v2.8.1+incompatible // indirect 47 | github.com/docker/go-connections v0.4.0 // indirect 48 | github.com/docker/go-units v0.4.0 // indirect 49 | github.com/ekyoung/gin-nice-recovery 
v0.0.0-20160510022553-1654dca486db // indirect 50 | github.com/fatih/structs v1.1.0 // indirect 51 | github.com/fsnotify/fsnotify v1.4.9 // indirect 52 | github.com/ghodss/yaml v1.0.0 // indirect 53 | github.com/gin-contrib/sse v0.1.0 // indirect 54 | github.com/gin-gonic/gin v1.7.7 // indirect 55 | github.com/go-errors/errors v1.1.1 // indirect 56 | github.com/go-ini/ini v1.48.0 // indirect 57 | github.com/go-playground/locales v0.13.0 // indirect 58 | github.com/go-playground/universal-translator v0.17.0 // indirect 59 | github.com/go-playground/validator/v10 v10.4.1 // indirect 60 | github.com/go-sql-driver/mysql v1.6.0 // indirect 61 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect 62 | github.com/gofrs/uuid v3.2.0+incompatible // indirect 63 | github.com/gogo/protobuf v1.3.2 // indirect 64 | github.com/golang/protobuf v1.5.2 // indirect 65 | github.com/google/btree v1.0.1 // indirect 66 | github.com/google/go-cmp v0.5.7 // indirect 67 | github.com/gorilla/mux v1.8.0 // indirect 68 | github.com/gorilla/rpc v1.2.0 // indirect 69 | github.com/gorilla/websocket v1.4.2 // indirect 70 | github.com/hashicorp/errwrap v1.1.0 // indirect 71 | github.com/hashicorp/go-immutable-radix v1.1.0 // indirect 72 | github.com/hashicorp/go-msgpack v0.5.5 // indirect 73 | github.com/hashicorp/go-multierror v1.1.1 // indirect 74 | github.com/hashicorp/go-sockaddr v1.0.2 // indirect 75 | github.com/hashicorp/golang-lru v0.5.4 // indirect 76 | github.com/hashicorp/hcl v1.0.0 // indirect 77 | github.com/hashicorp/memberlist v0.3.0 // indirect 78 | github.com/hashicorp/serf v0.9.7 // indirect 79 | github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect 80 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 81 | github.com/jmespath/go-jmespath v0.4.0 // indirect 82 | github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22 // indirect 83 | github.com/johntdyer/slackrus v0.0.0-20211215141436-33e4a270affb // indirect 
84 | github.com/json-iterator/go v1.1.12 // indirect 85 | github.com/karrick/godirwalk v1.17.0 // indirect 86 | github.com/kr/text v0.2.0 // indirect 87 | github.com/lbryio/chainquery v1.9.0 // indirect 88 | github.com/lbryio/lbry.go v1.1.2 // indirect 89 | github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6 // indirect 90 | github.com/leodido/go-urn v1.2.0 // indirect 91 | github.com/lucas-clemente/quic-go v0.28.1 // indirect 92 | github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect 93 | github.com/magiconair/properties v1.8.1 // indirect 94 | github.com/marten-seemann/qpack v0.2.1 // indirect 95 | github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect 96 | github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect 97 | github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect 98 | github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect 99 | github.com/mattn/go-isatty v0.0.12 // indirect 100 | github.com/mattn/go-runewidth v0.0.13 // indirect 101 | github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect 102 | github.com/miekg/dns v1.1.41 // indirect 103 | github.com/mitchellh/mapstructure v1.5.0 // indirect 104 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect 105 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 106 | github.com/modern-go/reflect2 v1.0.2 // indirect 107 | github.com/morikuni/aec v1.0.0 // indirect 108 | github.com/nxadm/tail v1.4.8 // indirect 109 | github.com/onsi/ginkgo v1.16.4 // indirect 110 | github.com/onsi/gomega v1.17.0 // indirect 111 | github.com/opencontainers/go-digest v1.0.0 // indirect 112 | github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect 113 | github.com/pelletier/go-toml v1.9.3 // indirect 114 | github.com/pkg/errors v0.9.1 // indirect 115 | github.com/pmezard/go-difflib v1.0.0 // indirect 116 | github.com/prometheus/client_model v0.2.0 // indirect 117 | 
github.com/prometheus/common v0.32.1 // indirect 118 | github.com/prometheus/procfs v0.7.3 // indirect 119 | github.com/rivo/uniseg v0.2.0 // indirect 120 | github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect 121 | github.com/slack-go/slack v0.10.3 // indirect 122 | github.com/spf13/afero v1.4.1 // indirect 123 | github.com/spf13/cast v1.4.1 // indirect 124 | github.com/spf13/jwalterweatherman v1.0.0 // indirect 125 | github.com/spf13/pflag v1.0.5 // indirect 126 | github.com/spf13/viper v1.7.1 // indirect 127 | github.com/subosito/gotenv v1.2.0 // indirect 128 | github.com/ugorji/go/codec v1.1.7 // indirect 129 | github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d // indirect 130 | github.com/volatiletech/null v8.0.0+incompatible // indirect 131 | github.com/volatiletech/sqlboiler v3.4.0+incompatible // indirect 132 | github.com/ybbus/jsonrpc v2.1.2+incompatible // indirect 133 | go.uber.org/atomic v1.9.0 // indirect 134 | golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect 135 | golang.org/x/mod v0.4.2 // indirect 136 | golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect 137 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect 138 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect 139 | golang.org/x/text v0.3.7 // indirect 140 | golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect 141 | golang.org/x/tools v0.1.5 // indirect 142 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect 143 | google.golang.org/protobuf v1.27.1 // indirect 144 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 145 | gopkg.in/ini.v1 v1.60.2 // indirect 146 | gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect 147 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 148 | gopkg.in/yaml.v2 v2.4.0 // indirect 149 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect 150 | gotest.tools/v3 v3.2.0 // indirect 151 | ) 
152 | -------------------------------------------------------------------------------- /manager/s3_storage.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "strings" 7 | "time" 8 | 9 | "github.com/lbryio/ytsync/v5/configs" 10 | "github.com/lbryio/ytsync/v5/util" 11 | 12 | "github.com/lbryio/lbry.go/v2/extras/errors" 13 | 14 | "github.com/aws/aws-sdk-go/aws" 15 | "github.com/aws/aws-sdk-go/aws/awserr" 16 | "github.com/aws/aws-sdk-go/aws/session" 17 | "github.com/aws/aws-sdk-go/service/s3" 18 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 19 | log "github.com/sirupsen/logrus" 20 | ) 21 | 22 | func (s *Sync) getS3Downloader(config *aws.Config) (*s3manager.Downloader, error) { 23 | s3Session, err := session.NewSession(config) 24 | if err != nil { 25 | return nil, errors.Prefix("error starting session", err) 26 | } 27 | downloader := s3manager.NewDownloader(s3Session) 28 | return downloader, nil 29 | } 30 | 31 | func (s *Sync) getS3Uploader(config *aws.Config) (*s3manager.Uploader, error) { 32 | s3Session, err := session.NewSession(config) 33 | if err != nil { 34 | return nil, errors.Prefix("error starting session", err) 35 | } 36 | uploader := s3manager.NewUploader(s3Session) 37 | return uploader, nil 38 | } 39 | 40 | func (s *Sync) downloadWallet() error { 41 | defaultWalletDir, defaultTempWalletDir, key, err := s.getWalletPaths() 42 | if err != nil { 43 | return errors.Err(err) 44 | } 45 | downloader, err := s.getS3Downloader(configs.Configuration.WalletS3Config.GetS3AWSConfig()) 46 | if err != nil { 47 | return err 48 | } 49 | out, err := os.Create(defaultTempWalletDir) 50 | if err != nil { 51 | return errors.Prefix("error creating temp wallet", err) 52 | } 53 | defer out.Close() 54 | 55 | bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{ 56 | Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket), 57 | Key: key, 58 | }) 59 | if err 
!= nil { 60 | // Casting to the awserr.Error type will allow you to inspect the error 61 | // code returned by the service in code. The error code can be used 62 | // to switch on context specific functionality. In this case a context 63 | // specific error message is printed to the user based on the bucket 64 | // and key existing. 65 | // 66 | // For information on other S3 API error codes see: 67 | // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html 68 | if aerr, ok := err.(awserr.Error); ok { 69 | code := aerr.Code() 70 | if code == s3.ErrCodeNoSuchKey { 71 | return errors.Err("wallet not on S3") 72 | } 73 | } 74 | return err 75 | } else if bytesWritten == 0 { 76 | return errors.Err("zero bytes written") 77 | } 78 | 79 | err = os.Rename(defaultTempWalletDir, defaultWalletDir) 80 | if err != nil { 81 | return errors.Prefix("error replacing temp wallet for default wallet", err) 82 | } 83 | 84 | return nil 85 | } 86 | 87 | func (s *Sync) downloadBlockchainDB() error { 88 | if util.IsRegTest() { 89 | return nil // tests fail if we re-use the same blockchain DB 90 | } 91 | defaultBDBPath, defaultTempBDBPath, key, err := s.getBlockchainDBPaths() 92 | if err != nil { 93 | return errors.Err(err) 94 | } 95 | files, err := filepath.Glob(defaultBDBPath + "*") 96 | if err != nil { 97 | return errors.Err(err) 98 | } 99 | for _, f := range files { 100 | err = os.Remove(f) 101 | if err != nil { 102 | return errors.Err(err) 103 | } 104 | } 105 | if s.DbChannelData.WipeDB { 106 | return nil 107 | } 108 | downloader, err := s.getS3Downloader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig()) 109 | if err != nil { 110 | return errors.Err(err) 111 | } 112 | out, err := os.Create(defaultTempBDBPath) 113 | if err != nil { 114 | return errors.Prefix("error creating temp blockchain DB file", err) 115 | } 116 | defer out.Close() 117 | 118 | bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{ 119 | Bucket: 
aws.String(configs.Configuration.BlockchaindbS3Config.Bucket), 120 | Key: key, 121 | }) 122 | if err != nil { 123 | // Casting to the awserr.Error type will allow you to inspect the error 124 | // code returned by the service in code. The error code can be used 125 | // to switch on context specific functionality. In this case a context 126 | // specific error message is printed to the user based on the bucket 127 | // and key existing. 128 | // 129 | // For information on other S3 API error codes see: 130 | // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html 131 | if aerr, ok := err.(awserr.Error); ok { 132 | code := aerr.Code() 133 | if code == s3.ErrCodeNoSuchKey { 134 | return nil // let ytsync sync the database by itself 135 | } 136 | } 137 | return errors.Err(err) 138 | } else if bytesWritten == 0 { 139 | return errors.Err("zero bytes written") 140 | } 141 | 142 | blockchainDbDir := strings.Replace(defaultBDBPath, "blockchain.db", "", -1) 143 | err = util.Untar(defaultTempBDBPath, blockchainDbDir) 144 | if err != nil { 145 | return errors.Prefix("error extracting blockchain.db files", err) 146 | } 147 | err = os.Remove(defaultTempBDBPath) 148 | if err != nil { 149 | return errors.Err(err) 150 | } 151 | log.Printf("blockchain.db data downloaded and extracted to %s", blockchainDbDir) 152 | return nil 153 | } 154 | 155 | func (s *Sync) getWalletPaths() (defaultWallet, tempWallet string, key *string, err error) { 156 | defaultWallet = os.Getenv("HOME") + "/.lbryum/wallets/default_wallet" 157 | tempWallet = os.Getenv("HOME") + "/.lbryum/wallets/tmp_wallet" 158 | key = aws.String("/wallets/" + s.DbChannelData.ChannelId) 159 | if util.IsRegTest() { 160 | defaultWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet" 161 | tempWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/tmp_wallet" 162 | key = aws.String("/regtest/" + s.DbChannelData.ChannelId) 163 | } 164 | 165 | lbryumDir := os.Getenv("LBRYUM_DIR") 166 | if lbryumDir != 
"" { 167 | defaultWallet = lbryumDir + "/wallets/default_wallet" 168 | tempWallet = lbryumDir + "/wallets/tmp_wallet" 169 | } 170 | 171 | if _, err := os.Stat(defaultWallet); !os.IsNotExist(err) { 172 | return "", "", nil, errors.Err("default_wallet already exists") 173 | } 174 | return 175 | } 176 | 177 | func (s *Sync) getBlockchainDBPaths() (defaultDB, tempDB string, key *string, err error) { 178 | lbryumDir := os.Getenv("LBRYUM_DIR") 179 | if lbryumDir == "" { 180 | if util.IsRegTest() { 181 | lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest" 182 | } else { 183 | lbryumDir = os.Getenv("HOME") + "/.lbryum" 184 | } 185 | } 186 | defaultDB = lbryumDir + "/lbc_mainnet/blockchain.db" 187 | tempDB = lbryumDir + "/lbc_mainnet/tmp_blockchain.tar" 188 | key = aws.String("/blockchain_dbs/" + s.DbChannelData.ChannelId + ".tar") 189 | if util.IsRegTest() { 190 | defaultDB = lbryumDir + "/lbc_regtest/blockchain.db" 191 | tempDB = lbryumDir + "/lbc_regtest/tmp_blockchain.tar" 192 | key = aws.String("/regtest_dbs/" + s.DbChannelData.ChannelId + ".tar") 193 | } 194 | return 195 | } 196 | 197 | func (s *Sync) uploadWallet() error { 198 | defaultWalletDir := util.GetDefaultWalletPath() 199 | key := aws.String("/wallets/" + s.DbChannelData.ChannelId) 200 | if util.IsRegTest() { 201 | key = aws.String("/regtest/" + s.DbChannelData.ChannelId) 202 | } 203 | 204 | if _, err := os.Stat(defaultWalletDir); os.IsNotExist(err) { 205 | return errors.Err("default_wallet does not exist") 206 | } 207 | 208 | uploader, err := s.getS3Uploader(configs.Configuration.WalletS3Config.GetS3AWSConfig()) 209 | if err != nil { 210 | return err 211 | } 212 | 213 | file, err := os.Open(defaultWalletDir) 214 | if err != nil { 215 | return err 216 | } 217 | defer file.Close() 218 | 219 | start := time.Now() 220 | 221 | for time.Since(start) < 30*time.Minute { 222 | _, err = uploader.Upload(&s3manager.UploadInput{ 223 | Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket), 224 | Key: key, 225 | 
Body: file, 226 | }) 227 | if err != nil { 228 | time.Sleep(30 * time.Second) 229 | continue 230 | } 231 | break 232 | } 233 | if err != nil { 234 | return errors.Prefix("there was a problem uploading the wallet to S3", errors.Err(err)) 235 | } 236 | log.Println("wallet uploaded to S3") 237 | 238 | return os.Remove(defaultWalletDir) 239 | } 240 | 241 | func (s *Sync) uploadBlockchainDB() error { 242 | defaultBDBDir, _, key, err := s.getBlockchainDBPaths() 243 | if err != nil { 244 | return errors.Err(err) 245 | } 246 | 247 | if _, err := os.Stat(defaultBDBDir); os.IsNotExist(err) { 248 | return errors.Err("blockchain.db does not exist") 249 | } 250 | files, err := filepath.Glob(defaultBDBDir + "*") 251 | if err != nil { 252 | return errors.Err(err) 253 | } 254 | tarPath := strings.Replace(defaultBDBDir, "blockchain.db", "", -1) + s.DbChannelData.ChannelId + ".tar" 255 | err = util.CreateTarball(tarPath, files) 256 | if err != nil { 257 | return err 258 | } 259 | 260 | uploader, err := s.getS3Uploader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig()) 261 | if err != nil { 262 | return err 263 | } 264 | 265 | file, err := os.Open(tarPath) 266 | if err != nil { 267 | return err 268 | } 269 | defer file.Close() 270 | 271 | _, err = uploader.Upload(&s3manager.UploadInput{ 272 | Bucket: aws.String(configs.Configuration.BlockchaindbS3Config.Bucket), 273 | Key: key, 274 | Body: file, 275 | }) 276 | if err != nil { 277 | return err 278 | } 279 | log.Println("blockchain.db files uploaded to S3") 280 | err = os.Remove(tarPath) 281 | if err != nil { 282 | return errors.Err(err) 283 | } 284 | return os.Remove(defaultBDBDir) 285 | } 286 | -------------------------------------------------------------------------------- /downloader/downloader.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "math" 8 | "net" 9 | "net/http" 10 | "net/url" 11 | "os" 
12 | "os/exec" 13 | "path" 14 | "strings" 15 | "time" 16 | 17 | "github.com/davecgh/go-spew/spew" 18 | "github.com/lbryio/ytsync/v5/downloader/ytdl" 19 | "github.com/lbryio/ytsync/v5/ip_manager" 20 | "github.com/lbryio/ytsync/v5/sdk" 21 | "github.com/lbryio/ytsync/v5/shared" 22 | util2 "github.com/lbryio/ytsync/v5/util" 23 | 24 | "github.com/lbryio/lbry.go/v2/extras/errors" 25 | "github.com/lbryio/lbry.go/v2/extras/stop" 26 | "github.com/lbryio/lbry.go/v2/extras/util" 27 | 28 | "github.com/sirupsen/logrus" 29 | ) 30 | 31 | func GetPlaylistVideoIDs(channelName string, maxVideos int, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) { 32 | args := []string{"--skip-download", "https://www.youtube.com/channel/" + channelName + "/videos", "--get-id", "--flat-playlist", "--cookies", "cookies.txt", "--playlist-end", fmt.Sprintf("%d", maxVideos)} 33 | ids, err := run(channelName, args, stopChan, pool) 34 | if err != nil { 35 | return nil, errors.Err(err) 36 | } 37 | videoIDs := make([]string, 0, maxVideos) 38 | for i, v := range ids { 39 | if v == "" { 40 | continue 41 | } 42 | if i >= maxVideos { 43 | break 44 | } 45 | videoIDs = append(videoIDs, v) 46 | } 47 | return videoIDs, nil 48 | } 49 | 50 | const releaseTimeFormat = "2006-01-02, 15:04:05 (MST)" 51 | 52 | func GetVideoInformation(videoID string, stopChan stop.Chan, pool *ip_manager.IPPool) (*ytdl.YtdlVideo, error) { 53 | args := []string{ 54 | "--skip-download", 55 | "--write-info-json", 56 | fmt.Sprintf("https://www.youtube.com/watch?v=%s", videoID), 57 | "--cookies", 58 | "cookies.txt", 59 | "-o", 60 | path.Join(util2.GetVideoMetadataDir(), videoID), 61 | } 62 | _, err := run(videoID, args, stopChan, pool) 63 | if err != nil { 64 | return nil, errors.Err(err) 65 | } 66 | 67 | f, err := os.Open(path.Join(util2.GetVideoMetadataDir(), videoID+".info.json")) 68 | if err != nil { 69 | return nil, errors.Err(err) 70 | } 71 | // defer the closing of our jsonFile so that we can parse it later on 72 | defer 
f.Close() 73 | // read our opened jsonFile as a byte array. 74 | byteValue, _ := ioutil.ReadAll(f) 75 | 76 | var video *ytdl.YtdlVideo 77 | err = json.Unmarshal(byteValue, &video) 78 | if err != nil { 79 | return nil, errors.Err(err) 80 | } 81 | 82 | return video, nil 83 | } 84 | 85 | var errNotScraped = errors.Base("not yet scraped by caa.iti.gr") 86 | var errUploadTimeEmpty = errors.Base("upload time is empty") 87 | var errStatusParse = errors.Base("could not parse status, got number, need string") 88 | var errConnectionIssue = errors.Base("there was a connection issue with the api") 89 | 90 | func slack(format string, a ...interface{}) { 91 | fmt.Printf(format+"\n", a...) 92 | util.SendToSlack(format, a...) 93 | } 94 | 95 | func triggerScrape(videoID string, ip *net.TCPAddr) error { 96 | //slack("Triggering scrape for %s", videoID) 97 | u, err := url.Parse("https://caa.iti.gr/verify_videoV3") 98 | q := u.Query() 99 | q.Set("twtimeline", "0") 100 | q.Set("url", "https://www.youtube.com/watch?v="+videoID) 101 | u.RawQuery = q.Encode() 102 | //slack("GET %s", u.String()) 103 | 104 | client := getClient(ip) 105 | req, err := http.NewRequest(http.MethodGet, u.String(), nil) 106 | if err != nil { 107 | return errors.Err(err) 108 | } 109 | req.Header.Set("User-Agent", ChromeUA) 110 | 111 | res, err := client.Do(req) 112 | if err != nil { 113 | return errors.Err(err) 114 | } 115 | defer res.Body.Close() 116 | 117 | var response struct { 118 | Message string `json:"message"` 119 | Status string `json:"status"` 120 | VideoURL string `json:"video_url"` 121 | } 122 | err = json.NewDecoder(res.Body).Decode(&response) 123 | if err != nil { 124 | if strings.Contains(err.Error(), "cannot unmarshal number") { 125 | return errors.Err(errStatusParse) 126 | } 127 | if strings.Contains(err.Error(), "no route to host") { 128 | return errors.Err(errConnectionIssue) 129 | } 130 | return errors.Err(err) 131 | } 132 | 133 | switch response.Status { 134 | case "removed_video": 135 | 
return errors.Err("video previously removed from service") 136 | case "no_video": 137 | return errors.Err("they say 'video cannot be found'. wtf?") 138 | default: 139 | spew.Dump(response) 140 | } 141 | 142 | return nil 143 | //https://caa.iti.gr/caa/api/v4/videos/reports/h-tuxHS5lSM 144 | } 145 | 146 | func getUploadTime(config *sdk.APIConfig, videoID string, ip *net.TCPAddr, uploadDate string) (string, error) { 147 | //slack("Getting upload time for %s", videoID) 148 | release, err := config.GetReleasedDate(videoID) 149 | if err != nil { 150 | logrus.Error(err) 151 | } 152 | ytdlUploadDate, err := time.Parse("20060102", uploadDate) 153 | if err != nil { 154 | logrus.Error(err) 155 | } 156 | if release != nil { 157 | //const sqlTimeFormat = "2006-01-02 15:04:05" 158 | sqlTime, err := time.ParseInLocation(time.RFC3339, release.ReleaseTime, time.UTC) 159 | if err == nil { 160 | hoursDiff := math.Abs(sqlTime.Sub(ytdlUploadDate).Hours()) 161 | if hoursDiff > 48 { 162 | logrus.Infof("upload day from APIs differs from the ytdl one by more than 2 days.") 163 | } else { 164 | return sqlTime.Format(releaseTimeFormat), nil 165 | } 166 | } else { 167 | logrus.Error(err) 168 | } 169 | } 170 | 171 | return ytdlUploadDate.Format(releaseTimeFormat), nil 172 | } 173 | 174 | func getClient(ip *net.TCPAddr) *http.Client { 175 | if ip == nil { 176 | return http.DefaultClient 177 | } 178 | 179 | return &http.Client{ 180 | Transport: &http.Transport{ 181 | Proxy: http.ProxyFromEnvironment, 182 | DialContext: (&net.Dialer{ 183 | LocalAddr: ip, 184 | Timeout: 30 * time.Second, 185 | KeepAlive: 30 * time.Second, 186 | }).DialContext, 187 | MaxIdleConns: 100, 188 | IdleConnTimeout: 90 * time.Second, 189 | TLSHandshakeTimeout: 10 * time.Second, 190 | ExpectContinueTimeout: 1 * time.Second, 191 | }, 192 | } 193 | } 194 | 195 | const ( 196 | GoogleBotUA = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" 197 | ChromeUA = "Mozilla/5.0 (X11; Linux x86_64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36" 198 | maxAttempts = 3 199 | extractionError = "YouTube said: Unable to extract video data" 200 | throttledError = "HTTP Error 429" 201 | AlternateThrottledError = "returned non-zero exit status 8" 202 | youtubeDlError = "exit status 1" 203 | videoPremiereError = "Premieres in" 204 | liveEventError = "This live event will begin in" 205 | ) 206 | 207 | func run(use string, args []string, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) { 208 | var useragent []string 209 | var lastError error 210 | for attempts := 0; attempts < maxAttempts; attempts++ { 211 | sourceAddress, err := getIPFromPool(use, stopChan, pool) 212 | if err != nil { 213 | return nil, err 214 | } 215 | argsForCommand := append(args, "--source-address", sourceAddress) 216 | argsForCommand = append(argsForCommand, useragent...) 217 | binary := "yt-dlp" 218 | cmd := exec.Command(binary, argsForCommand...) 219 | 220 | res, err := runCmd(cmd, stopChan) 221 | pool.ReleaseIP(sourceAddress) 222 | if err == nil { 223 | return res, nil 224 | } 225 | lastError = err 226 | if strings.Contains(err.Error(), youtubeDlError) { 227 | if util.SubstringInSlice(err.Error(), shared.ErrorsNoRetry) { 228 | break 229 | } 230 | if strings.Contains(err.Error(), extractionError) { 231 | logrus.Warnf("known extraction error: %s", errors.FullTrace(err)) 232 | useragent = nextUA(useragent) 233 | } 234 | if strings.Contains(err.Error(), throttledError) || strings.Contains(err.Error(), AlternateThrottledError) { 235 | pool.SetThrottled(sourceAddress) 236 | //we don't want throttle errors to count toward the max retries 237 | attempts-- 238 | } 239 | } 240 | } 241 | return nil, lastError 242 | } 243 | 244 | func nextUA(current []string) []string { 245 | if len(current) == 0 { 246 | return []string{"--user-agent", GoogleBotUA} 247 | } 248 | return []string{"--user-agent", ChromeUA} 249 | } 250 | 251 | func runCmd(cmd *exec.Cmd, stopChan stop.Chan) 
([]string, error) { 252 | logrus.Infof("running yt-dlp cmd: %s", strings.Join(cmd.Args, " ")) 253 | var err error 254 | stderr, err := cmd.StderrPipe() 255 | if err != nil { 256 | return nil, errors.Err(err) 257 | } 258 | stdout, err := cmd.StdoutPipe() 259 | if err != nil { 260 | return nil, errors.Err(err) 261 | } 262 | err = cmd.Start() 263 | if err != nil { 264 | return nil, errors.Err(err) 265 | } 266 | outLog, err := ioutil.ReadAll(stdout) 267 | if err != nil { 268 | return nil, errors.Err(err) 269 | } 270 | errorLog, err := ioutil.ReadAll(stderr) 271 | if err != nil { 272 | return nil, errors.Err(err) 273 | } 274 | done := make(chan error, 1) 275 | go func() { 276 | done <- cmd.Wait() 277 | }() 278 | 279 | select { 280 | case <-stopChan: 281 | err := cmd.Process.Kill() 282 | if err != nil { 283 | return nil, errors.Prefix("failed to kill command after stopper cancellation", err) 284 | } 285 | return nil, errors.Err("interrupted by user") 286 | case err := <-done: 287 | if err != nil { 288 | //return nil, errors.Prefix("yt-dlp "+strings.Join(cmd.Args, " ")+" ["+string(errorLog)+"]", err) 289 | return nil, errors.Prefix(string(errorLog), err) 290 | } 291 | return strings.Split(strings.Replace(string(outLog), "\r\n", "\n", -1), "\n"), nil 292 | } 293 | } 294 | 295 | func getIPFromPool(use string, stopChan stop.Chan, pool *ip_manager.IPPool) (sourceAddress string, err error) { 296 | for { 297 | sourceAddress, err = pool.GetIP(use) 298 | if err != nil { 299 | if errors.Is(err, ip_manager.ErrAllThrottled) { 300 | select { 301 | case <-stopChan: 302 | return "", errors.Err("interrupted by user") 303 | 304 | default: 305 | time.Sleep(ip_manager.IPCooldownPeriod) 306 | continue 307 | } 308 | } else { 309 | return "", err 310 | } 311 | } 312 | break 313 | } 314 | return 315 | } 316 | -------------------------------------------------------------------------------- /manager/transfer.go: -------------------------------------------------------------------------------- 1 | 
package manager

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
	"github.com/lbryio/lbry.go/v2/extras/stop"
	"github.com/lbryio/lbry.go/v2/extras/util"
	"github.com/lbryio/ytsync/v5/shared"
	"github.com/lbryio/ytsync/v5/timing"

	log "github.com/sirupsen/logrus"
)

// waitConfirmations blocks until every UTXO in the default account has at
// least one confirmation, waiting for at most two new blocks.
func waitConfirmations(s *Sync) error {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("waitConfirmations").Add(time.Since(start))
	}(start)
	defaultAccount, err := s.getDefaultAccount()
	if err != nil {
		return err
	}
	allConfirmed := false
	waitCount := 0
waiting:
	for !allConfirmed && waitCount < 2 {
		utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000)
		if err != nil {
			return err
		} else if utxolist == nil {
			return errors.Err("no response")
		}

		for _, utxo := range utxolist.Items {
			if utxo.Confirmations <= 0 {
				err = s.waitForNewBlock()
				if err != nil {
					return err
				}
				waitCount++
				continue waiting
			}
		}
		allConfirmed = true
	}
	return nil
}

// abandonResponse carries the outcome of abandoning the supports on one claim.
type abandonResponse struct {
	ClaimID string
	Error   error
	Amount  float64
}

// abandonSupports abandons all supports in the default account using a pool
// of ConcurrentJobs workers and returns the total LBC released.
func abandonSupports(s *Sync) (float64, error) {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("abandonSupports").Add(time.Since(start))
	}(start)
	totalPages := uint64(1)
	var allSupports []jsonrpc.Claim
	defaultAccount, err := s.getDefaultAccount()
	if err != nil {
		return 0, err
	}
	for page := uint64(1); page <= totalPages; page++ {
		supports, err := s.daemon.SupportList(&defaultAccount, page, 50)
		if err != nil {
			// one immediate retry before giving up
			supports, err = s.daemon.SupportList(&defaultAccount, page, 50)
			if err != nil {
				return 0, errors.Prefix("cannot list supports", err)
			}
		}
		allSupports = append(allSupports, (*supports).Items...)
		totalPages = (*supports).TotalPages
	}
	producerWG := &stop.Group{}

	claimIDChan := make(chan string, len(allSupports))
	abandonRspChan := make(chan abandonResponse, len(allSupports))
	alreadyAbandoned := make(map[string]bool, len(allSupports))
	producerWG.Add(1)
	go func() {
		defer producerWG.Done()
		for _, support := range allSupports {
			// TxoSpend abandons every support on a claim at once, so
			// only enqueue each claim ID a single time
			if alreadyAbandoned[support.ClaimID] {
				continue
			}
			alreadyAbandoned[support.ClaimID] = true
			claimIDChan <- support.ClaimID
		}
	}()
	consumerWG := &stop.Group{}
	//TODO: remove this once the SDK team fixes their RPC bugs....
	s.daemon.SetRPCTimeout(60 * time.Second)
	defer s.daemon.SetRPCTimeout(5 * time.Minute)
	for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
		consumerWG.Add(1)
		go func() {
			defer consumerWG.Done()
		outer:
			for {
				claimID, more := <-claimIDChan
				if !more {
					return
				}
				summary, err := s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
				if err != nil {
					if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
						log.Errorf("Support abandon for %s timed out, retrying...", claimID)
						summary, err = s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
						if err != nil {
							//TODO GUESS HOW MUCH LBC WAS RELEASED THAT WE DON'T KNOW ABOUT, because screw you SDK
							abandonRspChan <- abandonResponse{
								ClaimID: claimID,
								Error:   err,
								Amount:  0, // this is likely wrong, but oh well... there is literally nothing I can do about it
							}
							continue
						}
					} else {
						abandonRspChan <- abandonResponse{
							ClaimID: claimID,
							Error:   err,
							Amount:  0,
						}
						continue
					}
				}
				if summary == nil || len(*summary) < 1 {
					abandonRspChan <- abandonResponse{
						ClaimID: claimID,
						Error:   errors.Err("error abandoning supports: no outputs while abandoning %s", claimID),
						Amount:  0,
					}
					continue
				}
				var outputAmount float64
				for _, tx := range *summary {
					amount, err := strconv.ParseFloat(tx.Outputs[0].Amount, 64)
					if err != nil {
						abandonRspChan <- abandonResponse{
							ClaimID: claimID,
							Error:   errors.Err(err),
							Amount:  0,
						}
						continue outer
					}
					outputAmount += amount
				}
				// BUGFIX(cleanup): the original re-checked `err` here, but that
				// branch was unreachable — any non-nil err above already
				// continued the loop. The dead block has been removed.
				log.Infof("Abandoned supports of %.4f LBC for claim %s", outputAmount, claimID)
				abandonRspChan <- abandonResponse{
					ClaimID: claimID,
					Error:   nil,
					Amount:  outputAmount,
				}
			}
		}()
	}
	producerWG.Wait()
	close(claimIDChan)
	consumerWG.Wait()
	close(abandonRspChan)

	totalAbandoned := 0.0
	for r := range abandonRspChan {
		if r.Error != nil {
			log.Errorf("Failed abandoning supports for %s: %s", r.ClaimID, r.Error.Error())
			continue
		}
		totalAbandoned += r.Amount
	}
	return totalAbandoned, nil
}

// updateInfo bundles everything a worker needs to transfer one stream claim.
type updateInfo struct {
	ClaimID             string
	streamUpdateOptions *jsonrpc.StreamUpdateOptions
	videoStatus         *shared.VideoStatus
}

// transferVideos re-issues every published, up-to-date stream claim to the
// channel's external publish address using a pool of ConcurrentJobs workers.
// It errors if any single stream update fails.
func transferVideos(s *Sync) error {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("transferVideos").Add(time.Since(start))
	}(start)
	// BUGFIX: the original used a plain bool written by multiple consumer
	// goroutines (a data race). A one-slot channel records "at least one
	// failure" safely without extra imports.
	failed := make(chan struct{}, 1)

	streamChan := make(chan updateInfo, s.Manager.CliFlags.ConcurrentJobs)
	account, err := s.getDefaultAccount()
	if err != nil {
		return err
	}
	streams, err := s.daemon.StreamList(&account, 1, 30000)
	if err != nil {
		return errors.Err(err)
	}
	producerWG := &stop.Group{}
	producerWG.Add(1)
	go func() {
		defer producerWG.Done()
		for _, video := range s.syncedVideos {
			if !video.Published || video.Transferred || video.MetadataVersion != shared.LatestMetadataVersion {
				continue
			}

			// index into Items rather than taking the address of the
			// range variable
			var stream *jsonrpc.Claim = nil
			for i := range streams.Items {
				c := &streams.Items[i]
				if c.ClaimID != video.ClaimID || (c.SigningChannel != nil && c.SigningChannel.ClaimID != s.DbChannelData.ChannelClaimID) {
					continue
				}
				stream = c
				break
			}
			if stream == nil {
				// NOTE(review): a missing stream stops the whole producer,
				// skipping all remaining videos — confirm this is intended
				return
			}

			streamUpdateOptions := jsonrpc.StreamUpdateOptions{
				StreamCreateOptions: &jsonrpc.StreamCreateOptions{
					ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
						ClaimAddress: &s.DbChannelData.PublishAddress.Address,
						FundingAccountIDs: []string{
							account,
						},
					},
				},
				Bid: util.PtrToString(fmt.Sprintf("%.5f", publishAmount/2.)),
			}
			videoStatus := shared.VideoStatus{
				ChannelID:     s.DbChannelData.ChannelId,
				VideoID:       video.VideoID,
				ClaimID:       video.ClaimID,
				ClaimName:     video.ClaimName,
				Status:        shared.VideoStatusPublished,
				IsTransferred: util.PtrToBool(true),
			}
			streamChan <- updateInfo{
				ClaimID:             video.ClaimID,
				streamUpdateOptions: &streamUpdateOptions,
				videoStatus:         &videoStatus,
			}
		}
	}()

	consumerWG := &stop.Group{}
	for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
		consumerWG.Add(1)
		go func(worker int) {
			defer consumerWG.Done()
			for {
				ui, more := <-streamChan
				if !more {
					return
				}
				if err := s.streamUpdate(&ui); err != nil {
					// non-blocking: one failure is enough to flag the run
					select {
					case failed <- struct{}{}:
					default:
					}
				}
			}
		}(i)
	}
	producerWG.Wait()
	close(streamChan)
	consumerWG.Wait()

	select {
	case <-failed:
		return errors.Err("A video has failed to transfer for the channel...skipping channel transfer")
	default:
	}
	return nil
}

// streamUpdate performs one stream_update, records the transfer outcome with
// the ytsync API, and returns the update error (if any).
func (s *Sync) streamUpdate(ui *updateInfo) error {
	start := time.Now()
	result, updateError := s.daemon.StreamUpdate(ui.ClaimID, *ui.streamUpdateOptions)
	timing.TimedComponent("transferStreamUpdate").Add(time.Since(start))
	if updateError != nil {
		ui.videoStatus.FailureReason = updateError.Error()
		ui.videoStatus.Status = shared.VideoStatusTransferFailed
		ui.videoStatus.IsTransferred = util.PtrToBool(false)
	} else {
		ui.videoStatus.IsTransferred = util.PtrToBool(len(result.Outputs) != 0)
	}
	log.Infof("TRANSFERRED %t", *ui.videoStatus.IsTransferred)
	statusErr := s.Manager.ApiConfig.MarkVideoStatus(*ui.videoStatus)
	if statusErr != nil {
		return errors.Prefix(statusErr.Error(), updateError)
	}
	return errors.Err(updateError)
}

// transferChannel re-issues the channel claim itself to the external publish
// address. It is a no-op when the channel claim is not in the local account.
func transferChannel(s *Sync) error {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("transferChannel").Add(time.Since(start))
	}(start)
	account, err := s.getDefaultAccount()
	if err != nil {
		return err
	}
	channelClaims, err := s.daemon.ChannelList(&account, 1, 50, nil)
	if err != nil {
		return errors.Err(err)
	}
	var channelClaim *jsonrpc.Transaction = nil
	for i := range channelClaims.Items {
		if channelClaims.Items[i].ClaimID != s.DbChannelData.ChannelClaimID {
			continue
		}
		channelClaim = &channelClaims.Items[i]
		break
	}
	if channelClaim == nil {
		return nil
	}

	updateOptions := jsonrpc.ChannelUpdateOptions{
		Bid:
util.PtrToString(fmt.Sprintf("%.6f", channelClaimAmount-0.005)), 336 | ChannelCreateOptions: jsonrpc.ChannelCreateOptions{ 337 | ClaimCreateOptions: jsonrpc.ClaimCreateOptions{ 338 | ClaimAddress: &s.DbChannelData.PublishAddress.Address, 339 | }, 340 | }, 341 | } 342 | result, err := s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, updateOptions) 343 | if err != nil { 344 | return errors.Err(err) 345 | } 346 | log.Infof("TRANSFERRED %t", len(result.Outputs) != 0) 347 | 348 | return nil 349 | } 350 | -------------------------------------------------------------------------------- /util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/exec" 7 | "os/user" 8 | "path/filepath" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/lbryio/lbry.go/v2/extras/errors" 13 | "github.com/lbryio/lbry.go/v2/lbrycrd" 14 | "github.com/lbryio/ytsync/v5/configs" 15 | "github.com/lbryio/ytsync/v5/timing" 16 | 17 | "github.com/docker/docker/api/types" 18 | "github.com/docker/docker/api/types/filters" 19 | "github.com/docker/docker/client" 20 | "github.com/mitchellh/go-ps" 21 | log "github.com/sirupsen/logrus" 22 | ) 23 | 24 | func GetBlobsDir() string { 25 | blobsDir := os.Getenv("BLOBS_DIRECTORY") 26 | if blobsDir == "" { 27 | usr, err := user.Current() 28 | if err != nil { 29 | log.Error(err.Error()) 30 | return "" 31 | } 32 | blobsDir = usr.HomeDir + "/.lbrynet/blobfiles/" 33 | } 34 | 35 | return blobsDir 36 | } 37 | 38 | func IsBlobReflectionOff() bool { 39 | return os.Getenv("REFLECT_BLOBS") == "false" 40 | } 41 | 42 | func GetLBRYNetDir() string { 43 | lbrynetDir := os.Getenv("LBRYNET_DIR") 44 | if lbrynetDir == "" { 45 | usr, err := user.Current() 46 | if err != nil { 47 | log.Errorln(err.Error()) 48 | return "" 49 | } 50 | return usr.HomeDir + "/.lbrynet/" 51 | } 52 | return lbrynetDir 53 | } 54 | 55 | func GetLbryumDir() string { 56 | lbryumDir := os.Getenv("LBRYUM_DIR") 57 
| if lbryumDir == "" { 58 | usr, err := user.Current() 59 | if err != nil { 60 | log.Errorln(err.Error()) 61 | return "" 62 | } 63 | return usr.HomeDir + "/.lbryum/" 64 | } 65 | return lbryumDir + "/" 66 | } 67 | 68 | const ALL = true 69 | const ONLINE = false 70 | 71 | func GetLBRYNetContainer(all bool) (*types.Container, error) { 72 | return getDockerContainer("lbrynet", all) 73 | } 74 | 75 | func getDockerContainer(name string, all bool) (*types.Container, error) { 76 | cli, err := client.NewEnvClient() 77 | if err != nil { 78 | panic(err) 79 | } 80 | filters := filters.NewArgs() 81 | filters.Add("name", name) 82 | containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: all, Filters: filters}) 83 | if err != nil { 84 | panic(err) 85 | } 86 | if len(containers) == 0 { 87 | return nil, nil 88 | } 89 | if len(containers) > 1 { 90 | return nil, errors.Err("more than one %s container found", name) 91 | } 92 | 93 | return &containers[0], nil 94 | } 95 | 96 | func IsUsingDocker() bool { 97 | useDocker, err := strconv.ParseBool(os.Getenv("LBRYNET_USE_DOCKER")) 98 | if err != nil { 99 | return false 100 | } 101 | return useDocker 102 | } 103 | 104 | func IsRegTest() bool { 105 | usesRegtest, err := strconv.ParseBool(os.Getenv("REGTEST")) 106 | if err != nil { 107 | return false 108 | } 109 | return usesRegtest 110 | } 111 | 112 | func GetLbrycrdClient(lbrycrdString string) (*lbrycrd.Client, error) { 113 | chainName := os.Getenv("CHAINNAME") 114 | chainParams, ok := lbrycrd.ChainParamsMap[chainName] 115 | if !ok { 116 | chainParams = lbrycrd.MainNetParams 117 | } 118 | var lbrycrdd *lbrycrd.Client 119 | var err error 120 | if lbrycrdString == "" { 121 | lbrycrdd, err = lbrycrd.NewWithDefaultURL(&chainParams) 122 | if err != nil { 123 | return nil, err 124 | } 125 | } else { 126 | lbrycrdd, err = lbrycrd.New(lbrycrdString, &chainParams) 127 | if err != nil { 128 | return nil, err 129 | } 130 | } 131 | 132 | return lbrycrdd, nil 133 | } 
134 | 135 | func ShouldCleanOnStartup() bool { 136 | shouldClean, err := strconv.ParseBool(os.Getenv("CLEAN_ON_STARTUP")) 137 | if err != nil { 138 | return false 139 | } 140 | return shouldClean 141 | } 142 | 143 | func IsLbrynetRunning() (bool, error) { 144 | if IsUsingDocker() { 145 | container, err := GetLBRYNetContainer(ONLINE) 146 | if err != nil { 147 | return false, err 148 | } 149 | return container != nil, nil 150 | } 151 | 152 | processes, err := ps.Processes() 153 | if err != nil { 154 | return true, errors.Err(err) 155 | } 156 | var daemonProcessId = -1 157 | for _, p := range processes { 158 | if p.Executable() == "lbrynet" { 159 | daemonProcessId = p.Pid() 160 | break 161 | } 162 | } 163 | 164 | running := daemonProcessId != -1 165 | return running, nil 166 | } 167 | 168 | func CleanForStartup() error { 169 | if !IsRegTest() { 170 | return errors.Err("never cleanup wallet outside of regtest and with caution. this should only be done in local testing and requires regtest to be on") 171 | } 172 | 173 | running, err := IsLbrynetRunning() 174 | if err != nil { 175 | return err 176 | } 177 | if running { 178 | err := StopDaemon() 179 | if err != nil { 180 | return err 181 | } 182 | } 183 | 184 | err = CleanupLbrynet() 185 | if err != nil { 186 | return errors.Err(err) 187 | } 188 | 189 | lbrycrd, err := GetLbrycrdClient(configs.Configuration.LbrycrdString) 190 | if err != nil { 191 | return errors.Prefix("error getting lbrycrd client", err) 192 | } 193 | height, err := lbrycrd.GetBlockCount() 194 | if err != nil { 195 | return errors.Err(err) 196 | } 197 | const minBlocksForUTXO = 200 198 | if height < minBlocksForUTXO { 199 | //Start reg test with some credits 200 | txs, err := lbrycrd.Generate(uint32(minBlocksForUTXO) - uint32(height)) 201 | if err != nil { 202 | return errors.Err(err) 203 | } 204 | log.Debugf("REGTEST: Generated %d transactions to get some LBC!", len(txs)) 205 | } 206 | 207 | defaultWalletDir := GetDefaultWalletPath() 208 | _, err = 
os.Stat(defaultWalletDir) 209 | if os.IsNotExist(err) { 210 | return nil 211 | } 212 | return errors.Err(os.Remove(defaultWalletDir)) 213 | } 214 | 215 | func CleanupLbrynet() error { 216 | //make sure lbrynet is off 217 | running, err := IsLbrynetRunning() 218 | if err != nil { 219 | return err 220 | } 221 | if running { 222 | return errors.Prefix("cannot cleanup lbrynet as the daemon is running", err) 223 | } 224 | lbrynetDir := GetLBRYNetDir() 225 | files, err := filepath.Glob(lbrynetDir + "lbrynet.sqlite*") 226 | if err != nil { 227 | return errors.Err(err) 228 | } 229 | for _, f := range files { 230 | err = os.Remove(f) 231 | if err != nil { 232 | return errors.Err(err) 233 | } 234 | } 235 | blobsDir := GetBlobsDir() 236 | err = os.RemoveAll(blobsDir) 237 | if err != nil { 238 | return errors.Err(err) 239 | } 240 | err = os.Mkdir(blobsDir, 0777) 241 | if err != nil { 242 | return errors.Err(err) 243 | } 244 | 245 | lbryumDir := GetLbryumDir() 246 | ledger := "lbc_mainnet" 247 | if IsRegTest() { 248 | ledger = "lbc_regtest" 249 | } 250 | lbryumDir = lbryumDir + ledger 251 | 252 | files, err = filepath.Glob(lbryumDir + "/blockchain.db*") 253 | if err != nil { 254 | return errors.Err(err) 255 | } 256 | for _, f := range files { 257 | err = os.Remove(f) 258 | if err != nil { 259 | return errors.Err(err) 260 | } 261 | } 262 | return nil 263 | } 264 | 265 | var metadataDirInitialized = false 266 | 267 | func GetVideoMetadataDir() string { 268 | dir := "./videos_metadata" 269 | if !metadataDirInitialized { 270 | metadataDirInitialized = true 271 | _ = os.MkdirAll(dir, 0755) 272 | } 273 | return dir 274 | } 275 | 276 | func CleanupMetadata() error { 277 | dir := GetVideoMetadataDir() 278 | err := os.RemoveAll(dir) 279 | if err != nil { 280 | return errors.Err(err) 281 | } 282 | metadataDirInitialized = false 283 | return nil 284 | } 285 | 286 | func SleepUntilQuotaReset() { 287 | PST, _ := time.LoadLocation("America/Los_Angeles") 288 | t := time.Now().In(PST) 289 | n 
:= time.Date(t.Year(), t.Month(), t.Day(), 24, 2, 0, 0, PST) 290 | d := n.Sub(t) 291 | if d < 0 { 292 | n = n.Add(24 * time.Hour) 293 | d = n.Sub(t) 294 | } 295 | log.Infof("gotta sleep %s until the quota resets", d.String()) 296 | time.Sleep(d) 297 | } 298 | 299 | func StartDaemon() error { 300 | start := time.Now() 301 | defer func(start time.Time) { 302 | timing.TimedComponent("startDaemon").Add(time.Since(start)) 303 | }(start) 304 | if IsUsingDocker() { 305 | return startDaemonViaDocker() 306 | } 307 | return startDaemonViaSystemd() 308 | } 309 | 310 | func StopDaemon() error { 311 | start := time.Now() 312 | defer func(start time.Time) { 313 | timing.TimedComponent("stopDaemon").Add(time.Since(start)) 314 | }(start) 315 | if IsUsingDocker() { 316 | return stopDaemonViaDocker() 317 | } 318 | return stopDaemonViaSystemd() 319 | } 320 | 321 | func startDaemonViaDocker() error { 322 | container, err := GetLBRYNetContainer(true) 323 | if err != nil { 324 | return err 325 | } 326 | 327 | cli, err := client.NewEnvClient() 328 | if err != nil { 329 | panic(err) 330 | } 331 | 332 | err = cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}) 333 | if err != nil { 334 | return errors.Err(err) 335 | } 336 | 337 | return nil 338 | } 339 | 340 | func stopDaemonViaDocker() error { 341 | container, err := GetLBRYNetContainer(ONLINE) 342 | if err != nil { 343 | return err 344 | } 345 | 346 | cli, err := client.NewEnvClient() 347 | if err != nil { 348 | panic(err) 349 | } 350 | 351 | err = cli.ContainerStop(context.Background(), container.ID, nil) 352 | if err != nil { 353 | return errors.Err(err) 354 | } 355 | 356 | return nil 357 | } 358 | 359 | func startDaemonViaSystemd() error { 360 | err := exec.Command("/usr/bin/sudo", "/bin/systemctl", "start", "lbrynet.service").Run() 361 | if err != nil { 362 | return errors.Err(err) 363 | } 364 | return nil 365 | } 366 | 367 | func stopDaemonViaSystemd() error { 368 | err := 
exec.Command("/usr/bin/sudo", "/bin/systemctl", "stop", "lbrynet.service").Run() 369 | if err != nil { 370 | return errors.Err(err) 371 | } 372 | return nil 373 | } 374 | 375 | func GetDefaultWalletPath() string { 376 | defaultWalletDir := os.Getenv("HOME") + "/.lbryum/wallets/default_wallet" 377 | if IsRegTest() { 378 | defaultWalletDir = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet" 379 | } 380 | 381 | walletPath := os.Getenv("LBRYUM_DIR") 382 | if walletPath != "" { 383 | defaultWalletDir = walletPath + "/wallets/default_wallet" 384 | } 385 | return defaultWalletDir 386 | } 387 | func GetBlockchainDBPath() string { 388 | lbryumDir := os.Getenv("LBRYUM_DIR") 389 | if lbryumDir == "" { 390 | if IsRegTest() { 391 | lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest" 392 | } else { 393 | lbryumDir = os.Getenv("HOME") + "/.lbryum" 394 | } 395 | } 396 | defaultDB := lbryumDir + "/lbc_mainnet/blockchain.db" 397 | if IsRegTest() { 398 | defaultDB = lbryumDir + "/lbc_regtest/blockchain.db" 399 | } 400 | return defaultDB 401 | } 402 | func GetBlockchainDirectoryName() string { 403 | ledger := "lbc_mainnet" 404 | if IsRegTest() { 405 | ledger = "lbc_regtest" 406 | } 407 | return ledger 408 | } 409 | 410 | func DirSize(path string) (int64, error) { 411 | var size int64 412 | err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { 413 | if err != nil { 414 | return err 415 | } 416 | if !info.IsDir() { 417 | size += info.Size() 418 | } 419 | return err 420 | }) 421 | return size, err 422 | } 423 | -------------------------------------------------------------------------------- /ytapi/ytapi.go: -------------------------------------------------------------------------------- 1 | package ytapi 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "regexp" 10 | "sort" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/lbryio/ytsync/v5/shared" 17 | logUtils 
// byPublishedAt implements sort.Interface so a slice of Videos can be
// ordered by ascending publication time.
type byPublishedAt []Video

// Len returns the number of videos in the slice.
func (a byPublishedAt) Len() int { return len(a) }

// Swap exchanges the videos at indices i and j.
func (a byPublishedAt) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less reports whether video i was published before video j.
func (a byPublishedAt) Less(i, j int) bool { return a[i].PublishedAt().Before(a[j].PublishedAt()) }
// GetVideosToSync builds the ordered list of videos to sync for a channel.
//
// It fetches up to maxVideos playlist IDs (capped at 50 in quickSync mode),
// drops videos whose previous failure is in shared.NeverRetryFailures,
// force-includes lastUploadedVideo if the playlist fetch missed it, resolves
// full metadata for the remainder, and appends "mocked" videos for already
// synced entries that no longer appear in the playlist (so their state can
// still be reconciled). The result is sorted by ascending publication time.
//
// When the playlist comes back empty, the first occurrence for a channel is
// tolerated (recorded in mostRecentlyFailedChannel); a second consecutive
// empty result for the same channel is treated as an error.
func GetVideosToSync(channelID string, syncedVideos map[string]sdk.SyncedVideo, quickSync bool, maxVideos int, videoParams VideoParams, lastUploadedVideo string) ([]Video, error) {
	var videos []Video
	if quickSync && maxVideos > 50 {
		maxVideos = 50
	}
	allVideos, err := downloader.GetPlaylistVideoIDs(channelID, maxVideos, videoParams.Stopper.Ch(), videoParams.IPPool)
	if err != nil {
		return nil, errors.Err(err)
	}
	videoIDs := make([]string, 0, len(allVideos))
	for _, video := range allVideos {
		// skip videos that permanently failed in a previous run
		sv, ok := syncedVideos[video]
		if ok && util.SubstringInSlice(sv.FailureReason, shared.NeverRetryFailures) {
			continue
		}
		videoIDs = append(videoIDs, video)
	}
	log.Infof("Got info for %d videos from youtube downloader", len(videoIDs))

	// map video ID -> position in the playlist, used later for ordering info
	playlistMap := make(map[string]int64)
	for i, videoID := range videoIDs {
		playlistMap[videoID] = int64(i)
	}
	//this will ensure that we at least try to sync the video that was marked as last uploaded video in the database.
	if lastUploadedVideo != "" {
		_, ok := playlistMap[lastUploadedVideo]
		if !ok {
			playlistMap[lastUploadedVideo] = 0
			videoIDs = append(videoIDs, lastUploadedVideo)
		}
	}

	if len(videoIDs) < 1 {
		// only error out if the same channel came back empty twice in a row
		if channelID == mostRecentlyFailedChannel {
			return nil, errors.Err("playlist items not found")
		}
		mostRecentlyFailedChannel = channelID
	}

	vids, err := getVideos(channelID, videoIDs, videoParams.Stopper.Ch(), videoParams.IPPool)
	if err != nil {
		return nil, err
	}

	for _, item := range vids {
		positionInList := playlistMap[item.ID]
		videoToAdd, err := sources.NewYoutubeVideo(videoParams.VideoDir, item, positionInList, videoParams.Stopper, videoParams.IPPool)
		if err != nil {
			return nil, errors.Err(err)
		}
		videos = append(videos, videoToAdd)
	}

	// add placeholders for synced videos that vanished from the playlist,
	// unless they are unpublished and already on the current metadata version
	for k, v := range syncedVideos {
		newMetadataVersion := int8(2)
		if !v.Published && v.MetadataVersion >= newMetadataVersion {
			continue
		}
		if _, ok := playlistMap[k]; !ok {
			videos = append(videos, sources.NewMockedVideo(videoParams.VideoDir, k, channelID, videoParams.Stopper, videoParams.IPPool))
		}
	}

	sort.Sort(byPublishedAt(videos))

	return videos, nil
}
// CountVideosInChannel is unused for now... keeping it here just in case.
// It scrapes the channel's SocialBlade page and extracts the upload count
// from the line containing the "youtube-stats-header-uploads" marker.
func CountVideosInChannel(channelID string) (int, error) {
	url := "https://socialblade.com/youtube/channel/" + channelID

	req, _ := http.NewRequest("GET", url, nil)

	// present a browser-like request so the page is served normally
	req.Header.Add("User-Agent", downloader.ChromeUA)
	req.Header.Add("Accept", "*/*")
	req.Header.Add("Host", "socialblade.com")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return 0, errors.Err(err)
	}
	defer res.Body.Close()

	// scan line by line until the markup containing the upload counter appears
	var line string
	scanner := bufio.NewScanner(res.Body)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), "youtube-stats-header-uploads") {
			line = scanner.Text()
			break
		}
	}

	if err := scanner.Err(); err != nil {
		return 0, err
	}
	if line == "" {
		return 0, errors.Err("upload count line not found")
	}

	// the count is the first run of digits wrapped between '>' and '<'
	matches := regexp.MustCompile(">([0-9]+)<").FindStringSubmatch(line)
	if len(matches) != 2 {
		return 0, errors.Err("upload count not found with regex")
	}

	num, err := strconv.Atoi(matches[1])
	if err != nil {
		return 0, errors.Err(err)
	}

	return num, nil
}
// getVideos resolves full ytdl metadata for each of videoIDs. Videos the
// internal APIs already report as "published" are skipped; a video whose
// info cannot be fetched is marked "failed" remotely and the loop moves on.
// When stopChan fires, the videos collected so far are returned alongside
// an "interrupted by user" error.
func getVideos(channelID string, videoIDs []string, stopChan stop.Chan, ipPool *ip_manager.IPPool) ([]*ytdl.YtdlVideo, error) {
	config := sdk.GetAPIsConfigs()
	var videos []*ytdl.YtdlVideo
	for _, videoID := range videoIDs {
		// skip IDs that are too short to be plausible video IDs
		if len(videoID) < 5 {
			continue
		}
		// non-blocking interrupt check between videos
		select {
		case <-stopChan:
			return videos, errors.Err("interrupted by user")
		default:
		}

		state, err := config.VideoState(videoID)
		if err != nil {
			return nil, errors.Err(err)
		}
		if state == "published" {
			continue
		}
		video, err := downloader.GetVideoInformation(videoID, stopChan, ipPool)
		if err != nil {
			// record the failure remotely, notify slack, and continue with
			// the next video unless the status update itself failed
			errSDK := config.MarkVideoStatus(shared.VideoStatus{
				ChannelID:     channelID,
				VideoID:       videoID,
				Status:        "failed",
				FailureReason: err.Error(),
			})
			logUtils.SendErrorToSlack(fmt.Sprintf("Skipping video (%s): %s", videoID, errors.FullTrace(err)))
			if errSDK != nil {
				return nil, errors.Err(errSDK)
			}
		} else {
			videos = append(videos, video)
		}
	}
	return videos, nil
}
ItemSectionRenderer struct { 257 | Contents []struct { 258 | ChannelAboutFullMetadataRenderer struct { 259 | Description struct { 260 | SimpleText string `json:"simpleText"` 261 | } `json:"description"` 262 | ViewCountText struct { 263 | SimpleText string `json:"simpleText"` 264 | } `json:"viewCountText"` 265 | JoinedDateText struct { 266 | Runs []struct { 267 | Text string `json:"text"` 268 | } `json:"runs"` 269 | } `json:"joinedDateText"` 270 | CanonicalChannelURL string `json:"canonicalChannelUrl"` 271 | BypassBusinessEmailCaptcha bool `json:"bypassBusinessEmailCaptcha"` 272 | Title struct { 273 | SimpleText string `json:"simpleText"` 274 | } `json:"title"` 275 | Avatar struct { 276 | Thumbnails []struct { 277 | URL string `json:"url"` 278 | Width int `json:"width"` 279 | Height int `json:"height"` 280 | } `json:"thumbnails"` 281 | } `json:"avatar"` 282 | ShowDescription bool `json:"showDescription"` 283 | DescriptionLabel struct { 284 | Runs []struct { 285 | Text string `json:"text"` 286 | } `json:"runs"` 287 | } `json:"descriptionLabel"` 288 | DetailsLabel struct { 289 | Runs []struct { 290 | Text string `json:"text"` 291 | } `json:"runs"` 292 | } `json:"detailsLabel"` 293 | ChannelID string `json:"channelId"` 294 | } `json:"channelAboutFullMetadataRenderer"` 295 | } `json:"contents"` 296 | } `json:"itemSectionRenderer"` 297 | } `json:"contents"` 298 | } `json:"sectionListRenderer"` 299 | } `json:"content"` 300 | } `json:"tabRenderer"` 301 | } `json:"tabs"` 302 | } `json:"twoColumnBrowseResultsRenderer"` 303 | } `json:"contents"` 304 | Header struct { 305 | C4TabbedHeaderRenderer struct { 306 | ChannelID string `json:"channelId"` 307 | Title string `json:"title"` 308 | Avatar struct { 309 | Thumbnails []struct { 310 | URL string `json:"url"` 311 | Width int `json:"width"` 312 | Height int `json:"height"` 313 | } `json:"thumbnails"` 314 | } `json:"avatar"` 315 | Banner struct { 316 | Thumbnails []struct { 317 | URL string `json:"url"` 318 | Width int 
`json:"width"` 319 | Height int `json:"height"` 320 | } `json:"thumbnails"` 321 | } `json:"banner"` 322 | VisitTracking struct { 323 | RemarketingPing string `json:"remarketingPing"` 324 | } `json:"visitTracking"` 325 | SubscriberCountText struct { 326 | SimpleText string `json:"simpleText"` 327 | } `json:"subscriberCountText"` 328 | } `json:"c4TabbedHeaderRenderer"` 329 | } `json:"header"` 330 | Metadata struct { 331 | ChannelMetadataRenderer struct { 332 | Title string `json:"title"` 333 | Description string `json:"description"` 334 | RssURL string `json:"rssUrl"` 335 | ChannelConversionURL string `json:"channelConversionUrl"` 336 | ExternalID string `json:"externalId"` 337 | Keywords string `json:"keywords"` 338 | OwnerUrls []string `json:"ownerUrls"` 339 | Avatar struct { 340 | Thumbnails []struct { 341 | URL string `json:"url"` 342 | Width int `json:"width"` 343 | Height int `json:"height"` 344 | } `json:"thumbnails"` 345 | } `json:"avatar"` 346 | ChannelURL string `json:"channelUrl"` 347 | IsFamilySafe bool `json:"isFamilySafe"` 348 | VanityChannelURL string `json:"vanityChannelUrl"` 349 | } `json:"channelMetadataRenderer"` 350 | } `json:"metadata"` 351 | Topbar struct { 352 | DesktopTopbarRenderer struct { 353 | CountryCode string `json:"countryCode"` 354 | } `json:"desktopTopbarRenderer"` 355 | } `json:"topbar"` 356 | Microformat struct { 357 | MicroformatDataRenderer struct { 358 | URLCanonical string `json:"urlCanonical"` 359 | Title string `json:"title"` 360 | Description string `json:"description"` 361 | Thumbnail struct { 362 | Thumbnails []struct { 363 | URL string `json:"url"` 364 | Width int `json:"width"` 365 | Height int `json:"height"` 366 | } `json:"thumbnails"` 367 | } `json:"thumbnail"` 368 | SiteName string `json:"siteName"` 369 | AppName string `json:"appName"` 370 | AndroidPackage string `json:"androidPackage"` 371 | IosAppStoreID string `json:"iosAppStoreId"` 372 | IosAppArguments string `json:"iosAppArguments"` 373 | OgType string 
`json:"ogType"` 374 | URLApplinksWeb string `json:"urlApplinksWeb"` 375 | URLApplinksIos string `json:"urlApplinksIos"` 376 | URLApplinksAndroid string `json:"urlApplinksAndroid"` 377 | URLTwitterIos string `json:"urlTwitterIos"` 378 | URLTwitterAndroid string `json:"urlTwitterAndroid"` 379 | TwitterCardType string `json:"twitterCardType"` 380 | TwitterSiteHandle string `json:"twitterSiteHandle"` 381 | SchemaDotOrgType string `json:"schemaDotOrgType"` 382 | Noindex bool `json:"noindex"` 383 | Unlisted bool `json:"unlisted"` 384 | FamilySafe bool `json:"familySafe"` 385 | Tags []string `json:"tags"` 386 | } `json:"microformatDataRenderer"` 387 | } `json:"microformat"` 388 | } 389 | -------------------------------------------------------------------------------- /sdk/api.go: -------------------------------------------------------------------------------- 1 | package sdk 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "net/url" 9 | "regexp" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "github.com/lbryio/lbry.go/v2/extras/errors" 15 | "github.com/lbryio/lbry.go/v2/extras/null" 16 | "github.com/lbryio/ytsync/v5/configs" 17 | "github.com/lbryio/ytsync/v5/shared" 18 | 19 | "github.com/lbryio/ytsync/v5/util" 20 | 21 | log "github.com/sirupsen/logrus" 22 | ) 23 | 24 | const ( 25 | MaxReasonLength = 490 26 | ) 27 | 28 | type APIConfig struct { 29 | ApiURL string 30 | ApiToken string 31 | HostName string 32 | } 33 | 34 | var instance *APIConfig 35 | 36 | func GetAPIsConfigs() *APIConfig { 37 | if instance == nil { 38 | instance = &APIConfig{ 39 | ApiURL: configs.Configuration.InternalApisEndpoint, 40 | ApiToken: configs.Configuration.InternalApisAuthToken, 41 | HostName: configs.Configuration.GetHostname(), 42 | } 43 | } 44 | return instance 45 | } 46 | 47 | func (a *APIConfig) FetchChannels(status string, cliFlags *shared.SyncFlags) ([]shared.YoutubeChannel, error) { 48 | type apiJobsResponse struct { 49 | Success bool 
`json:"success"` 50 | Error null.String `json:"error"` 51 | Data []shared.YoutubeChannel `json:"data"` 52 | } 53 | endpoint := a.ApiURL + "/yt/jobs" 54 | res, err := http.PostForm(endpoint, url.Values{ 55 | "auth_token": {a.ApiToken}, 56 | "sync_status": {status}, 57 | "min_videos": {strconv.Itoa(1)}, 58 | "after": {strconv.Itoa(int(cliFlags.SyncFrom))}, 59 | "before": {strconv.Itoa(int(cliFlags.SyncUntil))}, 60 | "sync_server": {a.HostName}, 61 | "channel_id": {cliFlags.ChannelID}, 62 | }) 63 | if err != nil { 64 | util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error()) 65 | time.Sleep(30 * time.Second) 66 | return a.FetchChannels(status, cliFlags) 67 | } 68 | defer res.Body.Close() 69 | body, _ := ioutil.ReadAll(res.Body) 70 | if res.StatusCode != http.StatusOK { 71 | util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint) 72 | log.Debugln(string(body)) 73 | time.Sleep(30 * time.Second) 74 | return a.FetchChannels(status, cliFlags) 75 | } 76 | var response apiJobsResponse 77 | err = json.Unmarshal(body, &response) 78 | if err != nil { 79 | return nil, errors.Err(err) 80 | } 81 | if response.Data == nil { 82 | return nil, errors.Err(response.Error) 83 | } 84 | log.Printf("Fetched channels: %d", len(response.Data)) 85 | return response.Data, nil 86 | } 87 | 88 | type SyncedVideo struct { 89 | VideoID string `json:"video_id"` 90 | Published bool `json:"published"` 91 | FailureReason string `json:"failure_reason"` 92 | ClaimName string `json:"claim_name"` 93 | ClaimID string `json:"claim_id"` 94 | Size int64 `json:"size"` 95 | MetadataVersion int8 `json:"metadata_version"` 96 | Transferred bool `json:"transferred"` 97 | IsLbryFirst bool `json:"is_lbry_first"` 98 | } 99 | 100 | func sanitizeFailureReason(s *string) { 101 | re := regexp.MustCompile("[[:^ascii:]]") 102 | *s = strings.Replace(re.ReplaceAllLiteralString(*s, ""), "\n", " ", -1) 103 | 104 | if len(*s) > MaxReasonLength 
{ 105 | *s = (*s)[:MaxReasonLength] 106 | } 107 | } 108 | 109 | func (a *APIConfig) SetChannelCert(certHex string, channelID string) error { 110 | type apiSetChannelCertResponse struct { 111 | Success bool `json:"success"` 112 | Error null.String `json:"error"` 113 | Data string `json:"data"` 114 | } 115 | 116 | endpoint := a.ApiURL + "/yt/channel_cert" 117 | 118 | res, err := http.PostForm(endpoint, url.Values{ 119 | "channel_claim_id": {channelID}, 120 | "channel_cert": {certHex}, 121 | "auth_token": {a.ApiToken}, 122 | }) 123 | if err != nil { 124 | util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error()) 125 | time.Sleep(30 * time.Second) 126 | return a.SetChannelCert(certHex, channelID) 127 | } 128 | defer res.Body.Close() 129 | body, _ := ioutil.ReadAll(res.Body) 130 | if res.StatusCode != http.StatusOK { 131 | util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint) 132 | log.Debugln(string(body)) 133 | time.Sleep(30 * time.Second) 134 | return a.SetChannelCert(certHex, channelID) 135 | } 136 | var response apiSetChannelCertResponse 137 | err = json.Unmarshal(body, &response) 138 | if err != nil { 139 | return errors.Err(err) 140 | } 141 | if !response.Error.IsNull() { 142 | return errors.Err(response.Error.String) 143 | } 144 | 145 | return nil 146 | } 147 | 148 | func (a *APIConfig) SetChannelStatus(channelID string, status string, failureReason string, transferState *int) (map[string]SyncedVideo, map[string]bool, error) { 149 | type apiChannelStatusResponse struct { 150 | Success bool `json:"success"` 151 | Error null.String `json:"error"` 152 | Data []SyncedVideo `json:"data"` 153 | } 154 | endpoint := a.ApiURL + "/yt/channel_status" 155 | 156 | sanitizeFailureReason(&failureReason) 157 | params := url.Values{ 158 | "channel_id": {channelID}, 159 | "sync_server": {a.HostName}, 160 | "auth_token": {a.ApiToken}, 161 | "sync_status": {status}, 162 | "failure_reason": 
{failureReason}, 163 | } 164 | if transferState != nil { 165 | params.Add("transfer_state", strconv.Itoa(*transferState)) 166 | } 167 | res, err := http.PostForm(endpoint, params) 168 | if err != nil { 169 | util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error()) 170 | time.Sleep(30 * time.Second) 171 | return a.SetChannelStatus(channelID, status, failureReason, transferState) 172 | } 173 | defer res.Body.Close() 174 | body, _ := ioutil.ReadAll(res.Body) 175 | if res.StatusCode >= http.StatusInternalServerError { 176 | util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint) 177 | log.Debugln(string(body)) 178 | time.Sleep(30 * time.Second) 179 | return a.SetChannelStatus(channelID, status, failureReason, transferState) 180 | } 181 | var response apiChannelStatusResponse 182 | err = json.Unmarshal(body, &response) 183 | if err != nil { 184 | return nil, nil, errors.Err(err) 185 | } 186 | if !response.Error.IsNull() { 187 | return nil, nil, errors.Err(response.Error.String) 188 | } 189 | if response.Data != nil { 190 | svs := make(map[string]SyncedVideo) 191 | claimNames := make(map[string]bool) 192 | for _, v := range response.Data { 193 | svs[v.VideoID] = v 194 | if v.ClaimName != "" { 195 | claimNames[v.ClaimName] = v.Published 196 | } 197 | } 198 | return svs, claimNames, nil 199 | } 200 | return nil, nil, errors.Err("invalid API response. 
// SetChannelClaimID reports a channel's claim ID back to the internal APIs.
// Transport errors and non-200 responses trigger an indefinite retry loop
// with a 30-second pause between attempts; API-level errors and unexpected
// payloads are returned to the caller.
func (a *APIConfig) SetChannelClaimID(channelID string, channelClaimID string) error {
	type apiChannelStatusResponse struct {
		Success bool        `json:"success"`
		Error   null.String `json:"error"`
		Data    string      `json:"data"`
	}
	endpoint := a.ApiURL + "/yt/set_channel_claim_id"
	res, err := http.PostForm(endpoint, url.Values{
		"channel_id":       {channelID},
		"auth_token":       {a.ApiToken},
		"channel_claim_id": {channelClaimID},
	})
	if err != nil {
		// network-level failure: wait and retry (recursive retry loop)
		util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
		time.Sleep(30 * time.Second)
		return a.SetChannelClaimID(channelID, channelClaimID)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	if res.StatusCode != http.StatusOK {
		// non-200 from the API: log the body and retry
		util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
		log.Debugln(string(body))
		time.Sleep(30 * time.Second)
		return a.SetChannelClaimID(channelID, channelClaimID)
	}
	var response apiChannelStatusResponse
	err = json.Unmarshal(body, &response)
	if err != nil {
		return errors.Err(err)
	}
	if !response.Error.IsNull() {
		return errors.Err(response.Error.String)
	}
	// the API acknowledges success with the literal string "ok"
	if response.Data != "ok" {
		return errors.Err("Unexpected API response")
	}
	return nil
}
// MarkVideoStatus posts a video's sync outcome to the internal APIs.
//
// A published (or upgrade-failed) status must carry both a claim ID and a
// claim name; in that case publish time, claim info and — when present —
// metadata version and size are attached. Failure reason (sanitized) and
// transfer state are attached whenever set. Transport errors and non-200
// responses are retried forever with 30-second pauses.
func (a *APIConfig) MarkVideoStatus(status shared.VideoStatus) error {
	endpoint := a.ApiURL + "/yt/video_status"

	sanitizeFailureReason(&status.FailureReason)
	vals := url.Values{
		"youtube_channel_id": {status.ChannelID},
		"video_id":           {status.VideoID},
		"status":             {status.Status},
		"auth_token":         {a.ApiToken},
	}
	if status.Status == VideoStatusPublished || status.Status == VideoStatusUpgradeFailed {
		// a published video must reference its on-chain claim
		if status.ClaimID == "" || status.ClaimName == "" {
			return errors.Err("claimID (%s) or claimName (%s) missing", status.ClaimID, status.ClaimName)
		}
		vals.Add("published_at", strconv.FormatInt(time.Now().Unix(), 10))
		vals.Add("claim_id", status.ClaimID)
		vals.Add("claim_name", status.ClaimName)
		if status.MetaDataVersion > 0 {
			vals.Add("metadata_version", fmt.Sprintf("%d", status.MetaDataVersion))
		}
		if status.Size != nil {
			vals.Add("size", strconv.FormatInt(*status.Size, 10))
		}
	}
	if status.FailureReason != "" {
		vals.Add("failure_reason", status.FailureReason)
	}
	if status.IsTransferred != nil {
		vals.Add("transferred", strconv.FormatBool(*status.IsTransferred))
	}
	res, err := http.PostForm(endpoint, vals)
	if err != nil {
		// network-level failure: wait and retry (recursive retry loop)
		util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
		time.Sleep(30 * time.Second)
		return a.MarkVideoStatus(status)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	if res.StatusCode != http.StatusOK {
		util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
		log.Debugln(string(body))
		time.Sleep(30 * time.Second)
		return a.MarkVideoStatus(status)
	}
	var response struct {
		Success bool        `json:"success"`
		Error   null.String `json:"error"`
		Data    null.String `json:"data"`
	}
	err = json.Unmarshal(body, &response)
	if err != nil {
		return err
	}
	if !response.Error.IsNull() {
		return errors.Err(response.Error.String)
	}
	// the API acknowledges success with the literal string "ok"
	if !response.Data.IsNull() && response.Data.String == "ok" {
		return nil
	}
	return errors.Err("invalid API response. Status code: %d", res.StatusCode)
}
Waiting to retry: %s", endpoint, err.Error()) 360 | time.Sleep(30 * time.Second) 361 | return a.VideoState(videoID) 362 | } 363 | defer res.Body.Close() 364 | body, _ := ioutil.ReadAll(res.Body) 365 | if res.StatusCode == http.StatusNotFound { 366 | return "not_found", nil 367 | } 368 | if res.StatusCode != http.StatusOK { 369 | util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint) 370 | log.Debugln(string(body)) 371 | time.Sleep(30 * time.Second) 372 | return a.VideoState(videoID) 373 | } 374 | var response struct { 375 | Success bool `json:"success"` 376 | Error null.String `json:"error"` 377 | Data null.String `json:"data"` 378 | } 379 | err = json.Unmarshal(body, &response) 380 | if err != nil { 381 | return "", errors.Err(err) 382 | } 383 | if !response.Error.IsNull() { 384 | return "", errors.Err(response.Error.String) 385 | } 386 | if !response.Data.IsNull() { 387 | return response.Data.String, nil 388 | } 389 | return "", errors.Err("invalid API response. Status code: %d", res.StatusCode) 390 | } 391 | 392 | type VideoRelease struct { 393 | ID uint64 `json:"id"` 394 | YoutubeDataID uint64 `json:"youtube_data_id"` 395 | VideoID string `json:"video_id"` 396 | ReleaseTime string `json:"release_time"` 397 | CreatedAt string `json:"created_at"` 398 | UpdatedAt string `json:"updated_at"` 399 | } 400 | 401 | func (a *APIConfig) GetReleasedDate(videoID string) (*VideoRelease, error) { 402 | endpoint := a.ApiURL + "/yt/released" 403 | vals := url.Values{ 404 | "video_id": {videoID}, 405 | "auth_token": {a.ApiToken}, 406 | } 407 | 408 | res, err := http.PostForm(endpoint, vals) 409 | if err != nil { 410 | util.SendErrorToSlack("error while trying to call %s. 
Waiting to retry: %s", endpoint, err.Error()) 411 | time.Sleep(30 * time.Second) 412 | return a.GetReleasedDate(videoID) 413 | } 414 | defer res.Body.Close() 415 | body, _ := ioutil.ReadAll(res.Body) 416 | if res.StatusCode == http.StatusNotFound { 417 | return nil, nil 418 | } 419 | if res.StatusCode != http.StatusOK { 420 | util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint) 421 | log.Debugln(string(body)) 422 | time.Sleep(30 * time.Second) 423 | return a.GetReleasedDate(videoID) 424 | } 425 | var response struct { 426 | Success bool `json:"success"` 427 | Error null.String `json:"error"` 428 | Data VideoRelease `json:"data"` 429 | } 430 | err = json.Unmarshal(body, &response) 431 | if err != nil { 432 | return nil, errors.Err(err) 433 | } 434 | if !response.Error.IsNull() { 435 | return nil, errors.Err(response.Error.String) 436 | } 437 | if response.Data.ReleaseTime != "" { 438 | return &response.Data, nil 439 | } 440 | return nil, errors.Err("invalid API response. 
Status code: %d", res.StatusCode) 441 | } 442 | -------------------------------------------------------------------------------- /manager/setup.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/lbryio/lbry.go/v2/extras/errors" 11 | "github.com/lbryio/lbry.go/v2/extras/jsonrpc" 12 | "github.com/lbryio/lbry.go/v2/extras/util" 13 | "github.com/lbryio/ytsync/v5/shared" 14 | "github.com/lbryio/ytsync/v5/timing" 15 | logUtils "github.com/lbryio/ytsync/v5/util" 16 | "github.com/lbryio/ytsync/v5/ytapi" 17 | 18 | "github.com/lbryio/ytsync/v5/tags_manager" 19 | "github.com/lbryio/ytsync/v5/thumbs" 20 | 21 | "github.com/shopspring/decimal" 22 | log "github.com/sirupsen/logrus" 23 | ) 24 | 25 | func (s *Sync) enableAddressReuse() error { 26 | accountsResponse, err := s.daemon.AccountList(1, 50) 27 | if err != nil { 28 | return errors.Err(err) 29 | } 30 | accounts := make([]jsonrpc.Account, 0, len(accountsResponse.Items)) 31 | ledger := "lbc_mainnet" 32 | if logUtils.IsRegTest() { 33 | ledger = "lbc_regtest" 34 | } 35 | for _, a := range accountsResponse.Items { 36 | if *a.Ledger == ledger { 37 | accounts = append(accounts, a) 38 | } 39 | } 40 | 41 | for _, a := range accounts { 42 | _, err = s.daemon.AccountSet(a.ID, jsonrpc.AccountSettings{ 43 | ChangeMaxUses: util.PtrToInt(1000), 44 | ReceivingMaxUses: util.PtrToInt(100), 45 | }) 46 | if err != nil { 47 | return errors.Err(err) 48 | } 49 | } 50 | return nil 51 | } 52 | func (s *Sync) walletSetup() error { 53 | start := time.Now() 54 | defer func(start time.Time) { 55 | timing.TimedComponent("walletSetup").Add(time.Since(start)) 56 | }(start) 57 | //prevent unnecessary concurrent execution and publishing while refilling/reallocating UTXOs 58 | s.walletMux.Lock() 59 | defer s.walletMux.Unlock() 60 | err := s.ensureChannelOwnership() 61 | if err != nil { 62 | return err 63 | } 
64 | 65 | balanceResp, err := s.daemon.AccountBalance(nil) 66 | if err != nil { 67 | return err 68 | } else if balanceResp == nil { 69 | return errors.Err("no response") 70 | } 71 | balance, err := strconv.ParseFloat(balanceResp.Available.String(), 64) 72 | if err != nil { 73 | return errors.Err(err) 74 | } 75 | log.Debugf("Starting balance is %.4f", balance) 76 | 77 | videosOnYoutube := int(s.DbChannelData.TotalVideos) 78 | 79 | log.Debugf("Source channel has %d videos", videosOnYoutube) 80 | if videosOnYoutube == 0 { 81 | return nil 82 | } 83 | 84 | s.syncedVideosMux.RLock() 85 | publishedCount := 0 86 | notUpgradedCount := 0 87 | failedCount := 0 88 | for _, sv := range s.syncedVideos { 89 | if sv.Published { 90 | publishedCount++ 91 | if sv.MetadataVersion < 2 { 92 | notUpgradedCount++ 93 | } 94 | } else { 95 | failedCount++ 96 | } 97 | } 98 | s.syncedVideosMux.RUnlock() 99 | 100 | log.Debugf("We already allocated credits for %d published videos and %d failed videos", publishedCount, failedCount) 101 | 102 | if videosOnYoutube > s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers) { 103 | videosOnYoutube = s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers) 104 | } 105 | unallocatedVideos := videosOnYoutube - (publishedCount + failedCount) 106 | if unallocatedVideos < 0 { 107 | unallocatedVideos = 0 108 | } 109 | channelFee := channelClaimAmount 110 | channelAlreadyClaimed := s.DbChannelData.ChannelClaimID != "" 111 | if channelAlreadyClaimed { 112 | channelFee = 0.0 113 | } 114 | requiredBalance := float64(unallocatedVideos)*(publishAmount+estimatedMaxTxFee) + channelFee 115 | if s.Manager.CliFlags.UpgradeMetadata { 116 | requiredBalance += float64(notUpgradedCount) * estimatedMaxTxFee 117 | } 118 | 119 | refillAmount := 0.0 120 | if balance < requiredBalance || balance < minimumAccountBalance { 121 | refillAmount = math.Max(math.Max(requiredBalance-balance, minimumAccountBalance-balance), minimumRefillAmount) 122 | } 123 | 124 | if 
s.Manager.CliFlags.Refill > 0 { 125 | refillAmount += float64(s.Manager.CliFlags.Refill) 126 | } 127 | 128 | if refillAmount > 0 { 129 | err := s.addCredits(refillAmount) 130 | if err != nil { 131 | return errors.Err(err) 132 | } 133 | } else if balance > requiredBalance { 134 | extraLBC := balance - requiredBalance 135 | if extraLBC > 5 { 136 | sendBackAmount := extraLBC - 1 137 | logUtils.SendInfoToSlack("channel %s has %.1f credits which is %.1f more than it requires (%.1f). We should send at least %.1f that back.", s.DbChannelData.ChannelId, balance, extraLBC, requiredBalance, sendBackAmount) 138 | } 139 | } 140 | 141 | claimAddress, err := s.daemon.AddressList(nil, nil, 1, 20) 142 | if err != nil { 143 | return err 144 | } else if claimAddress == nil { 145 | return errors.Err("could not get an address") 146 | } 147 | if s.DbChannelData.PublishAddress.Address == "" || !s.shouldTransfer() { 148 | s.DbChannelData.PublishAddress.Address = string(claimAddress.Items[0].Address) 149 | s.DbChannelData.PublishAddress.IsMine = true 150 | } 151 | if s.DbChannelData.PublishAddress.Address == "" { 152 | return errors.Err("found blank claim address") 153 | } 154 | 155 | err = s.ensureEnoughUTXOs() 156 | if err != nil { 157 | return err 158 | } 159 | 160 | return nil 161 | } 162 | 163 | func (s *Sync) getDefaultAccount() (string, error) { 164 | start := time.Now() 165 | defer func(start time.Time) { 166 | timing.TimedComponent("getDefaultAccount").Add(time.Since(start)) 167 | }(start) 168 | if s.defaultAccountID == "" { 169 | accountsResponse, err := s.daemon.AccountList(1, 50) 170 | if err != nil { 171 | return "", errors.Err(err) 172 | } 173 | ledger := "lbc_mainnet" 174 | if logUtils.IsRegTest() { 175 | ledger = "lbc_regtest" 176 | } 177 | for _, a := range accountsResponse.Items { 178 | if *a.Ledger == ledger { 179 | if a.IsDefault { 180 | s.defaultAccountID = a.ID 181 | break 182 | } 183 | } 184 | } 185 | 186 | if s.defaultAccountID == "" { 187 | return "", 
errors.Err("No default account found") 188 | } 189 | } 190 | return s.defaultAccountID, nil 191 | } 192 | 193 | func (s *Sync) ensureEnoughUTXOs() error { 194 | start := time.Now() 195 | defer func(start time.Time) { 196 | timing.TimedComponent("ensureEnoughUTXOs").Add(time.Since(start)) 197 | }(start) 198 | defaultAccount, err := s.getDefaultAccount() 199 | if err != nil { 200 | return err 201 | } 202 | 203 | utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000) 204 | if err != nil { 205 | return err 206 | } else if utxolist == nil { 207 | return errors.Err("no response") 208 | } 209 | 210 | target := 40 211 | slack := int(float32(0.1) * float32(target)) 212 | count := 0 213 | confirmedCount := 0 214 | 215 | for _, utxo := range utxolist.Items { 216 | amount, _ := strconv.ParseFloat(utxo.Amount, 64) 217 | if utxo.IsMyOutput && utxo.Type == "payment" && amount > 0.001 { 218 | if utxo.Confirmations > 0 { 219 | confirmedCount++ 220 | } 221 | count++ 222 | } 223 | } 224 | log.Infof("utxo count: %d (%d confirmed)", count, confirmedCount) 225 | UTXOWaitThreshold := 16 226 | if count < target-slack { 227 | balance, err := s.daemon.AccountBalance(&defaultAccount) 228 | if err != nil { 229 | return err 230 | } else if balance == nil { 231 | return errors.Err("no response") 232 | } 233 | 234 | balanceAmount, err := strconv.ParseFloat(balance.Available.String(), 64) 235 | if err != nil { 236 | return errors.Err(err) 237 | } 238 | //this is dumb but sometimes the balance is negative and it breaks everything, so let's check again 239 | if balanceAmount < 0 { 240 | log.Infof("negative balance of %.2f found. 
Waiting to retry...", balanceAmount) 241 | time.Sleep(10 * time.Second) 242 | balanceAmount, err = strconv.ParseFloat(balance.Available.String(), 64) 243 | if err != nil { 244 | return errors.Err(err) 245 | } 246 | } 247 | maxUTXOs := uint64(500) 248 | desiredUTXOCount := uint64(math.Floor((balanceAmount) / 0.1)) 249 | if desiredUTXOCount > maxUTXOs { 250 | desiredUTXOCount = maxUTXOs 251 | } 252 | if desiredUTXOCount < uint64(confirmedCount) { 253 | return nil 254 | } 255 | availableBalance, _ := balance.Available.Float64() 256 | log.Infof("Splitting balance of %.3f evenly between %d UTXOs", availableBalance, desiredUTXOCount) 257 | 258 | broadcastFee := 0.1 259 | prefillTx, err := s.daemon.AccountFund(defaultAccount, defaultAccount, fmt.Sprintf("%.4f", balanceAmount-broadcastFee), desiredUTXOCount, false) 260 | if err != nil { 261 | return err 262 | } else if prefillTx == nil { 263 | return errors.Err("no response") 264 | } 265 | if confirmedCount < UTXOWaitThreshold { 266 | err = s.waitForNewBlock() 267 | if err != nil { 268 | return err 269 | } 270 | } 271 | } else if confirmedCount < UTXOWaitThreshold { 272 | log.Println("Waiting for previous txns to confirm") 273 | err := s.waitForNewBlock() 274 | if err != nil { 275 | return err 276 | } 277 | } 278 | 279 | return nil 280 | } 281 | 282 | func (s *Sync) waitForNewBlock() error { 283 | defer func(start time.Time) { timing.TimedComponent("waitForNewBlock").Add(time.Since(start)) }(time.Now()) 284 | 285 | log.Printf("regtest: %t, docker: %t", logUtils.IsRegTest(), logUtils.IsUsingDocker()) 286 | status, err := s.daemon.Status() 287 | if err != nil { 288 | return err 289 | } 290 | 291 | for status.Wallet.Blocks == 0 || status.Wallet.BlocksBehind != 0 { 292 | time.Sleep(5 * time.Second) 293 | status, err = s.daemon.Status() 294 | if err != nil { 295 | return err 296 | } 297 | } 298 | 299 | currentBlock := status.Wallet.Blocks 300 | for i := 0; status.Wallet.Blocks <= currentBlock; i++ { 301 | if i%3 == 0 { 302 | 
// waitForNewBlock (continued): body of the polling loop opened above.
			log.Printf("Waiting for new block (%d)...", currentBlock+1)
		}
		if logUtils.IsRegTest() && logUtils.IsUsingDocker() {
			err = s.GenerateRegtestBlock()
			if err != nil {
				return err
			}
		}
		time.Sleep(10 * time.Second)
		status, err = s.daemon.Status()
		if err != nil {
			return err
		}
	}
	time.Sleep(5 * time.Second)
	return nil
}

// GenerateRegtestBlock asks the local lbrycrd node to mine one block; only
// meaningful on regtest, where blocks must be produced manually.
func (s *Sync) GenerateRegtestBlock() error {
	lbrycrd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
	if err != nil {
		return errors.Prefix("error getting lbrycrd client", err)
	}

	txs, err := lbrycrd.Generate(1)
	if err != nil {
		return errors.Prefix("error generating new block", err)
	}

	for _, tx := range txs {
		log.Info("Generated tx: ", tx.String())
	}
	return nil
}

// ensureChannelOwnership verifies that the wallet controls the channel claim
// recorded in the database, and (re)creates or updates the channel claim when
// it is missing or still carries pre-v2 metadata. On success the resulting
// claim ID is persisted back to the API.
func (s *Sync) ensureChannelOwnership() error {
	defer func(start time.Time) { timing.TimedComponent("ensureChannelOwnership").Add(time.Since(start)) }(time.Now())

	if s.DbChannelData.DesiredChannelName == "" {
		return errors.Err("no channel name set")
	}

	channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
	if err != nil {
		return err
	} else if channels == nil {
		return errors.Err("no channel response")
	}

	var channelToUse *jsonrpc.Transaction
	if len((*channels).Items) > 0 {
		if s.DbChannelData.ChannelClaimID == "" {
			return errors.Err("this channel does not have a recorded claimID in the database. To prevent failures, updates are not supported until an entry is manually added in the database")
		}
		for _, c := range (*channels).Items {
			c := c // copy: &c below must not alias the shared loop variable (pre-Go 1.22 semantics)
			log.Debugf("checking listed channel %s (%s)", c.ClaimID, c.Name)
			if c.ClaimID != s.DbChannelData.ChannelClaimID {
				continue
			}
			if c.Name != s.DbChannelData.DesiredChannelName {
				return errors.Err("the channel in the wallet is different than the channel in the database")
			}
			channelToUse = &c
			break
		}
		if channelToUse == nil {
			return errors.Err("this wallet has channels but not a single one is ours! Expected claim_id: %s (%s)", s.DbChannelData.ChannelClaimID, s.DbChannelData.DesiredChannelName)
		}
	} else if s.DbChannelData.TransferState == shared.TransferStateComplete {
		return errors.Err("the channel was transferred but appears to have been abandoned!")
	} else if s.DbChannelData.ChannelClaimID != "" {
		return errors.Err("the database has a channel recorded (%s) but nothing was found in our control", s.DbChannelData.ChannelClaimID)
	}

	// Channels claimed before v2 metadata (no thumbnail, or no language while
	// the DB has one) need an update; fully up-to-date channels are done here.
	channelUsesOldMetadata := false
	if channelToUse != nil {
		channelUsesOldMetadata = channelToUse.Value.GetThumbnail() == nil || (len(channelToUse.Value.GetLanguages()) == 0 && s.DbChannelData.Language != "")
		if !channelUsesOldMetadata {
			return nil
		}
	}

	balanceResp, err := s.daemon.AccountBalance(nil)
	if err != nil {
		return err
	} else if balanceResp == nil {
		return errors.Err("no response")
	}
	balance, err := decimal.NewFromString(balanceResp.Available.String())
	if err != nil {
		return errors.Err(err)
	}

	if balance.LessThan(decimal.NewFromFloat(channelClaimAmount)) {
		err = s.addCredits(channelClaimAmount + estimatedMaxTxFee*3)
		if err != nil {
			return err
		}
	}

	channelInfo, err := ytapi.ChannelInfo(s.DbChannelData.ChannelId)
	if err != nil {
		// YouTube occasionally returns an HTML error page instead of JSON;
		// that surfaces as this specific decode error, so retry once.
		if strings.Contains(err.Error(), "invalid character 'e' looking for beginning of value") {
			logUtils.SendInfoToSlack("failed to get channel data for %s. Waiting 1 minute to retry", s.DbChannelData.ChannelId)
			time.Sleep(1 * time.Minute)
			channelInfo, err = ytapi.ChannelInfo(s.DbChannelData.ChannelId)
			if err != nil {
				return err
			}
		} else {
			return err
		}
	}

	// Mirror the largest avatar (and banner, when present) to our own storage.
	thumbnail := channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails)-1].URL
	thumbnailURL, err := thumbs.MirrorThumbnail(thumbnail, s.DbChannelData.ChannelId)
	if err != nil {
		return err
	}

	var bannerURL *string
	if channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails != nil {
		bURL, err := thumbs.MirrorThumbnail(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails)-1].URL,
			"banner-"+s.DbChannelData.ChannelId,
		)
		if err != nil {
			return err
		}
		bannerURL = &bURL
	}

	var languages []string
	if s.DbChannelData.Language != "" {
		languages = []string{s.DbChannelData.Language}
	}

	var locations []jsonrpc.Location
	if channelInfo.Topbar.DesktopTopbarRenderer.CountryCode != "" {
		locations = []jsonrpc.Location{{Country: &channelInfo.Topbar.DesktopTopbarRenderer.CountryCode}}
	}
	var c *jsonrpc.TransactionSummary
	var recoveredChannelClaimID string
	claimCreateOptions := jsonrpc.ClaimCreateOptions{
		Title:        &channelInfo.Microformat.MicroformatDataRenderer.Title,
		Description:  &channelInfo.Metadata.ChannelMetadataRenderer.Description,
		Tags:         tags_manager.GetTagsForChannel(s.DbChannelData.ChannelId),
		Languages:    languages,
		Locations:    locations,
		ThumbnailURL: &thumbnailURL,
	}
	if channelUsesOldMetadata {
		// Bugfix: the old `da, err := ...` shadowed the outer err, so a failed
		// ChannelUpdate was silently ignored and c.Outputs[0] below panicked.
		da, daErr := s.getDefaultAccount()
		if daErr != nil {
			return daErr
		}
		if s.DbChannelData.TransferState <= 1 {
			c, err = s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, jsonrpc.ChannelUpdateOptions{
				ClearTags:      util.PtrToBool(true),
				ClearLocations: util.PtrToBool(true),
				ClearLanguages: util.PtrToBool(true),
				ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
					AccountID: &da,
					FundingAccountIDs: []string{
						da,
					},
					ClaimCreateOptions: claimCreateOptions,
					CoverURL:           bannerURL,
				},
			})
		} else {
			logUtils.SendInfoToSlack("%s (%s) has a channel with old metadata but isn't in our control anymore. Ignoring", s.DbChannelData.DesiredChannelName, s.DbChannelData.ChannelClaimID)
			return nil
		}
	} else {
		c, err = s.daemon.ChannelCreate(s.DbChannelData.DesiredChannelName, channelClaimAmount, jsonrpc.ChannelCreateOptions{
			ClaimCreateOptions: claimCreateOptions,
			CoverURL:           bannerURL,
		})
		if err != nil {
			claimId, err2 := s.getChannelClaimIDForTimedOutCreation()
			if err2 != nil {
				err = errors.Prefix(err2.Error(), err)
			} else {
				recoveredChannelClaimID = claimId
				// Bugfix: the create actually went through (the SDK merely timed
				// out), so clear the error; previously the stale error was
				// returned below and the recovered claim ID was never recorded.
				err = nil
			}
		}
	}
	if err != nil {
		return err
	}
	if recoveredChannelClaimID != "" {
		s.DbChannelData.ChannelClaimID = recoveredChannelClaimID
	} else {
		s.DbChannelData.ChannelClaimID = c.Outputs[0].ClaimID
	}
	return s.Manager.ApiConfig.SetChannelClaimID(s.DbChannelData.ChannelId, s.DbChannelData.ChannelClaimID)
}

//getChannelClaimIDForTimedOutCreation is a raw function that returns the only channel that exists in the wallet
// this is used because the SDK sucks and can't figure out when to return when creating a claim...
func (s *Sync) getChannelClaimIDForTimedOutCreation() (string, error) {
	channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
	if err != nil {
		return "", err
	} else if channels == nil {
		return "", errors.Err("no channel response")
	}
	if len((*channels).Items) != 1 {
		return "", errors.Err("more than one channel found when trying to recover from SDK failure in creating the channel")
	}
	desiredChannel := (*channels).Items[0]
	if desiredChannel.Name != s.DbChannelData.DesiredChannelName {
		return "", errors.Err("the channel found in the wallet has a different name than the one we expected")
	}

	return desiredChannel.ClaimID, nil
}

// addCredits sends amountToAdd LBC from the local lbrycrd wallet to an unused
// address of the default SDK account, then sleeps 15s so lbryum notices the tx.
func (s *Sync) addCredits(amountToAdd float64) error {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("addCredits").Add(time.Since(start))
	}(start)
	log.Printf("Adding %f credits", amountToAdd)
	lbrycrdd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
	if err != nil {
		return err
	}

	defaultAccount, err := s.getDefaultAccount()
	if err != nil {
		return err
	}
	addressResp, err := s.daemon.AddressUnused(&defaultAccount)
	if err != nil {
		return err
	} else if addressResp == nil {
		return errors.Err("no response")
	}
	address := string(*addressResp)

	_, err = lbrycrdd.SimpleSend(address, amountToAdd)
	if err != nil {
		return err
	}

	wait := 15 * time.Second
	log.Println("Waiting " + wait.String() + " for lbryum to let us know we have the new transaction")
	time.Sleep(wait)

	return nil
}
--------------------------------------------------------------------------------