├── .github ├── assets │ └── logo.png └── workflows │ ├── notifications.yml │ └── binary.yml ├── .gitignore ├── main.go ├── internal ├── utils │ ├── socket-windows.go │ ├── socket-unix.go │ ├── http-client.go │ ├── functions.go │ └── vars.go ├── downloaders │ ├── git-clone │ │ ├── auth.go │ │ ├── initial.go │ │ └── download.go │ ├── github-release │ │ ├── download.go │ │ ├── initial.go │ │ └── helpers.go │ ├── live-stream │ │ ├── initial.go │ │ ├── extractors.go │ │ ├── helpers.go │ │ └── download.go │ ├── youtube │ │ ├── download.go │ │ ├── helpers.go │ │ └── initial.go │ ├── s3 │ │ ├── initial.go │ │ ├── download.go │ │ └── helpers.go │ ├── youtube-music │ │ ├── initial.go │ │ ├── download.go │ │ └── metadata.go │ ├── google-drive │ │ ├── download.go │ │ ├── initial.go │ │ ├── auth.go │ │ └── helpers.go │ └── http │ │ ├── simple-downloader.go │ │ ├── multi-chunk-handlers.go │ │ ├── multi-downloader.go │ │ └── initial.go ├── output │ ├── vars.go │ └── manager.go └── scheduler │ └── scheduler.go ├── cmd ├── clean.go ├── http.go ├── s3.go ├── youtube.go ├── github-release.go ├── google-drive.go ├── youtube-music.go ├── git-clone.go ├── live-stream.go ├── root.go └── batch.go ├── LICENSE ├── go.mod └── go.sum /.github/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanq16/danzo/HEAD/.github/assets/logo.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | danzo 2 | download.yaml 3 | .danzo-temp 4 | .danzo-token.json 5 | danzo-gdrive.json 6 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/tanq16/danzo/cmd" 4 | 5 | func main() { 6 | cmd.Execute() 7 | } 8 | 
-------------------------------------------------------------------------------- /internal/utils/socket-windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | 3 | package utils 4 | 5 | import ( 6 | "syscall" 7 | ) 8 | 9 | func setSocketOptions(fd uintptr) { 10 | syscall.SetsockoptInt(syscall.Handle(fd), syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1) // Disable Nagle's algorithm 11 | syscall.SetsockoptInt(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, DefaultBufferSize) 12 | syscall.SetsockoptInt(syscall.Handle(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, DefaultBufferSize) 13 | } 14 | -------------------------------------------------------------------------------- /internal/utils/socket-unix.go: -------------------------------------------------------------------------------- 1 | //go:build linux || darwin 2 | 3 | package utils 4 | 5 | import ( 6 | "syscall" 7 | ) 8 | 9 | func setSocketOptions(fd uintptr) { 10 | syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1) // Disable Nagle's algorithm 11 | // syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_WINDOW_CLAMP, 2*bufferSize) 12 | // syscall.SetsockoptString(int(fd), syscall.IPPROTO_TCP, syscall.TCP_CONGESTION, "cubic") 13 | syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, DefaultBufferSize) 14 | syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUF, DefaultBufferSize) 15 | } 16 | -------------------------------------------------------------------------------- /cmd/clean.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/utils" 7 | ) 8 | 9 | func newCleanCmd() *cobra.Command { 10 | return &cobra.Command{ 11 | Use: "clean [path]", 12 | Short: "Clean up temporary files", 13 | Args: cobra.MaximumNArgs(1), 14 | 
Run: func(cmd *cobra.Command, args []string) { 15 | if len(args) == 0 { 16 | log.Debug().Str("op", "cmd/clean").Msgf("Cleaning local files in current directory") 17 | utils.CleanLocal() 18 | } else { 19 | log.Debug().Str("op", "cmd/clean").Msgf("Cleaning temp files for %s", args[0]) 20 | utils.CleanFunction(args[0]) 21 | } 22 | }, 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /cmd/http.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newHTTPCmd() *cobra.Command { 11 | var outputPath string 12 | 13 | cmd := &cobra.Command{ 14 | Use: "http [URL] [--output OUTPUT_PATH]", 15 | Short: "Download file via HTTP/HTTPS", 16 | Args: cobra.ExactArgs(1), 17 | Run: func(cmd *cobra.Command, args []string) { 18 | url := args[0] 19 | job := utils.DanzoJob{ 20 | JobType: "http", 21 | URL: url, 22 | OutputPath: outputPath, 23 | Connections: connections, 24 | ProgressType: "progress", 25 | HTTPClientConfig: globalHTTPConfig, 26 | Metadata: make(map[string]any), 27 | } 28 | jobs := []utils.DanzoJob{job} 29 | log.Debug().Str("op", "cmd/http").Msgf("Starting scheduler with %d jobs", len(jobs)) 30 | scheduler.Run(jobs, workers) 31 | }, 32 | } 33 | 34 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output file path") 35 | return cmd 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Tanishq 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without 
limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /cmd/s3.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newS3Cmd() *cobra.Command { 11 | var outputPath string 12 | var profile string 13 | 14 | cmd := &cobra.Command{ 15 | Use: "s3 [BUCKET/KEY or s3://BUCKET/KEY] [--output OUTPUT_PATH] [--profile PROFILE]", 16 | Short: "Download files from AWS S3", 17 | Args: cobra.ExactArgs(1), 18 | Run: func(cmd *cobra.Command, args []string) { 19 | job := utils.DanzoJob{ 20 | JobType: "s3", 21 | URL: args[0], 22 | OutputPath: outputPath, 23 | Connections: connections, 24 | ProgressType: "progress", 25 | HTTPClientConfig: globalHTTPConfig, 26 | Metadata: make(map[string]any), 27 | } 28 | job.Metadata["profile"] = profile 29 | jobs := []utils.DanzoJob{job} 30 | log.Debug().Str("op", "cmd/s3").Msgf("Starting scheduler with %d jobs", len(jobs)) 
31 | scheduler.Run(jobs, workers) 32 | }, 33 | } 34 | 35 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output path") 36 | cmd.Flags().StringVar(&profile, "profile", "default", "AWS profile to use") 37 | return cmd 38 | } 39 | -------------------------------------------------------------------------------- /cmd/youtube.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newYouTubeCmd() *cobra.Command { 11 | var outputPath string 12 | var format string 13 | 14 | cmd := &cobra.Command{ 15 | Use: "youtube [URL] [--output OUTPUT_PATH] [--format FORMAT]", 16 | Short: "Download YouTube videos", 17 | Aliases: []string{"yt"}, 18 | Args: cobra.ExactArgs(1), 19 | Run: func(cmd *cobra.Command, args []string) { 20 | job := utils.DanzoJob{ 21 | JobType: "youtube", 22 | URL: args[0], 23 | OutputPath: outputPath, 24 | ProgressType: "stream", 25 | HTTPClientConfig: globalHTTPConfig, 26 | Metadata: make(map[string]any), 27 | } 28 | if format != "" { 29 | job.Metadata["format"] = format 30 | } 31 | jobs := []utils.DanzoJob{job} 32 | log.Debug().Str("op", "cmd/youtube").Msgf("Starting scheduler with %d jobs", len(jobs)) 33 | scheduler.Run(jobs, workers) 34 | }, 35 | } 36 | 37 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output file path") 38 | cmd.Flags().StringVar(&format, "format", "decent", "Video format (best, 1080p, 720p, etc.)") 39 | return cmd 40 | } 41 | -------------------------------------------------------------------------------- /cmd/github-release.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | 
"github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newGHReleaseCmd() *cobra.Command { 11 | var outputPath string 12 | var manual bool 13 | 14 | cmd := &cobra.Command{ 15 | Use: "github-release [USER/REPO or URL] [--output OUTPUT_PATH] [--manual]", 16 | Short: "Download a release asset for a GitHub repository", 17 | Aliases: []string{"ghrelease", "ghr"}, 18 | Args: cobra.ExactArgs(1), 19 | Run: func(cmd *cobra.Command, args []string) { 20 | job := utils.DanzoJob{ 21 | JobType: "github-release", 22 | URL: args[0], 23 | OutputPath: outputPath, 24 | Connections: connections, 25 | ProgressType: "progress", 26 | HTTPClientConfig: globalHTTPConfig, 27 | Metadata: make(map[string]any), 28 | } 29 | job.Metadata["manual"] = manual 30 | jobs := []utils.DanzoJob{job} 31 | log.Debug().Str("op", "cmd/github-release").Msgf("Starting scheduler with %d jobs", len(jobs)) 32 | scheduler.Run(jobs, workers) 33 | }, 34 | } 35 | 36 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output file path") 37 | cmd.Flags().BoolVar(&manual, "manual", false, "Manually select release version and asset") 38 | return cmd 39 | } 40 | -------------------------------------------------------------------------------- /cmd/google-drive.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newGDriveCmd() *cobra.Command { 11 | var outputPath string 12 | var apiKey string 13 | var credentialsFile string 14 | 15 | cmd := &cobra.Command{ 16 | Use: "google-drive [URL] [--output OUTPUT_PATH] [--api-key YOUR_KEY] [--creds creds.json]", 17 | Short: "Download files or folders from Google Drive", 18 | Aliases: []string{"gdrive", "gd", "drive"}, 19 | Args: cobra.ExactArgs(1), 20 | Run: func(cmd *cobra.Command, args []string) { 21 | job := utils.DanzoJob{ 22 | 
JobType: "google-drive", 23 | URL: args[0], 24 | OutputPath: outputPath, 25 | Connections: connections, 26 | ProgressType: "progress", 27 | HTTPClientConfig: globalHTTPConfig, 28 | Metadata: make(map[string]any), 29 | } 30 | if apiKey != "" { 31 | job.Metadata["apiKey"] = apiKey 32 | } 33 | if credentialsFile != "" { 34 | job.Metadata["credentialsFile"] = credentialsFile 35 | } 36 | jobs := []utils.DanzoJob{job} 37 | log.Debug().Str("op", "cmd/google-drive").Msgf("Starting scheduler with %d jobs", len(jobs)) 38 | scheduler.Run(jobs, workers) 39 | }, 40 | } 41 | 42 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output path") 43 | cmd.Flags().StringVar(&apiKey, "api-key", "", "Google Drive API key") 44 | cmd.Flags().StringVar(&credentialsFile, "creds", "", "OAuth credentials JSON file") 45 | return cmd 46 | } 47 | -------------------------------------------------------------------------------- /cmd/youtube-music.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newYTMusicCmd() *cobra.Command { 11 | var outputPath string 12 | var deezerID string 13 | var appleID string 14 | 15 | cmd := &cobra.Command{ 16 | Use: "youtube-music [URL] [--output OUTPUT_PATH] [--deezer DEEZER_ID] [--apple APPLE_ID]", 17 | Short: "Download YouTube music with metadata", 18 | Aliases: []string{"ytm", "yt-music"}, 19 | Args: cobra.ExactArgs(1), 20 | Run: func(cmd *cobra.Command, args []string) { 21 | job := utils.DanzoJob{ 22 | JobType: "youtube-music", 23 | URL: args[0], 24 | OutputPath: outputPath, 25 | ProgressType: "stream", 26 | HTTPClientConfig: globalHTTPConfig, 27 | Metadata: make(map[string]any), 28 | } 29 | if deezerID != "" { 30 | job.Metadata["musicClient"] = "deezer" 31 | job.Metadata["musicID"] = deezerID 32 | } else if appleID != 
"" { 33 | job.Metadata["musicClient"] = "apple" 34 | job.Metadata["musicID"] = appleID 35 | } 36 | jobs := []utils.DanzoJob{job} 37 | log.Debug().Str("op", "cmd/youtube-music").Msgf("Starting scheduler with %d jobs", len(jobs)) 38 | scheduler.Run(jobs, workers) 39 | }, 40 | } 41 | 42 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output file path") 43 | cmd.Flags().StringVar(&deezerID, "deezer", "", "Deezer track ID for metadata") 44 | cmd.Flags().StringVar(&appleID, "apple", "", "Apple Music track ID for metadata") 45 | return cmd 46 | } 47 | -------------------------------------------------------------------------------- /internal/downloaders/git-clone/auth.go: -------------------------------------------------------------------------------- 1 | package gitclone 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/go-git/go-git/v5/plumbing/transport" 9 | "github.com/go-git/go-git/v5/plumbing/transport/http" 10 | "github.com/go-git/go-git/v5/plumbing/transport/ssh" 11 | "github.com/rs/zerolog/log" 12 | ) 13 | 14 | func getAuthMethod(repoURL string, metadata map[string]any) (transport.AuthMethod, error) { 15 | tokenStr, ok := metadata["token"] 16 | token := "" 17 | if ok { 18 | token = tokenStr.(string) 19 | log.Debug().Str("op", "git-clone/auth").Msg("token found") 20 | } 21 | if token != "" { 22 | if strings.Contains(repoURL, "github.com") { 23 | return &http.BasicAuth{ 24 | Username: "oauth2", 25 | Password: token, 26 | }, nil 27 | } else if strings.Contains(repoURL, "gitlab.com") { 28 | return &http.BasicAuth{ 29 | Username: "oauth2", 30 | Password: token, 31 | }, nil 32 | } else if strings.Contains(repoURL, "bitbucket.org") { 33 | return &http.BasicAuth{ 34 | Username: "x-token-auth", 35 | Password: token, 36 | }, nil 37 | } 38 | } 39 | sshKeyPath := "" 40 | sshKeyStr, ok := metadata["sshKey"] 41 | if ok { 42 | log.Debug().Str("op", "git-clone/auth").Msg("sshKey found") 43 | sshKeyPath = sshKeyStr.(string) 44 | } 45 | if 
sshKeyPath != "" { 46 | publicKeys, err := ssh.NewPublicKeysFromFile("git", sshKeyPath, "") 47 | if err != nil { 48 | return nil, fmt.Errorf("couldn't load SSH key: %v", err) 49 | } 50 | return publicKeys, nil 51 | } 52 | log.Debug().Str("op", "git-clone/auth").Msg("no authentication method found") 53 | return nil, errors.New("no authentication method found") 54 | } 55 | -------------------------------------------------------------------------------- /internal/downloaders/github-release/download.go: -------------------------------------------------------------------------------- 1 | package ghrelease 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/rs/zerolog/log" 7 | danzohttp "github.com/tanq16/danzo/internal/downloaders/http" 8 | "github.com/tanq16/danzo/internal/utils" 9 | ) 10 | 11 | func (d *GitReleaseDownloader) Download(job *utils.DanzoJob) error { 12 | downloadURL := job.Metadata["downloadURL"].(string) 13 | fileSize := job.Metadata["fileSize"].(int64) 14 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 15 | progressCh := make(chan int64) 16 | progressDone := make(chan struct{}) 17 | log.Info().Str("op", "github-release/download").Msgf("downloading %s", downloadURL) 18 | 19 | // Progress tracking goroutine 20 | go func() { 21 | defer close(progressDone) 22 | var totalDownloaded int64 23 | startTime := time.Now() 24 | ticker := time.NewTicker(100 * time.Millisecond) 25 | defer ticker.Stop() 26 | log.Debug().Str("op", "github-release/download").Msg("progress tracking goroutine started") 27 | for { 28 | select { 29 | case bytes, ok := <-progressCh: 30 | if !ok { 31 | if job.ProgressFunc != nil { 32 | job.ProgressFunc(totalDownloaded, fileSize) 33 | } 34 | return 35 | } 36 | totalDownloaded += bytes 37 | case <-ticker.C: 38 | if job.ProgressFunc != nil { 39 | job.ProgressFunc(totalDownloaded, fileSize) 40 | } 41 | job.Metadata["elapsedTime"] = time.Since(startTime).Seconds() 42 | } 43 | } 44 | }() 45 | 46 | log.Debug().Str("op", 
"github-release/download").Msg("calling simple download") 47 | err := danzohttp.PerformSimpleDownload(downloadURL, job.OutputPath, client, progressCh) 48 | <-progressDone 49 | return err 50 | } 51 | -------------------------------------------------------------------------------- /cmd/git-clone.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/spf13/cobra" 6 | "github.com/tanq16/danzo/internal/scheduler" 7 | "github.com/tanq16/danzo/internal/utils" 8 | ) 9 | 10 | func newGitCloneCmd() *cobra.Command { 11 | var outputPath string 12 | var depth int 13 | var token string 14 | var sshKey string 15 | 16 | cmd := &cobra.Command{ 17 | Use: "git-clone [REPO_URL] [--output OUTPUT_PATH] [--depth DEPTH] [--token GIT_TOKEN] [--ssh SSH_KEY_PATH]", 18 | Short: "Clone a Git repository", 19 | Aliases: []string{"gitclone", "gitc", "git", "clone"}, 20 | Args: cobra.ExactArgs(1), 21 | Run: func(cmd *cobra.Command, args []string) { 22 | job := utils.DanzoJob{ 23 | JobType: "git-clone", 24 | URL: args[0], 25 | OutputPath: outputPath, 26 | ProgressType: "stream", 27 | HTTPClientConfig: globalHTTPConfig, 28 | Metadata: make(map[string]any), 29 | } 30 | if depth > 0 { 31 | job.Metadata["depth"] = depth 32 | } 33 | if token != "" { 34 | job.Metadata["token"] = token 35 | } 36 | if sshKey != "" { 37 | job.Metadata["sshKey"] = sshKey 38 | } 39 | jobs := []utils.DanzoJob{job} 40 | log.Debug().Str("op", "cmd/git-clone").Msgf("Starting scheduler with %d jobs", len(jobs)) 41 | scheduler.Run(jobs, workers) 42 | }, 43 | } 44 | 45 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output directory path") 46 | cmd.Flags().IntVar(&depth, "depth", 0, "Clone depth (0 for full history)") 47 | cmd.Flags().StringVar(&token, "token", "", "Git token for authentication") 48 | cmd.Flags().StringVar(&sshKey, "ssh", "", "SSH key path for authentication") 49 | return cmd 50 | } 51 | 
-------------------------------------------------------------------------------- /internal/downloaders/live-stream/initial.go: -------------------------------------------------------------------------------- 1 | package m3u8 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "os" 7 | "path/filepath" 8 | "time" 9 | 10 | "github.com/rs/zerolog/log" 11 | "github.com/tanq16/danzo/internal/utils" 12 | ) 13 | 14 | type M3U8Downloader struct{} 15 | 16 | func (d *M3U8Downloader) ValidateJob(job *utils.DanzoJob) error { 17 | // Validation happens after chunklist URL extraction (if) 18 | if _, ok := job.Metadata["extract"]; !ok { 19 | parsedURL, err := url.Parse(job.URL) 20 | if err != nil { 21 | return fmt.Errorf("invalid URL: %v", err) 22 | } 23 | if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { 24 | return fmt.Errorf("unsupported scheme: %s", parsedURL.Scheme) 25 | } 26 | } 27 | log.Info().Str("op", "live-stream/initial").Msgf("job validated for %s", job.URL) 28 | return nil 29 | } 30 | 31 | func (d *M3U8Downloader) BuildJob(job *utils.DanzoJob) error { 32 | extractor, _ := job.Metadata["extract"].(string) 33 | log.Info().Str("op", "live-stream/initial").Msgf("Using extractor: %s", extractor) 34 | if err := runExtractor(job); err != nil { 35 | return fmt.Errorf("extractor failed: %v", err) 36 | } 37 | log.Info().Str("op", "live-stream/initial").Msgf("URL extracted: %s", job.URL) 38 | if job.OutputPath == "" { 39 | job.OutputPath = fmt.Sprintf("stream_%s.mp4", time.Now().Format("2006-01-02_15-04")) 40 | } 41 | if existingFile, err := os.Stat(job.OutputPath); err == nil && existingFile != nil { 42 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 43 | } 44 | tempDir := filepath.Join(filepath.Dir(job.OutputPath), ".danzo-temp", "m3u8_"+time.Now().Format("20060102150405")) 45 | job.Metadata["tempDir"] = tempDir 46 | log.Info().Str("op", "live-stream/initial").Msgf("job built for %s", job.URL) 47 | return nil 48 | } 49 | 
-------------------------------------------------------------------------------- /.github/workflows/notifications.yml: -------------------------------------------------------------------------------- 1 | name: Custom Notifications 2 | on: 3 | schedule: 4 | - cron: '00 17 * * 6' # 5:00 pm UTC every saturday 5 | issues: 6 | types: [opened, edited, deleted, closed] 7 | issue_comment: 8 | types: [created] 9 | workflow_run: 10 | workflows: ["Build Binary"] 11 | types: [completed] 12 | pull_request_target: 13 | types: [opened, closed, edited, review_requested] 14 | 15 | jobs: 16 | issue-comment-notification: 17 | if: github.event_name == 'issues' || github.event_name == 'issue_comment' 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Notify on Issue or Comment 21 | if: github.actor != 'Tanq16' 22 | run: | 23 | curl -H "Content-Type: application/json" -X POST \ 24 | -d "{\"content\": \"*New issue/comment from **${{ github.actor }}***\n${{ github.event.issue.html_url }}\"}" ${{ secrets.DISCORD_WEBHOOK }} 25 | 26 | build-status-notification: 27 | if: github.event_name == 'workflow_run' 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: Notify on Build Status 31 | run: | 32 | curl -H "Content-Type: application/json" -X POST \ 33 | -d "{\"content\": \"*Workflow run for **${{ github.repository }}***\n${{ github.event.workflow_run.name }} - ${{ github.event.workflow_run.conclusion }}\"}" ${{ secrets.DISCORD_WEBHOOK }} 34 | 35 | pull-request-notification: 36 | if: github.event_name == 'pull_request_target' 37 | runs-on: ubuntu-latest 38 | steps: 39 | - name: Notify on PR related activities 40 | if: github.actor != 'Tanq16' 41 | run: | 42 | curl -H "Content-Type: application/json" -X POST \ 43 | -d "{\"content\": \"*New PR activity from **${{ github.actor }}***\n${{ github.event.pull_request.html_url }}\"}" ${{ secrets.DISCORD_WEBHOOK }} 44 | -------------------------------------------------------------------------------- /cmd/live-stream.go: 
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/rs/zerolog/log" 7 | "github.com/spf13/cobra" 8 | "github.com/tanq16/danzo/internal/scheduler" 9 | "github.com/tanq16/danzo/internal/utils" 10 | ) 11 | 12 | func newM3U8Cmd() *cobra.Command { 13 | var outputPath string 14 | var extract string 15 | var videoPassword string 16 | 17 | cmd := &cobra.Command{ 18 | Use: "live-stream [URL] [--output OUTPUT_PATH] [--extract EXTRACTOR] [--password PASSWORD]", 19 | Short: "Download HLS/M3U8 live streams", 20 | Aliases: []string{"hls", "m3u8", "livestream", "stream"}, 21 | Args: cobra.ExactArgs(1), 22 | Run: func(cmd *cobra.Command, args []string) { 23 | job := utils.DanzoJob{ 24 | JobType: "live-stream", 25 | URL: args[0], 26 | OutputPath: outputPath, 27 | Connections: connections, 28 | ProgressType: "progress", 29 | HTTPClientConfig: globalHTTPConfig, 30 | Metadata: make(map[string]any), 31 | } 32 | if extract == "" { 33 | if strings.Contains(job.URL, "dailymotion.com") || strings.Contains(job.URL, "dai.ly") { 34 | extract = "dailymotion" 35 | } else if strings.Contains(job.URL, "rumble.com") { 36 | extract = "rumble" 37 | } 38 | } 39 | if extract != "" { 40 | job.Metadata["extract"] = extract 41 | } 42 | if videoPassword != "" { 43 | job.Metadata["password"] = videoPassword 44 | } 45 | jobs := []utils.DanzoJob{job} 46 | log.Debug().Str("op", "cmd/live-stream").Msgf("Starting scheduler with %d jobs", len(jobs)) 47 | scheduler.Run(jobs, workers) 48 | }, 49 | } 50 | 51 | cmd.Flags().StringVarP(&outputPath, "output", "o", "", "Output file path (default: stream_[timestamp].mp4)") 52 | cmd.Flags().StringVarP(&extract, "extract", "e", "", "Site-specific extractor to use (e.g., rumble, dailymotion)") 53 | cmd.Flags().StringVar(&videoPassword, "video-password", "", "Password for protected videos") 54 | return cmd 55 | } 56 | 
-------------------------------------------------------------------------------- /internal/downloaders/youtube/download.go: -------------------------------------------------------------------------------- 1 | package youtube 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os/exec" 8 | "strings" 9 | 10 | "github.com/rs/zerolog/log" 11 | "github.com/tanq16/danzo/internal/utils" 12 | ) 13 | 14 | func (d *YouTubeDownloader) Download(job *utils.DanzoJob) error { 15 | ytdlpPath := job.Metadata["ytdlpPath"].(string) 16 | ytdlpFormat := job.Metadata["ytdlpFormat"].(string) 17 | ffmpegPath := job.Metadata["ffmpegPath"].(string) 18 | args := []string{ 19 | "--progress", 20 | "--newline", 21 | "--no-warnings", 22 | "-f", ytdlpFormat, 23 | "--ffmpeg-location", ffmpegPath, 24 | "-o", job.OutputPath, 25 | "--no-playlist", 26 | job.URL, 27 | } 28 | cmd := exec.Command(ytdlpPath, args...) 29 | log.Debug().Str("op", "youtube/download").Msgf("Executing yt-dlp command: %s", cmd.String()) 30 | 31 | stdout, err := cmd.StdoutPipe() 32 | if err != nil { 33 | log.Error().Str("op", "youtube/download").Err(err).Msg("Error creating stdout pipe") 34 | return fmt.Errorf("error creating stdout pipe: %v", err) 35 | } 36 | stderr, err := cmd.StderrPipe() 37 | if err != nil { 38 | log.Error().Str("op", "youtube/download").Err(err).Msg("Error creating stderr pipe") 39 | return fmt.Errorf("error creating stderr pipe: %v", err) 40 | } 41 | if err := cmd.Start(); err != nil { 42 | log.Error().Str("op", "youtube/download").Err(err).Msg("Error starting yt-dlp") 43 | return fmt.Errorf("error starting yt-dlp: %v", err) 44 | } 45 | 46 | go processStream(stdout, job.StreamFunc) 47 | go processStream(stderr, job.StreamFunc) 48 | if err := cmd.Wait(); err != nil { 49 | log.Error().Str("op", "youtube/download").Err(err).Msg("yt-dlp command failed") 50 | return fmt.Errorf("yt-dlp failed: %v", err) 51 | } 52 | log.Info().Str("op", "youtube/download").Msgf("yt-dlp download completed for %s", job.URL) 53 | 
return nil 54 | } 55 | 56 | func processStream(reader io.Reader, streamFunc func(string)) { 57 | scanner := bufio.NewScanner(reader) 58 | for scanner.Scan() { 59 | line := strings.TrimSpace(scanner.Text()) 60 | if line != "" && streamFunc != nil { 61 | streamFunc(line) 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /internal/downloaders/git-clone/initial.go: -------------------------------------------------------------------------------- 1 | package gitclone 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | 9 | "github.com/rs/zerolog/log" 10 | "github.com/tanq16/danzo/internal/utils" 11 | ) 12 | 13 | type GitCloneDownloader struct{} 14 | 15 | func (d *GitCloneDownloader) ValidateJob(job *utils.DanzoJob) error { 16 | provider, owner, repo, err := parseGitURL(job.URL) 17 | if err != nil { 18 | return err 19 | } 20 | job.Metadata["provider"] = provider 21 | job.Metadata["owner"] = owner 22 | job.Metadata["repo"] = repo 23 | log.Info().Str("op", "git-clone/initial").Msgf("job validated for %s/%s/%s", provider, owner, repo) 24 | return nil 25 | } 26 | 27 | func (d *GitCloneDownloader) BuildJob(job *utils.DanzoJob) error { 28 | provider := job.Metadata["provider"].(string) 29 | owner := job.Metadata["owner"].(string) 30 | repo := job.Metadata["repo"].(string) 31 | cloneURL := fmt.Sprintf("https://%s/%s/%s", provider, owner, repo) 32 | job.Metadata["cloneURL"] = cloneURL 33 | if job.OutputPath == "" { 34 | job.OutputPath = repo 35 | } 36 | if info, err := os.Stat(job.OutputPath); err == nil && info.IsDir() { 37 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 38 | } 39 | outputDir := filepath.Dir(job.OutputPath) 40 | if err := os.MkdirAll(outputDir, 0755); err != nil { 41 | return fmt.Errorf("error creating output directory: %v", err) 42 | } 43 | log.Info().Str("op", "git-clone/initial").Msgf("job built for %s/%s/%s", provider, owner, repo) 44 | return nil 45 | } 46 | 47 | func 
parseGitURL(url string) (string, string, string, error) { 48 | url = strings.TrimSpace(url) 49 | url = strings.TrimSuffix(url, ".git") 50 | url = strings.TrimSuffix(url, "/") 51 | url = strings.TrimPrefix(url, "https://") 52 | url = strings.TrimPrefix(url, "http://") 53 | parts := strings.Split(url, "/") 54 | if len(parts) < 3 { 55 | return "", "", "", fmt.Errorf("invalid git URL format, expected provider/owner/repo") 56 | } 57 | provider := parts[0] 58 | owner := parts[1] 59 | repo := parts[2] 60 | switch provider { 61 | case "github.com", "gitlab.com", "bitbucket.org": 62 | default: 63 | return "", "", "", fmt.Errorf("unsupported git provider: %s", provider) 64 | } 65 | return provider, owner, repo, nil 66 | } 67 | -------------------------------------------------------------------------------- /internal/output/vars.go: -------------------------------------------------------------------------------- 1 | package output 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/charmbracelet/lipgloss" 7 | ) 8 | 9 | var ( 10 | successStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("37")) // dark green 11 | errorStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("9")) // red 12 | warningStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("11")) // yellow 13 | pendingStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("12")) // blue 14 | infoStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("14")) // cyan 15 | debugStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("250")) // light grey 16 | detailStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("13")) // purple 17 | streamStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("240")) // grey 18 | ) 19 | 20 | var StyleSymbols = map[string]string{ 21 | "pass": "✓", 22 | "fail": "✗", 23 | "warning": "!", 24 | "pending": "◉", 25 | "info": "ℹ", 26 | "arrow": "→", 27 | "bullet": "•", 28 | "dot": "·", 29 | "hline": "━", 30 | } 31 | 32 | func PrintSuccess(text string) { 33 | fmt.Println(successStyle.Render(text)) 
34 | } 35 | func PrintError(text string) { 36 | fmt.Println(errorStyle.Render(text)) 37 | } 38 | func PrintWarning(text string) { 39 | fmt.Println(warningStyle.Render(text)) 40 | } 41 | func PrintPending(text string) { 42 | fmt.Println(pendingStyle.Render(text)) 43 | } 44 | func PrintInfo(text string) { 45 | fmt.Println(infoStyle.Render(text)) 46 | } 47 | func PrintDebug(text string) { 48 | fmt.Println(debugStyle.Render(text)) 49 | } 50 | func PrintDetail(text string) { 51 | fmt.Println(detailStyle.Render(text)) 52 | } 53 | func PrintStream(text string) { 54 | fmt.Println(streamStyle.Render(text)) 55 | } 56 | func FSuccess(text string) string { 57 | return successStyle.Render(text) 58 | } 59 | func FError(text string) string { 60 | return errorStyle.Render(text) 61 | } 62 | func FWarning(text string) string { 63 | return warningStyle.Render(text) 64 | } 65 | func FPending(text string) string { 66 | return pendingStyle.Render(text) 67 | } 68 | func FInfo(text string) string { 69 | return infoStyle.Render(text) 70 | } 71 | func FDebug(text string) string { 72 | return debugStyle.Render(text) 73 | } 74 | func FDetail(text string) string { 75 | return detailStyle.Render(text) 76 | } 77 | func FStream(text string) string { 78 | return streamStyle.Render(text) 79 | } 80 | -------------------------------------------------------------------------------- /internal/downloaders/s3/initial.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/rs/zerolog/log" 8 | "github.com/tanq16/danzo/internal/utils" 9 | ) 10 | 11 | type S3Downloader struct{} 12 | 13 | func (d *S3Downloader) ValidateJob(job *utils.DanzoJob) error { 14 | bucket, key, err := parseS3URL(job.URL) 15 | if err != nil { 16 | return err 17 | } 18 | job.Metadata["bucket"] = bucket 19 | job.Metadata["key"] = key 20 | log.Info().Str("op", "s3/initial").Msgf("job validated for s3://%s/%s", bucket, key) 21 | return 
// parseS3URL splits an "s3://bucket/key" URL into its bucket and key parts.
// The "s3://" scheme prefix is optional and the key may be empty (bucket
// root). Returns an error only when the bucket segment is empty.
func parseS3URL(url string) (string, string, error) {
	trimmed := strings.TrimPrefix(url, "s3://")
	// strings.Cut replaces the previous SplitN(..., 2) plus a dead
	// "len(parts) < 1" check (SplitN with a non-empty separator always
	// returns at least one element); only an empty bucket is invalid.
	bucket, key, _ := strings.Cut(trimmed, "/")
	if bucket == "" {
		return "", "", fmt.Errorf("invalid S3 URL format")
	}
	return bucket, key, nil
}
/internal/downloaders/github-release/initial.go: -------------------------------------------------------------------------------- 1 | package ghrelease 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "strings" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/tanq16/danzo/internal/utils" 10 | ) 11 | 12 | type GitReleaseDownloader struct{} 13 | 14 | func (d *GitReleaseDownloader) ValidateJob(job *utils.DanzoJob) error { 15 | owner, repo, err := parseGitHubURL(job.URL) 16 | if err != nil { 17 | return err 18 | } 19 | job.Metadata["owner"] = owner 20 | job.Metadata["repo"] = repo 21 | log.Info().Str("op", "github-release/initial").Msgf("job validated for %s/%s", owner, repo) 22 | return nil 23 | } 24 | 25 | func (d *GitReleaseDownloader) BuildJob(job *utils.DanzoJob) error { 26 | owner := job.Metadata["owner"].(string) 27 | repo := job.Metadata["repo"].(string) 28 | manual := job.Metadata["manual"].(bool) 29 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 30 | var assets []map[string]any 31 | var tagName string 32 | var err error 33 | 34 | log.Debug().Str("op", "github-release/initial").Msgf("fetching release info for %s/%s", owner, repo) 35 | assets, tagName, err = getGitHubReleaseAssets(owner, repo, client) 36 | if err != nil { 37 | return fmt.Errorf("error fetching release info: %v", err) 38 | } 39 | // Try auto-select first; not working, prompt for manual or fail 40 | log.Debug().Str("op", "github-release/initial").Msgf("auto-selecting asset for %s/%s", runtime.GOOS, runtime.GOARCH) 41 | downloadURL, size, err := selectGitHubLatestAsset(assets) 42 | if err != nil { 43 | return err 44 | } 45 | if downloadURL == "" && !manual { 46 | log.Error().Str("op", "github-release/initial").Msgf("could not automatically select asset for %s/%s, no --manual flag", runtime.GOOS, runtime.GOARCH) 47 | return fmt.Errorf("could not automatically select asset for platform %s/%s, use --manual flag", runtime.GOOS, runtime.GOARCH) 48 | } 49 | if manual { 50 | 
log.Debug().Str("op", "github-release/initial").Msgf("prompting for manual asset selection for %s/%s", runtime.GOOS, runtime.GOARCH) 51 | job.PauseFunc() 52 | downloadURL, size, err = promptGitHubAssetSelection(assets, tagName) 53 | job.ResumeFunc() 54 | if err != nil { 55 | return err 56 | } 57 | } 58 | 59 | urlParts := strings.Split(downloadURL, "/") 60 | filename := urlParts[len(urlParts)-1] 61 | if job.OutputPath == "" { 62 | job.OutputPath = filename 63 | } 64 | job.Metadata["downloadURL"] = downloadURL 65 | job.Metadata["fileSize"] = size 66 | job.Metadata["tagName"] = tagName 67 | log.Info().Str("op", "github-release/initial").Msgf("job built for %s/%s", owner, repo) 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /internal/downloaders/youtube-music/initial.go: -------------------------------------------------------------------------------- 1 | package youtubemusic 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "strings" 9 | 10 | "github.com/rs/zerolog/log" 11 | "github.com/tanq16/danzo/internal/downloaders/youtube" 12 | "github.com/tanq16/danzo/internal/utils" 13 | ) 14 | 15 | type YTMusicDownloader struct{} 16 | 17 | func (d *YTMusicDownloader) ValidateJob(job *utils.DanzoJob) error { 18 | if !strings.Contains(job.URL, "youtube.com/watch") && 19 | !strings.Contains(job.URL, "youtu.be/") && 20 | !strings.Contains(job.URL, "music.youtube.com") { 21 | return fmt.Errorf("invalid YouTube URL") 22 | } 23 | if client, ok := job.Metadata["musicClient"].(string); ok { 24 | if client != "deezer" && client != "apple" { 25 | return fmt.Errorf("unsupported music client: %s", client) 26 | } 27 | if _, ok := job.Metadata["musicID"].(string); !ok { 28 | return fmt.Errorf("music ID required for %s", client) 29 | } 30 | } 31 | log.Info().Str("op", "youtube-music/initial").Msgf("job validated for %s", job.URL) 32 | return nil 33 | } 34 | 35 | func (d *YTMusicDownloader) BuildJob(job 
*utils.DanzoJob) error { 36 | ytdlpPath, err := youtube.EnsureYtdlp() 37 | if err != nil { 38 | return fmt.Errorf("error ensuring yt-dlp: %v", err) 39 | } 40 | job.Metadata["ytdlpPath"] = ytdlpPath 41 | log.Debug().Str("op", "youtube-music/initial").Msgf("Using yt-dlp at: %s", ytdlpPath) 42 | 43 | ffmpegPath, err := ensureFFmpeg() 44 | if err != nil { 45 | return fmt.Errorf("error ensuring ffmpeg: %v", err) 46 | } 47 | job.Metadata["ffmpegPath"] = ffmpegPath 48 | log.Debug().Str("op", "youtube-music/initial").Msgf("Using ffmpeg at: %s", ffmpegPath) 49 | 50 | if job.OutputPath == "" { 51 | job.OutputPath = "%(title)s.m4a" 52 | } else if !strings.HasSuffix(job.OutputPath, ".m4a") { 53 | // Force .m4a extension 54 | job.OutputPath = strings.TrimSuffix(job.OutputPath, filepath.Ext(job.OutputPath)) + ".m4a" 55 | } 56 | 57 | if info, err := os.Stat(job.OutputPath); err == nil && !info.IsDir() { 58 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 59 | log.Debug().Str("op", "youtube-music/initial").Msgf("Output path renewed to %s", job.OutputPath) 60 | } 61 | log.Info().Str("op", "youtube-music/initial").Msgf("job built for %s", job.URL) 62 | return nil 63 | } 64 | 65 | func ensureFFmpeg() (string, error) { 66 | path, err := exec.LookPath("ffmpeg") 67 | if err == nil { 68 | return path, nil 69 | } 70 | log.Error().Str("op", "youtube-music/initial").Msg("ffmpeg not found in PATH. 
Please install it.") 71 | return "", fmt.Errorf("ffmpeg not found in PATH, please install manually") 72 | } 73 | -------------------------------------------------------------------------------- /internal/downloaders/youtube/helpers.go: -------------------------------------------------------------------------------- 1 | package youtube 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "os" 8 | "path/filepath" 9 | "runtime" 10 | 11 | "github.com/rs/zerolog/log" 12 | "github.com/tanq16/danzo/internal/utils" 13 | ) 14 | 15 | func downloadYtdlp() (string, error) { 16 | goos := runtime.GOOS 17 | goarch := runtime.GOARCH 18 | var filename string 19 | switch { 20 | case goos == "windows" && goarch == "amd64": 21 | filename = "yt-dlp.exe" 22 | case goos == "windows" && goarch == "arm64": 23 | filename = "yt-dlp_arm64.exe" 24 | case goos == "linux" && goarch == "amd64": 25 | filename = "yt-dlp_linux" 26 | case goos == "linux" && goarch == "arm64": 27 | filename = "yt-dlp_linux_aarch64" 28 | case goos == "darwin": 29 | filename = "yt-dlp_macos" 30 | default: 31 | return "", fmt.Errorf("unsupported OS/arch: %s/%s", goos, goarch) 32 | } 33 | 34 | tempDir := ".danzo-temp" 35 | if err := os.MkdirAll(tempDir, 0755); err != nil { 36 | log.Error().Str("op", "youtube/helpers").Err(err).Msg("Error creating temp directory") 37 | return "", fmt.Errorf("error creating temp directory: %v", err) 38 | } 39 | downloadURL := fmt.Sprintf("https://github.com/yt-dlp/yt-dlp/releases/latest/download/%s", filename) 40 | filePath := filepath.Join(tempDir, "yt-dlp") 41 | if goos == "windows" { 42 | filePath += ".exe" 43 | } 44 | 45 | log.Info().Str("op", "youtube/helpers").Msgf("Downloading yt-dlp from %s to %s", downloadURL, filePath) 46 | if err := downloadFile(downloadURL, filePath); err != nil { 47 | log.Error().Str("op", "youtube/helpers").Err(err).Msg("Failed to download yt-dlp") 48 | return "", err 49 | } 50 | if goos != "windows" { 51 | if err := os.Chmod(filePath, 0755); err != nil { 52 
| log.Error().Str("op", "youtube/helpers").Err(err).Msg("Failed to set permissions for yt-dlp") 53 | return "", fmt.Errorf("error setting permissions: %v", err) 54 | } 55 | } 56 | log.Info().Str("op", "youtube/helpers").Msg("yt-dlp downloaded successfully") 57 | return filePath, nil 58 | } 59 | 60 | func downloadFile(url, filepath string) error { 61 | log.Debug().Str("op", "youtube/helpers").Msgf("Downloading file from %s to %s", url, filepath) 62 | client := utils.NewDanzoHTTPClient(utils.HTTPClientConfig{}) 63 | req, err := http.NewRequest("GET", url, nil) 64 | if err != nil { 65 | return err 66 | } 67 | resp, err := client.Do(req) 68 | if err != nil { 69 | return err 70 | } 71 | defer resp.Body.Close() 72 | if resp.StatusCode != http.StatusOK { 73 | return fmt.Errorf("bad status: %s", resp.Status) 74 | } 75 | out, err := os.Create(filepath) 76 | if err != nil { 77 | return err 78 | } 79 | defer out.Close() 80 | _, err = io.Copy(out, resp.Body) 81 | return err 82 | } 83 | -------------------------------------------------------------------------------- /internal/downloaders/git-clone/download.go: -------------------------------------------------------------------------------- 1 | package gitclone 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "runtime" 9 | "strconv" 10 | "strings" 11 | 12 | "github.com/go-git/go-git/v5" 13 | "github.com/rs/zerolog/log" 14 | "github.com/tanq16/danzo/internal/utils" 15 | ) 16 | 17 | type gitCloneProgressWriter struct { 18 | streamFunc func(string) 19 | } 20 | 21 | func (p *gitCloneProgressWriter) Write(data []byte) (int, error) { 22 | message := strings.TrimSpace(string(data)) 23 | if message != "" && p.streamFunc != nil { 24 | p.streamFunc(message) 25 | } 26 | return len(data), nil 27 | } 28 | 29 | func (d *GitCloneDownloader) Download(job *utils.DanzoJob) error { 30 | cloneURL := job.Metadata["cloneURL"].(string) 31 | depth, _ := job.Metadata["depth"].(int) 32 | auth, err := getAuthMethod(cloneURL, 
job.Metadata) 33 | if err != nil && job.StreamFunc != nil { 34 | job.StreamFunc(fmt.Sprintf("Warning: %v", err)) 35 | } 36 | progress := &gitCloneProgressWriter{ 37 | streamFunc: job.StreamFunc, 38 | } 39 | cloneOptions := &git.CloneOptions{ 40 | URL: cloneURL, 41 | Progress: progress, 42 | Auth: auth, 43 | } 44 | log.Debug().Str("op", "git-clone/download").Msg("clone options created") 45 | if depth > 0 { 46 | cloneOptions.Depth = depth 47 | } 48 | if job.StreamFunc != nil { 49 | job.StreamFunc(fmt.Sprintf("Cloning %s", cloneURL)) 50 | } 51 | log.Info().Str("op", "git-clone/download").Msg("initiating clone") 52 | _, err = git.PlainClone(job.OutputPath, false, cloneOptions) 53 | if err != nil { 54 | log.Error().Str("op", "git-clone/download").Msgf("git clone failed: %v", err) 55 | return fmt.Errorf("git clone failed: %v", err) 56 | } 57 | log.Info().Str("op", "git-clone/download").Msg("clone completed") 58 | size, err := getDirSize(job.OutputPath) 59 | if err == nil && job.StreamFunc != nil { 60 | job.StreamFunc(fmt.Sprintf("Clone complete - Total size: %s", utils.FormatBytes(uint64(size)))) 61 | } 62 | return nil 63 | } 64 | 65 | func getDirSize(path string) (int64, error) { 66 | // Use "du" if available (faster option) 67 | if runtime.GOOS == "linux" || runtime.GOOS == "darwin" { 68 | cmd := exec.Command("du", "-s", "-b", path) 69 | output, err := cmd.CombinedOutput() 70 | if err == nil { 71 | parts := strings.Split(string(output), "\t") 72 | if len(parts) > 0 { 73 | size, err := strconv.ParseInt(parts[0], 10, 64) 74 | if err == nil { 75 | return size, nil 76 | } 77 | } 78 | } 79 | } 80 | var size int64 81 | err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { 82 | if err != nil { 83 | return err 84 | } 85 | if !info.IsDir() { 86 | size += info.Size() 87 | } 88 | return nil 89 | }) 90 | return size, err 91 | } 92 | -------------------------------------------------------------------------------- /internal/utils/http-client.go: 
-------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "net" 5 | "net/http" 6 | "net/http/cookiejar" 7 | "net/url" 8 | "syscall" 9 | "time" 10 | 11 | "github.com/rs/zerolog/log" 12 | ) 13 | 14 | type HTTPClientConfig struct { 15 | Jar *cookiejar.Jar 16 | Timeout time.Duration 17 | KATimeout time.Duration 18 | ProxyURL string 19 | ProxyUsername string 20 | ProxyPassword string 21 | UserAgent string 22 | Headers map[string]string 23 | HighThreadMode bool // advanced socket options for high concurrency 24 | } 25 | 26 | type HTTPDoer interface { 27 | Do(req *http.Request) (*http.Response, error) 28 | SetHeader(key, value string) 29 | } 30 | 31 | type DanzoHTTPClient struct { 32 | client *http.Client 33 | config HTTPClientConfig 34 | } 35 | 36 | func NewDanzoHTTPClient(cfg HTTPClientConfig) *DanzoHTTPClient { 37 | if cfg.Timeout == 0 { 38 | cfg.Timeout = 60 * time.Second 39 | } 40 | if cfg.KATimeout == 0 { 41 | cfg.KATimeout = 60 * time.Second 42 | } 43 | if cfg.Jar == nil { 44 | jar, _ := cookiejar.New(nil) 45 | cfg.Jar = jar 46 | } 47 | transport := &http.Transport{ 48 | IdleConnTimeout: cfg.KATimeout, 49 | MaxIdleConns: 100, 50 | MaxIdleConnsPerHost: 100, 51 | DisableCompression: true, 52 | MaxConnsPerHost: 0, 53 | } 54 | if cfg.HighThreadMode { 55 | transport.DialContext = (&net.Dialer{ 56 | Timeout: 30 * time.Second, 57 | KeepAlive: 30 * time.Second, 58 | DualStack: true, 59 | Control: func(network, address string, c syscall.RawConn) error { 60 | return c.Control(func(fd uintptr) { 61 | setSocketOptions(fd) 62 | }) 63 | }, 64 | }).DialContext 65 | log.Debug().Str("op", "utils/http-client").Msg("Using high thread mode") 66 | } 67 | if cfg.ProxyURL != "" { 68 | proxyURL, err := url.Parse(cfg.ProxyURL) 69 | if err == nil { 70 | if cfg.ProxyUsername != "" { 71 | if cfg.ProxyPassword != "" { 72 | proxyURL.User = url.UserPassword(cfg.ProxyUsername, cfg.ProxyPassword) 73 | } else { 74 | proxyURL.User = 
url.User(cfg.ProxyUsername) 75 | } 76 | } 77 | log.Debug().Str("op", "utils/http-client").Msgf("Using proxy: %s", proxyURL.String()) 78 | transport.Proxy = http.ProxyURL(proxyURL) 79 | } 80 | } 81 | return &DanzoHTTPClient{ 82 | client: &http.Client{ 83 | Timeout: cfg.Timeout, 84 | Transport: transport, 85 | Jar: cfg.Jar, 86 | }, 87 | config: cfg, 88 | } 89 | } 90 | 91 | func (d *DanzoHTTPClient) SetHeader(key, value string) { 92 | d.config.Headers[key] = value 93 | } 94 | 95 | func (d *DanzoHTTPClient) Do(req *http.Request) (*http.Response, error) { 96 | if d.config.UserAgent != "" { 97 | req.Header.Set("User-Agent", d.config.UserAgent) 98 | } else if d.config.UserAgent == "randomize" { 99 | req.Header.Set("User-Agent", GetRandomUserAgent()) 100 | } else { 101 | req.Header.Set("User-Agent", "Danzo-CLI") 102 | } 103 | for k, v := range d.config.Headers { 104 | req.Header.Set(k, v) 105 | } 106 | return d.client.Do(req) 107 | } 108 | -------------------------------------------------------------------------------- /internal/utils/functions.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | func GetRandomUserAgent() string { 12 | return userAgents[time.Now().UnixNano()%int64(len(userAgents))] 13 | } 14 | 15 | func RenewOutputPath(outputPath string) string { 16 | dir := filepath.Dir(outputPath) 17 | base := filepath.Base(outputPath) 18 | ext := filepath.Ext(base) 19 | name := base[:len(base)-len(ext)] 20 | index := 1 21 | for { 22 | outputPath = filepath.Join(dir, fmt.Sprintf("%s-(%d)%s", name, index, ext)) 23 | if _, err := os.Stat(outputPath); os.IsNotExist(err) { 24 | return outputPath 25 | } 26 | index++ 27 | } 28 | } 29 | 30 | func ParseHeaderArgs(headers []string) map[string]string { 31 | result := make(map[string]string) 32 | for _, header := range headers { 33 | parts := strings.SplitN(header, ":", 2) 34 | if 
// FormatBytes renders a byte count using binary (1024-based) units, e.g.
// "512 B", "1.50 KB", "2.00 MB".
func FormatBytes(bytes uint64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	div, exp := uint64(unit), 0
	for n := bytes / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.2f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

// FormatSpeed renders a transfer rate for bytes moved over elapsed seconds.
// Returns "0 B/s" when elapsed is zero to avoid a division by zero.
func FormatSpeed(bytes int64, elapsed float64) string {
	if elapsed == 0 {
		return "0 B/s"
	}
	bps := float64(bytes) / elapsed
	// FormatBytes output always ends in "B", so appending "/s" yields the
	// same string as the previous slice-off-"B"-and-reappend trick, without
	// depending on the suffix position.
	return FormatBytes(uint64(bps)) + "/s"
}

// CleanLocal removes the .danzo-temp scratch directory from the current
// working directory. A missing directory is not an error.
func CleanLocal() error {
	// filepath.Dir(".") is always "." — use the relative path directly.
	tempDir := ".danzo-temp"
	if _, err := os.Stat(tempDir); err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	return os.RemoveAll(tempDir)
}
os.ReadDir(tempDir) 108 | if err != nil { 109 | return err 110 | } 111 | if len(remainingFiles) == 0 { 112 | if err := os.Remove(tempDir); err != nil { 113 | return err 114 | } 115 | } 116 | return nil 117 | } 118 | -------------------------------------------------------------------------------- /.github/workflows/binary.yml: -------------------------------------------------------------------------------- 1 | name: Build Binary 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | permissions: 8 | contents: write 9 | packages: write 10 | 11 | jobs: 12 | create-release: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | version: ${{ steps.version.outputs.new_version }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Determine Version 22 | id: version 23 | run: | 24 | # Get the latest version tag, default to v0.1 if none exists 25 | LATEST_TAG=$(gh release list -L 1 | cut -f 1 | sed 's/Release //' || echo "v0.0") 26 | LATEST_TAG=${LATEST_TAG:-v0.0} 27 | 28 | # Extract current version numbers 29 | MAJOR=$(echo $LATEST_TAG | cut -d. -f1 | sed 's/v//') 30 | MINOR=$(echo $LATEST_TAG | cut -d. 
-f2) 31 | 32 | # Check commit message for version bump 33 | if git log -1 --pretty=%B | grep -i "version bump"; then 34 | NEW_VERSION="v$((MAJOR + 1)).0" 35 | else 36 | NEW_VERSION="v$MAJOR.$((MINOR + 1))" 37 | fi 38 | 39 | echo "Previous version: $LATEST_TAG" 40 | echo "New version: $NEW_VERSION" 41 | echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT" 42 | env: 43 | GH_TOKEN: ${{ github.token }} 44 | 45 | - name: Create Release 46 | id: create_release 47 | run: | 48 | gh release create "${{ steps.version.outputs.new_version }}" \ 49 | --title "Release ${{ steps.version.outputs.new_version }}" \ 50 | --draft \ 51 | --notes "Release ${{ steps.version.outputs.new_version }}" \ 52 | --target ${{ github.sha }} 53 | env: 54 | GH_TOKEN: ${{ github.token }} 55 | 56 | build: 57 | needs: create-release 58 | runs-on: ubuntu-latest 59 | strategy: 60 | matrix: 61 | os: [linux, windows, darwin] 62 | arch: [amd64, arm64] 63 | steps: 64 | - uses: actions/checkout@v4 65 | 66 | - name: Set up Go 67 | uses: actions/setup-go@v5 68 | with: 69 | go-version: '1.23' 70 | 71 | - name: Build Binary 72 | run: | 73 | GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} go build -ldflags="-s -w -X github.com/tanq16/danzo/cmd.DanzoVersion=${{ needs.create-release.outputs.version }}" -o danzo${{ matrix.os == 'windows' && '.exe' || '' }} . 
74 | zip danzo-${{ matrix.os }}-${{ matrix.arch }}.zip danzo${{ matrix.os == 'windows' && '.exe' || '' }} LICENSE README.md 75 | 76 | - name: Upload Release Asset 77 | run: | 78 | gh release upload "${{ needs.create-release.outputs.version }}" \ 79 | "danzo-${{ matrix.os }}-${{ matrix.arch }}.zip" --clobber 80 | env: 81 | GH_TOKEN: ${{ github.token }} 82 | 83 | publish: 84 | needs: [create-release, build] 85 | runs-on: ubuntu-latest 86 | steps: 87 | - uses: actions/checkout@v4 88 | 89 | - name: Publish Release 90 | run: | 91 | gh release edit "${{ needs.create-release.outputs.version }}" --draft=false 92 | env: 93 | GH_TOKEN: ${{ github.token }} 94 | -------------------------------------------------------------------------------- /internal/downloaders/youtube-music/download.go: -------------------------------------------------------------------------------- 1 | package youtubemusic 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os/exec" 8 | "strings" 9 | 10 | "github.com/rs/zerolog/log" 11 | "github.com/tanq16/danzo/internal/utils" 12 | ) 13 | 14 | func (d *YTMusicDownloader) Download(job *utils.DanzoJob) error { 15 | ytdlpPath := job.Metadata["ytdlpPath"].(string) 16 | ffmpegPath := job.Metadata["ffmpegPath"].(string) 17 | tempOutput := strings.TrimSuffix(job.OutputPath, ".m4a") 18 | args := []string{ 19 | "--progress", 20 | "--newline", 21 | "--no-warnings", 22 | "-x", // Extract audio 23 | "--audio-format", "m4a", 24 | "--audio-quality", "0", // Best quality 25 | "--ffmpeg-location", ffmpegPath, 26 | "-o", tempOutput + ".%(ext)s", 27 | "--no-playlist", 28 | job.URL, 29 | } 30 | cmd := exec.Command(ytdlpPath, args...) 
31 | log.Debug().Str("op", "youtube-music/download").Msgf("Executing yt-dlp command: %s", cmd.String()) 32 | 33 | stdout, err := cmd.StdoutPipe() 34 | if err != nil { 35 | log.Error().Str("op", "youtube-music/download").Err(err).Msg("Error creating stdout pipe") 36 | return fmt.Errorf("error creating stdout pipe: %v", err) 37 | } 38 | stderr, err := cmd.StderrPipe() 39 | if err != nil { 40 | log.Error().Str("op", "youtube-music/download").Err(err).Msg("Error creating stderr pipe") 41 | return fmt.Errorf("error creating stderr pipe: %v", err) 42 | } 43 | if err := cmd.Start(); err != nil { 44 | log.Error().Str("op", "youtube-music/download").Err(err).Msg("Error starting yt-dlp") 45 | return fmt.Errorf("error starting yt-dlp: %v", err) 46 | } 47 | 48 | go processStream(stdout, job.StreamFunc) 49 | go processStream(stderr, job.StreamFunc) 50 | if err := cmd.Wait(); err != nil { 51 | log.Error().Str("op", "youtube-music/download").Err(err).Msg("yt-dlp command failed") 52 | return fmt.Errorf("yt-dlp failed: %v", err) 53 | } 54 | log.Debug().Str("op", "youtube-music/download").Msgf("yt-dlp audio extraction completed for %s", job.URL) 55 | 56 | // Apply metadata if music client is specified 57 | if musicClient, ok := job.Metadata["musicClient"].(string); ok { 58 | musicID := job.Metadata["musicID"].(string) 59 | if job.StreamFunc != nil { 60 | job.StreamFunc(fmt.Sprintf("Fetching metadata from %s...", musicClient)) 61 | } 62 | // Ensure output path ends with .m4a 63 | finalPath := job.OutputPath 64 | if !strings.HasSuffix(finalPath, ".m4a") { 65 | finalPath = tempOutput + ".m4a" 66 | } 67 | log.Debug().Str("op", "youtube-music/download").Msgf("Applying music metadata from %s", musicClient) 68 | err := addMusicMetadata(tempOutput+".m4a", finalPath, musicClient, musicID, job.StreamFunc) 69 | if err != nil { 70 | log.Warn().Str("op", "youtube-music/download").Err(err).Msg("Failed to add metadata") 71 | if job.StreamFunc != nil { 72 | job.StreamFunc(fmt.Sprintf("Warning: 
Failed to add metadata: %v", err)) 73 | } 74 | } 75 | } 76 | log.Info().Str("op", "youtube-music/download").Msgf("YouTube music download completed for %s", job.URL) 77 | return nil 78 | } 79 | 80 | func processStream(reader io.Reader, streamFunc func(string)) { 81 | scanner := bufio.NewScanner(reader) 82 | for scanner.Scan() { 83 | line := strings.TrimSpace(scanner.Text()) 84 | if line != "" && streamFunc != nil { 85 | streamFunc(line) 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/rs/zerolog" 9 | "github.com/rs/zerolog/log" 10 | "github.com/spf13/cobra" 11 | "github.com/tanq16/danzo/internal/utils" 12 | ) 13 | 14 | var DanzoVersion = "dev" 15 | 16 | var ( 17 | // Global flags 18 | proxyURL string 19 | proxyUsername string 20 | proxyPassword string 21 | userAgent string 22 | headers []string 23 | workers int 24 | connections int 25 | debugFlag bool 26 | ) 27 | 28 | // Global HTTP client config that will be passed to subcommands 29 | var globalHTTPConfig utils.HTTPClientConfig 30 | 31 | var rootCmd = &cobra.Command{ 32 | Use: "danzo", 33 | Short: "Danzo is a swiss-army knife CLI download manager", 34 | Version: DanzoVersion, 35 | CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true}, 36 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 37 | // Build global HTTP config from flags 38 | globalHTTPConfig = utils.HTTPClientConfig{ 39 | Jar: nil, 40 | ProxyURL: proxyURL, 41 | ProxyUsername: proxyUsername, 42 | ProxyPassword: proxyPassword, 43 | UserAgent: userAgent, 44 | Headers: utils.ParseHeaderArgs(headers), 45 | } 46 | }, 47 | } 48 | 49 | func setupLogs() { 50 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix 51 | output := zerolog.ConsoleWriter{ 52 | Out: os.Stdout, 53 | TimeFormat: time.DateTime, 54 | 
NoColor: false, // Enable color output 55 | } 56 | log.Logger = zerolog.New(output).With().Timestamp().Logger() 57 | zerolog.SetGlobalLevel(zerolog.Disabled) 58 | if debugFlag { 59 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 60 | utils.GlobalDebugFlag = true 61 | } else { 62 | zerolog.SetGlobalLevel(zerolog.Disabled) 63 | utils.GlobalDebugFlag = false 64 | } 65 | } 66 | 67 | func Execute() { 68 | if err := rootCmd.Execute(); err != nil { 69 | fmt.Fprintln(os.Stderr, err) 70 | os.Exit(1) 71 | } 72 | } 73 | 74 | func init() { 75 | rootCmd.SetHelpCommand(&cobra.Command{Hidden: true}) 76 | rootCmd.PersistentFlags().BoolVar(&debugFlag, "debug", false, "Enable debug logging") 77 | cobra.OnInitialize(setupLogs) 78 | 79 | // Global flags 80 | rootCmd.PersistentFlags().StringVarP(&proxyURL, "proxy", "p", "", "HTTP/HTTPS proxy URL") 81 | rootCmd.PersistentFlags().StringVar(&proxyUsername, "proxy-username", "", "Proxy username") 82 | rootCmd.PersistentFlags().StringVar(&proxyPassword, "proxy-password", "", "Proxy password") 83 | rootCmd.PersistentFlags().StringVarP(&userAgent, "user-agent", "a", "Danzo-CLI", "User agent") 84 | rootCmd.PersistentFlags().StringArrayVarP(&headers, "header", "H", []string{}, "Custom headers") 85 | rootCmd.PersistentFlags().IntVarP(&workers, "workers", "w", 1, "Number of parallel workers") 86 | rootCmd.PersistentFlags().IntVarP(&connections, "connections", "c", 8, "Number of connections per download") 87 | 88 | registerCommands() 89 | fmt.Println() 90 | } 91 | 92 | func registerCommands() { 93 | rootCmd.AddCommand(newCleanCmd()) 94 | rootCmd.AddCommand(newHTTPCmd()) 95 | rootCmd.AddCommand(newM3U8Cmd()) 96 | rootCmd.AddCommand(newS3Cmd()) 97 | rootCmd.AddCommand(newGitCloneCmd()) 98 | rootCmd.AddCommand(newGHReleaseCmd()) 99 | rootCmd.AddCommand(newGDriveCmd()) 100 | rootCmd.AddCommand(newYouTubeCmd()) 101 | rootCmd.AddCommand(newYTMusicCmd()) 102 | rootCmd.AddCommand(newBatchCmd()) 103 | } 104 | 
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tanq16/danzo 2 | 3 | go 1.24.1 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.36.3 7 | github.com/aws/aws-sdk-go-v2/config v1.29.12 8 | github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 9 | github.com/charmbracelet/lipgloss v1.1.0 10 | github.com/go-git/go-git/v5 v5.14.0 11 | github.com/goccy/go-yaml v1.17.1 12 | github.com/google/uuid v1.6.0 13 | github.com/rs/zerolog v1.34.0 14 | github.com/spf13/cobra v1.9.1 15 | golang.org/x/oauth2 v0.28.0 16 | golang.org/x/term v0.29.0 17 | ) 18 | 19 | require ( 20 | cloud.google.com/go/compute/metadata v0.6.0 // indirect 21 | dario.cat/mergo v1.0.0 // indirect 22 | github.com/Microsoft/go-winio v0.6.2 // indirect 23 | github.com/ProtonMail/go-crypto v1.1.5 // indirect 24 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect 25 | github.com/aws/aws-sdk-go-v2/credentials v1.17.65 // indirect 26 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect 27 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect 28 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect 29 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect 30 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect 31 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect 32 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect 33 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect 34 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect 35 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 // indirect 36 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0 // indirect 37 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect 38 | github.com/aws/smithy-go v1.22.2 // 
indirect 39 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect 40 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect 41 | github.com/charmbracelet/x/ansi v0.8.0 // indirect 42 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect 43 | github.com/charmbracelet/x/term v0.2.1 // indirect 44 | github.com/cloudflare/circl v1.6.0 // indirect 45 | github.com/cyphar/filepath-securejoin v0.4.1 // indirect 46 | github.com/emirpasic/gods v1.18.1 // indirect 47 | github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect 48 | github.com/go-git/go-billy/v5 v5.6.2 // indirect 49 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect 50 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 51 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect 52 | github.com/kevinburke/ssh_config v1.2.0 // indirect 53 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 54 | github.com/mattn/go-colorable v0.1.13 // indirect 55 | github.com/mattn/go-isatty v0.0.20 // indirect 56 | github.com/mattn/go-runewidth v0.0.16 // indirect 57 | github.com/muesli/termenv v0.16.0 // indirect 58 | github.com/pjbgf/sha1cd v0.3.2 // indirect 59 | github.com/rivo/uniseg v0.4.7 // indirect 60 | github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect 61 | github.com/skeema/knownhosts v1.3.1 // indirect 62 | github.com/spf13/pflag v1.0.6 // indirect 63 | github.com/xanzy/ssh-agent v0.3.3 // indirect 64 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 65 | golang.org/x/crypto v0.35.0 // indirect 66 | golang.org/x/net v0.35.0 // indirect 67 | golang.org/x/sys v0.31.0 // indirect 68 | gopkg.in/warnings.v0 v0.1.2 // indirect 69 | ) 70 | -------------------------------------------------------------------------------- /internal/downloaders/s3/download.go: -------------------------------------------------------------------------------- 1 
package s3

import (
	"fmt"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/rs/zerolog/log"
	"github.com/tanq16/danzo/internal/utils"
)

// Download dispatches an S3 job to the single-file or folder path based on
// the "fileType" metadata entry. It assumes BuildJob has already populated
// "bucket", "key", "fileType" and "profile"; the unchecked type assertions
// panic otherwise.
func (d *S3Downloader) Download(job *utils.DanzoJob) error {
	bucket := job.Metadata["bucket"].(string)
	key := job.Metadata["key"].(string)
	fileType := job.Metadata["fileType"].(string)
	profile := job.Metadata["profile"].(string)
	s3Client, err := getS3Client(profile)
	if err != nil {
		return fmt.Errorf("error creating S3 client: %v", err)
	}
	if fileType == "folder" {
		log.Info().Str("op", "s3/download").Msgf("Starting folder download for s3://%s/%s", bucket, key)
		return d.downloadFolder(job, bucket, key, s3Client)
	} else {
		log.Info().Str("op", "s3/download").Msgf("Starting file download for s3://%s/%s", bucket, key)
		return d.downloadFile(job, bucket, key, s3Client)
	}
}

// downloadFile downloads one object, forwarding byte increments from the
// transfer loop to job.ProgressFunc via a drainer goroutine.
// "size" comes from BuildJob and is used only for progress reporting.
// NOTE(review): close(progressCh) is deferred to this function's return, so
// the drainer may still be flushing the final updates when we return.
func (d *S3Downloader) downloadFile(job *utils.DanzoJob, bucket, key string, s3Client *S3Client) error {
	size := job.Metadata["size"].(int64)
	progressCh := make(chan int64, 100)
	defer close(progressCh)
	go func() {
		var totalDownloaded int64
		for bytes := range progressCh {
			totalDownloaded += bytes
			if job.ProgressFunc != nil {
				job.ProgressFunc(totalDownloaded, size)
			}
		}
	}()
	return performS3Download(bucket, key, job.OutputPath, s3Client, progressCh)
}

// downloadFolder lists every object under the prefix and downloads them with
// a bounded worker pool (at most job.Connections goroutines). Aggregate
// progress is tracked with an atomic counter shared across workers; only the
// first error is kept (guarded by mu). A worker that fails returns early,
// but the remaining workers keep draining the job channel.
func (d *S3Downloader) downloadFolder(job *utils.DanzoJob, bucket, prefix string, s3Client *S3Client) error {
	objects, err := listS3Objects(bucket, prefix, s3Client)
	if err != nil {
		return fmt.Errorf("error listing objects: %v", err)
	}
	if len(objects) == 0 {
		return fmt.Errorf("no objects found in s3://%s/%s", bucket, prefix)
	}
	log.Debug().Str("op", "s3/download").Msgf("Found %d objects to download in folder", len(objects))
	// Sum sizes up front so per-object progress can be reported against the
	// folder-wide total.
	var totalSize int64
	for _, obj := range objects {
		totalSize += obj.Size
	}

	var totalDownloaded int64
	var mu sync.Mutex
	var downloadErr error
	// Buffered to hold every object, so the senders below never block and
	// the channel can be closed before workers start.
	jobCh := make(chan s3Object, len(objects))
	for _, obj := range objects {
		jobCh <- obj
	}
	close(jobCh)
	numWorkers := min(job.Connections, len(objects))
	log.Debug().Str("op", "s3/download").Msgf("Using %d parallel workers for folder download", numWorkers)

	var wg sync.WaitGroup
	for range numWorkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for obj := range jobCh {
				// Create relative path for output (strip the prefix so the
				// local layout mirrors the S3 key hierarchy).
				relPath := strings.TrimPrefix(obj.Key, prefix)
				relPath = strings.TrimPrefix(relPath, "/")
				outputPath := filepath.Join(job.OutputPath, relPath)
				// Create directory if needed
				if err := createDirectory(filepath.Dir(outputPath)); err != nil {
					mu.Lock()
					if downloadErr == nil {
						downloadErr = fmt.Errorf("error creating directory: %v", err)
					}
					mu.Unlock()
					return
				}
				// Track progress: each object gets its own channel/drainer;
				// increments are folded into the shared atomic counter.
				progressCh := make(chan int64, 100)
				go func(ch <-chan int64) {
					for bytes := range ch {
						downloaded := atomic.AddInt64(&totalDownloaded, bytes)
						if job.ProgressFunc != nil {
							job.ProgressFunc(downloaded, totalSize)
						}
					}
				}(progressCh)

				err := performS3Download(bucket, obj.Key, outputPath, s3Client, progressCh)
				close(progressCh)
				if err != nil {
					mu.Lock()
					if downloadErr == nil {
						downloadErr = fmt.Errorf("error downloading %s: %v", obj.Key, err)
					}
					mu.Unlock()
					return
				}
			}
		}()
	}
	wg.Wait()
	return downloadErr
}
7 | "strings" 8 | 9 | "github.com/rs/zerolog/log" 10 | danzohttp "github.com/tanq16/danzo/internal/downloaders/http" 11 | "github.com/tanq16/danzo/internal/utils" 12 | ) 13 | 14 | func (d *GDriveDownloader) Download(job *utils.DanzoJob) error { 15 | token := job.Metadata["token"].(string) 16 | isFolder := job.Metadata["isFolder"].(bool) 17 | totalSize := job.Metadata["totalSize"].(int64) 18 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 19 | log.Info().Str("op", "google-drive/download").Msgf("downloading gdrive file; isFolder:%v", isFolder) 20 | if isFolder { 21 | return d.downloadFolder(job, token, client, totalSize) 22 | } else { 23 | return d.downloadFile(job, token, client, totalSize) 24 | } 25 | } 26 | 27 | func (d *GDriveDownloader) downloadFile(job *utils.DanzoJob, token string, client *utils.DanzoHTTPClient, totalSize int64) error { 28 | fileID := job.Metadata["fileID"].(string) 29 | progressCh := make(chan int64) 30 | go func() { 31 | var downloaded int64 32 | for bytes := range progressCh { 33 | downloaded += bytes 34 | if job.ProgressFunc != nil { 35 | job.ProgressFunc(downloaded, totalSize) 36 | } 37 | } 38 | }() 39 | config := utils.HTTPDownloadConfig{ 40 | URL: job.URL, 41 | OutputPath: job.OutputPath, 42 | HTTPClientConfig: job.HTTPClientConfig, 43 | } 44 | return performGDriveDownload(config, token, fileID, client, progressCh) 45 | } 46 | 47 | func (d *GDriveDownloader) downloadFolder(job *utils.DanzoJob, token string, client *utils.DanzoHTTPClient, totalSize int64) error { 48 | files := job.Metadata["folderFiles"].([]map[string]any) 49 | if err := os.MkdirAll(job.OutputPath, 0755); err != nil { 50 | return fmt.Errorf("error creating folder: %v", err) 51 | } 52 | var totalDownloaded int64 53 | for _, file := range files { 54 | fileID := file["id"].(string) 55 | fileName := file["name"].(string) 56 | mimeType := file["mimeType"].(string) 57 | // Skip Google Docs files 58 | if strings.HasPrefix(mimeType, "application/vnd.google-apps.") { 
59 | continue 60 | } 61 | outputPath := filepath.Join(job.OutputPath, fileName) 62 | progressCh := make(chan int64) 63 | go func(ch <-chan int64) { 64 | for bytes := range ch { 65 | totalDownloaded += bytes 66 | if job.ProgressFunc != nil { 67 | job.ProgressFunc(totalDownloaded, totalSize) 68 | } 69 | } 70 | }(progressCh) 71 | config := utils.HTTPDownloadConfig{ 72 | URL: fmt.Sprintf("https://drive.google.com/file/d/%s/view", fileID), 73 | OutputPath: outputPath, 74 | HTTPClientConfig: job.HTTPClientConfig, 75 | } 76 | err := performGDriveDownload(config, token, fileID, client, progressCh) 77 | if err != nil { 78 | return fmt.Errorf("error downloading %s: %v", fileName, err) 79 | } 80 | } 81 | return nil 82 | } 83 | 84 | func performGDriveDownload(config utils.HTTPDownloadConfig, token string, fileID string, client *utils.DanzoHTTPClient, progressCh chan<- int64) error { 85 | outputDir := filepath.Dir(config.OutputPath) 86 | if err := os.MkdirAll(outputDir, 0755); err != nil { 87 | return fmt.Errorf("error creating output directory: %v", err) 88 | } 89 | isOAuth := !strings.HasPrefix(token, "AIza") 90 | var downloadURL string 91 | if isOAuth { 92 | downloadURL = fmt.Sprintf("%s/%s?alt=media", driveAPIURL, fileID) 93 | client.SetHeader("Authorization", "Bearer "+token) 94 | } else { 95 | downloadURL = fmt.Sprintf("%s/%s?alt=media&key=%s", driveAPIURL, fileID, token) 96 | } 97 | log.Debug().Str("op", "google-drive/download").Msgf("performing simple http download for %s", downloadURL) 98 | err := danzohttp.PerformSimpleDownload(downloadURL, config.OutputPath, client, progressCh) 99 | if err != nil { 100 | return fmt.Errorf("error downloading Google Drive file: %v", err) 101 | } 102 | return nil 103 | } 104 | -------------------------------------------------------------------------------- /internal/utils/vars.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "errors" 5 | "regexp" 6 | "time" 7 | ) 
package utils

import (
	"errors"
	"regexp"
	"time"
)

// GlobalDebugFlag enables debug behavior app-wide (e.g. the Google Drive
// auth flow skips clearing its terminal prompt when this is set).
var GlobalDebugFlag bool

// Downloader is the contract implemented by every download backend:
// ValidateJob checks user input, BuildJob resolves metadata/credentials,
// and Download performs the actual transfer.
type Downloader interface {
	Download(job *DanzoJob) error
	BuildJob(job *DanzoJob) error
	ValidateJob(job *DanzoJob) error
}

// A single download job for Danzo
type DanzoJob struct {
	ID           string
	JobType      string
	OutputPath   string
	ProgressType string // "progress" (byte counts) or "stream" (log lines) — see cmd/batch.go
	ProgressFunc func(downloaded, total int64)
	StreamFunc   func(line string)
	URL          string
	Connections  int
	// Metadata is per-downloader scratch space (bucket/key, file IDs,
	// tokens, sizes) populated during ValidateJob/BuildJob.
	Metadata         map[string]any
	HTTPClientConfig HTTPClientConfig
	PauseFunc        func() // Request pause for output
	ResumeFunc       func() // Request resume for output
}

// HTTPDownloadConfig carries the parameters for a plain HTTP download.
type HTTPDownloadConfig struct {
	URL              string
	OutputPath       string
	Connections      int
	HTTPClientConfig HTTPClientConfig
}

// HTTPDownloadChunk tracks one byte range of a multi-connection download.
type HTTPDownloadChunk struct {
	ID         int
	StartByte  int64
	EndByte    int64
	Downloaded int64
	Completed  bool
	Retries    int
	LastError  error
	StartTime  time.Time
	FinishTime time.Time
}

// HTTPDownloadJob is the in-flight state of a multi-chunk HTTP download,
// including the temp part-files that get assembled at the end.
type HTTPDownloadJob struct {
	Config    HTTPDownloadConfig
	FileSize  int64
	Chunks    []HTTPDownloadChunk
	StartTime time.Time
	TempFiles []string
}

const DefaultBufferSize = 1024 * 1024 * 8 // 8MB buffer
const LogFile = ".danzo.log"

// ErrRangeRequestsNotSupported signals that a server rejected byte-range
// requests, forcing a single-stream download.
var ErrRangeRequestsNotSupported = errors.New("range requests are not supported")

// ChunkIDRegex extracts the numeric chunk ID from a ".partN" temp filename.
var ChunkIDRegex = regexp.MustCompile(`\.part(\d+)$`)

// Local-only User-Agent list
var userAgents = []string{
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64; rv:135.0) Gecko/20100101 Firefox/135.0",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.3 Safari/605.1.15",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:135.0) Gecko/20100101 Firefox/135.0",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:136.0) Gecko/20100101 Firefox/136.0",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.2 Safari/605.1.15",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36 Edg/132.0.0.0",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.1.1 Safari/605.1.15",
	"Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 YaBrowser/27.7.7.7 Yowser/2.5 Safari/537.36",
	"Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/115.0",
	"curl/7.88.1",
	"Wget/1.21.4",
}
-------------------------------------------------------------------------------- 1 | package gdrive 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strconv" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/tanq16/danzo/internal/utils" 10 | ) 11 | 12 | type GDriveDownloader struct{} 13 | 14 | func (d *GDriveDownloader) ValidateJob(job *utils.DanzoJob) error { 15 | fileID, err := extractFileID(job.URL) 16 | if err != nil { 17 | return err 18 | } 19 | job.Metadata["fileID"] = fileID 20 | 21 | _, hasAPIKey := job.Metadata["apiKey"].(string) 22 | credentialsFile, hasCredsFile := job.Metadata["credentialsFile"].(string) 23 | if !hasAPIKey && !hasCredsFile { 24 | return fmt.Errorf("either --api-key or --credentials must be provided") 25 | } 26 | if hasAPIKey && hasCredsFile { 27 | return fmt.Errorf("only one of --api-key or --credentials can be provided") 28 | } 29 | if hasCredsFile { 30 | if _, err := os.Stat(credentialsFile); err != nil { 31 | return fmt.Errorf("credentials file not found: %v", err) 32 | } 33 | } 34 | log.Info().Str("op", "google-drive/initial").Msgf("job validated for %s", job.URL) 35 | return nil 36 | } 37 | 38 | func (d *GDriveDownloader) BuildJob(job *utils.DanzoJob) error { 39 | fileID := job.Metadata["fileID"].(string) 40 | var token string 41 | var err error 42 | if apiKey, ok := job.Metadata["apiKey"].(string); ok { 43 | log.Debug().Str("op", "google-drive/initial").Msgf("using API key") 44 | token = apiKey 45 | } else if credFile, ok := job.Metadata["credentialsFile"].(string); ok { 46 | job.PauseFunc() 47 | log.Debug().Str("op", "google-drive/initial").Msgf("using credentials file") 48 | token, err = getAccessTokenFromCredentials(credFile) 49 | job.ResumeFunc() 50 | if err != nil { 51 | return fmt.Errorf("error getting OAuth token: %v", err) 52 | } 53 | } 54 | log.Debug().Str("op", "google-drive/initial").Msgf("token retrieved") 55 | job.Metadata["token"] = token 56 | 57 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 58 | metadata, _, 
err := getFileMetadata(job.URL, client, token) 59 | if err != nil { 60 | return fmt.Errorf("error getting metadata: %v", err) 61 | } 62 | log.Debug().Str("op", "google-drive/initial").Msgf("retrieved item metadata") 63 | 64 | // Check if it's a folder 65 | mimeType, _ := metadata["mimeType"].(string) 66 | if mimeType == "application/vnd.google-apps.folder" { 67 | job.Metadata["isFolder"] = true 68 | log.Debug().Str("op", "google-drive/initial").Msgf("detected folder, listing contents") 69 | files, err := listFolderContents(fileID, token, client) 70 | if err != nil { 71 | return fmt.Errorf("error listing folder contents: %v", err) 72 | } 73 | job.Metadata["folderFiles"] = files 74 | var totalSize int64 75 | for _, file := range files { 76 | if size, ok := file["size"].(string); ok { 77 | if sizeInt, err := strconv.ParseInt(size, 10, 64); err == nil { 78 | totalSize += sizeInt 79 | } 80 | } 81 | } 82 | job.Metadata["totalSize"] = totalSize 83 | log.Debug().Str("op", "google-drive/initial").Msgf("recorded total size as %v", totalSize) 84 | if job.OutputPath == "" { 85 | job.OutputPath = metadata["name"].(string) 86 | } 87 | } else { 88 | log.Debug().Str("op", "google-drive/initial").Msgf("detected file") 89 | job.Metadata["isFolder"] = false 90 | if job.OutputPath == "" { 91 | job.OutputPath = metadata["name"].(string) 92 | } 93 | if sizeStr, ok := metadata["size"].(string); ok { 94 | size, _ := strconv.ParseInt(sizeStr, 10, 64) 95 | job.Metadata["totalSize"] = size 96 | } 97 | } 98 | if info, err := os.Stat(job.OutputPath); err == nil { 99 | if job.Metadata["isFolder"].(bool) && info.IsDir() { 100 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 101 | } else if !job.Metadata["isFolder"].(bool) && !info.IsDir() { 102 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 103 | } 104 | } 105 | log.Info().Str("op", "google-drive/initial").Msgf("job built for gdrive %s", fileID) 106 | return nil 107 | } 108 | 
-------------------------------------------------------------------------------- /internal/downloaders/http/simple-downloader.go: -------------------------------------------------------------------------------- 1 | package danzohttp 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "os" 8 | "path/filepath" 9 | "time" 10 | 11 | "github.com/rs/zerolog/log" 12 | "github.com/tanq16/danzo/internal/utils" 13 | ) 14 | 15 | func PerformSimpleDownload(url, outputPath string, client *utils.DanzoHTTPClient, progressCh chan<- int64) error { 16 | defer close(progressCh) 17 | tempDir := filepath.Join(filepath.Dir(outputPath), ".danzo-temp") 18 | if err := os.MkdirAll(tempDir, 0755); err != nil { 19 | return fmt.Errorf("error creating temp directory: %v", err) 20 | } 21 | tempOutputPath := fmt.Sprintf("%s.part", filepath.Join(tempDir, filepath.Base(outputPath))) 22 | maxRetries := 5 23 | var lastErr error 24 | 25 | for retry := range maxRetries { 26 | if retry > 0 { 27 | log.Warn().Str("op", "http/simple-downloader").Msgf("Retrying download for %s (attempt %d/%d)", outputPath, retry+1, maxRetries) 28 | time.Sleep(time.Duration(retry+1) * 500 * time.Millisecond) // Exponential backoff 29 | } 30 | err := downloadAttempt(url, tempOutputPath, client, progressCh) 31 | if err != nil { 32 | lastErr = err 33 | log.Error().Str("op", "http/simple-downloader").Err(err).Msgf("Download attempt %d failed", retry+1) 34 | continue 35 | } 36 | if err := os.Rename(tempOutputPath, outputPath); err != nil { 37 | return fmt.Errorf("error renaming (finalizing) output file: %v", err) 38 | } 39 | log.Info().Str("op", "http/simple-downloader").Msgf("Simple download successful for %s", outputPath) 40 | return nil 41 | } 42 | return fmt.Errorf("download failed after %d retries: %w", maxRetries, lastErr) 43 | } 44 | 45 | func downloadAttempt(url, tempOutputPath string, client *utils.DanzoHTTPClient, progressCh chan<- int64) error { 46 | var resumeOffset int64 = 0 47 | fileMode := os.O_CREATE | 
os.O_WRONLY 48 | if fileInfo, err := os.Stat(tempOutputPath); err == nil { 49 | resumeOffset = fileInfo.Size() 50 | fileMode |= os.O_APPEND 51 | } else { 52 | fileMode |= os.O_TRUNC 53 | } 54 | 55 | outFile, err := os.OpenFile(tempOutputPath, fileMode, 0644) 56 | if err != nil { 57 | return fmt.Errorf("error creating output file: %v", err) 58 | } 59 | defer outFile.Close() 60 | 61 | req, err := http.NewRequest("GET", url, nil) 62 | if err != nil { 63 | return fmt.Errorf("error creating GET request: %v", err) 64 | } 65 | 66 | if resumeOffset > 0 { 67 | req.Header.Set("Range", fmt.Sprintf("bytes=%d-", resumeOffset)) 68 | log.Debug().Str("op", "http/simple-downloader").Msgf("Resuming download from offset %d", resumeOffset) 69 | } 70 | req.Header.Set("Connection", "keep-alive") 71 | resp, err := client.Do(req) 72 | if err != nil { 73 | return fmt.Errorf("error executing GET request: %v", err) 74 | } 75 | defer resp.Body.Close() 76 | 77 | if resumeOffset > 0 { 78 | if resp.StatusCode != http.StatusPartialContent { 79 | log.Warn().Str("op", "http/simple-downloader").Msgf("Server does not support resume (status %d). 
package gdrive

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/rs/zerolog/log"
	"github.com/tanq16/danzo/internal/output"
	"github.com/tanq16/danzo/internal/utils"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"golang.org/x/term"
)

// getAccessTokenFromCredentials turns an OAuth client-secret JSON file into
// a usable access token for the Drive read-only scope, refreshing (and
// re-caching) an expired cached token when a refresh token is available.
func getAccessTokenFromCredentials(credentialsFile string) (string, error) {
	b, err := os.ReadFile(credentialsFile)
	if err != nil {
		return "", fmt.Errorf("unable to read credentials file: %v", err)
	}
	log.Debug().Str("op", "google-drive/auth").Msgf("using credentials from %s", credentialsFile)
	config, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive.readonly")
	if err != nil {
		log.Error().Str("op", "google-drive/auth").Msgf("unable to parse client secret file: %v", err)
		return "", fmt.Errorf("unable to parse client secret file: %v", err)
	}

	// Token cache lives in the working directory (see .gitignore).
	tokenFile := ".danzo-token.json"
	token, err := getOAuthToken(config, tokenFile)
	if err != nil {
		return "", fmt.Errorf("unable to get OAuth token: %v", err)
	}
	if !token.Valid() {
		if token.RefreshToken != "" {
			tokenSource := config.TokenSource(context.Background(), token)
			newToken, err := tokenSource.Token()
			if err != nil {
				return "", fmt.Errorf("unable to refresh token: %v", err)
			}
			token = newToken
			// Save refreshed token (best-effort; failure only logs)
			if err := saveToken(tokenFile, token); err != nil {
				log.Warn().Str("op", "google-drive/auth").Msgf("unable to save refreshed token: %v", err)
			}
		} else {
			return "", errors.New("OAuth token is expired and cannot be refreshed")
		}
	}
	return token.AccessToken, nil
}

// getOAuthToken returns a cached token from tokenFile, or runs the
// interactive device-style flow: print an auth URL, read the pasted code
// from stdin, exchange it, cache the result, then erase the prompt from the
// terminal (unless debug mode is on).
func getOAuthToken(config *oauth2.Config, tokenFile string) (*oauth2.Token, error) {
	token, err := tokenFromFile(tokenFile)
	if err == nil {
		log.Debug().Str("op", "google-drive/auth").Msgf("existing token retrieved")
		return token, nil
	}
	log.Debug().Str("op", "google-drive/auth").Msgf("no existing token retrieved, get new one with OAuth flow")
	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	output.PrintDetail("\nVisit this URL to get the authorization code:\n")
	fmt.Printf("%s\n", authURL)
	output.PrintDetail("\nAfter authorizing, enter the authorization code:")
	var authCode string
	if _, err := fmt.Scan(&authCode); err != nil {
		return nil, fmt.Errorf("unable to read authorization code: %v", err)
	}
	log.Debug().Str("op", "google-drive/auth").Msgf("exchanging scanned auth code for token")
	token, err = config.Exchange(context.Background(), authCode)
	if err != nil {
		return nil, fmt.Errorf("unable to exchange auth code for token: %v", err)
	}
	if err := saveToken(tokenFile, token); err != nil {
		log.Warn().Str("op", "google-drive/auth").Msgf("unable to save new token: %v", err)
	}
	// Estimate how many terminal lines the prompt consumed: 6 fixed lines
	// plus however many rows the auth URL wrapped across, then move the
	// cursor up that many lines and clear below (ANSI \033[<n>A, \033[J) so
	// the sensitive URL/code disappear from the scrollback view.
	clearLength := 6
	clearLength += len(authURL)/getTerminalWidth() + 1
	if !utils.GlobalDebugFlag {
		fmt.Printf("\033[%dA\033[J", clearLength)
	}
	return token, nil
}

// tokenFromFile loads a cached oauth2 token from a JSON file.
func tokenFromFile(file string) (*oauth2.Token, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	token := &oauth2.Token{}
	err = json.NewDecoder(f).Decode(token)
	log.Debug().Str("op", "google-drive/auth").Msgf("token retrieved from file")
	return token, err
}

// saveToken writes the token as JSON with owner-only permissions (0600),
// creating the parent directory (0700) if needed.
func saveToken(file string, token *oauth2.Token) error {
	dir := filepath.Dir(file)
	if dir != "." && dir != "" {
		if err := os.MkdirAll(dir, 0700); err != nil {
			return fmt.Errorf("unable to create token directory: %v", err)
		}
	}
	f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return fmt.Errorf("unable to cache oauth token: %v", err)
	}
	defer f.Close()
	err = json.NewEncoder(f).Encode(token)
	if err != nil {
		return fmt.Errorf("unable to encode token: %v", err)
	}
	return nil
}

// getTerminalWidth reports the current terminal width in columns, used only
// to estimate how many lines the auth URL wrapped across.
func getTerminalWidth() int {
	width, _, err := term.GetSize(int(os.Stdout.Fd()))
	if err != nil || width <= 0 {
		return 80 // Default fallback width
	}
	return width
}
"github.com/aws/aws-sdk-go-v2/aws" 11 | "github.com/aws/aws-sdk-go-v2/config" 12 | "github.com/aws/aws-sdk-go-v2/service/s3" 13 | "github.com/rs/zerolog/log" 14 | "github.com/tanq16/danzo/internal/utils" 15 | ) 16 | 17 | type S3Client struct { 18 | client *s3.Client 19 | } 20 | 21 | type s3Object struct { 22 | Key string 23 | Size int64 24 | } 25 | 26 | func getS3Client(profile string) (*S3Client, error) { 27 | log.Debug().Str("op", "s3/helpers").Msgf("Loading AWS config with profile: %s", profile) 28 | cfg, err := config.LoadDefaultConfig(context.Background(), 29 | config.WithSharedConfigProfile(profile), 30 | config.WithRetryMode("adaptive"), 31 | ) 32 | if err != nil { 33 | return nil, fmt.Errorf("error loading AWS config: %v", err) 34 | } 35 | 36 | return &S3Client{ 37 | client: s3.NewFromConfig(cfg), 38 | }, nil 39 | } 40 | 41 | func getS3ObjectInfo(bucket, key string, client *S3Client) (string, int64, error) { 42 | // Try HEAD request first 43 | log.Debug().Str("op", "s3/helpers").Msgf("Checking if s3://%s/%s is a file", bucket, key) 44 | headObj, err := client.client.HeadObject(context.Background(), &s3.HeadObjectInput{ 45 | Bucket: aws.String(bucket), 46 | Key: aws.String(key), 47 | }) 48 | if err == nil { // It's a file 49 | size := int64(0) 50 | if headObj.ContentLength != nil { 51 | size = *headObj.ContentLength 52 | } 53 | log.Debug().Str("op", "s3/helpers").Msgf("Object is a file with size %d", size) 54 | return "file", size, nil 55 | } 56 | // Check if it's a folder by listing with prefix 57 | log.Debug().Str("op", "s3/helpers").Msgf("Checking if s3://%s/%s is a folder", bucket, key) 58 | result, err := client.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ 59 | Bucket: aws.String(bucket), 60 | Prefix: aws.String(key), 61 | MaxKeys: aws.Int32(1), 62 | }) 63 | if err != nil { 64 | return "", 0, fmt.Errorf("error accessing S3 object: %v", err) 65 | } 66 | if len(result.Contents) > 0 || len(result.CommonPrefixes) > 0 { 67 | 
log.Debug().Str("op", "s3/helpers").Msg("Object is a folder") 68 | return "folder", -1, nil 69 | } 70 | return "", 0, fmt.Errorf("S3 object not found") 71 | } 72 | 73 | func listS3Objects(bucket, prefix string, client *S3Client) ([]s3Object, error) { 74 | log.Debug().Str("op", "s3/helpers").Msgf("Listing objects in s3://%s with prefix %s", bucket, prefix) 75 | var objects []s3Object 76 | paginator := s3.NewListObjectsV2Paginator(client.client, &s3.ListObjectsV2Input{ 77 | Bucket: aws.String(bucket), 78 | Prefix: aws.String(prefix), 79 | }) 80 | for paginator.HasMorePages() { 81 | page, err := paginator.NextPage(context.Background()) 82 | if err != nil { 83 | return nil, fmt.Errorf("error listing objects: %v", err) 84 | } 85 | for _, obj := range page.Contents { 86 | if obj.Key != nil && obj.Size != nil { 87 | // Skip directories (0-byte objects ending with /) 88 | if *obj.Size == 0 && strings.HasSuffix(*obj.Key, "/") { 89 | continue 90 | } 91 | objects = append(objects, s3Object{ 92 | Key: *obj.Key, 93 | Size: *obj.Size, 94 | }) 95 | } 96 | } 97 | } 98 | return objects, nil 99 | } 100 | 101 | func performS3Download(bucket, key, outputPath string, client *S3Client, progressCh chan<- int64) error { 102 | log.Debug().Str("op", "s3/helpers").Msgf("Downloading s3://%s/%s to %s", bucket, key, outputPath) 103 | result, err := client.client.GetObject(context.Background(), &s3.GetObjectInput{ 104 | Bucket: aws.String(bucket), 105 | Key: aws.String(key), 106 | }) 107 | if err != nil { 108 | return fmt.Errorf("error getting object: %v", err) 109 | } 110 | defer result.Body.Close() 111 | file, err := os.Create(outputPath) 112 | if err != nil { 113 | return fmt.Errorf("error creating file: %v", err) 114 | } 115 | defer file.Close() 116 | 117 | // Download with progress 118 | buffer := make([]byte, utils.DefaultBufferSize) 119 | for { 120 | n, err := result.Body.Read(buffer) 121 | if n > 0 { 122 | _, writeErr := file.Write(buffer[:n]) 123 | if writeErr != nil { 124 | return 
fmt.Errorf("error writing file: %v", writeErr) 125 | } 126 | progressCh <- int64(n) 127 | } 128 | if err == io.EOF { 129 | break 130 | } 131 | if err != nil { 132 | return fmt.Errorf("error reading object: %v", err) 133 | } 134 | } 135 | return nil 136 | } 137 | 138 | func directoryExists(path string) (bool, error) { 139 | info, err := os.Stat(path) 140 | if os.IsNotExist(err) { 141 | return false, nil 142 | } 143 | if err != nil { 144 | return false, err 145 | } 146 | return info.IsDir(), nil 147 | } 148 | 149 | func fileExists(path string) (bool, error) { 150 | info, err := os.Stat(path) 151 | if os.IsNotExist(err) { 152 | return false, nil 153 | } 154 | if err != nil { 155 | return false, err 156 | } 157 | return !info.IsDir(), nil 158 | } 159 | 160 | func createDirectory(path string) error { 161 | return os.MkdirAll(path, 0755) 162 | } 163 | -------------------------------------------------------------------------------- /cmd/batch.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "github.com/goccy/go-yaml" 9 | "github.com/rs/zerolog/log" 10 | "github.com/spf13/cobra" 11 | "github.com/tanq16/danzo/internal/scheduler" 12 | "github.com/tanq16/danzo/internal/utils" 13 | ) 14 | 15 | type BatchEntry struct { 16 | OutputPath string `yaml:"op,omitempty"` 17 | Link string `yaml:"link"` 18 | } 19 | 20 | type BatchFile map[string][]BatchEntry 21 | 22 | func newBatchCmd() *cobra.Command { 23 | cmd := &cobra.Command{ 24 | Use: "batch [YAML_FILE] [OPTIONS]", 25 | Short: "Process multiple downloads from a YAML file", 26 | Args: cobra.ExactArgs(1), 27 | Run: func(cmd *cobra.Command, args []string) { 28 | yamlFile := args[0] 29 | data, err := os.ReadFile(yamlFile) 30 | log.Debug().Str("op", "cmd/batch").Msgf("Reading YAML file: %s", yamlFile) 31 | if err != nil { 32 | log.Error().Str("op", "cmd/batch").Msgf("Error reading YAML file: %v", err) 33 | fmt.Fprintf(os.Stderr, 
"Error reading YAML file: %v\n", err) 34 | os.Exit(1) 35 | } 36 | var batchFile BatchFile 37 | if err := yaml.Unmarshal(data, &batchFile); err != nil { 38 | log.Error().Str("op", "cmd/batch").Msgf("Error parsing YAML file: %v", err) 39 | fmt.Fprintf(os.Stderr, "Error parsing YAML file: %v\n", err) 40 | os.Exit(1) 41 | } 42 | jobs := buildJobsFromBatch(batchFile) 43 | if len(jobs) == 0 { 44 | log.Error().Str("op", "cmd/batch").Msgf("No valid jobs found in the batch file") 45 | fmt.Fprintf(os.Stderr, "No valid jobs found in the batch file\n") 46 | os.Exit(1) 47 | } 48 | log.Debug().Str("op", "cmd/batch").Msgf("Starting scheduler with %d jobs", len(jobs)) 49 | scheduler.Run(jobs, workers) 50 | }, 51 | } 52 | return cmd 53 | } 54 | 55 | func buildJobsFromBatch(batchFile BatchFile) []utils.DanzoJob { 56 | var jobs []utils.DanzoJob 57 | for jobType, entries := range batchFile { 58 | normalizedType := normalizeJobType(jobType) 59 | if normalizedType == "" { 60 | log.Warn().Str("op", "cmd/batch").Msgf("Unknown job type '%s', skipping...", jobType) 61 | fmt.Fprintf(os.Stderr, "Warning: Unknown job type '%s', skipping...\n", jobType) 62 | continue 63 | } 64 | for _, entry := range entries { 65 | if entry.Link == "" { 66 | log.Warn().Str("op", "cmd/batch").Msgf("Empty link found in %s section, skipping...", jobType) 67 | fmt.Fprintf(os.Stderr, "Warning: Empty link found in %s section, skipping...\n", jobType) 68 | continue 69 | } 70 | job := utils.DanzoJob{ 71 | JobType: normalizedType, 72 | URL: entry.Link, 73 | OutputPath: entry.OutputPath, 74 | HTTPClientConfig: globalHTTPConfig, 75 | Metadata: make(map[string]any), 76 | } 77 | switch normalizedType { 78 | case "http", "google-drive", "github-release", "live-stream": 79 | job.Connections = connections 80 | job.ProgressType = "progress" 81 | case "s3": 82 | job.Connections = connections 83 | job.ProgressType = "progress" 84 | job.Metadata["profile"] = "default" 85 | case "youtube", "youtube-music", "git-clone": 86 | 
// normalizeJobType maps a user-supplied job-type alias from the batch YAML to
// the canonical job type used by the scheduler's downloader registry.
// Matching is case-insensitive; an unknown alias yields "".
func normalizeJobType(jobType string) string {
	typeMap := map[string]string{
		"http":            "http",
		"https":           "http",
		"s3":              "s3",
		"gdrive":          "google-drive",
		"googledrive":     "google-drive",
		"google-drive":    "google-drive",
		"gitclone":        "git-clone",
		"git-clone":       "git-clone",
		"git":             "git-clone",
		"ghr":             "github-release",
		"ghrelease":       "github-release",
		"gh-release":      "github-release",
		"github":          "github-release",
		"github-release":  "github-release",
		"m3u8":            "live-stream",
		"hls":             "live-stream",
		"http-livestream": "live-stream",
		"live-stream":     "live-stream",
		"youtube":         "youtube",
		"yt":              "youtube",
		// BUG FIX: these previously normalized to "yt-music", but the
		// scheduler's downloaderRegistry and the progress-type switch in
		// buildJobsFromBatch both key on "youtube-music", so every
		// YouTube-Music batch entry failed with "unknown job type".
		"ytm":           "youtube-music",
		"ytmusic":       "youtube-music",
		"youtube-music": "youtube-music",
		"yt-music":      "youtube-music",
	}
	// All keys are lowercase, so a single lowercased lookup is equivalent to
	// the old linear scan that compared both the raw and lowercased input
	// (a missing key returns the zero value "", as before).
	return typeMap[strings.ToLower(jobType)]
}
// chunkedDownload is the per-connection worker of a multi-connection HTTP
// download. It fetches the byte range described by chunk into a dedicated
// temp file ("<output>.partN" under .danzo-temp), resuming from bytes already
// on disk and retrying failed attempts up to 5 times with linear backoff.
// Progress deltas (positive or negative) are sent on progressCh; mutex guards
// the shared job.TempFiles slice; wg is released on return.
func chunkedDownload(job *utils.HTTPDownloadJob, chunk *utils.HTTPDownloadChunk, client *utils.DanzoHTTPClient, wg *sync.WaitGroup, progressCh chan<- int64, mutex *sync.Mutex) {
	defer wg.Done()
	tempDir := filepath.Join(filepath.Dir(job.Config.OutputPath), ".danzo-temp")
	tempFileName := filepath.Join(tempDir, fmt.Sprintf("%s.part%d", filepath.Base(job.Config.OutputPath), chunk.ID))
	expectedSize := chunk.EndByte - chunk.StartByte + 1
	resumeOffset := int64(0)
	// Inspect any leftover temp file from a previous run to decide between
	// "already done", "resume partway", and "discard and restart".
	if fileInfo, err := os.Stat(tempFileName); err == nil {
		resumeOffset = fileInfo.Size()
		if resumeOffset == expectedSize {
			// Chunk is fully present from an earlier run: record it and
			// credit its whole size to the progress channel without
			// re-downloading anything.
			log.Debug().Str("op", "http/multi-chunk-handlers").Msgf("Chunk %d already completed", chunk.ID)
			mutex.Lock()
			job.TempFiles = append(job.TempFiles, tempFileName)
			mutex.Unlock()
			chunk.Downloaded = resumeOffset
			chunk.Completed = true
			progressCh <- resumeOffset
			return
		} else if resumeOffset > 0 && resumeOffset < expectedSize {
			log.Debug().Str("op", "http/multi-chunk-handlers").Msgf("Resuming chunk %d from %d bytes", chunk.ID, resumeOffset)
		} else if chunk.Downloaded > 0 {
			// NOTE(review): this discard branch (on-disk file larger than
			// expected) only fires when chunk.Downloaded > 0; an oversized
			// stale file with chunk.Downloaded == 0 leaves resumeOffset >
			// expectedSize — confirm that case cannot occur.
			os.Remove(tempFileName)
			resumeOffset = 0
		}
	}
	maxRetries := 5
	for retry := range maxRetries {
		if retry > 0 {
			log.Warn().Str("op", "http/multi-chunk-handlers").Msgf("Retrying chunk %d (attempt %d/%d)", chunk.ID, retry+1, maxRetries)
			time.Sleep(time.Duration(retry+1) * 500 * time.Millisecond) // Backoff
			// If the on-disk size disagrees with our byte accounting, drop
			// the partial file and roll the progress counter back to zero.
			if fileInfo, err := os.Stat(tempFileName); err == nil {
				currentSize := fileInfo.Size()
				if currentSize != chunk.Downloaded && chunk.Downloaded > 0 {
					os.Remove(tempFileName)
					progressCh <- -chunk.Downloaded // Subtract from progress
					chunk.Downloaded = 0
					resumeOffset = 0
				}
			}
			// NOTE(review): when the sizes DO agree after a failed attempt,
			// resumeOffset keeps its pre-attempt value instead of being
			// advanced to currentSize, so the next attempt opens with
			// O_TRUNC while chunk.Downloaded still counts the discarded
			// bytes — verify the retry-after-partial-write path.
		}
		if err := downloadSingleChunk(job, chunk, client, tempFileName, progressCh, resumeOffset); err != nil {
			log.Error().Str("op", "http/multi-chunk-handlers").Err(err).Msgf("Failed to download chunk %d", chunk.ID)
			continue
		}
		// On success
		log.Debug().Str("op", "http/multi-chunk-handlers").Msgf("Chunk %d download successful", chunk.ID)
		mutex.Lock()
		job.TempFiles = append(job.TempFiles, tempFileName)
		mutex.Unlock()
		chunk.Completed = true
		return
	}
	log.Error().Str("op", "http/multi-chunk-handlers").Msgf("Chunk %d failed after %d retries", chunk.ID, maxRetries)
}

// downloadSingleChunk performs one attempt at fetching chunk's byte range.
// It appends to the temp file when resumeOffset > 0 (O_APPEND) or truncates
// it otherwise, issues a Range request for [StartByte+resumeOffset, EndByte],
// requires a 206 Partial Content response carrying Content-Range, streams the
// body to disk while reporting progress, and finally verifies both the bytes
// received this session and the chunk's cumulative total.
func downloadSingleChunk(job *utils.HTTPDownloadJob, chunk *utils.HTTPDownloadChunk, client *utils.DanzoHTTPClient, tempFileName string, progressCh chan<- int64, resumeOffset int64) error {
	flag := os.O_WRONLY | os.O_CREATE
	if resumeOffset > 0 {
		flag |= os.O_APPEND
	} else {
		flag |= os.O_TRUNC
	}
	tempFile, err := os.OpenFile(tempFileName, flag, 0644)
	if err != nil {
		return fmt.Errorf("error opening temp file: %v", err)
	}
	defer tempFile.Close()

	startByte := chunk.StartByte + resumeOffset
	rangeHeader := fmt.Sprintf("bytes=%d-%d", startByte, chunk.EndByte)
	req, err := http.NewRequest("GET", job.Config.URL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", rangeHeader)
	req.Header.Set("Connection", "keep-alive")
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Anything other than 206 means the server ignored or rejected the Range
	// request; a plain 200 would silently deliver the whole file, so bail.
	if resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	contentRange := resp.Header.Get("Content-Range")
	if contentRange == "" {
		return errors.New("missing Content-Range header")
	}

	// Credit the already-present resume bytes to the progress channel and to
	// the chunk's running total before streaming the remainder.
	if resumeOffset > 0 {
		progressCh <- resumeOffset
		chunk.Downloaded = resumeOffset
	}
	remainingBytes := chunk.EndByte - startByte + 1
	buffer := make([]byte, utils.DefaultBufferSize)
	newBytes := int64(0)
	for {
		bytesRead, err := resp.Body.Read(buffer)
		if bytesRead > 0 {
			_, writeErr := tempFile.Write(buffer[:bytesRead])
			if writeErr != nil {
				return writeErr
			}
			newBytes += int64(bytesRead)
			chunk.Downloaded += int64(bytesRead)
			progressCh <- int64(bytesRead)
		}
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
	}
	// Two integrity checks: bytes received in this session, and the
	// cumulative chunk total (resume bytes + this session).
	if newBytes != remainingBytes {
		return fmt.Errorf("size mismatch: expected %d remaining bytes, got %d bytes this session", remainingBytes, newBytes)
	}
	totalExpectedSize := chunk.EndByte - chunk.StartByte + 1
	if chunk.Downloaded != totalExpectedSize {
		return fmt.Errorf("total size mismatch: expected %d total bytes, got %d bytes", totalExpectedSize, chunk.Downloaded)
	}
	return nil
}
// PerformMultiDownload downloads config.URL with config.Connections parallel
// range requests, writing each range to a temp part-file under .danzo-temp
// and then stitching the parts into config.OutputPath. progressCh receives
// byte-count deltas and is closed here once every worker has finished (this
// function owns the sending side of the channel).
func PerformMultiDownload(config utils.HTTPDownloadConfig, client *utils.DanzoHTTPClient, fileSize int64, progressCh chan<- int64) error {
	job := utils.HTTPDownloadJob{
		Config:    config,
		FileSize:  fileSize,
		StartTime: time.Now(),
	}
	tempDir := filepath.Join(filepath.Dir(config.OutputPath), ".danzo-temp")
	if err := os.MkdirAll(tempDir, 0755); err != nil {
		return fmt.Errorf("error creating temp directory: %v", err)
	}
	log.Debug().Str("op", "http/multi-downloader").Msgf("Temporary directory created at %s", tempDir)

	// Setup chunks: split [0, fileSize) into Connections contiguous ranges;
	// the final chunk absorbs the remainder so the last byte is fileSize-1.
	mutex := &sync.Mutex{}
	chunkSize := fileSize / int64(config.Connections)
	var currentPosition int64 = 0
	for i := range config.Connections {
		startByte := currentPosition
		endByte := startByte + chunkSize - 1
		if i == config.Connections-1 {
			endByte = fileSize - 1
		}
		if endByte >= fileSize {
			endByte = fileSize - 1
		}
		// Skip degenerate (empty) ranges, which occur when fileSize is
		// smaller than the number of connections.
		if endByte >= startByte {
			job.Chunks = append(job.Chunks, utils.HTTPDownloadChunk{
				ID:        i,
				StartByte: startByte,
				EndByte:   endByte,
			})
		}
		currentPosition = endByte + 1
	}
	log.Debug().Str("op", "http/multi-downloader").Msgf("Created %d chunks for download", len(job.Chunks))

	// Start connection goroutines (one worker per chunk; &job.Chunks[i] so
	// each worker mutates its own element in place).
	var wg sync.WaitGroup
	for i := range job.Chunks {
		wg.Add(1)
		go chunkedDownload(&job, &job.Chunks[i], client, &wg, progressCh, mutex)
	}

	// Wait for all downloads to complete
	log.Debug().Str("op", "http/multi-downloader").Msg("Waiting for all chunks to download")
	wg.Wait()
	close(progressCh)
	allCompleted := true
	var incompleteChunks []int
	for i, chunk := range job.Chunks {
		if !chunk.Completed {
			allCompleted = false
			incompleteChunks = append(incompleteChunks, i)
		}
	}
	if !allCompleted {
		return fmt.Errorf("download incomplete: %d chunks failed: %v", len(incompleteChunks), incompleteChunks)
	}

	// Assemble the file
	log.Info().Str("op", "http/multi-downloader").Msg("All chunks downloaded, assembling file")
	err := assembleFile(job)
	if err != nil {
		return fmt.Errorf("error assembling file: %v", err)
	}
	log.Info().Str("op", "http/multi-downloader").Msg("File assembled successfully")
	return nil
}

// assembleFile concatenates the per-chunk temp files — ordered by the chunk
// ID embedded in their names — into the final output file, verifies that the
// byte counts add up to job.FileSize, and deletes the temp files on success.
func assembleFile(job utils.HTTPDownloadJob) error {
	allChunksCompleted := true
	for _, chunk := range job.Chunks {
		if !chunk.Completed {
			allChunksCompleted = false
		}
	}
	if !allChunksCompleted {
		return errors.New("not all chunks were completed successfully")
	}
	// Sort a copy of the temp-file list by chunk ID so parts are written in
	// byte order regardless of the order workers finished in.
	tempFiles := make([]string, len(job.TempFiles))
	copy(tempFiles, job.TempFiles)
	sort.Slice(tempFiles, func(i, j int) bool {
		idI, errI := extractChunkID(tempFiles[i])
		idJ, errJ := extractChunkID(tempFiles[j])
		if errI != nil || errJ != nil {
			return tempFiles[i] < tempFiles[j] // Fallback to string comparison
		}
		return idI < idJ
	})
	destFile, err := os.Create(job.Config.OutputPath)
	if err != nil {
		return err
	}
	defer destFile.Close()

	var totalWritten int64 = 0
	for _, tempFilePath := range tempFiles {
		tempFile, err := os.Open(tempFilePath)
		if err != nil {
			return fmt.Errorf("error opening chunk file %s: %v", tempFilePath, err)
		}
		fileInfo, err := tempFile.Stat()
		if err != nil {
			tempFile.Close()
			return fmt.Errorf("error getting chunk file info: %v", err)
		}
		chunkSize := fileInfo.Size()
		written, err := io.Copy(destFile, tempFile)
		tempFile.Close() // closed per iteration (not deferred) so fds are not held for all parts
		if err != nil {
			return fmt.Errorf("error copying chunk data: %v", err)
		}
		if written != chunkSize {
			return fmt.Errorf("error: wrote %d bytes but chunk size is %d", written, chunkSize)
		}
		totalWritten += written
	}
	if totalWritten != job.FileSize {
		return fmt.Errorf("error: total written bytes (%d) doesn't match expected file size (%d)", totalWritten, job.FileSize)
	}
	log.Debug().Str("op", "http/multi-downloader").Msgf("Successfully wrote %d bytes to %s", totalWritten, job.Config.OutputPath)

	// Cleanup temporary files (removal errors are intentionally ignored; the
	// assembled output is already complete at this point)
	log.Debug().Str("op", "http/multi-downloader").Msg("Cleaning up temporary chunk files")
	for _, tempFilePath := range tempFiles {
		os.Remove(tempFilePath)
	}
	return nil
}

// extractChunkID pulls the numeric chunk ID out of a ".partN" temp filename
// via the shared utils.ChunkIDRegex; it returns -1 and an error when the
// name does not match the expected pattern.
func extractChunkID(filename string) (int, error) {
	matches := utils.ChunkIDRegex.FindStringSubmatch(filename)
	if len(matches) < 2 {
		return -1, fmt.Errorf("could not extract chunk ID from %s", filename)
	}
	return strconv.Atoi(matches[1])
}
fileID, err := extractFileID(rawURL) 47 | if err != nil { 48 | log.Error().Str("op", "google-drive/helpers").Msgf("error extracting file ID: %v", err) 49 | return nil, "", fmt.Errorf("error extracting file ID: %v", err) 50 | } 51 | log.Debug().Str("op", "google-drive/helpers").Msgf("extracted file ID: %s", fileID) 52 | isOAuth := !strings.HasPrefix(token, "AIza") 53 | var metadataURL string 54 | if isOAuth { 55 | metadataURL = fmt.Sprintf("%s/%s?fields=name,size,mimeType", driveAPIURL, fileID) 56 | } else { 57 | metadataURL = fmt.Sprintf("%s/%s?fields=name,size,mimeType&key=%s", driveAPIURL, fileID, token) 58 | } 59 | 60 | req, err := http.NewRequest("GET", metadataURL, nil) 61 | if err != nil { 62 | return nil, "", fmt.Errorf("error creating metadata request: %v", err) 63 | } 64 | req.Header.Set("Accept", "application/json") 65 | if isOAuth { 66 | req.Header.Set("Authorization", "Bearer "+token) 67 | } 68 | resp, err := client.Do(req) 69 | if err != nil { 70 | return nil, "", fmt.Errorf("error fetching file metadata: %v", err) 71 | } 72 | defer resp.Body.Close() 73 | if resp.StatusCode != http.StatusOK { 74 | log.Error().Str("op", "google-drive/helpers").Msgf("failed to get file metadata, status: %d", resp.StatusCode) 75 | return nil, "", fmt.Errorf("failed to get file metadata, status: %d", resp.StatusCode) 76 | } 77 | var metadata map[string]any 78 | err = json.NewDecoder(resp.Body).Decode(&metadata) 79 | if err != nil { 80 | return nil, "", fmt.Errorf("error parsing metadata response: %v", err) 81 | } 82 | log.Debug().Str("op", "google-drive/helpers").Msgf("file metadata retrieved") 83 | return metadata, fileID, nil 84 | } 85 | 86 | func listFolderContents(folderID, token string, client *utils.DanzoHTTPClient) ([]map[string]any, error) { 87 | var files []map[string]any 88 | pageToken := "" 89 | isOAuth := !strings.HasPrefix(token, "AIza") 90 | log.Debug().Str("op", "google-drive/helpers").Msgf("listing folder contents for %s", folderID) 91 | for { 92 | var url 
string 93 | if isOAuth { 94 | url = fmt.Sprintf("%s?q='%s'+in+parents&fields=nextPageToken,files(id,name,size,mimeType)&pageSize=1000", 95 | driveAPIURL, folderID) 96 | if pageToken != "" { 97 | url += "&pageToken=" + pageToken 98 | } 99 | } else { 100 | url = fmt.Sprintf("%s?q='%s'+in+parents&fields=nextPageToken,files(id,name,size,mimeType)&pageSize=1000&key=%s", 101 | driveAPIURL, folderID, token) 102 | if pageToken != "" { 103 | url += "&pageToken=" + pageToken 104 | } 105 | } 106 | req, err := http.NewRequest("GET", url, nil) 107 | if err != nil { 108 | return nil, err 109 | } 110 | if isOAuth { 111 | req.Header.Set("Authorization", "Bearer "+token) 112 | } 113 | req.Header.Set("Accept", "application/json") 114 | 115 | resp, err := client.Do(req) 116 | if err != nil { 117 | return nil, err 118 | } 119 | defer resp.Body.Close() 120 | if resp.StatusCode != http.StatusOK { 121 | return nil, fmt.Errorf("failed to list folder contents: %d", resp.StatusCode) 122 | } 123 | 124 | var result map[string]any 125 | if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { 126 | return nil, err 127 | } 128 | if items, ok := result["files"].([]any); ok { 129 | log.Debug().Str("op", "google-drive/helpers").Msgf("listing %d items", len(items)) 130 | for _, item := range items { 131 | if fileMap, ok := item.(map[string]any); ok { 132 | files = append(files, fileMap) 133 | } 134 | } 135 | } 136 | 137 | if nextToken, ok := result["nextPageToken"].(string); ok && nextToken != "" { 138 | pageToken = nextToken 139 | log.Debug().Str("op", "google-drive/helpers").Msgf("listing next page") 140 | } else { 141 | break 142 | } 143 | } 144 | return files, nil 145 | } 146 | -------------------------------------------------------------------------------- /internal/downloaders/youtube/initial.go: -------------------------------------------------------------------------------- 1 | package youtube 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "runtime" 9 | 
"strings" 10 | 11 | "github.com/rs/zerolog/log" 12 | "github.com/tanq16/danzo/internal/utils" 13 | ) 14 | 15 | type YouTubeDownloader struct{} 16 | 17 | var ytdlpFormats = map[string]string{ 18 | "best": "bestvideo+bestaudio/best", 19 | "best60": "bestvideo[fps<=60]+bestaudio/best", 20 | "bestmp4": "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", 21 | "decent": "bestvideo[height<=1080]+bestaudio/best", 22 | "decent60": "bestvideo[height<=1080][fps<=60]+bestaudio/best", 23 | "cheap": "bestvideo[height<=720]+bestaudio/best", 24 | "1080p": "bestvideo[height=1080][ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", 25 | "1080p60": "bestvideo[height=1080][fps<=60][ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", 26 | "720p": "bestvideo[height=720][ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", 27 | "480p": "bestvideo[height=480][ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", 28 | "audio": "bestaudio[ext=m4a]/bestaudio", 29 | } 30 | 31 | func (d *YouTubeDownloader) ValidateJob(job *utils.DanzoJob) error { 32 | if !strings.Contains(job.URL, "youtube.com/watch") && 33 | !strings.Contains(job.URL, "youtu.be/") && 34 | !strings.Contains(job.URL, "music.youtube.com") { 35 | return fmt.Errorf("invalid YouTube URL") 36 | } 37 | if format, ok := job.Metadata["format"].(string); ok { 38 | if _, exists := ytdlpFormats[format]; !exists { 39 | return fmt.Errorf("unsupported format: %s", format) 40 | } 41 | } 42 | log.Info().Str("op", "youtube/initial").Msgf("job validated for %s", job.URL) 43 | return nil 44 | } 45 | 46 | func (d *YouTubeDownloader) BuildJob(job *utils.DanzoJob) error { 47 | format, ok := job.Metadata["format"].(string) 48 | if !ok || format == "" { 49 | format = "decent" 50 | job.Metadata["format"] = format 51 | } 52 | job.Metadata["ytdlpFormat"] = ytdlpFormats[format] 53 | log.Debug().Str("op", "youtube/initial").Msgf("Using format key '%s' for yt-dlp format '%s'", format, ytdlpFormats[format]) 54 | 55 | ytdlpPath, err := EnsureYtdlp() 56 | if err != nil { 57 | return 
fmt.Errorf("error ensuring yt-dlp: %v", err) 58 | } 59 | job.Metadata["ytdlpPath"] = ytdlpPath 60 | log.Debug().Str("op", "youtube/initial").Msgf("Using yt-dlp at: %s", ytdlpPath) 61 | 62 | ffmpegPath, err := EnsureFFmpeg() 63 | if err != nil { 64 | return fmt.Errorf("error ensuring ffmpeg: %v", err) 65 | } 66 | job.Metadata["ffmpegPath"] = ffmpegPath 67 | log.Debug().Str("op", "youtube/initial").Msgf("Using ffmpeg at: %s", ffmpegPath) 68 | 69 | ffprobePath, err := ensureFFprobe() 70 | if err != nil { 71 | return fmt.Errorf("error ensuring ffprobe: %v", err) 72 | } 73 | job.Metadata["ffprobePath"] = ffprobePath 74 | log.Debug().Str("op", "youtube/initial").Msgf("Using ffprobe at: %s", ffprobePath) 75 | 76 | if job.OutputPath == "" { 77 | job.OutputPath = "%(title)s.%(ext)s" 78 | } 79 | log.Info().Str("op", "youtube/initial").Msgf("job built for %s", job.URL) 80 | return nil 81 | } 82 | 83 | func EnsureYtdlp() (string, error) { 84 | path, err := exec.LookPath("yt-dlp") 85 | if err == nil { 86 | log.Debug().Str("op", "youtube/initial").Msgf("yt-dlp found in PATH: %s", path) 87 | return path, nil 88 | } 89 | execDir, err := os.Executable() 90 | if err == nil { 91 | ytdlpPath := filepath.Join(filepath.Dir(execDir), "yt-dlp") 92 | if runtime.GOOS == "windows" { 93 | ytdlpPath += ".exe" 94 | } 95 | if _, err := os.Stat(ytdlpPath); err == nil { 96 | log.Debug().Str("op", "youtube/initial").Msgf("yt-dlp found in executable directory: %s", ytdlpPath) 97 | return ytdlpPath, nil 98 | } 99 | } 100 | log.Warn().Str("op", "youtube/initial").Msg("yt-dlp not found, attempting download") 101 | return downloadYtdlp() 102 | } 103 | 104 | func EnsureFFmpeg() (string, error) { 105 | path, err := exec.LookPath("ffmpeg") 106 | if err == nil { 107 | log.Debug().Str("op", "youtube/initial").Msgf("ffmpeg found in PATH: %s", path) 108 | return path, nil 109 | } 110 | execDir, err := os.Executable() 111 | if err == nil { 112 | ffmpegPath := filepath.Join(filepath.Dir(execDir), "ffmpeg") 113 | 
if runtime.GOOS == "windows" { 114 | ffmpegPath += ".exe" 115 | } 116 | if _, err := os.Stat(ffmpegPath); err == nil { 117 | log.Debug().Str("op", "youtube/initial").Msgf("ffmpeg found in executable directory: %s", ffmpegPath) 118 | return ffmpegPath, nil 119 | } 120 | } 121 | log.Error().Str("op", "youtube/initial").Msg("ffmpeg not found in PATH or executable directory. Please install it.") 122 | return "", fmt.Errorf("ffmpeg not found in PATH, please install manually") 123 | } 124 | 125 | func ensureFFprobe() (string, error) { 126 | path, err := exec.LookPath("ffprobe") 127 | if err == nil { 128 | log.Debug().Str("op", "youtube/initial").Msgf("ffprobe found in PATH: %s", path) 129 | return path, nil 130 | } 131 | execDir, err := os.Executable() 132 | if err == nil { 133 | ffprobePath := filepath.Join(filepath.Dir(execDir), "ffprobe") 134 | if runtime.GOOS == "windows" { 135 | ffprobePath += ".exe" 136 | } 137 | if _, err := os.Stat(ffprobePath); err == nil { 138 | log.Debug().Str("op", "youtube/initial").Msgf("ffprobe found in executable directory: %s", ffprobePath) 139 | return ffprobePath, nil 140 | } 141 | } 142 | log.Error().Str("op", "youtube/initial").Msg("ffprobe not found in PATH or executable directory. 
// Scheduler drives a pool of worker goroutines that pull DanzoJobs from a
// channel, dispatching each to the matching downloader and reporting status
// through the shared output manager. Pause/resume of the display is only
// wired up when exactly one job is being run.
type Scheduler struct {
	outputMgr       *output.Manager
	pauseRequestCh  chan struct{}
	resumeRequestCh chan struct{}
	singleJobMode   bool
}

// downloaderRegistry maps canonical job-type strings (as produced by the cmd
// layer) to their downloader implementations.
var downloaderRegistry = map[string]utils.Downloader{
	"http":           &httpDownloader.HTTPDownloader{},
	"s3":             &s3.S3Downloader{},
	"google-drive":   &gdrive.GDriveDownloader{},
	"git-clone":      &gitclone.GitCloneDownloader{},
	"github-release": &ghrelease.GitReleaseDownloader{},
	"live-stream":    &m3u8.M3U8Downloader{},
	"youtube":        &youtube.YouTubeDownloader{},
	"youtube-music":  &youtubemusic.YTMusicDownloader{},
}

// Run executes jobs on numWorkers concurrent workers and blocks until all of
// them finish. When every job succeeded, utils.CleanFunction is invoked on
// each recorded output path.
func Run(jobs []utils.DanzoJob, numWorkers int) {
	s := &Scheduler{
		outputMgr:       output.NewManager(),
		pauseRequestCh:  make(chan struct{}),
		resumeRequestCh: make(chan struct{}),
		singleJobMode:   len(jobs) == 1,
	}
	log.Debug().Str("op", "scheduler").Msgf("Starting output manager")
	s.outputMgr.StartDisplay()
	defer s.outputMgr.StopDisplay()

	// Shared across workers; mu guards both outputDirs and allSuccessful.
	outputDirs := make(map[string]bool)
	allSuccessful := true
	var mu sync.Mutex
	if s.singleJobMode {
		// NOTE(review): this goroutine has no stop signal and runs until
		// process exit; acceptable for a CLI run, but confirm intended.
		go s.handlePauseResume()
	}

	// Buffered to len(jobs) so all sends complete without a consumer; the
	// closed channel then acts as the work queue.
	log.Debug().Str("op", "scheduler").Msgf("Send jobs to pipeline")
	jobCh := make(chan utils.DanzoJob, len(jobs))
	for _, job := range jobs {
		jobCh <- job
	}
	close(jobCh)

	log.Debug().Str("op", "scheduler").Msgf("Start %d workers", numWorkers)
	var wg sync.WaitGroup
	for range numWorkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.processJobs(jobCh, &outputDirs, &allSuccessful, &mu)
		}()
	}

	wg.Wait()
	log.Debug().Str("op", "scheduler").Msgf("All workers done")
	if allSuccessful {
		log.Debug().Str("op", "scheduler").Msgf("Clean up output dirs after successful jobs")
		for dir := range outputDirs {
			utils.CleanFunction(dir)
		}
	} else {
		log.Error().Str("op", "scheduler").Msgf("Not all jobs were successful")
	}
}

// handlePauseResume forwards pause/resume requests from job callbacks to the
// output manager. It loops forever; see the NOTE in Run about its lifetime.
func (s *Scheduler) handlePauseResume() {
	for {
		select {
		case <-s.pauseRequestCh:
			s.outputMgr.Pause()
		case <-s.resumeRequestCh:
			s.outputMgr.Resume()
		}
	}
}

// processJobs is the worker loop: for each job it looks up the downloader,
// then runs the Validate -> Build -> Download pipeline, reporting progress
// and errors through the output manager. outputDirs and allSuccessful are
// shared with the other workers and must only be touched under mu.
//
// NOTE(review): only a *download* failure flips allSuccessful to false;
// validation and build failures leave it true, so Run still performs its
// cleanup pass — confirm this asymmetry is intended.
func (s *Scheduler) processJobs(jobCh <-chan utils.DanzoJob, outputDirs *map[string]bool, allSuccessful *bool, mu *sync.Mutex) {
	for job := range jobCh {
		log.Debug().Str("op", "scheduler/processJobs").Msgf("Processing job %s", job.OutputPath)
		funcID := s.outputMgr.RegisterFunction(job.OutputPath)
		downloader, exists := downloaderRegistry[job.JobType]
		if !exists {
			s.outputMgr.ReportError(funcID, fmt.Errorf("unknown job type: %s", job.JobType))
			s.outputMgr.SetMessage(funcID, fmt.Sprintf("Error: Unknown job type %s", job.JobType))
			continue
		}

		log.Debug().Str("op", "scheduler/processJobs").Msgf("Downloader found for %s", job.JobType)
		s.outputMgr.SetStatus(funcID, "pending")
		s.outputMgr.SetMessage(funcID, fmt.Sprintf("Validating %s job", job.JobType))
		log.Debug().Str("op", "scheduler/processJobs").Msgf("Validating job %s", job.OutputPath)
		err := downloader.ValidateJob(&job)
		if err != nil {
			log.Error().Str("op", "scheduler/processJobs").Msgf("Validation failed for %s", job.OutputPath)
			s.outputMgr.ReportError(funcID, fmt.Errorf("validation failed: %v", err))
			s.outputMgr.SetMessage(funcID, fmt.Sprintf("Validation failed for %s", job.OutputPath))
			continue
		}

		log.Info().Str("op", "scheduler/processJobs").Msgf("Preparing job %s", job.OutputPath)
		s.outputMgr.SetMessage(funcID, fmt.Sprintf("Preparing %s job", job.JobType))
		if s.singleJobMode {
			job.PauseFunc = func() { s.pauseRequestCh <- struct{}{} }
			job.ResumeFunc = func() { s.resumeRequestCh <- struct{}{} }
		}
		err = downloader.BuildJob(&job)
		if err != nil {
			log.Error().Str("op", "scheduler/processJobs").Msgf("Build failed for %s", job.OutputPath)
			// "Already exists" is treated as success, not an error.
			// NOTE(review): matching on the error *string* is brittle —
			// a sentinel error compared with errors.Is would be safer.
			if err.Error() == "file already exists with same size" {
				s.outputMgr.SetStatus(funcID, "success")
				s.outputMgr.SetMessage(funcID, fmt.Sprintf("File already exists: %s", job.OutputPath))
				s.outputMgr.Complete(funcID, "")
				continue
			}
			s.outputMgr.ReportError(funcID, fmt.Errorf("build failed: %v", err))
			s.outputMgr.SetMessage(funcID, fmt.Sprintf("Build failed for %s", job.OutputPath))
			continue
		}

		// Wire the job's progress reporting style into the output manager:
		// byte-count progress bars vs. raw streamed output lines.
		log.Debug().Str("op", "scheduler/processJobs").Msgf("Setting progress type for %s", job.OutputPath)
		switch job.ProgressType {
		case "progress":
			job.ProgressFunc = func(downloaded, total int64) {
				if total > 0 {
					s.outputMgr.AddProgressBarToStream(funcID, downloaded, total)
				}
			}
		case "stream":
			job.StreamFunc = func(line string) {
				s.outputMgr.AddStreamLine(funcID, line)
			}
		}

		log.Info().Str("op", "scheduler/processJobs").Msgf("Performing download for %s", job.OutputPath)
		s.outputMgr.SetMessage(funcID, fmt.Sprintf("Downloading %s", job.OutputPath))
		err = downloader.Download(&job)
		if err != nil {
			log.Error().Str("op", "scheduler/processJobs").Msgf("Download failed for %s", job.OutputPath)
			mu.Lock()
			*allSuccessful = false
			mu.Unlock()
			s.outputMgr.ReportError(funcID, fmt.Errorf("download failed: %v", err))
			s.outputMgr.SetMessage(funcID, fmt.Sprintf("Download failed for %s", job.OutputPath))
			continue
		}
		log.Info().Str("op", "scheduler/processJobs").Msgf("Download completed for %s", job.OutputPath)
		mu.Lock()
		(*outputDirs)[job.OutputPath] = true
		mu.Unlock()

		s.outputMgr.Complete(funcID, fmt.Sprintf("Completed %s", job.OutputPath))
	}
}
// assetSelectMapFallback holds looser, single-token hints per platform, tried
// by callers when the precise os-arch pairs in assetSelectMap match nothing.
var assetSelectMapFallback = map[string][]string{
	"linuxamd64":   {"linux", "gnu", "x86-64", "x86_64", "amd64", "amd"},
	"linuxarm64":   {"linux", "gnu", "arm", "arm64"},
	"windowsamd64": {"exe", "x86-64", "x86_64", "amd64", "amd"},
	"windowsarm64": {"exe", "arm", "arm64"},
	"darwinamd64":  {"darwin", "apple", "x86-64", "x86_64", "amd64", "amd"},
	"darwinarm64":  {"darwin", "apple", "arm", "arm64"},
}

// repoPatterns are the accepted spellings of a GitHub repository reference:
// a full https URL, a bare github.com path, or an "owner/repo" shorthand.
var repoPatterns = []*regexp.Regexp{
	regexp.MustCompile(`^https?://github\.com/([^/]+)/([^/]+)/?.*$`),
	regexp.MustCompile(`^github\.com/([^/]+)/([^/]+)/?.*$`),
	regexp.MustCompile(`^([^/]+)/([^/]+)$`),
}

// ignoredAssets lists lowercase substrings of release-asset names that should
// never be auto-selected for download.
var ignoredAssets = []string{
	"license", "readme", "changelog", "checksums", "sha256checksum", ".sha256",
}

// parseGitHubURL extracts the owner and repository name from a GitHub URL or
// an "owner/repo" shorthand. Surrounding whitespace and one trailing slash
// are ignored; an unrecognized spelling yields an error.
func parseGitHubURL(url string) (string, string, error) {
	cleaned := strings.TrimSuffix(strings.TrimSpace(url), "/")
	for _, re := range repoPatterns {
		m := re.FindStringSubmatch(cleaned)
		if len(m) < 3 {
			continue
		}
		return m[1], m[2], nil
	}
	return "", "", fmt.Errorf("invalid GitHub repository format: %s", cleaned)
}
apiURL, nil) 60 | if err != nil { 61 | log.Error().Str("op", "github-release/helpers").Msgf("error creating API request: %v", err) 62 | return nil, "", fmt.Errorf("error creating API request: %v", err) 63 | } 64 | resp, err := client.Do(req) 65 | if err != nil { 66 | log.Error().Str("op", "github-release/helpers").Msgf("error making API request: %v", err) 67 | return nil, "", fmt.Errorf("error making API request: %v", err) 68 | } 69 | defer resp.Body.Close() 70 | if resp.StatusCode != http.StatusOK { 71 | log.Error().Str("op", "github-release/helpers").Msgf("API request failed with status code: %d", resp.StatusCode) 72 | return nil, "", fmt.Errorf("API request failed with status code: %d", resp.StatusCode) 73 | } 74 | log.Debug().Str("op", "github-release/helpers").Msg("API request successful") 75 | 76 | var release map[string]any 77 | if err := json.NewDecoder(resp.Body).Decode(&release); err != nil { 78 | return nil, "", fmt.Errorf("error decoding API response: %v", err) 79 | } 80 | tagName, _ := release["tag_name"].(string) 81 | assets, ok := release["assets"].([]any) 82 | if !ok { 83 | log.Warn().Str("op", "github-release/helpers").Msg("no assets found in the release") 84 | return nil, "", fmt.Errorf("no assets found in the release") 85 | } 86 | log.Info().Str("op", "github-release/helpers").Msgf("found %d assets in the release", len(assets)) 87 | var assetList []map[string]any 88 | for _, asset := range assets { 89 | assetMap, ok := asset.(map[string]any) 90 | if ok { 91 | assetList = append(assetList, assetMap) 92 | } 93 | } 94 | if len(assetList) == 0 { 95 | return nil, "", fmt.Errorf("no assets found in the release") 96 | } 97 | return assetList, tagName, nil 98 | } 99 | 100 | func promptGitHubAssetSelection(assets []map[string]any, tagName string) (string, int64, error) { 101 | fmt.Printf("Release: %s\nAvailable assets:\n", tagName) 102 | for i, asset := range assets { 103 | name, _ := asset["name"].(string) 104 | size, _ := asset["size"].(float64) 105 | 
fmt.Printf("%d. %s (%.2f MB)\n", i+1, name, float64(size)/1024/1024) 106 | } 107 | fmt.Print("\nEnter the number of the asset to download: ") 108 | reader := bufio.NewReader(os.Stdin) 109 | input, err := reader.ReadString('\n') 110 | if err != nil { 111 | return "", 0, fmt.Errorf("error reading input: %v", err) 112 | } 113 | 114 | input = strings.TrimSpace(input) 115 | selection, err := strconv.Atoi(input) 116 | if err != nil { 117 | return "", 0, fmt.Errorf("invalid selection: %v", err) 118 | } 119 | if selection < 1 || selection > len(assets) { 120 | return "", 0, fmt.Errorf("selection out of range") 121 | } 122 | linesUsed := len(assets) + 4 // Assets list + Release line + Prompt line + Input line + newline 123 | fmt.Printf("\033[%dA\033[J", linesUsed) 124 | log.Info().Str("op", "github-release/helpers").Msgf("selected asset: %s", assets[selection-1]["name"]) 125 | 126 | selectedAsset := assets[selection-1] 127 | downloadURL, _ := selectedAsset["browser_download_url"].(string) 128 | size, _ := selectedAsset["size"].(float64) 129 | return downloadURL, int64(size), nil 130 | } 131 | 132 | func selectGitHubLatestAsset(assets []map[string]any) (string, int64, error) { 133 | platformKey := fmt.Sprintf("%s%s", runtime.GOOS, runtime.GOARCH) 134 | for _, asset := range assets { 135 | assetName, _ := asset["name"].(string) 136 | assetNameLower := strings.ToLower(assetName) 137 | isIgnored := false 138 | for _, ignored := range ignoredAssets { 139 | if strings.Contains(assetNameLower, ignored) { 140 | isIgnored = true 141 | break 142 | } 143 | } 144 | if isIgnored { 145 | continue 146 | } 147 | for _, key := range assetSelectMap[platformKey] { 148 | if strings.Contains(assetNameLower, key) { 149 | downloadURL, _ := asset["browser_download_url"].(string) 150 | size, _ := asset["size"].(float64) 151 | return downloadURL, int64(size), nil 152 | } 153 | } 154 | } 155 | return "", 0, nil 156 | } 157 | 
-------------------------------------------------------------------------------- /internal/downloaders/http/initial.go: -------------------------------------------------------------------------------- 1 | package danzohttp 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "mime" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "regexp" 11 | "strconv" 12 | "strings" 13 | "time" 14 | 15 | "github.com/rs/zerolog/log" 16 | "github.com/tanq16/danzo/internal/utils" 17 | ) 18 | 19 | type HTTPDownloader struct{} 20 | 21 | func (d *HTTPDownloader) ValidateJob(job *utils.DanzoJob) error { 22 | parsedURL, err := url.Parse(job.URL) 23 | if err != nil { 24 | return fmt.Errorf("invalid URL: %v", err) 25 | } 26 | if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { 27 | return fmt.Errorf("unsupported scheme: %s", parsedURL.Scheme) 28 | } 29 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 30 | req, err := http.NewRequest("HEAD", job.URL, nil) 31 | if err != nil { 32 | return fmt.Errorf("error creating request: %v", err) 33 | } 34 | log.Debug().Str("op", "http/initial").Msgf("Sending HEAD request to %s", job.URL) 35 | resp, err := client.Do(req) 36 | if err != nil { 37 | return fmt.Errorf("error checking URL: %v", err) 38 | } 39 | defer resp.Body.Close() 40 | if resp.StatusCode == http.StatusMovedPermanently || resp.StatusCode == http.StatusFound { 41 | if location := resp.Header.Get("Location"); location != "" { 42 | log.Debug().Str("op", "http/initial").Msgf("URL redirected to %s", location) 43 | job.URL = location 44 | } 45 | } else if resp.StatusCode == http.StatusNotFound { 46 | return fmt.Errorf("URL not found (404)") 47 | } else if resp.StatusCode >= 400 { 48 | return fmt.Errorf("server returned error: %d", resp.StatusCode) 49 | } 50 | log.Info().Str("op", "http/initial").Msgf("job validated for %s", job.URL) 51 | return nil 52 | } 53 | 54 | func (d *HTTPDownloader) BuildJob(job *utils.DanzoJob) error { 55 | job.HTTPClientConfig.HighThreadMode = job.Connections > 5 
56 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 57 | fileSize, fileName, err := getFileInfo(job.URL, client) 58 | if err != nil && err != utils.ErrRangeRequestsNotSupported { 59 | return fmt.Errorf("error getting file info: %v", err) 60 | } 61 | log.Debug().Str("op", "http/initial").Msgf("File info retrieved: size=%d, name=%s, rangeSupported=%v", fileSize, fileName, err != utils.ErrRangeRequestsNotSupported) 62 | 63 | if job.OutputPath == "" && fileName != "" { 64 | job.OutputPath = fileName 65 | } else if job.OutputPath == "" { 66 | parsedURL, _ := url.Parse(job.URL) 67 | pathParts := strings.Split(parsedURL.Path, "/") 68 | job.OutputPath = pathParts[len(pathParts)-1] 69 | if job.OutputPath == "" { 70 | job.OutputPath = "download" 71 | } 72 | } 73 | 74 | if existingFile, err := os.Stat(job.OutputPath); err == nil { 75 | if fileSize > 0 && existingFile.Size() == fileSize { 76 | return fmt.Errorf("file already exists with same size") 77 | } 78 | job.OutputPath = utils.RenewOutputPath(job.OutputPath) 79 | log.Debug().Str("op", "http/initial").Msgf("Output path renewed to %s", job.OutputPath) 80 | } 81 | job.Metadata["fileSize"] = fileSize 82 | job.Metadata["rangeSupported"] = err != utils.ErrRangeRequestsNotSupported 83 | log.Info().Str("op", "http/initial").Msgf("job built for %s", job.URL) 84 | return nil 85 | } 86 | 87 | func (d *HTTPDownloader) Download(job *utils.DanzoJob) error { 88 | client := utils.NewDanzoHTTPClient(job.HTTPClientConfig) 89 | fileSize, _ := job.Metadata["fileSize"].(int64) 90 | rangeSupported, _ := job.Metadata["rangeSupported"].(bool) 91 | progressCh := make(chan int64, 100) 92 | progressDone := make(chan struct{}) 93 | startTime := time.Now() 94 | 95 | go func() { 96 | defer close(progressDone) 97 | var totalDownloaded int64 98 | var lastUpdate time.Time 99 | var lastBytes int64 100 | ticker := time.NewTicker(100 * time.Millisecond) 101 | defer ticker.Stop() 102 | for { 103 | select { 104 | case bytes, ok := <-progressCh: 105 
| if !ok { 106 | if job.ProgressFunc != nil { 107 | job.ProgressFunc(totalDownloaded, fileSize) 108 | } 109 | return 110 | } 111 | totalDownloaded += bytes 112 | 113 | case <-ticker.C: 114 | if totalDownloaded > lastBytes { 115 | if job.ProgressFunc != nil { 116 | job.ProgressFunc(totalDownloaded, fileSize) 117 | } 118 | elapsed := time.Since(lastUpdate).Seconds() 119 | if elapsed > 0 { 120 | speed := float64(totalDownloaded-lastBytes) / elapsed 121 | job.Metadata["downloadSpeed"] = speed 122 | job.Metadata["elapsedTime"] = time.Since(startTime).Seconds() 123 | } 124 | lastUpdate = time.Now() 125 | lastBytes = totalDownloaded 126 | } 127 | } 128 | } 129 | }() 130 | 131 | var err error 132 | if !rangeSupported || job.Connections == 1 { 133 | log.Debug().Str("op", "http/initial").Msg("Using simple downloader (range not supported or 1 connection)") 134 | err = PerformSimpleDownload(job.URL, job.OutputPath, client, progressCh) 135 | } else if fileSize/int64(job.Connections) < 2*utils.DefaultBufferSize { 136 | log.Debug().Str("op", "http/initial").Msg("Using simple downloader (chunk size too small)") 137 | err = PerformSimpleDownload(job.URL, job.OutputPath, client, progressCh) 138 | } else { 139 | log.Debug().Str("op", "http/initial").Msg("Using multi-chunk downloader") 140 | config := utils.HTTPDownloadConfig{ 141 | URL: job.URL, 142 | OutputPath: job.OutputPath, 143 | Connections: job.Connections, 144 | HTTPClientConfig: job.HTTPClientConfig, 145 | } 146 | err = PerformMultiDownload(config, client, fileSize, progressCh) 147 | } 148 | 149 | // Close progress channel and wait for final update 150 | // close(progressCh) 151 | <-progressDone 152 | 153 | job.Metadata["totalDownloaded"] = fileSize 154 | job.Metadata["totalTime"] = time.Since(startTime).Seconds() 155 | return err 156 | } 157 | 158 | func getFileInfo(link string, client *utils.DanzoHTTPClient) (int64, string, error) { 159 | req, err := http.NewRequest("HEAD", link, nil) 160 | if err != nil { 161 | return 0, 
"", err 162 | } 163 | resp, err := client.Do(req) 164 | if err != nil { 165 | return 0, "", err 166 | } 167 | defer resp.Body.Close() 168 | filename := "" 169 | filenameRegex := regexp.MustCompile(`[^a-zA-Z0-9_\-\. ]+`) 170 | if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { 171 | if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { 172 | if fn, ok := params["filename"]; ok && fn != "" { 173 | filename = filenameRegex.ReplaceAllString(fn, "_") 174 | } else if fn, ok := params["filename*"]; ok && fn != "" { 175 | if strings.HasPrefix(fn, "UTF-8''") { 176 | unescaped, _ := url.PathUnescape(strings.TrimPrefix(fn, "UTF-8''")) 177 | filename = filenameRegex.ReplaceAllString(unescaped, "_") 178 | } 179 | } 180 | } 181 | } 182 | if resp.Header.Get("Accept-Ranges") != "bytes" { 183 | return 0, filename, utils.ErrRangeRequestsNotSupported 184 | } 185 | contentLength := resp.Header.Get("Content-Length") 186 | if contentLength == "" { 187 | return 0, filename, errors.New("server didn't provide Content-Length header") 188 | } 189 | size, err := strconv.ParseInt(contentLength, 10, 64) 190 | if err != nil { 191 | return 0, filename, err 192 | } 193 | if size <= 0 { 194 | return 0, filename, errors.New("invalid file size reported by server") 195 | } 196 | return size, filename, nil 197 | } 198 | -------------------------------------------------------------------------------- /internal/downloaders/live-stream/extractors.go: -------------------------------------------------------------------------------- 1 | package m3u8 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "maps" 8 | "net/http" 9 | "net/http/cookiejar" 10 | "regexp" 11 | "strings" 12 | 13 | "github.com/rs/zerolog/log" 14 | "github.com/tanq16/danzo/internal/utils" 15 | ) 16 | 17 | // JSON response from Rumble embedJS endpoint 18 | type RumbleJSResponse struct { 19 | U struct { 20 | HLS struct { 21 | URL string `json:"url"` 22 | } `json:"hls"` 23 
| } `json:"u"` 24 | UA struct { 25 | HLS map[string]struct { 26 | URL string `json:"url"` 27 | } `json:"hls"` 28 | } `json:"ua"` 29 | } 30 | 31 | // JSON response from Dailymotion metadata endpoint 32 | type DailymotionMetadata struct { 33 | Qualities map[string][]struct { 34 | Type string `json:"type"` 35 | URL string `json:"url"` 36 | } `json:"qualities"` 37 | } 38 | 39 | func runExtractor(job *utils.DanzoJob) error { 40 | extractor, _ := job.Metadata["extract"].(string) 41 | switch strings.ToLower(extractor) { 42 | case "rumble": 43 | return extractRumbleURL(job) 44 | case "dailymotion": 45 | return extractDailymotionURL(job) 46 | default: 47 | return nil 48 | } 49 | } 50 | 51 | func extractRumbleURL(job *utils.DanzoJob) error { 52 | log.Debug().Str("op", "live-stream/extractor").Msgf("Extracting Rumble URL from %s", job.URL) 53 | videoID, err := getRumbleVideoID(job.URL) 54 | if err != nil { 55 | return err 56 | } 57 | log.Debug().Str("op", "live-stream/extractor").Msgf("Found Rumble video ID: %s", videoID) 58 | m3u8URL, err := getRumbleM3U8FromVideoID(videoID, job.HTTPClientConfig) 59 | if err != nil { 60 | return err 61 | } 62 | job.URL = m3u8URL 63 | return nil 64 | } 65 | 66 | func getRumbleVideoID(pageURL string) (string, error) { 67 | jar, _ := cookiejar.New(nil) 68 | client := &http.Client{Jar: jar} 69 | req, err := http.NewRequest("GET", pageURL, nil) 70 | if err != nil { 71 | return "", fmt.Errorf("failed to create request for rumble page: %w", err) 72 | } 73 | req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36") 74 | req.Header.Set("Connection", "keep-alive") 75 | req.Header.Set("Upgrade-Insecure-Requests", "1") 76 | resp, err := client.Do(req) 77 | if err != nil { 78 | return "", fmt.Errorf("failed to fetch rumble page: %w", err) 79 | } 80 | defer resp.Body.Close() 81 | body, err := io.ReadAll(resp.Body) 82 | if err != nil { 83 | return "", fmt.Errorf("failed 
to read rumble page body: %w", err) 84 | } 85 | re := regexp.MustCompile(`"embedUrl":\s*"https://rumble\.com/embed/([^/"]+)/"`) 86 | // re := regexp.MustCompile(`https://rumble\.com/embed/([^&",/]*)`) 87 | matches := re.FindStringSubmatch(string(body)) 88 | if len(matches) >= 2 { 89 | return matches[1], nil 90 | } 91 | return "", fmt.Errorf("could not find rumble video ID in page source") 92 | } 93 | 94 | func getRumbleM3U8FromVideoID(videoID string, clientConfig utils.HTTPClientConfig) (string, error) { 95 | jsonURL := fmt.Sprintf("https://rumble.com/embedJS/u3/?request=video&ver=2&v=%s", videoID) 96 | newClientConfig := clientConfig 97 | newClientConfig.Headers = make(map[string]string) 98 | maps.Copy(newClientConfig.Headers, clientConfig.Headers) 99 | newClientConfig.Headers["Referer"] = "https://rumble.com/" 100 | client := utils.NewDanzoHTTPClient(newClientConfig) 101 | req, err := http.NewRequest("GET", jsonURL, nil) 102 | if err != nil { 103 | return "", fmt.Errorf("failed to create request for rumble json: %w", err) 104 | } 105 | resp, err := client.Do(req) 106 | if err != nil { 107 | return "", fmt.Errorf("failed to fetch rumble json: %w", err) 108 | } 109 | defer resp.Body.Close() 110 | var data RumbleJSResponse 111 | if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { 112 | return "", fmt.Errorf("failed to decode rumble json: %w", err) 113 | } 114 | if data.U.HLS.URL != "" { 115 | return data.U.HLS.URL, nil 116 | } 117 | if auto, ok := data.UA.HLS["auto"]; ok && auto.URL != "" { 118 | return auto.URL, nil 119 | } 120 | return "", fmt.Errorf("could not find m3u8 url in rumble json response") 121 | } 122 | 123 | func extractDailymotionURL(job *utils.DanzoJob) error { 124 | log.Debug().Str("op", "live-stream/extractor").Msgf("Extracting Dailymotion URL from %s", job.URL) 125 | videoID, err := getDailymotionVideoID(job.URL) 126 | if err != nil { 127 | return err 128 | } 129 | log.Debug().Str("op", "live-stream/extractor").Msgf("Found Dailymotion 
video ID: %s", videoID) 130 | m3u8URL, err := getDailymotionM3U8FromVideoID(videoID, job.HTTPClientConfig) 131 | if err != nil { 132 | return err 133 | } 134 | job.URL = m3u8URL 135 | return nil 136 | } 137 | 138 | func getDailymotionVideoID(pageURL string) (string, error) { 139 | re := regexp.MustCompile(`dai\.ly/([^/?&#]+)`) // dai.ly/{id} 140 | if matches := re.FindStringSubmatch(pageURL); len(matches) >= 2 { 141 | return matches[1], nil 142 | } 143 | re = regexp.MustCompile(`dailymotion\.[a-z]{2,3}/video/([^/?&#]+)`) // dailymotion.com/video/{id} 144 | if matches := re.FindStringSubmatch(pageURL); len(matches) >= 2 { 145 | return matches[1], nil 146 | } 147 | re = regexp.MustCompile(`[?&]video=([^&#]+)`) // player.html?video={id} 148 | if matches := re.FindStringSubmatch(pageURL); len(matches) >= 2 { 149 | return matches[1], nil 150 | } 151 | return "", fmt.Errorf("could not extract Dailymotion video ID from URL: %s", pageURL) 152 | } 153 | 154 | func getDailymotionM3U8FromVideoID(videoID string, clientConfig utils.HTTPClientConfig) (string, error) { 155 | metadataURL := fmt.Sprintf("https://www.dailymotion.com/player/metadata/video/%s", videoID) 156 | newClientConfig := clientConfig 157 | newClientConfig.Headers = make(map[string]string) 158 | maps.Copy(newClientConfig.Headers, clientConfig.Headers) 159 | newClientConfig.Headers["Referer"] = "https://www.dailymotion.com/" 160 | newClientConfig.Headers["Origin"] = "https://www.dailymotion.com" 161 | client := utils.NewDanzoHTTPClient(newClientConfig) 162 | req, err := http.NewRequest("GET", metadataURL+"?app=com.dailymotion.neon", nil) 163 | if err != nil { 164 | return "", fmt.Errorf("failed to create request for dailymotion metadata: %w", err) 165 | } 166 | resp, err := client.Do(req) 167 | if err != nil { 168 | return "", fmt.Errorf("failed to fetch dailymotion metadata: %w", err) 169 | } 170 | defer resp.Body.Close() 171 | var metadata DailymotionMetadata 172 | if err := 
json.NewDecoder(resp.Body).Decode(&metadata); err != nil { 173 | return "", fmt.Errorf("failed to decode dailymotion metadata: %w", err) 174 | } 175 | qualityPriority := []string{"auto", "1080", "720", "480", "380", "240"} 176 | for _, quality := range qualityPriority { 177 | if mediaList, ok := metadata.Qualities[quality]; ok { 178 | for _, media := range mediaList { 179 | if media.Type == "application/x-mpegURL" && media.URL != "" { 180 | log.Debug().Str("op", "live-stream/extractor").Msgf("Found m3u8 URL at quality %s", quality) 181 | return media.URL, nil 182 | } 183 | } 184 | } 185 | } 186 | for quality, mediaList := range metadata.Qualities { 187 | for _, media := range mediaList { 188 | if media.Type == "application/x-mpegURL" && media.URL != "" { 189 | log.Debug().Str("op", "live-stream/extractor").Msgf("Found m3u8 URL at quality %s", quality) 190 | return media.URL, nil 191 | } 192 | } 193 | } 194 | return "", fmt.Errorf("could not find m3u8 URL in dailymotion metadata response") 195 | } 196 | -------------------------------------------------------------------------------- /internal/output/manager.go: -------------------------------------------------------------------------------- 1 | package output 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | "sync" 8 | "time" 9 | 10 | "github.com/tanq16/danzo/internal/utils" 11 | ) 12 | 13 | type JobStatus string 14 | 15 | const ( 16 | StatusPending JobStatus = "pending" 17 | StatusActive JobStatus = "active" 18 | StatusSuccess JobStatus = "success" 19 | StatusError JobStatus = "error" 20 | ) 21 | 22 | type JobOutput struct { 23 | ID int 24 | Name string 25 | Status JobStatus 26 | Message string 27 | StreamLines []string 28 | StartTime time.Time 29 | CompletedTime time.Duration 30 | } 31 | 32 | type Manager struct { 33 | jobs map[int]*JobOutput 34 | mu sync.RWMutex 35 | lastLineCount int 36 | maxStreamLines int 37 | doneCh chan struct{} 38 | pauseCh chan bool 39 | isPaused bool 40 | wg sync.WaitGroup 41 | 
jobCounter int 42 | } 43 | 44 | func NewManager() *Manager { 45 | return &Manager{ 46 | jobs: make(map[int]*JobOutput), 47 | maxStreamLines: 10, 48 | doneCh: make(chan struct{}), 49 | pauseCh: make(chan bool), 50 | isPaused: false, 51 | } 52 | } 53 | 54 | func (m *Manager) RegisterFunction(name string) int { 55 | m.mu.Lock() 56 | defer m.mu.Unlock() 57 | m.jobCounter++ 58 | id := m.jobCounter 59 | m.jobs[id] = &JobOutput{ 60 | ID: id, 61 | Name: name, 62 | Status: StatusPending, 63 | StreamLines: []string{}, 64 | StartTime: time.Now(), 65 | } 66 | return id 67 | } 68 | 69 | func (m *Manager) SetMessage(id int, message string) { 70 | m.mu.Lock() 71 | defer m.mu.Unlock() 72 | if job, exists := m.jobs[id]; exists { 73 | job.Message = message 74 | if job.Status == StatusPending { 75 | job.Status = StatusActive 76 | } 77 | } 78 | } 79 | 80 | func (m *Manager) SetStatus(id int, status string) { 81 | m.mu.Lock() 82 | defer m.mu.Unlock() 83 | if job, exists := m.jobs[id]; exists { 84 | switch status { 85 | case "pending": 86 | job.Status = StatusPending 87 | case "success": 88 | job.Status = StatusSuccess 89 | case "error": 90 | job.Status = StatusError 91 | default: 92 | job.Status = StatusActive 93 | } 94 | } 95 | } 96 | 97 | func (m *Manager) Complete(id int, message string) { 98 | m.mu.Lock() 99 | defer m.mu.Unlock() 100 | if job, exists := m.jobs[id]; exists { 101 | job.Status = StatusSuccess 102 | job.CompletedTime = time.Since(job.StartTime).Round(time.Second) 103 | job.StreamLines = []string{} // Clear streams on completion 104 | if message != "" { 105 | job.Message = message 106 | } else { 107 | job.Message = fmt.Sprintf("Completed %s", job.Name) 108 | } 109 | } 110 | } 111 | 112 | func (m *Manager) ReportError(id int, err error) { 113 | m.mu.Lock() 114 | defer m.mu.Unlock() 115 | if job, exists := m.jobs[id]; exists { 116 | job.Status = StatusError 117 | job.Message = fmt.Sprintf("Failed: %v", err) 118 | } 119 | } 120 | 121 | func (m *Manager) AddStreamLine(id 
int, line string) { 122 | m.mu.Lock() 123 | defer m.mu.Unlock() 124 | if job, exists := m.jobs[id]; exists { 125 | job.StreamLines = append(job.StreamLines, line) 126 | if len(job.StreamLines) > m.maxStreamLines { 127 | job.StreamLines = job.StreamLines[len(job.StreamLines)-m.maxStreamLines:] 128 | } 129 | } 130 | } 131 | 132 | func (m *Manager) AddProgressBarToStream(id int, current, total int64) { 133 | m.mu.Lock() 134 | defer m.mu.Unlock() 135 | if job, exists := m.jobs[id]; exists { 136 | progressBar := printProgressBar(current, total, 30) 137 | elapsed := time.Since(job.StartTime).Seconds() 138 | speed := utils.FormatSpeed(current, elapsed) 139 | sizeDisplay := fmt.Sprintf("%s / %s", utils.FormatBytes(uint64(current)), utils.FormatBytes(uint64(total))) 140 | display := fmt.Sprintf("%s%s %s %s", progressBar, debugStyle.Render(sizeDisplay), StyleSymbols["bullet"], debugStyle.Render(speed)) 141 | job.StreamLines = []string{display} 142 | } 143 | } 144 | 145 | func (m *Manager) Pause() { 146 | m.pauseCh <- true 147 | } 148 | 149 | func (m *Manager) Resume() { 150 | m.pauseCh <- false 151 | } 152 | 153 | func (m *Manager) StartDisplay() { 154 | m.wg.Add(1) 155 | go func() { 156 | defer m.wg.Done() 157 | timePerUpdate := 300 * time.Millisecond 158 | if utils.GlobalDebugFlag { 159 | timePerUpdate = 3 * time.Second // slow refresh for debug mode 160 | } 161 | ticker := time.NewTicker(timePerUpdate) 162 | defer ticker.Stop() 163 | for { 164 | select { 165 | case <-ticker.C: 166 | if !m.isPaused { 167 | m.updateDisplay() 168 | } 169 | case m.isPaused = <-m.pauseCh: 170 | case <-m.doneCh: 171 | m.updateDisplay() 172 | m.showSummary() 173 | return 174 | } 175 | } 176 | }() 177 | } 178 | 179 | func (m *Manager) StopDisplay() { 180 | close(m.doneCh) 181 | m.wg.Wait() 182 | } 183 | 184 | func (m *Manager) updateDisplay() { 185 | m.mu.RLock() 186 | defer m.mu.RUnlock() 187 | if m.lastLineCount > 0 && !utils.GlobalDebugFlag { 188 | fmt.Printf("\033[%dA\033[J", m.lastLineCount) 
189 | } 190 | 191 | var lines []string 192 | allJobs := make([]*JobOutput, 0, len(m.jobs)) 193 | for _, job := range m.jobs { 194 | allJobs = append(allJobs, job) 195 | } 196 | sort.Slice(allJobs, func(i, j int) bool { 197 | return allJobs[i].ID < allJobs[j].ID 198 | }) 199 | 200 | // Group jobs by status 201 | var active, pending, completed []*JobOutput 202 | for _, job := range allJobs { 203 | switch job.Status { 204 | case StatusPending: 205 | pending = append(pending, job) 206 | case StatusSuccess, StatusError: 207 | completed = append(completed, job) 208 | default: 209 | active = append(active, job) 210 | } 211 | } 212 | 213 | // Display active jobs 214 | for _, job := range active { 215 | elapsed := time.Since(job.StartTime).Round(time.Second) 216 | lines = append(lines, fmt.Sprintf(" %s %s %s", getStatusIcon(job.Status), debugStyle.Render(elapsed.String()), pendingStyle.Render(job.Message))) 217 | for _, stream := range job.StreamLines { 218 | lines = append(lines, fmt.Sprintf(" %s", streamStyle.Render(stream))) 219 | } 220 | } 221 | 222 | // Display pending jobs 223 | for _, job := range pending { 224 | lines = append(lines, fmt.Sprintf(" %s %s", getStatusIcon(job.Status), pendingStyle.Render("Waiting..."))) 225 | } 226 | 227 | // Display completed jobs (limit to last 8 if too many) 228 | if len(completed) > 10 { 229 | lines = append(lines, infoStyle.Render(fmt.Sprintf(" %d jobs completed (showing last 8)...", len(completed)))) 230 | completed = completed[len(completed)-8:] 231 | } 232 | for _, job := range completed { 233 | totalTime := job.CompletedTime 234 | if job.CompletedTime == 0 { 235 | totalTime = time.Since(job.StartTime).Round(time.Second) 236 | job.CompletedTime = totalTime 237 | } 238 | style := successStyle 239 | if job.Status == StatusError { 240 | style = errorStyle 241 | } 242 | lines = append(lines, fmt.Sprintf(" %s %s %s", getStatusIcon(job.Status), debugStyle.Render(totalTime.String()), style.Render(job.Message))) 243 | } 244 | 245 | // 
Print all lines 246 | if len(lines) > 0 { 247 | fmt.Println(strings.Join(lines, "\n")) 248 | } 249 | m.lastLineCount = len(lines) 250 | } 251 | 252 | func (m *Manager) showSummary() { 253 | m.mu.RLock() 254 | defer m.mu.RUnlock() 255 | var success, errors int 256 | for _, job := range m.jobs { 257 | switch job.Status { 258 | case StatusSuccess: 259 | success++ 260 | case StatusError: 261 | errors++ 262 | } 263 | } 264 | fmt.Println() 265 | fmt.Println(" " + successStyle.Render(fmt.Sprintf("Completed %d of %d", success, len(m.jobs)))) 266 | if errors > 0 { 267 | fmt.Println(" " + errorStyle.Render(fmt.Sprintf("Failed %d of %d", errors, len(m.jobs)))) 268 | } 269 | fmt.Println() 270 | } 271 | 272 | func getStatusIcon(status JobStatus) string { 273 | switch status { 274 | case StatusSuccess: 275 | return successStyle.Render(StyleSymbols["pass"]) 276 | case StatusError: 277 | return errorStyle.Render(StyleSymbols["fail"]) 278 | case StatusPending: 279 | return pendingStyle.Render(StyleSymbols["pending"]) 280 | default: 281 | return pendingStyle.Render(StyleSymbols["pending"]) 282 | } 283 | } 284 | 285 | func printProgressBar(current, total int64, width int) string { 286 | if width <= 0 { 287 | width = 30 288 | } 289 | if total <= 0 { 290 | total = 1 291 | } 292 | if current < 0 { 293 | current = 0 294 | } 295 | if current > total { 296 | current = total 297 | } 298 | percent := float64(current) / float64(total) 299 | filled := max(0, min(int(percent*float64(width)), width)) 300 | bar := StyleSymbols["bullet"] 301 | bar += strings.Repeat(StyleSymbols["hline"], filled) 302 | if filled < width { 303 | bar += strings.Repeat(" ", width-filled) 304 | } 305 | bar += StyleSymbols["bullet"] 306 | return debugStyle.Render(fmt.Sprintf("%s %.1f%% %s ", bar, percent*100, StyleSymbols["bullet"])) 307 | } 308 | -------------------------------------------------------------------------------- /internal/downloaders/live-stream/helpers.go: 
-------------------------------------------------------------------------------- 1 | package m3u8 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "strings" 11 | "sync" 12 | 13 | "github.com/rs/zerolog/log" 14 | "github.com/tanq16/danzo/internal/utils" 15 | ) 16 | 17 | type M3U8Info struct { 18 | VideoSegmentURLs []string 19 | AudioSegmentURLs []string 20 | VideoInitSegment string 21 | AudioInitSegment string 22 | HasSeparateAudio bool 23 | } 24 | 25 | func getM3U8Contents(manifestURL string, client *utils.DanzoHTTPClient) (string, error) { 26 | req, err := http.NewRequest("GET", manifestURL, nil) 27 | if err != nil { 28 | return "", fmt.Errorf("error creating request: %v", err) 29 | } 30 | resp, err := client.Do(req) 31 | if err != nil { 32 | return "", fmt.Errorf("error fetching m3u8 manifest: %v", err) 33 | } 34 | defer resp.Body.Close() 35 | if resp.StatusCode != http.StatusOK { 36 | return "", fmt.Errorf("server returned status code %d", resp.StatusCode) 37 | } 38 | content, err := io.ReadAll(resp.Body) 39 | if err != nil { 40 | return "", fmt.Errorf("error reading manifest content: %v", err) 41 | } 42 | log.Debug().Str("op", "live-stream/helpers").Msgf("Successfully read manifest from %s", manifestURL) 43 | return string(content), nil 44 | } 45 | 46 | type audioTrack struct { 47 | url string 48 | quality int 49 | } 50 | 51 | func parseM3U8Content(content, manifestURL string, client *utils.DanzoHTTPClient) (*M3U8Info, error) { 52 | baseURL, err := url.Parse(manifestURL) 53 | if err != nil { 54 | return nil, fmt.Errorf("error parsing manifest URL: %v", err) 55 | } 56 | scanner := bufio.NewScanner(strings.NewReader(content)) 57 | var segmentURLs []string 58 | var masterPlaylistURLs []string 59 | var masterPlaylistBandwidths []int 60 | var audioTracks []audioTrack 61 | var isMasterPlaylist bool 62 | var initSegment string 63 | var currentBandwidth int 64 | 65 | for scanner.Scan() { 66 | line := 
strings.TrimSpace(scanner.Text()) 67 | if line == "" { 68 | continue 69 | } 70 | if strings.HasPrefix(line, "#EXT-X-MAP:") { 71 | if idx := strings.Index(line, `URI="`); idx != -1 { 72 | uriStart := idx + 5 73 | if uriEnd := strings.Index(line[uriStart:], `"`); uriEnd != -1 { 74 | uri := line[uriStart : uriStart+uriEnd] 75 | initSegment, err = resolveURL(baseURL, uri) 76 | if err != nil { 77 | return nil, fmt.Errorf("error resolving init segment URL: %v", err) 78 | } 79 | log.Debug().Str("op", "live-stream/helpers").Msgf("Found init segment: %s", initSegment) 80 | } 81 | } 82 | continue 83 | } 84 | if strings.HasPrefix(line, "#EXT-X-MEDIA:") && strings.Contains(line, "TYPE=AUDIO") { 85 | isMasterPlaylist = true 86 | log.Debug().Str("op", "live-stream/helpers").Msgf("Found audio media line: %s", line) 87 | var audioURL string 88 | if idx := strings.Index(line, `URI="`); idx != -1 { 89 | uriStart := idx + 5 90 | if uriEnd := strings.Index(line[uriStart:], `"`); uriEnd != -1 { 91 | uri := line[uriStart : uriStart+uriEnd] 92 | audioURL, err = resolveURL(baseURL, uri) 93 | if err != nil { 94 | return nil, fmt.Errorf("error resolving audio URL: %v", err) 95 | } 96 | } 97 | } 98 | quality := 0 99 | groupID := "" 100 | if idx := strings.Index(line, `GROUP-ID="`); idx != -1 { 101 | groupStart := idx + 10 102 | if groupEnd := strings.Index(line[groupStart:], `"`); groupEnd != -1 { 103 | groupID = strings.ToLower(line[groupStart : groupStart+groupEnd]) 104 | log.Debug().Str("op", "live-stream/helpers").Msgf("Found GROUP-ID: %s", groupID) 105 | if strings.Contains(groupID, "high") { 106 | quality = 3 107 | } else if strings.Contains(groupID, "medium") { 108 | quality = 2 109 | } else if strings.Contains(groupID, "low") { 110 | quality = 1 111 | } 112 | } 113 | } 114 | if quality == 0 { 115 | if idx := strings.Index(line, `NAME="`); idx != -1 { 116 | nameStart := idx + 6 117 | if nameEnd := strings.Index(line[nameStart:], `"`); nameEnd != -1 { 118 | name := 
strings.ToLower(line[nameStart : nameStart+nameEnd]) 119 | log.Debug().Str("op", "live-stream/helpers").Msgf("Found NAME: %s", name) 120 | if strings.Contains(name, "high") { 121 | quality = 3 122 | } else if strings.Contains(name, "medium") { 123 | quality = 2 124 | } else if strings.Contains(name, "low") { 125 | quality = 1 126 | } 127 | } 128 | } 129 | } 130 | if audioURL != "" { 131 | audioTracks = append(audioTracks, audioTrack{url: audioURL, quality: quality}) 132 | } 133 | continue 134 | } 135 | if strings.HasPrefix(line, "#") && !strings.Contains(line, "#EXT-X-STREAM-INF") { 136 | continue 137 | } 138 | if strings.Contains(line, "#EXT-X-STREAM-INF") { 139 | isMasterPlaylist = true 140 | currentBandwidth = 0 141 | if idx := strings.Index(line, "BANDWIDTH="); idx != -1 { 142 | bandwidthStart := idx + 10 143 | bandwidthEnd := bandwidthStart 144 | for bandwidthEnd < len(line) && line[bandwidthEnd] >= '0' && line[bandwidthEnd] <= '9' { 145 | bandwidthEnd++ 146 | } 147 | if bandwidthEnd > bandwidthStart { 148 | fmt.Sscanf(line[bandwidthStart:bandwidthEnd], "%d", ¤tBandwidth) 149 | } 150 | } 151 | continue 152 | } 153 | if !strings.HasPrefix(line, "#") { 154 | segmentURL, err := resolveURL(baseURL, line) 155 | if err != nil { 156 | return nil, fmt.Errorf("error resolving URL: %v", err) 157 | } 158 | if isMasterPlaylist { 159 | masterPlaylistURLs = append(masterPlaylistURLs, segmentURL) 160 | masterPlaylistBandwidths = append(masterPlaylistBandwidths, currentBandwidth) 161 | currentBandwidth = 0 162 | } else { 163 | segmentURLs = append(segmentURLs, segmentURL) 164 | } 165 | } 166 | } 167 | if err := scanner.Err(); err != nil { 168 | return nil, fmt.Errorf("error scanning m3u8 content: %v", err) 169 | } 170 | 171 | if isMasterPlaylist && len(masterPlaylistURLs) > 0 { 172 | bestVariantIdx := 0 173 | maxBandwidth := 0 174 | for i, bandwidth := range masterPlaylistBandwidths { 175 | if bandwidth > maxBandwidth { 176 | maxBandwidth = bandwidth 177 | bestVariantIdx = i 
178 | } 179 | } 180 | log.Debug().Str("op", "live-stream/helpers").Msgf("Detected master playlist with %d video variants and %d audio tracks, selecting variant with bandwidth %d", len(masterPlaylistURLs), len(audioTracks), maxBandwidth) 181 | 182 | videoContent, err := getM3U8Contents(masterPlaylistURLs[bestVariantIdx], client) 183 | if err != nil { 184 | return nil, fmt.Errorf("error fetching video sub-playlist: %v", err) 185 | } 186 | videoInfo, err := parseM3U8Content(videoContent, masterPlaylistURLs[bestVariantIdx], client) 187 | if err != nil { 188 | return nil, fmt.Errorf("error parsing video sub-playlist: %v", err) 189 | } 190 | 191 | if len(audioTracks) > 0 { 192 | bestAudioIdx := 0 193 | maxQuality := 0 194 | for i, track := range audioTracks { 195 | log.Debug().Str("op", "live-stream/helpers").Msgf("Audio track %d: url=%s, quality=%d", i, track.url, track.quality) 196 | if track.quality > maxQuality { 197 | maxQuality = track.quality 198 | bestAudioIdx = i 199 | } 200 | } 201 | log.Debug().Str("op", "live-stream/helpers").Msgf("Selected audio track %d with quality %d", bestAudioIdx, maxQuality) 202 | audioContent, err := getM3U8Contents(audioTracks[bestAudioIdx].url, client) 203 | if err != nil { 204 | return nil, fmt.Errorf("error fetching audio sub-playlist: %v", err) 205 | } 206 | audioInfo, err := parseM3U8Content(audioContent, audioTracks[bestAudioIdx].url, client) 207 | if err != nil { 208 | return nil, fmt.Errorf("error parsing audio sub-playlist: %v", err) 209 | } 210 | 211 | return &M3U8Info{ 212 | VideoSegmentURLs: videoInfo.VideoSegmentURLs, 213 | AudioSegmentURLs: audioInfo.VideoSegmentURLs, 214 | VideoInitSegment: videoInfo.VideoInitSegment, 215 | AudioInitSegment: audioInfo.VideoInitSegment, 216 | HasSeparateAudio: true, 217 | }, nil 218 | } 219 | 220 | return videoInfo, nil 221 | } 222 | 223 | return &M3U8Info{ 224 | VideoSegmentURLs: segmentURLs, 225 | VideoInitSegment: initSegment, 226 | HasSeparateAudio: false, 227 | }, nil 228 | } 229 | 
230 | func resolveURL(baseURL *url.URL, urlStr string) (string, error) { 231 | if strings.HasPrefix(urlStr, "http://") || strings.HasPrefix(urlStr, "https://") { 232 | return urlStr, nil 233 | } 234 | relURL, err := url.Parse(urlStr) 235 | if err != nil { 236 | return "", err 237 | } 238 | absURL := baseURL.ResolveReference(relURL) 239 | return absURL.String(), nil 240 | } 241 | 242 | func calculateTotalSize(segmentURLs []string, numWorkers int, client *utils.DanzoHTTPClient) (int64, []int64, error) { 243 | segmentSizes := make([]int64, len(segmentURLs)) 244 | var totalSize int64 245 | var mu sync.Mutex 246 | var sizeErr error 247 | type sizeJob struct { 248 | index int 249 | url string 250 | } 251 | jobCh := make(chan sizeJob, len(segmentURLs)) 252 | for i, url := range segmentURLs { 253 | jobCh <- sizeJob{index: i, url: url} 254 | } 255 | close(jobCh) 256 | log.Debug().Str("op", "live-stream/helpers").Msg("Calculating total size of all segments") 257 | var wg sync.WaitGroup 258 | for range numWorkers { 259 | wg.Add(1) 260 | go func() { 261 | defer wg.Done() 262 | for job := range jobCh { 263 | size, err := getSize(job.url, client) 264 | if err != nil { 265 | mu.Lock() 266 | if sizeErr == nil { 267 | sizeErr = err 268 | } 269 | mu.Unlock() 270 | continue 271 | } 272 | mu.Lock() 273 | segmentSizes[job.index] = size 274 | totalSize += size 275 | mu.Unlock() 276 | } 277 | }() 278 | } 279 | wg.Wait() 280 | if sizeErr != nil { 281 | return 0, nil, sizeErr 282 | } 283 | return totalSize, segmentSizes, nil 284 | } 285 | 286 | func getSize(url string, client *utils.DanzoHTTPClient) (int64, error) { 287 | req, err := http.NewRequest("HEAD", url, nil) 288 | if err != nil { 289 | return 0, err 290 | } 291 | resp, err := client.Do(req) 292 | if err != nil { 293 | return 0, err 294 | } 295 | defer resp.Body.Close() 296 | if resp.StatusCode != http.StatusOK { 297 | return 0, fmt.Errorf("server returned status code %d", resp.StatusCode) 298 | } 299 | contentLength := 
resp.Header.Get("Content-Length") 300 | if contentLength == "" { 301 | return 0, fmt.Errorf("no content length") 302 | } 303 | var size int64 304 | fmt.Sscanf(contentLength, "%d", &size) 305 | return size, nil 306 | } 307 | 308 | func downloadSegment(segmentURL, outputPath string, client *utils.DanzoHTTPClient) (int64, error) { 309 | req, err := http.NewRequest("GET", segmentURL, nil) 310 | if err != nil { 311 | return 0, fmt.Errorf("error creating request: %v", err) 312 | } 313 | resp, err := client.Do(req) 314 | if err != nil { 315 | return 0, fmt.Errorf("error downloading segment: %v", err) 316 | } 317 | defer resp.Body.Close() 318 | if resp.StatusCode != http.StatusOK { 319 | return 0, fmt.Errorf("server returned status code %d", resp.StatusCode) 320 | } 321 | outFile, err := os.Create(outputPath) 322 | if err != nil { 323 | return 0, fmt.Errorf("error creating output file: %v", err) 324 | } 325 | defer outFile.Close() 326 | written, err := io.Copy(outFile, resp.Body) 327 | if err != nil { 328 | return 0, fmt.Errorf("error writing segment: %v", err) 329 | } 330 | return written, nil 331 | } 332 | -------------------------------------------------------------------------------- /internal/downloaders/youtube-music/metadata.go: -------------------------------------------------------------------------------- 1 | package youtubemusic 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | "regexp" 12 | "strings" 13 | "time" 14 | 15 | "github.com/google/uuid" 16 | "github.com/rs/zerolog/log" 17 | "github.com/tanq16/danzo/internal/utils" 18 | ) 19 | 20 | type DeezerResponse struct { 21 | ID int `json:"id"` 22 | Title string `json:"title"` 23 | Artist struct { 24 | Name string `json:"name"` 25 | } `json:"artist"` 26 | Album struct { 27 | Title string `json:"title"` 28 | Cover string `json:"cover_xl"` 29 | } `json:"album"` 30 | ReleaseDate string `json:"release_date"` 31 | TrackNumber int 
`json:"track_position"`
	DiskNumber   int `json:"disk_number"`
	Contributors []struct {
		Name string `json:"name"`
		Role string `json:"role"`
	} `json:"contributors"`
}

// ITunesResponse models the subset of the iTunes lookup API response
// used for tagging: naming, numbering, genre and artwork.
type ITunesResponse struct {
	ResultCount int `json:"resultCount"`
	Results     []struct {
		TrackName        string `json:"trackName"`
		ArtistName       string `json:"artistName"`
		CollectionName   string `json:"collectionName"`
		ReleaseDate      string `json:"releaseDate"`
		PrimaryGenreName string `json:"primaryGenreName"`
		TrackNumber      int    `json:"trackNumber"`
		DiscNumber       int    `json:"discNumber"`
		TrackCount       int    `json:"trackCount"`
		DiscCount        int    `json:"discCount"`
		ArtworkUrl100    string `json:"artworkUrl100"`
	} `json:"results"`
}

// httpConfig is the shared client configuration for all metadata and
// artwork HTTP requests (30s request and keep-alive timeouts).
var httpConfig = utils.HTTPClientConfig{
	Timeout:   30 * time.Second,
	KATimeout: 30 * time.Second,
}

// addMusicMetadata tags the audio file at inputPath and writes the result
// to outputPath, dispatching on musicClient ("deezer" or "apple").
// streamFunc, when non-nil, receives human-readable progress messages.
func addMusicMetadata(inputPath, outputPath, musicClient, musicId string, streamFunc func(string)) error {
	switch musicClient {
	case "deezer":
		return addDeezerMetadata(inputPath, outputPath, musicId, streamFunc)
	case "apple":
		return addAppleMetadata(inputPath, outputPath, musicId, streamFunc)
	default:
		return fmt.Errorf("unsupported music client: %s", musicClient)
	}
}

// addAppleMetadata looks up the track on the iTunes API, downloads the
// (upscaled) cover art when available, and applies both via ffmpeg.
// Artwork download is best-effort: a failure only drops the cover.
func addAppleMetadata(inputPath, outputPath, musicId string, streamFunc func(string)) error {
	client := utils.NewDanzoHTTPClient(httpConfig)
	apiURL := fmt.Sprintf("https://itunes.apple.com/lookup?id=%s&entity=song", musicId)
	log.Debug().Str("op", "youtube-music/metadata").Msgf("Fetching iTunes metadata from: %s", apiURL)
	req, err := http.NewRequest("GET", apiURL, nil) // error was previously discarded with req, _ :=
	if err != nil {
		return fmt.Errorf("error creating request: %v", err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Error().Str("op", "youtube-music/metadata").Err(err).Msg("Error fetching iTunes metadata")
		return fmt.Errorf("error fetching metadata: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Error().Str("op", "youtube-music/metadata").Msgf("iTunes API request failed with status code %d", resp.StatusCode)
		return fmt.Errorf("API request failed with status code %d", resp.StatusCode)
	}
	var itunesResp ITunesResponse
	if err := json.NewDecoder(resp.Body).Decode(&itunesResp); err != nil {
		log.Error().Str("op", "youtube-music/metadata").Err(err).Msg("Error parsing iTunes response")
		return fmt.Errorf("error parsing response: %v", err)
	}
	if itunesResp.ResultCount == 0 || len(itunesResp.Results) == 0 {
		log.Warn().Str("op", "youtube-music/metadata").Msgf("No results found for iTunes ID: %s", musicId)
		return fmt.Errorf("no results found for iTunes ID: %s", musicId)
	}
	log.Debug().Str("op", "youtube-music/metadata").Msg("Successfully fetched and parsed iTunes metadata")

	trackInfo := itunesResp.Results[0]
	tempDir := filepath.Join(filepath.Dir(outputPath), ".danzo-temp")
	if err := os.MkdirAll(tempDir, 0755); err != nil {
		return fmt.Errorf("error creating temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)
	fileMarker := uuid.New().String()
	var artworkPath string

	if trackInfo.ArtworkUrl100 != "" {
		// Request a 1000x1000 render instead of the default 100x100 thumbnail.
		highResArtwork := strings.Replace(trackInfo.ArtworkUrl100, "100x100", "1000x1000", 1)
		artworkPath = filepath.Join(tempDir, fileMarker+".jpg")
		if err := downloadFile(highResArtwork, artworkPath, client); err != nil {
			log.Warn().Str("op", "youtube-music/metadata").Err(err).Msg("Failed to download artwork")
			artworkPath = "" // continue without cover art
		}
	}
	return applyMetadataWithFFmpeg(inputPath, outputPath, trackInfo, artworkPath, streamFunc)
}

// addDeezerMetadata looks up the track on the Deezer API, downloads the
// cover art when available, and applies both via ffmpeg.
func addDeezerMetadata(inputPath, outputPath, musicId string, streamFunc func(string)) error {
	client := utils.NewDanzoHTTPClient(httpConfig)
	apiURL := fmt.Sprintf("https://api.deezer.com/track/%s", musicId)
log.Debug().Str("op", "youtube-music/metadata").Msgf("Fetching Deezer metadata from: %s", apiURL) 123 | req, _ := http.NewRequest("GET", apiURL, nil) 124 | resp, err := client.Do(req) 125 | if err != nil { 126 | log.Error().Str("op", "youtube-music/metadata").Err(err).Msg("Error fetching Deezer metadata") 127 | return fmt.Errorf("error fetching metadata: %v", err) 128 | } 129 | defer resp.Body.Close() 130 | 131 | if resp.StatusCode != http.StatusOK { 132 | log.Error().Str("op", "youtube-music/metadata").Msgf("Deezer API request failed with status code %d", resp.StatusCode) 133 | return fmt.Errorf("API request failed with status code %d", resp.StatusCode) 134 | } 135 | var deezerResp DeezerResponse 136 | if err := json.NewDecoder(resp.Body).Decode(&deezerResp); err != nil { 137 | log.Error().Str("op", "youtube-music/metadata").Err(err).Msg("Error parsing Deezer response") 138 | return fmt.Errorf("error parsing response: %v", err) 139 | } 140 | log.Debug().Str("op", "youtube-music/metadata").Msg("Successfully fetched and parsed Deezer metadata") 141 | tempDir := filepath.Join(filepath.Dir(outputPath), ".danzo-temp") 142 | if err := os.MkdirAll(tempDir, 0755); err != nil { 143 | return fmt.Errorf("error creating temp directory: %v", err) 144 | } 145 | defer os.RemoveAll(tempDir) 146 | 147 | fileMarker := uuid.New().String() 148 | var artworkPath string 149 | if deezerResp.Album.Cover != "" { 150 | artworkPath = filepath.Join(tempDir, fileMarker+".jpg") 151 | err := downloadFile(deezerResp.Album.Cover, artworkPath, client) 152 | if err != nil { 153 | log.Warn().Str("op", "youtube-music/metadata").Err(err).Msg("Failed to download artwork") 154 | artworkPath = "" 155 | } 156 | } 157 | return applyDeezerMetadataWithFFmpeg(inputPath, outputPath, deezerResp, artworkPath, streamFunc) 158 | } 159 | 160 | func applyMetadataWithFFmpeg(inputPath, outputPath string, trackInfo struct { 161 | TrackName string `json:"trackName"` 162 | ArtistName string `json:"artistName"` 163 | 
CollectionName string `json:"collectionName"` 164 | ReleaseDate string `json:"releaseDate"` 165 | PrimaryGenreName string `json:"primaryGenreName"` 166 | TrackNumber int `json:"trackNumber"` 167 | DiscNumber int `json:"discNumber"` 168 | TrackCount int `json:"trackCount"` 169 | DiscCount int `json:"discCount"` 170 | ArtworkUrl100 string `json:"artworkUrl100"` 171 | }, artworkPath string, streamFunc func(string)) error { 172 | tempDir := filepath.Dir(artworkPath) 173 | if tempDir == "" { 174 | tempDir = filepath.Dir(outputPath) 175 | } 176 | metadataPath := filepath.Join(tempDir, uuid.New().String()+".txt") 177 | escapeRegex := regexp.MustCompile(`[^a-zA-Z0-9\s\-_]`) 178 | escapeRE := func(s string) string { 179 | return escapeRegex.ReplaceAllString(s, "") 180 | } 181 | metadataContent := fmt.Sprintf(";FFMETADATA1\ntitle=%s\nartist=%s\nalbum=%s\n", 182 | escapeRE(trackInfo.TrackName), escapeRE(trackInfo.ArtistName), escapeRE(trackInfo.CollectionName)) 183 | 184 | if trackInfo.ReleaseDate != "" { 185 | if len(trackInfo.ReleaseDate) > 10 { 186 | extractedDate, _ := time.Parse("2006-01-02T15:04:05Z", trackInfo.ReleaseDate) 187 | metadataContent += fmt.Sprintf("date=%s\n", extractedDate.Format("2006-01-02")) 188 | } else { 189 | metadataContent += fmt.Sprintf("date=%s\n", escapeRE(trackInfo.ReleaseDate)) 190 | } 191 | } 192 | if trackInfo.PrimaryGenreName != "" { 193 | metadataContent += fmt.Sprintf("genre=%s\n", escapeRE(trackInfo.PrimaryGenreName)) 194 | } 195 | if trackInfo.TrackNumber > 0 { 196 | if trackInfo.TrackCount > 0 { 197 | metadataContent += fmt.Sprintf("track=%d/%d\n", trackInfo.TrackNumber, trackInfo.TrackCount) 198 | } else { 199 | metadataContent += fmt.Sprintf("track=%d\n", trackInfo.TrackNumber) 200 | } 201 | } 202 | if trackInfo.DiscNumber > 0 { 203 | if trackInfo.DiscCount > 0 { 204 | metadataContent += fmt.Sprintf("disc=%d/%d\n", trackInfo.DiscNumber, trackInfo.DiscCount) 205 | } else { 206 | metadataContent += fmt.Sprintf("disc=%d\n", 
trackInfo.DiscNumber) 207 | } 208 | } 209 | 210 | if err := os.WriteFile(metadataPath, []byte(metadataContent), 0644); err != nil { 211 | return fmt.Errorf("error writing metadata file: %v", err) 212 | } 213 | args := []string{"-i", inputPath, "-i", metadataPath} 214 | if artworkPath != "" { 215 | args = append(args, "-i", artworkPath, "-map", "0", "-map", "2") 216 | args = append(args, "-disposition:v:0", "attached_pic") 217 | } 218 | args = append(args, "-map_metadata", "1", "-codec", "copy") 219 | args = append(args, "-id3v2_version", "3", "-y", outputPath) 220 | 221 | cmd := exec.Command("ffmpeg", args...) 222 | log.Debug().Str("op", "youtube-music/metadata").Msgf("Applying metadata with ffmpeg: %s", cmd.String()) 223 | output, err := cmd.CombinedOutput() 224 | if err != nil { 225 | log.Error().Str("op", "youtube-music/metadata").Err(err).Msgf("FFmpeg error: %s", string(output)) 226 | return fmt.Errorf("FFmpeg error: %v\nOutput: %s", err, string(output)) 227 | } 228 | if streamFunc != nil { 229 | streamFunc("Metadata applied successfully") 230 | } 231 | log.Info().Str("op", "youtube-music/metadata").Msgf("Metadata successfully applied to %s", outputPath) 232 | return nil 233 | } 234 | 235 | func applyDeezerMetadataWithFFmpeg(inputPath, outputPath string, deezerResp DeezerResponse, artworkPath string, streamFunc func(string)) error { 236 | tempDir := filepath.Dir(artworkPath) 237 | if tempDir == "" { 238 | tempDir = filepath.Dir(outputPath) 239 | } 240 | metadataPath := filepath.Join(tempDir, uuid.New().String()+".txt") 241 | escapeRegex := regexp.MustCompile(`[^a-zA-Z0-9\s\-_]`) 242 | escapeRE := func(s string) string { 243 | return escapeRegex.ReplaceAllString(s, "") 244 | } 245 | metadataContent := fmt.Sprintf(";FFMETADATA1\ntitle=%s\nartist=%s\nalbum=%s\n", 246 | escapeRE(deezerResp.Title), escapeRE(deezerResp.Artist.Name), escapeRE(deezerResp.Album.Title)) 247 | 248 | if len(deezerResp.ReleaseDate) > 4 { 249 | metadataContent += fmt.Sprintf("date=%s\n", 
escapeRE(deezerResp.ReleaseDate)) 250 | } 251 | 252 | for _, contributor := range deezerResp.Contributors { 253 | if strings.Contains(strings.ToLower(contributor.Role), "compos") { 254 | metadataContent += fmt.Sprintf("composer=%s\n", escapeRE(contributor.Name)) 255 | break 256 | } 257 | } 258 | if deezerResp.TrackNumber > 0 { 259 | metadataContent += fmt.Sprintf("track=%d\n", deezerResp.TrackNumber) 260 | } 261 | if deezerResp.DiskNumber > 0 { 262 | metadataContent += fmt.Sprintf("disc=%d\n", deezerResp.DiskNumber) 263 | } 264 | if err := os.WriteFile(metadataPath, []byte(metadataContent), 0644); err != nil { 265 | return fmt.Errorf("error writing metadata file: %v", err) 266 | } 267 | 268 | args := []string{"-i", inputPath, "-i", metadataPath} 269 | if artworkPath != "" { 270 | args = append(args, "-i", artworkPath, "-map", "0", "-map", "2") 271 | args = append(args, "-disposition:v:0", "attached_pic") 272 | } 273 | args = append(args, "-map_metadata", "1", "-codec", "copy") 274 | args = append(args, "-id3v2_version", "3", "-y", outputPath) 275 | 276 | cmd := exec.Command("ffmpeg", args...) 
277 | log.Debug().Str("op", "youtube-music/metadata").Msgf("Applying metadata with ffmpeg: %s", cmd.String()) 278 | output, err := cmd.CombinedOutput() 279 | if err != nil { 280 | log.Error().Str("op", "youtube-music/metadata").Err(err).Msgf("FFmpeg error: %s", string(output)) 281 | return fmt.Errorf("FFmpeg error: %v\nOutput: %s", err, string(output)) 282 | } 283 | if streamFunc != nil { 284 | streamFunc("Metadata applied successfully") 285 | } 286 | log.Info().Str("op", "youtube-music/metadata").Msgf("Metadata successfully applied to %s", outputPath) 287 | return nil 288 | } 289 | 290 | func downloadFile(url, filepath string, client *utils.DanzoHTTPClient) error { 291 | req, _ := http.NewRequest("GET", url, nil) 292 | resp, err := client.Do(req) 293 | if err != nil { 294 | return err 295 | } 296 | defer resp.Body.Close() 297 | if resp.StatusCode != http.StatusOK { 298 | return fmt.Errorf("bad status: %s", resp.Status) 299 | } 300 | out, err := os.Create(filepath) 301 | if err != nil { 302 | return err 303 | } 304 | defer out.Close() 305 | _, err = io.Copy(out, resp.Body) 306 | return err 307 | } 308 | -------------------------------------------------------------------------------- /internal/downloaders/live-stream/download.go: -------------------------------------------------------------------------------- 1 | package m3u8 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "strings" 9 | "sync" 10 | "sync/atomic" 11 | 12 | "github.com/rs/zerolog/log" 13 | "github.com/tanq16/danzo/internal/utils" 14 | ) 15 | 16 | func (d *M3U8Downloader) Download(job *utils.DanzoJob) error { 17 | tempDir := job.Metadata["tempDir"].(string) 18 | if err := os.MkdirAll(tempDir, 0755); err != nil { 19 | return fmt.Errorf("error creating temp directory: %v", err) 20 | } 21 | var downloadErr error 22 | defer func() { 23 | if downloadErr == nil { 24 | os.RemoveAll(tempDir) 25 | } else { 26 | log.Warn().Str("op", "live-stream/download").Msgf("Preserving segments in 
%s due to error", tempDir)
		}
	}()

	client := utils.NewDanzoHTTPClient(job.HTTPClientConfig)
	log.Debug().Str("op", "live-stream/download").Msgf("Fetching manifest from %s", job.URL)
	manifestContent, err := getM3U8Contents(job.URL, client)
	if err != nil {
		downloadErr = fmt.Errorf("error fetching manifest: %v", err)
		return downloadErr
	}
	m3u8Info, err := parseM3U8Content(manifestContent, job.URL, client)
	if err != nil {
		downloadErr = fmt.Errorf("error processing manifest: %v", err)
		return downloadErr
	}

	if len(m3u8Info.VideoSegmentURLs) == 0 {
		downloadErr = fmt.Errorf("no video segments found in manifest")
		return downloadErr
	}

	if m3u8Info.HasSeparateAudio {
		log.Info().Str("op", "live-stream/download").Msgf("Found %d video segments and %d audio segments", len(m3u8Info.VideoSegmentURLs), len(m3u8Info.AudioSegmentURLs))
		if err := downloadAndMergeSeparateStreams(m3u8Info, job, tempDir, client); err != nil {
			downloadErr = err
			return downloadErr
		}
	} else {
		log.Info().Str("op", "live-stream/download").Msgf("Found %d segments to download", len(m3u8Info.VideoSegmentURLs))
		if err := downloadAndMergeSingleStream(m3u8Info, job, tempDir, client); err != nil {
			downloadErr = err
			return downloadErr
		}
	}

	log.Info().Str("op", "live-stream/download").Msg("Download completed successfully")
	return nil
}

// downloadAndMergeSingleStream handles muxed playlists: size probe (with a
// ~1 MiB/segment fallback estimate), parallel segment download, then an
// ffmpeg merge into job.OutputPath.
func downloadAndMergeSingleStream(m3u8Info *M3U8Info, job *utils.DanzoJob, tempDir string, client *utils.DanzoHTTPClient) error {
	segmentURLs := m3u8Info.VideoSegmentURLs
	totalSize, segmentSizes, err := calculateTotalSize(segmentURLs, job.Connections, client)
	if err != nil {
		log.Warn().Str("op", "live-stream/download").Msgf("Could not calculate total size accurately: %v. Using estimate.", err)
		totalSize = int64(len(segmentURLs)) * 1024 * 1024 // assume ~1 MiB per segment
		segmentSizes = make([]int64, len(segmentURLs))
		for i := range segmentSizes {
			segmentSizes[i] = 1024 * 1024
		}
	}
	job.Metadata["totalSize"] = totalSize
	job.Metadata["segmentSizes"] = segmentSizes
	log.Debug().Str("op", "live-stream/download").Msgf("Total estimated size: %s", utils.FormatBytes(uint64(totalSize)))

	isFMP4 := detectFMP4Format(job.URL, segmentURLs)
	if isFMP4 {
		log.Debug().Str("op", "live-stream/download").Msg("Detected fMP4 format segments")
	}

	// Adapt per-segment progress increments to a cumulative total.
	var totalDownloaded int64
	wrappedProgressFunc := func(incrementalSize, _ int64) {
		newTotal := atomic.AddInt64(&totalDownloaded, incrementalSize)
		if job.ProgressFunc != nil {
			job.ProgressFunc(newTotal, totalSize)
		}
	}

	log.Info().Str("op", "live-stream/download").Msg("Starting parallel download of segments")
	segmentFiles, err := downloadSegmentsParallel(segmentURLs, tempDir, job.Connections, client, wrappedProgressFunc, totalSize, isFMP4)
	if err != nil {
		return fmt.Errorf("error downloading segments: %v", err)
	}
	log.Info().Str("op", "live-stream/download").Msg("All segments downloaded, merging with ffmpeg")
	if err := mergeSegments(segmentFiles, job.OutputPath, isFMP4, m3u8Info.VideoInitSegment, tempDir, client); err != nil {
		return fmt.Errorf("error merging segments: %v", err)
	}
	return nil
}

// downloadAndMergeSeparateStreams handles master playlists whose audio is
// a separate rendition. Video and audio are downloaded and merged
// independently; if exactly one of the two fails, the surviving stream is
// still saved to job.OutputPath and the failure is reported via the
// returned error.
func downloadAndMergeSeparateStreams(m3u8Info *M3U8Info, job *utils.DanzoJob, tempDir string, client *utils.DanzoHTTPClient) error {
	videoDir := filepath.Join(tempDir, "video")
	audioDir := filepath.Join(tempDir, "audio")
	if err := os.MkdirAll(videoDir, 0755); err != nil {
		return fmt.Errorf("error creating video directory: %v", err)
	}
	if err := os.MkdirAll(audioDir, 0755); err != nil {
		return fmt.Errorf("error creating audio directory: %v", err)
	}

	videoSegmentURLs := m3u8Info.VideoSegmentURLs
	audioSegmentURLs := m3u8Info.AudioSegmentURLs

	// Size probes are best-effort: fall back to ~1 MiB per video segment
	// and ~512 KiB per audio segment when HEAD requests fail.
	totalVideoSize, _, err := calculateTotalSize(videoSegmentURLs, job.Connections, client)
	if err != nil {
		log.Warn().Str("op", "live-stream/download").Msg("Could not calculate video size, using estimate")
		totalVideoSize = int64(len(videoSegmentURLs)) * 1024 * 1024
	}
	totalAudioSize, _, err := calculateTotalSize(audioSegmentURLs, job.Connections, client)
	if err != nil {
		log.Warn().Str("op", "live-stream/download").Msg("Could not calculate audio size, using estimate")
		totalAudioSize = int64(len(audioSegmentURLs)) * 512 * 1024
	}
	totalSize := totalVideoSize + totalAudioSize
	log.Debug().Str("op", "live-stream/download").Msgf("Total estimated size: %s (video: %s, audio: %s)",
		utils.FormatBytes(uint64(totalSize)), utils.FormatBytes(uint64(totalVideoSize)), utils.FormatBytes(uint64(totalAudioSize)))

	isVideoFMP4 := detectFMP4Format(job.URL, videoSegmentURLs)
	isAudioFMP4 := detectFMP4Format(job.URL, audioSegmentURLs)

	// Both streams report into one cumulative counter for progress.
	var totalDownloaded int64
	wrappedProgressFunc := func(incrementalDownloaded, _ int64) {
		newTotal := atomic.AddInt64(&totalDownloaded, incrementalDownloaded)
		if job.ProgressFunc != nil {
			job.ProgressFunc(newTotal, totalSize)
		}
	}

	log.Info().Str("op", "live-stream/download").Msg("Starting parallel download of video segments")
	videoFiles, videoErr := downloadSegmentsParallel(videoSegmentURLs, videoDir, job.Connections, client, wrappedProgressFunc, totalVideoSize, isVideoFMP4)

	log.Info().Str("op", "live-stream/download").Msg("Starting parallel download of audio segments")
	audioFiles, audioErr := downloadSegmentsParallel(audioSegmentURLs, audioDir, job.Connections, client, wrappedProgressFunc, totalAudioSize, isAudioFMP4)

	if videoErr != nil && audioErr != nil {
		return fmt.Errorf("both video and audio downloads failed - video: %v, audio: %v", videoErr, audioErr)
	}

	tempVideoPath := filepath.Join(tempDir, "video_temp.mp4")
	tempAudioPath := filepath.Join(tempDir, "audio_temp.m4a")

	var finalErr error

	if videoErr == nil {
		log.Info().Str("op", "live-stream/download").Msg("Merging video segments")
		if err := mergeSegments(videoFiles, tempVideoPath, isVideoFMP4, m3u8Info.VideoInitSegment, videoDir, client); err != nil {
			return fmt.Errorf("error merging video segments: %v", err)
		}
	}

	if audioErr == nil {
		log.Info().Str("op", "live-stream/download").Msg("Merging audio segments")
		if err := mergeSegments(audioFiles, tempAudioPath, isAudioFMP4, m3u8Info.AudioInitSegment, audioDir, client); err != nil {
			// Audio merge failed after a good video merge: salvage video-only.
			if videoErr == nil {
				if err := os.Rename(tempVideoPath, job.OutputPath); err != nil {
					return fmt.Errorf("error saving video-only output: %v", err)
				}
				return fmt.Errorf("audio merge failed, saved video-only: %v", err)
			}
			return fmt.Errorf("error merging audio segments: %v", err)
		}
	}

	if videoErr == nil && audioErr == nil {
		log.Info().Str("op", "live-stream/download").Msg("Merging video and audio streams")
		if err := mergeVideoAndAudio(tempVideoPath, tempAudioPath, job.OutputPath); err != nil {
			return fmt.Errorf("error merging video and audio: %v", err)
		}
	} else if videoErr == nil && audioErr != nil {
		log.Warn().Str("op", "live-stream/download").Msg("Audio download failed, saving video-only")
		if err := os.Rename(tempVideoPath, job.OutputPath); err != nil {
			return fmt.Errorf("error saving video-only output: %v", err)
		}
		finalErr = fmt.Errorf("audio download failed: %v", audioErr)
	} else if audioErr == nil && videoErr != nil {
		log.Warn().Str("op", "live-stream/download").Msg("Video download failed, saving audio-only")
		if err := os.Rename(tempAudioPath, job.OutputPath); err != nil {
			return fmt.Errorf("error saving audio-only output: %v", err)
		}
		finalErr = fmt.Errorf("video download failed: %v", videoErr)
	}

	return finalErr
}

// detectFMP4Format heuristically decides whether segments are fragmented
// MP4 (vs MPEG-TS) from URL markers in the manifest or the first segment.
func detectFMP4Format(manifestURL string, segmentURLs []string) bool {
	for _, marker := range []string{"sf=fmp4", "/fmp4/", "frag"} {
		if strings.Contains(manifestURL, marker) {
			return true
		}
	}
	if len(segmentURLs) > 0 {
		firstSegment := segmentURLs[0]
		for _, marker := range []string{"/fmp4/", ".m4s", "frag", ".mp4"} {
			if strings.Contains(firstSegment, marker) {
				return true
			}
		}
	}
	return false
}

// downloadSegmentsParallel fetches all segments with numWorkers goroutines
// into outputDir (named segment_%04d.ts / .m4s). The returned slice is
// ordered like segmentURLs. On failure each worker stops after its current
// job; the first error is returned and partial results are discarded.
func downloadSegmentsParallel(segmentURLs []string, outputDir string, numWorkers int, client *utils.DanzoHTTPClient, progressFunc func(int64, int64), totalSize int64, isFMP4 bool) ([]string, error) {
	var mu sync.Mutex
	var downloadErr error
	type segmentJob struct {
		index int
		url   string
	}
	jobCh := make(chan segmentJob, len(segmentURLs))
	for i, segURL := range segmentURLs { // renamed from "url" to avoid shadowing the net/url package name
		jobCh <- segmentJob{index: i, url: segURL}
	}
	close(jobCh)
	downloadedFiles := make([]string, len(segmentURLs))
	ext := ".ts"
	if isFMP4 {
		ext = ".m4s"
	}

	var wg sync.WaitGroup
	for range numWorkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobCh {
				outputPath := filepath.Join(outputDir, fmt.Sprintf("segment_%04d%s", job.index, ext))
				size, err := downloadSegment(job.url, outputPath, client)
				if err != nil {
					mu.Lock()
					if downloadErr == nil { // keep only the first error
						downloadErr = fmt.Errorf("error downloading segment %d: %v", job.index, err)
					}
					mu.Unlock()
					return // abandon this worker's remaining jobs
				}
				mu.Lock()
				downloadedFiles[job.index] = outputPath
				mu.Unlock()
				if progressFunc != nil {
					progressFunc(size, totalSize)
				}
			}
		}()
	}

	wg.Wait()
	if downloadErr != nil {
		return nil, downloadErr
	}
	return downloadedFiles, nil
}

// mergeSegments dispatches to the fMP4 or MPEG-TS merge strategy.
func mergeSegments(segmentFiles []string, outputPath string, isFMP4 bool, initSegment string, tempDir string, client *utils.DanzoHTTPClient) error {
	if isFMP4 {
		return mergeFMP4Segments(segmentFiles, outputPath, initSegment, tempDir, client)
	}
	return mergeTSSegments(segmentFiles, outputPath)
}

// mergeTSSegments concatenates MPEG-TS segments with ffmpeg's concat
// demuxer via a generated list file. NOTE(review): paths containing single
// quotes would need escaping for the concat demuxer list format.
func mergeTSSegments(segmentFiles []string, outputPath string) error {
	tempListFile := filepath.Join(filepath.Dir(outputPath), ".segment_list.txt")
	f, err := os.Create(tempListFile)
	if err != nil {
		return fmt.Errorf("error creating segment list file: %v", err)
	}
	defer os.Remove(tempListFile)
	for _, file := range segmentFiles {
		absPath, err := filepath.Abs(file)
		if err != nil {
			absPath = file // best effort: fall back to the path as given
		}
		fmt.Fprintf(f, "file '%s'\n", absPath)
	}
	if err := f.Close(); err != nil { // Close error was previously ignored; a truncated list would corrupt the merge
		return fmt.Errorf("error writing segment list file: %v", err)
	}
	cmd := exec.Command(
		"ffmpeg",
		"-f", "concat",
		"-safe", "0",
		"-i", tempListFile,
		"-c", "copy",
		"-y",
		outputPath,
	)
	log.Debug().Str("op", "live-stream/download").Msgf("Executing ffmpeg command: %s", cmd.String())
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Error().Str("op", "live-stream/download").Msgf("FFmpeg output:\n%s", string(output))
		return fmt.Errorf("ffmpeg error: %v\nOutput: %s", err, string(output))
	}
	return nil
}

// mergeFMP4Segments concatenates the init segment (downloaded on demand)
// and all fMP4 fragments into one file, then remuxes it with ffmpeg.
func mergeFMP4Segments(segmentFiles []string, outputPath string, initSegment string, tempDir string, client *utils.DanzoHTTPClient) error {
	tempConcatFile := filepath.Join(filepath.Dir(outputPath),
".concat_temp.m4s")
	defer os.Remove(tempConcatFile)
	log.Debug().Str("op", "live-stream/download").Msgf("Concatenating %d fMP4 segments", len(segmentFiles))
	// Build the concatenated .m4s inside a closure so a deferred Close
	// always runs (the original repeated outFile.Close() by hand on every
	// error path and never checked its result).
	writeConcat := func() error {
		outFile, err := os.Create(tempConcatFile)
		if err != nil {
			return fmt.Errorf("error creating temp concat file: %v", err)
		}
		defer outFile.Close()
		if initSegment != "" {
			log.Debug().Str("op", "live-stream/download").Msg("Downloading init segment")
			initPath := filepath.Join(tempDir, "init.mp4")
			if _, err := downloadSegment(initSegment, initPath, client); err != nil {
				return fmt.Errorf("error downloading init segment: %v", err)
			}
			initData, err := os.ReadFile(initPath)
			if err != nil {
				return fmt.Errorf("error reading init segment: %v", err)
			}
			if _, err := outFile.Write(initData); err != nil {
				return fmt.Errorf("error writing init segment: %v", err)
			}
			log.Debug().Str("op", "live-stream/download").Msgf("Init segment written (%d bytes)", len(initData))
		}
		for i, segmentFile := range segmentFiles {
			data, err := os.ReadFile(segmentFile)
			if err != nil {
				return fmt.Errorf("error reading segment %d: %v", i, err)
			}
			if _, err := outFile.Write(data); err != nil {
				return fmt.Errorf("error writing segment %d: %v", i, err)
			}
		}
		return outFile.Close() // surface flush errors; the deferred second Close is harmless
	}
	if err := writeConcat(); err != nil {
		return err
	}

	log.Debug().Str("op", "live-stream/download").Msg("Remuxing concatenated fMP4 segments")
	cmd := exec.Command(
		"ffmpeg",
		"-i", tempConcatFile,
		"-c", "copy",
		"-movflags", "+faststart",
		"-y",
		outputPath,
	)
	log.Debug().Str("op", "live-stream/download").Msgf("Executing ffmpeg command: %s", cmd.String())
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Error().Str("op", "live-stream/download").Msgf("FFmpeg failed with error: %v", err)
		log.Error().Str("op", "live-stream/download").Msgf("FFmpeg output:\n%s", string(output))
		return fmt.Errorf("ffmpeg error: %v\nOutput: %s", err, string(output))
	}
	return nil
}

// mergeVideoAndAudio muxes the separately downloaded video and audio
// files into outputPath with ffmpeg. aac_adtstoasc converts ADTS AAC
// (from TS segments) to the MP4-friendly form; streams are copied, not
// re-encoded.
func mergeVideoAndAudio(videoPath, audioPath, outputPath string) error {
	cmd := exec.Command(
		"ffmpeg",
		"-i", videoPath,
		"-i", audioPath,
		"-c", "copy",
		"-map", "0:v:0",
		"-map", "1:a:0",
		"-bsf:a:0", "aac_adtstoasc",
		"-movflags", "+faststart",
		"-y",
		outputPath,
	)
	log.Debug().Str("op", "live-stream/download").Msgf("Executing ffmpeg command: %s", cmd.String())
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Error().Str("op", "live-stream/download").Msgf("FFmpeg output:\n%s", string(output))
		return fmt.Errorf("ffmpeg error: %v\nOutput: %s", err, string(output))
	}
	return nil
}
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4=
github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= 11 | github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= 12 | github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= 13 | github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 14 | github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= 15 | github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= 16 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= 17 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= 18 | github.com/aws/aws-sdk-go-v2/config v1.29.12 h1:Y/2a+jLPrPbHpFkpAAYkVEtJmxORlXoo5k2g1fa2sUo= 19 | github.com/aws/aws-sdk-go-v2/config v1.29.12/go.mod h1:xse1YTjmORlb/6fhkWi8qJh3cvZi4JoVNhc+NbJt4kI= 20 | github.com/aws/aws-sdk-go-v2/credentials v1.17.65 h1:q+nV2yYegofO/SUXruT+pn4KxkxmaQ++1B/QedcKBFM= 21 | github.com/aws/aws-sdk-go-v2/credentials v1.17.65/go.mod h1:4zyjAuGOdikpNYiSGpsGz8hLGmUzlY8pc8r9QQ/RXYQ= 22 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= 23 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= 24 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= 25 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= 26 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= 27 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= 28 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 
h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= 29 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= 30 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= 31 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= 32 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= 33 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= 34 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= 35 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= 36 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= 37 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= 38 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= 39 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= 40 | github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4= 41 | github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= 42 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.2 h1:pdgODsAhGo4dvzC3JAG5Ce0PX8kWXrTZGx+jxADD+5E= 43 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.2/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= 44 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0 h1:90uX0veLKcdHVfvxhkWUQSCi5VabtwMLFutYiRke4oo= 45 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= 46 | 
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= 47 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= 48 | github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= 49 | github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 50 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= 51 | github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= 52 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= 53 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= 54 | github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= 55 | github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= 56 | github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= 57 | github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= 58 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= 59 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= 60 | github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= 61 | github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= 62 | github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk= 63 | github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= 64 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 65 | 
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 66 | github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= 67 | github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= 68 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 69 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 70 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 71 | github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= 72 | github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= 73 | github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= 74 | github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= 75 | github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= 76 | github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= 77 | github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= 78 | github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= 79 | github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= 80 | github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= 81 | github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= 82 | github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= 83 | github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= 84 | github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= 85 | 
github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY= 86 | github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= 87 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 88 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= 89 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= 90 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 91 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 92 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 93 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 94 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 95 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 96 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= 97 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= 98 | github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= 99 | github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= 100 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 101 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 102 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 103 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 104 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 105 | github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 106 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 107 | github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 108 | github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 109 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 110 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 111 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 112 | github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 113 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 114 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 115 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= 116 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 117 | github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= 118 | github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= 119 | github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= 120 | github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= 121 | github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= 122 | github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= 123 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 124 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 125 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 126 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 127 | 
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 128 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 129 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 130 | github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= 131 | github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= 132 | github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= 133 | github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= 134 | github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= 135 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 136 | github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= 137 | github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= 138 | github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 139 | github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= 140 | github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= 141 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= 142 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 143 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 144 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 145 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 146 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 147 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 148 | 
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 149 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 150 | github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= 151 | github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= 152 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= 153 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= 154 | golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 155 | golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= 156 | golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= 157 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= 158 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= 159 | golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 160 | golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= 161 | golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= 162 | golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= 163 | golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= 164 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 165 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 166 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 167 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 168 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 169 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 170 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 171 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 172 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 173 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 174 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 175 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 176 | golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= 177 | golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= 178 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 179 | golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= 180 | golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= 181 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 182 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 183 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 184 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 185 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 186 | gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= 187 | gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= 188 | 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 189 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 190 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 191 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 192 | --------------------------------------------------------------------------------