├── VERSION ├── backend ├── drive │ ├── test │ │ ├── files │ │ │ ├── example1.ods │ │ │ ├── example2.doc │ │ │ └── example3.odt │ │ └── about.json │ ├── drive_test.go │ ├── saInfo.go │ ├── saInfo_test.go │ ├── upload.go │ ├── metadata.go │ └── drive_internal_test.go └── all │ └── all.go ├── cmd ├── selfupdate │ ├── writable_unsupported.go │ ├── noselfupdate.go │ ├── writable_unix.go │ ├── writable_windows.go │ ├── selfupdate.md │ ├── selfupdate_test.go │ └── selfupdate.go ├── version │ ├── version_test.go │ └── version.go ├── all │ └── all.go └── copy │ └── copy.go ├── bin ├── tools.go ├── nfpm.yaml ├── upload-github ├── resource_windows.go ├── cross-compile.go └── get-github-release.go ├── gclone.go ├── .gitignore ├── .vscode └── launch.json ├── README.md ├── Makefile ├── go.mod └── .github └── workflows └── build.yml /VERSION: -------------------------------------------------------------------------------- 1 | v1.69.2-mod1.6.2 -------------------------------------------------------------------------------- /backend/drive/test/files/example1.ods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dogbutcat/gclone/HEAD/backend/drive/test/files/example1.ods -------------------------------------------------------------------------------- /backend/drive/test/files/example2.doc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dogbutcat/gclone/HEAD/backend/drive/test/files/example2.doc -------------------------------------------------------------------------------- /backend/drive/test/files/example3.odt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dogbutcat/gclone/HEAD/backend/drive/test/files/example3.odt -------------------------------------------------------------------------------- /cmd/selfupdate/writable_unsupported.go: 
-------------------------------------------------------------------------------- 1 | //go:build (plan9 || js) && !noselfupdate 2 | // +build plan9 js 3 | // +build !noselfupdate 4 | 5 | package selfupdate 6 | 7 | func writable(path string) bool { 8 | return true 9 | } 10 | -------------------------------------------------------------------------------- /cmd/selfupdate/noselfupdate.go: -------------------------------------------------------------------------------- 1 | //go:build noselfupdate 2 | 3 | package selfupdate 4 | 5 | import ( 6 | "github.com/rclone/rclone/lib/buildinfo" 7 | ) 8 | 9 | func init() { 10 | buildinfo.Tags = append(buildinfo.Tags, "noselfupdate") 11 | } 12 | -------------------------------------------------------------------------------- /cmd/selfupdate/writable_unix.go: -------------------------------------------------------------------------------- 1 | //go:build !windows && !plan9 && !js && !noselfupdate 2 | // +build !windows,!plan9,!js,!noselfupdate 3 | 4 | package selfupdate 5 | 6 | import ( 7 | "golang.org/x/sys/unix" 8 | ) 9 | 10 | func writable(path string) bool { 11 | return unix.Access(path, unix.W_OK) == nil 12 | } 13 | -------------------------------------------------------------------------------- /bin/tools.go: -------------------------------------------------------------------------------- 1 | // This file is no meaningful except for mod tidy keeping those indirect mod(s) avoid to be deleted. 2 | // those mod(s) is relevant with build action part, no infurence with local build or functionality 3 | // DO NOT remove those if needing github-action work. 
4 | package main 5 | 6 | import ( 7 | _ "github.com/dop251/scsu" 8 | _ "golang.org/x/mobile/event/key" 9 | ) 10 | -------------------------------------------------------------------------------- /cmd/selfupdate/writable_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows && !noselfupdate 2 | // +build windows,!noselfupdate 3 | 4 | package selfupdate 5 | 6 | import ( 7 | "os" 8 | ) 9 | 10 | func writable(path string) bool { 11 | info, err := os.Stat(path) 12 | const UserWritableBit = 128 13 | if err == nil { 14 | return info.Mode().Perm()&UserWritableBit != 0 15 | } 16 | return false 17 | } 18 | -------------------------------------------------------------------------------- /gclone.go: -------------------------------------------------------------------------------- 1 | // Sync files and directories to and from local and remote object stores 2 | // 3 | // Nick Craig-Wood 4 | package main 5 | 6 | import ( 7 | _ "github.com/dogbutcat/gclone/backend/all" // import all backends 8 | "github.com/rclone/rclone/cmd" 9 | _ "github.com/dogbutcat/gclone/cmd/all" // import all commands 10 | _ "github.com/rclone/rclone/lib/plugin" // import plugins 11 | ) 12 | 13 | func main() { 14 | cmd.Main() 15 | } 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | vendor/ 16 | 17 | *~ 18 | _junk/ 19 | rclone 20 | rclone.exe 21 | build 22 | docs/public 23 | rclone.iml 24 | .idea 25 | .history 26 | *.test 27 | *.log 28 | *.iml 29 | fuzz-build.zip 30 | *.orig 31 | *.rej 32 | .DS_Store 33 | 
resource_windows_*.syso 34 | -------------------------------------------------------------------------------- /bin/nfpm.yaml: -------------------------------------------------------------------------------- 1 | name: "gclone" 2 | arch: "{{.Arch}}" 3 | platform: "linux" 4 | version: "{{.Version}}" 5 | section: "default" 6 | priority: "extra" 7 | provides: 8 | - gclone 9 | maintainer: "dogbutcat" 10 | description: | 11 | gclone - "rsync for cloud storage" 12 | is a command line program to sync files and directories to and 13 | from most cloud providers. It can also mount, tree, ncdu and lots 14 | of other useful things. 15 | vendor: "gclone" 16 | homepage: "https://rclone.org" 17 | license: "MIT" 18 | contents: 19 | - src: ./gclone 20 | dst: /usr/bin/gclone 21 | -------------------------------------------------------------------------------- /backend/drive/drive_test.go: -------------------------------------------------------------------------------- 1 | // Test Drive filesystem interface 2 | 3 | package drive 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/rclone/rclone/fs" 9 | "github.com/rclone/rclone/fstest/fstests" 10 | ) 11 | 12 | // TestIntegration runs integration tests against the remote 13 | func TestIntegration(t *testing.T) { 14 | fstests.Run(t, &fstests.Opt{ 15 | RemoteName: "TestDrive:", 16 | NilObject: (*Object)(nil), 17 | ChunkedUpload: fstests.ChunkedUploadConfig{ 18 | MinChunkSize: minChunkSize, 19 | CeilChunkSize: fstests.NextPowerOfTwo, 20 | }, 21 | }) 22 | } 23 | 24 | func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { 25 | return f.setUploadChunkSize(cs) 26 | } 27 | 28 | func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { 29 | return f.setUploadCutoff(cs) 30 | } 31 | 32 | var ( 33 | _ fstests.SetUploadChunkSizer = (*Fs)(nil) 34 | _ fstests.SetUploadCutoffer = (*Fs)(nil) 35 | ) 36 | -------------------------------------------------------------------------------- /bin/upload-github: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Upload a release 4 | # 5 | # Needs the gh tool from https://github.com/cli/cli 6 | 7 | set -e 8 | 9 | REPO="dogbutcat/gclone" 10 | 11 | if [ "$1" == "" ]; then 12 | echo "Syntax: $0 Version" 13 | exit 1 14 | fi 15 | VERSION="$1" 16 | #ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g') 17 | 18 | function create_release() { 19 | cat > "/tmp/${VERSION}-release-notes" < if need `mount` function, cgofuse is required, 40 | 41 | ## Instructions 42 | ### 1.service_account_file_path Configuration 43 | add `service_account_file_path` Configuration.For dynamic replacement service_account_file(sa file). Replace configuration when `rateLimitExceeded` error occurs 44 | `rclone.conf` example: 45 | ``` 46 | [gc] 47 | type = drive 48 | scope = drive 49 | service_account_file = /root/accounts/1.json 50 | service_account_file_path = /root/accounts/ 51 | root_folder_id = root 52 | ``` 53 | `/root/accounts/` Folder contains multiple access and edit permissions ***service account file(*.json)***. 54 | 55 | ### 2.Support incoming id 56 | If the original rclone is across team disks or shared folders, multiple configuration drive letters are required for operation. 57 | gclone supports incoming id operation 58 | ``` 59 | gclone copy gc:{folder_id1} gc:{folder_id2} --drive-server-side-across-configs 60 | ``` 61 | folder_id1 can be:Common directory, shared directory, team disk. 
62 | 63 | ``` 64 | gclone copy gc:{folder_id1} gc:{folder_id2}/media/ --drive-server-side-across-configs 65 | 66 | ``` 67 | 68 | ``` 69 | gclone copy gc:{share_field_id} gc:{folder_id2} --drive-server-side-across-configs 70 | ``` 71 | 72 | ### 3.Support command line option `--drive-service-account-file-path` 73 | 74 | ```sh 75 | gclone copy gc:{share_field_id} gc:{folder_id2} --drive-service-account-file-path=${SOMEWHERE_STORE_SAs} 76 | ``` 77 | 78 | ### 4.Support command line option `--drive-rolling-sa` and `--drive-rolling-count` 79 | 80 | ```sh 81 | gclone copy gc:{share_field_id} gc:{folder_id2} --drive-rolling-sa --drive-rolling-count=1 82 | ``` 83 | 84 | > What is rolling sa? 85 | 86 | - This option is mainly for backing up large drives. It is intended to use all SAs in a more balanced way, rather than consuming one SA at a time, as I found some weird scenarios when an SA was exhausted. 87 | 88 | > What is rolling count? 89 | 90 | - Every action in rclone uses a goroutine; this value is actually the waitgroup count, and the same SA will be used within an action. 91 | The default is 1; it is not recommended to set a value over 4 — in my tests, bigger files work better with a smaller count. 92 | 93 | ### 5.Support command line option `--drive-random-pick-sa` 94 | 95 | ```sh 96 | gclone copy gc:{share_field_id} gc:{folder_id2} --drive-random-pick-sa --drive-rolling-sa --drive-rolling-count=1 97 | ``` 98 | 99 | - Takes a random SA file from the `service account file path` config instead of the configured one. A good companion to the `rolling sa` config. 100 | 101 | ### 6.Support new command `gselfupdate` and `gversion` 102 | 103 | From `1.64.0-mod1.6.0` gclone adds a new command to update itself, working in parallel with rclone's `selfupdate` 104 | 105 | ```sh 106 | gclone gselfupdate [--check] [--output [of]] [--version [v]] [--package [zip|deb|rpm]] 107 | ``` 108 | 109 | ## CAVEATS 110 | 111 | Creating Service Accounts (SAs) allows you to bypass some of Google's quotas.
Tools like autorclone and clonebot (gclone) automatically rotate SAs for continuous multi-terabyte file transfer. 112 | 113 | > Quotas SAs **CAN** bypass: 114 | 115 | * Google 'copy/upload' quota (750GB/account/day) 116 | * Google 'download' quota (10TB/account/day) 117 | 118 | > Quotas SAs **CANNOT** bypass: 119 | 120 | * Google Shared Drive quota (~20TB/drive/day) 121 | * Google file owner quota (~2TB/day) 122 | -------------------------------------------------------------------------------- /backend/drive/saInfo.go: -------------------------------------------------------------------------------- 1 | package drive 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "os" 7 | "path" 8 | "strings" 9 | "time" 10 | ) 11 | 12 | type Sa struct { 13 | saPath string 14 | isStale bool 15 | } 16 | 17 | type SaInfo struct { 18 | sas map[int]Sa 19 | activeIdx int 20 | saPool map[string]int 21 | } 22 | 23 | func (sa *SaInfo) updateSas(data []string, activeSa string) { 24 | if len(data) == 0 || activeSa == "" { 25 | return 26 | } 27 | convSas := make(map[int]Sa) 28 | convData := make(map[string]int) 29 | 30 | for i, v := range data { 31 | convSas[i] = Sa{saPath: v, isStale: false} 32 | convData[v] = i 33 | } 34 | sa.sas = convSas 35 | sa.saPool = convData 36 | 37 | if result := sa.findIdxByStrInPool(activeSa); result != -1 { 38 | sa.activeIdx = result 39 | } else { 40 | existLen := len(sa.sas) 41 | sa.sas[existLen] = Sa{saPath: activeSa, isStale: false} 42 | sa.saPool[activeSa] = existLen 43 | sa.activeIdx = existLen 44 | } 45 | } 46 | 47 | func (sa *SaInfo) findIdxByStrInPool(str string) int { 48 | result := -1 49 | for k, v := range sa.saPool { 50 | if k == str { 51 | result = v 52 | } 53 | } 54 | return result 55 | } 56 | 57 | func (sa *SaInfo) findIdxByStr(str string) int { 58 | result := -1 59 | for k, v := range sa.sas { 60 | if v.saPath == str { 61 | result = k 62 | } 63 | } 64 | return result 65 | } 66 | 67 | func (sa *SaInfo) rollup() string { 68 | existLen := len(sa.sas) 
69 | nextIdx := -1 70 | for i := sa.activeIdx + 1; i < existLen; i++ { 71 | if !sa.sas[i].isStale { 72 | nextIdx = i 73 | break 74 | } 75 | } 76 | if nextIdx == -1 { 77 | for i := 0; i < sa.activeIdx; i++ { 78 | if !sa.sas[i].isStale { 79 | nextIdx = i 80 | break 81 | } 82 | } 83 | } 84 | if nextIdx == -1 { 85 | return "" 86 | } else { 87 | return sa.sas[nextIdx].saPath 88 | } 89 | 90 | } 91 | 92 | func (sa *SaInfo) activeSa(saPath string) { 93 | if entry, ok := sa.saPool[saPath]; ok { 94 | sa.activeIdx = entry 95 | } 96 | } 97 | 98 | func (sa *SaInfo) staleSa(target string) (bool, string) { 99 | if target == "" { 100 | target = sa.sas[sa.activeIdx].saPath 101 | } 102 | oldIdx := sa.saPool[target] 103 | if entry, ok := sa.sas[oldIdx]; ok { 104 | entry.isStale = true 105 | sa.sas[oldIdx] = entry 106 | } 107 | delete(sa.saPool, target) 108 | if sa.isPoolEmpty() { 109 | sa.activeIdx = -1 110 | return true, "" 111 | } 112 | if ret := sa.randomPick(); ret != -1 { 113 | sa.activeIdx = ret 114 | return false, sa.sas[ret].saPath 115 | } else { 116 | return true, "" 117 | } 118 | 119 | } 120 | 121 | func (sa *SaInfo) randomPick() int { 122 | existLen := len(sa.saPool) 123 | if existLen == 0 { 124 | return -1 125 | } 126 | 127 | seed := time.Now().UnixNano() + int64(rand.Intn(1000000)) 128 | rand_source := rand.NewSource(seed) 129 | rand_instance := rand.New(rand_source) 130 | r := rand_instance.Intn(existLen) 131 | 132 | // fmt.Println(time.Now().UnixNano()) 133 | // fmt.Println("seed: ", seed) 134 | // fmt.Println("random val:", r) 135 | // fmt.Println(time.Now().UnixNano()) 136 | 137 | var nextIdx int 138 | for _, v := range sa.saPool { 139 | if r == 0 { 140 | nextIdx = v 141 | break 142 | } 143 | r-- 144 | } 145 | return nextIdx 146 | } 147 | 148 | func (sa *SaInfo) isPoolEmpty() bool { 149 | if len(sa.saPool) == 0 { 150 | return true 151 | } else { 152 | return false 153 | } 154 | } 155 | 156 | func (sa *SaInfo) revertStaleSa(target string) { 157 | if target == "" { 158 
| return 159 | } 160 | if oldIdx := sa.findIdxByStr(target); oldIdx != -1 { 161 | if entry, ok := sa.sas[oldIdx]; ok { 162 | entry.isStale = false 163 | sa.saPool[target] = oldIdx 164 | sa.sas[oldIdx] = entry 165 | } 166 | } 167 | 168 | } 169 | 170 | func (sa *SaInfo) loadInfoFromDir(dirPath string, activeSa string) { 171 | var fileNames []string 172 | pathSeparator := string(os.PathSeparator) 173 | if !strings.HasSuffix(dirPath, pathSeparator) { 174 | dirPath += pathSeparator 175 | } 176 | 177 | dir, err := os.Open(dirPath) 178 | if err != nil { 179 | fmt.Println("read ServiceAccount Folder error") 180 | } 181 | 182 | defer dir.Close() 183 | 184 | dir_list, err := dir.ReadDir(-1) 185 | 186 | if err != nil { 187 | fmt.Println("read ServiceAccountFilePath Files error") 188 | } 189 | for _, v := range dir_list { 190 | filePath := fmt.Sprintf("%s%s", dirPath, v.Name()) 191 | if path.Ext(filePath) == ".json" { 192 | //fmt.Println(filePath) 193 | fileNames = append(fileNames, filePath) 194 | } 195 | } 196 | sa.updateSas(fileNames, activeSa) 197 | } 198 | -------------------------------------------------------------------------------- /cmd/copy/copy.go: -------------------------------------------------------------------------------- 1 | // Package copy provides the copy command. 
2 | package copy 3 | 4 | import ( 5 | "context" 6 | "strings" 7 | 8 | "github.com/rclone/rclone/cmd" 9 | "github.com/rclone/rclone/fs/config/flags" 10 | "github.com/rclone/rclone/fs/operations" 11 | "github.com/rclone/rclone/fs/operations/operationsflags" 12 | "github.com/rclone/rclone/fs/sync" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var ( 17 | createEmptySrcDirs = false 18 | loggerOpt = operations.LoggerOpt{} 19 | loggerFlagsOpt = operationsflags.AddLoggerFlagsOptions{} 20 | ) 21 | 22 | func init() { 23 | cmd.Root.AddCommand(commandDefinition) 24 | cmdFlags := commandDefinition.Flags() 25 | flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy", "") 26 | operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt) 27 | loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt) 28 | } 29 | 30 | var commandDefinition = &cobra.Command{ 31 | Use: "copy source:path dest:path", 32 | Short: `Copy files from source to dest, skipping identical files.`, 33 | // Note: "|" will be replaced by backticks below 34 | Long: strings.ReplaceAll(`Copy the source to the destination. Does not transfer files that are 35 | identical on source and destination, testing by size and modification 36 | time or MD5SUM. Doesn't delete files from the destination. If you 37 | want to also delete files from destination, to make it match source, 38 | use the [sync](/commands/rclone_sync/) command instead. 39 | 40 | Note that it is always the contents of the directory that is synced, 41 | not the directory itself. So when source:path is a directory, it's the 42 | contents of source:path that are copied, not the directory name and 43 | contents. 44 | 45 | To copy single files, use the [copyto](/commands/rclone_copyto/) 46 | command instead. 47 | 48 | If dest:path doesn't exist, it is created and the source:path contents 49 | go there. 
50 | 51 | For example 52 | 53 | rclone copy source:sourcepath dest:destpath 54 | 55 | Let's say there are two files in sourcepath 56 | 57 | sourcepath/one.txt 58 | sourcepath/two.txt 59 | 60 | This copies them to 61 | 62 | destpath/one.txt 63 | destpath/two.txt 64 | 65 | Not to 66 | 67 | destpath/sourcepath/one.txt 68 | destpath/sourcepath/two.txt 69 | 70 | If you are familiar with |rsync|, rclone always works as if you had 71 | written a trailing |/| - meaning "copy the contents of this directory". 72 | This applies to all commands and whether you are talking about the 73 | source or destination. 74 | 75 | See the [--no-traverse](/docs/#no-traverse) option for controlling 76 | whether rclone lists the destination directory or not. Supplying this 77 | option when copying a small number of files into a large destination 78 | can speed transfers up greatly. 79 | 80 | For example, if you have many files in /path/to/src but only a few of 81 | them change every day, you can copy all the files which have changed 82 | recently very efficiently like this: 83 | 84 | rclone copy --max-age 24h --no-traverse /path/to/src remote: 85 | 86 | 87 | Rclone will sync the modification times of files and directories if 88 | the backend supports it. If metadata syncing is required then use the 89 | |--metadata| flag. 90 | 91 | Note that the modification time and metadata for the root directory 92 | will **not** be synced. See https://github.com/rclone/rclone/issues/7652 93 | for more info. 94 | 95 | **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics. 96 | 97 | **Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything. 
98 | 99 | `, "|", "`") + operationsflags.Help(), 100 | Annotations: map[string]string{ 101 | "groups": "Copy,Filter,Listing,Important", 102 | }, 103 | Run: func(command *cobra.Command, args []string) { 104 | cmd.CheckArgs(2, 2, command, args) 105 | fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args) 106 | if len(fsrc.Root()) > 7 && fsrc.Root()[0:7] == "isFile:" { 107 | srcFileName = fsrc.Root()[7:] 108 | } 109 | cmd.Run(true, true, command, func() error { 110 | ctx := context.Background() 111 | close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt) 112 | if err != nil { 113 | return err 114 | } 115 | defer close() 116 | 117 | if loggerFlagsOpt.AnySet() { 118 | ctx = operations.WithSyncLogger(ctx, loggerOpt) 119 | } 120 | 121 | if srcFileName == "" { 122 | return sync.CopyDir(ctx, fdst, fsrc, createEmptySrcDirs) 123 | } 124 | return operations.CopyFile(ctx, fdst, fsrc, srcFileName, srcFileName) 125 | }) 126 | }, 127 | } 128 | -------------------------------------------------------------------------------- /bin/resource_windows.go: -------------------------------------------------------------------------------- 1 | // Utility program to generate Gclone-specific Windows resource system object 2 | // file (.syso), that can be picked up by a following go build for embedding 3 | // version information and icon resources into a rclone binary. 4 | // 5 | // Run it with "go generate", or "go run" to be able to customize with 6 | // command-line flags. Note that this program is intended to be run directly 7 | // from its original location in the source tree: Default paths are absolute 8 | // within the current source tree, which is convenient because it makes it 9 | // oblivious to the working directory, and it gives identical result whether 10 | // run by "go generate" or "go run", but it will not make sense if this 11 | // program's source is moved out from the source tree. 
12 | // 13 | // Can be used for rclone.exe (default), and other binaries such as 14 | // librclone.dll (must be specified with flag -binary). 15 | // 16 | 17 | //go:generate go run resource_windows.go 18 | //go:build tools 19 | 20 | package main 21 | 22 | import ( 23 | "flag" 24 | "fmt" 25 | "log" 26 | "path" 27 | "runtime" 28 | "strings" 29 | 30 | "github.com/coreos/go-semver/semver" 31 | "github.com/josephspurrier/goversioninfo" 32 | "github.com/rclone/rclone/fs" 33 | ) 34 | 35 | func main() { 36 | // Get path of directory containing the current source file to use for absolute path references within the code tree (as described above) 37 | projectDir := "" 38 | _, sourceFile, _, ok := runtime.Caller(0) 39 | if ok { 40 | projectDir = path.Dir(path.Dir(sourceFile)) // Root of the current project working directory 41 | } 42 | 43 | // Define flags 44 | binary := flag.String("binary", "gclone.exe", `The name of the binary to generate resource for, e.g. "gclone.exe" or "libgclone.dll"`) 45 | arch := flag.String("arch", runtime.GOARCH, `Architecture of resource file, or the target GOARCH, "386", "amd64", "arm", or "arm64"`) 46 | version := flag.String("version", fs.Version, "Version number or tag name") 47 | icon := flag.String("icon", path.Join(projectDir, "graphics/logo/ico/logo_symbol_color.ico"), "Path to icon file to embed in an .exe binary") 48 | dir := flag.String("dir", projectDir, "Path to output directory where to write the resulting system object file (.syso), with a default name according to -arch (resource_windows_.syso), only considered if not -syso is specified") 49 | syso := flag.String("syso", "", "Path to output resource system object file (.syso) to be created/overwritten, ignores -dir") 50 | 51 | // Parse command-line flags 52 | flag.Parse() 53 | 54 | // Handle default value for -file which depends on optional -dir and -arch 55 | if *syso == "" { 56 | // Use default filename, which includes target GOOS (hardcoded "windows") 57 | // and GOARCH (from 
argument -arch) as suffix, to avoid any race conditions, 58 | // and also this will be recognized by go build when it is consuming the 59 | // .syso file and will only be used for builds with matching os/arch. 60 | *syso = path.Join(*dir, fmt.Sprintf("resource_windows_%s.syso", *arch)) 61 | } 62 | 63 | // Parse version/tag string argument as a SemVer 64 | stringVersion := strings.TrimPrefix(*version, "v") 65 | semanticVersion, err := semver.NewVersion(stringVersion) 66 | if err != nil { 67 | log.Fatalf("Invalid version number: %v", err) 68 | } 69 | 70 | // Extract binary extension 71 | binaryExt := path.Ext(*binary) 72 | 73 | // Create the version info configuration container 74 | vi := &goversioninfo.VersionInfo{} 75 | 76 | // FixedFileInfo 77 | vi.FixedFileInfo.FileOS = "040004" // VOS_NT_WINDOWS32 78 | if strings.EqualFold(binaryExt, ".exe") { 79 | vi.FixedFileInfo.FileType = "01" // VFT_APP 80 | } else if strings.EqualFold(binaryExt, ".dll") { 81 | vi.FixedFileInfo.FileType = "02" // VFT_DLL 82 | } else { 83 | log.Fatalf("Specified binary must have extension .exe or .dll") 84 | } 85 | // FixedFileInfo.FileVersion 86 | vi.FixedFileInfo.FileVersion.Major = int(semanticVersion.Major) 87 | vi.FixedFileInfo.FileVersion.Minor = int(semanticVersion.Minor) 88 | vi.FixedFileInfo.FileVersion.Patch = int(semanticVersion.Patch) 89 | vi.FixedFileInfo.FileVersion.Build = 0 90 | // FixedFileInfo.ProductVersion 91 | vi.FixedFileInfo.ProductVersion.Major = int(semanticVersion.Major) 92 | vi.FixedFileInfo.ProductVersion.Minor = int(semanticVersion.Minor) 93 | vi.FixedFileInfo.ProductVersion.Patch = int(semanticVersion.Patch) 94 | vi.FixedFileInfo.ProductVersion.Build = 0 95 | 96 | // StringFileInfo 97 | vi.StringFileInfo.CompanyName = "https://rclone.org" 98 | vi.StringFileInfo.ProductName = "Gclone" 99 | vi.StringFileInfo.FileDescription = "Gclone" 100 | vi.StringFileInfo.InternalName = (*binary)[:len(*binary)-len(binaryExt)] 101 | vi.StringFileInfo.OriginalFilename = *binary 
102 | vi.StringFileInfo.LegalCopyright = "The Gclone Authors" 103 | vi.StringFileInfo.FileVersion = stringVersion 104 | vi.StringFileInfo.ProductVersion = stringVersion 105 | 106 | // Icon (only relevant for .exe, not .dll) 107 | if *icon != "" && strings.EqualFold(binaryExt, ".exe") { 108 | vi.IconPath = *icon 109 | } 110 | 111 | // Build native structures from the configuration data 112 | vi.Build() 113 | 114 | // Write the native structures as binary data to a buffer 115 | vi.Walk() 116 | 117 | // Write the binary data buffer to file 118 | if err := vi.WriteSyso(*syso, *arch); err != nil { 119 | log.Fatalf(`Failed to generate Windows %s resource system object file for %v with path "%v": %v`, *arch, *binary, *syso, err) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /backend/drive/saInfo_test.go: -------------------------------------------------------------------------------- 1 | package drive 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestUpdate(t *testing.T) { 11 | a := &SaInfo{} 12 | b := []string{"a", "b", "c", "d"} 13 | 14 | a.updateSas(b, "a") 15 | assert.Equal(t, 0, a.activeIdx) 16 | a.updateSas(b, "d") 17 | assert.Equal(t, 3, a.activeIdx) 18 | a.updateSas(b, "e") 19 | assert.Equal(t, 4, a.activeIdx) 20 | } 21 | 22 | func TestActive(t *testing.T) { 23 | a := &SaInfo{} 24 | b := []string{"a", "b", "c", "d"} 25 | a.updateSas(b, "a") 26 | 27 | a.activeSa("c") 28 | assert.Equal(t, 2, a.activeIdx) 29 | a.activeSa("f") 30 | assert.Equal(t, 2, a.activeIdx) 31 | } 32 | 33 | func TestStale(t *testing.T) { 34 | a := &SaInfo{} 35 | b := []string{"a", "b", "c", "d"} 36 | a.updateSas(b, "a") 37 | 38 | err, newOne := a.staleSa("") 39 | assert.Equal(t, false, err) 40 | fmt.Println(newOne) 41 | assert.NotEqual(t, "a", newOne) 42 | assert.Equal(t, 3, len(a.saPool)) 43 | assert.Equal(t, 4, len(a.sas)) 44 | 45 | a.activeSa(newOne) 46 | assert.NotEqual(t, 0, 
a.activeIdx) 47 | 48 | err, newOne = a.staleSa("") 49 | fmt.Println(err) 50 | assert.Equal(t, false, err) 51 | assert.Equal(t, 2, len(a.saPool)) 52 | a.activeSa(newOne) 53 | fmt.Println(a.activeIdx) 54 | } 55 | 56 | func TestStaleEnd(t *testing.T) { 57 | a := &SaInfo{} 58 | b := []string{"a", "b"} 59 | a.updateSas(b, "a") 60 | 61 | err, newOne := a.staleSa("") 62 | assert.Equal(t, false, err) 63 | assert.NotEqual(t, "a", newOne) 64 | assert.Equal(t, 1, len(a.saPool)) 65 | assert.Equal(t, true, a.sas[0].isStale) 66 | a.activeSa(newOne) 67 | 68 | err, newOne = a.staleSa("") 69 | assert.Equal(t, true, err) 70 | assert.Equal(t, "", newOne) 71 | } 72 | 73 | func TestRollingDirect(t *testing.T) { 74 | a := &SaInfo{} 75 | b := []string{"a", "b", "c"} 76 | a.updateSas(b, "a") 77 | 78 | nextSa := a.rollup() 79 | assert.NotEqual(t, "a", nextSa) 80 | assert.Equal(t, "b", nextSa) 81 | a.activeSa(nextSa) 82 | assert.NotEqual(t, 0, a.activeIdx) 83 | assert.NotEqual(t, true, a.sas[0].isStale) 84 | assert.Equal(t, 1, a.activeIdx) 85 | 86 | nextSa = a.rollup() 87 | assert.NotEqual(t, "b", nextSa) 88 | assert.Equal(t, "c", nextSa) 89 | a.activeSa(nextSa) 90 | assert.NotEqual(t, 1, a.activeIdx) 91 | assert.NotEqual(t, true, a.sas[1].isStale) 92 | assert.Equal(t, 2, a.activeIdx) 93 | 94 | nextSa = a.rollup() 95 | assert.NotEqual(t, "c", nextSa) 96 | assert.Equal(t, "a", nextSa) 97 | a.activeSa(nextSa) 98 | assert.NotEqual(t, 2, a.activeIdx) 99 | assert.NotEqual(t, true, a.sas[2].isStale) 100 | assert.Equal(t, 0, a.activeIdx) 101 | } 102 | 103 | func TestRollingWithStale(t *testing.T) { 104 | a := &SaInfo{} 105 | b := []string{"a", "b", "c", "d"} 106 | a.updateSas(b, "a") 107 | 108 | err, newOne := a.staleSa("") 109 | assert.Equal(t, false, err) 110 | fmt.Println(newOne) 111 | a.activeSa(newOne) 112 | assert.NotEqual(t, "a", newOne) 113 | 114 | nextSa := a.rollup() 115 | fmt.Println("nextSa: ", nextSa) 116 | a.activeSa(nextSa) 117 | assert.NotEqual(t, 0, a.activeIdx) 118 | 119 | nextSa 
= a.rollup() 120 | fmt.Println("nextSa: ", nextSa) 121 | idx := a.saPool[nextSa] 122 | a.activeSa(nextSa) 123 | assert.NotEqual(t, 0, a.activeIdx) 124 | 125 | err, newOne = a.staleSa("") 126 | assert.Equal(t, false, err) 127 | fmt.Println(newOne) 128 | a.activeSa(newOne) 129 | assert.NotEqual(t, "a", newOne) 130 | 131 | nextSa = a.rollup() 132 | fmt.Println("nextSa: ", nextSa) 133 | assert.NotEqual(t, 0, a.activeIdx) 134 | assert.NotEqual(t, idx, a.activeIdx) 135 | a.activeSa(nextSa) 136 | 137 | nextSa = a.rollup() 138 | fmt.Println("nextSa: ", nextSa) 139 | a.activeSa(nextSa) 140 | assert.NotEqual(t, 0, a.activeIdx) 141 | assert.NotEqual(t, idx, a.activeIdx) 142 | idx = a.saPool[nextSa] 143 | 144 | err, newOne = a.staleSa("") 145 | assert.Equal(t, false, err) 146 | fmt.Println(newOne) 147 | a.activeSa(newOne) 148 | assert.NotEqual(t, "a", newOne) 149 | 150 | nextSa = a.rollup() 151 | fmt.Println("nextSa: ", nextSa) 152 | a.activeSa(nextSa) 153 | assert.NotEqual(t, 0, a.activeIdx) 154 | assert.NotEqual(t, idx, a.activeIdx) 155 | } 156 | 157 | func TestEmptyInit(t *testing.T) { 158 | a := &SaInfo{} 159 | b := []string{} 160 | a.updateSas(b, "") 161 | 162 | assert.Equal(t, true, a.isPoolEmpty()) 163 | } 164 | 165 | func TestRevertStaleSa(t *testing.T) { 166 | a := &SaInfo{} 167 | b := []string{"a", "b", "c", "d"} 168 | a.updateSas(b, "a") 169 | 170 | _, step2Sa := a.staleSa("") 171 | a.activeSa(step2Sa) 172 | step2Idx := a.activeIdx 173 | 174 | assert.NotEqual(t, 0, a.activeIdx) 175 | assert.Equal(t, step2Sa, a.sas[a.activeIdx].saPath) 176 | 177 | _, step3Sa := a.staleSa("") 178 | a.activeSa(step3Sa) 179 | assert.NotEqual(t, step2Idx, a.activeIdx) 180 | assert.Equal(t, step3Sa, a.sas[a.activeIdx].saPath) 181 | assert.Equal(t, true, a.sas[0].isStale) 182 | assert.Equal(t, true, a.sas[step2Idx].isStale) 183 | 184 | a.revertStaleSa("a") 185 | assert.Equal(t, false, a.sas[0].isStale) 186 | assert.Equal(t, true, a.sas[step2Idx].isStale) 187 | 188 | a.revertStaleSa("f") 
189 | assert.Equal(t, false, a.sas[0].isStale) 190 | assert.Equal(t, true, a.sas[step2Idx].isStale) 191 | 192 | } 193 | 194 | func TestRandomPick(t *testing.T) { 195 | a := &SaInfo{} 196 | b := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} 197 | a.updateSas(b, "b") 198 | 199 | fmt.Println(a.randomPick()) 200 | fmt.Println(a.randomPick()) 201 | fmt.Println(a.randomPick()) 202 | fmt.Println(a.randomPick()) 203 | fmt.Println(a.randomPick()) 204 | fmt.Println(a.randomPick()) 205 | } 206 | -------------------------------------------------------------------------------- /cmd/version/version.go: -------------------------------------------------------------------------------- 1 | // Package version provides the version command. 2 | package version 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "runtime" 11 | "strings" 12 | "time" 13 | 14 | "github.com/coreos/go-semver/semver" 15 | "github.com/rclone/rclone/cmd" 16 | "github.com/rclone/rclone/fs" 17 | "github.com/rclone/rclone/fs/config/flags" 18 | "github.com/rclone/rclone/fs/fshttp" 19 | "github.com/rclone/rclone/lib/buildinfo" 20 | "github.com/spf13/cobra" 21 | ) 22 | 23 | var ( 24 | check = false 25 | ) 26 | 27 | func init() { 28 | cmd.Root.AddCommand(commandDefinition) 29 | cmdFlags := commandDefinition.Flags() 30 | flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version", "") 31 | } 32 | 33 | var commandDefinition = &cobra.Command{ 34 | Use: "gversion", 35 | Short: `Show the version number.`, 36 | Long: ` 37 | Show the gclone version number, the go version, the build target 38 | OS and architecture, the runtime OS and kernel version and bitness, 39 | build tags and the type of executable (static or dynamic). 
40 | 41 | For example: 42 | 43 | $ gclone gversion 44 | gclone v1.64.0-mod1.6.0 45 | - os/version: ubuntu 18.04 (64 bit) 46 | - os/kernel: 4.15.0-136-generic (x86_64) 47 | - os/type: linux 48 | - os/arch: amd64 49 | - go/version: go1.16 50 | - go/linking: static 51 | - go/tags: none 52 | 53 | Note: before gclone version 1.64 the os/type and os/arch lines were merged, 54 | and the "go/version" line was tagged as "go version". 55 | 56 | If you supply the --check flag, then it will do an online check to 57 | compare your version with the latest release and the latest beta. 58 | 59 | $ gclone gversion --check 60 | yours: v1.64.0-mod1.6.0 61 | latest: v1.67.0-mod1.6.2 (released 2024-07-25) 62 | 63 | Or 64 | 65 | $ gclone gversion --check 66 | yours: v1.64.0-mod1.6.0 67 | latest: v1.67.0-mod1.6.2 (released 2024-07-25) 68 | upgrade: https://github.com/dogbutcat/gclone/releases/latest 69 | 70 | `, 71 | Annotations: map[string]string{ 72 | "versionIntroduced": "v1.64", 73 | }, 74 | Run: func(command *cobra.Command, args []string) { 75 | ctx := context.Background() 76 | cmd.CheckArgs(0, 0, command, args) 77 | if check { 78 | CheckVersion(ctx) 79 | } else { 80 | ShowVersion() 81 | } 82 | }, 83 | } 84 | 85 | // copy code from `cmd.ShowVersion` for customization 86 | func ShowVersion() { 87 | osVersion, osKernel := buildinfo.GetOSVersion() 88 | if osVersion == "" { 89 | osVersion = "unknown" 90 | } 91 | if osKernel == "" { 92 | osKernel = "unknown" 93 | } 94 | 95 | linking, tagString := buildinfo.GetLinkingAndTags() 96 | 97 | arch := buildinfo.GetArch() 98 | 99 | fmt.Printf("gclone %s\n", fs.Version) 100 | fmt.Printf("- os/version: %s\n", osVersion) 101 | fmt.Printf("- os/kernel: %s\n", osKernel) 102 | fmt.Printf("- os/type: %s\n", runtime.GOOS) 103 | fmt.Printf("- os/arch: %s\n", arch) 104 | fmt.Printf("- go/version: %s\n", runtime.Version()) 105 | fmt.Printf("- go/linking: %s\n", linking) 106 | fmt.Printf("- go/tags: %s\n", tagString) 107 | } 108 | 109 | // strip a leading v 
off the string 110 | func stripV(s string) (string, string) { 111 | if len(s) > 0 && s[0] == 'v' { 112 | delimeter := "-" 113 | if strings.Contains(s, "-mod") { 114 | delimeter = "-mod" 115 | } 116 | p := strings.Split(s[1:], delimeter) 117 | if p[1] == "DEV" { 118 | p[1] = "1.0.0" 119 | } 120 | return p[0], p[1] 121 | } 122 | return s, "1.0.0" 123 | } 124 | 125 | // GetVersion gets the version available for download 126 | func GetVersion(ctx context.Context, url string) (v *semver.Version, vs string, date time.Time, err error) { 127 | resp, err := fshttp.NewClient(ctx).Get(url) 128 | if err != nil { 129 | return v, vs, date, err 130 | } 131 | defer fs.CheckClose(resp.Body, &err) 132 | if resp.StatusCode != http.StatusOK { 133 | return v, vs, date, errors.New(resp.Status) 134 | } 135 | bodyBytes, err := io.ReadAll(resp.Body) 136 | if err != nil { 137 | return v, vs, date, err 138 | } 139 | vs = strings.TrimSpace(string(bodyBytes)) 140 | vs = strings.TrimPrefix(vs, "gclone ") 141 | vs = strings.TrimRight(vs, "β") 142 | date, err = http.ParseTime(resp.Header.Get("Last-Modified")) 143 | if err != nil { 144 | return v, vs, date, err 145 | } 146 | // v, err = semver.NewVersion(stripV(vs)) 147 | return v, vs, date, err 148 | } 149 | 150 | // CheckVersion checks the installed version against available downloads 151 | func CheckVersion(ctx context.Context) { 152 | // fs.Version = "v1.62.1-mod1.5.2" // TODO: remember to takedown 153 | v, m := stripV(fs.Version) 154 | vCurrent, err := semver.NewVersion(v) 155 | vModCurrent, errMod := semver.NewVersion(m) 156 | // vCurrent, err := semver.NewVersion(stripV(fs.Version)) 157 | if err != nil { 158 | fs.Errorf(nil, "Failed to parse version: %v", err) 159 | } 160 | if errMod != nil { 161 | fs.Errorf(nil, "Failed to parse mod version: %v", errMod) 162 | } 163 | const timeFormat = "2006-01-02" 164 | 165 | printVersion := func(what, url string) { 166 | _, vs, t, err := GetVersion(ctx, url+"/download/version.txt") 167 | vNew, vMod := 
stripV(vs) 168 | v, _ := semver.NewVersion(vNew) 169 | m, _ := semver.NewVersion(vMod) 170 | if err != nil { 171 | fs.Errorf(nil, "Failed to get gclone %s version: %v", what, err) 172 | return 173 | } 174 | fmt.Printf("%-8s%-40v %20s\n", 175 | what+":", 176 | vs, 177 | "(released "+t.Format(timeFormat)+")", 178 | ) 179 | if v.Compare(*vCurrent) > 0 { 180 | fmt.Printf(" upgrade: %s\n", url) 181 | return 182 | } 183 | if v.Compare(*vCurrent) == 0 && m.Compare(*vModCurrent) > 0 { 184 | fmt.Printf(" upgrade: %s\n", url) 185 | return 186 | } 187 | } 188 | fmt.Printf("yours: %-13s\n", fs.Version) 189 | printVersion( 190 | "latest", 191 | "https://github.com/dogbutcat/gclone/releases/latest", 192 | ) 193 | // printVersion( 194 | // "beta", 195 | // "https://beta.rclone.org/", 196 | // ) 197 | if strings.HasSuffix(fs.Version, "-DEV") { 198 | fmt.Println("Your version is compiled from git so comparisons may be wrong.") 199 | } 200 | } 201 | 202 | func ConvertV(s string) (v *semver.Version, m *semver.Version) { 203 | v1, v2 := stripV(s) 204 | v, _ = semver.NewVersion(v1) 205 | m, _ = semver.NewVersion(v2) 206 | return v, m 207 | } 208 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL = bash 2 | # Branch we are working on 3 | BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD)) 4 | # Tag of the current commit, if any. If this is not "" then we are building a release 5 | RELEASE_TAG := $(shell git tag -l --points-at HEAD) 6 | # Version of last release (may not be on this branch) 7 | VERSION := $(shell cat VERSION) 8 | # Last tag on this branch 9 | LAST_TAG := $(shell git describe --tags --abbrev=0) 10 | # Next version 11 | NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}') 12 | NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. 
'{print $$1,$$2,$$3+1}') 13 | # If we are working on a release, override branch to master 14 | ifdef RELEASE_TAG 15 | BRANCH := master 16 | LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^) 17 | endif 18 | TAG_BRANCH := .$(BRANCH) 19 | BRANCH_PATH := branch/$(BRANCH)/ 20 | # If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH 21 | ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),) 22 | TAG_BRANCH := 23 | BRANCH_PATH := 24 | endif 25 | # Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit) 26 | VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD) 27 | # TAG is current version + commit number + commit + branch 28 | TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH) 29 | ifdef RELEASE_TAG 30 | TAG := $(RELEASE_TAG) 31 | endif 32 | GO_VERSION := $(shell go version) 33 | GO_OS := $(shell go env GOOS) 34 | ifdef BETA_SUBDIR 35 | BETA_SUBDIR := /$(BETA_SUBDIR) 36 | endif 37 | BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR) 38 | BETA_URL := https://beta.rclone.org/$(BETA_PATH)/ 39 | BETA_UPLOAD_ROOT := memstore:beta-rclone-org 40 | BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH) 41 | # Pass in GOTAGS=xyz on the make command line to set build tags 42 | ifdef GOTAGS 43 | BUILDTAGS=-tags "$(GOTAGS)" 44 | LINTTAGS=--build-tags "$(GOTAGS)" 45 | endif 46 | 47 | .PHONY: gclone test_all vars version 48 | 49 | gclone: 50 | ifeq ($(GO_OS),windows) 51 | go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso 52 | endif 53 | go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) 54 | ifeq ($(GO_OS),windows) 55 | rm resource_windows_`go env GOARCH`.syso 56 | endif 57 | mkdir -p `go env GOPATH`/bin/ 58 | cp -av gclone`go env GOEXE` `go env GOPATH`/bin/gclone`go env GOEXE`.new 59 | mv -v `go env GOPATH`/bin/gclone`go env GOEXE`.new `go env GOPATH`/bin/gclone`go env GOEXE` 60 | 61 | test_all: 62 | go install 
--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all 63 | 64 | vars: 65 | @echo SHELL="'$(SHELL)'" 66 | @echo BRANCH="'$(BRANCH)'" 67 | @echo TAG="'$(TAG)'" 68 | @echo VERSION="'$(VERSION)'" 69 | @echo GO_VERSION="'$(GO_VERSION)'" 70 | @echo BETA_URL="'$(BETA_URL)'" 71 | 72 | btest: 73 | @echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip 74 | @echo "Copied markdown of beta release to clip board" 75 | 76 | version: 77 | @echo '$(TAG)' 78 | 79 | # Full suite of integration tests 80 | test: rclone test_all 81 | -test_all 2>&1 | tee test_all.log 82 | @echo "Written logs in test_all.log" 83 | 84 | # Quick test 85 | quicktest: 86 | RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./... 87 | 88 | racequicktest: 89 | RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./... 90 | 91 | compiletest: 92 | RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./... 93 | 94 | # Do source code quality checks 95 | check: rclone 96 | @echo "-- START CODE QUALITY REPORT -------------------------------" 97 | @golangci-lint run $(LINTTAGS) ./... 
98 | @echo "-- END CODE QUALITY REPORT ---------------------------------" 99 | 100 | # Get the build dependencies 101 | build_dep: 102 | go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz' 103 | 104 | # Get the release dependencies we only install on linux 105 | release_dep_linux: 106 | go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest 107 | 108 | # Update dependencies 109 | showupdates: 110 | @echo "*** Direct dependencies that could be updated ***" 111 | @GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null 112 | 113 | # Update direct dependencies only 114 | updatedirect: 115 | GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all) 116 | GO111MODULE=on go mod tidy 117 | 118 | # Update direct and indirect dependencies and test dependencies 119 | update: 120 | GO111MODULE=on go get -d -u -t ./... 121 | GO111MODULE=on go mod tidy 122 | 123 | # Tidy the module dependencies 124 | tidy: 125 | GO111MODULE=on go mod tidy 126 | 127 | doc: 128 | @echo "doc part" 129 | 130 | install: gclone 131 | install -d ${DESTDIR}/usr/bin 132 | install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/gclone 133 | 134 | clean: 135 | go clean ./... 136 | find . 
-name \*~ | xargs -r rm -f 137 | rm -rf build docs/public 138 | rm -f gclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log 139 | 140 | website: 141 | 142 | 143 | tarball: 144 | git archive -9 --format=tar.gz --prefix=gclone-$(TAG)/ -o build/gclone-$(TAG).tar.gz $(TAG) 145 | 146 | vendorball: 147 | go mod vendor 148 | tar -zcf build/gclone-$(TAG)-vendor.tar.gz vendor 149 | rm -rf vendor 150 | 151 | sign_upload: 152 | cd build && md5sum gclone-v* | gpg --clearsign > MD5SUMS 153 | cd build && sha1sum gclone-v* | gpg --clearsign > SHA1SUMS 154 | cd build && sha256sum gclone-v* | gpg --clearsign > SHA256SUMS 155 | 156 | check_sign: 157 | cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c 158 | cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c 159 | cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c 160 | 161 | upload_github: cross 162 | ./bin/upload-github $(TAG) 163 | 164 | cross: 165 | go run bin/cross-compile.go -release "" $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) 166 | 167 | beta: 168 | 169 | log_since_last_release: 170 | git log $(LAST_TAG).. 
171 | 172 | compile_all: 173 | go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) 174 | 175 | ci_upload: 176 | sudo chown -R $$USER build 177 | find build -type l -delete 178 | gzip -r9v -S .$(TAG).gz build 179 | ./bin/upload-github $(TAG) 180 | 181 | ci_beta: 182 | 183 | # Fetch the binary builds from GitHub actions 184 | fetch_binaries: 185 | 186 | serve: 187 | 188 | tag: retag doc 189 | 190 | retag: 191 | @echo "Version is $(VERSION)" 192 | git tag -f -s -m "Version $(VERSION)" $(VERSION) 193 | 194 | startdev: 195 | 196 | startstable: 197 | 198 | winzip: 199 | zip -9 gclone-$(TAG).zip gclone.exe 200 | 201 | fixgclonedepmissing: 202 | go mod download golang.org/x/mobile 203 | go mod download github.com/dop251/scsu -------------------------------------------------------------------------------- /backend/drive/test/about.json: -------------------------------------------------------------------------------- 1 | { 2 | "importFormats": { 3 | "text/tab-separated-values": [ 4 | "application/vnd.google-apps.spreadsheet" 5 | ], 6 | "application/x-vnd.oasis.opendocument.presentation": [ 7 | "application/vnd.google-apps.presentation" 8 | ], 9 | "image/jpeg": [ 10 | "application/vnd.google-apps.document" 11 | ], 12 | "image/bmp": [ 13 | "application/vnd.google-apps.document" 14 | ], 15 | "image/gif": [ 16 | "application/vnd.google-apps.document" 17 | ], 18 | "application/vnd.ms-excel.sheet.macroenabled.12": [ 19 | "application/vnd.google-apps.spreadsheet" 20 | ], 21 | "application/vnd.openxmlformats-officedocument.wordprocessingml.template": [ 22 | "application/vnd.google-apps.document" 23 | ], 24 | "application/vnd.ms-powerpoint.presentation.macroenabled.12": [ 25 | "application/vnd.google-apps.presentation" 26 | ], 27 | "application/vnd.ms-word.template.macroenabled.12": [ 28 | "application/vnd.google-apps.document" 29 | ], 30 | "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [ 31 | 
"application/vnd.google-apps.document" 32 | ], 33 | "image/pjpeg": [ 34 | "application/vnd.google-apps.document" 35 | ], 36 | "application/vnd.google-apps.script+text/plain": [ 37 | "application/vnd.google-apps.script" 38 | ], 39 | "application/vnd.ms-excel": [ 40 | "application/vnd.google-apps.spreadsheet" 41 | ], 42 | "application/vnd.sun.xml.writer": [ 43 | "application/vnd.google-apps.document" 44 | ], 45 | "application/vnd.ms-word.document.macroenabled.12": [ 46 | "application/vnd.google-apps.document" 47 | ], 48 | "application/vnd.ms-powerpoint.slideshow.macroenabled.12": [ 49 | "application/vnd.google-apps.presentation" 50 | ], 51 | "text/rtf": [ 52 | "application/vnd.google-apps.document" 53 | ], 54 | "text/plain": [ 55 | "application/vnd.google-apps.document" 56 | ], 57 | "application/vnd.oasis.opendocument.spreadsheet": [ 58 | "application/vnd.google-apps.spreadsheet" 59 | ], 60 | "application/x-vnd.oasis.opendocument.spreadsheet": [ 61 | "application/vnd.google-apps.spreadsheet" 62 | ], 63 | "image/png": [ 64 | "application/vnd.google-apps.document" 65 | ], 66 | "application/x-vnd.oasis.opendocument.text": [ 67 | "application/vnd.google-apps.document" 68 | ], 69 | "application/msword": [ 70 | "application/vnd.google-apps.document" 71 | ], 72 | "application/pdf": [ 73 | "application/vnd.google-apps.document" 74 | ], 75 | "application/json": [ 76 | "application/vnd.google-apps.script" 77 | ], 78 | "application/x-msmetafile": [ 79 | "application/vnd.google-apps.drawing" 80 | ], 81 | "application/vnd.openxmlformats-officedocument.spreadsheetml.template": [ 82 | "application/vnd.google-apps.spreadsheet" 83 | ], 84 | "application/vnd.ms-powerpoint": [ 85 | "application/vnd.google-apps.presentation" 86 | ], 87 | "application/vnd.ms-excel.template.macroenabled.12": [ 88 | "application/vnd.google-apps.spreadsheet" 89 | ], 90 | "image/x-bmp": [ 91 | "application/vnd.google-apps.document" 92 | ], 93 | "application/rtf": [ 94 | "application/vnd.google-apps.document" 
95 | ], 96 | "application/vnd.openxmlformats-officedocument.presentationml.template": [ 97 | "application/vnd.google-apps.presentation" 98 | ], 99 | "image/x-png": [ 100 | "application/vnd.google-apps.document" 101 | ], 102 | "text/html": [ 103 | "application/vnd.google-apps.document" 104 | ], 105 | "application/vnd.oasis.opendocument.text": [ 106 | "application/vnd.google-apps.document" 107 | ], 108 | "application/vnd.openxmlformats-officedocument.presentationml.presentation": [ 109 | "application/vnd.google-apps.presentation" 110 | ], 111 | "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [ 112 | "application/vnd.google-apps.spreadsheet" 113 | ], 114 | "application/vnd.google-apps.script+json": [ 115 | "application/vnd.google-apps.script" 116 | ], 117 | "application/vnd.openxmlformats-officedocument.presentationml.slideshow": [ 118 | "application/vnd.google-apps.presentation" 119 | ], 120 | "application/vnd.ms-powerpoint.template.macroenabled.12": [ 121 | "application/vnd.google-apps.presentation" 122 | ], 123 | "text/csv": [ 124 | "application/vnd.google-apps.spreadsheet" 125 | ], 126 | "application/vnd.oasis.opendocument.presentation": [ 127 | "application/vnd.google-apps.presentation" 128 | ], 129 | "image/jpg": [ 130 | "application/vnd.google-apps.document" 131 | ], 132 | "text/richtext": [ 133 | "application/vnd.google-apps.document" 134 | ] 135 | }, 136 | "exportFormats": { 137 | "application/vnd.google-apps.document": [ 138 | "application/rtf", 139 | "application/vnd.oasis.opendocument.text", 140 | "text/html", 141 | "application/pdf", 142 | "application/epub+zip", 143 | "application/zip", 144 | "application/vnd.openxmlformats-officedocument.wordprocessingml.document", 145 | "text/plain" 146 | ], 147 | "application/vnd.google-apps.spreadsheet": [ 148 | "application/x-vnd.oasis.opendocument.spreadsheet", 149 | "text/tab-separated-values", 150 | "application/pdf", 151 | "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", 
152 | "text/csv", 153 | "application/zip", 154 | "application/vnd.oasis.opendocument.spreadsheet" 155 | ], 156 | "application/vnd.google-apps.jam": [ 157 | "application/pdf" 158 | ], 159 | "application/vnd.google-apps.script": [ 160 | "application/vnd.google-apps.script+json" 161 | ], 162 | "application/vnd.google-apps.presentation": [ 163 | "application/vnd.oasis.opendocument.presentation", 164 | "application/pdf", 165 | "application/vnd.openxmlformats-officedocument.presentationml.presentation", 166 | "text/plain" 167 | ], 168 | "application/vnd.google-apps.form": [ 169 | "application/zip" 170 | ], 171 | "application/vnd.google-apps.drawing": [ 172 | "image/svg+xml", 173 | "image/png", 174 | "application/pdf", 175 | "image/jpeg" 176 | ] 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /cmd/selfupdate/selfupdate_test.go: -------------------------------------------------------------------------------- 1 | //go:build !noselfupdate 2 | // +build !noselfupdate 3 | 4 | package selfupdate 5 | 6 | import ( 7 | "context" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | "regexp" 12 | "runtime" 13 | "testing" 14 | "time" 15 | 16 | "github.com/rclone/rclone/fs" 17 | "github.com/rclone/rclone/fstest/testy" 18 | "github.com/stretchr/testify/assert" 19 | ) 20 | 21 | func TestGetVersion(t *testing.T) { 22 | testy.SkipUnreliable(t) 23 | 24 | ctx := context.Background() 25 | 26 | // a beta version can only have "v" prepended 27 | resultVer, _, err := GetVersion(ctx, true, "1.2.3.4") 28 | assert.NoError(t, err) 29 | assert.Equal(t, "v1.2.3.4", resultVer) 30 | 31 | // but a stable version syntax should be checked 32 | _, _, err = GetVersion(ctx, false, "1") 33 | assert.Error(t, err) 34 | _, _, err = GetVersion(ctx, false, "1.") 35 | assert.Error(t, err) 36 | _, _, err = GetVersion(ctx, false, "1.2.") 37 | assert.Error(t, err) 38 | _, _, err = GetVersion(ctx, false, "1.2.3.4") 39 | assert.Error(t, err) 40 | 41 | // incomplete 
stable version should have micro release added 42 | resultVer, _, err = GetVersion(ctx, false, "1.52") 43 | assert.NoError(t, err) 44 | assert.Equal(t, "v1.52.3", resultVer) 45 | } 46 | 47 | func TestInstallOnLinux(t *testing.T) { 48 | testy.SkipUnreliable(t) 49 | if runtime.GOOS != "linux" { 50 | t.Skip("this is a Linux only test") 51 | } 52 | 53 | // Prepare for test 54 | ctx := context.Background() 55 | testDir := t.TempDir() 56 | path := filepath.Join(testDir, "rclone") 57 | 58 | regexVer := regexp.MustCompile(`v[0-9]\S+`) 59 | 60 | betaVer, _, err := GetVersion(ctx, true, "") 61 | assert.NoError(t, err) 62 | 63 | // Must do nothing if version isn't changing 64 | assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path, Version: fs.Version})) 65 | 66 | // Must fail on non-writable file 67 | assert.NoError(t, os.WriteFile(path, []byte("test"), 0644)) 68 | assert.NoError(t, os.Chmod(path, 0000)) 69 | defer func() { 70 | _ = os.Chmod(path, 0644) 71 | }() 72 | err = (InstallUpdate(ctx, &Options{Beta: true, Output: path})) 73 | assert.Error(t, err) 74 | assert.Contains(t, err.Error(), "run self-update as root") 75 | 76 | // Must keep non-standard permissions 77 | assert.NoError(t, os.Chmod(path, 0644)) 78 | assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path})) 79 | 80 | info, err := os.Stat(path) 81 | assert.NoError(t, err) 82 | assert.Equal(t, os.FileMode(0644), info.Mode().Perm()) 83 | 84 | // Must remove temporary files 85 | files, err := os.ReadDir(testDir) 86 | assert.NoError(t, err) 87 | assert.Equal(t, 1, len(files)) 88 | 89 | // Must contain valid executable 90 | assert.NoError(t, os.Chmod(path, 0755)) 91 | cmd := exec.Command(path, "version") 92 | output, err := cmd.CombinedOutput() 93 | assert.NoError(t, err) 94 | assert.True(t, cmd.ProcessState.Success()) 95 | assert.Equal(t, betaVer, regexVer.FindString(string(output))) 96 | } 97 | 98 | func TestRenameOnWindows(t *testing.T) { 99 | testy.SkipUnreliable(t) 100 | if 
runtime.GOOS != "windows" { 101 | t.Skip("this is a Windows only test") 102 | } 103 | 104 | // Prepare for test 105 | ctx := context.Background() 106 | 107 | testDir := t.TempDir() 108 | 109 | path := filepath.Join(testDir, "rclone.exe") 110 | regexVer := regexp.MustCompile(`v[0-9]\S+`) 111 | 112 | stableVer, _, err := GetVersion(ctx, false, "") 113 | assert.NoError(t, err) 114 | 115 | betaVer, _, err := GetVersion(ctx, true, "") 116 | assert.NoError(t, err) 117 | 118 | // Must not create temporary files when target doesn't exist 119 | assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path})) 120 | 121 | files, err := os.ReadDir(testDir) 122 | assert.NoError(t, err) 123 | assert.Equal(t, 1, len(files)) 124 | 125 | // Must save running executable as the "old" file 126 | cmdWait := exec.Command(path, "config") 127 | stdinWait, err := cmdWait.StdinPipe() // Make it run waiting for input 128 | assert.NoError(t, err) 129 | assert.NoError(t, cmdWait.Start()) 130 | 131 | assert.NoError(t, InstallUpdate(ctx, &Options{Beta: false, Output: path})) 132 | files, err = os.ReadDir(testDir) 133 | assert.NoError(t, err) 134 | assert.Equal(t, 2, len(files)) 135 | 136 | pathOld := filepath.Join(testDir, "rclone.old.exe") 137 | _, err = os.Stat(pathOld) 138 | assert.NoError(t, err) 139 | 140 | cmd := exec.Command(path, "version") 141 | output, err := cmd.CombinedOutput() 142 | assert.NoError(t, err) 143 | assert.True(t, cmd.ProcessState.Success()) 144 | assert.Equal(t, stableVer, regexVer.FindString(string(output))) 145 | 146 | cmdOld := exec.Command(pathOld, "version") 147 | output, err = cmdOld.CombinedOutput() 148 | assert.NoError(t, err) 149 | assert.True(t, cmdOld.ProcessState.Success()) 150 | assert.Equal(t, betaVer, regexVer.FindString(string(output))) 151 | 152 | // Stop previous waiting executable, run new and saved executables 153 | _ = stdinWait.Close() 154 | _ = cmdWait.Wait() 155 | time.Sleep(100 * time.Millisecond) 156 | 157 | cmdWait = 
exec.Command(path, "config") 158 | stdinWait, err = cmdWait.StdinPipe() 159 | assert.NoError(t, err) 160 | assert.NoError(t, cmdWait.Start()) 161 | 162 | cmdWaitOld := exec.Command(pathOld, "config") 163 | stdinWaitOld, err := cmdWaitOld.StdinPipe() 164 | assert.NoError(t, err) 165 | assert.NoError(t, cmdWaitOld.Start()) 166 | 167 | // Updating when the "old" executable is running must produce a random "old" file 168 | assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path})) 169 | files, err = os.ReadDir(testDir) 170 | assert.NoError(t, err) 171 | assert.Equal(t, 3, len(files)) 172 | 173 | // Stop all waiting executables 174 | _ = stdinWait.Close() 175 | _ = cmdWait.Wait() 176 | _ = stdinWaitOld.Close() 177 | _ = cmdWaitOld.Wait() 178 | time.Sleep(100 * time.Millisecond) 179 | } 180 | 181 | func TestIsLatest(t *testing.T) { 182 | vNow := "v1.63.1-mod1.5.2" 183 | vNext := "v1.64.0-DEV" 184 | assert.False(t, isLatest(vNow, vNext)) 185 | vNow = "v1.64.0-DEV" 186 | vNext = "v1.63.1-mod1.5.2" 187 | assert.True(t, isLatest(vNow, vNext)) 188 | // local base over next base should true 189 | vNow = "v1.64.0-mod1.5.2" 190 | vNext = "v1.63.1-mod1.5.2" 191 | assert.True(t, isLatest(vNow, vNext)) 192 | // next base over local should false 193 | vNow = "v1.63.1-mod1.5.2" 194 | vNext = "v1.64.0-mod1.5.2" 195 | assert.False(t, isLatest(vNow, vNext)) 196 | // local eq next should true 197 | vNow = "v1.64.0-mod1.5.2" 198 | vNext = "v1.64.0-mod1.5.2" 199 | assert.True(t, isLatest(vNow, vNext)) 200 | // next mod over local should false 201 | vNow = "v1.63.1-mod1.5.2" 202 | vNext = "v1.63.1-mod1.5.3" 203 | assert.False(t, isLatest(vNow, vNext)) 204 | // local mod over next should true 205 | vNow = "v1.63.1-mod1.5.5" 206 | vNext = "v1.63.1-mod1.5.3" 207 | assert.True(t, isLatest(vNow, vNext)) 208 | // next base over local while local mod over next should false 209 | // logically not possible 210 | vNow = "v1.63.1-mod1.5.3" 211 | vNext = "v1.64.0-mod1.5.2" 212 | 
assert.False(t, isLatest(vNow, vNext)) 213 | // next base and mod over local should false 214 | vNow = "v1.63.1-mod1.5.2" 215 | vNext = "v1.64.1-mod1.5.3" 216 | assert.False(t, isLatest(vNow, vNext)) 217 | // next mod over local while base is eq should false 218 | vNow = "v1.63.1-mod1.5.2" 219 | vNext = "v1.63.1-mod1.5.3" 220 | assert.False(t, isLatest(vNow, vNext)) 221 | } 222 | -------------------------------------------------------------------------------- /backend/drive/upload.go: -------------------------------------------------------------------------------- 1 | // Upload for drive 2 | // 3 | // Docs 4 | // Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable 5 | // Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices 6 | // Files insert: https://developers.google.com/drive/v2/reference/files/insert 7 | // Files update: https://developers.google.com/drive/v2/reference/files/update 8 | // 9 | // This contains code adapted from google.golang.org/api (C) the GO AUTHORS 10 | 11 | package drive 12 | 13 | import ( 14 | "bytes" 15 | "context" 16 | "encoding/json" 17 | "fmt" 18 | "io" 19 | "net/http" 20 | "net/url" 21 | "strconv" 22 | 23 | "github.com/rclone/rclone/fs" 24 | "github.com/rclone/rclone/fs/fserrors" 25 | "github.com/rclone/rclone/lib/readers" 26 | "google.golang.org/api/drive/v3" 27 | "google.golang.org/api/googleapi" 28 | ) 29 | 30 | const ( 31 | // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. 32 | statusResumeIncomplete = 308 33 | ) 34 | 35 | // resumableUpload is used by the generated APIs to provide resumable uploads. 36 | // It is not used by developers directly. 37 | type resumableUpload struct { 38 | f *Fs 39 | remote string 40 | // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". 41 | URI string 42 | // Media is the object being uploaded. 
43 | Media io.Reader 44 | // MediaType defines the media type, e.g. "image/jpeg". 45 | MediaType string 46 | // ContentLength is the full size of the object being uploaded. 47 | ContentLength int64 48 | // Return value 49 | ret *drive.File 50 | } 51 | 52 | // Upload the io.Reader in of size bytes with contentType and info 53 | func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) { 54 | params := url.Values{ 55 | "alt": {"json"}, 56 | "uploadType": {"resumable"}, 57 | "fields": {partialFields}, 58 | } 59 | params.Set("supportsAllDrives", "true") 60 | if f.opt.KeepRevisionForever { 61 | params.Set("keepRevisionForever", "true") 62 | } 63 | urls := "https://www.googleapis.com/upload/drive/v3/files" 64 | method := "POST" 65 | if fileID != "" { 66 | params.Set("setModifiedDate", "true") 67 | urls += "/{fileId}" 68 | method = "PATCH" 69 | } 70 | urls += "?" + params.Encode() 71 | var res *http.Response 72 | var err error 73 | err = f.pacer.Call(func() (bool, error) { 74 | var body io.Reader 75 | body, err = googleapi.WithoutDataWrapper.JSONReader(info) 76 | if err != nil { 77 | return false, err 78 | } 79 | var req *http.Request 80 | req, err = http.NewRequestWithContext(ctx, method, urls, body) 81 | if err != nil { 82 | return false, err 83 | } 84 | googleapi.Expand(req.URL, map[string]string{ 85 | "fileId": fileID, 86 | }) 87 | req.Header.Set("Content-Type", "application/json; charset=UTF-8") 88 | req.Header.Set("X-Upload-Content-Type", contentType) 89 | if size >= 0 { 90 | req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size)) 91 | } 92 | res, err = f.client.Do(req) 93 | if err == nil { 94 | defer googleapi.CloseBody(res) 95 | err = googleapi.CheckResponse(res) 96 | } 97 | return f.shouldRetry(ctx, err) 98 | }) 99 | if err != nil { 100 | return nil, err 101 | } 102 | loc := res.Header.Get("Location") 103 | rx := &resumableUpload{ 104 | f: f, 105 | remote: remote, 106 | 
URI: loc, 107 | Media: in, 108 | MediaType: contentType, 109 | ContentLength: size, 110 | } 111 | return rx.Upload(ctx) 112 | } 113 | 114 | // Make an http.Request for the range passed in 115 | func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request { 116 | req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body) 117 | req.ContentLength = reqSize 118 | totalSize := "*" 119 | if rx.ContentLength >= 0 { 120 | totalSize = strconv.FormatInt(rx.ContentLength, 10) 121 | } 122 | if reqSize != 0 { 123 | req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize)) 124 | } else { 125 | req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize)) 126 | } 127 | req.Header.Set("Content-Type", rx.MediaType) 128 | return req 129 | } 130 | 131 | // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil 132 | func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) { 133 | _, _ = chunk.Seek(0, io.SeekStart) 134 | req := rx.makeRequest(ctx, start, chunk, chunkSize) 135 | res, err := rx.f.client.Do(req) 136 | if err != nil { 137 | return 599, err 138 | } 139 | defer googleapi.CloseBody(res) 140 | if res.StatusCode == statusResumeIncomplete { 141 | return res.StatusCode, nil 142 | } 143 | err = googleapi.CheckResponse(res) 144 | if err != nil { 145 | return res.StatusCode, err 146 | } 147 | 148 | // When the entire file upload is complete, the server 149 | // responds with an HTTP 201 Created along with any metadata 150 | // associated with this resource. If this request had been 151 | // updating an existing entity rather than creating a new one, 152 | // the HTTP response code for a completed upload would have 153 | // been 200 OK. 154 | // 155 | // So parse the response out of the body. 
	// We aren't expecting
	// any other 2xx codes, so we parse it unconditionally on
	// StatusCode
	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
		// 598 is a private pseudo status meaning "2xx received but the
		// metadata in the body could not be decoded".
		return 598, err
	}

	return res.StatusCode, nil
}

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
//
// On success rx.ret holds the drive.File metadata decoded from the final
// chunk's response and is returned to the caller.
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
	buf := make([]byte, int(rx.f.opt.ChunkSize))
	for finished := false; !finished; {
		var reqSize int64
		var chunk io.ReadSeeker
		if rx.ContentLength >= 0 {
			// If size known use repeatable reader for smoother bwlimit
			if start >= rx.ContentLength {
				break
			}
			reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
			chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
		} else {
			// If size unknown read into buffer
			var n int
			n, err = readers.ReadFill(rx.Media, buf)
			if err == io.EOF {
				// Send the last chunk with the correct ContentLength
				// otherwise Google doesn't know we've finished
				rx.ContentLength = start + int64(n)
				finished = true
			} else if err != nil {
				return nil, err
			}
			reqSize = int64(n)
			chunk = bytes.NewReader(buf[:reqSize])
		}

		// Transfer the chunk
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
			// NOTE: ":=" declares a fresh err here, shadowing the one
			// assigned on the line above for the rest of the closure.
			again, err := rx.f.shouldRetry(ctx, err)
			// Any of these codes means the chunk (or the whole upload) was
			// accepted, so suppress the retry.
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false
				err = nil
			}
			return again, err
		})
		if err != nil {
			return nil, err
		}

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
	// any 5xx errors, including:
	//
	// 500 Internal Server Error
	// 502 Bad Gateway
	// 503 Service Unavailable
	// 504 Gateway Timeout
	//
	// Use an exponential backoff strategy if any 5xx server error is
	// returned when resuming or retrying upload requests. These errors can
	// occur if a server is getting overloaded. Exponential backoff can help
	// alleviate these kinds of problems during periods of high volume of
	// requests or heavy network traffic. Other kinds of requests should not
	// be handled by exponential backoff but you can still retry a number of
	// them. When retrying these requests, limit the number of times you
	// retry them. For example your code could limit to ten retries or less
	// before reporting an error.
	//
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	// No decoded metadata means no chunk ever completed the upload — report
	// the last status code seen and ask the layer above to retry.
	if rx.ret == nil {
		return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
--------------------------------------------------------------------------------
/bin/cross-compile.go:
--------------------------------------------------------------------------------
//go:build ignore

// Cross compile rclone - in go because I hate bash ;-)
//
// (build-tagged "ignore" so it is only ever run explicitly via go run)

package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"sync"
	"text/template"
	"time"
)

var (
	// Flags
	debug           = flag.Bool("d", false, "Print commands instead of running them")
	parallel        = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel")
	copyAs          = flag.String("release", "", "Make copies of the releases with this name")
	gitLog          = flag.String("git-log", "", "git log to include as well")
	include         = flag.String("include", "^.*$", "os/arch regexp to include")
	exclude         = flag.String("exclude", "^$", "os/arch regexp to exclude")
	cgo             = flag.Bool("cgo", false, "Use cgo for the build")
	noClean         = flag.Bool("no-clean", false, "Don't clean the build directory before running")
	tags            = flag.String("tags", "", "Space separated list of build tags")
	buildmode       = flag.String("buildmode", "", "Passed to go build -buildmode flag")
	compileOnly     = flag.Bool("compile-only", false, "Just build the binary, not the zip")
	extraEnv        = flag.String("env", "", "comma separated list of VAR=VALUE env vars to set")
	macOSSDK        = flag.String("macos-sdk", "", "macOS SDK to use")
	macOSArch       = flag.String("macos-arch", "", "macOS arch to use")
	extraCgoCFlags  = flag.String("cgo-cflags", "", "extra CGO_CFLAGS")
	extraCgoLdFlags = flag.String("cgo-ldflags", "", "extra CGO_LDFLAGS")
)

// GOOS/GOARCH pairs we
build for 45 | // 46 | // If the GOARCH contains a - it is a synthetic arch with more parameters 47 | var osarches = []string{ 48 | "windows/386", 49 | "windows/amd64", 50 | "windows/arm64", 51 | "darwin/amd64", 52 | "darwin/arm64", 53 | "linux/386", 54 | "linux/amd64", 55 | "linux/arm", 56 | "linux/arm-v6", 57 | "linux/arm-v7", 58 | "linux/arm64", 59 | "linux/mips", 60 | "linux/mipsle", 61 | "freebsd/386", 62 | "freebsd/amd64", 63 | "freebsd/arm", 64 | "freebsd/arm-v6", 65 | "freebsd/arm-v7", 66 | "netbsd/386", 67 | "netbsd/amd64", 68 | "netbsd/arm", 69 | "netbsd/arm-v6", 70 | "netbsd/arm-v7", 71 | "openbsd/386", 72 | "openbsd/amd64", 73 | "plan9/386", 74 | "plan9/amd64", 75 | "solaris/amd64", 76 | // "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed 77 | } 78 | 79 | // Special environment flags for a given arch 80 | var archFlags = map[string][]string{ 81 | "386": {"GO386=softfloat"}, 82 | "mips": {"GOMIPS=softfloat"}, 83 | "mipsle": {"GOMIPS=softfloat"}, 84 | "arm": {"GOARM=5"}, 85 | "arm-v6": {"GOARM=6"}, 86 | "arm-v7": {"GOARM=7"}, 87 | } 88 | 89 | // Map Go architectures to NFPM architectures 90 | // Any missing are passed straight through 91 | var goarchToNfpm = map[string]string{ 92 | "arm": "arm5", 93 | "arm-v6": "arm6", 94 | "arm-v7": "arm7", 95 | } 96 | 97 | // runEnv - run a shell command with env 98 | func runEnv(args, env []string) error { 99 | if *debug { 100 | args = append([]string{"echo"}, args...) 101 | } 102 | cmd := exec.Command(args[0], args[1:]...) 103 | if env != nil { 104 | cmd.Env = append(os.Environ(), env...) 
	}
	if *debug {
		log.Printf("args = %v, env = %v\n", args, cmd.Env)
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Print("----------------------------")
		log.Printf("Failed to run %v: %v", args, err)
		log.Printf("Command output was:\n%s", out)
		log.Print("----------------------------")
	}
	return err
}

// run a shell command, exiting the program on failure
func run(args ...string) {
	err := runEnv(args, nil)
	if err != nil {
		log.Fatalf("Exiting after error: %v", err)
	}
}

// chdir or die
func chdir(dir string) {
	err := os.Chdir(dir)
	if err != nil {
		log.Fatalf("Couldn't cd into %q: %v", dir, err)
	}
}

// substitute data from go template file in to file out
//
// Fatal on any template parse/execute or file error.
func substitute(inFile, outFile string, data interface{}) {
	t, err := template.ParseFiles(inFile)
	if err != nil {
		log.Fatalf("Failed to read template file %q: %v", inFile, err)
	}
	out, err := os.Create(outFile)
	if err != nil {
		log.Fatalf("Failed to create output file %q: %v", outFile, err)
	}
	defer func() {
		err := out.Close()
		if err != nil {
			log.Fatalf("Failed to close output file %q: %v", outFile, err)
		}
	}()
	err = t.Execute(out, data)
	if err != nil {
		log.Fatalf("Failed to substitute template file %q: %v", inFile, err)
	}
}

// build the zip package return its name
//
// The manual/README copies are disabled (commented out) in this fork; only
// the optional -git-log file is added before zipping the directory.
func buildZip(dir string) string {
	// Now build the zip
	// run("cp", "-a", "../MANUAL.txt", filepath.Join(dir, "README.txt"))
	// run("cp", "-a", "../MANUAL.html", filepath.Join(dir, "README.html"))
	// run("cp", "-a", "../gclone.1", dir)
	if *gitLog != "" {
		run("cp", "-a", *gitLog, dir)
	}
	zip := dir + ".zip"
	run("zip", "-r9", zip, dir)
	return zip
}

// Build .deb and .rpm packages
//
// It
// returns a list of artifacts it has made
func buildDebAndRpm(dir, version, goarch string) []string {
	// Make internal version number acceptable to .deb and .rpm
	// NOTE(review): version[1:] assumes a leading "v" (tags look like
	// v1.69.2-mod…); confirm callers never pass a bare version.
	pkgVersion := version[1:]
	pkgVersion = strings.ReplaceAll(pkgVersion, "β", "-beta")
	pkgVersion = strings.ReplaceAll(pkgVersion, "-", ".")
	nfpmArch, ok := goarchToNfpm[goarch]
	if !ok {
		nfpmArch = goarch
	}

	// Make nfpm.yaml from the template
	substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
		"Version": pkgVersion,
		"Arch":    nfpmArch,
	})

	// build them
	var artifacts []string
	for _, pkg := range []string{".deb", ".rpm"} {
		artifact := dir + pkg
		run("bash", "-c", "cd "+dir+" && nfpm -f nfpm.yaml pkg -t ../"+artifact)
		artifacts = append(artifacts, artifact)
	}

	return artifacts
}

// Strip a version suffix off the arch if present, e.g. "arm-v7" -> "arm"
func stripVersion(goarch string) string {
	i := strings.Index(goarch, "-")
	if i < 0 {
		return goarch
	}
	return goarch[:i]
}

// run the command returning trimmed output, exiting the program on failure
func runOut(command ...string) string {
	out, err := exec.Command(command[0], command[1:]...).Output()
	if err != nil {
		log.Fatalf("Failed to run %q: %v", command, err)
	}
	return strings.TrimSpace(string(out))
}

// Generate Windows resource system object file (.syso), which can be picked
// up by the following go build for embedding version information and icon
// resources into the executable.
222 | func generateResourceWindows(version, arch string) func() { 223 | sysoPath := fmt.Sprintf("../resource_windows_%s.syso", arch) // Use explicit destination filename, even though it should be same as default, so that we are sure we have the correct reference to it 224 | if err := os.Remove(sysoPath); !os.IsNotExist(err) { 225 | // Note: This one we choose to treat as fatal, to avoid any risk of picking up an old .syso file without noticing. 226 | log.Fatalf("Failed to remove existing Windows %s resource system object file %s: %v", arch, sysoPath, err) 227 | } 228 | args := []string{"go", "run", "../bin/resource_windows.go", "-arch", arch, "-version", version, "-syso", sysoPath} 229 | if err := runEnv(args, nil); err != nil { 230 | log.Printf("Warning: Couldn't generate Windows %s resource system object file, binaries will not have version information or icon embedded", arch) 231 | return nil 232 | } 233 | if _, err := os.Stat(sysoPath); err != nil { 234 | log.Printf("Warning: Couldn't find generated Windows %s resource system object file, binaries will not have version information or icon embedded", arch) 235 | return nil 236 | } 237 | return func() { 238 | if err := os.Remove(sysoPath); err != nil && !os.IsNotExist(err) { 239 | log.Printf("Warning: Couldn't remove generated Windows %s resource system object file %s: %v. 
Please remove it manually.", arch, sysoPath, err) 240 | } 241 | } 242 | } 243 | 244 | // build the binary in dir returning success or failure 245 | func compileArch(version, goos, goarch, dir string) bool { 246 | log.Printf("Compiling %s/%s into %s", goos, goarch, dir) 247 | goarchBase := stripVersion(goarch) 248 | output := filepath.Join(dir, "gclone") 249 | if goos == "windows" { 250 | output += ".exe" 251 | if cleanupFn := generateResourceWindows(version, goarchBase); cleanupFn != nil { 252 | defer cleanupFn() 253 | } 254 | } 255 | err := os.MkdirAll(dir, 0777) 256 | if err != nil { 257 | log.Fatalf("Failed to mkdir: %v", err) 258 | } 259 | args := []string{ 260 | "go", "build", 261 | "--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version, 262 | "-trimpath", 263 | "-o", output, 264 | "-tags", *tags, 265 | } 266 | if *buildmode != "" { 267 | args = append(args, 268 | "-buildmode", *buildmode, 269 | ) 270 | } 271 | args = append(args, 272 | "..", 273 | ) 274 | env := []string{ 275 | "GOOS=" + goos, 276 | "GOARCH=" + goarchBase, 277 | } 278 | if *extraEnv != "" { 279 | env = append(env, strings.Split(*extraEnv, ",")...) 
	}
	// Assemble CGO flags for macOS cross builds plus any user supplied extras
	var (
		cgoCFlags  []string
		cgoLdFlags []string
	)
	if *macOSSDK != "" {
		// NOTE(review): the local "flag" shadows the flag package within
		// this block — harmless here since the package is not used below.
		flag := "-isysroot " + runOut("xcrun", "--sdk", *macOSSDK, "--show-sdk-path")
		cgoCFlags = append(cgoCFlags, flag)
		cgoLdFlags = append(cgoLdFlags, flag)
	}
	if *macOSArch != "" {
		flag := "-arch " + *macOSArch
		cgoCFlags = append(cgoCFlags, flag)
		cgoLdFlags = append(cgoLdFlags, flag)
	}
	if *extraCgoCFlags != "" {
		cgoCFlags = append(cgoCFlags, *extraCgoCFlags)
	}
	if *extraCgoLdFlags != "" {
		cgoLdFlags = append(cgoLdFlags, *extraCgoLdFlags)
	}
	if len(cgoCFlags) > 0 {
		env = append(env, "CGO_CFLAGS="+strings.Join(cgoCFlags, " "))
	}
	if len(cgoLdFlags) > 0 {
		env = append(env, "CGO_LDFLAGS="+strings.Join(cgoLdFlags, " "))
	}
	if !*cgo {
		env = append(env, "CGO_ENABLED=0")
	} else {
		env = append(env, "CGO_ENABLED=1")
	}
	// Arch-specific env (GOARM/GOMIPS/GO386) keyed by the full synthetic arch
	if flags, ok := archFlags[goarch]; ok {
		env = append(env, flags...)
	}
	err = runEnv(args, env)
	if err != nil {
		log.Printf("Error compiling %s/%s: %v", goos, goarch, err)
		return false
	}
	if !*compileOnly {
		if goos != "js" {
			artifacts := []string{buildZip(dir)}
			// build a .deb and .rpm if appropriate
			if goos == "linux" {
				artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
			}
			if *copyAs != "" {
				// Hard-link each artifact under the -release name as well
				for _, artifact := range artifacts {
					run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
				}
			}
		}
		// tidy up
		run("rm", "-rf", dir)
	}
	log.Printf("Done compiling %s/%s", goos, goarch)
	return true
}

// compile cross compiles every osarch matched by -include and not matched by
// -exclude using a pool of -parallel workers, exiting non-zero if any failed.
func compile(version string) {
	start := time.Now()
	wg := new(sync.WaitGroup)
	// NOTE: this channel shadows the run() helper within this function.
	run := make(chan func(), *parallel)
	for i := 0; i < *parallel; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range run {
				f()
			}
		}()
	}
	includeRe, err := regexp.Compile(*include)
	if err != nil {
		log.Fatalf("Bad -include regexp: %v", err)
	}
	excludeRe, err := regexp.Compile(*exclude)
	if err != nil {
		log.Fatalf("Bad -exclude regexp: %v", err)
	}
	compiled := 0
	var failuresMu sync.Mutex
	var failures []string
	for _, osarch := range osarches {
		if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
			continue
		}
		parts := strings.Split(osarch, "/")
		if len(parts) != 2 {
			log.Fatalf("Bad osarch %q", osarch)
		}
		goos, goarch := parts[0], parts[1]
		userGoos := goos
		if goos == "darwin" {
			userGoos = "osx" // historical name used in release artifacts
		}
		dir := filepath.Join("gclone-" + version + "-" + userGoos + "-" + goarch)
		// goos/goarch/dir are fresh per iteration, so the closure capture
		// is safe.
		run <- func() {
			if !compileArch(version, goos, goarch, dir) {
				failuresMu.Lock()
				failures = append(failures, goos+"/"+goarch)
				failuresMu.Unlock()
			}
		}
		compiled++
	}
	close(run)
	wg.Wait()
	log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
	if len(failures) > 0 {
		sort.Strings(failures)
		log.Printf("%d compile failures:\n  %s\n", len(failures), strings.Join(failures, "\n  "))
		os.Exit(1)
	}
}

// main parses flags, (re)creates the build directory and cross compiles the
// version given as the single positional argument.
func main() {
	flag.Parse()
	args := flag.Args()
if len(args) != 1 { 401 | log.Fatalf("Syntax: %s ", os.Args[0]) 402 | } 403 | version := args[0] 404 | if !*noClean { 405 | run("rm", "-rf", "build") 406 | run("mkdir", "build") 407 | } 408 | chdir("build") 409 | err := os.WriteFile("version.txt", []byte(fmt.Sprintf("gclone %s\n", version)), 0666) 410 | if err != nil { 411 | log.Fatalf("Couldn't write version.txt: %v", err) 412 | } 413 | compile(version) 414 | } 415 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/dogbutcat/gclone 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5 // indirect 7 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 // indirect 8 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 // indirect 9 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 // indirect 10 | github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 // indirect 11 | github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect 12 | github.com/Files-com/files-sdk-go/v3 v3.2.218 // indirect 13 | github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect 14 | github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e // indirect 15 | github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 // indirect 16 | github.com/abbot/go-http-auth v0.4.0 // indirect 17 | github.com/anacrolix/dms v1.7.2 // indirect 18 | github.com/anacrolix/log v0.16.0 // indirect 19 | github.com/atotto/clipboard v0.1.4 // indirect 20 | github.com/aws/aws-sdk-go-v2 v1.38.0 // indirect 21 | github.com/aws/aws-sdk-go-v2/config v1.31.0 // indirect 22 | github.com/aws/aws-sdk-go-v2/credentials v1.18.4 // indirect 23 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 // indirect 24 | github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 // indirect 25 | github.com/aws/smithy-go v1.22.5 // indirect 26 | 
github.com/buengese/sgzip v0.1.1 // indirect 27 | github.com/cloudinary/cloudinary-go/v2 v2.12.0 // indirect 28 | github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect 29 | github.com/colinmarc/hdfs/v2 v2.4.0 // indirect 30 | github.com/coreos/go-semver v0.3.1 31 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 32 | github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect 33 | github.com/gabriel-vasile/mimetype v1.4.9 // indirect 34 | github.com/gdamore/tcell/v2 v2.8.1 // indirect 35 | github.com/go-chi/chi/v5 v5.2.2 // indirect 36 | github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect 37 | github.com/go-git/go-billy/v5 v5.6.2 // indirect 38 | github.com/google/uuid v1.6.0 // indirect 39 | github.com/hanwen/go-fuse/v2 v2.8.0 // indirect 40 | github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect 41 | github.com/henrybear327/go-proton-api v1.0.0 // indirect 42 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 43 | github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect 44 | github.com/josephspurrier/goversioninfo v1.5.0 45 | github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect 46 | github.com/klauspost/compress v1.18.0 // indirect 47 | github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect 48 | github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect 49 | github.com/lanrat/extsort v1.4.0 // indirect 50 | github.com/mattn/go-colorable v0.1.14 // indirect 51 | github.com/mattn/go-runewidth v0.0.16 // indirect 52 | github.com/mitchellh/go-homedir v1.1.0 // indirect 53 | github.com/moby/sys/mountinfo v0.7.2 // indirect 54 | github.com/ncw/swift/v2 v2.0.4 // indirect 55 | github.com/oracle/oci-go-sdk/v65 v65.98.0 // indirect 56 | github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 57 | github.com/peterh/liner v1.2.2 // indirect 58 | github.com/pkg/sftp v1.13.9 // indirect 59 | github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 60 | github.com/prometheus/client_golang v1.23.0 // indirect 61 | github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect 62 | github.com/rclone/gofakes3 v0.0.4 // indirect 63 | github.com/rfjakob/eme v1.1.2 // indirect 64 | github.com/rivo/uniseg v0.4.7 // indirect 65 | github.com/shirou/gopsutil/v4 v4.25.7 // indirect 66 | github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect 67 | github.com/spf13/cobra v1.9.1 68 | github.com/spf13/pflag v1.0.7 // indirect 69 | github.com/stretchr/testify v1.10.0 70 | github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect 71 | github.com/unknwon/goconfig v1.0.0 // indirect 72 | github.com/willscott/go-nfs v0.0.3 // indirect 73 | github.com/winfsp/cgofuse v1.6.0 // indirect 74 | github.com/xanzy/ssh-agent v0.3.3 // indirect 75 | github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect 76 | github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect 77 | github.com/zeebo/blake3 v0.2.4 // indirect 78 | github.com/zeebo/xxh3 v1.0.2 // indirect 79 | go.etcd.io/bbolt v1.4.2 // indirect 80 | goftp.io/server/v2 v2.0.1 // indirect 81 | golang.org/x/crypto v0.41.0 // indirect 82 | golang.org/x/net v0.43.0 // indirect 83 | golang.org/x/oauth2 v0.30.0 84 | golang.org/x/sync v0.16.0 85 | golang.org/x/sys v0.35.0 86 | golang.org/x/text v0.28.0 // indirect 87 | golang.org/x/time v0.12.0 // indirect 88 | google.golang.org/api v0.247.0 89 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect 90 | gopkg.in/validator.v2 v2.0.1 // indirect 91 | gopkg.in/yaml.v3 v3.0.1 // indirect 92 | storj.io/uplink v1.13.1 // indirect 93 | ) 94 | 95 | require ( 96 | cloud.google.com/go/auth v0.16.4 // indirect 97 | cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect 98 | cloud.google.com/go/compute/metadata v0.8.0 // indirect 99 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect 100 | 
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect 101 | github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect 102 | github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect 103 | github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect 104 | github.com/ProtonMail/go-srp v0.0.7 // indirect 105 | github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect 106 | github.com/PuerkitoBio/goquery v1.10.3 // indirect 107 | github.com/akavel/rsrc v0.10.2 // indirect 108 | github.com/anacrolix/generics v0.0.3 // indirect 109 | github.com/andybalholm/cascadia v1.3.3 // indirect 110 | github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect 111 | github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect 112 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect 113 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect 114 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect 115 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect 116 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect 117 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 // indirect 118 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect 119 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 // indirect 120 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect 121 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 // indirect 122 | github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect 123 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect 124 | github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 // indirect 125 | github.com/beorn7/perks v1.0.1 // indirect 126 | github.com/bradenaw/juniper v0.15.3 // indirect 127 | github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect 128 | 
github.com/calebcase/tmpfile v1.0.3 // indirect 129 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 130 | github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect 131 | github.com/cloudflare/circl v1.6.1 // indirect 132 | github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect 133 | github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect 134 | github.com/creasty/defaults v1.8.0 // indirect 135 | github.com/cronokirby/saferith v0.33.0 // indirect 136 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 137 | github.com/ebitengine/purego v0.8.4 // indirect 138 | github.com/emersion/go-message v0.18.2 // indirect 139 | github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect 140 | github.com/felixge/httpsnoop v1.0.4 // indirect 141 | github.com/flynn/noise v1.1.0 // indirect 142 | github.com/gdamore/encoding v1.0.1 // indirect 143 | github.com/geoffgarside/ber v1.2.0 // indirect 144 | github.com/go-logr/logr v1.4.3 // indirect 145 | github.com/go-logr/stdr v1.2.2 // indirect 146 | github.com/go-ole/go-ole v1.3.0 // indirect 147 | github.com/go-openapi/errors v0.22.2 // indirect 148 | github.com/go-openapi/strfmt v0.23.0 // indirect 149 | github.com/go-playground/locales v0.14.1 // indirect 150 | github.com/go-playground/universal-translator v0.18.1 // indirect 151 | github.com/go-playground/validator/v10 v10.27.0 // indirect 152 | github.com/go-resty/resty/v2 v2.16.5 // indirect 153 | github.com/gofrs/flock v0.12.1 // indirect 154 | github.com/gogo/protobuf v1.3.2 // indirect 155 | github.com/golang-jwt/jwt/v5 v5.3.0 // indirect 156 | github.com/google/s2a-go v0.1.9 // indirect 157 | github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect 158 | github.com/googleapis/gax-go/v2 v2.15.0 // indirect 159 | github.com/gorilla/schema v1.4.1 // indirect 160 | github.com/hashicorp/errwrap v1.1.0 // indirect 161 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 162 | 
github.com/hashicorp/go-multierror v1.1.1 // indirect 163 | github.com/hashicorp/go-retryablehttp v0.7.8 // indirect 164 | github.com/hashicorp/go-uuid v1.0.3 // indirect 165 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 166 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 167 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 168 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 169 | github.com/jcmturner/gofork v1.7.6 // indirect 170 | github.com/jcmturner/goidentity/v6 v6.0.1 // indirect 171 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 172 | github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect 173 | github.com/klauspost/cpuid/v2 v2.3.0 // indirect 174 | github.com/kr/fs v0.1.0 // indirect 175 | github.com/kylelemons/godebug v1.1.0 // indirect 176 | github.com/leodido/go-urn v1.4.0 // indirect 177 | github.com/lpar/date v1.0.0 // indirect 178 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 179 | github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect 180 | github.com/mattn/go-isatty v0.0.20 // indirect 181 | github.com/minio/xxml v0.0.3 // indirect 182 | github.com/mitchellh/mapstructure v1.5.0 // indirect 183 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 184 | github.com/oklog/ulid v1.3.1 // indirect 185 | github.com/panjf2000/ants/v2 v2.11.3 // indirect 186 | github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect 187 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 188 | github.com/pkg/errors v0.9.1 // indirect 189 | github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect 190 | github.com/prometheus/client_model v0.6.2 // indirect 191 | github.com/prometheus/common v0.65.0 // indirect 192 | github.com/prometheus/procfs v0.17.0 // indirect 193 | github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect 194 | github.com/relvacode/iso8601 v1.6.0 // indirect 195 | 
github.com/russross/blackfriday/v2 v2.1.0 // indirect 196 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect 197 | github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect 198 | github.com/samber/lo v1.51.0 // indirect 199 | github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect 200 | github.com/sirupsen/logrus v1.9.3 // indirect 201 | github.com/sony/gobreaker v1.0.0 // indirect 202 | github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect 203 | github.com/tklauser/go-sysconf v0.3.15 // indirect 204 | github.com/tklauser/numcpus v0.10.0 // indirect 205 | github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect 206 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 207 | github.com/zeebo/errs v1.4.0 // indirect 208 | go.mongodb.org/mongo-driver v1.17.4 // indirect 209 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 210 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect 211 | go.opentelemetry.io/otel v1.37.0 // indirect 212 | go.opentelemetry.io/otel/metric v1.37.0 // indirect 213 | go.opentelemetry.io/otel/trace v1.37.0 // indirect 214 | go.yaml.in/yaml/v2 v2.4.2 // indirect 215 | golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect 216 | golang.org/x/tools v0.36.0 // indirect 217 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect 218 | google.golang.org/grpc v1.74.2 // indirect 219 | google.golang.org/protobuf v1.36.7 // indirect 220 | gopkg.in/yaml.v2 v2.4.0 // indirect 221 | moul.io/http2curl/v2 v2.3.0 // indirect 222 | sigs.k8s.io/yaml v1.6.0 // indirect 223 | storj.io/common v0.0.0-20250808122759-804533d519c1 // indirect 224 | storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect 225 | storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect 226 | storj.io/infectious v0.0.2 // indirect 227 | storj.io/picobuf v0.0.4 // indirect 228 | ) 229 | 230 | require ( 231 | 
github.com/IBM/go-sdk-core/v5 v5.21.0 // indirect 232 | github.com/Microsoft/go-winio v0.6.2 // indirect 233 | github.com/ProtonMail/go-crypto v1.3.0 // indirect 234 | github.com/golang-jwt/jwt/v4 v4.5.2 // indirect 235 | github.com/pkg/xattr v0.4.12 // indirect 236 | golang.org/x/term v0.34.0 // indirect 237 | ) 238 | 239 | require ( 240 | github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 241 | github.com/rclone/rclone v1.71.0 242 | golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd 243 | ) 244 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Github Actions build for rclone 3 | # -*- compile-command: "yamllint -f parsable build.yml" -*- 4 | 5 | name: build 6 | 7 | # Trigger the workflow on push or pull request 8 | on: 9 | push: 10 | tags: 11 | - '**' 12 | pull_request: 13 | 14 | jobs: 15 | build: 16 | timeout-minutes: 60 17 | defaults: 18 | run: 19 | shell: bash 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24'] 24 | 25 | include: 26 | - job_name: linux 27 | os: ubuntu-latest 28 | go: '>=1.25.0-rc.1' 29 | gotags: cmount 30 | build_flags: '-include "^linux/"' 31 | check: false 32 | quicktest: false 33 | racequicktest: false 34 | librclonetest: false 35 | deploy: true 36 | 37 | - job_name: linux_386 38 | os: ubuntu-latest 39 | go: '>=1.25.0-rc.1' 40 | goarch: 386 41 | gotags: cmount 42 | quicktest: false 43 | 44 | - job_name: mac_amd64 45 | os: macos-latest 46 | go: '>=1.25.0-rc.1' 47 | gotags: 'cmount' 48 | build_flags: '-include "^darwin/amd64" -cgo' 49 | quicktest: false 50 | racequicktest: false 51 | deploy: true 52 | 53 | - job_name: mac_arm64 54 | os: macos-latest 55 | go: '>=1.25.0-rc.1' 56 | gotags: 'cmount' 57 | build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 
-cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' 58 | deploy: true 59 | 60 | - job_name: windows 61 | os: windows-latest 62 | go: '>=1.25.0-rc.1' 63 | gotags: cmount 64 | cgo: '0' 65 | build_flags: '-include "^windows/"' 66 | build_args: '-buildmode exe' 67 | quicktest: false 68 | deploy: true 69 | 70 | - job_name: other_os 71 | os: ubuntu-latest 72 | go: '>=1.25.0-rc.1' 73 | build_flags: '-exclude "^(windows/|darwin/|linux/)"' 74 | compile_all: true 75 | deploy: true 76 | 77 | - job_name: go1.24 78 | os: ubuntu-latest 79 | go: '1.24' 80 | quicktest: false 81 | racequicktest: false 82 | 83 | name: ${{ matrix.job_name }} 84 | 85 | runs-on: ${{ matrix.os }} 86 | 87 | steps: 88 | - name: Checkout 89 | uses: actions/checkout@v5 90 | with: 91 | fetch-depth: 0 92 | 93 | - name: Install Go 94 | uses: actions/setup-go@v5 95 | with: 96 | go-version: ${{ matrix.go }} 97 | check-latest: true 98 | 99 | - name: Set environment variables 100 | run: | 101 | echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV 102 | echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV 103 | echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV 104 | if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi 105 | if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi 106 | 107 | - name: Install Libraries on Linux 108 | run: | 109 | sudo modprobe fuse 110 | sudo chmod 666 /dev/fuse 111 | sudo chown root:$USER /etc/fuse.conf 112 | sudo apt-get update 113 | sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common 114 | if: matrix.os == 'ubuntu-latest' 115 | 116 | - name: Install Libraries on macOS 117 | run: | 118 | # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788 119 | # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008 120 | unset HOMEBREW_NO_INSTALL_FROM_API 121 | brew untap --force homebrew/core 122 
| brew untap --force homebrew/cask 123 | brew update 124 | brew install --cask macfuse 125 | brew install git-annex git-annex-remote-rclone 126 | if: matrix.os == 'macos-latest' 127 | 128 | - name: Install Libraries on Windows 129 | shell: powershell 130 | run: | 131 | $ProgressPreference = 'SilentlyContinue' 132 | choco install -y winfsp zip 133 | echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append 134 | if ($env:GOARCH -eq "386") { 135 | choco install -y mingw --forcex86 --force 136 | echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 137 | } 138 | # Copy mingw32-make.exe to make.exe so the same command line 139 | # can be used on Windows as on macOS and Linux 140 | $path = (get-command mingw32-make.exe).Path 141 | Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe') 142 | if: matrix.os == 'windows-latest' 143 | 144 | - name: Print Go version and environment 145 | run: | 146 | printf "Using go at: $(which go)\n" 147 | printf "Go version: $(go version)\n" 148 | printf "\n\nGo environment:\n\n" 149 | go env 150 | printf "\n\nRclone environment:\n\n" 151 | make vars 152 | printf "\n\nSystem environment:\n\n" 153 | env 154 | 155 | - name: Run tests 156 | run: | 157 | make quicktest 158 | if: matrix.quicktest 159 | 160 | - name: Race test 161 | run: | 162 | make racequicktest 163 | if: matrix.racequicktest 164 | 165 | - name: Compile all architectures test 166 | run: | 167 | make 168 | make compile_all 169 | if: matrix.compile_all 170 | 171 | - name: Deploy built binaries 172 | run: | 173 | if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi 174 | make upload_github 175 | # make compile_all 176 | # working-directory: '$(modulePath)' 177 | # Deploy binaries if enabled in config && not a PR && not a fork 178 | if: matrix.deploy && 
github.head_ref == '' && github.repository == 'dogbutcat/gclone' 179 | env: 180 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 181 | 182 | # lint: 183 | # if: inputs.manual || (github.repository == 'dogbutcat/gclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) 184 | # timeout-minutes: 30 185 | # name: "lint" 186 | # runs-on: ubuntu-latest 187 | 188 | # steps: 189 | # - name: Get runner parameters 190 | # id: get-runner-parameters 191 | # shell: bash 192 | # run: | 193 | # echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT 194 | # echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT 195 | 196 | # - name: Checkout 197 | # uses: actions/checkout@v4 198 | 199 | # - name: Install Go 200 | # id: setup-go 201 | # uses: actions/setup-go@v5 202 | # with: 203 | # go-version: '>=1.23.0-rc.1' 204 | # check-latest: true 205 | # cache: false 206 | 207 | # - name: Cache 208 | # uses: actions/cache@v4 209 | # with: 210 | # path: | 211 | # ~/go/pkg/mod 212 | # ~/.cache/go-build 213 | # ~/.cache/golangci-lint 214 | # key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }} 215 | # restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}- 216 | 217 | # - name: Code quality test (Linux) 218 | # uses: golangci/golangci-lint-action@v6 219 | # with: 220 | # version: latest 221 | # skip-cache: true 222 | 223 | # - name: Code quality test (Windows) 224 | # uses: golangci/golangci-lint-action@v6 225 | # env: 226 | # GOOS: "windows" 227 | # with: 228 | # version: latest 229 | # skip-cache: true 230 | 231 | # - name: Code quality test (macOS) 232 | # uses: golangci/golangci-lint-action@v6 233 | # env: 234 | # GOOS: "darwin" 
235 | # with: 236 | # version: latest 237 | # skip-cache: true 238 | 239 | # - name: Code quality test (FreeBSD) 240 | # uses: golangci/golangci-lint-action@v6 241 | # env: 242 | # GOOS: "freebsd" 243 | # with: 244 | # version: latest 245 | # skip-cache: true 246 | 247 | # - name: Code quality test (OpenBSD) 248 | # uses: golangci/golangci-lint-action@v6 249 | # env: 250 | # GOOS: "openbsd" 251 | # with: 252 | # version: latest 253 | # skip-cache: true 254 | 255 | # - name: Install govulncheck 256 | # run: go install golang.org/x/vuln/cmd/govulncheck@latest 257 | 258 | # - name: Scan for vulnerabilities 259 | # run: govulncheck ./... 260 | 261 | android: 262 | if: inputs.manual || (github.repository == 'dogbutcat/gclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) 263 | timeout-minutes: 30 264 | name: "android-all" 265 | runs-on: ubuntu-latest 266 | 267 | steps: 268 | - name: Checkout 269 | uses: actions/checkout@v5 270 | with: 271 | fetch-depth: 0 272 | 273 | # Upgrade together with NDK version 274 | - name: Set up Go 275 | uses: actions/setup-go@v5 276 | with: 277 | go-version: '>=1.25.0-rc.1' 278 | 279 | - name: Set global environment variables 280 | run: | 281 | echo "VERSION=$(make version)" >> $GITHUB_ENV 282 | 283 | - name: build native gclone 284 | run: | 285 | make 286 | 287 | - name: install gomobile 288 | run: | 289 | go install golang.org/x/mobile/cmd/gobind@latest 290 | go install golang.org/x/mobile/cmd/gomobile@latest 291 | env PATH=$PATH:~/go/bin gomobile init 292 | echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV 293 | 294 | - name: arm-v7a gomobile build 295 | run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile 296 | 297 | - name: arm-v7a Set environment variables 298 | run: | 299 | 
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV 300 | echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV 301 | echo 'GOOS=android' >> $GITHUB_ENV 302 | echo 'GOARCH=arm' >> $GITHUB_ENV 303 | echo 'GOARM=7' >> $GITHUB_ENV 304 | echo 'CGO_ENABLED=1' >> $GITHUB_ENV 305 | echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV 306 | 307 | - name: arm-v7a build 308 | run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/gclone-android-${RCLONE_NDK_VERSION}-armv7a . 309 | 310 | - name: arm64-v8a Set environment variables 311 | run: | 312 | echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV 313 | echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV 314 | echo 'GOOS=android' >> $GITHUB_ENV 315 | echo 'GOARCH=arm64' >> $GITHUB_ENV 316 | echo 'CGO_ENABLED=1' >> $GITHUB_ENV 317 | echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV 318 | 319 | - name: arm64-v8a build 320 | run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/gclone-android-${RCLONE_NDK_VERSION}-armv8a . 321 | 322 | - name: x86 Set environment variables 323 | run: | 324 | echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV 325 | echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV 326 | echo 'GOOS=android' >> $GITHUB_ENV 327 | echo 'GOARCH=386' >> $GITHUB_ENV 328 | echo 'CGO_ENABLED=1' >> $GITHUB_ENV 329 | echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV 330 | 331 | - name: x86 build 332 | run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/gclone-android-${RCLONE_NDK_VERSION}-x86 . 
333 | 334 | - name: x64 Set environment variables 335 | run: | 336 | echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV 337 | echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV 338 | echo 'GOOS=android' >> $GITHUB_ENV 339 | echo 'GOARCH=amd64' >> $GITHUB_ENV 340 | echo 'CGO_ENABLED=1' >> $GITHUB_ENV 341 | echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV 342 | 343 | - name: x64 build 344 | run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/gclone-android-${RCLONE_NDK_VERSION}-x64 . 345 | 346 | - name: Upload artifacts 347 | run: | 348 | make ci_upload 349 | env: 350 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /bin/get-github-release.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | 3 | // Get the latest release from a github project 4 | // 5 | // If GITHUB_USER and GITHUB_TOKEN are set then these will be used to 6 | // authenticate the request which is useful to avoid rate limits. 
7 | 8 | package main 9 | 10 | import ( 11 | "archive/tar" 12 | "compress/bzip2" 13 | "compress/gzip" 14 | "encoding/json" 15 | "flag" 16 | "fmt" 17 | "io" 18 | "log" 19 | "net/http" 20 | "net/url" 21 | "os" 22 | "os/exec" 23 | "path" 24 | "path/filepath" 25 | "regexp" 26 | "runtime" 27 | "strings" 28 | "time" 29 | 30 | "github.com/rclone/rclone/lib/rest" 31 | "golang.org/x/net/html" 32 | "golang.org/x/sys/unix" 33 | ) 34 | 35 | var ( 36 | // Flags 37 | install = flag.Bool("install", false, "Install the downloaded package using sudo dpkg -i.") 38 | extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.") 39 | bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.") 40 | useAPI = flag.Bool("use-api", false, "Use the API for finding the release instead of scraping the page.") 41 | // Globals 42 | matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`) 43 | osAliases = map[string][]string{ 44 | "darwin": {"macos", "osx"}, 45 | } 46 | archAliases = map[string][]string{ 47 | "amd64": {"x86_64"}, 48 | } 49 | ) 50 | 51 | // A github release 52 | // 53 | // Made by pasting the JSON into https://mholt.github.io/json-to-go/ 54 | type Release struct { 55 | URL string `json:"url"` 56 | AssetsURL string `json:"assets_url"` 57 | UploadURL string `json:"upload_url"` 58 | HTMLURL string `json:"html_url"` 59 | ID int `json:"id"` 60 | TagName string `json:"tag_name"` 61 | TargetCommitish string `json:"target_commitish"` 62 | Name string `json:"name"` 63 | Draft bool `json:"draft"` 64 | Author struct { 65 | Login string `json:"login"` 66 | ID int `json:"id"` 67 | AvatarURL string `json:"avatar_url"` 68 | GravatarID string `json:"gravatar_id"` 69 | URL string `json:"url"` 70 | HTMLURL string `json:"html_url"` 71 | FollowersURL string `json:"followers_url"` 72 | FollowingURL string `json:"following_url"` 73 | GistsURL string `json:"gists_url"` 74 | StarredURL string `json:"starred_url"` 
75 | SubscriptionsURL string `json:"subscriptions_url"` 76 | OrganizationsURL string `json:"organizations_url"` 77 | ReposURL string `json:"repos_url"` 78 | EventsURL string `json:"events_url"` 79 | ReceivedEventsURL string `json:"received_events_url"` 80 | Type string `json:"type"` 81 | SiteAdmin bool `json:"site_admin"` 82 | } `json:"author"` 83 | Prerelease bool `json:"prerelease"` 84 | CreatedAt time.Time `json:"created_at"` 85 | PublishedAt time.Time `json:"published_at"` 86 | Assets []struct { 87 | URL string `json:"url"` 88 | ID int `json:"id"` 89 | Name string `json:"name"` 90 | Label string `json:"label"` 91 | Uploader struct { 92 | Login string `json:"login"` 93 | ID int `json:"id"` 94 | AvatarURL string `json:"avatar_url"` 95 | GravatarID string `json:"gravatar_id"` 96 | URL string `json:"url"` 97 | HTMLURL string `json:"html_url"` 98 | FollowersURL string `json:"followers_url"` 99 | FollowingURL string `json:"following_url"` 100 | GistsURL string `json:"gists_url"` 101 | StarredURL string `json:"starred_url"` 102 | SubscriptionsURL string `json:"subscriptions_url"` 103 | OrganizationsURL string `json:"organizations_url"` 104 | ReposURL string `json:"repos_url"` 105 | EventsURL string `json:"events_url"` 106 | ReceivedEventsURL string `json:"received_events_url"` 107 | Type string `json:"type"` 108 | SiteAdmin bool `json:"site_admin"` 109 | } `json:"uploader"` 110 | ContentType string `json:"content_type"` 111 | State string `json:"state"` 112 | Size int `json:"size"` 113 | DownloadCount int `json:"download_count"` 114 | CreatedAt time.Time `json:"created_at"` 115 | UpdatedAt time.Time `json:"updated_at"` 116 | BrowserDownloadURL string `json:"browser_download_url"` 117 | } `json:"assets"` 118 | TarballURL string `json:"tarball_url"` 119 | ZipballURL string `json:"zipball_url"` 120 | Body string `json:"body"` 121 | } 122 | 123 | // checks if a path has write access 124 | func writable(path string) bool { 125 | return unix.Access(path, unix.W_OK) == nil 
126 | } 127 | 128 | // Directory to install releases in by default 129 | // 130 | // Find writable directories on $PATH. Use $GOPATH/bin if that is on 131 | // the path and writable or use the first writable directory which is 132 | // in $HOME or failing that the first writable directory. 133 | // 134 | // Returns "" if none of the above were found 135 | func defaultBinDir() string { 136 | home := os.Getenv("HOME") 137 | var ( 138 | bin string 139 | homeBin string 140 | goHomeBin string 141 | gopath = os.Getenv("GOPATH") 142 | ) 143 | for _, dir := range strings.Split(os.Getenv("PATH"), ":") { 144 | if writable(dir) { 145 | if strings.HasPrefix(dir, home) { 146 | if homeBin != "" { 147 | homeBin = dir 148 | } 149 | if gopath != "" && strings.HasPrefix(dir, gopath) && goHomeBin == "" { 150 | goHomeBin = dir 151 | } 152 | } 153 | if bin == "" { 154 | bin = dir 155 | } 156 | } 157 | } 158 | if goHomeBin != "" { 159 | return goHomeBin 160 | } 161 | if homeBin != "" { 162 | return homeBin 163 | } 164 | return bin 165 | } 166 | 167 | // read the body or an error message 168 | func readBody(in io.Reader) string { 169 | data, err := io.ReadAll(in) 170 | if err != nil { 171 | return fmt.Sprintf("Error reading body: %v", err.Error()) 172 | } 173 | return string(data) 174 | } 175 | 176 | // Get an asset URL and name 177 | func getAsset(project string, matchName *regexp.Regexp) (string, string) { 178 | url := "https://api.github.com/repos/" + project + "/releases/latest" 179 | log.Printf("Fetching asset info for %q from %q", project, url) 180 | user, pass := os.Getenv("GITHUB_USER"), os.Getenv("GITHUB_TOKEN") 181 | req, err := http.NewRequest("GET", url, nil) 182 | if err != nil { 183 | log.Fatalf("Failed to make http request %q: %v", url, err) 184 | } 185 | if user != "" && pass != "" { 186 | log.Printf("Fetching using GITHUB_USER and GITHUB_TOKEN") 187 | req.SetBasicAuth(user, pass) 188 | } 189 | resp, err := http.DefaultClient.Do(req) 190 | if err != nil { 191 | 
log.Fatalf("Failed to fetch release info %q: %v", url, err) 192 | } 193 | if resp.StatusCode != http.StatusOK { 194 | log.Printf("Error: %s", readBody(resp.Body)) 195 | log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, url, resp.Status) 196 | } 197 | var release Release 198 | err = json.NewDecoder(resp.Body).Decode(&release) 199 | if err != nil { 200 | log.Fatalf("Failed to decode release info: %v", err) 201 | } 202 | err = resp.Body.Close() 203 | if err != nil { 204 | log.Fatalf("Failed to close body: %v", err) 205 | } 206 | 207 | for _, asset := range release.Assets { 208 | //log.Printf("Finding %s", asset.Name) 209 | if matchName.MatchString(asset.Name) && isOurOsArch(asset.Name) { 210 | return asset.BrowserDownloadURL, asset.Name 211 | } 212 | } 213 | log.Fatalf("Didn't find asset in info") 214 | return "", "" 215 | } 216 | 217 | // Get an asset URL and name by scraping the downloads page 218 | // 219 | // This doesn't use the API so isn't rate limited when not using GITHUB login details 220 | func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetURL string, assetName string) { 221 | baseURL := "https://github.com/" + project + "/releases" 222 | log.Printf("Fetching asset info for %q from %q", project, baseURL) 223 | base, err := url.Parse(baseURL) 224 | if err != nil { 225 | log.Fatalf("URL Parse failed: %v", err) 226 | } 227 | resp, err := http.Get(baseURL) 228 | if err != nil { 229 | log.Fatalf("Failed to fetch release info %q: %v", baseURL, err) 230 | } 231 | defer resp.Body.Close() 232 | if resp.StatusCode != http.StatusOK { 233 | log.Printf("Error: %s", readBody(resp.Body)) 234 | log.Fatalf("Bad status %d when fetching %q release info: %s", resp.StatusCode, baseURL, resp.Status) 235 | } 236 | doc, err := html.Parse(resp.Body) 237 | if err != nil { 238 | log.Fatalf("Failed to parse web page: %v", err) 239 | } 240 | var walk func(*html.Node) 241 | walk = func(n *html.Node) { 242 | if n.Type == 
html.ElementNode && n.Data == "a" { 243 | for _, a := range n.Attr { 244 | if a.Key == "href" { 245 | if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) { 246 | if u, err := rest.URLJoin(base, a.Val); err == nil { 247 | if assetName == "" { 248 | assetName = name 249 | assetURL = u.String() 250 | } 251 | } 252 | } 253 | break 254 | } 255 | } 256 | } 257 | for c := n.FirstChild; c != nil; c = c.NextSibling { 258 | walk(c) 259 | } 260 | } 261 | walk(doc) 262 | if assetName == "" || assetURL == "" { 263 | log.Fatalf("Didn't find URL in page") 264 | } 265 | return assetURL, assetName 266 | } 267 | 268 | // isOurOsArch returns true if s contains our OS and our Arch 269 | func isOurOsArch(s string) bool { 270 | s = strings.ToLower(s) 271 | check := func(base string, aliases map[string][]string) bool { 272 | names := []string{base} 273 | names = append(names, aliases[base]...) 274 | for _, name := range names { 275 | if strings.Contains(s, name) { 276 | return true 277 | } 278 | } 279 | return false 280 | } 281 | return check(runtime.GOARCH, archAliases) && check(runtime.GOOS, osAliases) 282 | } 283 | 284 | // get a file for download 285 | func getFile(url, fileName string) { 286 | log.Printf("Downloading %q from %q", fileName, url) 287 | 288 | out, err := os.Create(fileName) 289 | if err != nil { 290 | log.Fatalf("Failed to open %q: %v", fileName, err) 291 | } 292 | 293 | resp, err := http.Get(url) 294 | if err != nil { 295 | log.Fatalf("Failed to fetch asset %q: %v", url, err) 296 | } 297 | if resp.StatusCode != http.StatusOK { 298 | log.Printf("Error: %s", readBody(resp.Body)) 299 | log.Fatalf("Bad status %d when fetching %q asset: %s", resp.StatusCode, url, resp.Status) 300 | } 301 | 302 | n, err := io.Copy(out, resp.Body) 303 | if err != nil { 304 | log.Fatalf("Error while downloading: %v", err) 305 | } 306 | 307 | err = resp.Body.Close() 308 | if err != nil { 309 | log.Fatalf("Failed to close body: %v", err) 310 | } 311 | err = out.Close() 
312 | if err != nil { 313 | log.Fatalf("Failed to close output file: %v", err) 314 | } 315 | 316 | log.Printf("Downloaded %q (%d bytes)", fileName, n) 317 | } 318 | 319 | // run a shell command 320 | func run(args ...string) { 321 | cmd := exec.Command(args[0], args[1:]...) 322 | cmd.Stdout = os.Stdout 323 | cmd.Stderr = os.Stderr 324 | err := cmd.Run() 325 | if err != nil { 326 | log.Fatalf("Failed to run %v: %v", args, err) 327 | } 328 | } 329 | 330 | // Untars fileName from srcFile 331 | func untar(srcFile, fileName, extractDir string) { 332 | f, err := os.Open(srcFile) 333 | if err != nil { 334 | log.Fatalf("Couldn't open tar: %v", err) 335 | } 336 | defer func() { 337 | err := f.Close() 338 | if err != nil { 339 | log.Fatalf("Couldn't close tar: %v", err) 340 | } 341 | }() 342 | 343 | var in io.Reader = f 344 | 345 | srcExt := filepath.Ext(srcFile) 346 | if srcExt == ".gz" || srcExt == ".tgz" { 347 | gzf, err := gzip.NewReader(f) 348 | if err != nil { 349 | log.Fatalf("Couldn't open gzip: %v", err) 350 | } 351 | in = gzf 352 | } else if srcExt == ".bz2" { 353 | in = bzip2.NewReader(f) 354 | } 355 | 356 | tarReader := tar.NewReader(in) 357 | 358 | for { 359 | header, err := tarReader.Next() 360 | if err == io.EOF { 361 | break 362 | } 363 | if err != nil { 364 | log.Fatalf("Trouble reading tar file: %v", err) 365 | } 366 | name := header.Name 367 | switch header.Typeflag { 368 | case tar.TypeReg: 369 | baseName := filepath.Base(name) 370 | if baseName == fileName { 371 | outPath := filepath.Join(extractDir, fileName) 372 | out, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777) 373 | if err != nil { 374 | log.Fatalf("Couldn't open output file: %v", err) 375 | } 376 | n, err := io.Copy(out, tarReader) 377 | if err != nil { 378 | log.Fatalf("Couldn't write output file: %v", err) 379 | } 380 | if err = out.Close(); err != nil { 381 | log.Fatalf("Couldn't close output: %v", err) 382 | } 383 | log.Printf("Wrote %s (%d bytes) as %q", fileName, n, 
outPath) 384 | } 385 | } 386 | } 387 | } 388 | 389 | func main() { 390 | flag.Parse() 391 | args := flag.Args() 392 | if len(args) != 2 { 393 | log.Fatalf("Syntax: %s ", os.Args[0]) 394 | } 395 | project, nameRe := args[0], args[1] 396 | if !matchProject.MatchString(project) { 397 | log.Fatalf("Project %q must be in form user/project", project) 398 | } 399 | matchName, err := regexp.Compile(nameRe) 400 | if err != nil { 401 | log.Fatalf("Invalid regexp for name %q: %v", nameRe, err) 402 | } 403 | 404 | var assetURL, assetName string 405 | if *useAPI { 406 | assetURL, assetName = getAsset(project, matchName) 407 | } else { 408 | assetURL, assetName = getAssetFromReleasesPage(project, matchName) 409 | } 410 | fileName := filepath.Join(os.TempDir(), assetName) 411 | getFile(assetURL, fileName) 412 | 413 | if *install { 414 | log.Printf("Installing %s", fileName) 415 | run("sudo", "dpkg", "--force-bad-version", "-i", fileName) 416 | log.Printf("Installed %s", fileName) 417 | } else if *extract != "" { 418 | if *bindir == "" { 419 | log.Fatalf("Need to set -bindir") 420 | } 421 | log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir) 422 | untar(fileName, *extract, *bindir+"/") 423 | } 424 | } 425 | -------------------------------------------------------------------------------- /cmd/selfupdate/selfupdate.go: -------------------------------------------------------------------------------- 1 | //go:build !noselfupdate 2 | 3 | // Package selfupdate provides the selfupdate command. 
4 | package selfupdate 5 | 6 | import ( 7 | "archive/zip" 8 | "bytes" 9 | "context" 10 | _ "embed" 11 | "errors" 12 | "fmt" 13 | "io" 14 | "net/http" 15 | "os" 16 | "os/exec" 17 | "path/filepath" 18 | "runtime" 19 | "strings" 20 | 21 | "github.com/rclone/rclone/cmd" 22 | "github.com/rclone/rclone/cmd/cmount" 23 | "github.com/rclone/rclone/fs" 24 | "github.com/rclone/rclone/fs/config/flags" 25 | "github.com/rclone/rclone/fs/fshttp" 26 | "github.com/rclone/rclone/lib/buildinfo" 27 | "github.com/rclone/rclone/lib/random" 28 | "github.com/spf13/cobra" 29 | 30 | versionCmd "github.com/dogbutcat/gclone/cmd/version" 31 | ) 32 | 33 | //go:embed selfupdate.md 34 | var selfUpdateHelp string 35 | 36 | // Options contains options for the self-update command 37 | type Options struct { 38 | Check bool 39 | Output string // output path 40 | Beta bool // mutually exclusive with Stable (false means "stable") 41 | Stable bool // mutually exclusive with Beta 42 | Version string 43 | Package string // package format: zip, deb, rpm (empty string means "zip") 44 | } 45 | 46 | // Opt is options set via command line 47 | var Opt = Options{} 48 | 49 | func init() { 50 | cmd.Root.AddCommand(cmdSelfUpdate) 51 | cmdFlags := cmdSelfUpdate.Flags() 52 | flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download", "") 53 | flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)", "") 54 | // flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)", "") 55 | // flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "place holder", "") 56 | flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given gclone version (default: latest)", "") 57 | flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)", "") 58 | } 59 | 60 | var 
cmdSelfUpdate = &cobra.Command{ 61 | Use: "gselfupdate", 62 | Aliases: []string{"g-self-update"}, 63 | Short: `Update the gclone binary.`, 64 | Long: strings.ReplaceAll(selfUpdateHelp, "|", "`"), 65 | Annotations: map[string]string{ 66 | "versionIntroduced": "v1.64", 67 | }, 68 | Run: func(command *cobra.Command, args []string) { 69 | ctx := context.Background() 70 | cmd.CheckArgs(0, 0, command, args) 71 | Opt.Beta = false 72 | Opt.Stable = true 73 | if Opt.Package == "" { 74 | Opt.Package = "zip" 75 | } 76 | gotActionFlags := Opt.Stable || Opt.Beta || Opt.Output != "" || Opt.Version != "" || Opt.Package != "zip" 77 | if Opt.Check && !gotActionFlags { 78 | versionCmd.CheckVersion(ctx) 79 | return 80 | } 81 | if Opt.Package != "zip" { 82 | if Opt.Package != "deb" && Opt.Package != "rpm" { 83 | fs.Fatalf(nil, "--package should be one of zip|deb|rpm") 84 | } 85 | if runtime.GOOS != "linux" { 86 | fs.Fatalf(nil, ".deb and .rpm packages are supported only on Linux") 87 | } else if os.Geteuid() != 0 && !Opt.Check { 88 | fs.Fatalf(nil, ".deb and .rpm must be installed by root") 89 | } 90 | if Opt.Output != "" && !Opt.Check { 91 | fmt.Println("Warning: --output is ignored with --package deb|rpm") 92 | } 93 | } 94 | if err := InstallUpdate(context.Background(), &Opt); err != nil { 95 | fs.Fatalf(nil, "Error: %v", err) 96 | } 97 | }, 98 | } 99 | 100 | // GetVersion can get the latest release number from the download site 101 | // or massage a stable release number - prepend semantic "v" prefix 102 | // or find the latest micro release for a given major.minor release. 103 | // Note: this will not be applied to beta releases. 
104 | func GetVersion(ctx context.Context, beta bool, version string) (newVersion, siteURL string, err error) { 105 | siteURL = "https://github.com/dogbutcat/gclone/releases/download/" 106 | if beta { 107 | fmt.Println("No beta version support.") 108 | return 109 | } 110 | 111 | if version == "" { 112 | // Request the latest release number from the download site 113 | _, newVersion, _, err = versionCmd.GetVersion( 114 | ctx, 115 | "https://github.com/dogbutcat/gclone/releases/latest/download"+ 116 | "/version.txt", 117 | ) 118 | return 119 | } 120 | 121 | newVersion = version 122 | if version[0] != 'v' { 123 | newVersion = "v" + version 124 | } 125 | if beta { 126 | return 127 | } 128 | 129 | return 130 | } 131 | 132 | func isLatest(vA string, vB string) bool { 133 | vA1, vA2 := versionCmd.ConvertV(vA) 134 | vB1, vB2 := versionCmd.ConvertV(vB) 135 | if vB1.Compare(*vA1) > 0 { 136 | return false 137 | } 138 | if vB1.Compare(*vA1) == 0 && vB2.Compare(*vA2) > 0 { 139 | return false 140 | } 141 | return true 142 | } 143 | 144 | // InstallUpdate performs gclone self-update 145 | func InstallUpdate(ctx context.Context, opt *Options) error { 146 | // Find the latest release number 147 | if opt.Stable && opt.Beta { 148 | return errors.New("--stable and --beta are mutually exclusive") 149 | } 150 | 151 | // The `cmount` tag is added by cmd/cmount/mount.go only if build is static. 
152 | _, tags := buildinfo.GetLinkingAndTags() 153 | if strings.Contains(" "+tags+" ", " cmount ") && !cmount.ProvidedBy(runtime.GOOS) { 154 | return errors.New("updating would discard the mount FUSE capability, aborting") 155 | } 156 | 157 | newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version) 158 | if err != nil { 159 | return fmt.Errorf("unable to detect new version: %w", err) 160 | } 161 | 162 | oldVersion := fs.Version 163 | 164 | // not specified version and installed version is newer then remote 165 | if opt.Version == "" && isLatest(oldVersion, newVersion) { 166 | fs.Logf(nil, "gclone is up to date") 167 | return nil 168 | } 169 | 170 | // Install .deb/.rpm package if requested by user 171 | if opt.Package == "deb" || opt.Package == "rpm" { 172 | if opt.Check { 173 | fmt.Println("Warning: --package flag is ignored in --check mode") 174 | } else { 175 | err := installPackage(ctx, opt.Beta, newVersion, siteURL, opt.Package) 176 | if err == nil { 177 | fs.Logf(nil, "Successfully updated gclone package from version %s to version %s", oldVersion, newVersion) 178 | } 179 | return err 180 | } 181 | } 182 | 183 | // Get the current executable path 184 | executable, err := os.Executable() 185 | if err != nil { 186 | return fmt.Errorf("unable to find executable: %w", err) 187 | } 188 | 189 | targetFile := opt.Output 190 | if targetFile == "" { 191 | targetFile = executable 192 | } 193 | 194 | if opt.Check { 195 | fmt.Printf("Without --check this would install gclone version %s at %s\n", newVersion, targetFile) 196 | return nil 197 | } 198 | 199 | // Make temporary file names and check for possible access errors in advance 200 | var newFile string 201 | if newFile, err = makeRandomExeName(targetFile, "new"); err != nil { 202 | return err 203 | } 204 | savedFile := "" 205 | if runtime.GOOS == "windows" { 206 | savedFile = targetFile 207 | savedFile = strings.TrimSuffix(savedFile, ".exe") 208 | savedFile += ".old.exe" 209 | } 210 | 211 | if savedFile == 
executable || newFile == executable { 212 | return fmt.Errorf("%s: a temporary file would overwrite the executable, specify a different --output path", targetFile) 213 | } 214 | 215 | if err := verifyAccess(targetFile); err != nil { 216 | return err 217 | } 218 | 219 | // Download the update as a temporary file 220 | err = downloadUpdate(ctx, opt.Beta, newVersion, siteURL, newFile, "zip") 221 | if err != nil { 222 | return fmt.Errorf("failed to update gclone: %w", err) 223 | } 224 | 225 | err = replaceExecutable(targetFile, newFile, savedFile) 226 | if err == nil { 227 | fs.Logf(nil, "Successfully updated gclone from version %s to version %s", oldVersion, newVersion) 228 | } 229 | return err 230 | } 231 | 232 | func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error { 233 | tempFile, err := os.CreateTemp("", "gclone.*."+packageFormat) 234 | if err != nil { 235 | return fmt.Errorf("unable to write temporary package: %w", err) 236 | } 237 | packageFile := tempFile.Name() 238 | _ = tempFile.Close() 239 | defer func() { 240 | if rmErr := os.Remove(packageFile); rmErr != nil { 241 | fs.Errorf(nil, "%s: could not remove temporary package: %v", packageFile, rmErr) 242 | } 243 | }() 244 | if err := downloadUpdate(ctx, beta, version, siteURL, packageFile, packageFormat); err != nil { 245 | return err 246 | } 247 | 248 | packageCommand := "dpkg" 249 | if packageFormat == "rpm" { 250 | packageCommand = "rpm" 251 | } 252 | cmd := exec.Command(packageCommand, "-i", packageFile) 253 | cmd.Stdout = os.Stdout 254 | cmd.Stderr = os.Stderr 255 | if err := cmd.Run(); err != nil { 256 | return fmt.Errorf("failed to run %s: %v", packageCommand, err) 257 | } 258 | return nil 259 | } 260 | 261 | func replaceExecutable(targetFile, newFile, savedFile string) error { 262 | // Copy permission bits from the old executable 263 | // (it was extracted with mode 0755) 264 | fileInfo, err := os.Lstat(targetFile) 265 | if err == nil { 266 | if err = 
os.Chmod(newFile, fileInfo.Mode()); err != nil {
			return fmt.Errorf("failed to set permission: %w", err)
		}
	}

	// Try the straightforward replacement first: remove the old binary
	// (missing is fine) and rename the download into place below.
	if err = os.Remove(targetFile); os.IsNotExist(err) {
		err = nil
	}

	if err != nil && savedFile != "" {
		// Windows forbids removal of a running executable so we rename it.
		// For starters, rename download as the original file with ".old.exe" appended.
		var saveErr error
		if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
			saveErr = nil
		}
		if saveErr == nil {
			saveErr = os.Rename(targetFile, savedFile)
		}
		if saveErr != nil {
			// The ".old" file cannot be removed or cannot be renamed to.
			// This usually means that the running executable has a name with ".old".
			// This can happen in very rare cases, but we ought to handle it.
			// Try inserting a randomness in the name to mitigate it.
			fs.Debugf(nil, "%s: cannot replace old file, randomizing name", savedFile)

			savedFile, saveErr = makeRandomExeName(targetFile, "old")
			if saveErr == nil {
				if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
					saveErr = nil
				}
			}
			if saveErr == nil {
				saveErr = os.Rename(targetFile, savedFile)
			}
		}
		if saveErr == nil {
			fs.Infof(nil, "The old executable was saved as %s", savedFile)
			err = nil
		}
	}

	if err == nil {
		// Final atomic-ish swap: move the downloaded binary into place.
		err = os.Rename(newFile, targetFile)
	}
	if err != nil {
		// Clean up the temporary download on failure.
		if rmErr := os.Remove(newFile); rmErr != nil {
			fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile, rmErr)
		}
		return err
	}
	return nil
}

// makeRandomExeName returns a file name of the form
// "<baseName>.<4 random chars>.<extension>" which does not currently exist.
// On Windows any ".exe" suffix is moved after the extension so the result
// still ends in ".exe". It gives up after a fixed number of attempts.
func makeRandomExeName(baseName, extension string) (string, error) {
	const maxAttempts = 5

	if runtime.GOOS == "windows" {
		baseName = strings.TrimSuffix(baseName, ".exe")
		extension += ".exe"
	}

	for attempt := 0; attempt < maxAttempts; attempt++ {
		filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
		if _, err := os.Stat(filename); os.IsNotExist(err) {
			return filename, nil
		}
	}

	return "", fmt.Errorf("cannot find a file name like %s.xxxx.%s", baseName, extension)
}

// downloadUpdate fetches the release archive for version from siteURL and
// writes the new executable (or, for "deb"/"rpm" packageFormat, the raw
// package) to newFile.
//
// NOTE(review): the beta parameter is currently unused because the hashsum
// verification that consulted it is commented out below.
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
	osName := runtime.GOOS
	if osName == "darwin" {
		osName = "osx"
	}
	arch := runtime.GOARCH
	if arch == "arm" {
		// Check the ARM compatibility level of the current CPU.
		// We don't know if this matches the gclone binary currently running, it
		// could for example be a ARMv6 variant running on a ARMv7 compatible CPU,
		// so we will simply pick the best possible variant.
		switch buildinfo.GetSupportedGOARM() {
		case 7:
			// This system can run any binaries built with GOARCH=arm, including GOARM=7.
			// Pick the ARMv7 variant of gclone, published with suffix "arm-v7".
			arch = "arm-v7"
		case 6:
			// This system can run binaries built with GOARCH=arm and GOARM=6 or lower.
			// Pick the ARMv6 variant of gclone, published with suffix "arm-v6".
			arch = "arm-v6"
		case 5:
			// This system can only run binaries built with GOARCH=arm and GOARM=5.
			// Pick the ARMv5 variant of gclone, which also works without hardfloat,
			// published with suffix "arm".
			arch = "arm"
		}
	}
	archiveFilename := fmt.Sprintf("gclone-%s-%s-%s.%s", version, osName, arch, packageFormat)
	archiveURL := fmt.Sprintf("%s/%s/%s", siteURL, version, archiveFilename)
	archiveBuf, err := downloadFile(ctx, archiveURL)
	if err != nil {
		return err
	}
	// gotHash := sha256.Sum256(archiveBuf)
	// strHash := hex.EncodeToString(gotHash[:])
	// fs.Debugf(nil, "downloaded release archive with hashsum %s from %s", strHash, archiveURL)

	// CI/CD does not provide hashsums for beta releases
	// if !beta {
	// 	if err := verifyHashsum(ctx, siteURL, version, archiveFilename, gotHash[:]); err != nil {
	// 		return err
	// 	}
	// }

	if packageFormat == "deb" || packageFormat == "rpm" {
		// Package formats are written out verbatim for the system package
		// manager to install; no extraction needed.
		if err := os.WriteFile(newFile, archiveBuf, 0644); err != nil {
			return fmt.Errorf("cannot write temporary .%s: %w", packageFormat, err)
		}
		return nil
	}

	entryName := fmt.Sprintf("gclone-%s-%s-%s/gclone", version, osName, arch)
	if runtime.GOOS == "windows" {
		entryName += ".exe"
	}

	// Extract executable to a temporary file, then replace it by an instant rename
	err = extractZipToFile(archiveBuf, entryName, newFile)
	if err != nil {
		return err
	}
	fs.Debugf(nil, "extracted %s to %s", entryName, newFile)
	return nil
}

// verifyAccess checks that file (or, if it does not exist yet, its parent
// directory) can be written by the current user, returning a descriptive
// error suggesting to re-run as root/Administrator otherwise.
func verifyAccess(file string) error {
	admin := "root"
	if runtime.GOOS == "windows" {
		admin = "Administrator"
	}

	fileInfo, fileErr := os.Lstat(file)

	if fileErr != nil {
		// Target does not exist (or cannot be stat-ed) - check the parent
		// directory instead since that is where the file will be created.
		dir := filepath.Dir(file)
		dirInfo, dirErr := os.Lstat(dir)
		if dirErr != nil {
			return dirErr
		}
		if !dirInfo.Mode().IsDir() {
			return fmt.Errorf("%s: parent path is not a directory, specify a different path using --output", dir)
		}
		if !writable(dir) {
			return fmt.Errorf("%s: directory is not writable, please run self-update as %s", dir, admin)
		}
	}

	if fileErr == nil && !fileInfo.Mode().IsRegular() {
		return fmt.Errorf("%s: path is not a normal file, specify a different path using --output", file)
	}

	if fileErr == nil && !writable(file) {
		return fmt.Errorf("%s: file is not writable, run self-update as %s", file, admin)
	}

	return nil
}

// func findFileHash(buf []byte, filename string) (hash []byte, err error) {
// 	lines := bufio.NewScanner(bytes.NewReader(buf))
// 	for lines.Scan() {
// 		tokens := strings.Split(lines.Text(), " ")
// 		if len(tokens) == 2 && tokens[1] == filename {
// 			if hash, err := hex.DecodeString(tokens[0]); err == nil {
// 				return hash, nil
// 			}
// 		}
// 	}
// 	return nil, fmt.Errorf("%s: unable to find hash", filename)
// }

// extractZipToFile extracts the archive entry entryName from the in-memory
// zip in buf and writes it to newFile with mode 0755, removing any existing
// newFile first.
func extractZipToFile(buf []byte, entryName, newFile string) error {
	zipReader, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
	if err != nil {
		return err
	}

	var reader io.ReadCloser
	for _, entry := range zipReader.File {
		if entry.Name == entryName {
			reader, err = entry.Open()
			break
		}
	}
	// FIX: previously `reader == nil || err != nil` returned the
	// "file not found" message even when the entry WAS found but failed
	// to open - report the two cases separately.
	if err != nil {
		return fmt.Errorf("%s: failed to open file in archive: %w", entryName, err)
	}
	if reader == nil {
		return fmt.Errorf("%s: file not found in archive", entryName)
	}
	defer func() {
		_ = reader.Close()
	}()

	err = os.Remove(newFile)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("%s: unable to create new file: %v", newFile, err)
	}
	writer, err := os.OpenFile(newFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, os.FileMode(0755))
	if err != nil {
		return err
	}

	_, err = io.Copy(writer, reader)
	_ = writer.Close()
	if err != nil {
		if rmErr := os.Remove(newFile); rmErr != nil {
			fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile,
rmErr) 483 | } 484 | } 485 | return err 486 | } 487 | 488 | func downloadFile(ctx context.Context, url string) ([]byte, error) { 489 | resp, err := fshttp.NewClient(ctx).Get(url) 490 | if err != nil { 491 | return nil, err 492 | } 493 | defer fs.CheckClose(resp.Body, &err) 494 | if resp.StatusCode != http.StatusOK { 495 | return nil, fmt.Errorf("failed with %s downloading %s", resp.Status, url) 496 | } 497 | return io.ReadAll(resp.Body) 498 | } 499 | -------------------------------------------------------------------------------- /backend/drive/metadata.go: -------------------------------------------------------------------------------- 1 | package drive 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "maps" 8 | "strconv" 9 | "strings" 10 | "sync" 11 | 12 | "github.com/rclone/rclone/fs" 13 | "github.com/rclone/rclone/fs/fserrors" 14 | "github.com/rclone/rclone/lib/errcount" 15 | "golang.org/x/sync/errgroup" 16 | drive "google.golang.org/api/drive/v3" 17 | "google.golang.org/api/googleapi" 18 | ) 19 | 20 | // system metadata keys which this backend owns 21 | var systemMetadataInfo = map[string]fs.MetadataHelp{ 22 | "content-type": { 23 | Help: "The MIME type of the file.", 24 | Type: "string", 25 | Example: "text/plain", 26 | }, 27 | "mtime": { 28 | Help: "Time of last modification with mS accuracy.", 29 | Type: "RFC 3339", 30 | Example: "2006-01-02T15:04:05.999Z07:00", 31 | }, 32 | "btime": { 33 | Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.", 34 | Type: "RFC 3339", 35 | Example: "2006-01-02T15:04:05.999Z07:00", 36 | }, 37 | "copy-requires-writer-permission": { 38 | Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.", 39 | Type: "boolean", 40 | Example: "true", 41 | }, 42 | "writers-can-share": { 43 | Help: "Whether users with only writer permission can modify the file's permissions. 
Not populated and ignored when setting for items in shared drives.", 44 | Type: "boolean", 45 | Example: "false", 46 | }, 47 | "viewed-by-me": { 48 | Help: "Whether the file has been viewed by this user.", 49 | Type: "boolean", 50 | Example: "true", 51 | ReadOnly: true, 52 | }, 53 | "owner": { 54 | Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.", 55 | Type: "string", 56 | Example: "user@example.com", 57 | }, 58 | "permissions": { 59 | Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.", 60 | Type: "JSON", 61 | Example: "{}", 62 | }, 63 | "folder-color-rgb": { 64 | Help: "The color for a folder or a shortcut to a folder as an RGB hex string.", 65 | Type: "string", 66 | Example: "881133", 67 | }, 68 | "description": { 69 | Help: "A short description of the file.", 70 | Type: "string", 71 | Example: "Contract for signing", 72 | }, 73 | "starred": { 74 | Help: "Whether the user has starred the file.", 75 | Type: "boolean", 76 | Example: "false", 77 | }, 78 | "labels": { 79 | Help: "Labels attached to this file in a JSON dump of Googled drive format. 
Enable with --drive-metadata-labels.", 80 | Type: "JSON", 81 | Example: "[]", 82 | }, 83 | } 84 | 85 | // Extra fields we need to fetch to implement the system metadata above 86 | var metadataFields = googleapi.Field(strings.Join([]string{ 87 | "copyRequiresWriterPermission", 88 | "description", 89 | "folderColorRgb", 90 | "hasAugmentedPermissions", 91 | "owners", 92 | "permissionIds", 93 | "permissions", 94 | "properties", 95 | "starred", 96 | "viewedByMe", 97 | "viewedByMeTime", 98 | "writersCanShare", 99 | }, ",")) 100 | 101 | // Fields we need to read from permissions 102 | var permissionsFields = googleapi.Field(strings.Join([]string{ 103 | "*", 104 | "permissionDetails/*", 105 | }, ",")) 106 | 107 | // getPermission returns permissions for the fileID and permissionID passed in 108 | func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) { 109 | f.permissionsMu.Lock() 110 | defer f.permissionsMu.Unlock() 111 | if useCache { 112 | perm = f.permissions[permissionID] 113 | if perm != nil { 114 | return perm, false, nil 115 | } 116 | } 117 | fs.Debugf(f, "Fetching permission %q", permissionID) 118 | err = f.pacer.Call(func() (bool, error) { 119 | perm, err = f.svc.Permissions.Get(fileID, permissionID). 120 | Fields(permissionsFields). 121 | SupportsAllDrives(true). 
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, false, err
	}

	// The inherited flag is taken from the first PermissionDetails entry
	// (only present for shared drive items).
	inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited

	cleanPermission(perm)

	// cache the permission
	f.permissions[permissionID] = perm

	return perm, inherited, err
}

// Set the permissions on the info
//
// Owner permissions are skipped (they are handled via the owner metadata);
// individual failures are logged and accumulated, and the combined error is
// marked no-retry.
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
	errs := errcount.New()
	for _, perm := range permissions {
		if perm.Role == "owner" {
			// ignore owner permissions - these are set with owner
			continue
		}
		cleanPermissionForWrite(perm)
		err := f.pacer.Call(func() (bool, error) {
			_, err := f.svc.Permissions.Create(info.Id, perm).
				SupportsAllDrives(true).
				SendNotificationEmail(false).
				Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
			errs.Add(err)
		}
	}
	err = errs.Err("failed to set permission")
	if err != nil {
		// Retrying won't help permission errors so mark no-retry.
		err = fserrors.NoRetryError(err)
	}
	return err
}

// Clean attributes from permissions which we can't write
func cleanPermissionForWrite(perm *drive.Permission) {
	perm.Deleted = false
	perm.DisplayName = ""
	perm.Id = ""
	perm.Kind = ""
	perm.PermissionDetails = nil
	perm.TeamDrivePermissionDetails = nil
}

// Clean and cache the permission if not already cached
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
	f.permissionsMu.Lock()
	defer f.permissionsMu.Unlock()
	cleanPermission(perm)
	if _, found := f.permissions[perm.Id]; !found {
		f.permissions[perm.Id] = perm
	}
}

// Clean fields we don't
// need to keep from the permission
func cleanPermission(perm *drive.Permission) {
	// DisplayName: Output only. The "pretty" name of the value of the
	// permission. The following is a list of examples for each type of
	// permission: * `user` - User's full name, as defined for their Google
	// account, such as "Joe Smith." * `group` - Name of the Google Group,
	// such as "The Company Administrators." * `domain` - String domain
	// name, such as "thecompany.com." * `anyone` - No `displayName` is
	// present.
	perm.DisplayName = ""

	// Kind: Output only. Identifies what kind of resource this is. Value:
	// the fixed string "drive#permission".
	perm.Kind = ""

	// PermissionDetails: Output only. Details of whether the permissions on
	// this shared drive item are inherited or directly on this item. This
	// is an output-only field which is present only for shared drive items.
	perm.PermissionDetails = nil

	// PhotoLink: Output only. A link to the user's profile photo, if
	// available.
	perm.PhotoLink = ""

	// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
	// `permissionDetails` instead.
	perm.TeamDrivePermissionDetails = nil
}

// Fields we need to read from labels
var labelsFields = googleapi.Field(strings.Join([]string{
	"*",
}, ","))

// getLabels returns labels for the fileID passed in
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
	fs.Debugf(f, "Fetching labels for %q", fileID)
	listLabels := f.svc.Files.ListLabels(fileID).
		Fields(labelsFields).
		Context(ctx)
	// Page through all label results, accumulating them.
	for {
		var info *drive.LabelList
		err = f.pacer.Call(func() (bool, error) {
			info, err = listLabels.Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
		labels = append(labels, info.Labels...)
		if info.NextPageToken == "" {
			break
		}
		listLabels.PageToken(info.NextPageToken)
	}
	for _, label := range labels {
		cleanLabel(label)
	}
	return labels, nil
}

// Set the labels on the info
//
// This is a no-op if labels is empty; otherwise all labels are applied
// in a single ModifyLabels request.
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
	if len(labels) == 0 {
		return nil
	}
	req := drive.ModifyLabelsRequest{}
	for _, label := range labels {
		req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
			FieldModifications: labelFieldsToFieldModifications(label.Fields),
			LabelId:            label.Id,
		})
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.ModifyLabels(info.Id, &req).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set labels: %w", err)
	}
	return nil
}

// Convert label fields into something which can set the fields
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
	for id, field := range fields {
		// User fields are flattened to their email addresses for SetUserValues.
		var emails []string
		for _, user := range field.User {
			emails = append(emails, user.EmailAddress)
		}
		out = append(out, &drive.LabelFieldModification{
			// FieldId: The ID of the field to be modified.
			FieldId: id,

			// SetDateValues: Replaces the value of a dateString Field with these
			// new values. The string must be in the RFC 3339 full-date format:
			// YYYY-MM-DD.
			SetDateValues: field.DateString,

			// SetIntegerValues: Replaces the value of an `integer` field with these
			// new values.
			SetIntegerValues: field.Integer,

			// SetSelectionValues: Replaces a `selection` field with these new
			// values.
			SetSelectionValues: field.Selection,

			// SetTextValues: Sets the value of a `text` field.
			SetTextValues: field.Text,

			// SetUserValues: Replaces a `user` field with these new values. The
			// values must be valid email addresses.
			SetUserValues: emails,
		})
	}
	return out
}

// Clean fields we don't need to keep from the label
func cleanLabel(label *drive.Label) {
	// Kind: This is always drive#label
	label.Kind = ""

	for name, field := range label.Fields {
		// Kind: This is always drive#labelField.
		field.Kind = ""

		// Note the fields are copies so we need to write them
		// back to the map
		label.Fields[name] = field
	}
}

// Parse the metadata from drive item
//
// It should return nil if there is no Metadata
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
	metadata := make(fs.Metadata, 16)

	// Dump user metadata first as it overrides system metadata
	maps.Copy(metadata, info.Properties)

	// System metadata
	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
	metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
	metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
	metadata["content-type"] = info.MimeType

	// Owners: Output only. The owner of this file. Only certain legacy
	// files may have more than one owner. This field isn't populated for
	// items in shared drives.
	if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
		// Only the first owner is reported; prefer the email address,
		// falling back to the display name when it is empty.
		user := info.Owners[0]
		if len(info.Owners) > 1 {
			fs.Logf(o, "Ignoring more than 1 owner")
		}
		if user != nil {
			id := user.EmailAddress
			if id == "" {
				id = user.DisplayName
			}
			metadata["owner"] = id
		}
	}

	if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
		// We only write permissions out if they are not inherited.
		//
		// On My Drives permissions seem to be attached to every item
		// so they will always be written out.
		//
		// On Shared Drives only non-inherited permissions will be
		// written out.

		// To read the inherited permissions flag will mean we need to
		// read the permissions for each object and the cache will be
		// useless. However shared drives don't return permissions
		// only permissionIds so will need to fetch them for each
		// object. We use HasAugmentedPermissions to see if there are
		// special permissions before fetching them to save transactions.

		// HasAugmentedPermissions: Output only. Whether there are permissions
		// directly on this file. This field is only populated for items in
		// shared drives.
		if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
			// Don't process permissions if there aren't any specifically set
			fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
			info.Permissions = nil
			info.PermissionIds = nil
		}

		// PermissionIds: Output only. List of permission IDs for users with
		// access to this file.
		//
		// Only process these if we have no Permissions
		if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
			// Fetch each permission concurrently, bounded by the configured
			// number of checkers.
			info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
			g, gCtx := errgroup.WithContext(ctx)
			g.SetLimit(o.fs.ci.Checkers)
			var mu sync.Mutex // protect the info.Permissions from concurrent writes
			for _, permissionID := range info.PermissionIds {
				permissionID := permissionID // capture a per-iteration copy for the goroutine closure
				g.Go(func() error {
					// must fetch the team drive ones individually to check the inherited flag
					perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
					if err != nil {
						return fmt.Errorf("failed to read permission: %w", err)
					}
					// Don't write inherited permissions out
					if inherited {
						return nil
					}
					// Don't write owner role out - these are covered by the owner metadata
					if perm.Role == "owner" {
						return nil
					}
					mu.Lock()
					info.Permissions = append(info.Permissions, perm)
					mu.Unlock()
					return nil
				})
			}
			err = g.Wait()
			if err != nil {
				return err
			}
		} else {
			// Clean the fetched permissions
			for _, perm := range info.Permissions {
				o.fs.cleanAndCachePermission(perm)
			}
		}

		// Permissions: Output only. The full list of permissions for the file.
		// This is only available if the requesting user can share the file. Not
		// populated for items in shared drives.
		if len(info.Permissions) > 0 {
			buf, err := json.Marshal(info.Permissions)
			if err != nil {
				return fmt.Errorf("failed to marshal permissions: %w", err)
			}
			metadata["permissions"] = string(buf)
		}

		// Permission propagation
		// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
		// Leads me to believe that in non shared drives, permissions
		// are added to each item when you set permissions for a
		// folder whereas in shared drives they are inherited and
		// placed on the item directly.
	}

	if info.FolderColorRgb != "" {
		metadata["folder-color-rgb"] = info.FolderColorRgb
	}
	if info.Description != "" {
		metadata["description"] = info.Description
	}
	metadata["starred"] = fmt.Sprint(info.Starred)
	metadata["btime"] = info.CreatedTime
	metadata["mtime"] = info.ModifiedTime

	if o.fs.opt.MetadataLabels.IsSet(rwRead) {
		// FIXME would be really nice if we knew if files had labels
		// before listing but we need to know all possible label IDs
		// to get it in the listing.

		labels, err := o.fs.getLabels(ctx, actualID(info.Id))
		if err != nil {
			return fmt.Errorf("failed to fetch labels: %w", err)
		}
		buf, err := json.Marshal(labels)
		if err != nil {
			return fmt.Errorf("failed to marshal labels: %w", err)
		}
		metadata["labels"] = string(buf)
	}

	o.metadata = &metadata
	return nil
}

// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
	perm := drive.Permission{
		Role:         "owner",
		EmailAddress: owner,
		// Type: The type of the grantee.
		// Valid values are: * `user` * `group` *
		// `domain` * `anyone` When creating a permission, if `type` is `user`
		// or `group`, you must provide an `emailAddress` for the user or group.
		// When `type` is `domain`, you must provide a `domain`. There isn't
		// extra information required for an `anyone` type.
		Type: "user",
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Permissions.Create(info.Id, &perm).
			SupportsAllDrives(true).
			TransferOwnership(true).
			// SendNotificationEmail(false). - required apparently!
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set owner: %w", err)
	}
	return nil
}

// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error

// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
	callbackFns := []updateMetadataFn{}
	// The returned callback runs every deferred setter in order, stopping
	// at the first error.
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range callbackFns {
			err := fn(ctx, info)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// merge metadata into request and user metadata
	for k, v := range meta {
		k, v := k, v // copy loop vars so the closures below capture per-iteration values
		// parse a boolean from v and write into out
		parseBool := func(out *bool) error {
			b, err := strconv.ParseBool(v)
			if err != nil {
				return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
			}
			*out = b
			return nil
		}
		switch k {
		case "copy-requires-writer-permission":
			if isFolder {
				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
			if !f.isTeamDrive {
				if err := parseBool(&updateInfo.WritersCanShare); err != nil {
					return nil, err
				}
			} else {
				fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
			}
		case "viewed-by-me":
			// Can't write this
		case "content-type":
			updateInfo.MimeType = v
		case "owner":
			if !f.opt.MetadataOwner.IsSet(rwWrite) {
				continue
			}
			// Can't set Owner on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setOwner(ctx, info, v)
				if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "permissions":
			if !f.opt.MetadataPermissions.IsSet(rwWrite) {
				continue
			}
			var perms []*drive.Permission
			err := json.Unmarshal([]byte(v), &perms)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
			}
			// Can't set Permissions on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setPermissions(ctx, info, perms)
				if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
					// We've already logged the permissions errors individually here
					fs.Debugf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "labels":
			if !f.opt.MetadataLabels.IsSet(rwWrite) {
				continue
			}
			var labels []*drive.Label
			err := json.Unmarshal([]byte(v), &labels)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
			}
			// Can't set Labels on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				err := f.setLabels(ctx, info, labels)
				if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
					fs.Errorf(f, "Ignoring error as failok is set: %v", err)
					return nil
				}
				return err
			})
		case "folder-color-rgb":
			updateInfo.FolderColorRgb = v
		case "description":
			updateInfo.Description = v
		case "starred":
			if err := parseBool(&updateInfo.Starred); err != nil {
				return nil, err
			}
		case "btime":
			if update {
				fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
			} else {
				updateInfo.CreatedTime = v
			}
		case "mtime":
			updateInfo.ModifiedTime = v
		default:
			// Anything unrecognised becomes user metadata in Properties.
			if updateInfo.Properties == nil {
				updateInfo.Properties = make(map[string]string, 1)
			}
			updateInfo.Properties[k] = v
		}
	}
	return callback, nil
}

// Fetch metadata and
// update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
	if err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}
	return callback, nil
}
--------------------------------------------------------------------------------
/backend/drive/drive_internal_test.go:
--------------------------------------------------------------------------------
package drive

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// TestDriveScopes checks that driveScopes expands short scope names into
// full URLs and that driveScopesContainsAppFolder detects the appfolder scope.
func TestDriveScopes(t *testing.T) {
	for _, test := range []struct {
		in       string
		want     []string
		wantFlag bool
	}{
		{"", []string{
			"https://www.googleapis.com/auth/drive",
		}, false},
		{" drive.file , drive.readonly", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.readonly",
		}, false},
		{" drive.file , drive.appfolder", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.appfolder",
		}, true},
	} {
		got := driveScopes(test.in)
		assert.Equal(t, test.want, got, test.in)
		gotFlag := driveScopesContainsAppFolder(got)
		assert.Equal(t, test.wantFlag, gotFlag, test.in)
	}
}

/*
var additionalMimeTypes = map[string]string{
	"application/vnd.ms-excel.sheet.macroenabled.12":                          ".xlsm",
	"application/vnd.ms-excel.template.macroenabled.12":                       ".xltm",
	"application/vnd.ms-powerpoint.presentation.macroenabled.12":              ".pptm",
	"application/vnd.ms-powerpoint.slideshow.macroenabled.12":                 ".ppsm",
	"application/vnd.ms-powerpoint.template.macroenabled.12":                  ".potm",
	"application/vnd.ms-powerpoint":                                           ".ppt",
	"application/vnd.ms-word.document.macroenabled.12":                        ".docm",
	"application/vnd.ms-word.template.macroenabled.12":                        ".dotm",
	"application/vnd.openxmlformats-officedocument.presentationml.template":   ".potx",
	"application/vnd.openxmlformats-officedocument.spreadsheetml.template":    ".xltx",
	"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
	"application/vnd.sun.xml.writer":                                          ".sxw",
	"text/richtext":                                                           ".rtf",
}
*/

// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
	// Mark the fetch-once guard as done so the fixture data below is used
	// instead of a live API call.
	fetchFormatsOnce.Do(func() {})
	buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
	var about struct {
		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
		ImportFormats map[string][]string `json:"importFormats,omitempty"`
	}
	require.NoError(t, err)
	require.NoError(t, json.Unmarshal(buf, &about))
	_exportFormats = fixMimeTypeMap(about.ExportFormats)
	_importFormats = fixMimeTypeMap(about.ImportFormats)
}

// TestInternalParseExtensions checks parsing, normalisation (lower-casing,
// dot prefix, de-duplication) and error reporting of export extension lists.
func TestInternalParseExtensions(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    []string
		wantErr error
	}{
		{"doc", []string{".doc"}, nil},
		{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
	} {
		extensions, _, gotErr := parseExtensions(test.in)
		if test.wantErr == nil {
			assert.NoError(t, gotErr)
		} else {
			assert.EqualError(t, gotErr, test.wantErr.Error())
		}
		assert.Equal(t, test.want, extensions)
	}

	// Test it is appending
	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
	assert.NoError(t, gotErr)
	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
}

// TestInternalFindExportFormat checks that the first configured extension
// with a valid export mapping for the document type wins.
func TestInternalFindExportFormat(t *testing.T) {
	ctx := context.Background()
	item := &drive.File{
		Name:     "file",
		MimeType: "application/vnd.google-apps.document",
	}
	for _, test := range []struct {
		extensions    []string
		wantExtension string
		wantMimeType  string
	}{
		{[]string{}, "", ""},
		{[]string{".pdf"}, ".pdf", "application/pdf"},
		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
		{[]string{".xls", ".csv", ".svg"}, "", ""},
	} {
		f := new(Fs)
		f.exportExtensions = test.extensions
		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
		assert.Equal(t, test.wantExtension, gotExtension)
		if test.wantExtension != "" {
			assert.Equal(t, item.Name+gotExtension, gotFilename)
		} else {
			assert.Equal(t, "", gotFilename)
		}
		assert.Equal(t, test.wantMimeType, gotMimeType)
		assert.Equal(t,
true, gotIsDocument) 145 | } 146 | } 147 | 148 | func TestMimeTypesToExtension(t *testing.T) { 149 | for mimeType, extension := range _mimeTypeToExtension { 150 | extensions, err := mime.ExtensionsByType(mimeType) 151 | assert.NoError(t, err) 152 | assert.Contains(t, extensions, extension) 153 | } 154 | } 155 | 156 | func TestExtensionToMimeType(t *testing.T) { 157 | for mimeType, extension := range _mimeTypeToExtension { 158 | gotMimeType := mime.TypeByExtension(extension) 159 | mediatype, _, err := mime.ParseMediaType(gotMimeType) 160 | assert.NoError(t, err) 161 | assert.Equal(t, mimeType, mediatype) 162 | } 163 | } 164 | 165 | func TestExtensionsForExportFormats(t *testing.T) { 166 | if _exportFormats == nil { 167 | t.Error("exportFormats == nil") 168 | } 169 | for fromMT, toMTs := range _exportFormats { 170 | for _, toMT := range toMTs { 171 | if !isInternalMimeType(toMT) { 172 | extensions, err := mime.ExtensionsByType(toMT) 173 | assert.NoError(t, err, "invalid MIME type %q", toMT) 174 | assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT) 175 | } 176 | } 177 | } 178 | } 179 | 180 | func TestExtensionsForImportFormats(t *testing.T) { 181 | t.Skip() 182 | if _importFormats == nil { 183 | t.Error("_importFormats == nil") 184 | } 185 | for fromMT := range _importFormats { 186 | if !isInternalMimeType(fromMT) { 187 | extensions, err := mime.ExtensionsByType(fromMT) 188 | assert.NoError(t, err, "invalid MIME type %q", fromMT) 189 | assert.NotEmpty(t, extensions, "No extension found for %q", fromMT) 190 | } 191 | } 192 | } 193 | 194 | func (f *Fs) InternalTestShouldRetry(t *testing.T) { 195 | ctx := context.Background() 196 | gatewayTimeout := googleapi.Error{ 197 | Code: 503, 198 | } 199 | timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout) 200 | assert.True(t, timeoutRetry) 201 | assert.Equal(t, &gatewayTimeout, timeoutError) 202 | generic403 := googleapi.Error{ 203 | Code: 403, 204 | } 205 | rLEItem := 
googleapi.ErrorItem{ 206 | Reason: "rateLimitExceeded", 207 | Message: "User rate limit exceeded.", 208 | } 209 | generic403.Errors = append(generic403.Errors, rLEItem) 210 | oldStopUpload := f.opt.StopOnUploadLimit 211 | oldStopDownload := f.opt.StopOnDownloadLimit 212 | f.opt.StopOnUploadLimit = true 213 | f.opt.StopOnDownloadLimit = true 214 | defer func() { 215 | f.opt.StopOnUploadLimit = oldStopUpload 216 | f.opt.StopOnDownloadLimit = oldStopDownload 217 | }() 218 | expectedRLError := fserrors.FatalError(&generic403) 219 | rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403) 220 | assert.False(t, rateLimitRetry) 221 | assert.Equal(t, rateLimitErr, expectedRLError) 222 | dQEItem := googleapi.ErrorItem{ 223 | Reason: "downloadQuotaExceeded", 224 | } 225 | generic403.Errors[0] = dQEItem 226 | expectedDQError := fserrors.FatalError(&generic403) 227 | downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403) 228 | assert.False(t, downloadQuotaRetry) 229 | assert.Equal(t, downloadQuotaError, expectedDQError) 230 | tDFLEItem := googleapi.ErrorItem{ 231 | Reason: "teamDriveFileLimitExceeded", 232 | } 233 | generic403.Errors[0] = tDFLEItem 234 | expectedTDFLError := fserrors.FatalError(&generic403) 235 | teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403) 236 | assert.False(t, teamDriveFileLimitRetry) 237 | assert.Equal(t, teamDriveFileLimitError, expectedTDFLError) 238 | qEItem := googleapi.ErrorItem{ 239 | Reason: "quotaExceeded", 240 | } 241 | generic403.Errors[0] = qEItem 242 | expectedQuotaError := fserrors.FatalError(&generic403) 243 | quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403) 244 | assert.False(t, quotaExceededRetry) 245 | assert.Equal(t, quotaExceededError, expectedQuotaError) 246 | 247 | sqEItem := googleapi.ErrorItem{ 248 | Reason: "storageQuotaExceeded", 249 | } 250 | generic403.Errors[0] = sqEItem 251 | expectedStorageQuotaError := fserrors.FatalError(&generic403) 252 
| storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403) 253 | assert.False(t, storageQuotaExceededRetry) 254 | assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError) 255 | } 256 | 257 | func (f *Fs) InternalTestDocumentImport(t *testing.T) { 258 | oldAllow := f.opt.AllowImportNameChange 259 | f.opt.AllowImportNameChange = true 260 | defer func() { 261 | f.opt.AllowImportNameChange = oldAllow 262 | }() 263 | 264 | testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files")) 265 | require.NoError(t, err) 266 | 267 | testFilesFs, err := fs.NewFs(context.Background(), testFilesPath) 268 | require.NoError(t, err) 269 | 270 | _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") 271 | require.NoError(t, err) 272 | 273 | err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc") 274 | require.NoError(t, err) 275 | } 276 | 277 | func (f *Fs) InternalTestDocumentUpdate(t *testing.T) { 278 | testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files")) 279 | require.NoError(t, err) 280 | 281 | testFilesFs, err := fs.NewFs(context.Background(), testFilesPath) 282 | require.NoError(t, err) 283 | 284 | _, f.importMimeTypes, err = parseExtensions("odt,ods,doc") 285 | require.NoError(t, err) 286 | 287 | err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods") 288 | require.NoError(t, err) 289 | } 290 | 291 | func (f *Fs) InternalTestDocumentExport(t *testing.T) { 292 | var buf bytes.Buffer 293 | var err error 294 | 295 | f.exportExtensions, _, err = parseExtensions("txt") 296 | require.NoError(t, err) 297 | 298 | obj, err := f.NewObject(context.Background(), "example2.txt") 299 | require.NoError(t, err) 300 | 301 | rc, err := obj.Open(context.Background()) 302 | require.NoError(t, err) 303 | defer func() { require.NoError(t, rc.Close()) }() 304 | 305 | _, err = io.Copy(&buf, rc) 306 | require.NoError(t, err) 307 | text := buf.String() 
308 | 309 | for _, excerpt := range []string{ 310 | "Lorem ipsum dolor sit amet, consectetur", 311 | "porta at ultrices in, consectetur at augue.", 312 | } { 313 | require.Contains(t, text, excerpt) 314 | } 315 | } 316 | 317 | func (f *Fs) InternalTestDocumentLink(t *testing.T) { 318 | var buf bytes.Buffer 319 | var err error 320 | 321 | f.exportExtensions, _, err = parseExtensions("link.html") 322 | require.NoError(t, err) 323 | 324 | obj, err := f.NewObject(context.Background(), "example2.link.html") 325 | require.NoError(t, err) 326 | 327 | rc, err := obj.Open(context.Background()) 328 | require.NoError(t, err) 329 | defer func() { require.NoError(t, rc.Close()) }() 330 | 331 | _, err = io.Copy(&buf, rc) 332 | require.NoError(t, err) 333 | text := buf.String() 334 | 335 | require.True(t, strings.HasPrefix(text, "")) 336 | require.True(t, strings.HasSuffix(text, "\n")) 337 | for _, excerpt := range []string{ 338 | ` & ? + ≠/z.txt` 349 | existingSubDir = "êé" 350 | ) 351 | 352 | // TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts 353 | func (f *Fs) InternalTestShortcuts(t *testing.T) { 354 | ctx := context.Background() 355 | srcObj, err := f.NewObject(ctx, existingFile) 356 | require.NoError(t, err) 357 | srcHash, err := srcObj.Hash(ctx, hash.MD5) 358 | require.NoError(t, err) 359 | assert.NotEqual(t, "", srcHash) 360 | t.Run("Errors", func(t *testing.T) { 361 | _, err := f.makeShortcut(ctx, "", f, "") 362 | assert.Error(t, err) 363 | assert.Contains(t, err.Error(), "can't be root") 364 | 365 | _, err = f.makeShortcut(ctx, "notfound", f, "dst") 366 | assert.Error(t, err) 367 | assert.Contains(t, err.Error(), "can't find source") 368 | 369 | _, err = f.makeShortcut(ctx, existingFile, f, existingFile) 370 | assert.Error(t, err) 371 | assert.Contains(t, err.Error(), "not overwriting") 372 | assert.Contains(t, err.Error(), "existing file") 373 | 374 | _, err = f.makeShortcut(ctx, existingFile, f, existingDir) 375 | assert.Error(t, err) 376 | assert.Contains(t, 
err.Error(), "not overwriting") 377 | assert.Contains(t, err.Error(), "existing directory") 378 | }) 379 | t.Run("File", func(t *testing.T) { 380 | dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt") 381 | require.NoError(t, err) 382 | require.NotNil(t, dstObj) 383 | assert.Equal(t, "shortcut.txt", dstObj.Remote()) 384 | dstHash, err := dstObj.Hash(ctx, hash.MD5) 385 | require.NoError(t, err) 386 | assert.Equal(t, srcHash, dstHash) 387 | require.NoError(t, dstObj.Remove(ctx)) 388 | }) 389 | t.Run("Dir", func(t *testing.T) { 390 | dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir") 391 | require.NoError(t, err) 392 | require.Nil(t, dstObj) 393 | entries, err := f.List(ctx, "shortcutdir") 394 | require.NoError(t, err) 395 | require.Equal(t, 1, len(entries)) 396 | require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote()) 397 | require.NoError(t, f.Rmdir(ctx, "shortcutdir")) 398 | }) 399 | t.Run("Command", func(t *testing.T) { 400 | _, err := f.Command(ctx, "shortcut", []string{"one"}, nil) 401 | require.Error(t, err) 402 | require.Contains(t, err.Error(), "need exactly 2 arguments") 403 | 404 | _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{ 405 | "target": "doesnotexistremote:", 406 | }) 407 | require.Error(t, err) 408 | require.Contains(t, err.Error(), "couldn't find target") 409 | 410 | _, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{ 411 | "target": ".", 412 | }) 413 | require.Error(t, err) 414 | require.Contains(t, err.Error(), "target is not a drive backend") 415 | 416 | dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{ 417 | "target": fs.ConfigString(f), 418 | }) 419 | require.NoError(t, err) 420 | dstObj := dstObjI.(*Object) 421 | assert.Equal(t, "shortcut2.txt", dstObj.Remote()) 422 | dstHash, err := dstObj.Hash(ctx, hash.MD5) 423 | require.NoError(t, err) 424 | assert.Equal(t, srcHash, dstHash) 425 | 
require.NoError(t, dstObj.Remove(ctx)) 426 | 427 | dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil) 428 | require.NoError(t, err) 429 | dstObj = dstObjI.(*Object) 430 | assert.Equal(t, "shortcut3.txt", dstObj.Remote()) 431 | dstHash, err = dstObj.Hash(ctx, hash.MD5) 432 | require.NoError(t, err) 433 | assert.Equal(t, srcHash, dstHash) 434 | require.NoError(t, dstObj.Remove(ctx)) 435 | }) 436 | } 437 | 438 | // TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash 439 | func (f *Fs) InternalTestUnTrash(t *testing.T) { 440 | ctx := context.Background() 441 | 442 | // Make some objects, one in a subdir 443 | contents := random.String(100) 444 | file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now()) 445 | obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false) 446 | file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now()) 447 | _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false) 448 | 449 | // Check objects 450 | checkObjects := func() { 451 | fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{ 452 | file1, 453 | file2, 454 | }, []string{ 455 | "trashDir/subdir", 456 | }, f.Precision()) 457 | } 458 | checkObjects() 459 | 460 | // Make sure we are using the trash 461 | require.Equal(t, true, f.opt.UseTrash) 462 | 463 | // Remove the object and the dir 464 | require.NoError(t, obj1.Remove(ctx)) 465 | require.NoError(t, f.Purge(ctx, "trashDir/subdir")) 466 | 467 | // Check objects gone 468 | fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision()) 469 | 470 | // Restore the object and directory 471 | r, err := f.unTrashDir(ctx, "trashDir", true) 472 | require.NoError(t, err) 473 | assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r) 474 | 475 | // Check objects restored 476 | checkObjects() 477 | 478 | // Remove the test dir 479 | require.NoError(t, f.Purge(ctx, "trashDir")) 480 | } 481 | 482 | // 
TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID 483 | func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) { 484 | ctx := context.Background() 485 | obj, err := f.NewObject(ctx, existingFile) 486 | require.NoError(t, err) 487 | o := obj.(*Object) 488 | 489 | dir := t.TempDir() 490 | 491 | checkFile := func(name string) { 492 | filePath := filepath.Join(dir, name) 493 | fi, err := os.Stat(filePath) 494 | require.NoError(t, err) 495 | assert.Equal(t, int64(100), fi.Size()) 496 | err = os.Remove(filePath) 497 | require.NoError(t, err) 498 | } 499 | 500 | t.Run("BadID", func(t *testing.T) { 501 | err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/") 502 | require.Error(t, err) 503 | assert.Contains(t, err.Error(), "couldn't find id") 504 | }) 505 | 506 | t.Run("Directory", func(t *testing.T) { 507 | rootID, err := f.dirCache.RootID(ctx, false) 508 | require.NoError(t, err) 509 | err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/") 510 | require.Error(t, err) 511 | assert.Contains(t, err.Error(), "can't moveid directory") 512 | }) 513 | 514 | t.Run("MoveWithoutDestName", func(t *testing.T) { 515 | err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/") 516 | require.NoError(t, err) 517 | checkFile(path.Base(existingFile)) 518 | }) 519 | 520 | t.Run("CopyWithoutDestName", func(t *testing.T) { 521 | err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/") 522 | require.NoError(t, err) 523 | checkFile(path.Base(existingFile)) 524 | }) 525 | 526 | t.Run("MoveWithDestName", func(t *testing.T) { 527 | err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt") 528 | require.NoError(t, err) 529 | checkFile("potato.txt") 530 | }) 531 | 532 | t.Run("CopyWithDestName", func(t *testing.T) { 533 | err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt") 534 | require.NoError(t, err) 535 | checkFile("potato.txt") 536 | }) 537 | } 538 | 539 | // TestIntegration/FsMkdir/FsPutFiles/Internal/Query 540 | func (f *Fs) InternalTestQuery(t *testing.T) { 541 | ctx := 
context.Background() 542 | var err error 543 | t.Run("BadQuery", func(t *testing.T) { 544 | _, err = f.query(ctx, "this is a bad query") 545 | require.Error(t, err) 546 | assert.Contains(t, err.Error(), "failed to execute query") 547 | }) 548 | 549 | t.Run("NoMatch", func(t *testing.T) { 550 | results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir)) 551 | require.NoError(t, err) 552 | assert.Len(t, results, 0) 553 | }) 554 | 555 | t.Run("GoodQuery", func(t *testing.T) { 556 | pathSegments := strings.Split(existingFile, "/") 557 | var parent string 558 | for _, item := range pathSegments { 559 | // the file name contains ' characters which must be escaped 560 | escapedItem := f.opt.Enc.FromStandardName(item) 561 | escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`) 562 | escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`) 563 | 564 | results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem)) 565 | require.NoError(t, err) 566 | require.True(t, len(results) > 0) 567 | for _, result := range results { 568 | assert.True(t, len(result.Id) > 0) 569 | assert.Equal(t, result.Name, item) 570 | } 571 | parent = fmt.Sprintf("'%s' in parents and ", results[0].Id) 572 | } 573 | }) 574 | } 575 | 576 | // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery 577 | func (f *Fs) InternalTestAgeQuery(t *testing.T) { 578 | // Check set up for filtering 579 | assert.True(t, f.Features().FilterAware) 580 | 581 | opt := &filter.Options{} 582 | err := opt.MaxAge.Set("1h") 583 | assert.NoError(t, err) 584 | flt, err := filter.NewFilter(opt) 585 | assert.NoError(t, err) 586 | 587 | defCtx := context.Background() 588 | fltCtx := filter.ReplaceConfig(defCtx, flt) 589 | 590 | testCtx1 := fltCtx 591 | testCtx2 := filter.SetUseFilter(testCtx1, true) 592 | testCtx3, testCancel := context.WithCancel(testCtx2) 593 | testCtx4 := filter.SetUseFilter(testCtx3, false) 594 | testCancel() 595 | assert.False(t, 
filter.GetUseFilter(testCtx1)) 596 | assert.True(t, filter.GetUseFilter(testCtx2)) 597 | assert.True(t, filter.GetUseFilter(testCtx3)) 598 | assert.False(t, filter.GetUseFilter(testCtx4)) 599 | 600 | subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir") 601 | subFsResult, err := fs.NewFs(defCtx, subRemote) 602 | require.NoError(t, err) 603 | subFs, isDriveFs := subFsResult.(*Fs) 604 | require.True(t, isDriveFs) 605 | 606 | tempDir1 := t.TempDir() 607 | tempFs1, err := fs.NewFs(defCtx, tempDir1) 608 | require.NoError(t, err) 609 | 610 | tempDir2 := t.TempDir() 611 | tempFs2, err := fs.NewFs(defCtx, tempDir2) 612 | require.NoError(t, err) 613 | 614 | file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"} 615 | _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true) 616 | 617 | // validate sync/copy 618 | const timeQuery = "(modifiedTime >= '" 619 | 620 | assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false)) 621 | assert.NotContains(t, subFs.lastQuery, timeQuery) 622 | 623 | assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false)) 624 | assert.Contains(t, subFs.lastQuery, timeQuery) 625 | 626 | assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false)) 627 | assert.Contains(t, subFs.lastQuery, timeQuery) 628 | 629 | assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false)) 630 | assert.NotContains(t, subFs.lastQuery, timeQuery) 631 | 632 | // validate list/walk 633 | devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0) 634 | require.NoError(t, errOpen) 635 | defer func() { 636 | _ = devNull.Close() 637 | }() 638 | 639 | assert.NoError(t, operations.List(defCtx, subFs, devNull)) 640 | assert.NotContains(t, subFs.lastQuery, timeQuery) 641 | 642 | assert.NoError(t, operations.List(fltCtx, subFs, devNull)) 643 | assert.Contains(t, subFs.lastQuery, timeQuery) 644 | } 645 | 646 | func (f *Fs) InternalTest(t *testing.T) { 647 | // These tests all depend on each other so run them as nested tests 
648 | t.Run("DocumentImport", func(t *testing.T) { 649 | f.InternalTestDocumentImport(t) 650 | t.Run("DocumentUpdate", func(t *testing.T) { 651 | f.InternalTestDocumentUpdate(t) 652 | t.Run("DocumentExport", func(t *testing.T) { 653 | f.InternalTestDocumentExport(t) 654 | t.Run("DocumentLink", func(t *testing.T) { 655 | f.InternalTestDocumentLink(t) 656 | }) 657 | }) 658 | }) 659 | }) 660 | t.Run("Shortcuts", f.InternalTestShortcuts) 661 | t.Run("UnTrash", f.InternalTestUnTrash) 662 | t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID) 663 | t.Run("Query", f.InternalTestQuery) 664 | t.Run("AgeQuery", f.InternalTestAgeQuery) 665 | t.Run("ShouldRetry", f.InternalTestShouldRetry) 666 | } 667 | 668 | var _ fstests.InternalTester = (*Fs)(nil) 669 | --------------------------------------------------------------------------------