├── .gitignore ├── DOCS.md ├── LICENSE ├── Makefile ├── README.md ├── archive.go ├── config.go ├── detect.go ├── dl.go ├── eget.go ├── extract.go ├── find.go ├── flags.go ├── go.mod ├── go.sum ├── home └── home.go ├── man └── eget.md ├── test ├── eget.toml └── test_eget.go ├── tools ├── build-all.go └── build-version.go ├── verify.go └── version.go /.gitignore: -------------------------------------------------------------------------------- 1 | eget 2 | eget.exe 3 | eget-* 4 | todo.txt 5 | eget.1 6 | /test/* 7 | !/test/test_eget.go 8 | !/test/eget.toml 9 | .eget.toml 10 | -------------------------------------------------------------------------------- /DOCS.md: -------------------------------------------------------------------------------- 1 | # Eget Documentation 2 | 3 | Eget works in four phases: 4 | 5 | * Find: determine a list of assets that may be installed. 6 | * Detect: determine which asset in the list should be downloaded for the target system. 7 | * Verify: verify the checksum of the asset if possible. 8 | * Extract: determine which file within the asset to extract. 9 | 10 | If you are interested in reading the source code, there is one file for each 11 | phase, and the `eget.go` main file runs a routine that combines them all 12 | together. 13 | 14 | ## Find 15 | 16 | If the input is a repo identifier, the Find phase queries `api.github.com` with 17 | the repo and reads the list of assets from the response JSON. If a direct URL 18 | is provided, the Find phase just returns the direct URL without doing any work. 19 | 20 | ## Detect 21 | 22 | The Detect phase attempts to determine what OS and architecture each asset is 23 | built for. This is done by matching a regular expression for each 24 | OS/architecture that Eget knows about. The match rules are shown below, and are 25 | case insensitive. 26 | 27 | | OS | Match Rule | 28 | | ------------- | -------------------- | 29 | | `darwin` | `darwin\|mac.?os\|osx` | 30 | | `windows` | `win\|windows` | 31 | | `linux` | `linux` | 32 | | `netbsd` | `netbsd` | 33 | | `openbsd` | `openbsd` | 34 | | `freebsd` | `freebsd` | 35 | | `android` | `android` | 36 | | `illumos` | `illumos` | 37 | | `solaris` | `solaris` | 38 | | `plan9` | `plan9` | 39 | 40 | | Architecture | Match Rule | 41 | | ------------- | ----------------------------- | 42 | | `amd64` | `x64\|amd64\|x86(-\|_)?64` | 43 | | `386` | `x32\|amd32\|x86(-\|_)?32\|i?386` | 44 | | `arm` | `arm` | 45 | | `arm64` | `arm64\|armv8\|aarch64` | 46 | | `riscv64` | `riscv64` | 47 | 48 | If you would like a new OS/Architecture to be added, or find a case where the 49 | auto-detection is not adequate (within reason), please open an issue. 50 | 51 | Using the direct OS/Architecture (left column of the above tables) name in your 52 | prebuilt zip file names will always allow Eget to auto-detect correctly, 53 | although Eget will often auto-detect correctly for other names as well. 54 | 55 | ## Verify 56 | 57 | During verification, Eget will attempt to verify the checksum of the downloaded 58 | asset. If the user has provided a checksum, or asked Eget to simply print the 59 | checksum, it will do so. Otherwise it may do auto-detection. If it is 60 | downloading an asset called `xxx`, and there is another asset called 61 | `xxx.sha256` or `xxx.sha256sum`, Eget will automatically verify the SHA-256 62 | checksum of the downloaded asset against the one contained in the 63 | `.sha256`/`.sha256sum` file. 
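
As a rough, self-contained sketch of what that comparison involves (illustrative only, not the actual `verify.go` code — the helper name below is made up), the check hashes the downloaded bytes and compares the result with the hex digest in the companion file:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// verifySHA256 checks data against the digest stored in a companion
// ".sha256"/".sha256sum" file, whose first whitespace-separated field
// is the hex-encoded SHA-256 of the asset.
func verifySHA256(data []byte, checksumFile string) error {
	fields := strings.Fields(checksumFile)
	if len(fields) == 0 {
		return fmt.Errorf("empty checksum file")
	}
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); !strings.EqualFold(got, fields[0]) {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, fields[0])
	}
	return nil
}

func main() {
	asset := []byte("example asset contents")
	sum := sha256.Sum256(asset)
	// a companion file typically looks like "<hex digest>  <asset name>"
	companion := hex.EncodeToString(sum[:]) + "  example.tar.gz"
	fmt.Println(verifySHA256(asset, companion)) // <nil>
}
```

A real companion file is typically just the output of `sha256sum <asset>` uploaded alongside the asset itself.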
64 | 65 | ## Extract 66 | 67 | During extraction, Eget will detect the type of archive and compression, and 68 | use this information to extract the requested file. If there is no requested 69 | file, Eget will extract a file with executable permissions, with priority given 70 | to files that have the same name as the repo. If multiple files with executable 71 | permissions exist and none of them match the repo name, Eget will ask the user 72 | to choose. Files ending in `.exe` are also assumed to be executable, regardless 73 | of permissions within the archive. 74 | 75 | Eget supports the following filetypes for assets: 76 | 77 | * `.tar.gz`/`.tgz`: tar archive with gzip compression. 78 | * `.tar.bz2`: tar archive with bzip2 compression. 79 | * `.tar.xz`: tar archive with xz compression. 80 | * `.tar`: tar archive with no compression. 81 | * `.zip`: zip archive. 82 | * `.gz`: single file with gzip compression. 83 | * `.bz2`: single file with bzip2 compression. 84 | * `.xz`: single file with xz compression. 85 | * otherwise: single file. 86 | 87 | If a single file is "extracted" (no tar or zip archive), it will be marked 88 | executable automatically. 89 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021: Zachary Yedidia. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 21 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VERSION = $(shell GOOS=$(shell go env GOHOSTOS) GOARCH=$(shell go env GOHOSTARCH) \ 2 | go run tools/build-version.go) 3 | SYSTEM = ${GOOS}_${GOARCH} 4 | GOVARS = -X main.Version=$(VERSION) 5 | 6 | build: 7 | go build -trimpath -ldflags "-s -w $(GOVARS)" . 8 | 9 | build-dist: 10 | go build -trimpath -ldflags "-s -w $(GOVARS)" -o dist/bin/eget-$(VERSION)-$(SYSTEM) . 11 | 12 | install: 13 | go install -trimpath -ldflags "-s -w $(GOVARS)" . 14 | 15 | fmt: 16 | gofmt -s -w . 17 | 18 | vet: 19 | go vet 20 | 21 | eget: 22 | go build -trimpath -ldflags "-s -w $(GOVARS)" . 
23 | 24 | test: eget 25 | cd test; EGET_CONFIG=eget.toml EGET_BIN= TEST_EGET=../eget go run test_eget.go 26 | 27 | eget.1: man/eget.md 28 | pandoc man/eget.md -s -t man -o eget.1 29 | 30 | package: build-dist eget.1 31 | mkdir -p dist/eget-$(VERSION)-$(SYSTEM) 32 | cp README.md dist/eget-$(VERSION)-$(SYSTEM) 33 | cp LICENSE dist/eget-$(VERSION)-$(SYSTEM) 34 | cp eget.1 dist/eget-$(VERSION)-$(SYSTEM) 35 | if [ "${GOOS}" = "windows" ]; then\ 36 | cp dist/bin/eget-$(VERSION)-$(SYSTEM) dist/eget-$(VERSION)-$(SYSTEM)/eget.exe;\ 37 | cd dist;\ 38 | zip -r -q -T eget-$(VERSION)-$(SYSTEM).zip eget-$(VERSION)-$(SYSTEM);\ 39 | else\ 40 | cp dist/bin/eget-$(VERSION)-$(SYSTEM) dist/eget-$(VERSION)-$(SYSTEM)/eget;\ 41 | cd dist;\ 42 | tar -czf eget-$(VERSION)-$(SYSTEM).tar.gz eget-$(VERSION)-$(SYSTEM);\ 43 | fi 44 | 45 | version: 46 | echo "package main\n\nvar Version = \"$(VERSION)+src\"" > version.go 47 | 48 | clean: 49 | rm -f test/eget.1 test/fd test/micro test/nvim test/pandoc test/rg.exe 50 | rm -rf dist 51 | 52 | .PHONY: build clean install package version fmt vet test 53 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Eget: easy pre-built binary installation 2 | 3 | [![Go Report Card](https://goreportcard.com/badge/github.com/zyedidia/eget)](https://goreportcard.com/report/github.com/zyedidia/eget) 4 | [![Release](https://img.shields.io/github/release/zyedidia/eget.svg?label=Release)](https://github.com/zyedidia/eget/releases) 5 | [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/zyedidia/eget/blob/master/LICENSE) 6 | 7 | **Eget** is the best way to easily get pre-built binaries for your favorite 8 | tools. It downloads and extracts pre-built binaries from releases on GitHub. To 9 | use it, provide a repository and Eget will search through the assets from the 10 | latest release in an attempt to find a suitable prebuilt binary for your 11 | system. If one is found, the asset will be downloaded and Eget will extract the 12 | binary to the current directory. Eget should only be used for installing 13 | simple, static prebuilt binaries, where the extracted binary is all that is 14 | needed for installation. For more complex installation, you may use the 15 | `--download-only` option, and perform extraction manually. 16 | 17 | ![Eget Demo](https://github.com/zyedidia/blobs/blob/master/eget-demo.gif) 18 | 19 | For software maintainers, if you provide prebuilt binaries on GitHub, you can 20 | list `eget` as a one-line method for users to install your software. 21 | 22 | Eget has a number of detection mechanisms and should work out-of-the-box with 23 | most software that is distributed via single binaries on GitHub releases. First 24 | try using Eget on your software, it may already just work. Otherwise, see the 25 | FAQ for a clear set of rules to make your software compatible with Eget. 26 | 27 | For more in-depth documentation, see [DOCS.md](DOCS.md). 
28 | 29 | # Examples 30 | 31 | ``` 32 | eget zyedidia/micro --tag nightly 33 | eget jgm/pandoc --to /usr/local/bin 34 | eget junegunn/fzf 35 | eget neovim/neovim 36 | eget ogham/exa --asset ^musl 37 | eget --system darwin/amd64 sharkdp/fd 38 | eget BurntSushi/ripgrep 39 | eget -f eget.1 zyedidia/eget 40 | eget zachjs/sv2v 41 | eget https://go.dev/dl/go1.17.5.linux-amd64.tar.gz --file go --to ~/go1.17.5 42 | eget --all --file '*' ActivityWatch/activitywatch 43 | ``` 44 | 45 | # How to get Eget 46 | 47 | Before you can get anything, you have to get Eget. If you already have Eget and want to upgrade, use `eget zyedidia/eget`. 48 | 49 | ### Quick-install script 50 | 51 | ``` 52 | curl -o eget.sh https://zyedidia.github.io/eget.sh 53 | shasum -a 256 eget.sh # verify with hash below 54 | bash eget.sh 55 | ``` 56 | 57 | Or alternatively (less secure): 58 | 59 | ``` 60 | curl https://zyedidia.github.io/eget.sh | sh 61 | ``` 62 | 63 | You can then place the downloaded binary in a location on your `$PATH` such as `/usr/local/bin`. 64 | 65 | To verify the script, the sha256 checksum is `0e64b8a3c13f531da005096cc364ac77835bda54276fedef6c62f3dbdc1ee919` (use `shasum -a 256 eget.sh` after downloading the script). 66 | 67 | One of the reasons to use eget is to avoid running curl into bash, but unfortunately you can't eget eget until you have eget. 68 | 69 | ### Homebrew 70 | 71 | ``` 72 | brew install eget 73 | ``` 74 | 75 | ### Chocolatey 76 | 77 | ``` 78 | choco install eget 79 | ``` 80 | 81 | ### Pre-built binaries 82 | 83 | Pre-built binaries are available on the [releases](https://github.com/zyedidia/eget/releases) page. 84 | 85 | ### From source 86 | 87 | Install the latest released version: 88 | 89 | ``` 90 | go install github.com/zyedidia/eget@latest 91 | ``` 92 | 93 | or install from HEAD: 94 | 95 | ``` 96 | git clone https://github.com/zyedidia/eget 97 | cd eget 98 | make build # or go build (produces incomplete version information) 99 | ``` 100 | 101 | A man page can be generated by cloning the repository and running `make eget.1` 102 | (requires pandoc). You can also use `eget` to download the man page: `eget -f eget.1 zyedidia/eget`. 103 | 104 | # Usage 105 | 106 | The `TARGET` argument passed to Eget should either be a GitHub repository, 107 | formatted as `user/repo`, in which case Eget will search the release assets, a 108 | direct URL, in which case Eget will directly download and extract from the 109 | given URL, or a local file, in which case Eget will extract directly from the 110 | local file. 111 | 112 | If Eget downloads an asset called `xxx` and there also exists an asset called 113 | `xxx.sha256` or `xxx.sha256sum`, Eget will automatically verify that the 114 | SHA-256 checksum of the downloaded asset matches the one contained in that 115 | file, and abort installation if a mismatch occurs. 116 | 117 | When installing an executable, Eget will place it in the current directory by 118 | default. If the environment variable `EGET_BIN` is non-empty, Eget will 119 | place the executable in that directory. 120 | 121 | Directories can also be specified as files to extract, and all files within 122 | them will be extracted. For example: 123 | 124 | ``` 125 | eget https://go.dev/dl/go1.17.5.linux-amd64.tar.gz --file go --to ~/go1.17.5 126 | ``` 127 | 128 | GitHub limits API requests to 60 per hour for unauthenticated users. 
If you 129 | would like to perform more requests (up to 5,000 per hour), you can set up a 130 | personal access token and assign it to an environment variable named either 131 | `GITHUB_TOKEN` or `EGET_GITHUB_TOKEN` when running Eget. If both are set, 132 | `EGET_GITHUB_TOKEN` will take precedence. Eget will read this variable and 133 | send the token as authorization with requests to GitHub. It is also possible 134 | to read the token from a file by using `@/path/to/file` as the token value. 135 | 136 | ``` 137 | Usage: 138 | eget [OPTIONS] TARGET 139 | 140 | Application Options: 141 | -t, --tag= tagged release to use instead of latest 142 | --pre-release include pre-releases when fetching the latest version 143 | --source download the source code for the target repo instead of a release 144 | --to= move to given location after extracting 145 | -s, --system= target system to download for (use "all" for all choices) 146 | -f, --file= glob to select files for extraction 147 | --all extract all candidate files 148 | -q, --quiet only print essential output 149 | -d, --download-only stop after downloading the asset (no extraction) 150 | --upgrade-only only download if release is more recent than current version 151 | -a, --asset= download a specific asset containing the given string; can be specified multiple times for additional filtering; use ^ for anti-match 152 | --sha256 show the SHA-256 hash of the downloaded asset 153 | --verify-sha256= verify the downloaded asset checksum against the one provided 154 | --rate show GitHub API rate limiting information 155 | -r, --remove remove the given file from $EGET_BIN or the current directory 156 | -v, --version show version information 157 | -h, --help show this help message 158 | -D, --download-all download all projects defined in the config file 159 | -k, --disable-ssl disable SSL verification for download 160 | ``` 161 | 162 | # Configuration 163 | 164 | Eget can be configured using a TOML file located at `~/.eget.toml` or it will fallback to the expected `XDG_CONFIG_HOME` directory of your os. Alternatively, 165 | the configuration file can be located in the same directory as the Eget binary or the path specified with the environment variable `EGET_CONFIG`. 166 | 167 | Both global settings can be configured, as well as setting on a per-repository basis. 168 | 169 | Sections can be named either `global` or `"owner/repo"`, where `owner` and `repo` 170 | are the owner and repository name of the target repository (not that the `owner/repo` 171 | format is quoted). 172 | 173 | For example, the following configuration file will set the `--to` flag to `~/bin` for 174 | all repositories, and will set the `--to` flag to `~/.local/bin` for the `zyedidia/micro` 175 | repository. 176 | 177 | ```toml 178 | [global] 179 | target = "~/bin" 180 | 181 | ["zyedidia/micro"] 182 | target = "~/.local/bin" 183 | ``` 184 | 185 | ## Available settings - global section 186 | 187 | | Setting | Related Flag | Description | Default | 188 | | --- | --- | --- | --- | 189 | | `github_token` | `N/A` | GitHub API token to use for requests | `""` | 190 | | `all` | `--all` | Whether to extract all candidate files. | `false` | 191 | | `download_only` | `--download-only` | Whether to stop after downloading the asset (no extraction). | `false` | 192 | | `download_source` | `--source` | Whether to download the source code for the target repo instead of a release. | `false` | 193 | | `file` | `--file` | The glob to select files for extraction. 
| `*` | 194 | | `quiet` | `--quiet` | Whether to only print essential output. | `false` | 195 | | `show_hash` | `--sha256` | Whether to show the SHA-256 hash of the downloaded asset. | `false` | 196 | | `system` | `--system` | The target system to download for. | `all` | 197 | | `target` | `--to` | The directory to move the downloaded file to after extraction. | `.` | 198 | | `upgrade_only` | `--upgrade-only` | Whether to only download if release is more recent than current version. | `false` | 199 | 200 | ## Available settings - repository sections 201 | 202 | | Setting | Related Flag | Description | Default | 203 | | --- | --- | --- | --- | 204 | | `all` | `--all` | Whether to extract all candidate files. | `false` | 205 | | `asset_filters` | `--asset` | An array of partial asset names to filter the available assets for download. | `[]` | 206 | | `download_only` | `--download-only` | Whether to stop after downloading the asset (no extraction). | `false` | 207 | | `download_source` | `--source` | Whether to download the source code for the target repo instead of a release. | `false` | 208 | | `file` | `--file` | The glob to select files for extraction. | `*` | 209 | | `quiet` | `--quiet` | Whether to only print essential output. | `false` | 210 | | `show_hash` | `--sha256` | Whether to show the SHA-256 hash of the downloaded asset. | `false` | 211 | | `system` | `--system` | The target system to download for. | `all` | 212 | | `target` | `--to` | The directory to move the downloaded file to after extraction. | `.` | 213 | | `upgrade_only` | `--upgrade-only` | Whether to only download if release is more recent than current version. | `false` | 214 | | `verify_sha256` | `--verify-sha256` | Verify the sha256 hash of the asset against a provided hash. | `""` | 215 | 216 | 217 | ## Example configuration 218 | 219 | ```toml 220 | [global] 221 | github_token = "ghp_1234567890" 222 | quiet = false 223 | show_hash = false 224 | upgrade_only = true 225 | target = "./test" 226 | 227 | ["zyedidia/micro"] 228 | upgrade_only = false 229 | show_hash = true 230 | asset_filters = [ "static", ".tar.gz" ] 231 | target = "~/.local/bin/micro" 232 | ``` 233 | 234 | By using the configuration above, you could run the following command to download the latest release of `micro`: 235 | 236 | ```bash 237 | eget zyedidia/micro 238 | ``` 239 | 240 | Without the configuration, you would need to run the following command instead: 241 | 242 | ```bash 243 | export EGET_GITHUB_TOKEN=ghp_1234567890 &&\ 244 | eget zyedidia/micro --to ~/.local/bin/micro --sha256 --asset static --asset .tar.gz 245 | ``` 246 | 247 | # FAQ 248 | 249 | ### How is this different from a package manager? 250 | 251 | Eget only downloads pre-built binaries uploaded to GitHub by the developers of 252 | the repository. It does not maintain a central list of packages, nor does it do 253 | any dependency management. Eget does not "install" executables by placing them 254 | in system-wide directories (such as `/usr/local/bin`) unless instructed, and it 255 | does not maintain a registry for uninstallation. Eget works best for installing 256 | software that comes as a single binary with no additional files needed (CLI 257 | tools made in Go, Rust, or Haskell tend to fit this description). 258 | 259 | ### Does Eget keep track of installed binaries? 260 | 261 | Eget does not maintain any sort of manifest containing information about 262 | installed binaries. In general, Eget does not maintain any state across 263 | invocations. 
However, Eget does support the `--upgrade-only` option, which 264 | will first check `EGET_BIN` to determine if you have already downloaded the 265 | tool you are trying to install -- if so it will only download a new version if 266 | the GitHub release is newer than the binary on your file system. 267 | 268 | ### Is this secure? 269 | 270 | Eget does not run any downloaded code -- it just finds executables from GitHub 271 | releases and downloads/extracts them. If you trust the code you are downloading 272 | (i.e. if you trust downloading pre-built binaries from GitHub) then using Eget 273 | is perfectly safe. If Eget finds a matching asset ending in `.sha256` or 274 | `.sha256sum`, the SHA-256 checksum of your download will be automatically 275 | verified. You can also use the `--sha256` or `--verify-sha256` options to 276 | manually verify the SHA-256 checksums of your downloads (checksums are provided 277 | in an alternative manner by your download source). 278 | 279 | ### Does this work only for GitHub repositories? 280 | 281 | At the moment Eget supports searching GitHub releases, direct URLs, and local 282 | files. If you provide a direct URL instead of a GitHub repository, Eget will 283 | skip the detection phase and download directly from the given URL. If you 284 | provide a local file, Eget will skip detection and download and just perform 285 | extraction from the local file. 286 | 287 | ### How can I make my software compatible with Eget? 288 | 289 | Eget should work out-of-the-box with many methods for releasing software, and 290 | does not require that you build your release process for Eget in particular. 291 | However, here are some rules that will guarantee compatibility with Eget. 292 | 293 | - Provide your pre-built binaries as GitHub release assets. 294 | - Format the system name as `OS_Arch` and include it in every pre-built binary 295 | name. Supported OSes are `darwin`/`macos`, `windows`, `linux`, `netbsd`, 296 | `openbsd`, `freebsd`, `android`, `illumos`, `solaris`, `plan9`. Supported 297 | architectures are `amd64`, `i386`, `arm`, `arm64`, `riscv64`. 298 | - If desired, include `*.sha256` files for each asset, containing the SHA-256 299 | checksum of each asset. These checksums will be automatically verified by 300 | Eget. 301 | - Include only a single executable or appimage per system in each release archive. 302 | - Use `.tar.gz`, `.tar.bz2`, `.tar.xz`, `.tar`, or `.zip` for archives. You may 303 | also directly upload the executable without an archive, or a compressed 304 | executable ending in `.gz`, `.bz2`, or `.xz`. 305 | 306 | ### Does this work with monorepos? 307 | 308 | Yes, you can pass a tag or tag identifier with the `--tag TAG` option. If no 309 | tag exactly matches, Eget will look for the latest release with a tag that 310 | contains `TAG`. So if your repository contains releases for multiple different 311 | projects, just pass the appropriate tag (for the project you want) to Eget, and 312 | it will find the latest release for that particular project (as long as 313 | releases for that project are given tags that contain the project name). 314 | 315 | # Contributing 316 | 317 | If you find a bug, have a suggestion, or something else, please open an issue 318 | for discussion. I am sometimes prone to leaving pull requests unmerged, so 319 | please double check with me before investing lots of time into implementing a 320 | pull request. See [DOCS.md](DOCS.md) for more in-depth documentation. 
321 | -------------------------------------------------------------------------------- /archive.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "archive/tar" 5 | "archive/zip" 6 | "bytes" 7 | "fmt" 8 | "io" 9 | "io/fs" 10 | "strings" 11 | ) 12 | 13 | type FileType byte 14 | 15 | const ( 16 | TypeNormal FileType = iota 17 | TypeDir 18 | TypeLink 19 | TypeSymlink 20 | TypeOther 21 | ) 22 | 23 | func tarft(typ byte) FileType { 24 | switch typ { 25 | case tar.TypeReg: 26 | return TypeNormal 27 | case tar.TypeDir: 28 | return TypeDir 29 | case tar.TypeLink: 30 | return TypeLink 31 | case tar.TypeSymlink: 32 | return TypeSymlink 33 | } 34 | return TypeOther 35 | } 36 | 37 | type File struct { 38 | Name string 39 | LinkName string 40 | Mode fs.FileMode 41 | Type FileType 42 | } 43 | 44 | func (f File) Dir() bool { 45 | return f.Type == TypeDir 46 | } 47 | 48 | type Archive interface { 49 | Next() (File, error) 50 | ReadAll() ([]byte, error) 51 | } 52 | 53 | type TarArchive struct { 54 | r *tar.Reader 55 | } 56 | 57 | func NewTarArchive(data []byte, decompress DecompFn) (Archive, error) { 58 | r := bytes.NewReader(data) 59 | dr, err := decompress(r) 60 | if err != nil { 61 | return nil, err 62 | } 63 | return &TarArchive{ 64 | r: tar.NewReader(dr), 65 | }, nil 66 | } 67 | 68 | func (t *TarArchive) Next() (File, error) { 69 | for { 70 | hdr, err := t.r.Next() 71 | if err != nil { 72 | return File{}, err 73 | } 74 | ft := tarft(hdr.Typeflag) 75 | if ft != TypeOther { 76 | return File{ 77 | Name: hdr.Name, 78 | LinkName: hdr.Linkname, 79 | Mode: fs.FileMode(hdr.Mode), 80 | Type: ft, 81 | }, err 82 | } 83 | } 84 | } 85 | 86 | func (t *TarArchive) ReadAll() ([]byte, error) { 87 | return io.ReadAll(t.r) 88 | } 89 | 90 | type ZipArchive struct { 91 | r *zip.Reader 92 | idx int 93 | } 94 | 95 | // decompressor does nothing for a zip archive because it already has built-in 96 | // compression. 
97 | func NewZipArchive(data []byte, d DecompFn) (Archive, error) { 98 | r := bytes.NewReader(data) 99 | zr, err := zip.NewReader(r, int64(len(data))) 100 | return &ZipArchive{ 101 | r: zr, 102 | idx: -1, 103 | }, err 104 | } 105 | 106 | func (z *ZipArchive) Next() (File, error) { 107 | z.idx++ 108 | 109 | if z.idx < 0 || z.idx >= len(z.r.File) { 110 | return File{}, io.EOF 111 | } 112 | 113 | f := z.r.File[z.idx] 114 | 115 | typ := TypeNormal 116 | if strings.HasSuffix(f.Name, "/") { 117 | typ = TypeDir 118 | } 119 | 120 | return File{ 121 | Name: f.Name, 122 | Mode: f.Mode(), 123 | Type: typ, 124 | }, nil 125 | } 126 | 127 | func (z *ZipArchive) ReadAll() ([]byte, error) { 128 | if z.idx < 0 || z.idx >= len(z.r.File) { 129 | return nil, io.EOF 130 | } 131 | f := z.r.File[z.idx] 132 | rc, err := f.Open() 133 | if err != nil { 134 | return nil, fmt.Errorf("zip extract: %w", err) 135 | } 136 | defer rc.Close() 137 | data, err := io.ReadAll(rc) 138 | return data, err 139 | } 140 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "runtime" 9 | 10 | "github.com/BurntSushi/toml" 11 | "github.com/jessevdk/go-flags" 12 | "github.com/zyedidia/eget/home" 13 | ) 14 | 15 | type ConfigGlobal struct { 16 | All bool `toml:"all"` 17 | DownloadOnly bool `toml:"download_only"` 18 | File string `toml:"file"` 19 | GithubToken string `toml:"github_token"` 20 | Quiet bool `toml:"quiet"` 21 | ShowHash bool `toml:"show_hash"` 22 | Source bool `toml:"download_source"` 23 | System string `toml:"system"` 24 | Target string `toml:"target"` 25 | UpgradeOnly bool `toml:"upgrade_only"` 26 | } 27 | 28 | type ConfigRepository struct { 29 | All bool `toml:"all"` 30 | AssetFilters []string `toml:"asset_filters"` 31 | DownloadOnly bool `toml:"download_only"` 32 | File string `toml:"file"` 33 | Name string `toml:"name"` 34 | Quiet bool `toml:"quiet"` 35 | ShowHash bool `toml:"show_hash"` 36 | Source bool `toml:"download_source"` 37 | System string `toml:"system"` 38 | Tag string `toml:"tag"` 39 | Target string `toml:"target"` 40 | UpgradeOnly bool `toml:"upgrade_only"` 41 | Verify string `toml:"verify_sha256"` 42 | DisableSSL bool `toml:"disable_ssl"` 43 | } 44 | 45 | type Config struct { 46 | Meta struct { 47 | Keys []string 48 | MetaData *toml.MetaData 49 | } 50 | Global ConfigGlobal `toml:"global"` 51 | Repositories map[string]ConfigRepository 52 | } 53 | 54 | func LoadConfigurationFile(path string) (Config, error) { 55 | var conf Config 56 | meta, err := toml.DecodeFile(path, &conf) 57 | 58 | if err != nil { 59 | return conf, err 60 | } 61 | 62 | meta, err = toml.DecodeFile(path, &conf.Repositories) 63 | 64 | conf.Meta.Keys = make([]string, len(meta.Keys())) 65 | 66 | for i, key := range meta.Keys() { 67 | conf.Meta.Keys[i] = key.String() 68 | } 69 | 70 | conf.Meta.MetaData = &meta 71 | 72 | return conf, err 73 | } 74 | 75 | func GetOSConfigPath(homePath string) string { 76 | var configDir string 77 | 78 | defaultConfig := map[string]string{ 79 | "windows": "LocalAppData", 80 | "default": ".config", 81 | } 82 | 83 | var goos string 84 | switch runtime.GOOS { 85 | case "windows": 86 | configDir = os.Getenv("LOCALAPPDATA") 87 | goos = "windows" 88 | default: 89 | configDir = os.Getenv("XDG_CONFIG_HOME") 90 | goos = "default" 91 | } 92 | 93 | if configDir == "" { 94 | configDir = filepath.Join(homePath, 
defaultConfig[goos]) 95 | } 96 | 97 | return filepath.Join(configDir, "eget", "eget.toml") 98 | } 99 | 100 | func InitializeConfig() (*Config, error) { 101 | var err error 102 | var config Config 103 | 104 | homePath, _ := os.UserHomeDir() 105 | appName := "eget" 106 | 107 | if configFilePath, ok := os.LookupEnv("EGET_CONFIG"); ok { 108 | if config, err = LoadConfigurationFile(configFilePath); err != nil && !errors.Is(err, os.ErrNotExist) { 109 | return nil, fmt.Errorf("%s: %w", configFilePath, err) 110 | } 111 | } else { 112 | configFilePath := homePath + "/." + appName + ".toml" 113 | if config, err = LoadConfigurationFile(configFilePath); err != nil && !errors.Is(err, os.ErrNotExist) { 114 | return nil, fmt.Errorf("%s: %w", configFilePath, err) 115 | } 116 | } 117 | 118 | if err != nil { 119 | configFilePath := appName + ".toml" 120 | if config, err = LoadConfigurationFile(configFilePath); err != nil && !errors.Is(err, os.ErrNotExist) { 121 | return nil, fmt.Errorf("%s: %w", configFilePath, err) 122 | } 123 | } 124 | 125 | configFallBackPath := GetOSConfigPath(homePath) 126 | if err != nil && configFallBackPath != "" { 127 | if config, err = LoadConfigurationFile(configFallBackPath); err != nil && !errors.Is(err, os.ErrNotExist) { 128 | return nil, fmt.Errorf("%s: %w", configFallBackPath, err) 129 | } 130 | } 131 | 132 | if err != nil { 133 | config = Config{ 134 | Global: ConfigGlobal{ 135 | All: false, 136 | DownloadOnly: false, 137 | GithubToken: "", 138 | Quiet: false, 139 | ShowHash: false, 140 | Source: false, 141 | UpgradeOnly: false, 142 | }, 143 | Repositories: make(map[string]ConfigRepository, 0), 144 | } 145 | 146 | return &config, nil 147 | } 148 | 149 | delete(config.Repositories, "global") 150 | 151 | // set default global values 152 | if !config.Meta.MetaData.IsDefined("global", "all") { 153 | config.Global.All = false 154 | } 155 | 156 | if !config.Meta.MetaData.IsDefined("global", "github_token") { 157 | config.Global.GithubToken = "" 158 | } 159 | 160 | if !config.Meta.MetaData.IsDefined("global", "quiet") { 161 | config.Global.Quiet = false 162 | } 163 | 164 | if !config.Meta.MetaData.IsDefined("global", "download_only") { 165 | config.Global.DownloadOnly = false 166 | } 167 | 168 | if !config.Meta.MetaData.IsDefined("global", "show_hash") { 169 | config.Global.ShowHash = false 170 | } 171 | 172 | if !config.Meta.MetaData.IsDefined("global", "upgrade_only") { 173 | config.Global.UpgradeOnly = false 174 | } 175 | 176 | // set default repository values 177 | for name, repo := range config.Repositories { 178 | 179 | if !config.Meta.MetaData.IsDefined(name, "all") { 180 | repo.All = config.Global.All 181 | } 182 | 183 | if !config.Meta.MetaData.IsDefined(name, "asset_filters") { 184 | repo.AssetFilters = []string{} 185 | } 186 | 187 | if !config.Meta.MetaData.IsDefined(name, "download_only") { 188 | repo.DownloadOnly = config.Global.DownloadOnly 189 | } 190 | 191 | if !config.Meta.MetaData.IsDefined(name, "quiet") { 192 | repo.Quiet = config.Global.Quiet 193 | } 194 | 195 | if !config.Meta.MetaData.IsDefined(name, "show_hash") { 196 | repo.ShowHash = config.Global.ShowHash 197 | } 198 | 199 | if !config.Meta.MetaData.IsDefined(name, "target") && config.Global.Target != "" { 200 | repo.Target = config.Global.Target 201 | } 202 | 203 | if !config.Meta.MetaData.IsDefined(name, "upgrade_only") { 204 | repo.UpgradeOnly = config.Global.UpgradeOnly 205 | } 206 | 207 | if !config.Meta.MetaData.IsDefined(name, "download_source") { 208 | repo.Source = config.Global.Source 209 | } 
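		// the loop variable repo is a copy of the map value, so the merged defaults must be written back below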
210 | 211 | config.Repositories[name] = repo 212 | } 213 | 214 | return &config, nil 215 | } 216 | 217 | func update[T any](config T, cli *T) T { 218 | if cli == nil { 219 | return config 220 | } 221 | return *cli 222 | } 223 | 224 | // Move the loaded configuration file global options into the opts variable 225 | func SetGlobalOptionsFromConfig(config *Config, parser *flags.Parser, opts *Flags, cli CliFlags) error { 226 | if config.Global.GithubToken != "" && os.Getenv("EGET_GITHUB_TOKEN") == "" { 227 | os.Setenv("EGET_GITHUB_TOKEN", config.Global.GithubToken) 228 | } 229 | 230 | opts.Tag = update("", cli.Tag) 231 | opts.Prerelease = update(false, cli.Prerelease) 232 | opts.Source = update(config.Global.Source, cli.Source) 233 | targ := update(config.Global.Target, cli.Output) 234 | expanded, err := home.Expand(targ) 235 | if err != nil { 236 | return err 237 | } 238 | opts.Output = expanded 239 | opts.System = update(config.Global.System, cli.System) 240 | opts.ExtractFile = update("", cli.ExtractFile) 241 | opts.All = update(config.Global.All, cli.All) 242 | opts.Quiet = update(config.Global.Quiet, cli.Quiet) 243 | opts.DLOnly = update(config.Global.DownloadOnly, cli.DLOnly) 244 | opts.UpgradeOnly = update(config.Global.UpgradeOnly, cli.UpgradeOnly) 245 | opts.Asset = update([]string{}, cli.Asset) 246 | opts.Hash = update(config.Global.ShowHash, cli.Hash) 247 | opts.Verify = update("", cli.Verify) 248 | opts.Remove = update(false, cli.Remove) 249 | opts.DisableSSL = update(false, cli.DisableSSL) 250 | return nil 251 | } 252 | 253 | // Move the loaded configuration file project options into the opts variable 254 | func SetProjectOptionsFromConfig(config *Config, parser *flags.Parser, opts *Flags, cli CliFlags, projectName string) error { 255 | for name, repo := range config.Repositories { 256 | if name == projectName { 257 | opts.All = update(repo.All, cli.All) 258 | opts.Asset = update(repo.AssetFilters, cli.Asset) 259 | opts.DLOnly = update(repo.DownloadOnly, cli.DLOnly) 260 | opts.ExtractFile = update(repo.File, cli.ExtractFile) 261 | opts.Hash = update(repo.ShowHash, cli.Hash) 262 | targ, err := home.Expand(repo.Target) 263 | if err != nil { 264 | return err 265 | } 266 | opts.Output = update(targ, cli.Output) 267 | opts.Quiet = update(repo.Quiet, cli.Quiet) 268 | opts.Source = update(repo.Source, cli.Source) 269 | opts.System = update(repo.System, cli.System) 270 | opts.Tag = update(repo.Tag, cli.Tag) 271 | opts.UpgradeOnly = update(repo.UpgradeOnly, cli.UpgradeOnly) 272 | opts.Verify = update(repo.Verify, cli.Verify) 273 | opts.DisableSSL = update(repo.DisableSSL, cli.DisableSSL) 274 | break 275 | } 276 | } 277 | return nil 278 | } 279 | -------------------------------------------------------------------------------- /detect.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "path" 6 | "regexp" 7 | "strings" 8 | ) 9 | 10 | // A Detector selects an asset from a list of possibilities. 11 | type Detector interface { 12 | // Detect takes a list of possible assets and returns a direct match. If a 13 | // single direct match is not found, it returns a list of candidates and an 14 | // error explaining what happened. 
15 | Detect(assets []string) (string, []string, error) 16 | } 17 | 18 | type DetectorChain struct { 19 | detectors []Detector 20 | system Detector 21 | } 22 | 23 | func (dc *DetectorChain) Detect(assets []string) (string, []string, error) { 24 | for _, d := range dc.detectors { 25 | choice, candidates, err := d.Detect(assets) 26 | if len(candidates) == 0 && err != nil { 27 | return "", nil, err 28 | } else if len(candidates) == 0 { 29 | return choice, nil, nil 30 | } else { 31 | assets = candidates 32 | } 33 | } 34 | choice, candidates, err := dc.system.Detect(assets) 35 | if len(candidates) == 0 && err != nil { 36 | return "", nil, err 37 | } else if len(candidates) == 0 { 38 | return choice, nil, nil 39 | } else if len(candidates) >= 1 { 40 | assets = candidates 41 | } 42 | return "", assets, fmt.Errorf("%d candidates found for asset chain", len(assets)) 43 | } 44 | 45 | // An OS represents a target operating system. 46 | type OS struct { 47 | name string 48 | regex *regexp.Regexp 49 | anti *regexp.Regexp 50 | priority *regexp.Regexp // matches to priority are better than normal matches 51 | } 52 | 53 | // Match returns true if the given archive name is likely to store a binary for 54 | // this OS. Also returns if this is a priority match. 55 | func (os *OS) Match(s string) (bool, bool) { 56 | if os.anti != nil && os.anti.MatchString(s) { 57 | return false, false 58 | } 59 | if os.priority != nil { 60 | return os.regex.MatchString(s), os.priority.MatchString(s) 61 | } 62 | return os.regex.MatchString(s), false 63 | } 64 | 65 | var ( 66 | OSDarwin = OS{ 67 | name: "darwin", 68 | regex: regexp.MustCompile(`(?i)(darwin|mac.?(os)?|osx)`), 69 | } 70 | OSWindows = OS{ 71 | name: "windows", 72 | regex: regexp.MustCompile(`(?i)([^r]win|windows)`), 73 | } 74 | OSLinux = OS{ 75 | name: "linux", 76 | regex: regexp.MustCompile(`(?i)(linux|ubuntu)`), 77 | anti: regexp.MustCompile(`(?i)(android)`), 78 | priority: regexp.MustCompile(`\.appimage$`), 79 | } 80 | OSNetBSD = OS{ 81 | name: "netbsd", 82 | regex: regexp.MustCompile(`(?i)(netbsd)`), 83 | } 84 | OSFreeBSD = OS{ 85 | name: "freebsd", 86 | regex: regexp.MustCompile(`(?i)(freebsd)`), 87 | } 88 | OSOpenBSD = OS{ 89 | name: "openbsd", 90 | regex: regexp.MustCompile(`(?i)(openbsd)`), 91 | } 92 | OSAndroid = OS{ 93 | name: "android", 94 | regex: regexp.MustCompile(`(?i)(android)`), 95 | } 96 | OSIllumos = OS{ 97 | name: "illumos", 98 | regex: regexp.MustCompile(`(?i)(illumos)`), 99 | } 100 | OSSolaris = OS{ 101 | name: "solaris", 102 | regex: regexp.MustCompile(`(?i)(solaris)`), 103 | } 104 | OSPlan9 = OS{ 105 | name: "plan9", 106 | regex: regexp.MustCompile(`(?i)(plan9)`), 107 | } 108 | ) 109 | 110 | // a map of GOOS values to internal OS matchers 111 | var goosmap = map[string]OS{ 112 | "darwin": OSDarwin, 113 | "windows": OSWindows, 114 | "linux": OSLinux, 115 | "netbsd": OSNetBSD, 116 | "openbsd": OSOpenBSD, 117 | "freebsd": OSFreeBSD, 118 | "android": OSAndroid, 119 | "illumos": OSIllumos, 120 | "solaris": OSSolaris, 121 | "plan9": OSPlan9, 122 | } 123 | 124 | // An Arch represents a system architecture, such as amd64, i386, arm or others. 125 | type Arch struct { 126 | name string 127 | regex *regexp.Regexp 128 | } 129 | 130 | // Match returns true if this architecture is likely supported by the given 131 | // archive name. 
132 | func (a *Arch) Match(s string) bool { 133 | return a.regex.MatchString(s) 134 | } 135 | 136 | var ( 137 | ArchAMD64 = Arch{ 138 | name: "amd64", 139 | regex: regexp.MustCompile(`(?i)(x64|amd64|x86(-|_)?64)`), 140 | } 141 | ArchI386 = Arch{ 142 | name: "386", 143 | regex: regexp.MustCompile(`(?i)(x32|amd32|x86(-|_)?32|i?386)`), 144 | } 145 | ArchArm = Arch{ 146 | name: "arm", 147 | regex: regexp.MustCompile(`(?i)(arm32|armv6|arm\b)`), 148 | } 149 | ArchArm64 = Arch{ 150 | name: "arm64", 151 | regex: regexp.MustCompile(`(?i)(arm64|armv8|aarch64)`), 152 | } 153 | ArchRiscv64 = Arch{ 154 | name: "riscv64", 155 | regex: regexp.MustCompile(`(?i)(riscv64)`), 156 | } 157 | ) 158 | 159 | // a map from GOARCH values to internal architecture matchers 160 | var goarchmap = map[string]Arch{ 161 | "amd64": ArchAMD64, 162 | "386": ArchI386, 163 | "arm": ArchArm, 164 | "arm64": ArchArm64, 165 | "riscv64": ArchRiscv64, 166 | } 167 | 168 | // AllDetector matches every asset. If there is only one asset, it is returned 169 | // as a direct match. If there are multiple assets they are all returned as 170 | // candidates. 171 | type AllDetector struct{} 172 | 173 | func (a *AllDetector) Detect(assets []string) (string, []string, error) { 174 | if len(assets) == 1 { 175 | return assets[0], nil, nil 176 | } 177 | return "", assets, fmt.Errorf("%d matches found", len(assets)) 178 | } 179 | 180 | // SingleAssetDetector finds a single named asset. If Anti is true it finds all 181 | // assets that don't contain Asset. 182 | type SingleAssetDetector struct { 183 | Asset string 184 | Anti bool 185 | } 186 | 187 | func (s *SingleAssetDetector) Detect(assets []string) (string, []string, error) { 188 | var candidates []string 189 | for _, a := range assets { 190 | if !s.Anti && path.Base(a) == s.Asset { 191 | return a, nil, nil 192 | } 193 | if !s.Anti && strings.Contains(path.Base(a), s.Asset) { 194 | candidates = append(candidates, a) 195 | } 196 | if s.Anti && !strings.Contains(path.Base(a), s.Asset) { 197 | candidates = append(candidates, a) 198 | } 199 | } 200 | if len(candidates) == 1 { 201 | return candidates[0], nil, nil 202 | } else if len(candidates) > 1 { 203 | return "", candidates, fmt.Errorf("%d candidates found for asset `%s`", len(candidates), s.Asset) 204 | } 205 | return "", nil, fmt.Errorf("asset `%s` not found", s.Asset) 206 | } 207 | 208 | // A SystemDetector matches a particular OS/Arch system pair. 209 | type SystemDetector struct { 210 | Os OS 211 | Arch Arch 212 | } 213 | 214 | // NewSystemDetector returns a new detector for the given OS/Arch as given by 215 | // Go OS/Arch names. 216 | func NewSystemDetector(sos, sarch string) (*SystemDetector, error) { 217 | os, ok := goosmap[sos] 218 | if !ok { 219 | return nil, fmt.Errorf("unsupported target OS: %s", sos) 220 | } 221 | arch, ok := goarchmap[sarch] 222 | if !ok { 223 | return nil, fmt.Errorf("unsupported target arch: %s", sarch) 224 | } 225 | return &SystemDetector{ 226 | Os: os, 227 | Arch: arch, 228 | }, nil 229 | } 230 | 231 | // Detect extracts the assets that match this detector's OS/Arch pair. If one 232 | // direct OS/Arch match is found, it is returned. If multiple OS/Arch matches 233 | // are found they are returned as candidates. If multiple assets that only 234 | // match the OS are found, and no full OS/Arch matches are found, the OS 235 | // matches are returned as candidates. Otherwise all assets are returned as 236 | // candidates. 
237 | func (d *SystemDetector) Detect(assets []string) (string, []string, error) { 238 | var priority []string 239 | var matches []string 240 | var candidates []string 241 | all := make([]string, 0, len(assets)) 242 | for _, a := range assets { 243 | if strings.HasSuffix(a, ".sha256") || strings.HasSuffix(a, ".sha256sum") { 244 | // skip checksums (they will be checked later by the verifier) 245 | continue 246 | } 247 | 248 | os, extra := d.Os.Match(a) 249 | if extra { 250 | priority = append(priority, a) 251 | } 252 | arch := d.Arch.Match(a) 253 | if os && arch { 254 | matches = append(matches, a) 255 | } 256 | if os { 257 | candidates = append(candidates, a) 258 | } 259 | all = append(all, a) 260 | } 261 | if len(priority) == 1 { 262 | return priority[0], nil, nil 263 | } else if len(priority) > 1 { 264 | return "", priority, fmt.Errorf("%d priority matches found", len(matches)) 265 | } else if len(matches) == 1 { 266 | return matches[0], nil, nil 267 | } else if len(matches) > 1 { 268 | return "", matches, fmt.Errorf("%d matches found", len(matches)) 269 | } else if len(candidates) == 1 { 270 | return candidates[0], nil, nil 271 | } else if len(candidates) > 1 { 272 | return "", candidates, fmt.Errorf("%d candidates found (unsure architecture)", len(candidates)) 273 | } else if len(all) == 1 { 274 | return all[0], nil, nil 275 | } 276 | return "", all, fmt.Errorf("no candidates found") 277 | } 278 | -------------------------------------------------------------------------------- /dl.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/tls" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os" 11 | "strings" 12 | "time" 13 | 14 | pb "github.com/schollz/progressbar/v3" 15 | "github.com/zyedidia/eget/home" 16 | ) 17 | 18 | func tokenFrom(s string) (string, error) { 19 | if strings.HasPrefix(s, "@") { 20 | f, err := home.Expand(s[1:]) 21 | if err != nil { 22 | return "", err 23 | } 24 | b, err := os.ReadFile(f) 25 | return strings.TrimRight(string(b), "\r\n"), nil 26 | } 27 | return s, nil 28 | } 29 | 30 | var ErrNoToken = errors.New("no github token") 31 | 32 | func getGithubToken() (string, error) { 33 | if os.Getenv("EGET_GITHUB_TOKEN") != "" { 34 | return tokenFrom(os.Getenv("EGET_GITHUB_TOKEN")) 35 | } 36 | if os.Getenv("GITHUB_TOKEN") != "" { 37 | return tokenFrom(os.Getenv("GITHUB_TOKEN")) 38 | } 39 | return "", ErrNoToken 40 | } 41 | 42 | func SetAuthHeader(req *http.Request) *http.Request { 43 | token, err := getGithubToken() 44 | if err != nil && !errors.Is(err, ErrNoToken) { 45 | fmt.Fprintln(os.Stderr, "warning: not using github token:", err) 46 | } 47 | 48 | if req.URL.Scheme == "https" && req.Host == "api.github.com" && err == nil { 49 | if opts.DisableSSL { 50 | fmt.Fprintln(os.Stderr, "error: cannot use GitHub token if SSL verification is disabled") 51 | os.Exit(1) 52 | } 53 | req.Header.Set("Authorization", fmt.Sprintf("token %s", token)) 54 | } 55 | 56 | return req 57 | } 58 | 59 | func Get(url string) (*http.Response, error) { 60 | req, err := http.NewRequest("GET", url, nil) 61 | 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | req = SetAuthHeader(req) 67 | 68 | proxyClient := &http.Client{Transport: &http.Transport{ 69 | Proxy: http.ProxyFromEnvironment, 70 | TLSClientConfig: &tls.Config{InsecureSkipVerify: opts.DisableSSL}, 71 | }} 72 | 73 | return proxyClient.Do(req) 74 | } 75 | 76 | type RateLimitJson struct { 77 | Resources map[string]RateLimit 78 | } 79 | 
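// RateLimit holds the limit, remaining count, and reset time (Unix seconds) for one resource entry in GitHub's /rate_limit response.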
80 | type RateLimit struct { 81 | Limit int 82 | Remaining int 83 | Reset int64 84 | } 85 | 86 | func (r RateLimit) ResetTime() time.Time { 87 | return time.Unix(r.Reset, 0) 88 | } 89 | 90 | func (r RateLimit) String() string { 91 | now := time.Now() 92 | rtime := r.ResetTime() 93 | if rtime.Before(now) { 94 | return fmt.Sprintf("Limit: %d, Remaining: %d, Reset: %v", r.Limit, r.Remaining, rtime) 95 | } else { 96 | return fmt.Sprintf( 97 | "Limit: %d, Remaining: %d, Reset: %v (%v)", 98 | r.Limit, r.Remaining, rtime, rtime.Sub(now).Round(time.Second), 99 | ) 100 | } 101 | } 102 | 103 | func GetRateLimit() (RateLimit, error) { 104 | url := "https://api.github.com/rate_limit" 105 | req, err := http.NewRequest("GET", url, nil) 106 | if err != nil { 107 | return RateLimit{}, err 108 | } 109 | 110 | req = SetAuthHeader(req) 111 | 112 | req.Header.Set("Accept", "application/vnd.github.v3+json") 113 | 114 | resp, err := http.DefaultClient.Do(req) 115 | if err != nil { 116 | return RateLimit{}, err 117 | } 118 | 119 | defer resp.Body.Close() 120 | 121 | b, err := io.ReadAll(resp.Body) 122 | if err != nil { 123 | return RateLimit{}, err 124 | } 125 | 126 | var parsed RateLimitJson 127 | err = json.Unmarshal(b, &parsed) 128 | 129 | return parsed.Resources["core"], err 130 | } 131 | 132 | // Download the file at 'url' and write the http response body to 'out'. The 133 | // 'getbar' function allows the caller to construct a progress bar given the 134 | // size of the file being downloaded, and the download will write to the 135 | // returned progress bar. 136 | func Download(url string, out io.Writer, getbar func(size int64) *pb.ProgressBar) error { 137 | if IsLocalFile(url) { 138 | f, err := os.Open(url) 139 | if err != nil { 140 | return err 141 | } 142 | defer f.Close() 143 | _, err = io.Copy(out, f) 144 | return err 145 | } 146 | 147 | resp, err := Get(url) 148 | if err != nil { 149 | return err 150 | } 151 | defer resp.Body.Close() 152 | 153 | if resp.StatusCode != http.StatusOK { 154 | body, err := io.ReadAll(resp.Body) 155 | if err != nil { 156 | return err 157 | } 158 | return fmt.Errorf("download error: %d: %s", resp.StatusCode, body) 159 | } 160 | 161 | bar := getbar(resp.ContentLength) 162 | _, err = io.Copy(io.MultiWriter(out, bar), resp.Body) 163 | return err 164 | } 165 | -------------------------------------------------------------------------------- /eget.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "io/fs" 9 | "net/url" 10 | "os" 11 | "os/exec" 12 | "path" 13 | "path/filepath" 14 | "regexp" 15 | "runtime" 16 | "strings" 17 | "time" 18 | 19 | "github.com/jessevdk/go-flags" 20 | pb "github.com/schollz/progressbar/v3" 21 | ) 22 | 23 | func fatal(a ...interface{}) { 24 | fmt.Fprintln(os.Stderr, a...) 25 | os.Exit(1) 26 | } 27 | 28 | // IsUrl returns true if s is a valid URL. 29 | func IsUrl(s string) bool { 30 | u, err := url.Parse(s) 31 | return err == nil && u.Scheme != "" && u.Host != "" 32 | } 33 | 34 | // Cut is strings.Cut 35 | func Cut(s, sep string) (before, after string, found bool) { 36 | if i := strings.Index(s, sep); i >= 0 { 37 | return s[:i], s[i+len(sep):], true 38 | } 39 | return s, "", false 40 | } 41 | 42 | var ghrgx = regexp.MustCompile(`^(http(s)?://)?github\.com/[\w,\-,_]+/[\w,\-,_]+(.git)?(/)?$`) 43 | 44 | // IsGithubUrl returns true if s is a URL with github.com as the host. 
45 | func IsGithubUrl(s string) bool { 46 | return ghrgx.MatchString(s) 47 | } 48 | 49 | func IsLocalFile(s string) bool { 50 | _, err := os.Stat(s) 51 | return err == nil 52 | } 53 | 54 | // IsDirectory returns true if the file at 'path' is a directory. 55 | func IsDirectory(path string) bool { 56 | fileInfo, err := os.Stat(path) 57 | if err != nil { 58 | return false 59 | } 60 | return fileInfo.IsDir() 61 | } 62 | 63 | // searches for an asset thaat has the same name as the requested one but 64 | // ending with .sha256 or .sha256sum 65 | func checksumAsset(asset string, assets []string) string { 66 | for _, a := range assets { 67 | if a == asset+".sha256sum" || a == asset+".sha256" { 68 | return a 69 | } 70 | } 71 | return "" 72 | } 73 | 74 | // Determine the appropriate Finder to use. If opts.URL is provided, we use 75 | // a DirectAssetFinder. Otherwise we use a GithubAssetFinder. When a Github 76 | // repo is provided, we assume the repo name is the 'tool' name (for direct 77 | // URLs, the tool name is unknown and remains empty). 78 | func getFinder(project string, opts *Flags) (finder Finder, tool string) { 79 | if IsLocalFile(project) || (IsUrl(project) && !IsGithubUrl(project)) { 80 | finder = &DirectAssetFinder{ 81 | URL: project, 82 | } 83 | opts.System = "all" 84 | } else { 85 | if IsGithubUrl(project) { 86 | _, after, found := Cut(project, "github.com/") 87 | if found { 88 | project = strings.Trim(after, "/") 89 | } else { 90 | fatal(fmt.Sprintf("invalid GitHub repo URL %s", project)) 91 | } 92 | } 93 | 94 | repo := project 95 | if strings.Count(repo, "/") != 1 { 96 | fatal("invalid argument (must be of the form `user/repo`)") 97 | } 98 | parts := strings.Split(repo, "/") 99 | if parts[0] == "" || parts[1] == "" { 100 | fatal("invalid argument (must be of the form `user/repo`)") 101 | } 102 | tool = parts[1] 103 | 104 | if opts.Source { 105 | tag := "master" 106 | if opts.Tag != "" { 107 | tag = opts.Tag 108 | } 109 | finder = &GithubSourceFinder{ 110 | Repo: repo, 111 | Tag: tag, 112 | Tool: tool, 113 | } 114 | } else { 115 | tag := "latest" 116 | if opts.Tag != "" { 117 | tag = fmt.Sprintf("tags/%s", opts.Tag) 118 | } 119 | 120 | var mint time.Time 121 | if opts.UpgradeOnly { 122 | parts := strings.Split(project, "/") 123 | last := parts[len(parts)-1] 124 | mint = bintime(last, opts.Output) 125 | } 126 | 127 | finder = &GithubAssetFinder{ 128 | Repo: repo, 129 | Tag: tag, 130 | Prerelease: opts.Prerelease, 131 | MinTime: mint, 132 | } 133 | } 134 | } 135 | return finder, tool 136 | } 137 | 138 | func getVerifier(sumAsset string, opts *Flags) (verifier Verifier, err error) { 139 | if opts.Verify != "" { 140 | verifier, err = NewSha256Verifier(opts.Verify) 141 | } else if sumAsset != "" { 142 | verifier = &Sha256AssetVerifier{ 143 | AssetURL: sumAsset, 144 | } 145 | } else if opts.Hash { 146 | verifier = &Sha256Printer{} 147 | } else { 148 | verifier = &NoVerifier{} 149 | } 150 | return verifier, err 151 | } 152 | 153 | // Determine the appropriate detector. If the --system is 'all', we use an 154 | // AllDetector, which will just return all assets. Otherwise we use the 155 | // --system pair provided by the user, or the runtime.GOOS/runtime.GOARCH 156 | // pair by default (the host system OS/Arch pair). 
157 | func getDetector(opts *Flags) (detector Detector, err error) { 158 | var system Detector 159 | if opts.System == "all" { 160 | system = &AllDetector{} 161 | } else if opts.System != "" { 162 | split := strings.Split(opts.System, "/") 163 | if len(split) < 2 { 164 | fatal("system descriptor must be os/arch") 165 | } 166 | system, err = NewSystemDetector(split[0], split[1]) 167 | } else { 168 | system, err = NewSystemDetector(runtime.GOOS, runtime.GOARCH) 169 | } 170 | 171 | if len(opts.Asset) >= 1 { 172 | detectors := make([]Detector, len(opts.Asset)) 173 | for i, a := range opts.Asset { 174 | anti := strings.HasPrefix(a, "^") 175 | if anti { 176 | a = a[1:] 177 | } 178 | detectors[i] = &SingleAssetDetector{ 179 | Asset: a, 180 | Anti: anti, 181 | } 182 | } 183 | detector = &DetectorChain{ 184 | detectors: detectors, 185 | system: system, 186 | } 187 | } else { 188 | detector = system 189 | } 190 | return detector, err 191 | } 192 | 193 | // Determine which extractor to use. If --download-only is provided, we 194 | // just "extract" the downloaded archive to itself. Otherwise we try to 195 | // extract the literal file provided by --file, or by default we just 196 | // extract a binary with the tool name that was possibly auto-detected 197 | // above. 198 | func getExtractor(url, tool string, opts *Flags) (extractor Extractor, err error) { 199 | if opts.DLOnly { 200 | extractor = &SingleFileExtractor{ 201 | Name: path.Base(url), 202 | Rename: path.Base(url), 203 | Decompress: func(r io.Reader) (io.Reader, error) { 204 | return r, nil 205 | }, 206 | } 207 | } else if opts.ExtractFile != "" { 208 | gc, err := NewGlobChooser(opts.ExtractFile) 209 | if err != nil { 210 | return nil, err 211 | } 212 | extractor = NewExtractor(path.Base(url), tool, gc) 213 | } else { 214 | extractor = NewExtractor(path.Base(url), tool, &BinaryChooser{ 215 | Tool: tool, 216 | }) 217 | } 218 | return extractor, nil 219 | } 220 | 221 | // Write an extracted file to disk with a new name. 222 | func writeFile(data []byte, rename string, mode fs.FileMode) error { 223 | if rename[0] == '-' { 224 | // if the output is '-', just print it to stdout 225 | _, err := os.Stdout.Write(data) 226 | return err 227 | } 228 | 229 | // remove file if it exists already 230 | os.Remove(rename) 231 | // make parent directories if necessary 232 | os.MkdirAll(filepath.Dir(rename), 0755) 233 | 234 | f, err := os.OpenFile(rename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) 235 | if err != nil { 236 | return err 237 | } 238 | defer f.Close() 239 | _, err = f.Write(data) 240 | return err 241 | } 242 | 243 | // Would really like generics to implement this... 244 | // Make the user select one of the choices and return the index of the 245 | // selection. 246 | func userSelect(choices []interface{}) int { 247 | for i, c := range choices { 248 | fmt.Fprintf(os.Stderr, "(%d) %v\n", i+1, c) 249 | } 250 | var choice int 251 | for { 252 | fmt.Fprint(os.Stderr, "Enter selection number: ") 253 | _, err := fmt.Scanf("%d", &choice) 254 | if err == nil && (choice <= 0 || choice > len(choices)) { 255 | err = fmt.Errorf("%d is out of bounds", choice) 256 | } 257 | if err == nil { 258 | break 259 | } 260 | 261 | if errors.Is(err, io.EOF) { 262 | fatal("Error reading selection") 263 | } 264 | 265 | fmt.Fprintf(os.Stderr, "Invalid selection: %v\n", err) 266 | } 267 | return choice 268 | } 269 | 270 | func bintime(bin string, to string) (t time.Time) { 271 | file := "" 272 | dir := "." 
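	// work out where an existing copy of the binary would live: an explicit --to directory wins, then $EGET_BIN, then the current directory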
273 | if to != "" && IsDirectory(to) { 274 | // direct directory 275 | dir = to 276 | } else if ebin := os.Getenv("EGET_BIN"); ebin != "" { 277 | dir = ebin 278 | } 279 | 280 | if to != "" && !strings.ContainsRune(to, os.PathSeparator) { 281 | // path joined possible with eget bin 282 | bin = to 283 | } else if to != "" && !IsDirectory(to) { 284 | // direct path 285 | file = to 286 | } 287 | 288 | if file == "" { 289 | file = filepath.Join(dir, bin) 290 | } 291 | fi, err := os.Stat(file) 292 | if err != nil { 293 | return 294 | } 295 | return fi.ModTime() 296 | } 297 | 298 | func downloadConfigRepositories(config *Config) error { 299 | hasError := false 300 | errorList := []error{} 301 | 302 | binary, err := os.Executable() 303 | 304 | if err != nil { 305 | binary = os.Args[0] 306 | } 307 | 308 | for name, _ := range config.Repositories { 309 | cmd := exec.Command(binary, name) 310 | cmd.Stderr = os.Stderr 311 | 312 | err := cmd.Run() 313 | if err != nil { 314 | hasError = true 315 | errorList = append(errorList, err) 316 | } 317 | } 318 | 319 | if hasError { 320 | return fmt.Errorf("one or more errors occurred while downloading: %v", errorList) 321 | } 322 | 323 | return nil 324 | } 325 | 326 | var opts Flags 327 | 328 | func main() { 329 | var cli CliFlags 330 | 331 | flagparser := flags.NewParser(&cli, flags.PassDoubleDash|flags.PrintErrors) 332 | flagparser.Usage = "[OPTIONS] TARGET" 333 | args, err := flagparser.Parse() 334 | 335 | if err != nil { 336 | os.Exit(1) 337 | } 338 | 339 | if cli.Version { 340 | fmt.Println("eget version", Version) 341 | os.Exit(0) 342 | } 343 | 344 | if cli.Help { 345 | flagparser.WriteHelp(os.Stdout) 346 | os.Exit(0) 347 | } 348 | 349 | config, err := InitializeConfig() 350 | if err != nil { 351 | fatal(err) 352 | } 353 | 354 | err = SetGlobalOptionsFromConfig(config, flagparser, &opts, cli) 355 | if err != nil { 356 | fatal(err) 357 | } 358 | 359 | if cli.Rate { 360 | rdat, err := GetRateLimit() 361 | if err != nil { 362 | fatal(err) 363 | } 364 | fmt.Println(rdat) 365 | os.Exit(0) 366 | } 367 | 368 | target := "" 369 | 370 | if len(args) > 0 { 371 | target = args[0] 372 | } 373 | 374 | err = SetProjectOptionsFromConfig(config, flagparser, &opts, cli, target) 375 | if err != nil { 376 | fatal(err) 377 | } 378 | 379 | if cli.DownloadAll { 380 | err = downloadConfigRepositories(config) 381 | 382 | if err != nil { 383 | fatal(err) 384 | } 385 | 386 | os.Exit(0) 387 | } 388 | 389 | if len(args) <= 0 { 390 | fmt.Println("no target given") 391 | flagparser.WriteHelp(os.Stdout) 392 | os.Exit(0) 393 | } 394 | 395 | if opts.DisableSSL { 396 | fmt.Fprintln(os.Stderr, "warning: SSL verification is disabled") 397 | } 398 | 399 | if opts.Remove { 400 | ebin := os.Getenv("EGET_BIN") 401 | err := os.Remove(filepath.Join(ebin, target)) 402 | if err != nil { 403 | fmt.Fprintln(os.Stderr, err) 404 | os.Exit(1) 405 | } 406 | fmt.Printf("Removed `%s`\n", filepath.Join(ebin, target)) 407 | os.Exit(0) 408 | } 409 | 410 | // when --quiet is passed, send non-essential output to io.Discard 411 | var output io.Writer = os.Stderr 412 | if opts.Quiet { 413 | output = io.Discard 414 | } 415 | 416 | finder, tool := getFinder(target, &opts) 417 | assets, err := finder.Find() 418 | if err != nil { 419 | if errors.Is(err, ErrNoUpgrade) { 420 | fmt.Fprintf(output, "%s: %v\n", target, err) 421 | os.Exit(0) 422 | } 423 | fatal(err) 424 | } 425 | 426 | detector, err := getDetector(&opts) 427 | if err != nil { 428 | fatal(err) 429 | } 430 | 431 | // get the url and candidates from the 
detector 432 | url, candidates, err := detector.Detect(assets) 433 | if len(candidates) != 0 && err != nil { 434 | // if multiple candidates are returned, the user must select manually which one to download 435 | fmt.Fprintf(os.Stderr, "%v: please select manually\n", err) 436 | choices := make([]interface{}, len(candidates)) 437 | for i := range candidates { 438 | choices[i] = path.Base(candidates[i]) 439 | } 440 | choice := userSelect(choices) 441 | url = candidates[choice-1] 442 | } else if err != nil { 443 | fatal(err) 444 | } 445 | 446 | // print the URL 447 | fmt.Fprintf(output, "%s\n", url) 448 | 449 | // download with progress bar 450 | buf := &bytes.Buffer{} 451 | err = Download(url, buf, func(size int64) *pb.ProgressBar { 452 | var pbout io.Writer = os.Stderr 453 | if opts.Quiet { 454 | pbout = io.Discard 455 | } 456 | return pb.NewOptions64(size, 457 | pb.OptionSetWriter(pbout), 458 | pb.OptionShowBytes(true), 459 | pb.OptionSetWidth(10), 460 | pb.OptionThrottle(65*time.Millisecond), 461 | pb.OptionShowCount(), 462 | pb.OptionSpinnerType(14), 463 | pb.OptionFullWidth(), 464 | pb.OptionSetDescription("Downloading"), 465 | pb.OptionOnCompletion(func() { 466 | fmt.Fprint(pbout, "\n") 467 | }), 468 | pb.OptionSetTheme(pb.Theme{ 469 | Saucer: "=", 470 | SaucerHead: ">", 471 | SaucerPadding: " ", 472 | BarStart: "[", 473 | BarEnd: "]", 474 | })) 475 | }) 476 | if err != nil { 477 | fatal(fmt.Sprintf("%s (URL: %s)", err, url)) 478 | } 479 | 480 | body := buf.Bytes() 481 | 482 | sumAsset := checksumAsset(url, assets) 483 | verifier, err := getVerifier(sumAsset, &opts) 484 | if err != nil { 485 | fatal(err) 486 | } 487 | err = verifier.Verify(body) 488 | if err != nil { 489 | fatal(err) 490 | } else if opts.Verify == "" && sumAsset != "" { 491 | fmt.Fprintf(output, "Checksum verified with %s\n", path.Base(sumAsset)) 492 | } else if opts.Verify != "" { 493 | fmt.Fprintf(output, "Checksum verified\n") 494 | } 495 | 496 | extractor, err := getExtractor(url, tool, &opts) 497 | if err != nil { 498 | fatal(err) 499 | } 500 | 501 | // get extraction candidates 502 | bin, bins, err := extractor.Extract(body, opts.All) 503 | if len(bins) != 0 && err != nil && !opts.All { 504 | // if there are multiple candidates, have the user select manually 505 | fmt.Fprintf(os.Stderr, "%v: please select manually\n", err) 506 | choices := make([]interface{}, len(bins)+1) 507 | for i := range bins { 508 | choices[i] = bins[i] 509 | } 510 | choices[len(bins)] = "all" 511 | choice := userSelect(choices) 512 | if choice == len(bins)+1 { 513 | opts.All = true 514 | } else { 515 | bin = bins[choice-1] 516 | } 517 | } else if err != nil && len(bins) == 0 { 518 | fatal(err) 519 | } 520 | if len(bins) == 0 { 521 | bins = []ExtractedFile{bin} 522 | } 523 | 524 | extract := func(bin ExtractedFile) { 525 | mode := bin.Mode() 526 | 527 | // write the extracted file to a file on disk, in the --to directory if 528 | // requested 529 | out := filepath.Base(bin.Name) 530 | if opts.Output == "-" { 531 | out = "-" 532 | } else if opts.Output != "" && IsDirectory(opts.Output) { 533 | out = filepath.Join(opts.Output, out) 534 | } else if opts.Output != "" && opts.All { 535 | os.MkdirAll(opts.Output, 0755) 536 | out = filepath.Join(opts.Output, out) 537 | } else { 538 | if opts.Output != "" { 539 | out = opts.Output 540 | } 541 | // only use $EGET_BIN if all of the following are true 542 | // 1. $EGET_BIN is non-empty 543 | // 2. --to is not a path (not a path if no path separator is found) 544 | // 3. 
The extracted file is executable 545 | if os.Getenv("EGET_BIN") != "" && !strings.ContainsRune(out, os.PathSeparator) && mode&0111 != 0 && !bin.Dir { 546 | out = filepath.Join(os.Getenv("EGET_BIN"), out) 547 | } 548 | } 549 | 550 | err = bin.Extract(out) 551 | if err != nil { 552 | fatal(err) 553 | } 554 | 555 | fmt.Fprintf(output, "Extracted `%s` to `%s`\n", bin.ArchiveName, out) 556 | } 557 | 558 | if opts.All { 559 | for _, bin := range bins { 560 | extract(bin) 561 | } 562 | } else { 563 | extract(bin) 564 | } 565 | } 566 | -------------------------------------------------------------------------------- /extract.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "compress/bzip2" 7 | "compress/gzip" 8 | "fmt" 9 | "io" 10 | "io/fs" 11 | "os" 12 | "path/filepath" 13 | "strings" 14 | 15 | "github.com/gobwas/glob" 16 | "github.com/klauspost/compress/zstd" 17 | "github.com/ulikunitz/xz" 18 | ) 19 | 20 | // An Extractor reads in some archive data and extracts a particular file from 21 | // it. If there are multiple candidates it returns a list and an error 22 | // explaining what happened. 23 | type Extractor interface { 24 | Extract(data []byte, multiple bool) (ExtractedFile, []ExtractedFile, error) 25 | } 26 | 27 | // An ExtractedFile contains the data, name, and permissions of a file in the 28 | // archive. 29 | type ExtractedFile struct { 30 | Name string // name to extract to 31 | ArchiveName string // name in archive 32 | mode fs.FileMode 33 | Extract func(to string) error 34 | Dir bool 35 | } 36 | 37 | // Mode returns the filemode of the extracted file. 38 | func (e ExtractedFile) Mode() fs.FileMode { 39 | return modeFrom(e.Name, e.mode) 40 | } 41 | 42 | func modeFrom(fname string, mode fs.FileMode) fs.FileMode { 43 | if isExec(fname, mode) { 44 | return mode | 0111 45 | } 46 | return mode 47 | } 48 | 49 | // String returns the archive name of this extracted file 50 | func (e ExtractedFile) String() string { 51 | return e.ArchiveName 52 | } 53 | 54 | // A Chooser selects a file. It may list the file as a direct match (should be 55 | // immediately extracted if found), or a possible match (only extract if it is 56 | // the only match, or if the user manually requests it). 57 | type Chooser interface { 58 | Choose(name string, dir bool, mode fs.FileMode) (direct bool, possible bool) 59 | } 60 | 61 | // NewExtractor constructs an extractor for the given archive file using the 62 | // given chooser. It will construct extractors for files ending in '.tar.gz', 63 | // '.tar.bz2', '.tar', '.zip'. After these matches, if the file ends with 64 | // '.gz', '.bz2' it will be decompressed and copied. Other files will simply 65 | // be copied without any decompression or extraction. 
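//
// For example (illustrative asset and tool names):
//
//	NewExtractor("tool_linux_amd64.tar.gz", "tool", &BinaryChooser{Tool: "tool"})
//
// yields an ArchiveExtractor that gunzips and untars the asset, while the same
// call for "tool_linux_amd64.gz" yields a SingleFileExtractor that decompresses
// the single file and renames it to "tool".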
66 | func NewExtractor(filename string, tool string, chooser Chooser) Extractor { 67 | if tool == "" { 68 | tool = filename 69 | } 70 | 71 | gunzipper := func(r io.Reader) (io.Reader, error) { 72 | return gzip.NewReader(r) 73 | } 74 | b2unzipper := func(r io.Reader) (io.Reader, error) { 75 | return bzip2.NewReader(r), nil 76 | } 77 | xunzipper := func(r io.Reader) (io.Reader, error) { 78 | return xz.NewReader(bufio.NewReader(r)) 79 | } 80 | zstdunzipper := func(r io.Reader) (io.Reader, error) { 81 | return zstd.NewReader(r) 82 | } 83 | nounzipper := func(r io.Reader) (io.Reader, error) { 84 | return r, nil 85 | } 86 | 87 | switch { 88 | case strings.HasSuffix(filename, ".tar.gz"), strings.HasSuffix(filename, ".tgz"): 89 | return &ArchiveExtractor{ 90 | File: chooser, 91 | Ar: NewTarArchive, 92 | Decompress: gunzipper, 93 | } 94 | case strings.HasSuffix(filename, ".tar.bz2"), strings.HasSuffix(filename, ".tbz"): 95 | return &ArchiveExtractor{ 96 | File: chooser, 97 | Ar: NewTarArchive, 98 | Decompress: b2unzipper, 99 | } 100 | case strings.HasSuffix(filename, ".tar.xz"), strings.HasSuffix(filename, ".txz"): 101 | return &ArchiveExtractor{ 102 | File: chooser, 103 | Ar: NewTarArchive, 104 | Decompress: xunzipper, 105 | } 106 | case strings.HasSuffix(filename, ".tar.zst"): 107 | return &ArchiveExtractor{ 108 | File: chooser, 109 | Ar: NewTarArchive, 110 | Decompress: zstdunzipper, 111 | } 112 | case strings.HasSuffix(filename, ".tar"): 113 | return &ArchiveExtractor{ 114 | File: chooser, 115 | Ar: NewTarArchive, 116 | Decompress: nounzipper, 117 | } 118 | case strings.HasSuffix(filename, ".zip"): 119 | return &ArchiveExtractor{ 120 | Ar: NewZipArchive, 121 | File: chooser, 122 | } 123 | case strings.HasSuffix(filename, ".gz"): 124 | return &SingleFileExtractor{ 125 | Rename: tool, 126 | Name: filename, 127 | Decompress: gunzipper, 128 | } 129 | case strings.HasSuffix(filename, ".bz2"): 130 | return &SingleFileExtractor{ 131 | Rename: tool, 132 | Name: filename, 133 | Decompress: b2unzipper, 134 | } 135 | case strings.HasSuffix(filename, ".xz"): 136 | return &SingleFileExtractor{ 137 | Rename: tool, 138 | Name: filename, 139 | Decompress: xunzipper, 140 | } 141 | case strings.HasSuffix(filename, ".zst"): 142 | return &SingleFileExtractor{ 143 | Rename: tool, 144 | Name: filename, 145 | Decompress: zstdunzipper, 146 | } 147 | default: 148 | return &SingleFileExtractor{ 149 | Rename: tool, 150 | Name: filename, 151 | Decompress: nounzipper, 152 | } 153 | } 154 | } 155 | 156 | type ArchiveFn func(data []byte, decomp DecompFn) (Archive, error) 157 | type DecompFn func(r io.Reader) (io.Reader, error) 158 | 159 | type ArchiveExtractor struct { 160 | File Chooser 161 | Ar ArchiveFn 162 | Decompress DecompFn 163 | } 164 | 165 | type link struct { 166 | newname string 167 | oldname string 168 | sym bool 169 | } 170 | 171 | func (l link) Write() error { 172 | // remove file if it exists already 173 | os.Remove(l.newname) 174 | // make parent directories if necessary 175 | os.MkdirAll(filepath.Dir(l.newname), 0755) 176 | 177 | if l.sym { 178 | return os.Symlink(l.oldname, l.newname) 179 | } 180 | return os.Link(l.oldname, l.newname) 181 | } 182 | 183 | func (a *ArchiveExtractor) Extract(data []byte, multiple bool) (ExtractedFile, []ExtractedFile, error) { 184 | var candidates []ExtractedFile 185 | var dirs []string 186 | 187 | ar, err := a.Ar(data, a.Decompress) 188 | if err != nil { 189 | return ExtractedFile{}, nil, err 190 | } 191 | for { 192 | f, err := ar.Next() 193 | if err == io.EOF { 194 | break 
195 | } 196 | if err != nil { 197 | return ExtractedFile{}, nil, fmt.Errorf("extract: %w", err) 198 | } 199 | var hasdir bool 200 | for _, d := range dirs { 201 | if strings.HasPrefix(f.Name, d) { 202 | hasdir = true 203 | break 204 | } 205 | } 206 | if hasdir { 207 | continue 208 | } 209 | direct, possible := a.File.Choose(f.Name, f.Dir(), f.Mode) 210 | if direct || possible { 211 | name := rename(f.Name, f.Name) 212 | 213 | fdata, err := ar.ReadAll() 214 | if err != nil { 215 | return ExtractedFile{}, nil, fmt.Errorf("extract: %w", err) 216 | } 217 | 218 | var extract func(to string) error 219 | if !f.Dir() { 220 | extract = func(to string) error { 221 | return writeFile(fdata, to, modeFrom(name, f.Mode)) 222 | } 223 | } else { 224 | dirs = append(dirs, f.Name) 225 | extract = func(to string) error { 226 | ar, err := a.Ar(data, a.Decompress) 227 | if err != nil { 228 | return err 229 | } 230 | var links []link 231 | for { 232 | subf, err := ar.Next() 233 | if err == io.EOF { 234 | break 235 | } else if err != nil { 236 | return fmt.Errorf("extract: %w", err) 237 | } else if !strings.HasPrefix(subf.Name, f.Name) { 238 | continue 239 | } else if subf.Dir() { 240 | os.MkdirAll(filepath.Join(to, subf.Name[len(f.Name):]), 0755) 241 | continue 242 | } else if subf.Type == TypeLink || subf.Type == TypeSymlink { 243 | newname := filepath.Join(to, subf.Name[len(f.Name):]) 244 | oldname := subf.LinkName 245 | links = append(links, link{ 246 | newname: newname, 247 | oldname: oldname, 248 | sym: subf.Type == TypeSymlink, 249 | }) 250 | continue 251 | } 252 | 253 | fdata, err := ar.ReadAll() 254 | if err != nil { 255 | return fmt.Errorf("extract: %w", err) 256 | } 257 | name = filepath.Join(to, subf.Name[len(f.Name):]) 258 | err = writeFile(fdata, name, subf.Mode) 259 | if err != nil { 260 | return fmt.Errorf("extract: %w", err) 261 | } 262 | } 263 | for _, l := range links { 264 | if err := l.Write(); err != nil && err != os.ErrExist { 265 | return fmt.Errorf("extract: %w", err) 266 | } 267 | } 268 | return nil 269 | } 270 | } 271 | 272 | ef := ExtractedFile{ 273 | Name: name, 274 | ArchiveName: f.Name, 275 | mode: f.Mode, 276 | Extract: extract, 277 | Dir: f.Dir(), 278 | } 279 | if direct && !multiple { 280 | return ef, nil, err 281 | } 282 | if err == nil { 283 | candidates = append(candidates, ef) 284 | } 285 | } 286 | } 287 | if len(candidates) == 1 { 288 | return candidates[0], nil, nil 289 | } else if len(candidates) == 0 { 290 | return ExtractedFile{}, candidates, fmt.Errorf("target %v not found in archive", a.File) 291 | } 292 | return ExtractedFile{}, candidates, fmt.Errorf("%d candidates for target %v found", len(candidates), a.File) 293 | } 294 | 295 | // SingleFileExtractor extracts files called 'Name' after decompressing the 296 | // file with 'Decompress'. 
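//
// The extracted data is written with a base mode of 0666; if the resulting
// name looks executable to isExec (for example a name with no extension, or
// one ending in ".exe" or ".appimage"), modeFrom adds the 0111 execute bits,
// so a hypothetical "tool.gz" extracted as "tool" ends up executable.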
297 | type SingleFileExtractor struct { 298 | Rename string 299 | Name string 300 | Decompress func(r io.Reader) (io.Reader, error) 301 | } 302 | 303 | func (sf *SingleFileExtractor) Extract(data []byte, multiple bool) (ExtractedFile, []ExtractedFile, error) { 304 | name := rename(sf.Name, sf.Rename) 305 | return ExtractedFile{ 306 | Name: name, 307 | ArchiveName: sf.Name, 308 | mode: 0666, 309 | Extract: func(to string) error { 310 | r := bytes.NewReader(data) 311 | dr, err := sf.Decompress(r) 312 | if err != nil { 313 | return err 314 | } 315 | 316 | decdata, err := io.ReadAll(dr) 317 | if err != nil { 318 | return err 319 | } 320 | return writeFile(decdata, to, modeFrom(name, 0666)) 321 | }, 322 | }, nil, nil 323 | } 324 | 325 | // attempt to rename 'file' to an appropriate executable name 326 | func rename(file string, nameguess string) string { 327 | if isDefinitelyNotExec(file) { 328 | return file 329 | } 330 | 331 | var rename string 332 | if strings.HasSuffix(file, ".appimage") { 333 | // remove the .appimage extension 334 | rename = file[:len(file)-len(".appimage")] 335 | } else if strings.HasSuffix(file, ".exe") { 336 | // directly use xxx.exe 337 | rename = file 338 | } else { 339 | // otherwise use the rename guess 340 | rename = nameguess 341 | } 342 | return rename 343 | } 344 | 345 | // A BinaryChooser selects executable files. If the executable file has the 346 | // name 'Tool' it is considered a direct match. If the file is only executable, 347 | // it is a possible match. 348 | type BinaryChooser struct { 349 | Tool string 350 | } 351 | 352 | func (b *BinaryChooser) Choose(name string, dir bool, mode fs.FileMode) (bool, bool) { 353 | if dir { 354 | return false, false 355 | } 356 | 357 | fmatch := filepath.Base(name) == b.Tool || 358 | filepath.Base(name) == b.Tool+".exe" || 359 | filepath.Base(name) == b.Tool+".appimage" 360 | 361 | possible := !mode.IsDir() && isExec(name, mode.Perm()) 362 | return fmatch && possible, possible 363 | } 364 | 365 | func (b *BinaryChooser) String() string { 366 | return fmt.Sprintf("exe `%s`", b.Tool) 367 | } 368 | 369 | func isDefinitelyNotExec(file string) bool { 370 | // file is definitely not executable if it is .deb, .1, or .txt 371 | return strings.HasSuffix(file, ".deb") || strings.HasSuffix(file, ".1") || 372 | strings.HasSuffix(file, ".txt") 373 | } 374 | 375 | func isExec(file string, mode os.FileMode) bool { 376 | if isDefinitelyNotExec(file) { 377 | return false 378 | } 379 | 380 | // file is executable if it is one of the following: 381 | // *.exe, *.appimage, no extension, executable file permissions 382 | return strings.HasSuffix(file, ".exe") || 383 | strings.HasSuffix(file, ".appimage") || 384 | !strings.Contains(file, ".") || 385 | mode&0111 != 0 386 | } 387 | 388 | // LiteralFileChooser selects files with the name 'File'. 
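//
// For example (illustrative path), LiteralFileChooser{File: "completions/tool.bash"}
// reports an archive entry as a possible match when the entry's base name is
// "tool.bash" and its full name ends in "completions/tool.bash"; it never
// reports a direct match.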
389 | type LiteralFileChooser struct { 390 | File string 391 | } 392 | 393 | func (lf *LiteralFileChooser) Choose(name string, dir bool, mode fs.FileMode) (bool, bool) { 394 | return false, filepath.Base(name) == filepath.Base(lf.File) && strings.HasSuffix(name, lf.File) 395 | } 396 | 397 | func (lf *LiteralFileChooser) String() string { 398 | return fmt.Sprintf("`%s`", lf.File) 399 | } 400 | 401 | type GlobChooser struct { 402 | expr string 403 | g glob.Glob 404 | all bool 405 | } 406 | 407 | func NewGlobChooser(gl string) (*GlobChooser, error) { 408 | g, err := glob.Compile(gl, '/') 409 | return &GlobChooser{ 410 | g: g, 411 | expr: gl, 412 | all: gl == "*" || gl == "/", 413 | }, err 414 | } 415 | 416 | func (gc *GlobChooser) Choose(name string, dir bool, mode fs.FileMode) (bool, bool) { 417 | if gc.all { 418 | return true, true 419 | } 420 | if len(name) > 0 && name[len(name)-1] == '/' { 421 | name = name[:len(name)-1] 422 | } 423 | return false, gc.g.Match(filepath.Base(name)) || gc.g.Match(name) 424 | } 425 | 426 | func (gc *GlobChooser) String() string { 427 | return fmt.Sprintf("`%s`", gc.expr) 428 | } 429 | -------------------------------------------------------------------------------- /find.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | // A Finder returns a list of URLs making up a project's assets. 14 | type Finder interface { 15 | Find() ([]string, error) 16 | } 17 | 18 | // A GithubRelease matches the Assets portion of Github's release API json. 19 | type GithubRelease struct { 20 | Assets []struct { 21 | DownloadURL string `json:"browser_download_url"` 22 | } `json:"assets"` 23 | 24 | Prerelease bool `json:"prerelease"` 25 | Tag string `json:"tag_name"` 26 | CreatedAt time.Time `json:"created_at"` 27 | } 28 | 29 | type GithubError struct { 30 | Code int 31 | Status string 32 | Body []byte 33 | Url string 34 | } 35 | type errResponse struct { 36 | Message string `json:"message"` 37 | Doc string `json:"documentation_url"` 38 | } 39 | 40 | func (ge *GithubError) Error() string { 41 | var msg errResponse 42 | json.Unmarshal(ge.Body, &msg) 43 | 44 | if ge.Code == http.StatusForbidden { 45 | return fmt.Sprintf("%s: %s: %s", ge.Status, msg.Message, msg.Doc) 46 | } 47 | return fmt.Sprintf("%s (URL: %s)", ge.Status, ge.Url) 48 | } 49 | 50 | // A GithubAssetFinder finds assets for the given Repo at the given tag. Tags 51 | // must be given as 'tag/'. Use 'latest' to get the latest release. 52 | type GithubAssetFinder struct { 53 | Repo string 54 | Tag string 55 | Prerelease bool 56 | MinTime time.Time // release must be after MinTime to be found 57 | } 58 | 59 | var ErrNoUpgrade = errors.New("requested release is not more recent than current version") 60 | 61 | func (f *GithubAssetFinder) Find() ([]string, error) { 62 | if f.Prerelease && f.Tag == "latest" { 63 | tag, err := f.getLatestTag() 64 | if err != nil { 65 | return nil, err 66 | } 67 | f.Tag = fmt.Sprintf("tags/%s", tag) 68 | } 69 | 70 | // query github's API for this repo/tag pair. 
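// (e.g., for an illustrative repo "zyedidia/micro" with the tag "latest", this
// is https://api.github.com/repos/zyedidia/micro/releases/latest)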
71 | url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", f.Repo, f.Tag) 72 | resp, err := Get(url) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | defer resp.Body.Close() 78 | 79 | if resp.StatusCode != http.StatusOK { 80 | body, err := io.ReadAll(resp.Body) 81 | if err != nil { 82 | return nil, err 83 | } 84 | if strings.HasPrefix(f.Tag, "tags/") && resp.StatusCode == http.StatusNotFound { 85 | return f.FindMatch() 86 | } 87 | return nil, &GithubError{ 88 | Status: resp.Status, 89 | Code: resp.StatusCode, 90 | Body: body, 91 | Url: url, 92 | } 93 | } 94 | 95 | // read and unmarshal the resulting json 96 | body, err := io.ReadAll(resp.Body) 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | var release GithubRelease 102 | err = json.Unmarshal(body, &release) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | if release.CreatedAt.Before(f.MinTime) { 108 | return nil, ErrNoUpgrade 109 | } 110 | 111 | // accumulate all assets from the json into a slice 112 | assets := make([]string, 0, len(release.Assets)) 113 | for _, a := range release.Assets { 114 | assets = append(assets, a.DownloadURL) 115 | } 116 | 117 | return assets, nil 118 | } 119 | 120 | func (f *GithubAssetFinder) FindMatch() ([]string, error) { 121 | tag := f.Tag[len("tags/"):] 122 | 123 | for page := 1; ; page++ { 124 | url := fmt.Sprintf("https://api.github.com/repos/%s/releases?page=%d", f.Repo, page) 125 | resp, err := Get(url) 126 | if err != nil { 127 | return nil, err 128 | } 129 | 130 | defer resp.Body.Close() 131 | 132 | if resp.StatusCode != http.StatusOK { 133 | body, err := io.ReadAll(resp.Body) 134 | if err != nil { 135 | return nil, err 136 | } 137 | return nil, &GithubError{ 138 | Status: resp.Status, 139 | Code: resp.StatusCode, 140 | Body: body, 141 | Url: url, 142 | } 143 | } 144 | 145 | // read and unmarshal the resulting json 146 | body, err := io.ReadAll(resp.Body) 147 | if err != nil { 148 | return nil, err 149 | } 150 | 151 | var releases []GithubRelease 152 | err = json.Unmarshal(body, &releases) 153 | if err != nil { 154 | return nil, err 155 | } 156 | 157 | for _, r := range releases { 158 | if !f.Prerelease && r.Prerelease { 159 | continue 160 | } 161 | if strings.Contains(r.Tag, tag) && !r.CreatedAt.Before(f.MinTime) { 162 | // we have a winner 163 | assets := make([]string, 0, len(r.Assets)) 164 | for _, a := range r.Assets { 165 | assets = append(assets, a.DownloadURL) 166 | } 167 | return assets, nil 168 | } 169 | } 170 | 171 | if len(releases) < 30 { 172 | break 173 | } 174 | } 175 | 176 | return nil, fmt.Errorf("no matching tag for '%s'", tag) 177 | } 178 | 179 | // finds the latest pre-release and returns the tag 180 | func (f *GithubAssetFinder) getLatestTag() (string, error) { 181 | url := fmt.Sprintf("https://api.github.com/repos/%s/releases", f.Repo) 182 | resp, err := Get(url) 183 | if err != nil { 184 | return "", fmt.Errorf("pre-release finder: %w", err) 185 | } 186 | 187 | var releases []GithubRelease 188 | 189 | body, err := io.ReadAll(resp.Body) 190 | if err != nil { 191 | return "", fmt.Errorf("pre-release finder: %w", err) 192 | } 193 | err = json.Unmarshal(body, &releases) 194 | if err != nil { 195 | return "", fmt.Errorf("pre-release finder: %w", err) 196 | } 197 | 198 | if len(releases) <= 0 { 199 | return "", fmt.Errorf("no releases found") 200 | } 201 | 202 | return releases[0].Tag, nil 203 | } 204 | 205 | // A DirectAssetFinder returns the embedded URL directly as the only asset. 
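//
// For example (illustrative URL), a DirectAssetFinder with URL
// "https://example.com/tool_linux_amd64.tar.gz" returns exactly that URL from
// Find, without querying the GitHub release API.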
206 | type DirectAssetFinder struct { 207 | URL string 208 | } 209 | 210 | func (f *DirectAssetFinder) Find() ([]string, error) { 211 | return []string{f.URL}, nil 212 | } 213 | 214 | type GithubSourceFinder struct { 215 | Tool string 216 | Repo string 217 | Tag string 218 | } 219 | 220 | func (f *GithubSourceFinder) Find() ([]string, error) { 221 | return []string{fmt.Sprintf("https://github.com/%s/tarball/%s/%s.tar.gz", f.Repo, f.Tag, f.Tool)}, nil 222 | } 223 | -------------------------------------------------------------------------------- /flags.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | type Flags struct { 4 | Tag string 5 | Prerelease bool 6 | Source bool 7 | Output string 8 | System string 9 | ExtractFile string 10 | All bool 11 | Quiet bool 12 | DLOnly bool 13 | UpgradeOnly bool 14 | Asset []string 15 | Hash bool 16 | Verify string 17 | Remove bool 18 | DisableSSL bool 19 | } 20 | 21 | type CliFlags struct { 22 | Tag *string `short:"t" long:"tag" description:"tagged release to use instead of latest"` 23 | Prerelease *bool `long:"pre-release" description:"include pre-releases when fetching the latest version"` 24 | Source *bool `long:"source" description:"download the source code for the target repo instead of a release"` 25 | Output *string `long:"to" description:"move to given location after extracting"` 26 | System *string `short:"s" long:"system" description:"target system to download for (use \"all\" for all choices)"` 27 | ExtractFile *string `short:"f" long:"file" description:"glob to select files for extraction"` 28 | All *bool `long:"all" description:"extract all candidate files"` 29 | Quiet *bool `short:"q" long:"quiet" description:"only print essential output"` 30 | DLOnly *bool `short:"d" long:"download-only" description:"stop after downloading the asset (no extraction)"` 31 | UpgradeOnly *bool `long:"upgrade-only" description:"only download if release is more recent than current version"` 32 | Asset *[]string `short:"a" long:"asset" description:"download a specific asset containing the given string; can be specified multiple times for additional filtering; use ^ for anti-match"` 33 | Hash *bool `long:"sha256" description:"show the SHA-256 hash of the downloaded asset"` 34 | Verify *string `long:"verify-sha256" description:"verify the downloaded asset checksum against the one provided"` 35 | Rate bool `long:"rate" description:"show GitHub API rate limiting information"` 36 | Remove *bool `short:"r" long:"remove" description:"remove the given file from $EGET_BIN or the current directory"` 37 | Version bool `short:"v" long:"version" description:"show version information"` 38 | Help bool `short:"h" long:"help" description:"show this help message"` 39 | DownloadAll bool `short:"D" long:"download-all" description:"download all projects defined in the config file"` 40 | DisableSSL *bool `short:"k" long:"disable-ssl" description:"disable SSL verification for download requests"` 41 | } 42 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/zyedidia/eget 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/BurntSushi/toml v1.2.1 7 | github.com/blang/semver v3.5.1+incompatible 8 | github.com/gobwas/glob v0.2.3 9 | github.com/jessevdk/go-flags v1.5.0 10 | github.com/schollz/progressbar/v3 v3.8.2 11 | github.com/ulikunitz/xz v0.5.10 12 | ) 13 | 14 | require ( 15 | 
github.com/klauspost/compress v1.15.15 // indirect 16 | github.com/mattn/go-runewidth v0.0.13 // indirect 17 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect 18 | github.com/rivo/uniseg v0.2.0 // indirect 19 | github.com/stretchr/testify v1.8.0 // indirect 20 | golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect 21 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect 22 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect 23 | ) 24 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= 2 | github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= 3 | github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= 4 | github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= 9 | github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= 10 | github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= 11 | github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= 12 | github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= 13 | github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= 14 | github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= 15 | github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 16 | github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= 17 | github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 18 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= 19 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= 20 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 21 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 22 | github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= 23 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 24 | github.com/schollz/progressbar/v3 v3.8.2 h1:2kZJwZCpb+E/V79kGO7daeq+hUwUJW0A5QD1Wv455dA= 25 | github.com/schollz/progressbar/v3 v3.8.2/go.mod h1:9KHLdyuXczIsyStQwzvW8xiELskmX7fQMaZdN23nAv8= 26 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 27 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 28 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 29 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 30 | github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= 
31 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 32 | github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= 33 | github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= 34 | golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 35 | golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= 36 | golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 37 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 38 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 39 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 40 | golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 41 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 42 | golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 43 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= 44 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 45 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 46 | golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 47 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= 48 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 49 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 50 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 51 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 52 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 53 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 54 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 55 | -------------------------------------------------------------------------------- /home/home.go: -------------------------------------------------------------------------------- 1 | package home 2 | 3 | import ( 4 | "fmt" 5 | "os/user" 6 | "path/filepath" 7 | "strings" 8 | ) 9 | 10 | func Home() (string, error) { 11 | userData, err := user.Current() 12 | if err != nil { 13 | return "", fmt.Errorf("find homedir: %w", err) 14 | } 15 | return userData.HomeDir, err 16 | } 17 | 18 | // Expand takes a path as input and replaces ~ at the start of the path with the user's 19 | // home directory. Does nothing if the path does not start with '~'. 
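//
// For example (illustrative home directories), Expand("~/bin") becomes
// "/home/alice/bin" for a user whose home directory is "/home/alice", and
// Expand("~bob/bin") resolves against user bob's home directory instead; a
// path such as "/usr/local/bin" is returned unchanged.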
20 | func Expand(path string) (string, error) { 21 | if !strings.HasPrefix(path, "~") { 22 | return path, nil 23 | } 24 | 25 | var userData *user.User 26 | var err error 27 | 28 | homeString := strings.Split(filepath.ToSlash(path), "/")[0] 29 | if homeString == "~" { 30 | userData, err = user.Current() 31 | if err != nil { 32 | return "", fmt.Errorf("expand tilde: %w", err) 33 | } 34 | } else { 35 | userData, err = user.Lookup(homeString[1:]) 36 | if err != nil { 37 | return "", fmt.Errorf("expand tilde: %w", err) 38 | } 39 | } 40 | 41 | home := userData.HomeDir 42 | 43 | return strings.Replace(path, homeString, home, 1), nil 44 | } 45 | -------------------------------------------------------------------------------- /man/eget.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: eget 3 | section: 1 4 | header: Eget Manual 5 | --- 6 | 7 | # NAME 8 | eget - easily install prebuilt binaries from GitHub 9 | 10 | # SYNOPSIS 11 | eget `[--version] [--help] [OPTIONS] TARGET` 12 | 13 | # DESCRIPTION 14 | Eget is a tool for downloading and extracting prebuilt binaries from releases 15 | on GitHub. To use it, provide a repository and Eget will search through the 16 | assets from the latest release in an attempt to find a suitable prebuilt 17 | binary for your system. If one is found, the asset will be downloaded and 18 | Eget will extract the binary to the current directory. Eget should only be 19 | used for installing simple, static prebuilt binaries, where the extracted 20 | binary is all that is needed for installation. For more complex installation, 21 | you may use the `--download-only` option, and perform extraction manually. 22 | 23 | The **`PROJECT`** argument passed to Eget should either be a GitHub 24 | repository, formatted as **`user/repo`**, in which case Eget will search the 25 | release assets, a direct URL, in which case Eget will directly download and 26 | extract from the given URL, or a local file, in which case Eget will extract 27 | directly from the local file. 28 | 29 | If Eget downloads an asset called `xxx` and there also exists an asset called 30 | `xxx.sha256` or `xxx.sha256sum`, Eget will automatically verify that the 31 | SHA-256 checksum of the downloaded asset matches the one contained in that 32 | file, and abort installation if a mismatch occurs. 33 | 34 | When installing an executable, Eget will place it in the current directory by 35 | default. If the environment variable **`EGET_BIN`** is non-empty, Eget will 36 | place the executable in that directory. The `--to` flag may also be used to 37 | customize the install location. 38 | 39 | Directories can also be specified as files to extract, and all files within 40 | them will be extracted. For example: 41 | 42 | eget https://go.dev/dl/go1.17.5.linux-amd64.tar.gz --file go --to ~/go1.17.5 43 | 44 | GitHub limits API requests to 60 per hour for unauthenticated users. If you 45 | would like to perform more requests (up to 5,000 per hour), you can set up a 46 | personal access token and assign it to an environment variable named either 47 | **`GITHUB_TOKEN`** or **`EGET_GITHUB_TOKEN`** when running Eget. If both are set, 48 | **`EGET_GITHUB_TOKEN`** will take precedence. Eget will read this variable and 49 | send the token as authorization with requests to GitHub. It is also possible to 50 | read the token from a file by using `@/path/to/file` as the token value. 51 | 52 | The behavior of Eget is configurable in a number of ways via options. 
53 | Documentation for these options is provided below. 54 | 55 | # OPTIONS 56 | `-t, --tag=` 57 | 58 | : Use the given tagged release instead of the latest release. If the project does not have a tag that matches exactly, eget will look for a tag that contains the given string, and use the latest one. Example: **`eget -t nightly zyedidia/micro`**. 59 | 60 | `--pre-release` 61 | 62 | : Include pre-releases when fetching the latest version. This will get the latest overall release, even if it is a pre-release. 63 | 64 | `--source` 65 | 66 | : Download the source code for the repository (only works for GitHub repositories) rather than a release. Downloads from the "master" branch by default. Use `--tag` to download a different tag or branch. 67 | 68 | `--to=` 69 | 70 | : Move the executable to the given name after extraction. If the name is `-`, it the data will be written to stdout. Example: **`eget zyedidia/micro --to /usr/local/bin`**. Example: **`eget --asset nvim.appimage --to nvim neovim/neovim`**. 71 | 72 | `-s, --system=` 73 | 74 | : Use the given system as the target instead of the host. Systems follow the notation 'OS/Arch', where OS is a valid OS (darwin, windows, linux, netbsd, openbsd, freebsd, android, illumos, solaris, plan9), and Arch is a valid architecture (amd64, 386, arm, arm64, riscv64). If the special value **all** is used, all possibilities are given and the user must select manually. Example: **`eget -s darwin/amd64 zyedidia/micro`**. 75 | 76 | `-f, --file=` 77 | 78 | : Extract the file that matches the given glob. You may want use this option to extract non-binary files. Example: **`eget -f LICENSE zyedidia/micro`**. 79 | 80 | `--all` 81 | 82 | : Extract all candidate files. 83 | 84 | `-q, --quiet` 85 | 86 | : Only print essential output. 87 | 88 | `--download-only` 89 | 90 | : Stop after downloading the asset. This prevents Eget from performing extraction, allowing you to perform manual installation after the asset is downloaded. 91 | 92 | `--download-all` 93 | 94 | : Download all projects defined in the configuration file. 95 | 96 | --upgrade-only 97 | 98 | : Only download the asset if the release is more recent than an existing asset with the same name in `$EGET_BIN`, or the current directory if `$EGET_BIN` is not defined. 99 | 100 | `-a, --asset=` 101 | 102 | : Download a specific asset containing the given string. If there is an exact match with an asset, that asset is used regardless (except when using `^`). If the argument begins with a `^`, then any asset that does not match the argument is a candidate. This option can be specified multiple times for additional filtering. Example: **`eget --asset nvim.appimage neovim/neovim`**. Example **`eget --download-only --asset amd64.deb --asset musl sharkdp/bat`**. If the assets are filterable using the `--system` detector (i.e., if applying the detector does not remove all candidates), the system detector is applied. Use `--system all` to always consider all assets. 103 | 104 | `--sha256` 105 | 106 | : Show the SHA-256 hash of the downloaded asset. This can be used to verify that the asset is not corrupted. 107 | 108 | `--verify-sha256=` 109 | 110 | : Verify the SHA-256 hash of the downloaded asset against the one provided as an argument. Similar to `--sha256`, but Eget will do the verification for you. 111 | 112 | `--rate` 113 | 114 | : Show GitHub API rate limiting information. 115 | 116 | `--remove` 117 | 118 | : Remove the target file from `$EGET_BIN` (or the current directory if unset). 
Note that this flag is boolean, and means eget will treat `TARGET` as a file to be removed. 119 | 120 | `-k, --disable-ssl` 121 | 122 | : Disable SSL certificate verification for GET requests. Cannot be used in combination with a `GITHUB_TOKEN`. 123 | 124 | `-v, --version` 125 | 126 | : Show version information. 127 | 128 | `-h, --help` 129 | 130 | : Show a help message. 131 | 132 | # CONFIGURATION 133 | Eget can be configured using a TOML file located at `~/.eget.toml`. Alternatively, 134 | the configuration file can be located in the same directory as the Eget binary. 135 | 136 | Both global settings can be configured, as well as setting on a per-repository basis. 137 | 138 | Sections can be named either `global` or `"owner/repo"`, where `owner` and `repo` 139 | are the owner and repository name of the target repository (not that the `owner/repo` 140 | format is quoted). 141 | 142 | For example, the following configuration file will set the `--to` flag to `~/bin` for 143 | all repositories, and will set the `--to` flag to `~/.local/bin` for the `zyedidia/micro` 144 | repository. 145 | 146 | ```toml 147 | [global] 148 | target = "~/bin" 149 | 150 | ["zyedidia/micro"] 151 | target = "~/.local/bin" 152 | ``` 153 | 154 | More complete example configuration: 155 | 156 | ```toml 157 | [global] 158 | github_token = "ghp_1234567890" 159 | quiet = false 160 | show_hash = false 161 | upgrade_only = true 162 | target = "./test" 163 | 164 | ["zyedidia/micro"] 165 | upgrade_only = false 166 | show_hash = true 167 | asset_filters = [ "static", ".tar.gz" ] 168 | target = "~/.local/bin/micro" 169 | ``` 170 | 171 | By using the configuration above, you could run the following command to download 172 | the latest release of `micro`: 173 | **`eget zyedidia/micro`** 174 | 175 | Without the configuration, you would need to run the following command instead: 176 | **`eget zyedidia/micro --to ~/.local/bin/micro --sha256 --asset static --asset .tar.gz`** 177 | 178 | ## Available settings 179 | 180 | `all` 181 | 182 | : Whether to extract all candidate files. 183 | 184 | `asset_filters` 185 | 186 | : An array of partial asset names to filter the available assets for download. 187 | 188 | `download_only` 189 | 190 | : Whether to stop after downloading the asset (no extraction). 191 | 192 | `file` 193 | 194 | : The glob to select files for extraction. 195 | 196 | `github_token` 197 | 198 | : GitHub API token to use for requests. 199 | 200 | `quiet` 201 | 202 | : Whether to only print essential output. 203 | 204 | `show_hash` 205 | 206 | : Whether to show the SHA-256 hash of the downloaded asset. 207 | 208 | `system` 209 | 210 | : The target system to download for. 211 | 212 | `target` 213 | 214 | : The directory to move the downloaded file to after extraction. 215 | 216 | `upgrade_only` 217 | 218 | : Whether to only download if release is more recent than current version. 219 | 220 | # FOR MAINTAINERS 221 | 222 | To guarantee compatibility of your software's pre-built binaries with Eget, you 223 | can follow these rules. 224 | 225 | * Provide your pre-built binaries as GitHub release assets. 226 | * Format the system name as `OS_Arch` and include it in every pre-built binary 227 | name. Supported OSes are `darwin`/`macos`, `windows`, `linux`, `netbsd`, `openbsd`, 228 | `freebsd`, `android`, `illumos`, `solaris`, `plan9`. Supported architectures 229 | are `amd64`, `i386`, `arm`, `arm64`, `riscv64`. 230 | * If desired, include `*.sha256` files for each asset, containing the SHA-256 231 | checksum of each asset. 
These checksums will be automatically verified by 232 | Eget. 233 | * Include only a single executable or appimage per system in each release archive. 234 | * Use `.tar.gz`, `.tar.bz2`, `.tar.xz`, `.tar`, or `.zip` for archives. You may 235 | also directly upload the executable without an archive, or a compressed 236 | executable ending in `.gz`, `.bz2`, or `.xz`. 237 | 238 | If you don't follow these rules, Eget may still work well with your software. 239 | Eget's auto-detection is much more relaxed than what is required by these 240 | rules, but if you follow these rules your software is guaranteed to be 241 | compatible with Eget. 242 | 243 | # BUGS 244 | 245 | See GitHub Issues: 246 | 247 | # AUTHOR 248 | 249 | Zachary Yedidia 250 | -------------------------------------------------------------------------------- /test/eget.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zyedidia/eget/0983deab36378a1536bf9dc97d5d56f6e5dd3477/test/eget.toml -------------------------------------------------------------------------------- /test/test_eget.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | ) 8 | 9 | func fileExists(path string) error { 10 | _, err := os.Stat(path) 11 | return err 12 | } 13 | 14 | func run(name string, args ...string) error { 15 | cmd := exec.Command(name, args...) 16 | cmd.Stdout = os.Stdout 17 | cmd.Stderr = os.Stderr 18 | 19 | return cmd.Run() 20 | } 21 | 22 | func must(err error) { 23 | if err != nil { 24 | fmt.Fprintln(os.Stderr, err) 25 | os.Exit(1) 26 | } 27 | } 28 | 29 | func main() { 30 | eget := os.Getenv("TEST_EGET") 31 | 32 | must(run(eget, "--system", "linux/amd64", "jgm/pandoc")) 33 | must(fileExists("pandoc")) 34 | 35 | must(run(eget, "zyedidia/micro", "--tag", "nightly", "--asset", "osx")) 36 | must(fileExists("micro")) 37 | 38 | must(run(eget, "--asset", "nvim.appimage", "--to", "nvim", "neovim/neovim")) 39 | must(fileExists("nvim")) 40 | 41 | must(run(eget, "--system", "darwin/amd64", "sharkdp/fd")) 42 | must(fileExists("fd")) 43 | 44 | must(run(eget, "--system", "windows/amd64", "--asset", "windows-gnu", "BurntSushi/ripgrep")) 45 | must(fileExists("rg.exe")) 46 | 47 | must(run(eget, "-f", "eget.1", "zyedidia/eget")) 48 | must(fileExists("eget.1")) 49 | 50 | fmt.Println("ALL TESTS PASS") 51 | } 52 | -------------------------------------------------------------------------------- /tools/build-all.go: -------------------------------------------------------------------------------- 1 | //go:build ignore 2 | // +build ignore 3 | 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "os/exec" 10 | "runtime" 11 | "sync" 12 | ) 13 | 14 | type Target struct { 15 | OS string 16 | Arch string 17 | } 18 | 19 | func main() { 20 | targets := []struct { 21 | OS string 22 | Arch string 23 | }{ 24 | {"darwin", "amd64"}, 25 | {"darwin", "arm64"}, 26 | {"freebsd", "amd64"}, 27 | {"linux", "amd64"}, 28 | {"linux", "386"}, 29 | {"linux", "arm64"}, 30 | {"linux", "arm"}, 31 | {"openbsd", "amd64"}, 32 | {"windows", "amd64"}, 33 | {"windows", "386"}, 34 | } 35 | 36 | compile := func(platform, architecture string, wg *sync.WaitGroup) { 37 | defer wg.Done() 38 | 39 | cmd := exec.Command("make", "package") 40 | cmd.Stdout = os.Stdout 41 | cmd.Stderr = os.Stderr 42 | cmd.Env = os.Environ() 43 | cgo := "0" 44 | if runtime.GOOS == "darwin" { 45 | cgo = "1" 46 | } else { 47 | fmt.Println("warning: it is 
recommended to cross-compile on Mac, for cgo") 48 | } 49 | cmd.Env = append(cmd.Env, 50 | fmt.Sprintf("GOOS=%s", platform), 51 | fmt.Sprintf("GOARCH=%s", architecture), 52 | fmt.Sprintf("GOMAXPROCS=%d", runtime.NumCPU()), 53 | fmt.Sprintf("CGO_ENABLED=%s", cgo), 54 | ) 55 | 56 | err := cmd.Run() 57 | if err != nil { 58 | fmt.Fprintln(os.Stderr, err) 59 | } 60 | 61 | fmt.Printf("finished building %s-%s\n", platform, architecture) 62 | } 63 | 64 | var wg sync.WaitGroup 65 | 66 | wg.Add(len(targets)) 67 | 68 | for _, t := range targets { 69 | fmt.Printf("starting build for %s-%s\n", t.OS, t.Arch) 70 | go compile(t.OS, t.Arch, &wg) 71 | } 72 | 73 | wg.Wait() 74 | } 75 | -------------------------------------------------------------------------------- /tools/build-version.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os/exec" 6 | "strings" 7 | 8 | "github.com/blang/semver" 9 | ) 10 | 11 | func getTag(match ...string) (string, *semver.PRVersion) { 12 | args := append([]string{ 13 | "describe", "--tags", 14 | }, match...) 15 | tag, err := exec.Command("git", args...).Output() 16 | if err != nil { 17 | return "", nil 18 | } 19 | tagParts := strings.Split(string(tag), "-") 20 | if len(tagParts) == 3 { 21 | if ahead, err := semver.NewPRVersion(tagParts[1]); err == nil { 22 | return tagParts[0], &ahead 23 | } 24 | } else if len(tagParts) == 4 { 25 | if ahead, err := semver.NewPRVersion(tagParts[2]); err == nil { 26 | return tagParts[0] + "-" + tagParts[1], &ahead 27 | } 28 | } 29 | 30 | return string(tag), nil 31 | } 32 | 33 | func main() { 34 | if tags, err := exec.Command("git", "tag").Output(); err != nil || len(tags) == 0 { 35 | // no tags found -- fetch them 36 | exec.Command("git", "fetch", "--tags").Run() 37 | } 38 | // Find the last vX.X.X Tag and get how many builds we are ahead of it. 39 | versionStr, ahead := getTag("--match", "v*") 40 | version, err := semver.ParseTolerant(versionStr) 41 | if err != nil { 42 | // no version tag found so just return what ever we can find. 43 | fmt.Println("0.0.0-unknown") 44 | return 45 | } 46 | // Get the tag of the current revision. 47 | tag, _ := getTag("--exact-match") 48 | if tag == versionStr { 49 | // Seems that we are going to build a release. 50 | // So the version number should already be correct. 51 | fmt.Println(version.String()) 52 | return 53 | } 54 | 55 | // If we don't have any tag assume "dev" 56 | if tag == "" || strings.HasPrefix(tag, "nightly") { 57 | tag = "dev" 58 | } 59 | // Get the most likely next version: 60 | if !strings.Contains(version.String(), "rc") { 61 | version.Patch = version.Patch + 1 62 | } 63 | 64 | if pr, err := semver.NewPRVersion(tag); err == nil { 65 | // append the tag as pre-release name 66 | version.Pre = append(version.Pre, pr) 67 | } 68 | 69 | if ahead != nil { 70 | // if we know how many commits we are ahead of the last release, append that too. 
71 | version.Pre = append(version.Pre, *ahead) 72 | } 73 | 74 | fmt.Println(version.String()) 75 | } 76 | -------------------------------------------------------------------------------- /verify.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "fmt" 8 | "io" 9 | ) 10 | 11 | type Verifier interface { 12 | Verify(b []byte) error 13 | } 14 | 15 | type NoVerifier struct{} 16 | 17 | func (n *NoVerifier) Verify(b []byte) error { 18 | return nil 19 | } 20 | 21 | type Sha256Error struct { 22 | Expected []byte 23 | Got []byte 24 | } 25 | 26 | func (e *Sha256Error) Error() string { 27 | return fmt.Sprintf("sha256 checksum mismatch:\nexpected: %x\ngot: %x", e.Expected, e.Got) 28 | } 29 | 30 | type Sha256Verifier struct { 31 | Expected []byte 32 | } 33 | 34 | func NewSha256Verifier(expectedHex string) (*Sha256Verifier, error) { 35 | expected, _ := hex.DecodeString(expectedHex) 36 | if len(expected) != sha256.Size { 37 | return nil, fmt.Errorf("sha256sum (%s) too small: %d bytes decoded", expectedHex, len(expectedHex)) 38 | } 39 | return &Sha256Verifier{ 40 | Expected: expected, 41 | }, nil 42 | } 43 | 44 | func (s256 *Sha256Verifier) Verify(b []byte) error { 45 | sum := sha256.Sum256(b) 46 | if bytes.Equal(sum[:], s256.Expected) { 47 | return nil 48 | } 49 | return &Sha256Error{ 50 | Expected: s256.Expected, 51 | Got: sum[:], 52 | } 53 | } 54 | 55 | type Sha256Printer struct{} 56 | 57 | func (s256 *Sha256Printer) Verify(b []byte) error { 58 | sum := sha256.Sum256(b) 59 | fmt.Printf("%x\n", sum) 60 | return nil 61 | } 62 | 63 | type Sha256AssetVerifier struct { 64 | AssetURL string 65 | } 66 | 67 | func (s256 *Sha256AssetVerifier) Verify(b []byte) error { 68 | resp, err := Get(s256.AssetURL) 69 | if err != nil { 70 | return err 71 | } 72 | defer resp.Body.Close() 73 | data, err := io.ReadAll(resp.Body) 74 | if err != nil { 75 | return err 76 | } 77 | expected := make([]byte, sha256.Size) 78 | n, err := hex.Decode(expected, data) 79 | if n < sha256.Size { 80 | return fmt.Errorf("sha256sum (%s) too small: %d bytes decoded", string(data), n) 81 | } 82 | sum := sha256.Sum256(b) 83 | if bytes.Equal(sum[:], expected[:n]) { 84 | return nil 85 | } 86 | return &Sha256Error{ 87 | Expected: expected[:n], 88 | Got: sum[:], 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | var Version = "1.3.4+src" 4 | --------------------------------------------------------------------------------