├── io ├── testdata │ ├── oneline │ ├── project │ │ ├── dir │ │ │ └── f │ │ └── go.mod │ ├── twolines │ └── threelines ├── multiwriter.go ├── multiwriter_test.go ├── cmd_test.go ├── fileutils_test.go └── cmd.go ├── .frogbot └── frogbot-config.yml ├── unarchive ├── testdata │ ├── archives │ │ ├── win.tar │ │ ├── win.zip │ │ ├── unix.zip │ │ ├── unix.tar.gz │ │ ├── win.tar.gz │ │ ├── dot-dir.tar.gz │ │ ├── softlink-rel.zip │ │ ├── softlink-cousin.zip │ │ ├── softlink-rel.tar.gz │ │ ├── softlink-cousin.tar.gz │ │ ├── strip-components.zip │ │ ├── softlink-uncle-file.zip │ │ ├── strip-components.tar.gz │ │ ├── softlink-uncle-file.tar.gz │ │ ├── strip-components.tar │ │ ├── unix.tar │ │ ├── softlink-cousin.tar │ │ ├── softlink-uncle-file.tar │ │ └── softlink-rel.tar │ └── zipslip │ │ ├── rel.zip │ │ ├── abs.tar.gz │ │ ├── rel.tar.gz │ │ ├── softlink-abs.zip │ │ ├── softlink-rel.zip │ │ ├── softlink-uncle.zip │ │ ├── softlink-abs.tar.gz │ │ ├── softlink-rel.tar.gz │ │ ├── hardlink-tilde.tar.gz │ │ ├── softlink-uncle.tar.gz │ │ ├── abs.tar │ │ ├── rel.tar │ │ ├── softlink-abs.tar │ │ ├── softlink-rel.tar │ │ ├── softlink-uncle.tar │ │ ├── hardlink-tilde.tar │ │ └── softlink-loop.tar ├── archive_test.go └── archive.go ├── .github ├── ISSUE_TEMPLATE │ ├── question.md │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── release.yml └── workflows │ ├── test.yml │ ├── cla.yml │ ├── analysis.yml │ ├── frogbot-scan-pull-request.yml │ └── frogbot-scan-repository.yml ├── CONTRIBUTING.md ├── .gitignore ├── crypto ├── key_generator.go ├── checksum_test.go ├── aes_encryption_test.go ├── aes_encryption.go └── checksum.go ├── go.mod ├── stringutils ├── wildcards_test.go └── wildcards.go ├── safeconvert ├── int.go └── int_test.go ├── README.md ├── log ├── logger_test.go └── logger.go ├── datastructures ├── set_test.go └── set.go ├── lru ├── lru.go ├── lru_base.go └── lru_test.go ├── version ├── version_test.go └── version.go ├── fanout ├── reader.go ├── 
readall_reader.go └── reader_test.go ├── http ├── retryexecutor │ ├── retryexecutor_test.go │ └── retryexecutor.go └── filestream │ ├── filestream_test.go │ └── filestream.go ├── go.sum ├── parallel ├── runner_test.go ├── bounded_runner_test.go └── runner.go └── LICENSE /io/testdata/oneline: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /io/testdata/project/dir/f: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /io/testdata/project/go.mod: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.frogbot/frogbot-config.yml: -------------------------------------------------------------------------------- 1 | - params: 2 | git: 3 | repoName: gofrog 4 | branches: 5 | - dev -------------------------------------------------------------------------------- /unarchive/testdata/archives/win.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/win.tar -------------------------------------------------------------------------------- /unarchive/testdata/archives/win.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/win.zip -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/rel.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/rel.zip -------------------------------------------------------------------------------- 
/unarchive/testdata/archives/unix.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/unix.zip -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/abs.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/abs.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/rel.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/rel.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/unix.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/unix.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/win.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/win.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/dot-dir.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/dot-dir.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-rel.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-rel.zip 
-------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-abs.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-abs.zip -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-rel.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-rel.zip -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-uncle.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-uncle.zip -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-cousin.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-cousin.zip -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-rel.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-rel.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-abs.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-abs.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-rel.tar.gz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-rel.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-cousin.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-cousin.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/strip-components.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/strip-components.zip -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/hardlink-tilde.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/hardlink-tilde.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-uncle.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/zipslip/softlink-uncle.tar.gz -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-uncle-file.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-uncle-file.zip -------------------------------------------------------------------------------- /unarchive/testdata/archives/strip-components.tar.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/strip-components.tar.gz -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ❓ Question 3 | about: Ask a question 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-uncle-file.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jfrog/gofrog/HEAD/unarchive/testdata/archives/softlink-uncle-file.tar.gz -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # 📖 Guidelines 2 | 3 | - If the existing tests do not already cover your changes, please add tests. 4 | - Pull requests should be created on the _dev_ branch. 5 | - Please run `go fmt ./...` for formatting the code before submitting the pull request. 6 | 7 | # 🕵️ Running Tests 8 | 9 | To run the tests, execute the following command: 10 | 11 | ```sh 12 | go test -v ./... 13 | ``` 14 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - [ ] All [tests](https://github.com/jfrog/gofrog#tests) passed. If this feature is not already covered by the tests, I added new tests. 2 | - [ ] This pull request is on the dev branch. 3 | - [ ] I used gofmt for formatting the code before submitting the pull request. 
4 | - [ ] I labeled this pull request with one of the following: 'breaking change', 'new feature', 'bug', or 'ignore for release' 5 | 6 | --- 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | .idea 20 | *.iml 21 | _testmain.go 22 | 23 | *.exe 24 | *.test 25 | *.prof 26 | 27 | # IDEs 28 | .vscode 29 | .idea 30 | *.iml 31 | 32 | # IOS 33 | *.DS_Store -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | exclude: 3 | labels: 4 | - ignore for release 5 | categories: 6 | - title: Breaking Changes 🚨 7 | labels: 8 | - breaking change 9 | - title: Exciting New Features 🎉 10 | labels: 11 | - new feature 12 | - title: Improvements 🌱 13 | labels: 14 | - improvement 15 | - title: Bug Fixes 🛠 16 | labels: 17 | - bug 18 | - title: Other Changes 📚 19 | labels: 20 | - "*" 21 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | on: 3 | push: 4 | branches: 5 | - "**" 6 | tags-ignore: 7 | - "**" 8 | pull_request: 9 | jobs: 10 | test: 11 | runs-on: ${{ matrix.os }}-latest 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | os: [ubuntu, windows, macOS] 16 | steps: 17 | - name: Checkout Source 18 | uses: actions/checkout@v4 19 | 20 | - name: Setup Go with cache 21 | uses: jfrog/.github/actions/install-go-with-cache@main 22 | 23 | - name: Tests 24 
| run: go test -v -race -covermode atomic -coverprofile=covprofile ./... 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🐞 Bug report 3 | about: Create a report to help us improve 4 | title: "" 5 | labels: bug 6 | assignees: "" 7 | --- 8 | 9 | **Describe the bug** 10 | A clear and concise description of what the bug is. 11 | 12 | **To Reproduce** 13 | Steps to reproduce the behavior 14 | 15 | **Expected behavior** 16 | A clear and concise description of what you expected to happen. 17 | 18 | **Screenshots** 19 | If applicable, add screenshots to help explain your problem. 20 | 21 | **Versions** 22 | 23 | Gofrog version: 24 | 25 | **Additional context** 26 | Add any other context about the problem here. 27 | -------------------------------------------------------------------------------- /.github/workflows/cla.yml: -------------------------------------------------------------------------------- 1 | name: "CLA Assistant" 2 | on: 3 | # issue_comment triggers this action on each comment on issues and pull requests 4 | issue_comment: 5 | types: [created] 6 | pull_request_target: 7 | types: [opened,synchronize] 8 | 9 | jobs: 10 | CLAssistant: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Run CLA Check 14 | uses: jfrog/.github/actions/cla@main 15 | with: 16 | event_comment_body: ${{ github.event.comment.body }} 17 | event_name: ${{ github.event_name }} 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | CLA_SIGN_TOKEN: ${{ secrets.CLA_SIGN_TOKEN }} -------------------------------------------------------------------------------- /io/testdata/twolines: -------------------------------------------------------------------------------- 1 | 2 | 781d76ae5f48ddd1674161acd90024758fd6e14c 
{"key":"make-fetch-happen:request-cache:https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz","integrity":"sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==","time":1644222657888,"size":2821,"metadata":{"time":1644222657863,"url":"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz","reqHeaders":{},"resHeaders":{"cache-control":"public, immutable, max-age=31557600","content-type":"application/octet-stream","date":"Mon, 07 Feb 2022 08:30:58 GMT","etag":"\"69fd1c7bc68c850139d20aefed955a71\"","last-modified":"Fri, 04 Oct 2019 11:29:17 GMT","vary":"Accept-Encoding"},"options":{"compress":true}}} -------------------------------------------------------------------------------- /io/testdata/threelines: -------------------------------------------------------------------------------- 1 | 2 | 781d76ae5f48ddd1674161acd90024758fd6e14c {"key":"make-fetch-happen:request-cache:https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz","integrity":"sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==","time":1644222657888,"size":2821,"metadata":{"time":1644222657863,"url":"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz","reqHeaders":{},"resHeaders":{"cache-control":"public, immutable, max-age=31557600","content-type":"application/octet-stream","date":"Mon, 07 Feb 2022 08:30:58 GMT","etag":"\"69fd1c7bc68c850139d20aefed955a71\"","last-modified":"Fri, 04 Oct 2019 11:29:17 GMT","vary":"Accept-Encoding"},"options":{"compress":true}}} 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ⭐️ Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: feature request 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? 
Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like to see** 14 | A clear and concise description of the new feature. 15 | 16 | **Describe alternatives you've considered** 17 | If applicable, a clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/analysis.yml: -------------------------------------------------------------------------------- 1 | name: Static Code Analysis 2 | on: 3 | push: 4 | branches: 5 | - "**" 6 | tags-ignore: 7 | - "**" 8 | pull_request: 9 | jobs: 10 | Static-Check: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout Source 14 | uses: actions/checkout@v4 15 | 16 | - name: Setup Go with cache 17 | uses: jfrog/.github/actions/install-go-with-cache@main 18 | 19 | - name: Run golangci lint 20 | uses: jfrog/.github/actions/golangci-lint@main 21 | Go-Sec: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Checkout Source 25 | uses: actions/checkout@v4 26 | 27 | - name: Setup Go with cache 28 | uses: jfrog/.github/actions/install-go-with-cache@main 29 | 30 | - name: Run Go-Sec scanner 31 | uses: jfrog/.github/actions/gosec-scanner@main -------------------------------------------------------------------------------- /crypto/key_generator.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "errors" 8 | "fmt" 9 | ) 10 | 11 | func GenerateRandomKeyString(keySize int) (string, error) { 12 | b, err := generateRandomBytes(keySize) 13 | return hex.EncodeToString(b), err 14 | } 15 | 16 | // generate random key with specific size 17 | func generateRandomBytes(n int) 
([]byte, error) { 18 | b := make([]byte, n) 19 | _, err := rand.Read(b) 20 | // Note that err == nil only if we read len(b) bytes. 21 | if err != nil { 22 | return nil, err 23 | } 24 | return b, nil 25 | } 26 | 27 | // keyId is first 6 chars of hashed(sha256) signing key 28 | func GenerateKeyId(key string) (string, error) { 29 | if len(key) == 0 { 30 | return "", errors.New("signing key is empty") 31 | } 32 | h := sha256.New() 33 | h.Write([]byte(key)) 34 | sha256 := fmt.Sprintf("%x", h.Sum(nil)) 35 | return sha256[:6], nil 36 | } 37 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/jfrog/gofrog 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/gookit/color v1.5.4 7 | github.com/jfrog/archiver/v3 v3.6.0 8 | github.com/minio/sha256-simd v1.0.1 9 | github.com/pkg/errors v0.9.1 10 | github.com/stretchr/testify v1.9.0 11 | ) 12 | 13 | require ( 14 | github.com/andybalholm/brotli v1.1.0 // indirect 15 | github.com/davecgh/go-spew v1.1.1 // indirect 16 | github.com/dsnet/compress v0.0.1 // indirect 17 | github.com/golang/snappy v0.0.4 // indirect 18 | github.com/klauspost/compress v1.17.4 // indirect 19 | github.com/klauspost/cpuid/v2 v2.2.3 // indirect 20 | github.com/klauspost/pgzip v1.2.6 // indirect 21 | github.com/nwaples/rardecode v1.1.3 // indirect 22 | github.com/pierrec/lz4/v4 v4.1.21 // indirect 23 | github.com/pmezard/go-difflib v1.0.0 // indirect 24 | github.com/ulikunitz/xz v0.5.11 // indirect 25 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect 26 | github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect 27 | golang.org/x/sync v0.6.0 28 | golang.org/x/sys v0.17.0 // indirect 29 | gopkg.in/yaml.v3 v3.0.1 // indirect 30 | ) 31 | -------------------------------------------------------------------------------- /stringutils/wildcards_test.go: 
-------------------------------------------------------------------------------- 1 | package stringutils 2 | 3 | import ( 4 | "fmt" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestMatchWildcardPattern(t *testing.T) { 10 | tests := []struct { 11 | pattern string 12 | str string 13 | expectedMatched bool 14 | expectError bool 15 | }{ 16 | {"abc", "abc", true, false}, 17 | {"abc", "abcd", false, false}, 18 | {"abc", "ab", false, false}, 19 | {"abc*", "abc", true, false}, 20 | {"abc*", "abcd", true, false}, 21 | {"abc*fg", "abcdefg", true, false}, 22 | {"abc*fg", "abdefg", false, false}, 23 | {"a*c*fg", "abcdefg", true, false}, 24 | {"a*c*fg", "abdefg", false, false}, 25 | {"a*[c", "ab[c", true, false}, 26 | } 27 | 28 | for _, tc := range tests { 29 | t.Run(fmt.Sprintf("pattern: %s, str: %s", tc.pattern, tc.str), func(t *testing.T) { 30 | actualMatched, err := MatchWildcardPattern(tc.pattern, tc.str) 31 | if tc.expectError { 32 | assert.Error(t, err) 33 | return 34 | } 35 | assert.NoError(t, err) 36 | assert.Equal(t, tc.expectedMatched, actualMatched) 37 | }) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /io/multiwriter.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "golang.org/x/sync/errgroup" 8 | ) 9 | 10 | var ErrShortWrite = errors.New("the number of bytes written is less than the length of the input") 11 | 12 | type asyncMultiWriter struct { 13 | writers []io.Writer 14 | limit int 15 | } 16 | 17 | // AsyncMultiWriter creates a writer that duplicates its writes to all the 18 | // provided writers asynchronous 19 | func AsyncMultiWriter(limit int, writers ...io.Writer) io.Writer { 20 | w := make([]io.Writer, len(writers)) 21 | copy(w, writers) 22 | return &asyncMultiWriter{writers: w, limit: limit} 23 | } 24 | 25 | // Writes data asynchronously to each writer and waits for all of them 
to complete. 26 | // In case of an error, the writing will not complete. 27 | func (amw *asyncMultiWriter) Write(p []byte) (int, error) { 28 | eg := errgroup.Group{} 29 | eg.SetLimit(amw.limit) 30 | for _, w := range amw.writers { 31 | currentWriter := w 32 | eg.Go(func() error { 33 | n, err := currentWriter.Write(p) 34 | if err != nil { 35 | return err 36 | } 37 | if n != len(p) { 38 | return ErrShortWrite 39 | } 40 | return nil 41 | }) 42 | } 43 | 44 | return len(p), eg.Wait() 45 | } 46 | -------------------------------------------------------------------------------- /io/multiwriter_test.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestAsyncMultiWriter(t *testing.T) { 12 | for _, limit := range []int{1, 2} { 13 | var buf1, buf2 bytes.Buffer 14 | multiWriter := AsyncMultiWriter(limit, &buf1, &buf2) 15 | 16 | data := []byte("test data") 17 | n, err := multiWriter.Write(data) 18 | assert.NoError(t, err) 19 | assert.Equal(t, len(data), n) 20 | 21 | // Check if data is correctly written to both writers 22 | assert.Equal(t, string(data), buf1.String()) 23 | assert.Equal(t, string(data), buf2.String()) 24 | } 25 | } 26 | 27 | func TestAsyncMultiWriter_Error(t *testing.T) { 28 | expectedErr := errors.New("write error") 29 | 30 | // Mock writer that always returns an error 31 | mockWriter := &mockWriter{writeErr: expectedErr} 32 | multiWriter := AsyncMultiWriter(2, mockWriter) 33 | 34 | _, err := multiWriter.Write([]byte("test data")) 35 | assert.Equal(t, expectedErr, err) 36 | } 37 | 38 | // Mock writer to simulate Write errors 39 | type mockWriter struct { 40 | writeErr error 41 | } 42 | 43 | func (m *mockWriter) Write(p []byte) (int, error) { 44 | return 0, m.writeErr 45 | } 46 | -------------------------------------------------------------------------------- /stringutils/wildcards.go: 
-------------------------------------------------------------------------------- 1 | package stringutils 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | // MatchWildcardPattern returns whether str matches the pattern, which may contain wildcards. 9 | func MatchWildcardPattern(pattern string, str string) (matched bool, err error) { 10 | regexpPattern := WildcardPatternToRegExp(pattern) 11 | r, err := regexp.Compile(regexpPattern) 12 | if err != nil { 13 | return false, err 14 | } 15 | return r.MatchString(str), nil 16 | } 17 | 18 | // WildcardPatternToRegExp converts a wildcard pattern to a regular expression. 19 | func WildcardPatternToRegExp(localPath string) string { 20 | localPath = EscapeSpecialChars(localPath) 21 | var wildcard = ".*" 22 | localPath = strings.ReplaceAll(localPath, "*", wildcard) 23 | if strings.HasSuffix(localPath, "/") || strings.HasSuffix(localPath, "\\") { 24 | localPath += wildcard 25 | } 26 | return "^" + localPath + "$" 27 | } 28 | 29 | func EscapeSpecialChars(path string) string { 30 | // We don't replace other special characters (like parenthesis) because they're used in the placeholders logic of the JFrog CLI. 31 | var specialChars = []string{".", "^", "$", "+", "[", "]"} 32 | for _, char := range specialChars { 33 | path = strings.ReplaceAll(path, char, "\\"+char) 34 | } 35 | return path 36 | } 37 | -------------------------------------------------------------------------------- /crypto/checksum_test.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const ( 11 | fileContent = "Why did the robot bring a ladder to the bar? It heard the drinks were on the house." 
12 | expectedMd5 = "70bd6370a86813f2504020281e4a2e2e" 13 | expectedSha1 = "8c3578ac814c9f02803001a5d3e5d78a7fd0f9cc" 14 | expectedSha256 = "093d901b28a59f7d95921f3f4fb97a03fe7a1cf8670507ffb1d6f9a01b3e890a" 15 | ) 16 | 17 | func TestGetFileChecksums(t *testing.T) { 18 | // Create a temporary file 19 | tempFile, err := os.CreateTemp("", "TestGetFileChecksums") 20 | assert.NoError(t, err) 21 | defer func() { 22 | assert.NoError(t, tempFile.Close()) 23 | assert.NoError(t, os.Remove(tempFile.Name())) 24 | }() 25 | 26 | // Write something to the file 27 | _, err = tempFile.Write([]byte(fileContent)) 28 | assert.NoError(t, err) 29 | 30 | // Calculate only sha1 and match 31 | checksums, err := GetFileChecksums(tempFile.Name(), SHA1) 32 | assert.NoError(t, err) 33 | assert.Len(t, checksums, 1) 34 | assert.Equal(t, expectedSha1, checksums[SHA1]) 35 | 36 | // Calculate md5, sha1 and sha256 checksums and match 37 | checksums, err = GetFileChecksums(tempFile.Name()) 38 | assert.NoError(t, err) 39 | assert.Equal(t, expectedMd5, checksums[MD5]) 40 | assert.Equal(t, expectedSha1, checksums[SHA1]) 41 | assert.Equal(t, expectedSha256, checksums[SHA256]) 42 | } 43 | -------------------------------------------------------------------------------- /safeconvert/int.go: -------------------------------------------------------------------------------- 1 | package safeconvert 2 | 3 | import ( 4 | "errors" 5 | "math" 6 | ) 7 | 8 | // IntToUint converts int to uint safely, checking for negative values. 9 | func IntToUint(i int) (uint, error) { 10 | if i < 0 { 11 | return 0, errors.New("cannot convert negative int to uint") 12 | } 13 | return uint(i), nil 14 | } 15 | 16 | // UintToInt converts uint to int safely, checking for overflow. 
17 | func UintToInt(u uint) (int, error) { 18 | if u > math.MaxInt { 19 | return 0, errors.New("integer overflow: uint value exceeds max int value") 20 | } 21 | return int(u), nil 22 | } 23 | 24 | // Int64ToUint64 converts int64 to uint64 safely, checking for negative values. 25 | func Int64ToUint64(i int64) (uint64, error) { 26 | if i < 0 { 27 | return 0, errors.New("cannot convert negative int64 to uint64") 28 | } 29 | return uint64(i), nil 30 | } 31 | 32 | // Uint64ToInt64 converts uint64 to int64 safely, checking for overflow. 33 | func Uint64ToInt64(u uint64) (int64, error) { 34 | if u > math.MaxInt64 { 35 | return 0, errors.New("integer overflow: uint64 value exceeds max int64 value") 36 | } 37 | return int64(u), nil 38 | } 39 | 40 | // SafeUint64ToInt converts uint64 to int safely, checking for overflow. 41 | func Uint64ToInt(u uint64) (int, error) { 42 | if u > uint64(math.MaxInt) { 43 | return 0, errors.New("integer overflow: uint64 value exceeds max int value") 44 | } 45 | return int(u), nil 46 | } 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Go Frog 2 | 3 | A collection of Go utilities used by JFrog products. 
4 | 5 | ## Project status 6 | 7 | | Branch | Status | 8 | | :----: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | 9 | | master | [![Tests](https://github.com/jfrog/gofrog/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/jfrog/gofrog/actions/workflows/test.yml?query=branch%3Amaster) [![Static Analysis](https://github.com/jfrog/gofrog/actions/workflows/analysis.yml/badge.svg?branch=master)](https://github.com/jfrog/gofrog/actions/workflows/analysis.yml) | 10 | | dev | [![Tests](https://github.com/jfrog/gofrog/actions/workflows/test.yml/badge.svg?branch=dev)](https://github.com/jfrog/gofrog/actions/workflows/test.yml?query=branch%3Adev) [![Static Analysis](https://github.com/jfrog/gofrog/actions/workflows/analysis.yml/badge.svg?branch=dev)](https://github.com/jfrog/gofrog/actions/workflows/analysis.yml) | 11 | -------------------------------------------------------------------------------- /log/logger_test.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func SetEnvironmentVariableForLogLevel(t *testing.T, level string) { 11 | assert.NoError(t, os.Setenv(LogLevelEnv, level)) 12 | } 13 | 14 | func ResetEnvironmentVariableForLogLevel(t *testing.T) { 15 | assert.NoError(t, os.Unsetenv(LogLevelEnv)) 16 | } 17 | 18 | func TestLogger_WithDefaultInfoLogLevel_LogsInfoAndAbove(t *testing.T) { 19 | // Ensure default INFO level 20 | SetEnvironmentVariableForLogLevel(t, "") 21 | defer ResetEnvironmentVariableForLogLevel(t) 22 | 23 | logger := NewLogger(getLogLevel()) 24 | 25 | assert.Equal(t, INFO, 
logger.GetLogLevel()) 26 | } 27 | 28 | func TestLogger_WithEnvironmentVariableSetToDebug_LogsAllLevels(t *testing.T) { 29 | SetEnvironmentVariableForLogLevel(t, "DEBUG") 30 | defer ResetEnvironmentVariableForLogLevel(t) 31 | 32 | logger := NewLogger(getLogLevel()) 33 | 34 | assert.Equal(t, DEBUG, logger.GetLogLevel()) 35 | } 36 | 37 | func TestLogger_WithEnvironmentVariableSetToError_LogsOnlyErrors(t *testing.T) { 38 | SetEnvironmentVariableForLogLevel(t, "ERROR") 39 | defer ResetEnvironmentVariableForLogLevel(t) 40 | 41 | logger := NewLogger(getLogLevel()) 42 | 43 | assert.Equal(t, ERROR, logger.GetLogLevel()) 44 | } 45 | 46 | func TestLogger_SetLogLevelChangesLogLevelAtRuntime(t *testing.T) { 47 | logger := NewLogger(INFO) 48 | logger.SetLogLevel(DEBUG) 49 | 50 | assert.Equal(t, DEBUG, logger.GetLogLevel()) 51 | } 52 | 53 | func TestLogger_ConcurrentAccessToSetLogLevel_DoesNotPanic(t *testing.T) { 54 | logger := NewLogger(INFO) 55 | 56 | done := make(chan bool) 57 | for i := range 10 { 58 | go func() { 59 | logger.SetLogLevel(LevelType(i % 4)) 60 | done <- true 61 | }() 62 | } 63 | 64 | for range 10 { 65 | <-done 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crypto/aes_encryption_test.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "crypto/sha256" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | var signingKey, keyId string 11 | 12 | func init() { 13 | var err error 14 | signingKey, err = GenerateRandomKeyString(32) 15 | if err != nil { 16 | panic("Failed to generate signingKey") 17 | } 18 | h := sha256.New() 19 | sha256 := fmt.Sprintf("%x", h.Sum(nil)) 20 | keyId = sha256[:6] 21 | } 22 | 23 | func TestDecryptEncryptWithValidKey(t *testing.T) { 24 | var text = "Text to encrypt" 25 | cipherText, err := Encrypt(text, signingKey, keyId) 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | clearText, err := Decrypt(cipherText, 
signingKey, keyId) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | if clearText != "Text to encrypt" { 34 | t.Fatal("Expect cipher text to match 8001f1$aes256$ArMu9srTA6prKSoIYLctw87TQy7xX6tex1heE43QH7NAgGr4Z-TjA1sFrw==") 35 | } 36 | } 37 | 38 | func TestDecryptUnformattedCipherText(t *testing.T) { 39 | var text = "Text to encrypt" 40 | FormattedCipherText, err := Encrypt(text, signingKey, keyId) 41 | if err != nil { 42 | t.Fatal(err) 43 | } 44 | formatEncryption := keyId + "$" + "aes256" + "$" 45 | // Keep cipher text only 46 | cipherText := strings.ReplaceAll(FormattedCipherText, formatEncryption, "") 47 | _, err = Decrypt(cipherText, signingKey, keyId) 48 | if err == nil || (err != nil && err.Error() != "cipher text is not well formatted") { 49 | t.Fatal("Expect error Cipher text is not well formatted") 50 | } 51 | } 52 | 53 | func TestIsTextEncrypted(t *testing.T) { 54 | var text = "Text to encrypt with very long text" 55 | formatEncryption := keyId + "$" + "aes256" + "$" + text 56 | // Keep cipher text only 57 | isEncrypted, err := IsTextEncrypted(formatEncryption, signingKey, keyId) 58 | if isEncrypted { 59 | t.Fatal(err) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /datastructures/set_test.go: -------------------------------------------------------------------------------- 1 | package datastructures 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func generateNewSetWithData() *Set[int] { 9 | set := MakeSet[int]() 10 | set.Add(3) 11 | set.Add(5) 12 | set.Add(7) 13 | 14 | return set 15 | } 16 | 17 | func TestSetExistsAndAdd(t *testing.T) { 18 | testSet := generateNewSetWithData() 19 | assert.True(t, testSet.Exists(3)) 20 | assert.False(t, testSet.Exists(4)) 21 | } 22 | 23 | func TestSetRemove(t *testing.T) { 24 | testSet := generateNewSetWithData() 25 | assert.NoError(t, testSet.Remove(5)) 26 | assert.Equal(t, testSet.Size(), 2) 27 | assert.False(t, 
testSet.Exists(5)) 28 | } 29 | 30 | func TestSetToSlice(t *testing.T) { 31 | testSet := generateNewSetWithData() 32 | slice := testSet.ToSlice() 33 | assert.Equal(t, len(slice), 3) 34 | assert.Contains(t, slice, 3) 35 | assert.Contains(t, slice, 5) 36 | assert.Contains(t, slice, 7) 37 | } 38 | 39 | func TestMakeSetFromElements(t *testing.T) { 40 | intSlice := []int{1, 2, 3} 41 | intSet := MakeSetFromElements(intSlice...) 42 | assert.ElementsMatch(t, intSet.ToSlice(), intSlice) 43 | 44 | stringSlice := []string{"frog", "frogger", "froggy"} 45 | stringSet := MakeSetFromElements(stringSlice...) 46 | assert.ElementsMatch(t, stringSet.ToSlice(), stringSlice) 47 | } 48 | 49 | func TestSetsIntersection(t *testing.T) { 50 | testSet := generateNewSetWithData() 51 | anotherSet := MakeSet[int]() 52 | intersectedSet := testSet.Intersect(anotherSet) 53 | assert.Equal(t, 0, intersectedSet.Size()) 54 | 55 | anotherSet.Add(3) 56 | intersectedSet = testSet.Intersect(anotherSet) 57 | assert.Equal(t, 1, intersectedSet.Size()) 58 | } 59 | 60 | func TestSetsUnion(t *testing.T) { 61 | testSet := generateNewSetWithData() 62 | anotherSet := MakeSet[int]() 63 | unionedSet := testSet.Union(anotherSet) 64 | assert.Equal(t, 3, unionedSet.Size()) 65 | 66 | anotherSet.Add(4) 67 | unionedSet = testSet.Union(anotherSet) 68 | assert.Equal(t, 4, unionedSet.Size()) 69 | } 70 | -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/abs.tar: -------------------------------------------------------------------------------- 1 | /tmp/bla/file000644 000767 000000 00000000006 14120641257 013754 0ustar00yahaviwheel000000 000000 Hello 2 | -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/rel.tar: -------------------------------------------------------------------------------- 1 | ../file000644 000767 000000 00000000006 14120641257 012552 0ustar00yahaviwheel000000 000000 Hello 2 | 
-------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-abs.tar: -------------------------------------------------------------------------------- 1 | softlink-abs/000755 000767 000000 00000000000 14120642126 014071 5ustar00yahaviwheel000000 000000 softlink-abs/softlink-abs000755 000767 000000 00000000000 14120642015 020655 2/tmp/bla/fileustar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-rel.tar: -------------------------------------------------------------------------------- 1 | softlink-rel/000755 000767 000000 00000000000 14274400344 014112 5ustar00yahaviwheel000000 000000 softlink-rel/softlink-rel000755 000767 000000 00000000000 14274400344 017727 2../../fileustar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /lru/lru.go: -------------------------------------------------------------------------------- 1 | package lru 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type Cache struct { 9 | cache *cacheBase 10 | lock sync.Mutex 11 | noSync bool 12 | } 13 | 14 | func New(size int, options ...func(*Cache)) *Cache { 15 | c := &Cache{cache: newCacheBase(size)} 16 | for _, option := range options { 17 | option(c) 18 | } 19 | return c 20 | } 21 | 22 | func WithExpiry(expiry time.Duration) func(c *Cache) { 23 | return func(c *Cache) { 24 | c.cache.Expiry = expiry 25 | } 26 | } 27 | 28 | func WithEvictionCallback(onEvicted func(key string, value interface{})) func(c *Cache) { 29 | return func(c *Cache) { 30 | c.cache.OnEvicted = onEvicted 31 | } 32 | } 33 | 34 | func WithoutSync() func(c *Cache) { 35 | return func(c *Cache) { 36 | c.noSync = true 37 | } 38 | } 39 | 40 | func (c *Cache) Add(key string, value interface{}) { 41 | if !c.noSync { 42 | c.lock.Lock() 43 | defer c.lock.Unlock() 44 | } 45 | c.cache.Add(key, value) 46 | } 47 | 48 | func (c 
*Cache) Get(key string) (value interface{}, ok bool) { 49 | if !c.noSync { 50 | c.lock.Lock() 51 | defer c.lock.Unlock() 52 | } 53 | return c.cache.Get(key) 54 | } 55 | 56 | // Updates element's value without updating its "Least-Recently-Used" status 57 | func (c *Cache) UpdateElement(key string, value interface{}) { 58 | if !c.noSync { 59 | c.lock.Lock() 60 | defer c.lock.Unlock() 61 | } 62 | c.cache.UpdateElement(key, value) 63 | 64 | } 65 | 66 | func (c *Cache) Remove(key string) { 67 | if !c.noSync { 68 | c.lock.Lock() 69 | defer c.lock.Unlock() 70 | } 71 | c.cache.Remove(key) 72 | } 73 | 74 | func (c *Cache) RemoveOldest() { 75 | if !c.noSync { 76 | c.lock.Lock() 77 | defer c.lock.Unlock() 78 | } 79 | c.cache.RemoveOldest() 80 | } 81 | 82 | func (c *Cache) Len() int { 83 | if !c.noSync { 84 | c.lock.Lock() 85 | defer c.lock.Unlock() 86 | } 87 | return c.cache.Len() 88 | } 89 | 90 | func (c *Cache) Clear() { 91 | if !c.noSync { 92 | c.lock.Lock() 93 | defer c.lock.Unlock() 94 | } 95 | c.cache.Clear() 96 | } 97 | -------------------------------------------------------------------------------- /datastructures/set.go: -------------------------------------------------------------------------------- 1 | package datastructures 2 | 3 | import "fmt" 4 | 5 | type Set[T comparable] struct { 6 | container map[T]struct{} 7 | } 8 | 9 | // MakeSet initialize the set 10 | func MakeSet[T comparable]() *Set[T] { 11 | return &Set[T]{ 12 | container: make(map[T]struct{}), 13 | } 14 | } 15 | 16 | func MakeSetFromElements[T comparable](elements ...T) *Set[T] { 17 | set := MakeSet[T]() 18 | for _, element := range elements { 19 | set.Add(element) 20 | } 21 | return set 22 | } 23 | 24 | func (set *Set[T]) Exists(key T) bool { 25 | _, exists := set.container[key] 26 | return exists 27 | } 28 | 29 | func (set *Set[T]) Add(key T) { 30 | set.container[key] = struct{}{} 31 | } 32 | 33 | func (set *Set[T]) AddElements(elements ...T) { 34 | for _, element := range elements { 35 | 
set.Add(element) 36 | } 37 | } 38 | 39 | func (set *Set[T]) Remove(key T) error { 40 | _, exists := set.container[key] 41 | if !exists { 42 | return fmt.Errorf("remove Error: item doesn't exist in set") 43 | } 44 | delete(set.container, key) 45 | return nil 46 | } 47 | 48 | func (set *Set[T]) Size() int { 49 | return len(set.container) 50 | } 51 | 52 | func (set *Set[T]) ToSlice() []T { 53 | var slice []T 54 | for key := range set.container { 55 | slice = append(slice, key) 56 | } 57 | 58 | return slice 59 | } 60 | 61 | func (set *Set[T]) Intersect(setB *Set[T]) *Set[T] { 62 | intersectSet := MakeSet[T]() 63 | if setB == nil { 64 | return intersectSet 65 | } 66 | bigSet, smallSet := setB, set 67 | if set.Size() > setB.Size() { 68 | bigSet, smallSet = set, setB 69 | } 70 | 71 | for key := range smallSet.container { 72 | if bigSet.Exists(key) { 73 | intersectSet.Add(key) 74 | } 75 | } 76 | return intersectSet 77 | } 78 | 79 | func (set *Set[T]) Union(setB *Set[T]) *Set[T] { 80 | if setB == nil { 81 | return set 82 | } 83 | unionSet := MakeSet[T]() 84 | for key := range set.container { 85 | unionSet.Add(key) 86 | } 87 | for key := range setB.container { 88 | unionSet.Add(key) 89 | } 90 | return unionSet 91 | } 92 | -------------------------------------------------------------------------------- /unarchive/testdata/archives/strip-components.tar: -------------------------------------------------------------------------------- 1 | top_folder/000755 000765 000024 00000000000 14546533546 013531 5ustar00omerzstaff000000 000000 top_folder/nested_folder_1/000755 000765 000024 00000000000 14546533537 016566 5ustar00omerzstaff000000 000000 top_folder/nested_folder_2/000755 000765 000024 00000000000 14546533546 016567 5ustar00omerzstaff000000 000000 -------------------------------------------------------------------------------- /version/version_test.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import "testing" 
4 | 5 | func TestCompare(t *testing.T) { 6 | tests := []struct { 7 | ver1 string 8 | ver2 string 9 | expected int 10 | }{ 11 | {"1.0.0", "1.0.0", 0}, 12 | {"1.0.1", "1.0.0", 1}, 13 | {"5.10.0", "5.5.2", 1}, 14 | {"5.5.2", "5.15.2", -1}, 15 | {"5.6.2", "5.50.2", -1}, 16 | {"5.5.2", "5.0.2", 1}, 17 | {"15.5.2", "6.0.2", 1}, 18 | {"51.5.2", "6.0.2", 1}, 19 | {"5.0.3", "5.0.20", -1}, 20 | {"5.0.20", "5.0.3", 1}, 21 | {"1.0.0", "1.0.1", -1}, 22 | {"1.0.x-SNAPSHOT", "1.0.x-SNAPSHOT", 0}, 23 | {"1.1.x-SNAPSHOT", "1.0.x-SNAPSHOT", 1}, 24 | {"2.0.x-SNAPSHOT", "1.0.x-SNAPSHOT", 1}, 25 | {"1.0", "1.0.x-SNAPSHOT", -1}, 26 | {"1.1", "1.0.x-SNAPSHOT", 1}, 27 | {"1.0.x-SNAPSHOT", "1.0", 1}, 28 | {"1.0.x-SNAPSHOT", "1.1", -1}, 29 | {"1", "2", -1}, 30 | {"1.0", "2.0", -1}, 31 | {"2.1", "2.0", 1}, 32 | {"2.a", "2.b", -1}, 33 | {"b", "a", 1}, 34 | {"1.0", "1", 0}, 35 | {"1.1", "1", 1}, 36 | {"1", "1.1", -1}, 37 | {"", "1", -1}, 38 | {"1", "", 1}, 39 | {"6.x-SNAPSHOT", "5.5.2", 1}, 40 | {"6.x-SNAPSHOT", "6.5.0", -1}, 41 | {"6.5.x-SNAPSHOT", "6.5.2", -1}, 42 | {"7.x-SNAPSHOT", "6.x-SNAPSHOT", 1}, 43 | {"6.1.x-SNAPSHOT", "6.2.x-SNAPSHOT", -1}, 44 | {"go1.13", "go1.13.0", 0}, 45 | {"go1.13", "go1.14.1", -1}, 46 | {"go1.13", "go1.12.100", 1}, 47 | } 48 | for _, test := range tests { 49 | t.Run(test.ver1+":"+test.ver2, func(t *testing.T) { 50 | version := Version{version: test.ver2} 51 | result := version.Compare(test.ver1) 52 | if result != test.expected { 53 | t.Error("ver1:", test.ver1, "ver2:", test.ver2, "Expecting:", test.expected, "got:", result) 54 | } 55 | }) 56 | } 57 | } 58 | 59 | func TestAtLeast(t *testing.T) { 60 | tests := []struct { 61 | ver1 string 62 | ver2 string 63 | expected bool 64 | }{ 65 | {"1.0.0", "1.0.0", true}, 66 | {"1.0.1", "1.0.0", true}, 67 | {"5.10.0", "5.5.2", true}, 68 | {"1.0.x-SNAPSHOT", "1.0.x-SNAPSHOT", true}, 69 | {"1.1.x-SNAPSHOT", "1.0.x-SNAPSHOT", true}, 70 | {"2.0.x-SNAPSHOT", "1.0.x-SNAPSHOT", true}, 71 | {"development", "5.5", true}, 72 | 
{"6.2.0", "6.5.0", false}, 73 | {"6.6.0", "6.8.0", false}, 74 | } 75 | for _, test := range tests { 76 | t.Run(test.ver1+":"+test.ver2, func(t *testing.T) { 77 | version := Version{version: test.ver1} 78 | result := version.AtLeast(test.ver2) 79 | if result != test.expected { 80 | t.Error("ver1:", test.ver1, "ver2:", test.ver2, "Expecting:", test.expected, "got:", result) 81 | } 82 | }) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /fanout/reader.go: -------------------------------------------------------------------------------- 1 | package fanout 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | ) 7 | 8 | // A reader that emits its read to multiple consumers using an io.Reader Read(p []byte) (int, error) func 9 | type Reader struct { 10 | reader io.Reader 11 | consumers []Consumer 12 | pipeReaders []*io.PipeReader 13 | pipeWriters []*io.PipeWriter 14 | multiWriter io.Writer 15 | } 16 | 17 | type Consumer interface { 18 | Read([]byte) error 19 | } 20 | 21 | type ConsumerFunc func([]byte) error 22 | 23 | func (f ConsumerFunc) Read(p []byte) error { 24 | return f(p) 25 | } 26 | 27 | func NewReader(reader io.Reader, consumers ...Consumer) *Reader { 28 | procLen := len(consumers) 29 | pipeReaders := make([]*io.PipeReader, procLen) 30 | pipeWriters := make([]*io.PipeWriter, procLen) 31 | // Create pipe r/w for each reader 32 | for i := 0; i < procLen; i++ { 33 | pr, pw := io.Pipe() 34 | pipeReaders[i] = pr 35 | pipeWriters[i] = pw 36 | } 37 | multiWriter := io.MultiWriter(toWriters(pipeWriters)...) 
38 | return &Reader{reader: reader, consumers: consumers, pipeReaders: pipeReaders, 39 | pipeWriters: pipeWriters, multiWriter: multiWriter} 40 | } 41 | 42 | func (r *Reader) Read(p []byte) (int, error) { 43 | procLen := len(r.consumers) 44 | errs := make(chan error, procLen) 45 | done := make(chan bool, procLen) 46 | 47 | var n int 48 | var e error 49 | 50 | var wg sync.WaitGroup 51 | wg.Add(1) 52 | go func() { 53 | defer wg.Done() 54 | defer r.close() 55 | // Read from reader and fan out to the writers 56 | n, err := r.reader.Read(p) 57 | if err != nil { 58 | // Do not wrap the read err or EOF will not be handled 59 | e = err 60 | } else { 61 | _, err = r.multiWriter.Write(p[:n]) 62 | if err != nil { 63 | e = err 64 | } 65 | } 66 | }() 67 | 68 | for i, sr := range r.consumers { 69 | go func(sr Consumer, pos int) { 70 | buf := make([]byte, len(p)) 71 | l, perr := r.pipeReaders[pos].Read(buf) 72 | if perr != nil { 73 | errs <- perr 74 | return 75 | } 76 | rerr := sr.Read(buf[:l]) 77 | if rerr != nil { 78 | errs <- rerr 79 | return 80 | } 81 | done <- true 82 | }(sr, i) 83 | } 84 | 85 | wg.Wait() 86 | for range r.consumers { 87 | select { 88 | case err := <-errs: 89 | e = err 90 | case <-done: 91 | } 92 | } 93 | return n, e 94 | } 95 | 96 | func (r *Reader) close() (err error) { 97 | for _, pw := range r.pipeWriters { 98 | e := pw.Close() 99 | if err != nil { 100 | err = e 101 | } 102 | } 103 | return 104 | } 105 | -------------------------------------------------------------------------------- /http/retryexecutor/retryexecutor_test.go: -------------------------------------------------------------------------------- 1 | package retryexecutor 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "github.com/jfrog/gofrog/log" 7 | "github.com/stretchr/testify/assert" 8 | "testing" 9 | ) 10 | 11 | func TestRetryExecutorSuccess(t *testing.T) { 12 | retriesToPerform := 10 13 | breakRetriesAt := 4 14 | runCount := 0 15 | executor := RetryExecutor{ 16 | MaxRetries: 
retriesToPerform, 17 | RetriesIntervalMilliSecs: 0, 18 | ErrorMessage: "Testing RetryExecutor", 19 | ExecutionHandler: func() (bool, error) { 20 | runCount++ 21 | if runCount == breakRetriesAt { 22 | log.Warn("Breaking after", runCount-1, "retries") 23 | return false, nil 24 | } 25 | return true, nil 26 | }, 27 | } 28 | 29 | assert.NoError(t, executor.Execute()) 30 | assert.Equal(t, breakRetriesAt, runCount) 31 | } 32 | 33 | func TestRetryExecutorTimeoutWithDefaultError(t *testing.T) { 34 | retriesToPerform := 5 35 | runCount := 0 36 | 37 | executor := RetryExecutor{ 38 | MaxRetries: retriesToPerform, 39 | RetriesIntervalMilliSecs: 0, 40 | ErrorMessage: "Testing RetryExecutor", 41 | ExecutionHandler: func() (bool, error) { 42 | runCount++ 43 | return true, nil 44 | }, 45 | } 46 | 47 | assert.Equal(t, executor.Execute(), TimeoutError{executor.getTimeoutErrorMsg()}) 48 | assert.Equal(t, retriesToPerform+1, runCount) 49 | } 50 | 51 | func TestRetryExecutorTimeoutWithCustomError(t *testing.T) { 52 | retriesToPerform := 5 53 | runCount := 0 54 | 55 | executionHandler := errors.New("retry failed due to reason") 56 | 57 | executor := RetryExecutor{ 58 | MaxRetries: retriesToPerform, 59 | RetriesIntervalMilliSecs: 0, 60 | ErrorMessage: "Testing RetryExecutor", 61 | ExecutionHandler: func() (bool, error) { 62 | runCount++ 63 | return true, executionHandler 64 | }, 65 | } 66 | 67 | assert.Equal(t, executor.Execute(), executionHandler) 68 | assert.Equal(t, retriesToPerform+1, runCount) 69 | } 70 | 71 | func TestRetryExecutorCancel(t *testing.T) { 72 | retriesToPerform := 5 73 | runCount := 0 74 | 75 | retryContext, cancelFunc := context.WithCancel(context.Background()) 76 | executor := RetryExecutor{ 77 | Context: retryContext, 78 | MaxRetries: retriesToPerform, 79 | RetriesIntervalMilliSecs: 0, 80 | ErrorMessage: "Testing RetryExecutor", 81 | ExecutionHandler: func() (bool, error) { 82 | runCount++ 83 | return true, nil 84 | }, 85 | } 86 | 87 | cancelFunc() 88 | 
assert.EqualError(t, executor.Execute(), context.Canceled.Error()) 89 | assert.Equal(t, 1, runCount) 90 | } 91 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | ) 7 | 8 | type Version struct { 9 | version string 10 | } 11 | 12 | func NewVersion(version string) *Version { 13 | return &Version{version: version} 14 | } 15 | 16 | func (v *Version) GetVersion() string { 17 | return v.version 18 | } 19 | 20 | func (v *Version) SetVersion(version string) { 21 | v.version = version 22 | } 23 | 24 | // If ver1 == version returns 0 25 | // If ver1 > version returns 1 26 | // If ver1 < version returns -1 27 | func (v *Version) Compare(ver1 string) int { 28 | switch { 29 | case ver1 == v.version: 30 | return 0 31 | case ver1 == "development": 32 | return 1 33 | case v.version == "development": 34 | return -1 35 | } 36 | 37 | ver1Tokens := strings.Split(ver1, ".") 38 | ver2Tokens := strings.Split(v.version, ".") 39 | 40 | maxIndex := len(ver1Tokens) 41 | if len(ver2Tokens) > maxIndex { 42 | maxIndex = len(ver2Tokens) 43 | } 44 | 45 | for tokenIndex := 0; tokenIndex < maxIndex; tokenIndex++ { 46 | ver1Token := "0" 47 | if len(ver1Tokens) >= tokenIndex+1 { 48 | ver1Token = strings.TrimSpace(ver1Tokens[tokenIndex]) 49 | } 50 | ver2Token := "0" 51 | if len(ver2Tokens) >= tokenIndex+1 { 52 | ver2Token = strings.TrimSpace(ver2Tokens[tokenIndex]) 53 | } 54 | compare := compareTokens(ver1Token, ver2Token) 55 | if compare != 0 { 56 | return compare 57 | } 58 | } 59 | 60 | return 0 61 | } 62 | 63 | // Returns true if this version is larger or equals from the version sent as an argument. 
// splitNumberAndSuffix splits a version token into its leading numeric part
// and the remaining suffix. When the token starts with no digits at all,
// "0" is returned as the numeric part and the entire token as the suffix.
func splitNumberAndSuffix(token string) (string, string) {
	end := 0
	for end < len(token) && token[end] >= '0' && token[end] <= '9' {
		end++
	}
	if end == 0 {
		return "0", token
	}
	return token[:end], token[end:]
}
00000000000 14413757256 011735 5ustar00yahaviwheel000000 000000 a/c/000755 000767 000000 00000000000 14413757256 012157 5ustar00yahaviwheel000000 000000 a/b/000755 000767 000000 00000000000 14413757304 012150 5ustar00yahaviwheel000000 000000 a/b/softlink-uncle000755 000767 000000 00000000000 14413757304 015401 2../custar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /http/filestream/filestream_test.go: -------------------------------------------------------------------------------- 1 | package filestream 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "io" 7 | "mime/multipart" 8 | "os" 9 | "path/filepath" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | var targetDir string 17 | 18 | func TestWriteFilesToStreamAndReadFilesFromStream(t *testing.T) { 19 | sourceDir := t.TempDir() 20 | // Create 2 file to be transferred via our multipart stream 21 | file1 := &FileInfo{Name: "test1.txt", Path: filepath.Join(sourceDir, "test1.txt")} 22 | file2 := &FileInfo{Name: "test2.txt", Path: filepath.Join(sourceDir, "test2.txt")} 23 | file1Content := []byte("test content1") 24 | file2Content := []byte("test content2") 25 | assert.NoError(t, os.WriteFile(file1.Path, file1Content, 0600)) 26 | assert.NoError(t, os.WriteFile(file2.Path, file2Content, 0600)) 27 | 28 | // Create the multipart writer that will stream our files 29 | body := &bytes.Buffer{} 30 | multipartWriter := multipart.NewWriter(body) 31 | assert.NoError(t, WriteFilesToStream(multipartWriter, []*FileInfo{file1, file2})) 32 | 33 | // Create local temp dir that will store our files 34 | targetDir = t.TempDir() 35 | 36 | // Create the multipart reader that will read the files from the stream 37 | multipartReader := multipart.NewReader(body, multipartWriter.Boundary()) 38 | assert.NoError(t, ReadFilesFromStream(multipartReader, simpleFileWriter)) 39 | 40 | // Validate file 1 
transferred successfully 41 | content, err := os.ReadFile(filepath.Join(targetDir, file1.Name)) 42 | assert.NoError(t, err) 43 | assert.Equal(t, file1Content, content) 44 | 45 | // Validate file 2 transferred successfully 46 | content, err = os.ReadFile(filepath.Join(targetDir, file2.Name)) 47 | assert.NoError(t, err) 48 | assert.Equal(t, file2Content, content) 49 | } 50 | 51 | func TestWriteFilesToStreamWithError(t *testing.T) { 52 | nonExistentFileName := "nonexistent.txt" 53 | // Create a FileInfo with a non-existent file 54 | file := &FileInfo{Name: nonExistentFileName, Path: nonExistentFileName} 55 | 56 | // Create a buffer and a multipart writer 57 | body := &bytes.Buffer{} 58 | multipartWriter := multipart.NewWriter(body) 59 | 60 | // Call WriteFilesToStream and expect an error 61 | err := WriteFilesToStream(multipartWriter, []*FileInfo{file}) 62 | assert.Error(t, err) 63 | 64 | multipartReader := multipart.NewReader(body, multipartWriter.Boundary()) 65 | form, err := multipartReader.ReadForm(10 * 1024) 66 | require.NoError(t, err) 67 | 68 | assert.Len(t, form.Value[ErrorType], 1) 69 | var multipartErr MultipartError 70 | assert.NoError(t, json.Unmarshal([]byte(form.Value[ErrorType][0]), &multipartErr)) 71 | 72 | assert.Equal(t, nonExistentFileName, multipartErr.FileName) 73 | assert.NotEmpty(t, multipartErr.ErrMessage) 74 | } 75 | 76 | func simpleFileWriter(fileName string) (fileWriter []io.WriteCloser, err error) { 77 | writer, err := os.Create(filepath.Join(targetDir, fileName)) 78 | if err != nil { 79 | return nil, err 80 | } 81 | return []io.WriteCloser{writer}, nil 82 | } 83 | -------------------------------------------------------------------------------- /http/retryexecutor/retryexecutor.go: -------------------------------------------------------------------------------- 1 | package retryexecutor 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "github.com/jfrog/gofrog/log" 8 | "time" 9 | ) 10 | 11 | type ExecutionHandlerFunc func() 
(shouldRetry bool, err error) 12 | 13 | type RetryExecutor struct { 14 | // The context 15 | Context context.Context 16 | 17 | // The amount of retries to perform. 18 | MaxRetries int 19 | 20 | // Number of milliseconds to sleep between retries. 21 | RetriesIntervalMilliSecs int 22 | 23 | // Message to display when retrying. 24 | ErrorMessage string 25 | 26 | // Prefix to print at the beginning of each log. 27 | LogMsgPrefix string 28 | 29 | // ExecutionHandler is the operation to run with retries. 30 | ExecutionHandler ExecutionHandlerFunc 31 | } 32 | 33 | func (runner *RetryExecutor) Execute() error { 34 | var err error 35 | var shouldRetry bool 36 | for i := 0; i <= runner.MaxRetries; i++ { 37 | // Run ExecutionHandler 38 | shouldRetry, err = runner.ExecutionHandler() 39 | 40 | // If we should not retry, return. 41 | if !shouldRetry { 42 | return err 43 | } 44 | if cancelledErr := runner.checkCancelled(); cancelledErr != nil { 45 | return cancelledErr 46 | } 47 | 48 | // Print retry log message 49 | runner.LogRetry(i, err) 50 | 51 | // Going to sleep for RetryInterval milliseconds 52 | if runner.RetriesIntervalMilliSecs > 0 && i < runner.MaxRetries { 53 | time.Sleep(time.Millisecond * time.Duration(runner.RetriesIntervalMilliSecs)) 54 | } 55 | } 56 | // If the error is not nil, return it and log the timeout message. Otherwise, generate new error. 57 | if err != nil { 58 | log.Info(runner.getTimeoutErrorMsg()) 59 | return err 60 | } 61 | return TimeoutError{runner.getTimeoutErrorMsg()} 62 | } 63 | 64 | // Error of this type will be returned if the executor reaches timeout and no other error is returned by the execution handler. 
65 | type TimeoutError struct { 66 | errMsg string 67 | } 68 | 69 | func (retryErr TimeoutError) Error() string { 70 | return retryErr.errMsg 71 | } 72 | 73 | func (runner *RetryExecutor) getTimeoutErrorMsg() string { 74 | prefix := "" 75 | if runner.LogMsgPrefix != "" { 76 | prefix = runner.LogMsgPrefix + " " 77 | } 78 | return fmt.Sprintf("%sexecutor timeout after %v attempts with %v milliseconds wait intervals", prefix, runner.MaxRetries, runner.RetriesIntervalMilliSecs) 79 | } 80 | 81 | func (runner *RetryExecutor) LogRetry(attemptNumber int, err error) { 82 | message := fmt.Sprintf("%s(Attempt %v)", runner.LogMsgPrefix, attemptNumber+1) 83 | if runner.ErrorMessage != "" { 84 | message = fmt.Sprintf("%s - %s", message, runner.ErrorMessage) 85 | } 86 | if err != nil { 87 | message = fmt.Sprintf("%s: %s", message, err.Error()) 88 | } 89 | 90 | if err != nil || runner.ErrorMessage != "" { 91 | log.Warn(message) 92 | } else { 93 | log.Debug(message) 94 | } 95 | } 96 | 97 | func (runner *RetryExecutor) checkCancelled() error { 98 | if runner.Context == nil { 99 | return nil 100 | } 101 | contextErr := runner.Context.Err() 102 | if errors.Is(contextErr, context.Canceled) { 103 | log.Info("Retry executor was cancelled") 104 | return contextErr 105 | } 106 | return nil 107 | } 108 | -------------------------------------------------------------------------------- /lru/lru_base.go: -------------------------------------------------------------------------------- 1 | // Package go-lru implements an LRU cache. 2 | // It is based on the 3 | // LRU implementation in groupcache: 4 | // https://github.com/golang/groupcache/tree/master/lru 5 | package lru 6 | 7 | import ( 8 | "container/list" 9 | "time" 10 | ) 11 | 12 | type cacheBase struct { 13 | Expiry time.Duration 14 | Size int 15 | 16 | // OnEvicted optionally specifies a callback function to be 17 | // executed when an entry is purged from the cache. 
18 | OnEvicted func(key string, value interface{}) 19 | 20 | ll *list.List 21 | cache map[string]*list.Element 22 | } 23 | 24 | type entry struct { 25 | key string 26 | value interface{} 27 | timeInsert int64 28 | } 29 | 30 | func newCacheBase(size int) *cacheBase { 31 | return &cacheBase{Size: size, cache: make(map[string]*list.Element), ll: list.New()} 32 | } 33 | 34 | func (c *cacheBase) Add(key string, value interface{}) { 35 | var epochNow int64 36 | if c.Expiry != time.Duration(0) { 37 | epochNow = time.Now().UnixNano() / int64(time.Millisecond) 38 | } 39 | if ee, ok := c.cache[key]; ok { 40 | c.ll.MoveToFront(ee) 41 | if ent, entOk := ee.Value.(*entry); entOk { 42 | ent.value = value 43 | ent.timeInsert = epochNow 44 | } 45 | return 46 | } 47 | ele := c.ll.PushFront(&entry{key, value, epochNow}) 48 | c.cache[key] = ele 49 | if c.Size != 0 && c.ll.Len() > c.Size { 50 | c.RemoveOldest() 51 | } 52 | } 53 | 54 | func (c *cacheBase) Get(key string) (value interface{}, ok bool) { 55 | if ele, hit := c.cache[key]; hit { 56 | if c.Expiry != time.Duration(0) { 57 | unixNow := time.Now().UnixNano() / int64(time.Millisecond) 58 | unixExpiry := int64(c.Expiry / time.Millisecond) 59 | if ent, ok := ele.Value.(*entry); ok { 60 | if (unixNow - ent.timeInsert) > unixExpiry { 61 | c.removeElement(ele) 62 | return nil, false 63 | } 64 | } 65 | } 66 | c.ll.MoveToFront(ele) 67 | if ent, ok := ele.Value.(*entry); ok { 68 | return ent.value, true 69 | } 70 | } 71 | return nil, false 72 | } 73 | 74 | // Updates element's value without updating its "Least-Recently-Used" status 75 | func (c *cacheBase) UpdateElement(key string, value interface{}) { 76 | if ee, ok := c.cache[key]; ok { 77 | if ent, ok := ee.Value.(*entry); ok { 78 | ent.value = value 79 | return 80 | } 81 | } 82 | } 83 | 84 | func (c *cacheBase) Remove(key string) { 85 | if ele, hit := c.cache[key]; hit { 86 | c.removeElement(ele) 87 | } 88 | } 89 | 90 | func (c *cacheBase) RemoveOldest() { 91 | ele := c.ll.Back() 92 
| if ele != nil { 93 | c.removeElement(ele) 94 | } 95 | } 96 | 97 | func (c *cacheBase) removeElement(e *list.Element) { 98 | c.ll.Remove(e) 99 | kv, ok := e.Value.(*entry) 100 | if ok { 101 | delete(c.cache, kv.key) 102 | if c.OnEvicted != nil { 103 | c.OnEvicted(kv.key, kv.value) 104 | } 105 | } 106 | } 107 | 108 | // Len returns the number of items in the cache. 109 | func (c *cacheBase) Len() int { 110 | return c.ll.Len() 111 | } 112 | 113 | // Clear purges all stored items from the cache. 114 | func (c *cacheBase) Clear() { 115 | for _, e := range c.cache { 116 | kv, ok := e.Value.(*entry) 117 | if ok { 118 | if c.OnEvicted != nil { 119 | c.OnEvicted(kv.key, kv.value) 120 | } 121 | delete(c.cache, kv.key) 122 | } 123 | } 124 | c.ll.Init() 125 | } 126 | -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-cousin.tar: -------------------------------------------------------------------------------- 1 | a/000755 000767 000000 00000000000 14413757256 011735 5ustar00yahaviwheel000000 000000 a/c/000755 000767 000000 00000000000 14414004617 012142 5ustar00yahaviwheel000000 000000 a/b/000755 000767 000000 00000000000 14414004756 012145 5ustar00yahaviwheel000000 000000 a/b/softlink-cousin000755 000767 000000 00000000000 14414004756 016013 2../c/dustar00yahaviwheel000000 000000 a/c/d000644 000767 000000 00000000000 14414004617 012276 0ustar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-uncle-file.tar: -------------------------------------------------------------------------------- 1 | a/000755 000767 000000 00000000000 14414015024 011712 5ustar00yahaviwheel000000 000000 a/c000644 000767 000000 00000000003 14414015024 012050 0ustar00yahaviwheel000000 000000 hi 2 | a/b/000755 000767 000000 00000000000 14414015112 012131 5ustar00yahaviwheel000000 000000 a/b/softlink-uncle000755 000767 000000 00000000000 
14414015112 015362 2../custar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /io/cmd_test.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "errors" 5 | "regexp" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | var matchAllRegexp = regexp.MustCompile(".*") 12 | var errParsing = errors.New("parsing error") 13 | 14 | func TestRunCmdWithOutputParser(t *testing.T) { 15 | config := NewCommand("go", "", []string{"version"}) 16 | parserCalled := false 17 | stdout, stderr, exitOk, err := RunCmdWithOutputParser(config, false, &CmdOutputPattern{ 18 | RegExp: matchAllRegexp, 19 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { 20 | parserCalled = true 21 | return pattern.Line, nil 22 | }, 23 | }) 24 | assert.NoError(t, err) 25 | assert.True(t, parserCalled) 26 | assert.True(t, exitOk) 27 | assert.Contains(t, stdout, "go version") 28 | assert.Empty(t, stderr) 29 | } 30 | 31 | func TestRunCmdWithOutputParserError(t *testing.T) { 32 | config := NewCommand("go", "", []string{"version"}) 33 | _, _, exitOk, err := RunCmdWithOutputParser(config, false, &CmdOutputPattern{ 34 | RegExp: matchAllRegexp, 35 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line, errParsing }, 36 | }) 37 | assert.ErrorContains(t, err, "parsing error") 38 | assert.False(t, exitOk) 39 | } 40 | 41 | var processLineCases = []struct { 42 | name string 43 | cmdOutputPatterns []*CmdOutputPattern 44 | line string 45 | expectedOutput string 46 | expectError bool 47 | }{ 48 | {"Empty", []*CmdOutputPattern{}, "", "", false}, 49 | 50 | {"Simple", []*CmdOutputPattern{{ 51 | RegExp: matchAllRegexp, 52 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line, nil }, 53 | }}, "hello", "hello", false}, 54 | 55 | {"Append character once", []*CmdOutputPattern{{ 56 | RegExp: matchAllRegexp, 57 | ExecFunc: 
func(pattern *CmdOutputPattern) (string, error) { return pattern.Line[1:], nil }, 58 | }}, "hello", "ello", false}, 59 | 60 | {"Append character twice", []*CmdOutputPattern{ 61 | { 62 | RegExp: matchAllRegexp, 63 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line + "l", nil }, 64 | }, 65 | { 66 | RegExp: matchAllRegexp, 67 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line + "o", nil }, 68 | }, 69 | }, "hel", "hello", false}, 70 | 71 | {"Doesn't match", []*CmdOutputPattern{ 72 | { 73 | RegExp: regexp.MustCompile("doesn't match"), 74 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line + "aaaaaa", nil }, 75 | }, 76 | { 77 | RegExp: matchAllRegexp, 78 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return pattern.Line + "o", nil }, 79 | }, 80 | }, "hell", "hello", false}, 81 | 82 | {"Parsing error", []*CmdOutputPattern{{ 83 | RegExp: matchAllRegexp, 84 | ExecFunc: func(pattern *CmdOutputPattern) (string, error) { return "", errParsing }, 85 | }}, "hello", "", true}, 86 | } 87 | 88 | func TestProcessLine(t *testing.T) { 89 | for _, testCase := range processLineCases { 90 | t.Run(testCase.name, func(t *testing.T) { 91 | errChan := make(chan error, 1) 92 | defer close(errChan) 93 | processedLine, hasErrors := processLine(testCase.cmdOutputPatterns, testCase.line, errChan) 94 | if testCase.expectError { 95 | assert.True(t, hasErrors) 96 | assert.ErrorIs(t, errParsing, <-errChan) 97 | } else { 98 | assert.False(t, hasErrors) 99 | assert.Equal(t, testCase.expectedOutput, processedLine) 100 | } 101 | }) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /safeconvert/int_test.go: -------------------------------------------------------------------------------- 1 | package safeconvert 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "math" 6 | "testing" 7 | ) 8 | 9 | func TestSafeIntToUint(t *testing.T) { 10 | 
tests := []struct { 11 | input int 12 | expected uint 13 | errExpected bool 14 | }{ 15 | {input: 10, expected: 10}, 16 | {input: -1, expected: 0, errExpected: true}, 17 | {input: 0, expected: 0}, 18 | } 19 | 20 | for _, test := range tests { 21 | result, err := IntToUint(test.input) 22 | if test.errExpected { 23 | assert.Error(t, err, "Expected an error for input: %d", test.input) 24 | } else { 25 | assert.NoError(t, err, "Did not expect an error for input: %d", test.input) 26 | assert.Equal(t, test.expected, result, "Expected result does not match") 27 | } 28 | } 29 | } 30 | 31 | func TestSafeUintToInt(t *testing.T) { 32 | tests := []struct { 33 | input uint 34 | expected int 35 | errExpected bool 36 | }{ 37 | {input: 10, expected: 10}, 38 | {input: uint(math.MaxInt), expected: math.MaxInt}, 39 | {input: uint(math.MaxInt) + 1, expected: 0, errExpected: true}, 40 | } 41 | 42 | for _, test := range tests { 43 | result, err := UintToInt(test.input) 44 | if test.errExpected { 45 | assert.Error(t, err, "Expected an error for input: %d", test.input) 46 | } else { 47 | assert.NoError(t, err, "Did not expect an error for input: %d", test.input) 48 | assert.Equal(t, test.expected, result, "Expected result does not match") 49 | } 50 | } 51 | } 52 | 53 | func TestSafeInt64ToUint64(t *testing.T) { 54 | tests := []struct { 55 | input int64 56 | expected uint64 57 | errExpected bool 58 | }{ 59 | {input: 10, expected: 10}, 60 | {input: -1, expected: 0, errExpected: true}, 61 | {input: 0, expected: 0}, 62 | } 63 | 64 | for _, test := range tests { 65 | result, err := Int64ToUint64(test.input) 66 | if test.errExpected { 67 | assert.Error(t, err, "Expected an error for input: %d", test.input) 68 | } else { 69 | assert.NoError(t, err, "Did not expect an error for input: %d", test.input) 70 | assert.Equal(t, test.expected, result, "Expected result does not match") 71 | } 72 | } 73 | } 74 | 75 | func TestSafeUint64ToInt64(t *testing.T) { 76 | tests := []struct { 77 | input uint64 78 | 
// AES encryption using GCM mode (widely adopted because of its efficiency and performance).
// The key argument should be the AES key: either 16, 24 or 32 bytes,
// selecting AES-128, AES-192 or AES-256 respectively.
// The random nonce is prepended to the ciphertext, and the whole envelope is
// URL-safe base64 encoded.
func encrypt(plaintext []byte, key []byte) (string, error) {
	c, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}

	gcm, err := cipher.NewGCM(c)
	if err != nil {
		return "", err
	}

	// A fresh random nonce per message is required for GCM security.
	nonce := make([]byte, gcm.NonceSize())
	if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
		return "", err
	}
	ciphertext := gcm.Seal(nonce, nonce, plaintext, nil)
	return base64.URLEncoding.EncodeToString(ciphertext), nil
}

// decrypt reverses encrypt: it splits the nonce off the front of ciphertext
// and opens the GCM envelope with the given raw key.
func decrypt(ciphertext []byte, key []byte) (string, error) {
	c, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}

	gcm, err := cipher.NewGCM(c)
	if err != nil {
		return "", err
	}

	nonceSize := gcm.NonceSize()
	if len(ciphertext) < nonceSize {
		return "", errors.New("ciphertext too short")
	}

	nonce, payload := ciphertext[:nonceSize], ciphertext[nonceSize:]
	//#nosec G407 -- the nonce is random (generated in encrypt), not hardcoded.
	decryptedBytes, err := gcm.Open(nil, nonce, payload, nil)
	if err != nil {
		return "", err
	}
	return string(decryptedBytes), nil
}

// cipherAlgorithm is the algorithm tag embedded in the formatted cipher text.
const cipherAlgorithm = "aes256"

// formatPrefix returns the "{{key-id}}$aes256$" marker expected at the start
// of a formatted cipher text. keyId is the first 6 chars of the hashed
// (sha256) signing key.
func formatPrefix(keyId string) string {
	return keyId + "$" + cipherAlgorithm + "$"
}

// Encrypt encrypts text with the hex-encoded key and formats the result as
// {{key-id}}${{algo}}${{encrypted-value}}
// example: e67gef$aes256$adsad321424324fdsdfs3Rddi90oP34xV
func Encrypt(text, key, keyId string) (string, error) {
	// hex decoding of key
	keyBytes, err := hex.DecodeString(key)
	if err != nil {
		return "", err
	}
	cipherText, err := encrypt([]byte(text), keyBytes)
	if err != nil {
		return "", err
	}
	return formatPrefix(keyId) + cipherText, nil
}

// Decrypt validates the "{{key-id}}$aes256$" prefix of formattedCipherText
// and decrypts the remaining payload with the hex-encoded key.
func Decrypt(formattedCipherText, key, keyId string) (string, error) {
	prefix := formatPrefix(keyId)
	// Require the format marker as a prefix. The previous strings.Contains +
	// ReplaceAll approach accepted the marker anywhere in the input and could
	// corrupt the payload if the marker text appeared inside it.
	if !strings.HasPrefix(formattedCipherText, prefix) {
		return "", errors.New("cipher text is not well formatted")
	}
	// keep cipher text only
	cipherText := strings.TrimPrefix(formattedCipherText, prefix)
	// hex decoding of key
	keyBytes, err := hex.DecodeString(key)
	if err != nil {
		return "", err
	}
	cipherTextBytes, err := base64.URLEncoding.DecodeString(cipherText)
	if err != nil {
		return "", err
	}
	return decrypt(cipherTextBytes, keyBytes)
}

// IsTextEncrypted reports whether formattedCipherText is well formatted and
// decrypts successfully with the given key and keyId. It returns an error
// (and false) when the format or the decryption is invalid.
func IsTextEncrypted(formattedCipherText, key, keyId string) (bool, error) {
	if _, err := Decrypt(formattedCipherText, key, keyId); err != nil {
		return false, err
	}
	return true, nil
}
(interface{}, error) { 27 | return f(r) 28 | } 29 | 30 | type readerResult struct { 31 | data interface{} 32 | pos int 33 | } 34 | 35 | /* 36 | [inr]--r-- 37 | |--w--[pw]--|--[pr]--r 38 | |--w--[pw]--|--[pr]--r 39 | |--w--[pw]--|--[pr]--r 40 | */ 41 | 42 | func NewReadAllReader(reader io.Reader, consumers ...ReadAllConsumer) *ReadAllReader { 43 | procLen := len(consumers) 44 | pipeReaders := make([]*io.PipeReader, procLen) 45 | pipeWriters := make([]*io.PipeWriter, procLen) 46 | done := make(chan *readerResult, procLen) 47 | errs := make(chan error, procLen) 48 | // Create pipe r/w for each reader 49 | for i := 0; i < procLen; i++ { 50 | pr, pw := io.Pipe() 51 | pipeReaders[i] = pr 52 | pipeWriters[i] = pw 53 | } 54 | return &ReadAllReader{reader, consumers, pipeReaders, pipeWriters, done, errs} 55 | } 56 | 57 | func toWriters(pipeWriters []*io.PipeWriter) (writers []io.Writer) { 58 | // Convert to an array of io.Writers so it can be taken by a variadic func 59 | // See: https://groups.google.com/forum/#!topic/golang-nuts/zU3BqD5mKs8 60 | writers = make([]io.Writer, len(pipeWriters)) 61 | for i, w := range pipeWriters { 62 | writers[i] = w 63 | } 64 | return 65 | } 66 | 67 | func (r *ReadAllReader) GetReader(i int) io.Reader { 68 | return r.pipeReaders[i] 69 | } 70 | 71 | func (r *ReadAllReader) ReadAll() ([]interface{}, error) { 72 | defer close(r.results) 73 | defer close(r.errs) 74 | 75 | for i, sr := range r.consumers { 76 | go func(sr ReadAllConsumer, pos int) { 77 | reader := r.pipeReaders[pos] 78 | // The reader might stop but the writer hasn't done 79 | // Closing the pipe will cause an error to the writer which will cause all readers to stop as well 80 | defer reader.Close() 81 | ret, perr := sr.ReadAll(reader) 82 | if perr != nil { 83 | r.errs <- errors.WithStack(perr) 84 | return 85 | } 86 | r.results <- &readerResult{ret, pos} 87 | }(sr, i) 88 | } 89 | var multiWriterError error 90 | var wg sync.WaitGroup 91 | wg.Add(1) 92 | go func() { 93 | defer 
wg.Done() 94 | defer r.close() 95 | mw := io.MultiWriter(toWriters(r.pipeWriters)...) 96 | _, err := io.Copy(mw, r.reader) 97 | if err != nil { 98 | // probably caused due to closed pipe reader 99 | multiWriterError = fmt.Errorf("fanout multiwriter error: %v ", err) 100 | } 101 | }() 102 | wg.Wait() 103 | return getAllReadersResult(r, multiWriterError) 104 | } 105 | 106 | func (r *ReadAllReader) close() { 107 | for _, pw := range r.pipeWriters { 108 | _ = pw.Close() 109 | } 110 | } 111 | 112 | func getAllReadersResult(r *ReadAllReader, err error) ([]interface{}, error) { 113 | results := make([]interface{}, len(r.consumers)) 114 | lastError := err 115 | for range r.consumers { 116 | select { 117 | case e := <-r.errs: 118 | lastError = e 119 | case result := <-r.results: 120 | results[result.pos] = result.data 121 | } 122 | } 123 | if lastError != nil { 124 | return nil, lastError 125 | } 126 | return results, nil 127 | } 128 | -------------------------------------------------------------------------------- /http/filestream/filestream.go: -------------------------------------------------------------------------------- 1 | package filestream 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "mime/multipart" 10 | "os" 11 | 12 | ioutils "github.com/jfrog/gofrog/io" 13 | ) 14 | 15 | const ( 16 | FileType = "file" 17 | ErrorType = "error" 18 | ) 19 | 20 | type MultipartError struct { 21 | FileName string `json:"file_name"` 22 | ErrMessage string `json:"error_message"` 23 | } 24 | 25 | // The expected type of function that should be provided to the ReadFilesFromStream func, that returns the writer that should handle each file 26 | type FileWriterFunc func(fileName string) (writers []io.WriteCloser, err error) 27 | 28 | func ReadFilesFromStream(multipartReader *multipart.Reader, fileWritersFunc FileWriterFunc) error { 29 | for { 30 | // Read the next file streamed from client 31 | fileReader, err := multipartReader.NextPart() 32 | if err 
!= nil { 33 | if errors.Is(err, io.EOF) { 34 | break 35 | } 36 | return fmt.Errorf("failed to read file: %w", err) 37 | } 38 | if err = readFile(fileReader, fileWritersFunc); err != nil { 39 | return err 40 | } 41 | 42 | } 43 | return nil 44 | } 45 | 46 | func readFile(fileReader *multipart.Part, fileWriterFunc FileWriterFunc) (err error) { 47 | fileName := fileReader.FileName() 48 | fileWriter, err := fileWriterFunc(fileName) 49 | if err != nil { 50 | return err 51 | } 52 | var writers []io.Writer 53 | for _, writer := range fileWriter { 54 | defer ioutils.Close(writer, &err) 55 | // Create a multi writer that will write the file to all the provided writers 56 | // We read multipart once and write to multiple writers, so we can't use the same multipart writer multiple times 57 | writers = append(writers, writer) 58 | } 59 | if _, err = io.Copy(ioutils.AsyncMultiWriter(10, writers...), fileReader); err != nil { 60 | return fmt.Errorf("failed writing '%s' file: %w", fileName, err) 61 | } 62 | return nil 63 | } 64 | 65 | type FileInfo struct { 66 | Name string 67 | Path string 68 | } 69 | 70 | func WriteFilesToStream(multipartWriter *multipart.Writer, filesList []*FileInfo) (err error) { 71 | // Close finishes the multipart message and writes the trailing 72 | // boundary end line to the output, thereby marking the EOF. 
73 | defer ioutils.Close(multipartWriter, &err) 74 | for _, file := range filesList { 75 | if err = writeFile(multipartWriter, file); err != nil { 76 | // Returning the error from writeFile with a possible error from the writeErr function 77 | return errors.Join(err, writeErr(multipartWriter, file, err)) 78 | } 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func writeFile(multipartWriter *multipart.Writer, file *FileInfo) (err error) { 85 | fileReader, err := os.Open(file.Path) 86 | if err != nil { 87 | return fmt.Errorf("failed opening file %q: %w", file.Name, err) 88 | } 89 | defer ioutils.Close(fileReader, &err) 90 | fileWriter, err := multipartWriter.CreateFormFile(FileType, file.Name) 91 | if err != nil { 92 | return fmt.Errorf("failed to create form file for %q: %w", file.Name, err) 93 | } 94 | _, err = io.Copy(fileWriter, fileReader) 95 | return err 96 | } 97 | 98 | func writeErr(multipartWriter *multipart.Writer, file *FileInfo, writeFileErr error) error { 99 | fileWriter, err := multipartWriter.CreateFormField(ErrorType) 100 | if err != nil { 101 | return fmt.Errorf("failed to create form field: %w", err) 102 | } 103 | 104 | multipartErr := MultipartError{FileName: file.Name, ErrMessage: writeFileErr.Error()} 105 | multipartErrJSON, err := json.Marshal(multipartErr) 106 | if err != nil { 107 | return fmt.Errorf("failed to marshal multipart error for file %q: %w", file.Name, err) 108 | } 109 | 110 | _, err = io.Copy(fileWriter, bytes.NewReader(multipartErrJSON)) 111 | return err 112 | } 113 | -------------------------------------------------------------------------------- /lru/lru_test.go: -------------------------------------------------------------------------------- 1 | package lru 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestGet(t *testing.T) { 9 | var getTests = []struct { 10 | name string 11 | keyToAdd string 12 | keyToGet string 13 | expectedOk bool 14 | }{ 15 | {"string_hit", "myKey", "myKey", true}, 16 | {"string_miss", "myKey", 
"nonsense", false}, 17 | } 18 | for _, tt := range getTests { 19 | c := New(0) 20 | c.Add(tt.keyToAdd, 1234) 21 | val, ok := c.Get(tt.keyToGet) 22 | if ok != tt.expectedOk { 23 | t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok) 24 | } else if ok && val != 1234 { 25 | t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val) 26 | } 27 | } 28 | } 29 | 30 | func TestEviction(t *testing.T) { 31 | c := New(3) 32 | c.Add("e1", true) 33 | c.Add("e2", true) 34 | c.Add("e3", false) 35 | c.Add("e4", false) 36 | 37 | _, ok := c.Get("e1") 38 | if ok { 39 | t.Fatal("Did not expect to find element e1 in cache after adding e4") 40 | } 41 | _, ok = c.Get("e2") 42 | if !ok { 43 | t.Fatal("Expected to find element e2 in cache after adding e4") 44 | } 45 | 46 | c.Add("e5", true) 47 | _, ok = c.Get("e2") 48 | if !ok { 49 | t.Fatal("Expected to find element e2 in cache after adding e5 because it was accessed recently") 50 | } 51 | _, ok = c.Get("e3") 52 | if ok { 53 | t.Fatal("Did not expect to find element e3 in cache after adding e5 since e2 was accessed before it") 54 | } 55 | } 56 | 57 | func TestRemove(t *testing.T) { 58 | c := New(0) 59 | c.Add("myKey", 1234) 60 | if val, ok := c.Get("myKey"); !ok { 61 | t.Fatal("TestRemove returned no match") 62 | } else if val != 1234 { 63 | t.Fatalf("TestRemove failed. 
Expected %d, got %v", 1234, val) 64 | } 65 | 66 | c.Remove("myKey") 67 | if _, ok := c.Get("myKey"); ok { 68 | t.Fatal("TestRemove returned a removed entry") 69 | } 70 | } 71 | 72 | func TestPurge(t *testing.T) { 73 | c := New(2) 74 | l := c.Len() 75 | if l != 0 { 76 | t.Fatalf("Expected length to be 1 but got %d", l) 77 | } 78 | c.Add("e1", 1) 79 | l = c.Len() 80 | if l != 1 { 81 | t.Fatalf("Expected length to be 1 but got %d", l) 82 | } 83 | c.Add("e2", 2) 84 | l = c.Len() 85 | if l != 2 { 86 | t.Fatalf("Expected length to be 2 but got %d", l) 87 | } 88 | c.Add("e3", 3) 89 | l = c.Len() 90 | if l != 2 { 91 | t.Fatalf("Expected length to be 2 but got %d", l) 92 | } 93 | if _, ok := c.Get("e1"); ok { 94 | t.Fatal("Expected not to get value for e1 but it was not found") 95 | } 96 | if _, ok := c.Get("e2"); !ok { 97 | t.Fatal("Expected to get value for e2 but it was not found") 98 | } 99 | if _, ok := c.Get("e3"); !ok { 100 | t.Fatal("Expected to get value for e2 but it was not found") 101 | } 102 | 103 | c.Clear() 104 | l = c.Len() 105 | if _, ok := c.Get("e2"); ok { 106 | t.Fatal("Expected not to get value for e2 but it was found") 107 | } 108 | if _, ok := c.Get("e3"); ok { 109 | t.Fatal("Expected not to get value for e3 but it was found") 110 | } 111 | if l != 0 { 112 | t.Fatalf("Expected length to be 0 after clearing cache, but got %d", l) 113 | } 114 | } 115 | 116 | func TestExpiry(t *testing.T) { 117 | c := New(3, WithExpiry(time.Second)) 118 | c.Add("e1", 1) 119 | c.Add("e2", 2) 120 | time.Sleep(500 * time.Millisecond) 121 | c.Add("e3", 3) 122 | if _, ok := c.Get("e1"); !ok { 123 | t.Fatal("Expected to get value for e1 but it was not found") 124 | } 125 | if _, ok := c.Get("e2"); !ok { 126 | t.Fatal("Expected to get value for e2 but it was not found") 127 | } 128 | if _, ok := c.Get("e3"); !ok { 129 | t.Fatal("Expected to get value for e3 but it was not found") 130 | } 131 | l := c.Len() 132 | if l != 3 { 133 | t.Fatalf("Expected length to be 3 but got %d", 
l) 134 | } 135 | time.Sleep(700 * time.Millisecond) 136 | if _, ok := c.Get("e1"); ok { 137 | t.Fatal("Expected not to get value for e1 but it was found") 138 | } 139 | if _, ok := c.Get("e2"); ok { 140 | t.Fatal("Expected not to get value for e2 but it was found") 141 | } 142 | if _, ok := c.Get("e3"); !ok { 143 | t.Fatal("Expected to get value for e3 but it was not found") 144 | } 145 | l = c.Len() 146 | if l != 1 { 147 | t.Fatalf("Expected length to be 1 but got %d", l) 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= 2 | github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= 3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= 6 | github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= 7 | github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= 8 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 9 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 10 | github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= 11 | github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= 12 | github.com/jfrog/archiver/v3 v3.6.0 h1:OVZ50vudkIQmKMgA8mmFF9S0gA47lcag22N13iV3F1w= 13 | github.com/jfrog/archiver/v3 v3.6.0/go.mod h1:fCAof46C3rAXgZurS8kNRNdSVMKBbZs+bNNhPYxLldI= 14 | github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= 15 | github.com/klauspost/compress 
v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= 16 | github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= 17 | github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 18 | github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= 19 | github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= 20 | github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= 21 | github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= 22 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 23 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 24 | github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= 25 | github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= 26 | github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= 27 | github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 28 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 29 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 30 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 31 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 32 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 33 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 34 | github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= 35 | github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= 36 | github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= 37 | github.com/xi2/xz 
v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= 38 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= 39 | github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= 40 | github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= 41 | golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= 42 | golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 43 | golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 44 | golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= 45 | golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 46 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 47 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 48 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 49 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 50 | -------------------------------------------------------------------------------- /unarchive/testdata/archives/softlink-rel.tar: -------------------------------------------------------------------------------- 1 | softlink-rel/000755 000767 000000 00000000000 14274402641 014114 5ustar00yahaviwheel000000 000000 softlink-rel/a/000755 000767 000000 00000000000 14274402571 014336 5ustar00yahaviwheel000000 000000 softlink-rel/b/000755 000767 000000 00000000000 14274402522 014333 5ustar00yahaviwheel000000 000000 softlink-rel/b/c/000755 000767 000000 00000000000 14274402526 014561 5ustar00yahaviwheel000000 000000 softlink-rel/b/c/d/000755 000767 000000 00000000000 14274402533 015002 5ustar00yahaviwheel000000 000000 softlink-rel/b/c/d/file000644 
000767 000000 00000000000 14274402533 015632 0ustar00yahaviwheel000000 000000 softlink-rel/a/softlink-rel000755 000767 000000 00000000000 14274402571 020626 2../b/c/d/fileustar00yahaviwheel000000 000000 -------------------------------------------------------------------------------- /log/logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "log" 7 | "os" 8 | "strings" 9 | "sync" 10 | 11 | "github.com/gookit/color" 12 | ) 13 | 14 | const ( 15 | LogLevelEnv = "JFROG_LOG_LEVEL" 16 | ) 17 | 18 | type Log interface { 19 | Debug(a ...interface{}) 20 | Info(a ...interface{}) 21 | Warn(a ...interface{}) 22 | Error(a ...interface{}) 23 | Output(a ...interface{}) 24 | GetLogLevel() LevelType 25 | } 26 | 27 | var ( 28 | // The logger instance 29 | _logger *Logger 30 | // Used to ensure _logger is initialized only once 31 | once sync.Once 32 | ) 33 | 34 | func GetLogger() *Logger { 35 | once.Do(func() { 36 | _logger = NewLogger(getLogLevel()) 37 | }) 38 | return _logger 39 | } 40 | 41 | type LevelType int 42 | 43 | const ( 44 | ERROR LevelType = iota 45 | WARN 46 | INFO 47 | DEBUG 48 | ) 49 | 50 | func getLogLevel() LevelType { 51 | switch strings.ToUpper(os.Getenv(LogLevelEnv)) { 52 | case "ERROR": 53 | return ERROR 54 | case "WARN": 55 | return WARN 56 | case "DEBUG": 57 | return DEBUG 58 | default: 59 | return INFO 60 | } 61 | } 62 | 63 | type Logger struct { 64 | LogLevel LevelType 65 | OutputLog *log.Logger 66 | DebugLog *log.Logger 67 | InfoLog *log.Logger 68 | WarnLog *log.Logger 69 | ErrorLog *log.Logger 70 | // Mutex to protect access to the logger 71 | mu sync.Mutex 72 | } 73 | 74 | func NewLogger(logLevel LevelType) *Logger { 75 | logger := new(Logger) 76 | logger.SetLogLevel(logLevel) 77 | logger.SetOutputWriter() 78 | logger.SetLogsWriter() 79 | return logger 80 | } 81 | 82 | func (logger *Logger) SetLogLevel(levelEnum LevelType) { 83 | logger.mu.Lock() 84 | defer 
logger.mu.Unlock() 85 | logger.LogLevel = levelEnum 86 | } 87 | 88 | func (logger *Logger) SetOutputWriter() { 89 | logger.OutputLog = log.New(io.Writer(os.Stdout), "", 0) 90 | } 91 | 92 | func (logger *Logger) SetLogsWriter() { 93 | stdErrWriter := io.Writer(os.Stderr) 94 | logger.DebugLog = log.New(stdErrWriter, getLogPrefix(DEBUG), 0) 95 | logger.InfoLog = log.New(stdErrWriter, getLogPrefix(INFO), 0) 96 | logger.WarnLog = log.New(stdErrWriter, getLogPrefix(WARN), 0) 97 | logger.ErrorLog = log.New(stdErrWriter, getLogPrefix(ERROR), 0) 98 | } 99 | 100 | var prefixStyles = map[LevelType]struct { 101 | logLevel string 102 | color color.Color 103 | }{ 104 | DEBUG: {logLevel: "Debug", color: color.Cyan}, 105 | INFO: {logLevel: "Info", color: color.Blue}, 106 | WARN: {logLevel: "Warn", color: color.Yellow}, 107 | ERROR: {logLevel: "Error", color: color.Red}, 108 | } 109 | 110 | func getLogPrefix(logType LevelType) string { 111 | if logPrefixStyle, ok := prefixStyles[logType]; ok { 112 | return fmt.Sprintf("[%s] ", logPrefixStyle.logLevel) 113 | } 114 | return "" 115 | } 116 | 117 | func Debug(a ...interface{}) { 118 | GetLogger().Debug(a...) 119 | } 120 | 121 | func Debugf(format string, a ...interface{}) { 122 | GetLogger().Debug(fmt.Sprintf(format, a...)) 123 | } 124 | 125 | func Info(a ...interface{}) { 126 | GetLogger().Info(a...) 127 | } 128 | 129 | func Infof(format string, a ...interface{}) { 130 | GetLogger().Info(fmt.Sprintf(format, a...)) 131 | } 132 | 133 | func Warn(a ...interface{}) { 134 | GetLogger().Warn(a...) 135 | } 136 | 137 | func Error(a ...interface{}) { 138 | GetLogger().Error(a...) 139 | } 140 | 141 | func Output(a ...interface{}) { 142 | GetLogger().Output(a...) 
143 | } 144 | 145 | func (logger *Logger) GetLogLevel() LevelType { 146 | return logger.LogLevel 147 | } 148 | 149 | func (logger *Logger) Debug(a ...interface{}) { 150 | logger.mu.Lock() 151 | defer logger.mu.Unlock() 152 | if logger.GetLogLevel() >= DEBUG { 153 | logger.Println(logger.DebugLog, a...) 154 | } 155 | } 156 | 157 | func (logger *Logger) Info(a ...interface{}) { 158 | logger.mu.Lock() 159 | defer logger.mu.Unlock() 160 | if logger.GetLogLevel() >= INFO { 161 | logger.Println(logger.InfoLog, a...) 162 | } 163 | } 164 | 165 | func (logger *Logger) Warn(a ...interface{}) { 166 | logger.mu.Lock() 167 | defer logger.mu.Unlock() 168 | if logger.GetLogLevel() >= WARN { 169 | logger.Println(logger.WarnLog, a...) 170 | } 171 | } 172 | 173 | func (logger *Logger) Error(a ...interface{}) { 174 | logger.mu.Lock() 175 | defer logger.mu.Unlock() 176 | if logger.GetLogLevel() >= ERROR { 177 | logger.Println(logger.ErrorLog, a...) 178 | } 179 | } 180 | 181 | func (logger *Logger) Output(a ...interface{}) { 182 | logger.mu.Lock() 183 | defer logger.mu.Unlock() 184 | logger.Println(logger.OutputLog, a...) 185 | } 186 | 187 | func (logger *Logger) Println(log *log.Logger, values ...interface{}) { 188 | log.Println(values...) 189 | } 190 | -------------------------------------------------------------------------------- /fanout/reader_test.go: -------------------------------------------------------------------------------- 1 | package fanout 2 | 3 | import ( 4 | "bytes" 5 | //#nosec G505 -- Sha1 is supported by Artifactory. 6 | "crypto/sha1" 7 | "crypto/sha256" 8 | "encoding/hex" 9 | "errors" 10 | "github.com/stretchr/testify/assert" 11 | "io" 12 | "strings" 13 | "testing" 14 | ) 15 | 16 | const input = "yogreshobuddy!" 
17 | const sha1sum = "a967c390de10f37dab8eb33549c6304ded62e951" 18 | const sha2sum = "72a0230d6e5eebb437a9069ebb390171284192e9a993938d02cb0aaae003fd1c" 19 | 20 | var ( 21 | inputBytes = []byte(input) 22 | ) 23 | 24 | func TestFanoutRead(t *testing.T) { 25 | proc := func(r io.Reader) (interface{}, error) { 26 | hash := sha256.New() 27 | if _, err := io.Copy(hash, r); err != nil { 28 | t.Fatal(t) 29 | } 30 | return hash.Sum(nil), nil 31 | } 32 | 33 | // Using a closure argument instead of results 34 | var sum3 []byte 35 | proc1 := func(r io.Reader) (rt interface{}, er error) { 36 | hash := sha256.New() 37 | if _, err := io.Copy(hash, r); err != nil { 38 | t.Fatal(t) 39 | } 40 | sum3 = hash.Sum(nil) 41 | return 42 | } 43 | 44 | r := bytes.NewReader(inputBytes) 45 | fr := NewReadAllReader(r, ReadAllConsumerFunc(proc), ReadAllConsumerFunc(proc), ReadAllConsumerFunc(proc1)) 46 | results, err := fr.ReadAll() 47 | 48 | if err != nil { 49 | t.Error(err) 50 | } 51 | sum1, ok := results[0].([]byte) 52 | assert.True(t, ok) 53 | sum2, ok := results[1].([]byte) 54 | assert.True(t, ok) 55 | 56 | sum1str := hex.EncodeToString(sum1) 57 | sum2str := hex.EncodeToString(sum2) 58 | sum3str := hex.EncodeToString(sum3) 59 | 60 | if sum1str != sum2str || sum1str != sum3str { 61 | t.Errorf("Sum1 %s, Sum2 %s, and Sum3 %s are not all the same", sum1str, sum2str, sum3str) 62 | } 63 | 64 | if sum1str != sha2sum { 65 | t.Errorf("Checksum is not as expected: %s != %s", sum1str, sha2sum) 66 | } 67 | } 68 | 69 | func TestFanoutProgressiveRead(t *testing.T) { 70 | //#nosec G401 -- Sha1 is supported by Artifactory. 
71 | hash1 := sha1.New() 72 | proc1 := func(p []byte) (err error) { 73 | if _, err := hash1.Write(p); err != nil { 74 | t.Fatal(t) 75 | } 76 | return 77 | } 78 | 79 | hash2 := sha256.New() 80 | proc2 := func(p []byte) (err error) { 81 | if _, err := hash2.Write(p); err != nil { 82 | t.Fatal(t) 83 | } 84 | return 85 | } 86 | 87 | r := bytes.NewReader(inputBytes) 88 | pfr := NewReader(r, ConsumerFunc(proc1), ConsumerFunc(proc2)) 89 | 90 | _, err := io.ReadAll(pfr) 91 | if err != nil { 92 | t.Error(err) 93 | } 94 | 95 | sum1 := hash1.Sum(nil) 96 | sum1str := hex.EncodeToString(sum1) 97 | if sum1str != sha1sum { 98 | t.Errorf("Sha1 is not as expected: %s != %s", sum1str, sha1sum) 99 | } 100 | sum2 := hash2.Sum(nil) 101 | sum2str := hex.EncodeToString(sum2) 102 | if sum2str != sha2sum { 103 | t.Errorf("Sha2 is not as expected: %s != %s", sum2str, sha2sum) 104 | } 105 | } 106 | 107 | func TestFanoutProgressiveReadError(t *testing.T) { 108 | const errmsg = "ERRSHA1" 109 | //#nosec G401 -- Sha1 is supported by Artifactory. 
110 | hash1 := sha1.New() 111 | proc1 := func(p []byte) (err error) { 112 | return errors.New(errmsg) 113 | } 114 | 115 | hash2 := sha256.New() 116 | proc2 := func(p []byte) (err error) { 117 | if _, err := hash2.Write(p); err != nil { 118 | t.Fatal(t) 119 | } 120 | return 121 | } 122 | 123 | r := bytes.NewReader(inputBytes) 124 | pfr := NewReader(r, ConsumerFunc(proc1), ConsumerFunc(proc2)) 125 | 126 | _, err := io.ReadAll(pfr) 127 | if err == nil { 128 | t.Fatal("Expected a non-nil error") 129 | } 130 | if err.Error() != errmsg { 131 | t.Fatalf("Error message is different from: %s", errmsg) 132 | } 133 | 134 | sum1 := hash1.Sum(nil) 135 | sum1str := hex.EncodeToString(sum1) 136 | if sum1str == sha1sum { 137 | t.Errorf("Sha1 is not as expected: %s != %s", sum1str, sha1sum) 138 | } 139 | var sum2str string 140 | if err == nil { 141 | sum2 := hash2.Sum(nil) 142 | sum2str = hex.EncodeToString(sum2) 143 | } 144 | if sum2str == sha2sum { 145 | t.Error("Sha2 calculation should have terminated a head of time due to an error") 146 | } 147 | } 148 | 149 | // This scenario can cause deadlock 150 | func TestSyncReadOnError(t *testing.T) { 151 | proc1 := func(r1 io.Reader) (interface{}, error) { 152 | n, e := io.ReadAll(r1) 153 | return n, e 154 | } 155 | 156 | proc2 := func(r2 io.Reader) (interface{}, error) { 157 | buf := make([]byte, 1) 158 | n, err := io.ReadFull(r2, buf) 159 | return n, err 160 | 161 | } 162 | 163 | pfr := NewReadAllReader(strings.NewReader("someNotTooShortString"), ReadAllConsumerFunc(proc1), ReadAllConsumerFunc(proc2)) 164 | _, _ = pfr.ReadAll() 165 | } 166 | -------------------------------------------------------------------------------- /io/fileutils_test.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestClose(t *testing.T) { 14 | var err error 
15 | f, err := os.Create(filepath.Join(t.TempDir(), "test")) 16 | assert.NoError(t, err) 17 | 18 | Close(f, &err) 19 | assert.NoError(t, err) 20 | 21 | // Try closing the same file again and expect error 22 | Close(f, &err) 23 | assert.Error(t, err) 24 | 25 | // Check that both errors are aggregated 26 | err = errors.New("original error") 27 | Close(f, &err) 28 | assert.Len(t, strings.Split(err.Error(), "\n"), 2) 29 | 30 | nilErr := new(error) 31 | Close(f, nilErr) 32 | assert.NotNil(t, nilErr) 33 | } 34 | 35 | func TestFindFileInDirAndParents(t *testing.T) { 36 | const goModFileName = "go.mod" 37 | wd, err := os.Getwd() 38 | assert.NoError(t, err) 39 | projectRoot := filepath.Join(wd, "testdata", "project") 40 | 41 | // Find the file in the current directory 42 | root, err := FindFileInDirAndParents(projectRoot, goModFileName) 43 | assert.NoError(t, err) 44 | assert.Equal(t, projectRoot, root) 45 | 46 | // Find the file in the current directory's parent 47 | projectSubDirectory := filepath.Join(projectRoot, "dir") 48 | root, err = FindFileInDirAndParents(projectSubDirectory, goModFileName) 49 | assert.NoError(t, err) 50 | assert.Equal(t, projectRoot, root) 51 | 52 | // Look for a file that doesn't exist 53 | _, err = FindFileInDirAndParents(projectRoot, "notexist") 54 | assert.Error(t, err) 55 | } 56 | 57 | func TestReadNLines(t *testing.T) { 58 | wd, err := os.Getwd() 59 | assert.NoError(t, err) 60 | path := filepath.Join(wd, "testdata", "oneline") 61 | lines, err := ReadNLines(path, 2) 62 | assert.NoError(t, err) 63 | assert.Len(t, lines, 1) 64 | assert.True(t, strings.HasPrefix(lines[0], "")) 65 | 66 | path = filepath.Join(wd, "testdata", "twolines") 67 | lines, err = ReadNLines(path, 2) 68 | assert.NoError(t, err) 69 | assert.Len(t, lines, 2) 70 | assert.True(t, strings.HasPrefix(lines[1], "781")) 71 | assert.True(t, strings.HasSuffix(lines[1], ":true}}}")) 72 | 73 | path = filepath.Join(wd, "testdata", "threelines") 74 | lines, err = ReadNLines(path, 2) 75 | 
assert.NoError(t, err) 76 | assert.Len(t, lines, 2) 77 | assert.True(t, strings.HasPrefix(lines[1], "781")) 78 | assert.True(t, strings.HasSuffix(lines[1], ":true}}}")) 79 | } 80 | 81 | func TestCreateTempDir(t *testing.T) { 82 | tempDir, err := CreateTempDir() 83 | assert.NoError(t, err) 84 | 85 | assert.DirExists(t, tempDir) 86 | 87 | defer func() { 88 | // Check that a timestamp can be extracted from the temp dir name 89 | timestamp, err := extractTimestamp(tempDir) 90 | assert.NoError(t, err) 91 | assert.False(t, timestamp.IsZero()) 92 | 93 | assert.NoError(t, os.RemoveAll(tempDir)) 94 | }() 95 | } 96 | 97 | func TestMoveFile_New(t *testing.T) { 98 | // Init test 99 | sourcePath, destPath := initMoveTest(t) 100 | 101 | // Move file 102 | assert.NoError(t, MoveFile(sourcePath, destPath)) 103 | 104 | // Assert expected file paths 105 | assert.FileExists(t, destPath) 106 | assert.NoFileExists(t, sourcePath) 107 | } 108 | 109 | func TestMoveFile_Override(t *testing.T) { 110 | // Init test 111 | sourcePath, destPath := initMoveTest(t) 112 | err := os.WriteFile(destPath, []byte("dst"), 0600) 113 | assert.NoError(t, err) 114 | 115 | // Move file 116 | assert.NoError(t, MoveFile(sourcePath, destPath)) 117 | 118 | // Assert file overidden 119 | assert.FileExists(t, destPath) 120 | destFileContent, err := os.ReadFile(destPath) 121 | assert.NoError(t, err) 122 | assert.Equal(t, "src", string(destFileContent)) 123 | 124 | // Assert source file removed 125 | assert.NoFileExists(t, sourcePath) 126 | } 127 | 128 | func TestMoveFile_NoPerm(t *testing.T) { 129 | // Init test 130 | sourcePath, destPath := initMoveTest(t) 131 | err := os.WriteFile(destPath, []byte("dst"), 0600) 132 | assert.NoError(t, err) 133 | 134 | // Remove all permissions from destination file 135 | assert.NoError(t, os.Chmod(destPath, 0000)) 136 | _, err = os.Create(destPath) 137 | assert.Error(t, err) 138 | 139 | // Move file 140 | assert.NoError(t, MoveFile(sourcePath, destPath)) 141 | 142 | // Assert 
file overidden 143 | assert.FileExists(t, destPath) 144 | destFileContent, err := os.ReadFile(destPath) 145 | assert.NoError(t, err) 146 | assert.Equal(t, "src", string(destFileContent)) 147 | 148 | // Assert source file removed 149 | assert.NoFileExists(t, sourcePath) 150 | } 151 | 152 | func initMoveTest(t *testing.T) (sourcePath, destPath string) { 153 | // Create source and destination paths 154 | tmpDir := t.TempDir() 155 | sourcePath = filepath.Join(tmpDir, "src") 156 | destPath = filepath.Join(tmpDir, "dst") 157 | 158 | // Write content to source file 159 | err := os.WriteFile(sourcePath, []byte("src"), 0600) 160 | assert.NoError(t, err) 161 | return 162 | } 163 | -------------------------------------------------------------------------------- /crypto/checksum.go: -------------------------------------------------------------------------------- 1 | package crypto 2 | 3 | import ( 4 | "bufio" 5 | "regexp" 6 | 7 | // #nosec G501 -- md5 is supported by Artifactory. 8 | "crypto/md5" 9 | // #nosec G505 -- sha1 is supported by Artifactory. 10 | "crypto/sha1" 11 | "fmt" 12 | "hash" 13 | "io" 14 | "os" 15 | 16 | ioutils "github.com/jfrog/gofrog/io" 17 | "github.com/minio/sha256-simd" 18 | ) 19 | 20 | type Algorithm int 21 | 22 | const ( 23 | MD5 Algorithm = iota 24 | SHA1 25 | SHA256 26 | ) 27 | 28 | var algorithmFunc = map[Algorithm]func() hash.Hash{ 29 | // Go native crypto algorithms: 30 | MD5: md5.New, 31 | //#nosec G401 -- Sha1 is supported by Artifactory. 32 | SHA1: sha1.New, 33 | // sha256-simd algorithm: 34 | SHA256: sha256.New, 35 | } 36 | 37 | type Checksum struct { 38 | Sha1 string `json:"sha1,omitempty"` 39 | Md5 string `json:"md5,omitempty"` 40 | Sha256 string `json:"sha256,omitempty"` 41 | } 42 | 43 | func (c *Checksum) IsEmpty() bool { 44 | return c.Md5 == "" && c.Sha1 == "" && c.Sha256 == "" 45 | } 46 | 47 | // If the 'other' checksum matches the current one, return true. 48 | // 'other' checksum may contain regex values for sha1, sha256 and md5. 
49 | func (c *Checksum) IsEqual(other Checksum) (bool, error) { 50 | match, err := regexp.MatchString(other.Md5, c.Md5) 51 | if !match || err != nil { 52 | return false, err 53 | } 54 | match, err = regexp.MatchString(other.Sha1, c.Sha1) 55 | if !match || err != nil { 56 | return false, err 57 | } 58 | match, err = regexp.MatchString(other.Sha256, c.Sha256) 59 | if !match || err != nil { 60 | return false, err 61 | } 62 | 63 | return true, nil 64 | } 65 | 66 | func GetFileChecksums(filePath string, checksumType ...Algorithm) (checksums map[Algorithm]string, err error) { 67 | file, err := os.Open(filePath) 68 | if err != nil { 69 | return 70 | } 71 | defer ioutils.Close(file, &err) 72 | return CalcChecksums(file, checksumType...) 73 | } 74 | 75 | // CalcChecksums calculates all hashes at once using AsyncMultiWriter. The file is therefore read only once. 76 | func CalcChecksums(reader io.Reader, checksumType ...Algorithm) (map[Algorithm]string, error) { 77 | hashes, err := calcChecksums(reader, checksumType...) 78 | if err != nil { 79 | return nil, err 80 | } 81 | results := sumResults(hashes) 82 | return results, nil 83 | } 84 | 85 | // CalcChecksumsBytes calculates hashes like `CalcChecksums`, returns result as bytes 86 | func CalcChecksumsBytes(reader io.Reader, checksumType ...Algorithm) (map[Algorithm][]byte, error) { 87 | hashes, err := calcChecksums(reader, checksumType...) 88 | if err != nil { 89 | return nil, err 90 | } 91 | results := sumResultsBytes(hashes) 92 | return results, nil 93 | } 94 | 95 | func calcChecksums(reader io.Reader, checksumType ...Algorithm) (map[Algorithm]hash.Hash, error) { 96 | hashes := getChecksumByAlgorithm(checksumType...) 97 | var multiWriter io.Writer 98 | pageSize := os.Getpagesize() 99 | sizedReader := bufio.NewReaderSize(reader, pageSize) 100 | var hashWriter []io.Writer 101 | for _, v := range hashes { 102 | hashWriter = append(hashWriter, v) 103 | } 104 | multiWriter = ioutils.AsyncMultiWriter(pageSize, hashWriter...) 
105 | _, err := io.Copy(multiWriter, sizedReader) 106 | if err != nil { 107 | return nil, err 108 | } 109 | return hashes, nil 110 | } 111 | 112 | func sumResults(hashes map[Algorithm]hash.Hash) map[Algorithm]string { 113 | results := map[Algorithm]string{} 114 | for k, v := range hashes { 115 | results[k] = fmt.Sprintf("%x", v.Sum(nil)) 116 | } 117 | return results 118 | } 119 | 120 | func sumResultsBytes(hashes map[Algorithm]hash.Hash) map[Algorithm][]byte { 121 | results := map[Algorithm][]byte{} 122 | for k, v := range hashes { 123 | results[k] = v.Sum(nil) 124 | } 125 | return results 126 | } 127 | 128 | func getChecksumByAlgorithm(checksumType ...Algorithm) map[Algorithm]hash.Hash { 129 | hashes := map[Algorithm]hash.Hash{} 130 | if len(checksumType) == 0 { 131 | for k, v := range algorithmFunc { 132 | hashes[k] = v() 133 | } 134 | return hashes 135 | } 136 | 137 | for _, v := range checksumType { 138 | hashes[v] = algorithmFunc[v]() 139 | } 140 | return hashes 141 | } 142 | 143 | func CalcChecksumDetails(filePath string) (checksum Checksum, err error) { 144 | file, err := os.Open(filePath) 145 | if err != nil { 146 | return 147 | } 148 | defer ioutils.Close(file, &err) 149 | 150 | checksums, err := CalcChecksums(file) 151 | if err != nil { 152 | return Checksum{}, err 153 | } 154 | checksum = Checksum{Md5: checksums[MD5], Sha1: checksums[SHA1], Sha256: checksums[SHA256]} 155 | return 156 | } 157 | 158 | type FileDetails struct { 159 | Checksum Checksum 160 | Size int64 161 | } 162 | 163 | func GetFileDetails(filePath string, includeChecksums bool) (details *FileDetails, err error) { 164 | details = new(FileDetails) 165 | if includeChecksums { 166 | details.Checksum, err = CalcChecksumDetails(filePath) 167 | if err != nil { 168 | return 169 | } 170 | } else { 171 | details.Checksum = Checksum{} 172 | } 173 | 174 | fileInfo, err := os.Stat(filePath) 175 | if err != nil { 176 | return 177 | } 178 | details.Size = fileInfo.Size() 179 | return 180 | } 181 | 
-------------------------------------------------------------------------------- /unarchive/archive_test.go: -------------------------------------------------------------------------------- 1 | package unarchive 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestUnarchive(t *testing.T) { 12 | tests := []string{"zip", "tar", "tar.gz"} 13 | uarchiver := Unarchiver{} 14 | for _, extension := range tests { 15 | t.Run(extension, func(t *testing.T) { 16 | // Create temp directory 17 | tmpDir := t.TempDir() 18 | 19 | // Run unarchive on archive created on Unix 20 | err := runUnarchive(t, uarchiver, "unix."+extension, "archives", filepath.Join(tmpDir, "unix")) 21 | assert.NoError(t, err) 22 | assert.FileExists(t, filepath.Join(tmpDir, "unix", "link")) 23 | assert.FileExists(t, filepath.Join(tmpDir, "unix", "dir", "file")) 24 | 25 | // Run unarchive on archive created on Windows 26 | err = runUnarchive(t, uarchiver, "win."+extension, "archives", filepath.Join(tmpDir, "win")) 27 | assert.NoError(t, err) 28 | assert.FileExists(t, filepath.Join(tmpDir, "win", "link.lnk")) 29 | assert.FileExists(t, filepath.Join(tmpDir, "win", "dir", "file.txt")) 30 | }) 31 | } 32 | } 33 | 34 | var unarchiveSymlinksCases = []struct { 35 | prefix string 36 | expectedFiles []string 37 | }{ 38 | {prefix: "softlink-rel", expectedFiles: []string{filepath.Join("softlink-rel", "a", "softlink-rel"), filepath.Join("softlink-rel", "b", "c", "d", "file")}}, 39 | {prefix: "softlink-cousin", expectedFiles: []string{filepath.Join("a", "b", "softlink-cousin"), filepath.Join("a", "c", "d")}}, 40 | {prefix: "softlink-uncle-file", expectedFiles: []string{filepath.Join("a", "b", "softlink-uncle"), filepath.Join("a", "c")}}, 41 | } 42 | 43 | func TestUnarchiveSymlink(t *testing.T) { 44 | testExtensions := []string{"zip", "tar", "tar.gz"} 45 | uarchiver := Unarchiver{} 46 | for _, extension := range testExtensions { 47 | t.Run(extension, 
func(t *testing.T) { 48 | for _, testCase := range unarchiveSymlinksCases { 49 | t.Run(testCase.prefix, func(t *testing.T) { 50 | // Create temp directory 51 | tmpDir := t.TempDir() 52 | 53 | // Run unarchive 54 | err := runUnarchive(t, uarchiver, testCase.prefix+"."+extension, "archives", tmpDir) 55 | assert.NoError(t, err) 56 | 57 | // Assert the all expected files were extracted 58 | for _, expectedFiles := range testCase.expectedFiles { 59 | assert.FileExists(t, filepath.Join(tmpDir, expectedFiles)) 60 | } 61 | }) 62 | } 63 | }) 64 | } 65 | } 66 | 67 | func TestUnarchiveZipSlip(t *testing.T) { 68 | tests := []struct { 69 | testType string 70 | archives []string 71 | errorSuffix string 72 | }{ 73 | {"rel", []string{"zip", "tar", "tar.gz"}, "illegal path in archive: '../file'"}, 74 | {"abs", []string{"tar", "tar.gz"}, "illegal path in archive: '/tmp/bla/file'"}, 75 | {"softlink-abs", []string{"zip", "tar", "tar.gz"}, "illegal link path in archive: '/tmp/bla/file'"}, 76 | {"softlink-rel", []string{"zip", "tar", "tar.gz"}, "illegal link path in archive: '../../file'"}, 77 | {"softlink-loop", []string{"tar"}, "a link can't lead to an ancestor directory"}, 78 | {"softlink-uncle", []string{"zip", "tar", "tar.gz"}, "a link can't lead to an ancestor directory"}, 79 | {"hardlink-tilde", []string{"tar", "tar.gz"}, "walking hardlink: illegal link path in archive: '~/../../../../../../../../../Users/Shared/sharedFile.txt'"}, 80 | } 81 | 82 | uarchiver := Unarchiver{} 83 | for _, test := range tests { 84 | t.Run(test.testType, func(t *testing.T) { 85 | // Create temp directory 86 | tmpDir := t.TempDir() 87 | 88 | for _, archive := range test.archives { 89 | // Unarchive and make sure an error returns 90 | err := runUnarchive(t, uarchiver, test.testType+"."+archive, "zipslip", tmpDir) 91 | assert.Error(t, err) 92 | assert.Contains(t, err.Error(), test.errorSuffix) 93 | } 94 | }) 95 | } 96 | } 97 | 98 | func TestUnarchiveWithStripComponents(t *testing.T) { 99 | tests := 
[]string{"zip", "tar", "tar.gz"} 100 | uarchiver := Unarchiver{} 101 | uarchiver.StripComponents = 1 102 | for _, extension := range tests { 103 | t.Run(extension, func(t *testing.T) { 104 | // Create temp directory 105 | tmpDir := t.TempDir() 106 | 107 | // Run unarchive on archive created on Unix 108 | err := runUnarchive(t, uarchiver, "strip-components."+extension, "archives", filepath.Join(tmpDir, "unix")) 109 | assert.NoError(t, err) 110 | assert.DirExists(t, filepath.Join(tmpDir, "unix", "nested_folder_1")) 111 | assert.DirExists(t, filepath.Join(tmpDir, "unix", "nested_folder_2")) 112 | 113 | // Run unarchive on archive created on Windows 114 | err = runUnarchive(t, uarchiver, "strip-components."+extension, "archives", filepath.Join(tmpDir, "win")) 115 | assert.NoError(t, err) 116 | assert.DirExists(t, filepath.Join(tmpDir, "win", "nested_folder_1")) 117 | assert.DirExists(t, filepath.Join(tmpDir, "win", "nested_folder_2")) 118 | }) 119 | } 120 | } 121 | 122 | // Test unarchive file with a directory named "." 
in the root directory 123 | func TestUnarchiveDotDir(t *testing.T) { 124 | // Create temp directory 125 | tmpDir := t.TempDir() 126 | 127 | // Run unarchive 128 | err := runUnarchive(t, Unarchiver{}, "dot-dir.tar.gz", "archives", tmpDir+string(os.PathSeparator)) 129 | assert.NoError(t, err) 130 | assert.DirExists(t, filepath.Join(tmpDir, "dir")) 131 | } 132 | 133 | func runUnarchive(t *testing.T, uarchiver Unarchiver, archiveFileName, sourceDir, targetDir string) error { 134 | archivePath := filepath.Join("testdata", sourceDir, archiveFileName) 135 | assert.True(t, IsSupportedArchive(archivePath)) 136 | return uarchiver.Unarchive(filepath.Join("testdata", sourceDir, archiveFileName), archiveFileName, targetDir) 137 | } 138 | -------------------------------------------------------------------------------- /.github/workflows/frogbot-scan-pull-request.yml: -------------------------------------------------------------------------------- 1 | name: "Frogbot Scan Pull Request" 2 | on: 3 | pull_request_target: 4 | types: [ opened, synchronize ] 5 | permissions: 6 | pull-requests: write 7 | contents: read 8 | jobs: 9 | scan-pull-request: 10 | runs-on: ubuntu-latest 11 | # A pull request needs to be approved before Frogbot scans it. Any GitHub user who is associated with the 12 | # "frogbot" GitHub environment can approve the pull request to be scanned. 
13 | environment: frogbot 14 | steps: 15 | - name: Setup Go with cache 16 | uses: jfrog/.github/actions/install-go-with-cache@main 17 | 18 | - uses: jfrog/frogbot@v2 19 | env: 20 | JFROG_CLI_LOG_LEVEL: "DEBUG" 21 | # [Mandatory] 22 | # JFrog platform URL (This functionality requires version 3.29.0 or above of Xray) 23 | JF_URL: ${{ secrets.FROGBOT_URL }} 24 | 25 | # [Mandatory if JF_USER and JF_PASSWORD are not provided] 26 | # JFrog access token with 'read' permissions on Xray service 27 | JF_ACCESS_TOKEN: ${{ secrets.FROGBOT_ACCESS_TOKEN }} 28 | 29 | # [Mandatory] 30 | # The GitHub token is automatically generated for the job 31 | JF_GIT_TOKEN: ${{ secrets.GITHUB_TOKEN }} 32 | 33 | # [Optional, default: https://api.github.com] 34 | # API endpoint to GitHub 35 | # JF_GIT_API_ENDPOINT: https://github.example.com 36 | 37 | # [Optional] 38 | # By default, the Frogbot workflows download the Frogbot executable as well as other tools 39 | # needed from https://releases.jfrog.io 40 | # If the machine that runs Frogbot has no access to the internet, follow these steps to allow the 41 | # executable to be downloaded from an Artifactory instance, which the machine has access to: 42 | # 43 | # 1. Login to the Artifactory UI, with a user who has admin credentials. 44 | # 2. Create a Remote Repository with the following properties set. 45 | # Under the 'Basic' tab: 46 | # Package Type: Generic 47 | # URL: https://releases.jfrog.io 48 | # Under the 'Advanced' tab: 49 | # Uncheck the 'Store Artifacts Locally' option 50 | # 3. Set the value of the 'JF_RELEASES_REPO' variable with the Repository Key you created. 51 | # JF_RELEASES_REPO: "" 52 | 53 | # [Optional] 54 | # Configure the SMTP server to enable Frogbot to send emails with detected secrets in pull request scans. 
55 | # SMTP server URL including the relevant port: (Example: smtp.server.com:8080) 56 | JF_SMTP_SERVER: ${{ secrets.JF_SMTP_SERVER }} 57 | 58 | # [Mandatory if JF_SMTP_SERVER is set] 59 | # The username required for authenticating with the SMTP server. 60 | JF_SMTP_USER: ${{ secrets.JF_SMTP_USER }} 61 | 62 | # [Mandatory if JF_SMTP_SERVER is set] 63 | # The password associated with the username required for authentication with the SMTP server. 64 | JF_SMTP_PASSWORD: ${{ secrets.JF_SMTP_PASSWORD }} 65 | 66 | # [Optional] 67 | # List of comma separated email addresses to receive email notifications about secrets 68 | # detected during pull request scanning. The notification is also sent to the email set 69 | # in the committer git profile regardless of whether this variable is set or not. 70 | JF_EMAIL_RECEIVERS: "eco-system@jfrog.com" 71 | 72 | ########################################################################## 73 | ## If your project uses a 'frogbot-config.yml' file, you can define ## 74 | ## the following variables inside the file, instead of here. ## 75 | ########################################################################## 76 | 77 | # [Mandatory if the two conditions below are met] 78 | # 1. The project uses yarn 2, NuGet or .NET Core to download its dependencies 79 | # 2. The `installCommand` variable isn't set in your frogbot-config.yml file. 80 | # 81 | # The command that installs the project dependencies (e.g "nuget restore") 82 | # JF_INSTALL_DEPS_CMD: "" 83 | 84 | # [Optional, default: "."] 85 | # Relative path to the root of the project in the Git repository 86 | # JF_WORKING_DIR: path/to/project/dir 87 | 88 | # [Optional] 89 | # Xray Watches. Learn more about them here: https://www.jfrog.com/confluence/display/JFROG/Configuring+Xray+Watches 90 | # JF_WATCHES: ,... 91 | 92 | # [Optional] 93 | # JFrog project. 
Learn more about it here: https://www.jfrog.com/confluence/display/JFROG/Projects 94 | # JF_PROJECT: 95 | 96 | # [Optional, default: "FALSE"] 97 | # Displays all existing vulnerabilities, including the ones that were added by the pull request. 98 | # JF_INCLUDE_ALL_VULNERABILITIES: "TRUE" 99 | 100 | # [Optional, default: "TRUE"] 101 | # Fails the Frogbot task if any security issue is found. 102 | # JF_FAIL: "FALSE" 103 | 104 | # [Optional] 105 | # Frogbot will download the project dependencies if they're not cached locally. To download the 106 | # dependencies from a virtual repository in Artifactory, set the name of the repository. There's no 107 | # need to set this value, if it is set in the frogbot-config.yml file. 108 | # JF_DEPS_REPO: "" 109 | 110 | # [Optional, Default: "FALSE"] 111 | # If TRUE, Frogbot creates a single pull request with all the fixes. 112 | # If false, Frogbot creates a separate pull request for each fix. 113 | # JF_GIT_AGGREGATE_FIXES: "FALSE" 114 | 115 | # [Optional, Default: "FALSE"] 116 | # Handle vulnerabilities with fix versions only 117 | # JF_FIXABLE_ONLY: "TRUE" 118 | 119 | # [Optional] 120 | # Set the minimum severity for vulnerabilities that should be fixed and commented on in pull requests 121 | # The following values are accepted: Low, Medium, High or Critical 122 | # JF_MIN_SEVERITY: "" -------------------------------------------------------------------------------- /.github/workflows/frogbot-scan-repository.yml: -------------------------------------------------------------------------------- 1 | name: "Frogbot Scan Repository" 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | # The repository will be scanned once a day at 00:00 GMT. 6 | - cron: "0 0 * * *" 7 | permissions: 8 | contents: write 9 | pull-requests: write 10 | security-events: write 11 | jobs: 12 | scan-repository: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | # The repository scanning will be triggered periodically on the following branches. 
17 | branch: [ "master" ] 18 | steps: 19 | - name: Setup Go with cache 20 | uses: jfrog/.github/actions/install-go-with-cache@main 21 | 22 | - uses: jfrog/frogbot@v2 23 | env: 24 | JFROG_CLI_LOG_LEVEL: "DEBUG" 25 | # [Mandatory] 26 | # JFrog platform URL (This functionality requires version 3.29.0 or above of Xray) 27 | JF_URL: ${{ secrets.FROGBOT_URL }} 28 | 29 | # [Mandatory if JF_USER and JF_PASSWORD are not provided] 30 | # JFrog access token with 'read' permissions on Xray service 31 | JF_ACCESS_TOKEN: ${{ secrets.FROGBOT_ACCESS_TOKEN }} 32 | 33 | # [Mandatory if JF_ACCESS_TOKEN is not provided] 34 | # JFrog username with 'read' permissions for Xray. Must be provided with JF_PASSWORD 35 | # JF_USER: ${{ secrets.JF_USER }} 36 | 37 | # [Mandatory if JF_ACCESS_TOKEN is not provided] 38 | # JFrog password. Must be provided with JF_USER 39 | # JF_PASSWORD: ${{ secrets.JF_PASSWORD }} 40 | 41 | # [Mandatory] 42 | # The GitHub token is automatically generated for the job 43 | JF_GIT_TOKEN: ${{ secrets.GITHUB_TOKEN }} 44 | 45 | # [Mandatory] 46 | # The name of the branch on which Frogbot will perform the scan 47 | JF_GIT_BASE_BRANCH: ${{ matrix.branch }} 48 | 49 | # [Optional, default: https://api.github.com] 50 | # API endpoint to GitHub 51 | # JF_GIT_API_ENDPOINT: https://github.example.com 52 | 53 | # [Optional] 54 | # By default, the Frogbot workflows download the Frogbot executable as well as other tools 55 | # needed from https://releases.jfrog.io 56 | # If the machine that runs Frogbot has no access to the internet, follow these steps to allow the 57 | # executable to be downloaded from an Artifactory instance, which the machine has access to: 58 | # 59 | # 1. Login to the Artifactory UI, with a user who has admin credentials. 60 | # 2. Create a Remote Repository with the following properties set. 
61 | # Under the 'Basic' tab: 62 | # Package Type: Generic 63 | # URL: https://releases.jfrog.io 64 | # Under the 'Advanced' tab: 65 | # Uncheck the 'Store Artifacts Locally' option 66 | # 3. Set the value of the 'JF_RELEASES_REPO' variable with the Repository Key you created. 67 | # JF_RELEASES_REPO: "" 68 | 69 | ########################################################################## 70 | ## If your project uses a 'frogbot-config.yml' file, you can define ## 71 | ## the following variables inside the file, instead of here. ## 72 | ########################################################################## 73 | 74 | # [Optional, default: "."] 75 | # Relative path to the root of the project in the Git repository 76 | # JF_WORKING_DIR: path/to/project/dir 77 | 78 | # [Optional] 79 | # Xray Watches. Learn more about them here: https://www.jfrog.com/confluence/display/JFROG/Configuring+Xray+Watches 80 | # JF_WATCHES: ,... 81 | 82 | # [Optional] 83 | # JFrog project. Learn more about it here: https://www.jfrog.com/confluence/display/JFROG/Projects 84 | # JF_PROJECT: 85 | 86 | # [Optional, default: "TRUE"] 87 | # Fails the Frogbot task if any security issue is found. 88 | # JF_FAIL: "FALSE" 89 | 90 | # [Optional] 91 | # Frogbot will download the project dependencies, if they're not cached locally. To download the 92 | # dependencies from a virtual repository in Artifactory, set the name of the repository. There's no 93 | # need to set this value, if it is set in the frogbot-config.yml file. 94 | # JF_DEPS_REPO: "" 95 | 96 | # [Optional] 97 | # Template for the branch name generated by Frogbot when creating pull requests with fixes. 98 | # The template must include ${BRANCH_NAME_HASH}, to ensure that the generated branch name is unique. 99 | # The template can optionally include the ${IMPACTED_PACKAGE} and ${FIX_VERSION} variables. 
100 | # JF_BRANCH_NAME_TEMPLATE: "frogbot-${IMPACTED_PACKAGE}-${BRANCH_NAME_HASH}" 101 | 102 | # [Optional] 103 | # Template for the commit message generated by Frogbot when creating pull requests with fixes 104 | # The template can optionally include the ${IMPACTED_PACKAGE} and ${FIX_VERSION} variables. 105 | # JF_COMMIT_MESSAGE_TEMPLATE: "Upgrade ${IMPACTED_PACKAGE} to ${FIX_VERSION}" 106 | 107 | # [Optional] 108 | # Template for the pull request title generated by Frogbot when creating pull requests with fixes. 109 | # The template can optionally include the ${IMPACTED_PACKAGE} and ${FIX_VERSION} variables. 110 | # JF_PULL_REQUEST_TITLE_TEMPLATE: "[🐸 Frogbot] Upgrade ${IMPACTED_PACKAGE} to ${FIX_VERSION}" 111 | 112 | # [Optional, Default: "FALSE"] 113 | # If TRUE, Frogbot creates a single pull request with all the fixes. 114 | # If FALSE, Frogbot creates a separate pull request for each fix. 115 | # JF_GIT_AGGREGATE_FIXES: "FALSE" 116 | 117 | # [Optional, Default: "FALSE"] 118 | # Handle vulnerabilities with fix versions only 119 | # JF_FIXABLE_ONLY: "TRUE" 120 | 121 | # [Optional] 122 | # Set the minimum severity for vulnerabilities that should be fixed and commented on in pull requests 123 | # The following values are accepted: Low, Medium, High or Critical 124 | # JF_MIN_SEVERITY: "" 125 | 126 | # [Optional, Default: eco-system+frogbot@jfrog.com] 127 | # Set the email of the commit author 128 | # JF_GIT_EMAIL_AUTHOR: "" -------------------------------------------------------------------------------- /parallel/runner_test.go: -------------------------------------------------------------------------------- 1 | package parallel 2 | 3 | import ( 4 | "crypto/rand" 5 | "errors" 6 | "fmt" 7 | "math/big" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | var errTest = errors.New("some error") 16 | 17 | func TestIsStarted(t *testing.T) { 18 | runner := NewBounedRunner(1, false) 19 | _, err := runner.AddTask(func(i int) 
error { 20 | return nil 21 | }) 22 | assert.NoError(t, err) 23 | runner.Done() 24 | runner.Run() 25 | assert.True(t, runner.IsStarted()) 26 | } 27 | 28 | func TestAddTask(t *testing.T) { 29 | const count = 70 30 | results := make(chan int, 100) 31 | 32 | runner := NewRunner(4, count, false) 33 | var expectedTotal int 34 | var expectedErrorTotal int 35 | for i := 0; i < count; i++ { 36 | expectedTotal += i 37 | if float64(i) > float64(count)/2 { 38 | expectedErrorTotal += i 39 | } 40 | 41 | x := i 42 | _, err := runner.AddTask(func(int) error { 43 | results <- x 44 | n, err := rand.Int(rand.Reader, big.NewInt(50)) 45 | assert.NoError(t, err) 46 | time.Sleep(time.Millisecond * time.Duration(n.Int64())) 47 | if float64(x) > float64(count)/2 { 48 | return fmt.Errorf("second half value %d not counted", x) 49 | } 50 | return nil 51 | }) 52 | assert.NoError(t, err) 53 | } 54 | runner.Done() 55 | runner.Run() 56 | 57 | close(results) 58 | var resultsTotal int 59 | for result := range results { 60 | resultsTotal += result 61 | } 62 | assert.Equal(t, expectedTotal, resultsTotal) 63 | 64 | var errorsTotal int 65 | for k, v := range runner.Errors() { 66 | if v != nil { 67 | errorsTotal += k 68 | } 69 | } 70 | assert.Equal(t, expectedErrorTotal, errorsTotal) 71 | assert.NotZero(t, errorsTotal) 72 | } 73 | 74 | func TestAddTaskWithError(t *testing.T) { 75 | // Create new runner 76 | runner := NewRunner(1, 1, false) 77 | 78 | // Add task with error 79 | var receivedError = new(error) 80 | onError := func(err error) { *receivedError = err } 81 | taskFunc := func(int) error { return errTest } 82 | _, err := runner.AddTaskWithError(taskFunc, onError) 83 | assert.NoError(t, err) 84 | 85 | // Wait for task to finish 86 | runner.Done() 87 | runner.Run() 88 | 89 | // Assert error captured 90 | assert.Equal(t, errTest, *receivedError) 91 | assert.Equal(t, errTest, runner.Errors()[0]) 92 | } 93 | 94 | func TestCancel(t *testing.T) { 95 | // Create new runner 96 | runner := 
NewBounedRunner(1, false) 97 | 98 | // Cancel to prevent receiving another tasks 99 | runner.Cancel(false) 100 | 101 | // Add task and expect error 102 | _, err := runner.AddTask(func(int) error { return nil }) 103 | assert.ErrorContains(t, err, "runner stopped") 104 | } 105 | 106 | func TestForceCancel(t *testing.T) { 107 | // Create new runner 108 | const capacity = 10 109 | runner := NewRunner(1, capacity, true) 110 | // Run tasks 111 | for i := 0; i < capacity; i++ { 112 | taskId := i 113 | _, err := runner.AddTask(func(int) error { 114 | assert.Less(t, taskId, 9) 115 | time.Sleep(100 * time.Millisecond) 116 | return nil 117 | }) 118 | assert.NoError(t, err) 119 | } 120 | var wg sync.WaitGroup 121 | wg.Add(1) 122 | go func() { 123 | defer wg.Done() 124 | runner.Run() 125 | }() 126 | go func() { 127 | time.Sleep(200 * time.Millisecond) 128 | runner.Cancel(true) 129 | }() 130 | wg.Wait() 131 | 132 | assert.InDelta(t, 5, runner.started, 4) 133 | } 134 | 135 | func TestFailFast(t *testing.T) { 136 | // Create new runner with fail-fast 137 | runner := NewBounedRunner(1, true) 138 | 139 | // Add task that returns an error 140 | _, err := runner.AddTask(func(int) error { 141 | return errTest 142 | }) 143 | assert.NoError(t, err) 144 | 145 | // Wait for task to finish 146 | runner.Run() 147 | 148 | // Add another task and expect error 149 | _, err = runner.AddTask(func(int) error { 150 | return nil 151 | }) 152 | assert.ErrorContains(t, err, "runner stopped") 153 | } 154 | 155 | func TestNotifyFinished(t *testing.T) { 156 | // Create new runner 157 | runner := NewBounedRunner(1, false) 158 | runner.SetFinishedNotification(true) 159 | 160 | // Cancel to prevent receiving another tasks 161 | runner.Cancel(false) 162 | <-runner.GetFinishedNotification() 163 | } 164 | 165 | func TestMaxParallel(t *testing.T) { 166 | // Create new runner with capacity of 10 and max parallelism of 3 167 | const capacity = 10 168 | const parallelism = 3 169 | runner := NewRunner(parallelism, 
capacity, false) 170 | 171 | // Run tasks in parallel 172 | for i := 0; i < capacity; i++ { 173 | _, err := runner.AddTask(func(int) error { 174 | // Assert in range between 1 and 3 175 | assert.InDelta(t, 2, runner.ActiveThreads(), 1) 176 | assert.InDelta(t, 2, runner.OpenThreads(), 1) 177 | time.Sleep(100 * time.Millisecond) 178 | return nil 179 | }) 180 | assert.NoError(t, err) 181 | } 182 | 183 | // Wait for tasks to finish 184 | runner.Done() 185 | runner.Run() 186 | assert.Equal(t, uint32(capacity), runner.started) 187 | } 188 | 189 | func TestResetFinishNotificationIfActive(t *testing.T) { 190 | // Create 2 runners 191 | const capacity = 10 192 | const parallelism = 3 193 | runnerOne := NewRunner(parallelism, capacity, false) 194 | runnerOne.SetFinishedNotification(true) 195 | runnerTwo := NewRunner(parallelism, capacity, false) 196 | runnerTwo.SetFinishedNotification(true) 197 | 198 | // Add 10 tasks to runner one. Each task provides tasks to runner two. 199 | for i := 0; i < capacity; i++ { 200 | _, err := runnerOne.AddTask(func(int) error { 201 | time.Sleep(time.Millisecond * 100) 202 | _, err := runnerTwo.AddTask(func(int) error { 203 | time.Sleep(time.Millisecond) 204 | return nil 205 | }) 206 | assert.NoError(t, err) 207 | return nil 208 | }) 209 | assert.NoError(t, err) 210 | } 211 | 212 | // Create a goroutine waiting for the finish notification of the first runner before running "Done". 213 | go func() { 214 | <-runnerOne.GetFinishedNotification() 215 | runnerOne.Done() 216 | }() 217 | 218 | // Start running the second runner in a different goroutine to make it non-blocking. 219 | go func() { 220 | runnerTwo.Run() 221 | }() 222 | 223 | // Run the first runner. This is a blocking method. 224 | runnerOne.Run() 225 | 226 | // Reset runner two's finish notification to ensure we receive it only after all tasks assigned to runner two are completed. 
227 | runnerTwo.ResetFinishNotificationIfActive() 228 | 229 | // Receive the finish notification and ensure that we have truly completed the task. 230 | <-runnerTwo.GetFinishedNotification() 231 | assert.Zero(t, runnerTwo.ActiveThreads()) 232 | runnerTwo.Done() 233 | } 234 | -------------------------------------------------------------------------------- /parallel/bounded_runner_test.go: -------------------------------------------------------------------------------- 1 | package parallel 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "math/big" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | const numOfProducerCycles = 100 15 | const numOfConsumers = 10 16 | 17 | type taskCreatorFunc func(int, chan int) TaskFunc 18 | 19 | func TestSuccessfulFlow(t *testing.T) { 20 | var expectedTotal int 21 | results := make(chan int, numOfProducerCycles) 22 | runner := NewBounedRunner(numOfConsumers, true) 23 | errorsQueue := NewErrorsQueue(1) 24 | var wg sync.WaitGroup 25 | 26 | // Produce 27 | wg.Add(1) 28 | go func() { 29 | defer wg.Done() 30 | expectedTotal = produceTasks(runner, results, errorsQueue, createSuccessfulFlowTaskFunc) 31 | }() 32 | 33 | // Consume 34 | wg.Add(1) 35 | go func() { 36 | defer func() { 37 | wg.Done() 38 | close(results) 39 | }() 40 | runner.Run() 41 | }() 42 | 43 | wg.Wait() 44 | checkResult(expectedTotal, results, t) 45 | } 46 | 47 | func TestStopOperationsOnTaskError(t *testing.T) { 48 | expectedTotal := 1275 49 | results := make(chan int, numOfProducerCycles) 50 | runner := NewBounedRunner(numOfConsumers, true) 51 | errorsQueue := NewErrorsQueue(1) 52 | var wg sync.WaitGroup 53 | 54 | // Produce 55 | wg.Add(1) 56 | go func() { 57 | defer func() { 58 | wg.Done() 59 | }() 60 | produceTasks(runner, results, errorsQueue, createTaskWithErrorFunc) 61 | }() 62 | 63 | // Consume 64 | wg.Add(1) 65 | go func() { 66 | defer func() { 67 | wg.Done() 68 | close(results) 69 | }() 70 | runner.Run() 71 | }() 72 | 73 | wg.Wait() 
74 | err := errorsQueue.GetError().Error() 75 | if !strings.Contains(err, "above 50 going to stop") { 76 | t.Error("Unexpected Error message. Expected: num: 51, above 50 going to stop", "Got:", err) 77 | } 78 | checkResult(expectedTotal, results, t) 79 | } 80 | 81 | func TestContinueOperationsOnTaskError(t *testing.T) { 82 | expectedTotal := 1275 83 | errorsExpectedTotal := 3675 84 | results := make(chan int, numOfProducerCycles) 85 | errorsQueue := NewErrorsQueue(100) 86 | runner := NewBounedRunner(numOfConsumers, false) 87 | var wg sync.WaitGroup 88 | 89 | // Produce 90 | wg.Add(1) 91 | go func() { 92 | defer func() { 93 | wg.Done() 94 | }() 95 | produceTasks(runner, results, errorsQueue, createTaskWithIntAsErrorFunc) 96 | }() 97 | 98 | // Consume 99 | wg.Add(1) 100 | go func() { 101 | defer func() { 102 | wg.Done() 103 | close(results) 104 | }() 105 | runner.Run() 106 | }() 107 | 108 | wg.Wait() 109 | checkResult(expectedTotal, results, t) 110 | checkErrorsResult(errorsExpectedTotal, errorsQueue, t) 111 | } 112 | 113 | func TestFailFastOnTaskError(t *testing.T) { 114 | expectedTotal := 1275 115 | errorsExpectedTotal := 51 116 | results := make(chan int, numOfProducerCycles) 117 | errorsQueue := NewErrorsQueue(100) 118 | runner := NewBounedRunner(numOfConsumers, true) 119 | var wg sync.WaitGroup 120 | 121 | // Produce 122 | wg.Add(1) 123 | go func() { 124 | defer func() { 125 | wg.Done() 126 | }() 127 | produceTasks(runner, results, errorsQueue, createTaskWithIntAsErrorFunc) 128 | }() 129 | 130 | // Consume 131 | wg.Add(1) 132 | go func() { 133 | defer func() { 134 | wg.Done() 135 | close(results) 136 | }() 137 | runner.Run() 138 | }() 139 | 140 | wg.Wait() 141 | checkResult(expectedTotal, results, t) 142 | 143 | // TODO: Fix this test 144 | // This test is fragile since 1 or more goroutines may be executing and failing fast in parallel, 145 | // calling the error handler and increasing the result. So we cannot use accurate comparison. 
146 | // Here we only take care of uo to 1 additional concurrent failfast. 147 | errTotal := 0 148 | for { 149 | err := errorsQueue.GetError() 150 | if err == nil { 151 | break 152 | } 153 | x, _ := strconv.Atoi(err.Error()) 154 | errTotal += x 155 | } 156 | if errTotal < errorsExpectedTotal || errTotal > 2*errorsExpectedTotal+1 { 157 | t.Error("Unexpected results err total. Expected:", errorsExpectedTotal, "Got:", errTotal) 158 | } 159 | } 160 | 161 | func checkErrorsResult(errorsExpectedTotal int, errorsQueue *ErrorsQueue, t *testing.T) { 162 | resultsTotal := 0 163 | for { 164 | err := errorsQueue.GetError() 165 | if err == nil { 166 | break 167 | } 168 | x, _ := strconv.Atoi(err.Error()) 169 | resultsTotal += x 170 | } 171 | if resultsTotal != errorsExpectedTotal { 172 | t.Error("Unexpected results err total. Expected:", errorsExpectedTotal, "Got:", resultsTotal) 173 | } 174 | } 175 | 176 | func checkResult(expectedTotal int, results <-chan int, t *testing.T) { 177 | var resultsTotal int 178 | for result := range results { 179 | resultsTotal += result 180 | } 181 | if resultsTotal != expectedTotal { 182 | t.Error("Unexpected results total. 
Expected:", expectedTotal, "Got:", resultsTotal) 183 | } 184 | } 185 | 186 | func produceTasks(runner *runner, results chan int, errorsQueue *ErrorsQueue, taskCreator taskCreatorFunc) int { 187 | defer runner.Done() 188 | var expectedTotal int 189 | for i := 0; i < numOfProducerCycles; i++ { 190 | taskFunc := taskCreator(i, results) 191 | _, err := runner.AddTaskWithError(taskFunc, errorsQueue.AddErrorNonBlocking) 192 | if err != nil { 193 | break 194 | } 195 | expectedTotal += i 196 | } 197 | return expectedTotal 198 | } 199 | 200 | func createSuccessfulFlowTaskFunc(num int, result chan int) TaskFunc { 201 | return func(threadId int) error { 202 | result <- num 203 | n, err := rand.Int(rand.Reader, big.NewInt(50)) 204 | if err != nil { 205 | return err 206 | } 207 | time.Sleep(time.Millisecond * time.Duration(n.Int64())) 208 | return nil 209 | } 210 | } 211 | 212 | func createTaskWithErrorFunc(num int, result chan int) TaskFunc { 213 | return func(threadId int) error { 214 | if num > 50 { 215 | return fmt.Errorf("num: %d, above 50 going to stop", num) 216 | } 217 | result <- num 218 | n, err := rand.Int(rand.Reader, big.NewInt(50)) 219 | if err != nil { 220 | return err 221 | } 222 | time.Sleep(time.Millisecond * time.Duration(n.Int64())) 223 | return nil 224 | } 225 | } 226 | 227 | func createTaskWithIntAsErrorFunc(num int, result chan int) TaskFunc { 228 | return func(threadId int) error { 229 | if num > 50 { 230 | return fmt.Errorf("%d", num) 231 | } 232 | result <- num 233 | n, err := rand.Int(rand.Reader, big.NewInt(50)) 234 | if err != nil { 235 | return err 236 | } 237 | time.Sleep(time.Millisecond * time.Duration(n.Int64())) 238 | return nil 239 | } 240 | } 241 | 242 | type ErrorsQueue struct { 243 | errorsChan chan (error) 244 | } 245 | 246 | func NewErrorsQueue(size int) *ErrorsQueue { 247 | queueSize := 1 248 | if size > 1 { 249 | queueSize = size 250 | } 251 | return &ErrorsQueue{errorsChan: make(chan error, queueSize)} 252 | } 253 | 254 | func 
(errQueue *ErrorsQueue) AddErrorNonBlocking(err error) { 255 | select { 256 | case errQueue.errorsChan <- err: 257 | default: 258 | return 259 | } 260 | } 261 | 262 | func (errQueue *ErrorsQueue) GetError() error { 263 | select { 264 | case err := <-errQueue.errorsChan: 265 | return err 266 | default: 267 | return nil 268 | } 269 | } 270 | -------------------------------------------------------------------------------- /io/cmd.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "os" 10 | "os/exec" 11 | "regexp" 12 | "strings" 13 | "sync" 14 | ) 15 | 16 | // Executes an external process and returns its output. 17 | // If the returned output is not needed, use the RunCmd function instead , for better performance. 18 | func RunCmdOutput(config CmdConfig) (string, error) { 19 | for k, v := range config.GetEnv() { 20 | if err := os.Setenv(k, v); err != nil { 21 | return "", err 22 | } 23 | } 24 | cmd := config.GetCmd() 25 | if config.GetErrWriter() == nil { 26 | cmd.Stderr = os.Stderr 27 | } else { 28 | cmd.Stderr = config.GetErrWriter() 29 | defer config.GetErrWriter().Close() 30 | } 31 | output, err := cmd.Output() 32 | return string(output), err 33 | } 34 | 35 | // Runs an external process and prints its output to stdout / stderr. 
36 | func RunCmd(config CmdConfig) error { 37 | for k, v := range config.GetEnv() { 38 | if err := os.Setenv(k, v); err != nil { 39 | return err 40 | } 41 | } 42 | 43 | cmd := config.GetCmd() 44 | if config.GetStdWriter() == nil { 45 | cmd.Stdout = os.Stdout 46 | } else { 47 | cmd.Stdout = config.GetStdWriter() 48 | defer config.GetStdWriter().Close() 49 | } 50 | 51 | if config.GetErrWriter() == nil { 52 | cmd.Stderr = os.Stderr 53 | } else { 54 | cmd.Stderr = config.GetErrWriter() 55 | defer config.GetErrWriter().Close() 56 | } 57 | err := cmd.Start() 58 | if err != nil { 59 | return err 60 | } 61 | err = cmd.Wait() 62 | // If the command fails to run or doesn't complete successfully ExitError is returned. 63 | // We would like to return a regular error instead of ExitError, 64 | // because some frameworks (such as codegangsta used by JFrog CLI) automatically exit when this error is returned. 65 | var exitError *exec.ExitError 66 | if errors.As(err, &exitError) { 67 | err = errors.New(err.Error()) 68 | } 69 | 70 | return err 71 | } 72 | 73 | // Executes the command and captures the output. 74 | // Analyze each line to match the provided regex. 75 | // Returns the complete stdout output of the command. 
76 | func RunCmdWithOutputParser(config CmdConfig, prompt bool, regExpStruct ...*CmdOutputPattern) (stdOut string, errorOut string, exitOk bool, err error) { 77 | var wg sync.WaitGroup 78 | for k, v := range config.GetEnv() { 79 | if err = os.Setenv(k, v); err != nil { 80 | return 81 | } 82 | } 83 | 84 | cmd := config.GetCmd() 85 | stdoutReader, stderrReader, err := createCommandReaders(cmd) 86 | if err != nil { 87 | return 88 | } 89 | if err = cmd.Start(); err != nil { 90 | return 91 | } 92 | errChan := make(chan error) 93 | stdoutBuilder := strings.Builder{} 94 | wg.Add(1) 95 | go func() { 96 | defer wg.Done() 97 | for stdoutReader.Scan() { 98 | line, _ := processLine(regExpStruct, stdoutReader.Text(), errChan) 99 | if prompt { 100 | fmt.Fprintf(os.Stderr, line+"\n") 101 | } 102 | stdoutBuilder.WriteString(line) 103 | stdoutBuilder.WriteRune('\n') 104 | } 105 | }() 106 | stderrBuilder := strings.Builder{} 107 | wg.Add(1) 108 | go func() { 109 | defer wg.Done() 110 | for stderrReader.Scan() { 111 | line, hasError := processLine(regExpStruct, stderrReader.Text(), errChan) 112 | if prompt { 113 | fmt.Fprintf(os.Stderr, line+"\n") 114 | } 115 | stderrBuilder.WriteString(line) 116 | stderrBuilder.WriteRune('\n') 117 | if hasError { 118 | break 119 | } 120 | } 121 | }() 122 | 123 | go func() { 124 | wg.Wait() 125 | close(errChan) 126 | }() 127 | 128 | for err = range errChan { 129 | return 130 | } 131 | stdOut = stdoutBuilder.String() 132 | errorOut = stderrBuilder.String() 133 | 134 | err = cmd.Wait() 135 | if err != nil { 136 | return 137 | } 138 | exitOk = true 139 | var exitError *exec.ExitError 140 | if errors.As(err, &exitError) { 141 | // The program has exited with an exit code != 0 142 | exitOk = false 143 | } 144 | return 145 | } 146 | 147 | // Run all the input regExpStruct array on the input stdout or stderr line. 148 | // If an error occurred, add it to the error channel. 
149 | // regExpStruct - Array of command output patterns to process the line 150 | // line - string line from stdout or stderr 151 | // errChan - if an error occurred, add it to this channel 152 | func processLine(regExpStruct []*CmdOutputPattern, line string, errChan chan error) (processedLine string, hasError bool) { 153 | var err error 154 | processedLine = line 155 | for _, regExp := range regExpStruct { 156 | if !regExp.RegExp.MatchString(processedLine) { 157 | continue 158 | } 159 | results := CmdOutputPattern{ 160 | RegExp: regExp.RegExp, 161 | MatchedResults: regExp.RegExp.FindStringSubmatch(processedLine), 162 | Line: processedLine, 163 | ExecFunc: regExp.ExecFunc, 164 | } 165 | processedLine, err = regExp.ExecFunc(&results) 166 | if err != nil { 167 | errChan <- err 168 | hasError = true 169 | break 170 | } 171 | } 172 | return 173 | } 174 | 175 | // Create command stdout and stderr readers. 176 | // The returned readers are automatically closed after the running command exit and shouldn't be closed explicitly. 177 | // cmd - The command to execute 178 | func createCommandReaders(cmd *exec.Cmd) (*bufio.Scanner, *bufio.Scanner, error) { 179 | stdoutReader, err := cmd.StdoutPipe() 180 | if err != nil { 181 | return nil, nil, err 182 | } 183 | 184 | stderrReader, err := cmd.StderrPipe() 185 | if err != nil { 186 | return nil, nil, err 187 | } 188 | 189 | return bufio.NewScanner(stdoutReader), bufio.NewScanner(stderrReader), nil 190 | } 191 | 192 | type CmdConfig interface { 193 | GetCmd() *exec.Cmd 194 | GetEnv() map[string]string 195 | GetStdWriter() io.WriteCloser 196 | GetErrWriter() io.WriteCloser 197 | } 198 | 199 | // RegExp - The regexp that the line will be searched upon. 
200 | // MatchedResults - The slice result that was found by the regexp 201 | // Line - The output line from the external process 202 | // ExecFunc - The function to execute 203 | type CmdOutputPattern struct { 204 | RegExp *regexp.Regexp 205 | MatchedResults []string 206 | Line string 207 | ExecFunc func(pattern *CmdOutputPattern) (string, error) 208 | } 209 | 210 | type Command struct { 211 | Executable string 212 | CmdName string 213 | CmdArgs []string 214 | Dir string 215 | StrWriter io.WriteCloser 216 | ErrWriter io.WriteCloser 217 | } 218 | 219 | func NewCommand(executable, cmdName string, cmdArgs []string) *Command { 220 | return &Command{Executable: executable, CmdName: cmdName, CmdArgs: cmdArgs} 221 | } 222 | 223 | func (config *Command) RunWithOutput() (data []byte, err error) { 224 | cmd := config.GetCmd() 225 | var stdout bytes.Buffer 226 | var stderr bytes.Buffer 227 | cmd.Stdout = &stdout 228 | cmd.Stderr = &stderr 229 | err = cmd.Run() 230 | if err != nil { 231 | return nil, fmt.Errorf("failed running command: '%s %s' with error: %s - %s", 232 | cmd.Dir, 233 | strings.Join(cmd.Args, " "), 234 | err.Error(), 235 | stderr.String(), 236 | ) 237 | } 238 | return stdout.Bytes(), nil 239 | } 240 | 241 | func (config *Command) GetCmd() (cmd *exec.Cmd) { 242 | var cmdStr []string 243 | if config.CmdName != "" { 244 | cmdStr = append(cmdStr, config.CmdName) 245 | } 246 | if len(config.CmdArgs) > 0 { 247 | cmdStr = append(cmdStr, config.CmdArgs...) 248 | } 249 | cmd = exec.Command(config.Executable, cmdStr...) 
250 | cmd.Dir = config.Dir 251 | return 252 | } 253 | 254 | func (config *Command) GetEnv() map[string]string { 255 | return map[string]string{} 256 | } 257 | 258 | func (config *Command) GetStdWriter() io.WriteCloser { 259 | return config.StrWriter 260 | } 261 | 262 | func (config *Command) GetErrWriter() io.WriteCloser { 263 | return config.ErrWriter 264 | } 265 | -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/hardlink-tilde.tar: -------------------------------------------------------------------------------- 1 | hardlink0000644000000000000000000000000000000000000017407 1~/../../../../../../../../../Users/Shared/sharedFile.txtustar00 -------------------------------------------------------------------------------- /unarchive/testdata/zipslip/softlink-loop.tar: -------------------------------------------------------------------------------- 1 | a/b/c/d/e/f/g/h0000644000000000000000000000000000000000000011616 2../../../../../../ustar00a/b/c/d/e/f/g/h/passwd_link0000644000000000000000000000000000000000000016131 2../../../../../../etc/passwdustar00innocentFile0000644000000000000000000000000000000000000012754 2a/passwd_linkustar00 -------------------------------------------------------------------------------- /parallel/runner.go: -------------------------------------------------------------------------------- 1 | package parallel 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | const waitForTasksTime = 10 * time.Second 11 | 12 | type Runner interface { 13 | AddTask(TaskFunc) (int, error) 14 | AddTaskWithError(TaskFunc, OnErrorFunc) (int, error) 15 | Run() 16 | Done() 17 | Cancel(bool) 18 | Errors() map[int]error 19 | ActiveThreads() uint32 20 | OpenThreads() uint32 21 | IsStarted() bool 22 | SetMaxParallel(int) 23 | GetFinishedNotification() chan bool 24 | SetFinishedNotification(bool) 25 | ResetFinishNotificationIfActive() 26 | } 27 | 28 | type TaskFunc func(int) error 
29 | 30 | type OnErrorFunc func(error) 31 | 32 | type task struct { 33 | run TaskFunc 34 | onError OnErrorFunc 35 | num uint32 36 | } 37 | 38 | type runner struct { 39 | // Tasks waiting to be executed. 40 | tasks chan *task 41 | // Tasks counter, used to give each task an identifier (task.num). 42 | taskId uint32 43 | // True when Cancel was invoked 44 | cancel atomic.Bool 45 | // Used to make sure that cancel is called only once. 46 | cancelOnce sync.Once 47 | // Used to make sure that done is called only once. 48 | doneOnce sync.Once 49 | // The maximum number of threads running in parallel. 50 | maxParallel int 51 | // If true, the runner will be cancelled on the first error thrown from a task. 52 | failFast bool 53 | // Indicates that the runner received some tasks and started executing them. 54 | started uint32 55 | // A WaitGroup that waits for all the threads to close. 56 | threadsWaitGroup sync.WaitGroup 57 | // Threads counter, used to give each thread an identifier (threadId). 58 | threadCount atomic.Uint32 59 | // The number of open threads. 60 | openThreads atomic.Uint32 61 | // A lock on openThreads. 62 | openThreadsLock sync.Mutex 63 | // The number of threads currently running tasks. 64 | activeThreads atomic.Uint32 65 | // The number of tasks in the queue. 66 | totalTasksInQueue atomic.Uint32 67 | // Indicate that the runner has finished. 68 | finishedNotifier chan bool 69 | // Indicates that the finish channel is closed. 70 | finishedNotifierChannelClosed bool 71 | // A lock for the finishedNotifier check. 72 | finishedNotifierLock sync.Mutex 73 | // A flag that allows receiving a notification through a channel, when the runner finishes executing all the tasks. 74 | finishedNotificationEnabled bool 75 | // A map of errors keyed by threadId. 76 | errors map[int]error 77 | // A lock on the errors map. 
78 | errorsLock sync.Mutex 79 | } 80 | 81 | // Create a new capacity runner - a runner we can add tasks to without blocking as long as the capacity is not reached. 82 | // maxParallel - number of go routines for task processing, maxParallel always will be a positive number. 83 | // acceptBeforeBlocking - number of tasks that can be added until a free processing goroutine is needed. 84 | // failFast - is set to true the will stop on first error. 85 | func NewRunner(maxParallel int, capacity uint, failFast bool) *runner { 86 | consumers := maxParallel 87 | if consumers < 1 { 88 | consumers = 1 89 | } 90 | if capacity < 1 { 91 | capacity = 1 92 | } 93 | r := &runner{ 94 | finishedNotifier: make(chan bool, 1), 95 | maxParallel: consumers, 96 | failFast: failFast, 97 | cancel: atomic.Bool{}, 98 | tasks: make(chan *task, capacity), 99 | } 100 | r.errors = make(map[int]error) 101 | return r 102 | } 103 | 104 | // Create a new single capacity runner - a runner we can only add tasks to as long as there is a free goroutine in the 105 | // Run() loop to handle it. 106 | // maxParallel - number of go routines for task processing, maxParallel always will be a positive number. 107 | // failFast - if set to true the runner will stop on first error. 108 | func NewBounedRunner(maxParallel int, failFast bool) *runner { 109 | return NewRunner(maxParallel, 1, failFast) 110 | } 111 | 112 | // Add a task to the producer channel, in case of cancellation event (caused by @Cancel()) will return non nil error. 113 | func (r *runner) AddTask(t TaskFunc) (int, error) { 114 | return r.addTask(t, nil) 115 | } 116 | 117 | // t - the actual task which will be performed by the consumer. 118 | // onError - execute on the returned error while running t 119 | // Return the task number assigned to t. 
Useful to collect errors from the errors map (see @Errors())
func (r *runner) AddTaskWithError(t TaskFunc, errorHandler OnErrorFunc) (int, error) {
	return r.addTask(t, errorHandler)
}

// Queues a task for execution, together with an optional error handler.
// Returns the task number assigned to the task, or an error if the runner
// was already cancelled.
func (r *runner) addTask(t TaskFunc, errorHandler OnErrorFunc) (int, error) {
	nextCount := atomic.AddUint32(&r.taskId, 1)
	newTask := &task{run: t, num: nextCount - 1, onError: errorHandler}

	if r.cancel.Load() {
		return -1, errors.New("runner stopped")
	}
	r.totalTasksInQueue.Add(1)
	r.tasks <- newTask
	return int(newTask.num), nil
}

// Run r.maxParallel go routines in order to consume all the tasks
// If a task returns an error and failFast is on all goroutines will stop and the runner will be notified.
// Notice: Run() is a blocking operation.
func (r *runner) Run() {
	if r.finishedNotificationEnabled {
		// Watchdog: if no task started within waitForTasksTime, report the
		// runner as finished even though Run is still blocked.
		go func() {
			time.Sleep(waitForTasksTime)
			if !r.IsStarted() {
				r.notifyFinished()
			}
		}()
	}

	// Spawn the worker goroutines, then block until they all exit.
	for workers := 0; workers < r.maxParallel; workers++ {
		r.addThread()
	}
	r.threadsWaitGroup.Wait()
}

// Done is used to notify that no more tasks will be produced.
func (r *runner) Done() {
	r.doneOnce.Do(func() {
		close(r.tasks)
	})
}

// GetFinishedNotification returns the finishedNotifier channel, which notifies when the runner is done.
// In order to use the finishedNotifier channel, you must set the finishedNotificationEnabled variable.
func (r *runner) GetFinishedNotification() chan bool {
	return r.finishedNotifier
}

// IsStarted is true when a task was executed, false otherwise.
170 | func (r *runner) IsStarted() bool { 171 | return r.started > 0 172 | } 173 | 174 | // Cancel stops the Runner from getting new tasks and empties the tasks queue. 175 | // No new tasks will be executed, and tasks that already started will continue running and won't be interrupted. 176 | // If this Runner is already cancelled, then this function will do nothing. 177 | // force - If true, pending tasks in the queue will not be handled. 178 | func (r *runner) Cancel(force bool) { 179 | // No more adding tasks 180 | r.cancel.Store(true) 181 | if force { 182 | r.Done() 183 | } 184 | r.cancelOnce.Do(func() { 185 | // Consume all tasks left 186 | for len(r.tasks) > 0 { 187 | <-r.tasks 188 | } 189 | if r.finishedNotificationEnabled { 190 | r.notifyFinished() 191 | } 192 | }) 193 | } 194 | 195 | // Errors Returns a map of errors keyed by the task number 196 | func (r *runner) Errors() map[int]error { 197 | return r.errors 198 | } 199 | 200 | // OpenThreads returns the number of open threads (including idle threads). 201 | func (r *runner) OpenThreads() uint32 { 202 | return r.openThreads.Load() 203 | } 204 | 205 | func (r *runner) ActiveThreads() uint32 { 206 | return r.activeThreads.Load() 207 | } 208 | 209 | func (r *runner) SetFinishedNotification(toEnable bool) { 210 | r.finishedNotificationEnabled = toEnable 211 | } 212 | 213 | // Recreates the finish notification channel. 214 | // This method helps manage a scenario involving two runners: "1" assigns tasks to "2". 215 | // Runner "2" might occasionally encounter periods without assigned tasks. 216 | // As a result, the finish notification for "2" might be triggered. 217 | // To tackle this issue, use the ResetFinishNotificationIfActive after all tasks assigned to "1" have been completed. 
218 | func (r *runner) ResetFinishNotificationIfActive() { 219 | r.finishedNotifierLock.Lock() 220 | defer r.finishedNotifierLock.Unlock() 221 | 222 | // If no active threads, don't reset 223 | if r.activeThreads.Load() == 0 && r.totalTasksInQueue.Load() == 0 || r.cancel.Load() { 224 | return 225 | } 226 | 227 | r.finishedNotifier = make(chan bool, 1) 228 | r.finishedNotifierChannelClosed = false 229 | } 230 | 231 | func (r *runner) SetMaxParallel(newVal int) { 232 | if newVal < 1 { 233 | newVal = 1 234 | } 235 | if newVal == r.maxParallel { 236 | return 237 | } 238 | if newVal > r.maxParallel { 239 | for i := 0; i < newVal-r.maxParallel; i++ { 240 | r.addThread() 241 | } 242 | } 243 | // In case the number of threads is reduced, we set the new value to maxParallel, and each thread that finishes his 244 | // task checks if there are more open threads than maxParallel. If so, it kills itself. 245 | r.maxParallel = newVal 246 | } 247 | 248 | func (r *runner) addThread() { 249 | r.threadsWaitGroup.Add(1) 250 | nextThreadId := r.threadCount.Add(1) - 1 251 | go func(threadId int) { 252 | defer r.threadsWaitGroup.Done() 253 | r.openThreadsLock.Lock() 254 | r.openThreads.Add(1) 255 | r.openThreadsLock.Unlock() 256 | 257 | // Keep on taking tasks from the queue. 258 | for t := range r.tasks { 259 | // Increase the total of active threads. 260 | r.activeThreads.Add(1) 261 | atomic.AddUint32(&r.started, 1) 262 | // Run the task. 263 | e := t.run(threadId) 264 | // Decrease the total of active threads. 265 | r.activeThreads.Add(^uint32(0)) 266 | // Decrease the total of in progress tasks. 267 | r.totalTasksInQueue.Add(^uint32(0)) 268 | if r.finishedNotificationEnabled { 269 | r.finishedNotifierLock.Lock() 270 | // Notify that the runner has finished its job. 
271 | if r.activeThreads.Load() == 0 && r.totalTasksInQueue.Load() == 0 { 272 | r.notifyFinished() 273 | } 274 | r.finishedNotifierLock.Unlock() 275 | } 276 | 277 | if e != nil { 278 | if t.onError != nil { 279 | t.onError(e) 280 | } 281 | 282 | // Save the error in the errors map. 283 | r.errorsLock.Lock() 284 | r.errors[int(t.num)] = e 285 | r.errorsLock.Unlock() 286 | 287 | if r.failFast { 288 | r.Cancel(false) 289 | break 290 | } 291 | } 292 | 293 | r.openThreadsLock.Lock() 294 | // If the total of open threads is larger than the maximum (maxParallel), then this thread should be closed. 295 | if int(r.openThreads.Load()) > r.maxParallel { 296 | r.openThreads.Add(^uint32(0)) 297 | r.openThreadsLock.Unlock() 298 | break 299 | } 300 | r.openThreadsLock.Unlock() 301 | } 302 | }(int(nextThreadId)) 303 | } 304 | 305 | func (r *runner) notifyFinished() { 306 | if !r.finishedNotifierChannelClosed { 307 | r.finishedNotifier <- true 308 | r.finishedNotifierChannelClosed = true 309 | close(r.finishedNotifier) 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /unarchive/archive.go: -------------------------------------------------------------------------------- 1 | package unarchive 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/jfrog/archiver/v3" 12 | 13 | "github.com/jfrog/gofrog/datastructures" 14 | ) 15 | 16 | type Unarchiver struct { 17 | BypassInspection bool 18 | StripComponents int 19 | } 20 | 21 | var supportedArchives = []archiver.ExtensionChecker{ 22 | &archiver.TarBrotli{}, &archiver.TarBz2{}, &archiver.TarGz{}, &archiver.TarLz4{}, &archiver.TarSz{}, &archiver.TarXz{}, &archiver.TarZstd{}, 23 | &archiver.Rar{}, &archiver.Tar{}, &archiver.Zip{}, &archiver.Brotli{}, &archiver.Gz{}, &archiver.Bz2{}, &archiver.Lz4{}, &archiver.Snappy{}, 24 | &archiver.Xz{}, &archiver.Zstd{}, 25 | } 26 | 27 | func IsSupportedArchive(filePath string) bool { 
28 | archive, err := archiver.ByExtension(filePath) 29 | if err != nil { 30 | return false 31 | } 32 | _, ok := archive.(archiver.Unarchiver) 33 | return ok 34 | } 35 | 36 | // The 'archiver' dependency includes an API called 'Unarchive' to extract archive files. This API uses the archive file 37 | // extension to determine the archive type. 38 | // We therefore need to use the file name as it was in Artifactory, and not the file name which was downloaded. To achieve this, 39 | // we added a new implementation of the 'Unarchive' func and use it instead of the default one. 40 | // archivePath - Absolute or relative path to the archive, without the file name 41 | // archiveName - The archive file name 42 | // destinationPath - The extraction destination directory 43 | func (u *Unarchiver) Unarchive(archivePath, archiveName, destinationPath string) error { 44 | archive, err := u.byExtension(archiveName) 45 | if err != nil { 46 | return err 47 | } 48 | unarchiver, ok := archive.(archiver.Unarchiver) 49 | if !ok { 50 | return fmt.Errorf("format specified by source filename is not an archive format: " + archiveName) 51 | } 52 | if !u.BypassInspection { 53 | if err = inspectArchive(archive, archivePath, destinationPath); err != nil { 54 | return err 55 | } 56 | } 57 | 58 | return unarchiver.Unarchive(archivePath, destinationPath) 59 | } 60 | 61 | // Instead of using 'archiver.byExtension' that by default sets OverwriteExisting to false, we implement our own. 
62 | func (u *Unarchiver) byExtension(filename string) (interface{}, error) { 63 | var ec interface{} 64 | for _, c := range supportedArchives { 65 | if err := c.CheckExt(filename); err == nil { 66 | ec = c 67 | break 68 | } 69 | } 70 | switch ec.(type) { 71 | case *archiver.Rar: 72 | archiveInstance := archiver.NewRar() 73 | archiveInstance.OverwriteExisting = true 74 | archiveInstance.StripComponents = u.StripComponents 75 | return archiveInstance, nil 76 | case *archiver.Tar: 77 | archiveInstance := archiver.NewTar() 78 | archiveInstance.OverwriteExisting = true 79 | archiveInstance.StripComponents = u.StripComponents 80 | return archiveInstance, nil 81 | case *archiver.TarBrotli: 82 | archiveInstance := archiver.NewTarBrotli() 83 | archiveInstance.OverwriteExisting = true 84 | archiveInstance.StripComponents = u.StripComponents 85 | return archiveInstance, nil 86 | case *archiver.TarBz2: 87 | archiveInstance := archiver.NewTarBz2() 88 | archiveInstance.OverwriteExisting = true 89 | archiveInstance.StripComponents = u.StripComponents 90 | return archiveInstance, nil 91 | case *archiver.TarGz: 92 | archiveInstance := archiver.NewTarGz() 93 | archiveInstance.OverwriteExisting = true 94 | archiveInstance.StripComponents = u.StripComponents 95 | return archiveInstance, nil 96 | case *archiver.TarLz4: 97 | archiveInstance := archiver.NewTarLz4() 98 | archiveInstance.OverwriteExisting = true 99 | archiveInstance.StripComponents = u.StripComponents 100 | return archiveInstance, nil 101 | case *archiver.TarSz: 102 | archiveInstance := archiver.NewTarSz() 103 | archiveInstance.OverwriteExisting = true 104 | archiveInstance.StripComponents = u.StripComponents 105 | return archiveInstance, nil 106 | case *archiver.TarXz: 107 | archiveInstance := archiver.NewTarXz() 108 | archiveInstance.OverwriteExisting = true 109 | archiveInstance.StripComponents = u.StripComponents 110 | return archiveInstance, nil 111 | case *archiver.TarZstd: 112 | archiveInstance := 
archiver.NewTarZstd() 113 | archiveInstance.OverwriteExisting = true 114 | archiveInstance.StripComponents = u.StripComponents 115 | return archiveInstance, nil 116 | case *archiver.Zip: 117 | archiveInstance := archiver.NewZip() 118 | archiveInstance.OverwriteExisting = true 119 | archiveInstance.StripComponents = u.StripComponents 120 | return archiveInstance, nil 121 | case *archiver.Gz: 122 | return archiver.NewGz(), nil 123 | case *archiver.Bz2: 124 | return archiver.NewBz2(), nil 125 | case *archiver.Lz4: 126 | return archiver.NewLz4(), nil 127 | case *archiver.Snappy: 128 | return archiver.NewSnappy(), nil 129 | case *archiver.Xz: 130 | return archiver.NewXz(), nil 131 | case *archiver.Zstd: 132 | return archiver.NewZstd(), nil 133 | } 134 | return nil, fmt.Errorf("format unrecognized by filename: %s", filename) 135 | } 136 | 137 | // Make sure the archive is free from Zip Slip and Zip symlinks attacks 138 | func inspectArchive(archive interface{}, localArchivePath, destinationDir string) error { 139 | // If the destination directory ends with a slash, delete it. 140 | // This is necessary to handle a situation where the entry path might be at the root of the destination directory, 141 | // but in such a case "/" is not a prefix of "". 142 | destinationDir = strings.TrimSuffix(destinationDir, string(os.PathSeparator)) 143 | walker, ok := archive.(archiver.Walker) 144 | if !ok { 145 | return fmt.Errorf("couldn't inspect archive: " + localArchivePath) 146 | } 147 | 148 | uplinksValidator := newUplinksValidator() 149 | err := walker.Walk(localArchivePath, func(archiveEntry archiver.File) error { 150 | header, err := extractArchiveEntryHeader(archiveEntry) 151 | if err != nil { 152 | return err 153 | } 154 | pathInArchive := getPathInArchive(destinationDir, "", header.EntryPath) 155 | if !strings.HasPrefix(pathInArchive, destinationDir) { 156 | return fmt.Errorf( 157 | "illegal path in archive: '%s'. 
To prevent Zip Slip exploit, the path can't lead to an entry outside '%s'", 158 | header.EntryPath, destinationDir) 159 | } 160 | if (archiveEntry.Mode()&os.ModeSymlink) != 0 || len(header.TargetLink) > 0 { 161 | var targetLink string 162 | if targetLink, err = checkSymlinkEntry(header, archiveEntry, destinationDir); err != nil { 163 | return err 164 | } 165 | uplinksValidator.addTargetLink(pathInArchive, targetLink) 166 | } 167 | uplinksValidator.addEntryFile(pathInArchive, archiveEntry.IsDir()) 168 | return err 169 | }) 170 | if err != nil { 171 | return err 172 | } 173 | return uplinksValidator.ensureNoUplinkDirs() 174 | } 175 | 176 | // Make sure the extraction path of the symlink entry target is under the destination dir 177 | func checkSymlinkEntry(header *archiveHeader, archiveEntry archiver.File, destinationDir string) (string, error) { 178 | targetLinkPath := header.TargetLink 179 | if targetLinkPath == "" { 180 | // The link destination path is not always in the archive header 181 | // In that case, we will look at the link content to get the link destination path 182 | content, err := io.ReadAll(archiveEntry.ReadCloser) 183 | if err != nil { 184 | return "", err 185 | } 186 | targetLinkPath = string(content) 187 | } 188 | 189 | targetPathInArchive := getPathInArchive(destinationDir, filepath.Dir(header.EntryPath), targetLinkPath) 190 | if !strings.HasPrefix(targetPathInArchive, destinationDir) { 191 | return "", fmt.Errorf( 192 | "illegal link path in archive: '%s'. 
To prevent Zip Slip Symlink exploit, the path can't lead to an entry outside '%s'", 193 | targetLinkPath, destinationDir) 194 | } 195 | 196 | return targetPathInArchive, nil 197 | } 198 | 199 | // Get the path in archive of the entry or the target link 200 | func getPathInArchive(destinationDir, entryDirInArchive, pathInArchive string) string { 201 | // If pathInArchive starts with '/' and we are on Windows, the path is illegal 202 | pathInArchive = strings.TrimSpace(pathInArchive) 203 | if os.IsPathSeparator('\\') && strings.HasPrefix(pathInArchive, "/") { 204 | return "" 205 | } 206 | 207 | pathInArchive = filepath.Clean(pathInArchive) 208 | if !filepath.IsAbs(pathInArchive) { 209 | // If path is relative, concatenate it to the destination dir 210 | pathInArchive = filepath.Join(destinationDir, entryDirInArchive, pathInArchive) 211 | } 212 | return pathInArchive 213 | } 214 | 215 | // Extract the header of the archive entry 216 | func extractArchiveEntryHeader(f archiver.File) (*archiveHeader, error) { 217 | headerBytes, err := json.Marshal(f.Header) 218 | if err != nil { 219 | return nil, err 220 | } 221 | archiveHeader := &archiveHeader{} 222 | err = json.Unmarshal(headerBytes, archiveHeader) 223 | return archiveHeader, err 224 | } 225 | 226 | type archiveHeader struct { 227 | EntryPath string `json:"Name,omitempty"` 228 | TargetLink string `json:"Linkname,omitempty"` 229 | } 230 | 231 | // This validator blocks the option to extract an archive with a link to an ancestor directory. 232 | // An ancestor directory is a directory located above the symlink in the hierarchy of the extraction dir, but not necessarily a direct ancestor. 233 | // For example, a sibling of a parent is an ancestor directory. 234 | // The purpose of the uplinksValidator is to prevent directories loop in the file system during extraction. 
// uplinksValidator tracks links and file entries seen during archive
// inspection, to detect links that point at ancestor directories.
type uplinksValidator struct {
	entryFiles        *datastructures.Set[string]
	targetParentLinks map[string]string
}

func newUplinksValidator() *uplinksValidator {
	return &uplinksValidator{
		// Set of all entries that are not directories in the archive
		entryFiles: datastructures.MakeSet[string](),
		// Map of all links in the archive pointing to an ancestor entry
		targetParentLinks: make(map[string]string),
	}
}

// addTargetLink records a link whose target may be an ancestor of the link.
// A target with fewer path separators than the link path sits higher in the
// extraction-dir hierarchy, so it is treated as a potential ancestor.
func (lv *uplinksValidator) addTargetLink(pathInArchive, targetLink string) {
	if strings.Count(targetLink, string(filepath.Separator)) < strings.Count(pathInArchive, string(filepath.Separator)) {
		// Add the target link only if it is an ancestor
		lv.targetParentLinks[pathInArchive] = targetLink
	}
}

// addEntryFile records a non-directory archive entry.
func (lv *uplinksValidator) addEntryFile(entryFile string, isDir bool) {
	if !isDir {
		// Add the entry only if it is not a directory
		lv.entryFiles.Add(entryFile)
	}
}

// Iterate over all links pointing to an ancestor directories and files.
// If a targetParentLink does not exist in the entryFiles list, it is a directory and therefore return an error.
func (lv *uplinksValidator) ensureNoUplinkDirs() error {
	for pathInArchive, targetLink := range lv.targetParentLinks {
		if lv.entryFiles.Exists(targetLink) {
			// Target link to a file
			continue
		}
		// Target link to a directory
		return fmt.Errorf(
			"illegal target link path in archive: '%s' -> '%s'. To prevent Zip Slip symlink exploit, a link can't lead to an ancestor directory",
			pathInArchive, targetLink)
	}
	return nil
}
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------