├── .gitignore ├── go.mod ├── cmd └── discover │ ├── cli │ ├── version.go │ └── discover.go │ └── main.go ├── pkg ├── common │ ├── utils.go │ └── types.go ├── parser │ ├── addon │ │ ├── utils.go │ │ ├── spdx.go │ │ ├── cdx.go │ │ ├── verify.go │ │ ├── addon.go │ │ └── operations.go │ └── dockerfile │ │ └── docker.go ├── engine │ └── core.go └── imagefs │ └── imagefs.go ├── Makefile ├── CONTRIBUTING.md ├── README.md ├── doc └── new-scenarios.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | orion 8 | 9 | # Test binary, built with `go test -c` 10 | *.test 11 | sample-dockerfiles 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Dependency directories (remove the comment below to include it) 17 | # vendor/ 18 | 19 | results.json 20 | results.spdx 21 | Makefile-lint 22 | my-sample-dockerfiles 23 | tapestry-discover -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tap8stry/orion 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/CycloneDX/cyclonedx-go v0.4.0 7 | github.com/docker/docker v20.10.9+incompatible // indirect 8 | github.com/google/go-containerregistry v0.6.0 9 | github.com/moby/buildkit v0.9.0 10 | github.com/opencontainers/image-spec v1.0.2 // indirect 11 | github.com/peterbourgon/ff/v3 v3.1.0 12 | github.com/pkg/errors v0.9.1 13 | go.hein.dev/go-version v0.1.0 14 | golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 15 | golang.org/x/mod v0.5.0 16 | k8s.io/release v0.11.0 17 | ) 18 | -------------------------------------------------------------------------------- /cmd/discover/cli/version.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM 
Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | 17 | package cli 18 | 19 | import ( 20 | "context" 21 | "flag" 22 | "fmt" 23 | 24 | "github.com/peterbourgon/ff/v3/ffcli" 25 | goVersion "go.hein.dev/go-version" 26 | ) 27 | 28 | var ( 29 | shortened = false 30 | version = "dev" 31 | commit = "none" 32 | date = "unknown" 33 | output = "json" 34 | ) 35 | 36 | //Version : 37 | func Version() *ffcli.Command { 38 | var ( 39 | flagset = flag.NewFlagSet("orion version", flag.ExitOnError) 40 | ) 41 | return &ffcli.Command{ 42 | Name: "version", 43 | ShortUsage: "orion version", 44 | ShortHelp: "Prints the orion version", 45 | FlagSet: flagset, 46 | Exec: func(ctx context.Context, args []string) error { 47 | resp := goVersion.FuncWithOutput(shortened, version, commit, date, output) 48 | fmt.Print(resp) 49 | return nil 50 | }, 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /cmd/discover/main.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | 17 | package main 18 | 19 | import ( 20 | "context" 21 | "flag" 22 | "fmt" 23 | "os" 24 | 25 | "github.com/peterbourgon/ff/v3/ffcli" 26 | "github.com/tap8stry/orion/cmd/discover/cli" 27 | ) 28 | 29 | var ( 30 | rootFlagSet = flag.NewFlagSet("orion", flag.ExitOnError) 31 | ) 32 | 33 | func main() { 34 | root := &ffcli.Command{ 35 | ShortUsage: "orion [flags] ", 36 | FlagSet: rootFlagSet, 37 | Subcommands: []*ffcli.Command{ 38 | cli.Discover(), 39 | // Version 40 | cli.Version()}, 41 | Exec: func(context.Context, []string) error { 42 | return flag.ErrHelp 43 | }, 44 | } 45 | 46 | if err := root.Parse(os.Args[1:]); err != nil { 47 | printErrAndExit(err) 48 | } 49 | 50 | if err := root.Run(context.Background()); err != nil { 51 | printErrAndExit(err) 52 | } 53 | } 54 | 55 | func printErrAndExit(err error) { 56 | fmt.Fprintf(os.Stderr, "error: %v\n", err) 57 | os.Exit(1) 58 | } 59 | -------------------------------------------------------------------------------- /pkg/common/utils.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | 17 | package common 18 | 19 | import ( 20 | "fmt" 21 | "io/ioutil" 22 | "os" 23 | "path/filepath" 24 | "regexp" 25 | "strings" 26 | ) 27 | 28 | // SearchFiles returns matched file patterns or nil if none found 29 | func SearchFiles(repoDir, pattern string) []string { 30 | matchedFiles := []string{} 31 | filepath.Walk(repoDir, func(path string, f os.FileInfo, _ error) error { 32 | if f != nil && !f.IsDir() { 33 | r, err := regexp.MatchString(pattern, f.Name()) 34 | if err == nil && r { 35 | matchedFiles = append(matchedFiles, path) 36 | } 37 | } 38 | return nil 39 | }) 40 | return matchedFiles 41 | } 42 | 43 | // TrimQuoteMarks returns a string with its quotation marks removed 44 | func TrimQuoteMarks(value string) string { 45 | str := value 46 | if (strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"")) || 47 | (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { 48 | str = value[1 : len(value)-1] 49 | } 50 | return str 51 | } 52 | 53 | func SaveFile(filename string, data []byte) { 54 | if len(data) > 0 { 55 | err := ioutil.WriteFile(filename, data, 0644) 56 | if err != nil { 57 | fmt.Printf("\nerror writing data to %q: %s", filename, err.Error()) 58 | return 59 | } 60 | fmt.Printf("\nresults saved to: %q", filename) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VERSION=$(shell git describe --tags --always) 2 | COMMIT=$(shell git 
rev-parse HEAD) 3 | BUILD=$(shell date +%FT%T%z) 4 | PKG=github.com/tap8stry/orion/cmd/discover/cli 5 | 6 | LDFLAGS="-X $(PKG).version=$(VERSION) -X $(PKG).commit=$(COMMIT) -X $(PKG).date=$(BUILD)" 7 | 8 | .PHONY: all 9 | all: setup dep test cover fmt lint ci build 10 | 11 | .PHONY: setup 12 | setup: ## Install all the build and lint dependencies 13 | go get -u github.com/alecthomas/gometalinter 14 | go get -u golang.org/x/tools/cmd/cover 15 | go get -u github.com/golang/dep/cmd/dep 16 | gometalinter --install --update 17 | @$(MAKE) dep 18 | 19 | .PHONY: dep 20 | dep: ## Run dep ensure and prune 21 | dep ensure 22 | dep prune 23 | 24 | .PHONY: test 25 | test: ## Run all the tests 26 | echo 'mode: atomic' > coverage.txt && go test -covermode=atomic -coverprofile=coverage.txt -v -race -timeout=30s ./... 27 | 28 | 29 | .PHONY: cover 30 | cover: test ## Run all the tests and opens the coverage report 31 | go tool cover -html=coverage.txt 32 | 33 | .PHONY: fmt 34 | fmt: ## Run goimports on all go files 35 | find . -name '*.go' -not -wholename './vendor/*' | while read -r file; do goimports -w "$$file"; done 36 | 37 | .PHONY: lint 38 | lint: ## Run all the linters 39 | gometalinter --vendor --disable-all \ 40 | --enable=deadcode \ 41 | --enable=ineffassign \ 42 | --enable=gosimple \ 43 | --enable=staticcheck \ 44 | --enable=gofmt \ 45 | --enable=goimports \ 46 | --enable=misspell \ 47 | --enable=errcheck \ 48 | --enable=vet \ 49 | --enable=vetshadow \ 50 | --deadline=10m \ 51 | ./... 
52 | 53 | .PHONY: ci 54 | ci: lint test ## Run all the tests and code checks 55 | 56 | .PHONY: build 57 | build: ## Build a version 58 | go build -ldflags ${LDFLAGS} -o orion cmd/discover/main.go 59 | 60 | .PHONY: clean 61 | clean: ## Remove temporary files 62 | go clean 63 | 64 | # Absolutely awesome: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html 65 | .PHONY: help 66 | help: 67 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 68 | 69 | .DEFAULT_GOAL := build 70 | -------------------------------------------------------------------------------- /cmd/discover/cli/discover.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | 17 | package cli 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "flag" 23 | "fmt" 24 | "strings" 25 | 26 | "github.com/peterbourgon/ff/v3/ffcli" 27 | "github.com/pkg/errors" 28 | "github.com/tap8stry/orion/pkg/common" 29 | "github.com/tap8stry/orion/pkg/engine" 30 | goVersion "go.hein.dev/go-version" 31 | ) 32 | 33 | //Discover : 34 | func Discover() *ffcli.Command { 35 | var ( 36 | flagset = flag.NewFlagSet("discover", flag.ExitOnError) 37 | dockerfile = flagset.String("f", "", "dockerfile pathname") 38 | image = flagset.String("i", "", "image name:tag") 39 | namespace = flagset.String("n", "", "SBOM namespace") 40 | outputfp = flagset.String("r", "", "output file path, default: ./result.spdx") 41 | format = flagset.String("o", "", "output format (json, spdx, cdx), default: spdx") 42 | savetrace = flagset.Bool("s", false, "save trace report, default: false") 43 | ) 44 | return &ffcli.Command{ 45 | Name: "discover", 46 | ShortUsage: "orion discover -f -i -n -r -o -s ", 47 | ShortHelp: `Discover software dependencies`, 48 | LongHelp: `Discover software dependencies not managed by package managers 49 | EXAMPLES 50 | # discover all dependencies not managed by package managers 51 | orion discover -f ./Dockerfile -i binderancient:latest -n https://github.com/myorg/myproject -r result.spdx -o spdx 52 | `, 53 | FlagSet: flagset, 54 | Exec: func(ctx context.Context, args []string) error { 55 | 56 | dopts := common.DiscoverOpts{ 57 | DockerfilePath: *dockerfile, 58 | Image: *image, 59 | Namespace: *namespace, 60 | OutFilepath: strings.TrimSpace(*outputfp), 61 | Format: *format, 62 | SaveTrace: *savetrace, 63 | } 64 | 65 | v := goVersion.Func(shortened, version, commit, date) 66 | var vjson = goVersion.Info{} 67 | json.Unmarshal([]byte(v), &vjson) 68 | if err := DiscoveryDeps(ctx, dopts, vjson.Version); err != nil { 69 | return errors.Wrapf(err, "discovery task for %s failed", dopts.DockerfilePath) 70 | } 71 | 72 | return nil 73 | }, 74 | } 75 | } 
76 | 77 | //DiscoveryDeps : 78 | func DiscoveryDeps(ctx context.Context, dopts common.DiscoverOpts, version string) error { 79 | b, _ := json.Marshal(dopts) 80 | fmt.Printf("\nStart discovery tool verion %s with inputs: %q", version, string(b)) 81 | engine.StartDiscovery(context.Background(), dopts, version) 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing In General 2 | Our project welcomes external contributions. If you have an itch, please feel 3 | free to scratch it. 4 | 5 | ### This md is under construction. For now, please refer to the information in README.md. 6 | 7 | To contribute code or documentation, please submit a **FIXME** [pull request](https://github.com/ibm/repo-template/pulls). 8 | 9 | A good way to familiarize yourself with the codebase and contribution process is 10 | to look for and tackle low-hanging fruit in the **FIXME** [issue tracker](https://github.com/ibm/repo-template/issues). 11 | Before embarking on a more ambitious contribution, please quickly [get in touch](#communication) with us. 12 | 13 | **Note: We appreciate your effort, and want to avoid a situation where a contribution 14 | requires extensive rework (by you or by us), sits in backlog for a long time, or 15 | cannot be accepted at all!** 16 | 17 | ### Proposing new features 18 | 19 | If you would like to implement a new feature, please **FIXME** [raise an issue](https://github.com/ibm/repo-template/issues) 20 | before sending a pull request so the feature can be discussed. This is to avoid 21 | you wasting your valuable time working on a feature that the project developers 22 | are not interested in accepting into the code base. 
23 | 24 | ### Fixing bugs 25 | 26 | If you would like to fix a bug, please **FIXME** [raise an issue](https://github.com/ibm/repo-template/issues) before sending a 27 | pull request so it can be tracked. 28 | 29 | ### Merge approval 30 | 31 | The project maintainers use LGTM (Looks Good To Me) in comments on the code 32 | review to indicate acceptance. A change requires LGTMs from two of the 33 | maintainers of each component affected. 34 | 35 | For a list of the maintainers, see the [MAINTAINERS.md](MAINTAINERS.md) page. 36 | 37 | ## Legal 38 | 39 | Each source file must include a license header for the Apache 40 | Software License 2.0. Using the SPDX format is the simplest approach. 41 | e.g. 42 | 43 | ``` 44 | /* 45 | Copyright All Rights Reserved. 46 | 47 | SPDX-License-Identifier: Apache-2.0 48 | */ 49 | ``` 50 | 51 | We have tried to make it as easy as possible to make contributions. This 52 | applies to how we handle the legal aspects of contribution. We use the 53 | same approach - the [Developer's Certificate of Origin 1.1 (DCO)](https://github.com/hyperledger/fabric/blob/master/docs/source/DCO1.1.txt) - that the Linux® Kernel [community](https://elinux.org/Developer_Certificate_Of_Origin) 54 | uses to manage code contributions. 55 | 56 | We simply ask that when submitting a patch for review, the developer 57 | must include a sign-off statement in the commit message. 58 | 59 | Here is an example Signed-off-by line, which indicates that the 60 | submitter accepts the DCO: 61 | 62 | ``` 63 | Signed-off-by: John Doe 64 | ``` 65 | 66 | You can include this automatically when you commit a change to your 67 | local git repository using the following command: 68 | 69 | ``` 70 | git commit -s 71 | ``` 72 | 73 | ## Communication 74 | **FIXME** Please feel free to connect with us on our [Slack channel](link). 75 | 76 | ## Setup 77 | **FIXME** Please add any special setup instructions for your project to help the developer 78 | become productive quickly. 
79 | 80 | ## Testing 81 | **FIXME** Please provide information that helps the developer test any changes they make 82 | before submitting. 83 | 84 | ## Coding style guidelines 85 | **FIXME** Optional, but recommended: please share any specific style guidelines you might 86 | have for your project. 87 | 88 | 89 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # orion 2 | 3 | The repository is a tool that generates software inventory of a container image, specifically the software installations that are not managed through package managers. Developers often install additional software artifacts through RUN shell commands and COPY/ADD during docker build besides OS packages and open-source software packages. Such installations need to be counted in order to produce a complete and accurate SBOM for security compliance and auditing purposes. 4 | 5 | There are a number of open-source tools on SBOM: (1) [tern](https://github.com/tern-tools/tern) for container image, (2) [spdx sbom generator](https://github.com/spdx/spdx-sbom-generator) for open-source software packages of various languages, and (3) [Kubernetes Release Tooling](https://github.com/kubernetes/release) for golang applications. 6 | 7 | Our project complements these tools with the capabilities to track software artifacts installed outside of the package management tools, independent of the platform and language specific package management tools. The project also leverages the spdx module of [Kubernetes Release Tooling](https://github.com/kubernetes/release) in generating SPDX document. 8 | 9 | 10 | ### Features 11 | 12 | There are many ways that developers can compose their Dockerfile to install additional artifacts in docker build. We have selected some from the popular github projects (those with most stars). 
From examining them we extract scenarios/patterns that are used in these Dockerfiles. 13 | 14 | **The project has implemented the tracing capabilities for artifacts installed by the following docker operations and shell commands, which covers the majority of the scenarios.** 15 | 16 | - WORKDIR 17 | - ARG 18 | - ENV 19 | - RUN 20 | - curl 21 | - wget 22 | - tar -x... 23 | - unzip 24 | - git clone, git checkout 25 | - cp 26 | - mv 27 | - cd 28 | - COPY 29 | - ADD 30 | 31 | **The scenarios/patterns yet to be addressed are listed [here](https://github.com/tap8stry/orion/blob/main/doc/new-scenarios.md) for further development.** 32 | 33 | 34 | ### How to run it 35 | 36 | 1. Clone the project and make a build 37 | 38 | ``` 39 | % git clone https://github.com/tap8stry/orion.git 40 | % cd orion 41 | % make 42 | ``` 43 | 44 | 2. Command to scan the Dockerfile and produce addon installation traces 45 | 46 | ``` 47 | % ./orion discover -f <dockerfile-path> -n <sbom-namespace> -r <output-file-path> 48 | ``` 49 | 50 | where 51 | - dockerfile-path: Dockerfile pathname 52 | - sbom-namespace: namespace, e.g. your project's github repository URL 53 | - output-file-path: file name for saving discovery results. The traces are saved to `<output-file-path>-trace.json`. 54 | 55 | 3. Command to produce/verify addon installation traces and produce SBOM report 56 | 57 | ``` 58 | % ./orion discover -f <dockerfile-path> -n <sbom-namespace> -i <image-name:tag> -r <output-file-path> 59 | ``` 60 | The SBOM report is saved to `<output-file-path>.spdx`. 61 | 62 | 4. Workaround if you encounter an access permission issue when decompressing the image tarball 63 | 64 | You may encounter error messages like the following when running the command 3. 65 | 66 | ``` 67 | error executing untar cmd: exit status 1 68 | error untar image layer "356f18f3a935b2f226093720b65383048249413ed99da72c87d5be58cc46661c.tar.gz": unable to untar an image file 69 | ``` 70 | 71 | This is caused by the access permission when decompressing the image tar file to a temporary file system. You can use the `sudo` command to work around this problem, see the command below. 
72 | 73 | ``` 74 | % sudo ./orion discover -d -n -i -f 75 | ``` -------------------------------------------------------------------------------- /pkg/parser/addon/utils.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | 17 | package addon 18 | 19 | import ( 20 | "fmt" 21 | "strings" 22 | 23 | "github.com/tap8stry/orion/pkg/common" 24 | ) 25 | 26 | //RUN wget http://nodejs.org/dist/v14.17.6/node-v14.17.6-linux-$ARCH.tar.gz -O /tmp/node.tar.gz && tar -C /usr/local --strip-components 1 -xzf /tmp/node.tar.gz && rm -rf /home/jhipster/.cache/ /var/lib/apt/lists/* /tmp/* /var/tmp/* 27 | 28 | var osPkgMgmtTools = []string{ 29 | "apk ", 30 | "apt ", 31 | "apt-get", 32 | "dpkg ", 33 | "yum ", 34 | "rpm ", 35 | "deb ", 36 | } 37 | 38 | // replaceArgEnvVariable repleces the variable with its value and removes quotation marks if exist 39 | func replaceArgEnvVariable(str string, args map[string]string) string { 40 | newStr := str 41 | for key, value := range args { 42 | key1 := fmt.Sprintf("$%s", key) 43 | key2 := fmt.Sprintf("${%s}", key) 44 | key3 := "${" + key + "%%[a-z]*}" //used in python wget url, e.g. 
${PYTHON_VERSION%%[a-z]*} 45 | if strings.Contains(newStr, key1) && len(value) > 0 { //replace only if the value is not empty 46 | newStr = strings.ReplaceAll(newStr, key1, value) 47 | } 48 | if strings.Contains(newStr, key2) && len(value) > 0 { 49 | newStr = strings.ReplaceAll(newStr, key2, value) 50 | } 51 | if strings.Contains(newStr, key3) && len(value) > 0 { 52 | newStr = strings.ReplaceAll(newStr, key3, value) 53 | } 54 | } 55 | newStr = common.TrimQuoteMarks(newStr) 56 | return newStr 57 | } 58 | 59 | // isOsInstall checks if the cmd is about OS package install 60 | func isOsInstall(cmd string) bool { 61 | for _, osPkgCmd := range osPkgMgmtTools { 62 | if strings.Contains(cmd, osPkgCmd) { 63 | return true 64 | } 65 | } 66 | return false 67 | } 68 | 69 | // existInInstallTrace checks if the source is in a destination of previous steps, therefore belongs to the same curl/wget installation 70 | func existInInstallTrace(traces map[int]common.Trace, source string) bool { 71 | for i := 0; i < len(traces); i++ { 72 | if strings.Contains(source, "*") { // source = "gradle-*.zip" 73 | if strings.Contains(traces[i].Destination, source[:strings.Index(source, "*")]) { 74 | return true 75 | } 76 | } else { 77 | if strings.HasPrefix(source, traces[i].Destination) { 78 | return true 79 | } 80 | } 81 | } 82 | return false 83 | } 84 | 85 | // checkEarlierInstalls checks if the source of trace matches a destination of previous steps of an install, add adds to the install's traces 86 | func checkEarlierInstalls(installs *[]common.InstallTrace, trace common.Trace) { 87 | 88 | for _, install := range *installs { 89 | for i := 0; i < len(install.Traces); i++ { 90 | if strings.Contains(trace.Source, "*") { // source = "gradle-*.zip" 91 | if strings.Contains(install.Traces[i].Destination, trace.Source[:strings.Index(trace.Source, "*")]) { 92 | m := make(map[int]common.Trace) 93 | m = install.Traces 94 | m[len(install.Traces)] = trace 95 | install.Traces = m 96 | return 97 | } 98 | 
} else { 99 | if strings.HasPrefix(trace.Source, install.Traces[i].Destination) { 100 | m := make(map[int]common.Trace) 101 | m = install.Traces 102 | m[len(install.Traces)] = trace 103 | install.Traces = m 104 | return 105 | } 106 | } 107 | } 108 | } 109 | return 110 | } 111 | -------------------------------------------------------------------------------- /pkg/parser/addon/spdx.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | 17 | package addon 18 | 19 | import ( 20 | "fmt" 21 | "strings" 22 | 23 | "github.com/pkg/errors" 24 | "github.com/tap8stry/orion/pkg/common" 25 | "k8s.io/release/pkg/spdx" 26 | ) 27 | 28 | func GenerateSpdxReport(dockerfilename, image, namespace string, installs []common.VerifiedArtifact, toolversion string) (string, error) { 29 | oriontool := "https://github.com/tap8stry/orion" 30 | if len(toolversion) > 0 { 31 | oriontool += fmt.Sprintf("@%s", toolversion) 32 | } 33 | doc := spdx.NewDocument() 34 | doc.Name = "SPDX-Docker-Image-Addons-" + image 35 | doc.ID = "SPDXRef-DOCUMENT-FOR-ADDONS" 36 | doc.Namespace = namespace 37 | doc.Creator.Person = "Tester Tester" 38 | doc.Creator.Tool = []string{oriontool, "k8s.io/release/pkg/spdx"} 39 | fmt.Printf("\ncreate a new SPDX doc %q, namespace=%q", doc.Name, doc.Namespace) 40 | 41 | for _, ins := range installs { 42 | if ins.IsDownload { //create a Package, add the download file to package, add package to document 43 | dpkg := spdx.NewPackage() 44 | name := ins.DownloadLocation[strings.LastIndex(ins.DownloadLocation, "://")+3:] 45 | dpkg.Name = name 46 | dpkg.ID = "SPDXRef-Package-" + name 47 | dpkg.FileName = name 48 | dpkg.DownloadLocation = ins.DownloadLocation 49 | 50 | for _, art := range ins.Artifacts { 51 | if art.IsDirectory { 52 | myspdx := spdx.NewSPDX() 53 | pkg, err := myspdx.PackageFromDirectory(art.Path) 54 | if err != nil { 55 | fmt.Printf("\n\nerror creating package for directory %s, error = %s", art.Path, err.Error()) 56 | } 57 | pkg.FileName = art.Path[strings.Index(art.Path, "rootfs/")+7:] 58 | pkg.DownloadLocation = dpkg.DownloadLocation 59 | if err := dpkg.AddPackage(pkg); err != nil { 60 | fmt.Printf("\n\nerror in adding a directory to download package: %s", err.Error()) 61 | } 62 | } else { 63 | f := spdx.NewFile() 64 | name := art.Name[strings.LastIndex(art.Name, "rootfs/")+7:] 65 | f.FileName = name 66 | f.SourceFile = art.Path 67 | f.Name = name 68 | if err := dpkg.AddFile(f); err != 
nil { 69 | fmt.Printf("\n\nerror in adding a file to download package: %s", err.Error()) 70 | } 71 | } 72 | } 73 | if err := doc.AddPackage(dpkg); err != nil { 74 | fmt.Printf("\n\nerror in adding package to document: %s", err.Error()) 75 | } 76 | } else { 77 | for _, art := range ins.Artifacts { 78 | if art.IsDirectory { 79 | myspdx := spdx.NewSPDX() 80 | pkg, err := myspdx.PackageFromDirectory(art.Path) 81 | if err != nil { 82 | fmt.Printf("\n\nerror creating package for directory %s, error = %s", art.Path, err.Error()) 83 | } 84 | pkg.FileName = art.Path[strings.Index(art.Path, "rootfs/")+7:] 85 | if err := doc.AddPackage(pkg); err != nil { 86 | fmt.Printf("\n\nerror in adding a directory to download package: %s", err.Error()) 87 | } 88 | } else { 89 | f := spdx.NewFile() 90 | name := art.Name[strings.LastIndex(art.Name, "rootfs/")+7:] 91 | f.FileName = name 92 | f.SourceFile = art.Path 93 | f.Name = name 94 | if err := doc.AddFile(f); err != nil { 95 | fmt.Printf("\n\nerror in adding a file to download package: %s", err.Error()) 96 | } 97 | } 98 | } 99 | } 100 | } 101 | 102 | markup, err := doc.Render() 103 | if err != nil { 104 | fmt.Printf("\nerror in rendering SPDX document: %s", err.Error()) 105 | return "", errors.Wrap(err, "rendering SPDX document") 106 | } 107 | return markup, nil 108 | } 109 | -------------------------------------------------------------------------------- /pkg/engine/core.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 |
17 | package engine
18 |
19 | import (
20 | "context"
21 | "encoding/json"
22 | "fmt"
23 | "io/ioutil"
24 | "os"
25 | "strings"
26 |
27 | "github.com/pkg/errors"
28 | "github.com/tap8stry/orion/pkg/common"
29 | "github.com/tap8stry/orion/pkg/parser/addon"
30 | "github.com/tap8stry/orion/pkg/parser/dockerfile"
31 | )
32 |
33 | //StartDiscovery : entrypoint for the discovery core function.
// It parses the Dockerfile named by dopts.DockerfilePath, collects add-on
// installation traces for every build stage, optionally saves the raw traces
// as JSON (dopts.SaveTrace), and — when dopts.Image is set — verifies the
// traces against the image filesystem and writes an SPDX (default) or
// CycloneDX report. toolversion is recorded in the generated SPDX document.
34 | func StartDiscovery(ctx context.Context, dopts common.DiscoverOpts, toolversion string) error {
35 | //get Dockerfile
36 | dfile, err := dockerfile.GetDockerfile(dopts.DockerfilePath)
37 | if err != nil {
38 | return errors.Wrap(err, "reading dockerfile")
39 | }
40 | fmt.Printf("\ngot dockerfile %q", dfile.Filepath)
41 |
42 | var spdxReport string
43 |
44 | //get add-on traces per build stage
45 | for j, stage := range dfile.BuildStages {
46 | installTraces, envs, image := addon.DiscoverAddonArtifacts(&stage, dopts, dfile.BuildArgs)
47 | dfile.BuildStages[j].AddOnInstalls = append(dfile.BuildStages[j].AddOnInstalls, installTraces...)
48 | dfile.BuildStages[j].EnvVariables = envs
49 | dfile.BuildStages[j].Image = image
50 | fmt.Printf("\ngenerate addon traces for dockerfile %q, stage %q, %d addons found", dfile.Filepath, stage.StageID, len(installTraces))
51 | }
52 | //save traces
53 | if dopts.SaveTrace {
// Derive "<base>-trace.json" from the requested output path (stripping any
// file extension), defaulting to the shared results filename.
54 | filename := fmt.Sprintf("%s-trace.%s", common.DefaultFilename, common.FormatJSON)
55 | if len(dopts.OutFilepath) > 0 {
56 | if strings.LastIndex(dopts.OutFilepath, ".") > 0 {
57 | filename = fmt.Sprintf("%s-trace.%s", dopts.OutFilepath[:strings.LastIndex(dopts.OutFilepath, ".")], common.FormatJSON)
58 | } else {
59 | filename = fmt.Sprintf("%s-trace.%s", dopts.OutFilepath, common.FormatJSON)
60 | }
61 | }
// Best-effort persistence: the marshal error is deliberately discarded.
62 | data, _ := json.MarshalIndent(dfile, "", " ")
63 | common.SaveFile(filename, data)
64 | }
65 |
66 | //verify and produce SPDX if image provided
67 | if len(dopts.Image) > 0 {
68 | fmt.Printf("\nget image %q for dockerfile %q\n", dopts.Image, dfile.Filepath)
69 | buildContextDir, err := ioutil.TempDir(os.TempDir(), "build-ctx")
70 | if err != nil {
71 | fmt.Printf("\nerror creating build context dir: %s", err.Error())
72 | return errors.Wrap(err, "creating build directory")
73 | }
74 | defer os.RemoveAll(buildContextDir)
// NOTE(review): indexes the last build stage; this panics if BuildStages is
// empty — confirm the parser always yields at least one stage (one FROM).
75 | artifacts, containerimage, err := addon.VerifyAddOnInstalls(buildContextDir, dopts.Image, &dfile.BuildStages[len(dfile.BuildStages)-1])
76 | if err != nil {
77 | fmt.Printf("\nerror verifying addon installs: %s", err.Error())
78 | return errors.Wrap(err, "verifying add-ons against image")
79 | }
80 |
81 | if dopts.Format == common.FormatCdx {
82 | filename := getOutputFileName(dopts.OutFilepath, common.FormatCdx+"."+common.FormatJSON)
83 | err = addon.StoreCdxJSON(filename, containerimage, dopts.Namespace, artifacts)
84 | if err != nil {
85 | return errors.Wrap(err, "generating CycloneDX report")
86 | }
87 | }
88 |
// SPDX is the default report format when none is specified.
89 | if dopts.Format == common.FormatSpdx || len(dopts.Format) == 0 {
90 | spdxReport, err = addon.GenerateSpdxReport(dfile.Filepath, dopts.Image, dopts.Namespace, artifacts, toolversion)
91 | if err != nil {
92 | return errors.Wrap(err, "generating spdx report")
93 | }
94 | filename := getOutputFileName(dopts.OutFilepath, common.FormatSpdx)
95 | common.SaveFile(filename, []byte(spdxReport))
96 | fmt.Printf("\nclean up temporary files ...\n")
97 | return nil
98 | }
99 | }
100 | return nil
101 | }
102 |
// getOutputFileName derives the report path: when outputfile is non-empty its
// extension (if any) is replaced with format, otherwise format is appended;
// with no outputfile the shared default filename is used.
103 | func getOutputFileName(outputfile, format string) string {
104 | filename := fmt.Sprintf("%s.%s", common.DefaultFilename, format)
105 | if len(outputfile) > 0 {
106 | if strings.LastIndex(outputfile, ".") > 0 {
107 | filename = fmt.Sprintf("%s.%s", outputfile[:strings.LastIndex(outputfile, ".")], format)
108 | } else {
109 | filename = fmt.Sprintf("%s.%s", outputfile, format)
110 | }
111 | }
112 | return filename
113 | }
114 |
--------------------------------------------------------------------------------
/pkg/common/types.go:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2020 IBM Corporation
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 |
17 | package common
18 |
19 | import (
20 | "github.com/moby/buildkit/frontend/dockerfile/parser"
21 | )
22 |
23 | //DockerfileEcosystem and friends: the dockerfile ecosystem tag, supported report formats, and the default output file base name.
24 | const (
25 | DockerfileEcosystem = "dockerfile"
26 | FormatJSON = "json"
27 | FormatSpdx = "spdx"
28 | FormatCdx = "cdx"
29 | DefaultFilename = "results"
30 | )
31 |
32 | //DiscoverOpts carries the command-line options that drive a discovery run.
33 | type DiscoverOpts struct {
34 | DockerfilePath string
35 | OutFilepath string
36 | Format string
37 | Image string
38 | Namespace string
39 | SaveTrace bool
40 | }
41 |
42 | //Dockerfile is the parsed representation of one Dockerfile: content hash, build args and build stages.
43 | type Dockerfile struct {
44 | Filepath string `json:"filepath"`
45 | Filehash string `json:"filehash"`
46 | FileType string `json:"filetype"`
47 | BuildStages []BuildStage `json:"build_stages"`
48 | BuildArgs map[string]string `json:"build_args,omitempty"`
49 | }
50 |
51 | //BuildStage describes a single FROM stage of a Dockerfile build.
52 | type BuildStage struct {
53 | StageID string `json:"stage_id"`
// NOTE(review): json tag "key" does not match the field name Context —
// confirm external consumers before renaming the tag.
54 | Context string `json:"key"`
55 | DependsOn string `json:"parent_stage"`
56 | ScratchBuild bool `json:"is_scratch_build"`
57 | StartLineNo int `json:"start_line"`
58 | EndLineNo int `json:"end_line"`
59 | Image Image `json:"base_image"`
60 | Packages []Package `json:"os_packages"`
61 | AppPackages []Package `json:"app_packages"`
62 | PackageOverride []PackageOverride `json:"package_override"`
63 | DockerFileCmds []*parser.Node `json:"-"`
64 | AddOnInstalls []InstallTrace `json:"addon_installs"`
65 | EnvVariables map[string]string `json:"env_variables,omitempty"`
66 | AddOnSpdxReport string `json:"addon_spdx_report"`
67 | }
68 |
69 | //Image identifies a container image and what has been learned about it.
70 | type Image struct {
71 | Name string `json:"name"`
72 | Tag string `json:"tag"`
73 | OSName string `json:"os_name"`
74 | OSVersion string `json:"os_version"`
75 | SHA256 string `json:"sha256"`
76 | Metadata string `json:"metadata"`
77 | Packages []Package `json:"packages"`
78 | Scanned bool `json:"scanned"`
79 | }
80 |
81 | //ManifestFile describes a package-manifest file discovered in a repository.
82 | type ManifestFile struct {
83 | CommitID string `json:"commitid"`
84 | GitURL string `json:"giturl"`
85 | GitBranch string `json:"gitbranch"`
86 | Filepath string `json:"filepath"`
87 | Filehash string `json:"filehash"`
88 | FileType string `json:"filetype"`
89 | Packages []Package `json:"packages"`
90 | Scanned bool `json:"scanned"`
91 | }
92 |
93 | //Package identifies a software package and the ecosystem it belongs to.
94 | type Package struct {
95 | Name string `json:"name"`
96 | Version string `json:"version"`
97 | Ecosystem string `json:"ecosystem"`
98 | Source string `json:"source,omitempty"`
99 | Key string `json:"key,omitempty"`
100 | Dependencies []string `json:"dependencies,omitempty"`
101 | }
102 |
103 | //PackageOverride records one package overriding another.
104 | type PackageOverride struct {
105 | BasePackage string `json:"base_package"`
106 | OverridePackage string `json:"override_package"`
107 | }
108 |
109 | //Trace : step trace of dockerfile add-on installations via RUN curl/wget/ or COPY/ADD
110 | type Trace struct {
111 | Command string `json:"command"`
112 | Source string `json:"source,omitempty"`
113 | Destination string `json:"destination,omitempty"`
114 | Workdir string `json:"workdir,omitempty"`
115 | }
116 |
117 | //InstallTrace presents an add-on installation and its traces
118 | type InstallTrace struct {
119 | Origin string `json:"origin"`
120 | OriginHash string `json:"originhash,omitempty"`
121 | Traces map[int]Trace `json:"traces,omitempty"`
122 | }
123 |
124 | //CommandSet is a set of commands in their execution order
125 | type CommandSet struct {
126 | Commands map[int]string
127 | }
128 |
// SpdxRelationship is a textual SPDX relationship clause between two
// documented elements.
129 | type SpdxRelationship string
130 |
// SpdxFile models the per-file fields emitted into an SPDX document.
131 | type SpdxFile struct {
132 | FileName string `json:"filename"`
133 | SPDXID string `json:"spdxid"`
134 | FileChecksum string `json:"fileCheckSum,omitempty"`
135 | FileDownloadLocation string `json:"fileDownloadLocation,omitempty"`
136 | LicenseConcluded string `json:"licenseConcluded,omitempty"`
137 | LicenseInfoInFile string `json:"licenseInfoInfile,omitempty"`
138 | FileCopyrightText string `json:"fileCopyrightText,omitempty"`
139 | FileComment string `json:"fileComment,omitempty"`
140 | }
141 |
142 | //Artifact presents a resource (file or directory) located in the inspected filesystem.
143 | type Artifact struct {
144 | Name string `json:"name"`
145 | Path string `json:"path"`
146 | Version string `json:"version,omitempty"`
147 | IsDirectory bool `json:"isDirectory"`
148 | SHA256 string `json:"sha256,omitempty"`
149 | }
150 |
// VerifiedArtifact groups the artifacts verified for one install trace;
// IsDownload marks traces that originate from a curl/wget/git download.
151 | type VerifiedArtifact struct {
152 | IsDownload bool `json:"isDownload"`
153 | DownloadLocation string `json:"downloadLocation"`
154 | Comment string `json:"packageComment,omitempty"`
155 | Artifacts []Artifact `json:"artifacts,omitempty"`
156 | }
--------------------------------------------------------------------------------
/pkg/parser/addon/cdx.go:
--------------------------------------------------------------------------------
1 | package addon
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path"
7 | "strings"
8 | "time"
9 |
10 | cdx "github.com/CycloneDX/cyclonedx-go"
11 | "github.com/pkg/errors"
12 | "github.com/tap8stry/orion/pkg/common"
13 | )
14 |
15 | //StoreCdxJSON writes a CycloneDX JSON BOM describing image and its verified add-on installs to outfp.
// NOTE(review): the namespace parameter appears unused in this function —
// confirm before removing it from the signature.
16 | func StoreCdxJSON(outfp string,
17 | image common.Image, namespace string, installs []common.VerifiedArtifact) error {
18 |
19 | metadata := cdx.Metadata{
20 | // Define metadata about the main component
21 | // (the component which the BOM will describe)
22 | Component: &cdx.Component{
23 | BOMRef: fmt.Sprintf("image:%s@%s", image.Name, image.SHA256),
24 | Type: cdx.ComponentTypeContainer,
25 | Name: image.Name,
26 | Version: image.SHA256,
27 | },
28 | // Use properties to include an internal identifier for this BOM
29 | // https://cyclonedx.org/use-cases/#properties--name-value-store
30 | Properties: &[]cdx.Property{
31 | {
32 | Name: "internal:scan-timestamp",
33 | Value: time.Now().String(),
34 | },
35 | },
36 | }
37 | components := []cdx.Component{}
38 | imgC := createImageComponent(image) //for the image
39 | imgC.Components = &[]cdx.Component{} // for addon components in the image
40 |
41 | for _, ins := range installs {
42 | if ins.IsDownload {
43 |
//create a component for each download
44 | downloadName := ins.DownloadLocation[strings.LastIndex(ins.DownloadLocation, "://")+3:]
45 | compD := createAssembleComponent(downloadName, ins.DownloadLocation)
46 | compD.Components = &[]cdx.Component{}
47 | for _, art := range ins.Artifacts { //create a component for each verified artifact
48 | compC := cdx.Component{}
49 | if art.IsDirectory {
50 | compC = createApplicationComponent(art.Path, downloadName, ins.DownloadLocation)
51 | } else {
52 | compC = createFileComponent(art.Path, downloadName, art.SHA256, ins.DownloadLocation)
53 | }
54 | //add artifact component to the download components
55 | *compD.Components = append(*compD.Components, compC)
56 | }
57 | //add download component to image component
58 | //*imgC.Components = append(*imgC.Components, compD)
59 | *imgC.Components = append(*imgC.Components, compD)
60 | } else { //artifacts from COPY/ADD operations
61 | sourse := ins.DownloadLocation
62 | for _, art := range ins.Artifacts { //create a component for each verified artifact
63 | compC := cdx.Component{}
64 | if art.IsDirectory {
65 | compC = createApplicationComponent(art.Path, sourse, ins.DownloadLocation)
66 | } else {
// Strip the local build-context prefix up to and including "/rootfs" so the
// recorded file path is image-relative.
67 | fpath := art.Path[strings.Index(art.Path, "/rootfs")+7:]
68 | compC = createFileComponent(fpath, sourse, art.SHA256, ins.DownloadLocation)
69 | }
70 | //add artifact component to the image components
71 | *imgC.Components = append(*imgC.Components, compC)
72 | }
73 | }
74 | }
75 | components = append(components, imgC)
76 |
77 | // Assemble the BOM
78 | bom := cdx.NewBOM()
79 | bom.Metadata = &metadata
80 | bom.Components = &components
81 |
82 | // Encode the BOM
83 | fmt.Printf("\nresults saved to %s\n", outfp)
84 | if _, err := os.Stat(outfp); err == nil { //delete if exists to avoid any leftover of old contents
85 | fmt.Printf("an old report %s exists and delete it\n", outfp)
86 | os.Remove(outfp)
87 | }
// NOTE(review): an OpenFile failure is only logged, not returned; bomWriter
// would then be nil and the deferred Close plus the encoder below would
// panic — consider returning the error here.
88 | bomWriter, err := os.OpenFile(outfp, os.O_WRONLY|os.O_CREATE, 0600)
89 | if err != nil {
90 | fmt.Printf("error opening output file for writing: %v\n", err)
91 | }
92 | defer bomWriter.Close()
93 |
94 | encoder := cdx.NewBOMEncoder(bomWriter, cdx.BOMFileFormatJSON)
95 | encoder.SetPretty(true)
96 | if err := encoder.Encode(bom); err != nil {
97 | fmt.Printf("error encoding cdx format: %s\n", err.Error())
98 | return errors.Wrap(err, "encoding CycloneDX BOM file")
99 | }
100 | return nil
101 | }
102 |
// createImageComponent maps a container image to a CycloneDX container
// component, nesting one OS component for the image's operating system.
103 | func createImageComponent(img common.Image) cdx.Component {
104 | c := cdx.Component{}
105 | c.BOMRef = fmt.Sprintf("image:%s", img.SHA256)
106 | c.Type = cdx.ComponentTypeContainer
107 | c.Name = img.Name
108 | c.Version = img.Tag
109 | c.PackageURL = img.SHA256
110 | c.Components = &[]cdx.Component{
111 | {
112 | BOMRef: fmt.Sprintf("os:%s@%s", img.OSName, img.OSVersion),
113 | Type: cdx.ComponentTypeOS,
114 | Name: img.OSName,
115 | Version: img.OSVersion,
116 | },
117 | }
118 | return c
119 | }
120 |
// createAssembleComponent builds the application component representing one
// downloaded bundle (its display name plus the download URL as supplier).
121 | func createAssembleComponent(name, downloadURL string) cdx.Component {
122 | c := cdx.Component{
123 | Type: cdx.ComponentTypeApplication,
124 | Supplier: &cdx.OrganizationalEntity{
125 | URL: &[]string{downloadURL},
126 | },
127 | Name: name,
128 | PackageURL: downloadURL,
129 | }
130 | return c
131 | }
132 |
// createApplicationComponent builds an application component for a directory
// artifact; group records where the artifact came from. Hashes is left empty
// because directories carry no single checksum.
133 | func createApplicationComponent(filepath, group, downloadURL string) cdx.Component {
134 | c := cdx.Component{
135 | Type: cdx.ComponentTypeApplication,
136 | Supplier: &cdx.OrganizationalEntity{
137 | URL: &[]string{downloadURL},
138 | },
139 | Group: group,
140 | Name: path.Base(filepath),
141 | Hashes: &[]cdx.Hash{},
142 | PackageURL: downloadURL,
143 | }
144 | return c
145 | }
146 |
// createFileComponent builds a file component for a regular file; the file's
// SHA-256 doubles as its Version and is also recorded in Hashes.
147 | func createFileComponent(filepath, group, filehash, downloadURL string) cdx.Component {
148 | c := cdx.Component{
149 | BOMRef: fmt.Sprintf("file:%s", filepath),
150 | Type: cdx.ComponentTypeFile,
151 | Supplier: &cdx.OrganizationalEntity{
152 | URL: &[]string{downloadURL},
153 | },
154 | Group: group,
155 | Name: path.Base(filepath),
156 | Version: filehash,
157 | }
158 | c.Hashes = &[]cdx.Hash{
159 | {
160 | Algorithm: "SHA-256",
161 | Value: filehash,
162 | },
163 | }
164 | return c
165 | }
166 |
// createPackageComponent builds a library component with a pkg:<name>@<version>
// BOM reference.
167 | func createPackageComponent(pkgName, pkgVersion, pkgURL string) cdx.Component {
168 | pkgRef := fmt.Sprintf("pkg:%s@%s", pkgName, pkgVersion)
169 | c := cdx.Component{}
170 | c.BOMRef = pkgRef
171 | c.Type = cdx.ComponentTypeLibrary
172 | c.Name = pkgName
173 | c.Version = pkgVersion
174 | c.PackageURL = pkgURL
175 | return c
176 | }
177 |
// createOSComponent builds an operating-system component with an
// os:<name>@<version> BOM reference.
178 | func createOSComponent(osName, osVersion string) cdx.Component {
179 | osRef := fmt.Sprintf("os:%s@%s", osName, osVersion)
180 | c := cdx.Component{}
181 | c.BOMRef = osRef
182 | c.Type = cdx.ComponentTypeOS
183 | c.Name = osName
184 | c.Version = osVersion
185 | return c
186 | }
187 |
// createPackageDependencies maps deps onto a CycloneDX dependency entry for
// srcBOMRef; entries of the form "name:version" become pkg:name@version refs,
// anything else is used verbatim as the reference.
188 | func createPackageDependencies(srcBOMRef string, deps []string) cdx.Dependency {
189 | dRefs := []cdx.Dependency{}
190 | for _, d := range deps {
191 | pkgMeta := strings.Split(d, ":")
192 | if len(pkgMeta) == 2 {
193 | depPkgRef := fmt.Sprintf("pkg:%s@%s", pkgMeta[0], pkgMeta[1])
194 | dRefs = append(dRefs, cdx.Dependency{Ref: depPkgRef})
195 | } else {
196 | dRefs = append(dRefs, cdx.Dependency{Ref: d})
197 | }
198 | }
199 | c := cdx.Dependency{
200 | Ref: srcBOMRef,
201 | Dependencies: &dRefs,
202 | }
203 | return c
204 | }
--------------------------------------------------------------------------------
/pkg/parser/dockerfile/docker.go:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2020 IBM Corporation
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 |
17 | package dockerfile
18 |
19 | import (
20 | "errors"
21 | "fmt"
22 | "io/ioutil"
23 | "os"
24 | "reflect"
25 | "strings"
26 |
27 | "github.com/moby/buildkit/frontend/dockerfile/parser"
28 | "github.com/tap8stry/orion/pkg/common"
29 | "golang.org/x/crypto/sha3"
30 | )
31 |
// Lower-case Dockerfile instruction keywords as produced by the buildkit parser.
32 | const (
33 | FROM = "from"
34 | RUN = "run"
35 | COPY = "copy"
36 | ADD = "add"
37 | WORKDIR = "workdir"
38 | ARG = "arg"
39 | ENV = "env"
40 | )
41 |
42 | //GetDockerfileReader reads a file into Dockerfile parser
43 | func GetDockerfileReader(filepath string) (*parser.Result, error) {
44 | file, err := os.Open(filepath)
// NOTE(review): defer placed before the error check; safe because
// (*os.File).Close tolerates a nil receiver, but conventionally the error
// check comes first.
45 | defer file.Close()
46 | if err != nil {
47 | fmt.Printf("\nerror opening dockerfile: %s", err.Error())
48 | return nil, err
49 | }
50 |
51 | res, err := parser.Parse(file)
52 | if err != nil {
53 | fmt.Printf("\nerror parsing dockerfile: %s", err.Error())
54 | return nil, err
55 | }
56 |
57 | return res, nil
58 | }
59 |
60 | //GetDockerfile reads the Dockerfile at f and returns it with its SHA3-256 content hash, discovered build args and build stages.
61 | func GetDockerfile(f string) (common.Dockerfile, error) {
62 | cm := common.Dockerfile{}
63 | cm.Filepath = f
64 | data, err := ioutil.ReadFile(f)
65 | if err != nil {
66 | fmt.Printf("\nerror reading dockerfile %q: %s", f, err.Error())
67 | return cm, err
68 | }
69 | cm.Filehash = fmt.Sprintf("%x", sha3.Sum256(data))
70 | cm.FileType = common.DockerfileEcosystem
71 | cm.BuildArgs, _ = DiscoverBuildArgs(f)
72 | cm.BuildStages, _ = DiscoverBuildStages(f, cm.Filehash)
73 | return cm, nil
74 |
75 | }
76 |
77 | //DiscoverDockerfile scans a directory and returns an array of parsed dockerfiles matching filePattern; paths are stored relative to repoDir.
78 | func DiscoverDockerfile(repoDir, filePattern string) []common.Dockerfile {
79 | d := []common.Dockerfile{}
80 | files := common.SearchFiles(repoDir, filePattern)
81 | for _, f := range files {
82 | cm := common.Dockerfile{}
83 | cm.Filepath = strings.Split(f, repoDir)[1]
84 | cm.Filepath = strings.TrimLeft(cm.Filepath, "/")
85 | data, _ := ioutil.ReadFile(f)
86 | cm.Filehash = fmt.Sprintf("%x", sha3.Sum256(data))
87 | cm.FileType = common.DockerfileEcosystem
88 | cm.BuildArgs, _ = DiscoverBuildArgs(f)
89 | cm.BuildStages, _ = DiscoverBuildStages(f, cm.Filehash)
90 | d = append(d, cm)
91 | }
92 | return d
93 | }
94 |
95 | //DiscoverBuildArgs returns a map of docker build arguments defined before stages (FROM)
96 | func DiscoverBuildArgs(dockerfp string) (map[string]string, error) {
97 | docker, err := GetDockerfileReader(dockerfp)
98 | if err != nil {
99 | return nil, errors.New("unable to read the dockerfile")
100 | }
101 | buildargs := make(map[string]string)
102 | for _, cmd := range docker.AST.Children {
103 | if strings.EqualFold(cmd.Value, ARG) {
104 | args := ParseArgEnv(cmd.Original)
105 | for key, value := range args {
106 | buildargs[key] = value
107 | }
108 | }
// Stop at the first FROM: only pre-stage ARGs are global build args.
109 | if strings.EqualFold(cmd.Value, FROM) {
110 | break
111 | }
112 | }
113 | return buildargs, nil
114 | }
115 |
116 | //ParseArgEnv parses an ARG/ENV instruction and returns variable name and value
117 | func ParseArgEnv(cmd string) map[string]string {
118 | args := make(map[string]string)
119 | cmdTokens := strings.Fields(cmd)
// hasEqualMark guarantees every token carries "=", so splits[1] is safe below.
120 | if hasEqualMark(cmdTokens[1:]) { //use ARG name=value or ENV name=value
121 | for i := range cmdTokens[1:] {
122 | splits := strings.Split(cmdTokens[i+1], "=")
123 | args[splits[0]] = common.TrimQuoteMarks(splits[1])
124 | }
125 | } else { //use ARG name value or ENV name value
126 | if len(cmdTokens) == 3 {
127 | args[cmdTokens[1]] = cmdTokens[2]
128 | } else {
129 | args[cmdTokens[1]] = ""
130 | }
131 | }
132 | return args
133 | }
134 |
// hasEqualMark reports whether every token contains an "=" sign.
135 | func hasEqualMark(strs []string) bool {
136 | for _, str := range strs {
137 | if !strings.Contains(str, "=") {
138 | return false
139 | }
140 | }
141 | return true
142 | }
143 |
144 | //DiscoverBuildStages parses the Dockerfile into build stages keyed by fileKey.
// NOTE(review): lineIdx counts parser instructions, not raw file lines, so
// StartLineNo/EndLineNo are instruction ordinals — confirm consumers expect
// this.
145 | func DiscoverBuildStages(dockerfp, fileKey string) ([]common.BuildStage, error) {
146 | result, err := GetDockerfileReader(dockerfp)
147 | if err != nil {
148 | return nil, errors.New("unable to read the dockerfile")
149 | }
150 | stages := []common.BuildStage{}
151 | curStage := common.BuildStage{}
152 | lineIdx := 0
153 | buildStageIdx := 0
154 |
155 | for _, cmd := range result.AST.Children {
156 | lineIdx++
157 | if strings.EqualFold(cmd.Value, FROM) {
// A new FROM closes the previous stage, if any.
158 | if curStage.StageID != "" {
159 | curStage.EndLineNo = lineIdx
160 | stages = append(stages, curStage)
161 | buildStageIdx++
162 | }
163 | curStage = common.BuildStage{}
164 |
165 | if strings.Contains(strings.ToLower(cmd.Original), " as ") {
166 | cmdTokens := strings.Fields(cmd.Original)
167 | curStage.StageID = cmdTokens[len(cmdTokens)-1]
168 | curStage.Context = fmt.Sprintf("%s:%s", fileKey, curStage.StageID)
169 | } else {
170 | curStage.StageID = fmt.Sprintf("%d", buildStageIdx)
171 | curStage.Context = fmt.Sprintf("%s:%s", fileKey, curStage.StageID)
172 | }
173 |
// NOTE(review): substring match — any image name containing "scratch"
// (e.g. "myscratchbase") would be treated as a scratch build; confirm.
174 | if strings.Contains(cmd.Original, "scratch") {
175 | curStage.ScratchBuild = true
176 | curStage.Image.Name = "scratch"
177 | curStage.Image.Tag = ""
178 | curStage.Image.SHA256 = "scratch_image_key"
179 | } else {
180 | curStage.ScratchBuild = false
181 | cmdTokens := strings.Fields(cmd.Original)
182 | imageName := cmdTokens[1]
183 | if strings.IndexAny(imageName, ":") > 0 {
184 | curStage.Image.Name = strings.Split(imageName, ":")[0]
185 | curStage.Image.Tag = strings.Split(imageName, ":")[1]
186 | if strings.Contains(curStage.Image.Tag, "@sha256") {
187 | curStage.Image.SHA256 = strings.Split(curStage.Image.Tag, "@")[1]
188 | }
189 | } else {
190 | curStage.Image.Name = imageName
191 | curStage.Image.Tag = "latest"
192 | }
193 | }
194 | curStage.StartLineNo = lineIdx
195 | } else if strings.EqualFold(cmd.Value, COPY) {
// COPY --from=<stage> establishes a dependency on an earlier stage.
196 | copyFlags := cmd.Flags
197 | for _, flag := range copyFlags {
198 | if !strings.Contains(flag, "--from") {
199 | continue
200 | }
201 | parentStageID := strings.Split(flag, "=")[1]
202 | parentStage, err := getParentStage(stages, parentStageID)
203 | if err != nil {
204 | fmt.Printf("\nerror parsing dockerfile: %v", err)
205 | }
206 | curStage.DependsOn = parentStage.StageID
207 | }
208 | }
209 | curStage.DockerFileCmds = append(curStage.DockerFileCmds, cmd)
210 | }
211 |
// Close the final stage.
212 | curStage.EndLineNo = lineIdx
213 | stages = append(stages, curStage)
214 | return stages, nil
215 | }
216 |
// getParentStage finds the already-parsed stage with the given ID.
// NOTE(review): reflect.DeepEqual on two strings is equivalent to == here.
217 | func getParentStage(stages []common.BuildStage, stageID string) (common.BuildStage, error) {
218 | for _, stage := range stages {
219 | if reflect.DeepEqual(stage.StageID, stageID) {
220 | return stage, nil
221 | }
222 | }
223 | return common.BuildStage{}, fmt.Errorf("stage %v not found", stageID)
224 | }
--------------------------------------------------------------------------------
/pkg/parser/addon/verify.go:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2020 IBM Corporation
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 |
17 | package addon
18 |
19 | import (
20 | "crypto/sha256"
21 | "encoding/hex"
22 | "errors"
23 | "fmt"
24 | "io"
25 | "os"
26 | "path"
27 | "strings"
28 |
29 | "github.com/moby/buildkit/frontend/dockerfile/parser"
30 | "github.com/tap8stry/orion/pkg/common"
31 | "github.com/tap8stry/orion/pkg/imagefs"
32 | "golang.org/x/mod/sumdb/dirhash"
33 | )
34 |
// Placeholder values used when a checksum cannot be established.
35 | const (
36 | NOASSERTION = "NOASSERTION"
37 | NOSHA = "checksum calculation failed. a manual review of install traces is recommended."
38 | )
39 |
// displayDockerfile prints the generated temporary Dockerfile; debugging aid
// that panics (via check) if the file cannot be read.
40 | func displayDockerfile(pDfp string) {
41 | dat, err := os.ReadFile(pDfp)
42 | check(err)
43 | fmt.Print("\ntemp dockerfile = \n" + string(dat))
44 | }
// check panics on any non-nil error; used only by the debug helper above.
45 | func check(e error) {
46 | if e != nil {
47 | panic(e)
48 | }
49 | }
50 |
51 | //VerifyAddOnInstalls materializes imagename's filesystem under buildContextDir and verifies the build stage's add-on install traces against it, returning the verified artifacts and the image identity (nil artifacts when the stage recorded no installs).
52 | func VerifyAddOnInstalls(buildContextDir, imagename string, buildStage *common.BuildStage) ([]common.VerifiedArtifact, common.Image, error) {
53 | containerimage := common.Image{}
54 |
55 | if len(buildStage.AddOnInstalls) == 0 {
56 | fmt.Printf("\nno AddOnInstalls found for build stage = %s", buildStage.StageID)
57 | return nil, containerimage, nil
58 | }
59 |
60 | fsdir, imgHash, imgOs, _, err := imagefs.Get(imagename, buildContextDir)
61 | if err != nil {
62 | fmt.Printf("\nerror in creating image filesystem: %s\n", err.Error())
63 | return nil, containerimage, err
64 | }
65 | containerimage = common.Image{
66 | Name: imagename,
67 | OSName: imgOs,
68 | SHA256: imgHash,
69 | }
70 | fmt.Printf("\nverify against image's filesystem at %q", fsdir)
71 | verified := verifyArtifacts(buildStage.AddOnInstalls, fsdir)
72 | return verified, containerimage, nil
73 | }
74 |
// verifyArtifacts checks each install trace against the extracted image
// filesystem rooted at fsdir, recording every destination that exists (with a
// SHA-256 for regular files); traces whose destination cannot be resolved are
// skipped.
75 | func verifyArtifacts(installs []common.InstallTrace, fsdir string) []common.VerifiedArtifact {
76 | verifiedInstalls := []common.VerifiedArtifact{}
77 | for _, install := range installs {
78 | verified := common.VerifiedArtifact{
79 | IsDownload: false,
80 | DownloadLocation: install.Origin,
81 | Comment: "",
82 | Artifacts: []common.Artifact{},
83 | }
84 |
85 | for _, trace := range install.Traces {
// Any download-style command in the trace marks the whole install as a download.
86 | if strings.EqualFold(trace.Command, curlOperation) ||
87 | strings.EqualFold(trace.Command, wgetOperation) ||
88 | strings.EqualFold(trace.Command, gitOperation) ||
89 | strings.EqualFold(trace.Command, gitCheckoutOperation) ||
90 | strings.EqualFold(trace.Command, gitCloneOperation) {
91 | verified.IsDownload = true
92 | }
93 | filepath, isdir, err := getPath(trace, fsdir)
94 | if err != nil { //skip
95 | continue
96 | }
97 | art := common.Artifact{
98 | Name: filepath,
99 | Path: filepath,
100 | Version: "",
101 | IsDirectory: isdir,
102 | }
103 | if !art.IsDirectory {
104 | sha, err := getSHA256(filepath)
105 | if err == nil {
106 | art.SHA256 = sha
107 | }
108 | }
109 | verified.Artifacts = append(verified.Artifacts, art)
110 | }
111 | verifiedInstalls = append(verifiedInstalls, verified)
112 | }
113 | fmt.Printf("\n# of verified artifacts = %d", len(verifiedInstalls))
114 | return verifiedInstalls
115 | }
116 |
// getPath resolves a trace's destination to an absolute path inside the
// extracted filesystem dir, returning the path, whether it is a directory,
// and an error when the destination is unknown, invalid, or absent.
117 | func getPath(trace common.Trace, dir string) (string, bool, error) {
118 | var err error
119 | des := trace.Destination
120 | switch strings.Fields(trace.Command)[0] {
121 | case copyOperation:
122 | des = checkCOPYADDDestination(trace)
123 | case addOperation:
124 | des = checkCOPYADDDestination(trace)
125 | case cpOperation:
126 | des, err = checkCpDestination(trace, dir)
127 | if err != nil {
128 | return des, false, err
129 | }
130 | case mvOperation:
131 | des, err = checkMvDestination(trace, dir)
132 | if err != nil {
133 | return des, false, err
134 | }
135 | case tarOperation: //TODO: investigate how to determine the destination from 'tar -x'
136 | fmt.Printf("\ndestination unknown, skip the trace for %s, ", trace.Command)
137 | return des, false, errors.New("destination unknown, need manual review")
138 | case unzipOperation: //TODO: investigate how to determine the destination from 'unzip'
139 | fmt.Printf("\ndestination unknown, skip the trace for %s, ", trace.Command)
140 | return des, false, errors.New("destination unknown, need manual review")
141 | default: //do nothing
142 | }
143 |
144 | info, err := os.Stat(dir + des)
145 | if os.IsNotExist(err) {
146 | fmt.Printf("\nfolder/file %s does not exist for verifying", dir+des)
147 | return "", false, err
148 | }
149 | if strings.EqualFold(des, "/") || len(des) == 0 { // root directory
150 | fmt.Printf("\ndestination invalid, root directory %s found as destination", des)
151 | return des, info.IsDir(), errors.New("destination invalid")
152 | }
153 | if info.IsDir() {
// Probe only: confirms the directory is hashable; the hash itself is discarded.
154 | _, err = dirhash.HashDir(dir+des, "", dirhash.DefaultHash) //
155 | if err != nil {
156 | fmt.Printf("\nsha for folder %s cannot be calculated", dir+des)
157 | return dir + des, info.IsDir(), err
158 | }
159 | }
160 | return dir + des, info.IsDir(), nil
161 | }
162 |
// checkCOPYADDDestination resolves the effective destination of a COPY/ADD:
// when the destination is a directory ("/x/" or "/x/."), the source's base
// name is appended.
163 | func checkCOPYADDDestination(trace common.Trace) string {
164 | des := trace.Destination
165 | so := trace.Source
166 |
167 | if strings.HasSuffix(so, "/") {
168 | so = so[:len(so)-1]
169 | }
170 | so = so[strings.LastIndex(so, "/")+1:]
171 |
172 | if strings.HasSuffix(des, "/") { //des is a directory
173 | des += so
174 | }
175 | if strings.HasSuffix(des, "/.") {
176 | des = des[:len(des)-1] + so
177 | }
178 | return des
179 | }
180 |
// checkCpDestination resolves the destination of a 'cp' command against the
// extracted filesystem under dir; when the destination is an existing
// directory the source's base name is appended (unless the source ends in a
// wildcard).
181 | func checkCpDestination(trace common.Trace, dir string) (string, error) {
182 | des := strings.TrimSpace(trace.Destination)
183 | des = strings.TrimSuffix(des, ".") //e.g. /usr/bin/. --> /usr/bin/
184 | des = strings.TrimSuffix(des, "/") //e.g. /usr/bin/ --> /usr/bin
185 | despath := path.Join(dir, des)
186 | info, err := os.Stat(despath) // e.g. /tmp/build-ctx00032/rootfs/usr/bin
187 | if os.IsNotExist(err) {
188 | fmt.Printf("\nfolder/file %s does not exist: %s", despath, err.Error())
189 | return "", fmt.Errorf("\nfolder/file %s does not exist", dir+des)
190 | }
191 | if info.IsDir() { // destination is a directory
192 | sostrs := strings.Split(trace.Source, "/")
193 | so := sostrs[len(sostrs)-1]
194 | if !strings.EqualFold(so, "*") { //do not add the wildcard if used in source path, e.g. trace.Source="/gradle-*/*"
195 | des += "/" + so
196 | }
197 | }
198 | return des, nil
199 | }
200 |
// checkMvDestination resolves the destination of a 'mv' command; a "/." suffix
// means "into this directory", so the source's base name is appended.
201 | func checkMvDestination(trace common.Trace, dir string) (string, error) {
202 | des := trace.Destination
203 | sostrs := strings.Split(trace.Source, "/")
204 | so := sostrs[len(sostrs)-1]
205 | if strings.HasSuffix(des, "/.") { //e.g. /usr/bin/.
206 | des += "/" + so
207 | }
208 | return des, nil
209 | }
210 |
// GeneratePartialDockerData renders buildArgs as ARG lines followed by the
// original text of cmds, yielding the body of a partial Dockerfile.
211 | func GeneratePartialDockerData(buildArgs map[string]string, cmds []*parser.Node) string {
212 | data := ""
213 | for k, v := range buildArgs {
214 | data += "ARG " + k + "=" + v + "\n"
215 | }
216 | for _, cmd := range cmds {
217 | data += cmd.Original + "\n"
218 | }
219 | return data
220 | }
221 |
// getSHA256 returns the hex-encoded SHA-256 of the file at filepath.
222 | func getSHA256(filepath string) (string, error) {
223 | f, err := os.Open(filepath)
224 | if err != nil {
225 | fmt.Printf("\n failed in opening file %s", err.Error())
226 | return "", err
227 | }
228 | defer f.Close()
229 |
230 | hasher := sha256.New()
231 | if _, err := io.Copy(hasher, f); err != nil {
232 | fmt.Printf("\n failed in io.Copy %s", err.Error())
233 | return "", err
234 | }
235 | value := hex.EncodeToString(hasher.Sum(nil))
236 | return value, nil
237 | }
--------------------------------------------------------------------------------
/doc/new-scenarios.md:
--------------------------------------------------------------------------------
1 | ## Outstanding Installation Scenarios Observed in Sample Dockerfiles ##
2 |
3 | The following is a list of scenarios observed in the sample
dockerfiles. They are currently under investigation and yet to be supported. 4 | 5 | 6 | ### Case 1: Cross stage reference to git origin ### 7 | `git clone` in one stage, a following stage mounts to the previous stage and `git fetch` or `git checkout` 8 | 9 | ``` 10 | FROM git AS containerd-src 11 | ARG CONTAINERD_VERSION 12 | ARG CONTAINERD_ALT_VERSION 13 | WORKDIR /usr/src 14 | RUN git clone https://github.com/containerd/containerd.git containerd 15 | 16 | FROM gobuild-base AS containerd-base 17 | WORKDIR /go/src/github.com/containerd/containerd 18 | ARG TARGETPLATFORM 19 | ENV CGO_ENABLED=1 BUILDTAGS=no_btrfs 20 | RUN xx-apk add musl-dev gcc && xx-go --wrap 21 | 22 | FROM containerd-base AS containerd 23 | ARG CONTAINERD_VERSION 24 | RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \ 25 | git fetch origin \ 26 | && git checkout -q "$CONTAINERD_VERSION" \ 27 | && make bin/containerd \ 28 | && make bin/containerd-shim-runc-v2 \ 29 | && make bin/ctr \ 30 | && mv bin /out 31 | ``` 32 | 33 | ### Case 2: Run `make` command for application build ### 34 | 35 | ``` 36 | FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS idmap 37 | RUN apk add --no-cache git autoconf automake clang lld gettext-dev libtool make byacc binutils 38 | COPY --from=xx / / 39 | ARG SHADOW_VERSION 40 | RUN git clone https://github.com/shadow-maint/shadow.git /shadow && cd /shadow && git checkout $SHADOW_VERSION 41 | WORKDIR /shadow 42 | ARG TARGETPLATFORM 43 | RUN xx-apk add --no-cache musl-dev gcc libcap-dev 44 | RUN CC=$(xx-clang --print-target-triple)-clang ./autogen.sh --disable-nls --disable-man --without-audit --without-selinux --without-acl --without-attr --without-tcb --without-nscd --host $(xx-clang --print-target-triple) \ 45 | && make -j $(nproc) \ 46 | && xx-verify src/newuidmap src/newuidmap \ 47 | && cp src/newuidmap src/newgidmap /usr/bin 48 | ``` 49 | 50 | ### Case 3: `git clone` and `git checkout` are in separate RUN 
operations ###
51 |
52 | This requires correlation between `git checkout` and `git clone` in order to trace to the same git URL.
53 |
54 | ```
55 | FROM gobuild-base AS rootlesskit
56 | ARG ROOTLESSKIT_VERSION
57 | RUN git clone https://github.com/rootless-containers/rootlesskit.git /go/src/github.com/rootless-containers/rootlesskit
58 | WORKDIR /go/src/github.com/rootless-containers/rootlesskit
59 | ARG TARGETPLATFORM
60 | RUN --mount=target=/root/.cache,type=cache \
61 | git checkout -q "$ROOTLESSKIT_VERSION" && \
62 | CGO_ENABLED=0 xx-go build -o /rootlesskit ./cmd/rootlesskit && \
63 | xx-verify --static /rootlesskit
64 | ```
65 |
66 | This scenario can be avoided if a best practice is followed in Dockerfile writing, i.e. organizing all the commands relating to a git repo under one `RUN` operation as shown below.
67 |
68 | ```
69 | FROM gobuild-base AS rootlesskit
70 | ARG ROOTLESSKIT_VERSION
71 | ARG TARGETPLATFORM
72 | RUN --mount=target=/root/.cache,type=cache \
73 | git clone https://github.com/rootless-containers/rootlesskit.git /go/src/github.com/rootless-containers/rootlesskit && \
74 | cd /go/src/github.com/rootless-containers/rootlesskit && \
75 | git checkout -q "$ROOTLESSKIT_VERSION" && \
76 | CGO_ENABLED=0 xx-go build -o /rootlesskit ./cmd/rootlesskit && \
77 | xx-verify --static /rootlesskit
78 | ```
79 |
80 | ### Case 4: ENV value not available when parsing Dockerfile ###
81 |
82 | ```
83 | WORKDIR $GOPATH/src/github.com/grafana/grafana
84 | COPY go.mod go.sum embed.go ./
85 | ```
86 |
87 | ### Case 5: Use of other commands to install python alternatives (see Dockerfile-compose) ###
88 |
89 | ```
90 | RUN curl -L https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz | tar xzf - \
91 | && cd Python-${PYTHON_VERSION} \
92 | && ./configure --enable-optimizations --enable-shared --prefix=/usr LDFLAGS="-Wl,-rpath /usr/lib" \
93 | && make altinstall
94 | RUN alternatives --install /usr/bin/python python /usr/bin/python2.7 50
95 | RUN alternatives --install /usr/bin/python python /usr/bin/python$(echo "${PYTHON_VERSION%.*}") 60 96 | RUN curl https://bootstrap.pypa.io/get-pip.py | python - 97 | ``` 98 | 99 | ### Case 6: `git remote add upstream` and `git pull` ### 100 | 101 | ``` 102 | RUN mkdir "$pandas_home" \ 103 | && git clone "https://github.com/$gh_username/pandas.git" "$pandas_home" \ 104 | && cd "$pandas_home" \ 105 | && git remote add upstream "https://github.com/pandas-dev/pandas.git" \ 106 | && git pull upstream master 107 | ``` 108 | 109 | ### Case 7: `--mount` and `install.sh` (Dockerfile-moby) ### 110 | 111 | ``` 112 | FROM base AS criu 113 | ARG DEBIAN_FRONTEND 114 | ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc 115 | RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ 116 | --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ 117 | echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \ 118 | && apt-get update \ 119 | && apt-get install -y --no-install-recommends criu \ 120 | && install -D /usr/sbin/criu /build/criu 121 | 122 | RUN --mount=type=cache,target=/root/.cache/go-build \ 123 | --mount=type=cache,target=/go/pkg/mod \ 124 | --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ 125 | PREFIX=/build /tmp/install/install.sh containerd 126 | ``` 127 | 128 | 129 | ### Case 8: Use of inline shell scripts (Dockerfile-moby) ### 130 | 131 | ``` 132 | FROM base AS registry 133 | WORKDIR /go/src/github.com/docker/distribution 134 | # Install two versions of the registry. The first one is a recent version that 135 | # supports both schema 1 and 2 manifests. The second one is an older version that 136 | # only supports schema1 manifests. This allows integration-cli tests to cover 137 | # push/pull with both schema1 and schema2 manifests. 
138 | # The old version of the registry is not working on arm64, so installation is 139 | # skipped on that architecture. 140 | ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd 141 | ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 142 | RUN --mount=type=cache,target=/root/.cache/go-build \ 143 | --mount=type=cache,target=/go/pkg/mod \ 144 | --mount=type=tmpfs,target=/go/src/ \ 145 | set -x \ 146 | && git clone https://github.com/docker/distribution.git . \ 147 | && git checkout -q "$REGISTRY_COMMIT" \ 148 | && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ 149 | go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ 150 | && case $(dpkg --print-architecture) in \ 151 | amd64|armhf|ppc64*|s390x) \ 152 | git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ 153 | GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ 154 | go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ 155 | ;; \ 156 | esac 157 | ``` 158 | 159 | ### Case 9: Use of shell script file (Dockerfile-moby) ### 160 | 161 | ``` 162 | RUN /download-frozen-image-v2.sh /build \ 163 | busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ 164 | busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ 165 | debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \ 166 | hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ 167 | arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 168 | 169 | 170 | ``` 171 | 172 | ### Case 10: Changes to resource configurations using sed command ### 173 | 174 | ``` 175 | #(Dockerfile-chaosblade) 176 | FROM alpine:3.10.4 177 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories 178 | ``` 179 | 180 | ``` 
181 | #(Dockerfile-moby) 182 | FROM ${GOLANG_IMAGE} AS base 183 | RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache 184 | ARG APT_MIRROR 185 | RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ 186 | && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list 187 | ``` 188 | 189 | This could be a security exposure if changing to a malicious site. We may need to flag it. 190 | -------------------------------------------------------------------------------- /pkg/parser/addon/addon.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | 17 | package addon 18 | 19 | import ( 20 | "strings" 21 | 22 | "github.com/tap8stry/orion/pkg/common" 23 | "github.com/tap8stry/orion/pkg/parser/dockerfile" 24 | ) 25 | 26 | const ( 27 | curlOperation = "curl" 28 | wgetOperation = "wget" 29 | tarOperation = "tar" 30 | unzipOperation = "unzip" 31 | cpOperation = "cp" 32 | mvOperation = "mv" 33 | cdOperation = "cd" 34 | mkdirOperation = "mkdir" 35 | gitOperation = "git" 36 | gitCloneOperation = "git clone" 37 | gitCheckoutOperation = "git checkout" 38 | copyOperation = "COPY" 39 | addOperation = "ADD" 40 | ) 41 | 42 | // DiscoverAddonArtifacts returns a list of artifaces installed by RUN curl/wget commands 43 | func DiscoverAddonArtifacts(buildStage *common.BuildStage, dopts common.DiscoverOpts, buildArgs map[string]string) ([]common.InstallTrace, map[string]string, common.Image) { 44 | installTraces := []common.InstallTrace{} 45 | workdir := "/" 46 | stageArgsEnvs := make(map[string]string) 47 | for k, v := range buildArgs { //copy the build Args 48 | stageArgsEnvs[k] = v 49 | } 50 | stageEnvs := make(map[string]string) 51 | 52 | for _, cmd := range buildStage.DockerFileCmds { 53 | if strings.EqualFold(cmd.Value, dockerfile.ARG) { //get ARGs 54 | args := dockerfile.ParseArgEnv(cmd.Original) 55 | for key, value := range args { 56 | if len(value) > 0 { // new ARG or new value to build ARGs 57 | stageArgsEnvs[key] = replaceArgEnvVariable(value, stageArgsEnvs) 58 | } 59 | } 60 | } 61 | if strings.EqualFold(cmd.Value, dockerfile.ENV) { //get ENVs 62 | envs := dockerfile.ParseArgEnv(cmd.Original) 63 | for key, value := range envs { 64 | value = replaceArgEnvVariable(value, stageArgsEnvs) //for ENV value referencing other args or envs 65 | stageArgsEnvs[key] = value 66 | stageEnvs[key] = value 67 | } 68 | } 69 | 70 | if strings.EqualFold(cmd.Value, dockerfile.WORKDIR) { //get WORKDIR 71 | workdir = replaceArgEnvVariable(cmd.Next.Value, stageArgsEnvs) 72 | continue 73 | } 74 | 75 | if 
strings.EqualFold(cmd.Value, dockerfile.RUN) && 76 | (strings.Contains(cmd.Next.Value, curlOperation) || 77 | strings.Contains(cmd.Next.Value, wgetOperation) || 78 | strings.Contains(cmd.Next.Value, gitOperation)) { // process RUN curl/wget/git 79 | 80 | installs := generateCurlWgetGitTraces(workdir, cmd.Next.Value, stageArgsEnvs) 81 | if len(installs) > 0 { 82 | installTraces = append(installTraces, installs...) 83 | } 84 | } 85 | 86 | if strings.EqualFold(cmd.Value, dockerfile.COPY) || strings.EqualFold(cmd.Value, dockerfile.ADD) { // process COPY/ADD 87 | installs := generateCopyAddTraces(workdir, cmd.Original, dopts.Namespace, stageArgsEnvs) 88 | if installs != nil { 89 | installTraces = append(installTraces, installs...) 90 | } 91 | } 92 | } 93 | //update base image with the Args values 94 | buildStage.Image.Name = replaceArgEnvVariable(buildStage.Image.Name, stageArgsEnvs) 95 | buildStage.Image.Tag = replaceArgEnvVariable(buildStage.Image.Tag, stageArgsEnvs) 96 | return installTraces, stageEnvs, buildStage.Image 97 | } 98 | 99 | // generateCurlWgetGitTraces produces the traces of one RUN of "curl" or/and "wget" install commands 100 | func generateCurlWgetGitTraces(workdir, cmd string, stageargs map[string]string) []common.InstallTrace { 101 | installTraces := []common.InstallTrace{} 102 | installsets := parseSubcommands(cmd) 103 | currentdir := workdir 104 | 105 | for index := range installsets { 106 | installTrace := common.InstallTrace{} 107 | m := make(map[int]common.Trace) 108 | j := 0 109 | gitcloneUrl := "" 110 | 111 | for k := 0; k < len(installsets[index].Commands); k++ { 112 | subCmd := installsets[index].Commands[k] 113 | args := parseLine(subCmd, " ") 114 | switch args[0] { 115 | case curlOperation: 116 | installTrace.Origin, m[j] = processCurl(args, currentdir, stageargs) 117 | j++ 118 | case wgetOperation: 119 | installTrace.Origin, m[j] = processWget(args, currentdir, stageargs) 120 | j++ 121 | case gitOperation: 122 | if len(args) > 2 && 
strings.EqualFold(args[1], strings.Fields(gitCloneOperation)[1]) { 123 | installTrace.Origin, m[j] = processGitClone(args, currentdir, stageargs) 124 | j++ 125 | gitcloneUrl = installTrace.Origin 126 | } 127 | if len(args) > 2 && strings.EqualFold(args[1], strings.Fields(gitCheckoutOperation)[1]) { 128 | m[j] = processGitCheckout(args, currentdir, gitcloneUrl, stageargs) 129 | j++ 130 | } 131 | case tarOperation: 132 | trace := processTar(args, currentdir, stageargs) 133 | if len(trace.Source) > 0 { 134 | if existInInstallTrace(m, trace.Source) { //belongs to the current install 135 | m[j] = trace 136 | j++ 137 | } else { 138 | checkEarlierInstalls(&installTraces, trace) 139 | } 140 | } 141 | case unzipOperation: 142 | trace := processUnzip(args, currentdir, stageargs) 143 | if len(trace.Source) > 0 { 144 | if existInInstallTrace(m, trace.Source) { 145 | m[j] = trace 146 | j++ 147 | } else { 148 | checkEarlierInstalls(&installTraces, trace) 149 | } 150 | } 151 | case cpOperation: 152 | trace := processCp(args, currentdir, stageargs) 153 | if len(trace.Source) > 0 { 154 | if existInInstallTrace(m, trace.Source) { 155 | m[j] = trace 156 | j++ 157 | } else { 158 | checkEarlierInstalls(&installTraces, trace) 159 | } 160 | } 161 | case mvOperation: 162 | trace := processMv(args, currentdir, stageargs) 163 | if len(trace.Source) > 0 { 164 | if existInInstallTrace(m, trace.Source) { 165 | m[j] = trace 166 | j++ 167 | } else { 168 | checkEarlierInstalls(&installTraces, trace) 169 | } 170 | } 171 | case cdOperation: //update the current dir 172 | currentdir = processCd(args, currentdir, stageargs) 173 | } 174 | } 175 | if len(installTrace.Origin) > 0 && len(m) > 0 { 176 | installTrace.Traces = m 177 | installTraces = append(installTraces, installTrace) 178 | } 179 | } 180 | return installTraces 181 | } 182 | 183 | // generateCopyTraces produces the traces of one RUN of "curl" or/and "wget" install commands 184 | func generateCopyAddTraces(workdir, cmd, namespace string, 
stageargs map[string]string) []common.InstallTrace { 185 | installTraces := []common.InstallTrace{} 186 | args := parseLine(cmd, " ") 187 | installTrace, err := processCopyAdd(args, workdir, namespace, stageargs) 188 | if err == nil { 189 | installTraces = append(installTraces, installTrace) 190 | } 191 | return installTraces 192 | } 193 | 194 | // parseLine parses a line by the separator into an array and trims spaces and quotation marks 195 | func parseLine(line, separator string) []string { 196 | cmds := strings.Split(line, separator) 197 | newCmds := []string{} 198 | for i := range cmds { 199 | for true { //remove all tabs 200 | cmds[i] = strings.ReplaceAll(cmds[i], "\t", "") 201 | if !strings.Contains(cmds[i], "\t") { 202 | break 203 | } 204 | } 205 | cmds[i] = strings.Trim(cmds[i], " ") 206 | if len(cmds[i]) > 0 { 207 | newCmds = append(newCmds, cmds[i]) 208 | } 209 | } 210 | return newCmds 211 | } 212 | 213 | // parseSubcommands parse shell commands in a docker RUN operation 214 | func parseSubcommands(line string) []common.CommandSet { 215 | sets := []common.CommandSet{} 216 | cmdSetMap := common.CommandSet{} 217 | m := make(map[int]string) 218 | first := true 219 | exclude := true //ignore subcmds before the first CURL/WGET/GIT 220 | j := 0 221 | 222 | separator := "&&" //default 223 | if !strings.Contains(line, "&&") && strings.Contains(line, "; \t") { //some shell scripts use '; \' as end of a command 224 | separator = "; \t" 225 | } 226 | 227 | cmds := parseLine(line, separator) 228 | for i := range cmds { 229 | if strings.HasPrefix(cmds[i], curlOperation) || strings.HasPrefix(cmds[i], wgetOperation) || strings.HasPrefix(cmds[i], gitCloneOperation) { 230 | exclude = false 231 | if first { 232 | first = false 233 | } else { 234 | cmdSetMap.Commands = m 235 | sets = append(sets, cmdSetMap) 236 | m = make(map[int]string) 237 | j = 0 238 | } 239 | m[j] = cmds[i] 240 | j++ 241 | } else { 242 | if !exclude { 243 | m[j] = cmds[i] 244 | j++ 245 | } 246 | } 247 
| } 248 | cmdSetMap.Commands = m 249 | sets = append(sets, cmdSetMap) 250 | return sets 251 | } 252 | -------------------------------------------------------------------------------- /pkg/imagefs/imagefs.go: -------------------------------------------------------------------------------- 1 | package imagefs 2 | 3 | import ( 4 | "archive/tar" 5 | "compress/gzip" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "os" 10 | "path" 11 | "strings" 12 | 13 | "github.com/google/go-containerregistry/pkg/authn" 14 | "github.com/google/go-containerregistry/pkg/name" 15 | v1 "github.com/google/go-containerregistry/pkg/v1" 16 | "github.com/google/go-containerregistry/pkg/v1/remote" 17 | "github.com/google/go-containerregistry/pkg/v1/tarball" 18 | "github.com/pkg/errors" 19 | "k8s.io/release/pkg/spdx" 20 | ) 21 | 22 | //Get generates a file system of the image and returns file system path 23 | func Get(imageName, buildContextDir string) (string, string, string, string, error) { 24 | rootDir := "" 25 | imageRefs, err := getImageReferences(imageName) 26 | if err != nil { 27 | fmt.Printf("\nerror in getImageReferences: %s", err.Error()) 28 | return "", "", "", "", errors.Wrap(err, "getting image references from container registry") 29 | } 30 | if len(imageRefs) == 0 { 31 | return "", "", "", "", fmt.Errorf("\n%d image references found for %q", len(imageRefs), imageName) 32 | } 33 | refData := imageRefs[0] 34 | if len(imageRefs) > 1 { 35 | //e.g. 
images for different hardware architectures such as amd64, s390x, ppc64le 36 | //will pick amd64 if present, otherwise pick the first 37 | fmt.Printf("\n%d image references found for %q", len(imageRefs), imageName) 38 | for i, refData := range imageRefs { 39 | if strings.Contains(strings.ToLower(refData.Arch), "amd64") { 40 | refData = imageRefs[i] 41 | } 42 | } 43 | } 44 | 45 | fmt.Printf("\ndownload tarball for %q, arch=%q", imageName, refData.Arch) 46 | ref, err := name.ParseReference(refData.Digest) 47 | if err != nil { 48 | fmt.Printf("\nparsing reference %s", imageName) 49 | return "", "", "", "", errors.Wrap(err, "parsing image reference") 50 | } 51 | 52 | img, err := remote.Image(ref) 53 | if err != nil { 54 | fmt.Printf("\nerror getting image %q", ref.Name()) 55 | return "", "", "", "", errors.Wrap(err, "getting remote image") 56 | } 57 | 58 | rootDir, err = generateImageFileSystem(buildContextDir, img, ref) 59 | if err != nil { 60 | fmt.Printf("\nerror from getImageFileSystem(): %s", err.Error()) 61 | return "", "", "", "", errors.Wrap(err, "creating image fifle system") 62 | } 63 | return rootDir, refData.Digest, refData.OS, refData.Arch, nil 64 | } 65 | 66 | //generateImageFileSystem downloads the image and untar it to the directory 67 | func generateImageFileSystem(unpackdir string, img v1.Image, ref name.Reference) (string, error) { 68 | tarfile := path.Join(unpackdir, "image.tar") 69 | if err := tarball.WriteToFile(tarfile, ref, img); err != nil { 70 | fmt.Printf("\nerror writing image to %q", tarfile) 71 | return "", errors.Wrap(err, "writing image to file system") 72 | } 73 | fmt.Printf("\nwrote image to %q", tarfile) 74 | 75 | unpackDirRootfs := path.Join(unpackdir, "rootfs") 76 | os.MkdirAll(unpackDirRootfs, 0744) 77 | if err := untar(tarfile, unpackDirRootfs); err != nil { 78 | fmt.Printf("\nerror unpack %s to file system %s", tarfile, unpackDirRootfs) 79 | return unpackDirRootfs, errors.Wrap(err, "unpacking image tarball") 80 | } 81 | 82 | 
manifest, err := getManifest(path.Join(unpackDirRootfs, "manifest.json")) 83 | if err != nil { 84 | fmt.Printf("\nerror retrieving image manifest.json: %s", err.Error()) 85 | return "", errors.Wrap(err, "reading image manifest.json") 86 | } 87 | 88 | for _, file := range manifest.LayerFiles { //untar the tar.gz file for each layer 89 | filepath := path.Join(unpackDirRootfs, file) 90 | err = untar(filepath, unpackDirRootfs) 91 | if err != nil { 92 | fmt.Printf("\nerror untar image layer %q: %s", file, err.Error()) 93 | return "", errors.Wrapf(err, "untaring image layer %s", file) 94 | } 95 | } 96 | return unpackDirRootfs, nil 97 | } 98 | 99 | // getImageReferences gets a reference string and returns all image 100 | func getImageReferences(imageName string) ([]struct { 101 | Digest string 102 | Arch string 103 | OS string 104 | }, error) { 105 | ref, err := name.ParseReference(imageName) 106 | if err != nil { 107 | return nil, errors.Wrapf(err, "parsing image reference %s", imageName) 108 | } 109 | //descr, err := remote.Get(ref) 110 | descr, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) 111 | if err != nil { 112 | return nil, errors.Wrap(err, "fetching remote descriptor") 113 | } 114 | 115 | images := []struct { 116 | Digest string 117 | Arch string 118 | OS string 119 | }{} 120 | 121 | // If we got a digest, we reuse it as is 122 | if _, ok := ref.(name.Digest); ok { 123 | images = append(images, struct { 124 | Digest string 125 | Arch string 126 | OS string 127 | }{Digest: ref.(name.Digest).String()}) 128 | return images, nil 129 | } 130 | 131 | // If the reference is not an image, it has to work as a tag 132 | tag, ok := ref.(name.Tag) 133 | if !ok { 134 | return nil, errors.Errorf("could not cast tag from reference %s", imageName) 135 | } 136 | // If the reference points to an image, return it 137 | if descr.MediaType.IsImage() { 138 | fmt.Printf("Reference %s points to a single image", imageName) 139 | // Check if we can get an 
image 140 | im, err := descr.Image() 141 | if err != nil { 142 | return nil, errors.Wrap(err, "getting image from descriptor") 143 | } 144 | 145 | imageDigest, err := im.Digest() 146 | if err != nil { 147 | return nil, errors.Wrap(err, "while calculating image digest") 148 | } 149 | 150 | dig, err := name.NewDigest( 151 | fmt.Sprintf( 152 | "%s/%s@%s:%s", 153 | tag.RegistryStr(), tag.RepositoryStr(), 154 | imageDigest.Algorithm, imageDigest.Hex, 155 | ), 156 | ) 157 | if err != nil { 158 | return nil, errors.Wrap(err, "building single image digest") 159 | } 160 | 161 | fmt.Printf("Adding image digest %s from reference", dig.String()) 162 | return append(images, struct { 163 | Digest string 164 | Arch string 165 | OS string 166 | }{Digest: dig.String()}), nil 167 | } 168 | 169 | // Get the image index 170 | index, err := descr.ImageIndex() 171 | if err != nil { 172 | return nil, errors.Wrapf(err, "getting image index for %s", imageName) 173 | } 174 | indexManifest, err := index.IndexManifest() 175 | if err != nil { 176 | return nil, errors.Wrapf(err, "getting index manifest from %s", imageName) 177 | } 178 | fmt.Printf("Reference image index points to %d manifests", len(indexManifest.Manifests)) 179 | 180 | for _, manifest := range indexManifest.Manifests { 181 | dig, err := name.NewDigest( 182 | fmt.Sprintf( 183 | "%s/%s@%s:%s", 184 | tag.RegistryStr(), tag.RepositoryStr(), 185 | manifest.Digest.Algorithm, manifest.Digest.Hex, 186 | )) 187 | if err != nil { 188 | return nil, errors.Wrap(err, "generating digest for image") 189 | } 190 | 191 | fmt.Printf( 192 | "Adding image %s/%s@%s:%s (%s/%s)", 193 | tag.RegistryStr(), tag.RepositoryStr(), manifest.Digest.Algorithm, manifest.Digest.Hex, 194 | manifest.Platform.Architecture, manifest.Platform.OS, 195 | ) 196 | arch, osid := "", "" 197 | if manifest.Platform != nil { 198 | arch = manifest.Platform.Architecture 199 | osid = manifest.Platform.OS 200 | } 201 | images = append(images, 202 | struct { 203 | Digest string 
204 | Arch string 205 | OS string 206 | }{ 207 | Digest: dig.String(), 208 | Arch: arch, 209 | OS: osid, 210 | }) 211 | } 212 | return images, nil 213 | } 214 | 215 | //untar .tar or .tar.gz file into target directory 216 | func untar(tarball, target string) error { 217 | fmt.Printf("\nuntar %q to %s", tarball, target) 218 | tarballfile, err := os.Open(tarball) 219 | defer tarballfile.Close() 220 | if err != nil { 221 | return errors.Wrap(err, "opening tarball") 222 | } 223 | defer os.Remove(tarball) 224 | 225 | var fileReader io.ReadCloser 226 | if strings.HasSuffix(tarball, ".gz") { 227 | if fileReader, err = gzip.NewReader(tarballfile); err != nil { 228 | return errors.Wrap(err, "creating gzip reader") 229 | } 230 | } else { 231 | fileReader = tarballfile 232 | } 233 | defer fileReader.Close() 234 | 235 | tarReader := tar.NewReader(fileReader) 236 | for { 237 | header, err := tarReader.Next() 238 | if err == io.EOF { 239 | break 240 | } else if err != nil { 241 | return errors.Wrap(err, "creating tar reader") 242 | } 243 | 244 | path := path.Join(target, header.Name) 245 | info := header.FileInfo() 246 | if info.IsDir() { 247 | if err = os.MkdirAll(path, info.Mode()); err != nil { 248 | return errors.Wrap(err, "creating directory") 249 | } 250 | continue 251 | } 252 | file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) 253 | if err != nil { 254 | return errors.Wrap(err, "creating a file") 255 | } 256 | _, err = io.Copy(file, tarReader) 257 | file.Close() 258 | if err != nil { 259 | return errors.Wrap(err, "copying from tarball to file") 260 | } 261 | } 262 | return nil 263 | } 264 | 265 | //getManifest reads the file and parses it for image manifest 266 | func getManifest(filename string) (spdx.ArchiveManifest, error) { 267 | manifestData := []spdx.ArchiveManifest{} 268 | 269 | manifestJSON, err := os.ReadFile(filename) 270 | if err != nil { 271 | return spdx.ArchiveManifest{}, errors.Wrap(err, "reading image manifest file") 272 | } 
273 | if err := json.Unmarshal(manifestJSON, &manifestData); err != nil { 274 | fmt.Println(string(manifestJSON)) 275 | return spdx.ArchiveManifest{}, errors.Wrap(err, "unmarshalling image manifest%q: %s") 276 | } 277 | return manifestData[0], nil 278 | } 279 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /pkg/parser/addon/operations.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2020 IBM Corporation 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | 17 | package addon 18 | 19 | import ( 20 | "errors" 21 | "fmt" 22 | "strings" 23 | 24 | "github.com/tap8stry/orion/pkg/common" 25 | "golang.org/x/crypto/sha3" 26 | ) 27 | 28 | // processCurl parses curl command 29 | func processCurl(args []string, workdir string, stageargs map[string]string) (string, common.Trace) { 30 | trace := common.Trace{ 31 | Command: curlOperation, 32 | Source: "", 33 | Destination: workdir, 34 | Workdir: workdir, 35 | } 36 | origin := "" 37 | for i, arg := range args { 38 | arg = replaceArgEnvVariable(arg, stageargs) 39 | if strings.HasPrefix(arg, "http") { 40 | trace.Source = arg 41 | origin = trace.Source //the installation source 42 | } 43 | if (strings.HasPrefix(arg, "--output") || strings.HasPrefix(arg, "-o") || strings.EqualFold(arg, ">")) && len(args) > i+1 { 44 | trace.Destination = replaceArgEnvVariable(args[i+1], stageargs) //update the destination 45 | if !strings.HasPrefix(trace.Destination, "/") { 46 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination 47 | } 48 | } 49 | } 50 | return origin, trace 51 | } 52 | 53 | // processWget parses wget command 54 | func processWget(args []string, workdir string, stageargs map[string]string) (string, common.Trace) { 55 | trace := common.Trace{ 56 | Command: wgetOperation, 57 | Source: "", 58 | Destination: workdir, 59 | Workdir: workdir, 60 | } 61 | origin := "" 62 | defaultName := "" 63 | useDefaultName := true 64 | 65 | for i, arg := range args { 66 | arg = common.TrimQuoteMarks(arg) 67 | arg = replaceArgEnvVariable(arg, stageargs) 68 | if strings.HasPrefix(arg, "http") { 69 | trace.Source = arg 70 | origin = trace.Source 71 | splits := strings.Split(arg, "/") 72 | defaultName = replaceArgEnvVariable(splits[len(splits)-1], stageargs) 73 | } 74 | if strings.HasPrefix(arg, "-O") && len(args) > i+1 { //filename to store download in workdir 75 | trace.Destination = replaceArgEnvVariable(args[i+1], stageargs) 76 | if 
!strings.HasPrefix(trace.Destination, "/") { 77 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination 78 | } 79 | i++ 80 | useDefaultName = false 81 | } 82 | if strings.HasPrefix(arg, "-P") && len(args) > i+1 { //dir where download will be stored 83 | trace.Destination = replaceArgEnvVariable(args[i+1], stageargs) 84 | if !strings.HasPrefix(trace.Destination, "/") { 85 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination 86 | } 87 | } 88 | } 89 | if useDefaultName { 90 | trace.Destination = strings.TrimSuffix(trace.Destination, "/") + "/" + defaultName 91 | } 92 | return origin, trace 93 | } 94 | 95 | //processGitClone parses git clone command 96 | func processGitClone(args []string, workdir string, stageargs map[string]string) (string, common.Trace) { 97 | //ssh://[user@]host.xz[:port]/path/to/repo.git/ 98 | //http[s]://host.xz[:port]/path/to/repo.git/ 99 | 100 | trace := common.Trace{ 101 | Command: gitCloneOperation, 102 | Source: "", 103 | Destination: workdir, 104 | Workdir: workdir, 105 | } 106 | origin := "" 107 | 108 | for i, arg := range args { 109 | if strings.HasPrefix(arg, "ssh://") || strings.HasPrefix(arg, "https://") || strings.HasPrefix(arg, "http://") { 110 | trace.Source = replaceArgEnvVariable(arg, stageargs) 111 | origin = trace.Source 112 | if i < len(args)-1 { 113 | trace.Destination = replaceArgEnvVariable(args[i+1], stageargs) 114 | } else { 115 | splits := strings.Split(trace.Source, "/") 116 | trace.Destination = splits[len(splits)-1] 117 | } 118 | if !strings.HasPrefix(trace.Destination, "/") { 119 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination 120 | } 121 | return origin, trace 122 | } 123 | if strings.EqualFold(arg, "-b") && len(args) > i+1 { //add git branch 124 | trace.Command += " -b " + args[i+1] 125 | } 126 | } 127 | return origin, trace 128 | } 129 | 130 | //processGitCheckout parses git checkout command, recognizes only 'git checkout ' 131 | 
func processGitCheckout(args []string, workdir, giturl string, stageargs map[string]string) common.Trace {
	// Only the exact three-token form "git checkout <ref>" is recognized; the
	// checked-out ref becomes Source and the current workdir the Destination.
	// NOTE(review): giturl is unused here — presumably reserved so callers can
	// later tie the ref back to the clone URL; confirm before removing.
	trace := common.Trace{
		Command:     gitCheckoutOperation,
		Source:      "",
		Destination: workdir,
		Workdir:     workdir,
	}
	if len(args) == 3 {
		trace.Source = replaceArgEnvVariable(args[2], stageargs)
		trace.Destination = replaceArgEnvVariable(workdir, stageargs)
	}
	return trace
}

// processTar parses a tar command line into a Trace: Source is the archive
// being extracted, Destination the directory it is extracted into (defaults
// to workdir). Flag handling is order-dependent: specific combined flags
// (-xJC, -xfC) are matched before the generic "contains x and f" fallback
// (-xvf, -xf, xvf, -zxvf, ...).
func processTar(args []string, workdir string, stageargs map[string]string) common.Trace {
	trace := common.Trace{
		Command:     tarOperation,
		Source:      "",
		Destination: workdir,
		Workdir:     workdir,
	}
	for j := 1; j < len(args); j++ { //start from j=1 to skip "tar")
		// -C <dir> / --directory <dir>: extraction target directory.
		if (args[j] == "-C" || strings.HasPrefix(args[j], "--directory")) && len(args) > j+1 {
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			j++
			continue
		}
		// -xJC <dir>: xz-extract with an inline target directory.
		if args[j] == "-xJC" && len(args) > j+1 {
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			trace.Command += fmt.Sprintf(" %s", args[j])
			j++
			continue
		}
		// -f <archive>: the archive file to read.
		if args[j] == "-f" && len(args) > j+1 {
			trace.Source = replaceArgEnvVariable(args[j+1], stageargs)
			j++
			continue
		}
		// -xfC <dir> <archive>.
		// NOTE(review): operand order here (destination first, then source)
		// differs from tar's usual "f" operand ordering — confirm against the
		// Dockerfiles this was written for.
		if args[j] == "-xfC" && len(args) > j+2 {
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			trace.Source = replaceArgEnvVariable(args[j+2], stageargs)
			trace.Command += fmt.Sprintf(" %s", args[j])
			j += 2
			continue
		}
		if strings.Contains(args[j], "x") && strings.Contains(args[j], "f") && len(args) > j+1 { //could be -xvf, -xf, xvf, -zxvf...
			trace.Command += fmt.Sprintf(" %s", args[j])
			trace.Source = replaceArgEnvVariable(args[j+1], stageargs)
			j++
			continue
		}
	}
	// Resolve a relative archive path against workdir.
	// NOTE(review): when no archive flag matched, Source is empty and this
	// yields "<workdir>/" — verify downstream consumers tolerate that.
	if !strings.HasPrefix(trace.Source, "/") {
		trace.Source = strings.TrimSuffix(workdir, "/") + "/" + trace.Source
	}
	return trace
}

// processUnzip parses an unzip command line: Source is the zip file (resolved
// against workdir when relative), Destination the "-d" extraction directory
// (defaulting to workdir). "-P <password>" is consumed and ignored.
func processUnzip(args []string, workdir string, stageargs map[string]string) common.Trace {
	trace := common.Trace{
		Command:     unzipOperation,
		Source:      "",
		Destination: workdir,
		Workdir:     workdir,
	}

	/* unzip [-Z] [-opts[modifiers]] file[.zip] [list] [-x xlist] [-d exdir]
	   unzip latest.zip
	   unzip filename.zip -d /path/to/directory
	   unzip filename.zip -x file1-to-exclude file2-to-exclude
	   unzip -P PasswOrd filename.zip */
	for j := 1; j < len(args); j++ { //skip "unzip", e.g unzip gradle-*.zip
		if strings.HasPrefix(args[j], "-d") && len(args) > j+1 {
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			j++
			continue
		}
		if strings.HasPrefix(args[j], "-P") && len(args) > j+1 { //skip the password argument
			j++
			continue
		}
		// First non-flag argument is the archive itself.
		if !strings.HasPrefix(args[j], "-") && len(trace.Source) == 0 {
			trace.Source = replaceArgEnvVariable(args[j], stageargs)
			if !strings.HasPrefix(trace.Source, "/") {
				trace.Source = strings.TrimSuffix(workdir, "/") + "/" + trace.Source
			}
			continue
		}
	}
	return trace
}

// processCp parses a cp command line. Only the first non-flag argument pair
// is recorded (source, destination); multi-source forms like
// "cp a b c dest" are reduced to (a, b). Relative paths resolve against workdir.
func processCp(args []string, workdir string, stageargs map[string]string) common.Trace {
	trace := common.Trace{
		Command: cpOperation,
		Workdir: workdir,
	}
	for j := 1; j < len(args); j++ { //skip j=0 ("cp")
		if !strings.HasPrefix(args[j], "-") && len(args) > j+1 {
			trace.Source = replaceArgEnvVariable(args[j], stageargs)
			if !strings.HasPrefix(trace.Source, "/") {
				trace.Source = strings.TrimSuffix(workdir, "/") + "/" + trace.Source
			}
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			if !strings.HasPrefix(trace.Destination, "/") {
				trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination
			}
			break
		}
	}
	return trace
}

// processCd parses change directory command and returns the current dir (not support 'cd ../')
// An empty string is returned when the command is not the two-token "cd <dir>" form.
func processCd(args []string, workdir string, stageargs map[string]string) string {
	//git clone https://github.com/alievk/avatarify-python.git /app/avatarify && cd /app/avatarify && git checkout
	var currentdir string
	if len(args) == 2 {
		if strings.EqualFold(args[1], "~") {
			currentdir = "~"
		} else {
			currentdir = replaceArgEnvVariable(args[1], stageargs)
			if !strings.HasPrefix(currentdir, "/") {
				// relative target: resolve against the current workdir
				currentdir = strings.TrimSuffix(workdir, "/") + "/" + currentdir
			}
		}
	}
	return currentdir
}

// processMv parses an mv command line. Mirrors processCp: the first non-flag
// argument pair becomes (Source, Destination), with relative paths resolved
// against workdir.
func processMv(args []string, workdir string, stageargs map[string]string) common.Trace {
	trace := common.Trace{
		Command: mvOperation,
		Workdir: workdir,
	}
	for j := 1; j < len(args); j++ { //skip j=0 ("mv")
		if !strings.HasPrefix(args[j], "-") && len(args) > j+1 {
			trace.Source = replaceArgEnvVariable(args[j], stageargs)
			if !strings.HasPrefix(trace.Source, "/") {
				trace.Source = strings.TrimSuffix(workdir, "/") + "/" + trace.Source
			}
			trace.Destination = replaceArgEnvVariable(args[j+1], stageargs)
			if !strings.HasPrefix(trace.Destination, "/") {
				trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination
			}
			break
		}
	}
	return trace
}

// processCopyAdd parses COPY and ADD operation, e.g. COPY (or ADD) [--chown=: --from=] ...
287 | func processCopyAdd(args []string, workdir, namespace string, stageargs map[string]string) (common.InstallTrace, error) { 288 | installTrace := common.InstallTrace{} 289 | trace := common.Trace{ 290 | Command: args[0], 291 | Workdir: workdir, 292 | } 293 | m := make(map[int]common.Trace) 294 | for j := 1; j < len(args); j++ { //skip j=0 ("COPY") 295 | if !strings.HasPrefix(args[j], "--") && len(args) > j+1 { 296 | trace.Source = replaceArgEnvVariable(strings.Join(args[j:len(args)-1], ","), stageargs) 297 | if strings.HasPrefix(trace.Source, "http") { 298 | installTrace.Origin = replaceArgEnvVariable(args[j], stageargs) 299 | } else { 300 | installTrace.Origin = fmt.Sprintf("%s", namespace) 301 | installTrace.OriginHash = fmt.Sprintf("%x", sha3.Sum256([]byte(namespace))) 302 | } 303 | trace.Destination = replaceArgEnvVariable(args[len(args)-1], stageargs) 304 | if !strings.HasPrefix(trace.Destination, "/") { 305 | if strings.EqualFold(trace.Destination, "./") { 306 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/." 307 | } else { 308 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + strings.TrimSpace(trace.Destination) 309 | } 310 | } 311 | m[0] = trace 312 | installTrace.Traces = m 313 | return installTrace, nil 314 | } 315 | if strings.HasPrefix(args[j], "--from=") && len(args) > j+2 { 316 | for i := j + 1; i < len(args); i++ { 317 | if strings.HasPrefix(args[i], "--") { 318 | continue 319 | } 320 | trace.Source = replaceArgEnvVariable(strings.Join(args[i:len(args)-1], ","), stageargs) 321 | trace.Destination = replaceArgEnvVariable(args[len(args)-1], stageargs) 322 | if !strings.HasPrefix(trace.Destination, "/") { 323 | if strings.EqualFold(trace.Destination, "./") { 324 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/." 
325 | } else { 326 | trace.Destination = strings.TrimSuffix(workdir, "/") + "/" + trace.Destination 327 | } 328 | } 329 | m[0] = trace 330 | installTrace.Traces = m 331 | installTrace.Origin = fmt.Sprintf("buildstage:%s:%s", strings.TrimPrefix(args[j], "--from="), trace.Source) 332 | return installTrace, nil 333 | } 334 | } 335 | } 336 | return installTrace, errors.New("no trace can be produced") 337 | } 338 | --------------------------------------------------------------------------------