├── cmd ├── patchball │ ├── README │ ├── example-patch.json │ └── main.go ├── client │ ├── docker_test.go │ ├── reverseproxy_test.go │ ├── localserver.go │ ├── docker.go │ ├── syncer.go │ ├── cloudrun.go │ ├── reverseproxy.go │ └── main.go └── daemon │ ├── portcheck_test.go │ ├── nanny_test.go │ ├── portcheck.go │ ├── nanny.go │ ├── main.go │ └── server.go ├── .gitignore ├── go.mod ├── hack ├── upload-rundevd-release.sh └── make-rundev-client-release.sh ├── lib ├── types │ └── types.go ├── constants │ └── constants.go ├── dockerfile │ ├── buildcmd.go │ ├── parse.go │ ├── buildcmd_test.go │ └── parse_test.go ├── ignore │ ├── parse.go │ ├── parse_test.go │ ├── ignores.go │ └── ignores_test.go ├── handlerutil │ └── handlers.go └── fsutil │ ├── customstat.go │ ├── archive_test.go │ ├── unarchive.go │ ├── fsdiff.go │ ├── fstree_test.go │ ├── fstree.go │ ├── archive.go │ └── fsdiff_test.go ├── .github └── workflows │ └── go.yml ├── README.md ├── go.sum └── LICENSE /cmd/patchball/README: -------------------------------------------------------------------------------- 1 | patchball 2 | ========= 3 | 4 | patchball is a sample utility to test patch balls 5 | created from a local test directory with given config, 6 | used to manually verify the behavior, because I haven't 7 | written unit tests for these yet. 8 | -------------------------------------------------------------------------------- /cmd/patchball/example-patch.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Type": 1, 4 | "Path": ".git" 5 | }, 6 | { 7 | "Type": 0, 8 | "Path": "Dockerfile" 9 | }, 10 | { 11 | "Type": 0, 12 | "Path": "Dockerfile.foo" 13 | }, 14 | { 15 | "Type": 0, 16 | "Path": "foo" 17 | } 18 | ] 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | 10 | # Test binary, built with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Dependency directories (remove the comment below to include it) 17 | # vendor/ 18 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ahmetb/rundev 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/ahmetb/pstree v0.0.0-20190815175305-245b319425b4 7 | github.com/bmatcuk/doublestar v1.1.5 8 | github.com/docker/docker v1.13.1 9 | github.com/google/go-cmp v0.3.0 10 | github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf 11 | github.com/google/uuid v1.1.1 12 | github.com/kr/pretty v0.1.0 13 | github.com/moby/buildkit v0.3.3 14 | github.com/pkg/errors v0.8.1 15 | google.golang.org/api v0.7.0 16 | gotest.tools v2.2.0+incompatible // indirect 17 | ) 18 | -------------------------------------------------------------------------------- /cmd/client/docker_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | -------------------------------------------------------------------------------- /hack/upload-rundevd-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -euo pipefail 17 | SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 18 | [[ -n "${DEBUG:-}" ]] && set -x 19 | 20 | cd "${SCRIPTDIR}/.." 21 | 22 | name="rundevd-v0.0.0-$(git describe --always --dirty)" 23 | bucket="${BUCKET:-rundev-test}" 24 | 25 | env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ 26 | go build -o /dev/stdout ./cmd/daemon \ 27 | | gsutil -q cp - gs://"${bucket}/${name}" 1>&2 28 | 29 | echo "https://storage.googleapis.com/${bucket}/${name}" 30 | -------------------------------------------------------------------------------- /lib/types/types.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
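// Illustrative note (not in the original file, values hypothetical): types.Cmd is a
// flattened argv slice, so Cmd{"/bin/sh", "-c", "make build"}.Command() returns
// "/bin/sh" and .Args() returns []string{"-c", "make build"}; a nil or empty Cmd
// returns "" and nil respectively.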
14 | 15 | package types 16 | 17 | type ProcError struct { 18 | Message string `json:"message"` 19 | Output string `json:"output"` 20 | } 21 | 22 | type Cmd []string 23 | 24 | func (c Cmd) Command() string { 25 | if len(c) == 0 { 26 | return "" 27 | } 28 | return c[0] 29 | } 30 | 31 | func (c Cmd) Args() []string { 32 | if len(c) <= 1 { 33 | return nil 34 | } 35 | return c[1:] 36 | } 37 | 38 | type BuildCmd struct { 39 | C Cmd `json:"c"` 40 | On []string `json:"on,omitempty"` // file patterns 41 | } 42 | 43 | type BuildCmds []BuildCmd 44 | -------------------------------------------------------------------------------- /lib/constants/constants.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package constants 16 | 17 | const ( 18 | HdrRundevChecksum = `rundev-checksum` 19 | HdrRundevPatchPreconditionSum = `rundev-apply-if-checksum` 20 | HdrRundevClientSecret = `rundev-client-secret` 21 | 22 | MimeDumbRepeat = `application/vnd.rundev.repeat` 23 | MimeChecksumMismatch = `application/vnd.rundev.checksumMismatch+json` 24 | MimePatch = `application/vnd.rundev.patch+tar` 25 | MimeProcessError = `application/vnd.rundev.procError+json` 26 | 27 | WhiteoutDeleteSuffix = ".whiteout.del" 28 | ) 29 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | name: Go 16 | on: 17 | - push 18 | jobs: 19 | build: 20 | name: Build 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Set up Go 1.12 24 | uses: actions/setup-go@v1 25 | with: 26 | version: 1.12 27 | id: go 28 | 29 | - name: Check out code into the Go module directory 30 | uses: actions/checkout@master 31 | 32 | - name: Download dependencies 33 | run: go mod download 34 | 35 | - name: Build rundev 36 | run: go build -o bin/rundev ./cmd/client 37 | 38 | - name: Build rundevd 39 | run: env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/rundevd ./cmd/daemon 40 | -------------------------------------------------------------------------------- /lib/dockerfile/buildcmd.go: -------------------------------------------------------------------------------- 1 | package dockerfile 2 | 3 | import ( 4 | "github.com/ahmetb/rundev/lib/types" 5 | "regexp" 6 | "strings" 7 | "unicode" 8 | ) 9 | 10 | var ( 11 | runCmdAnnotationPattern = regexp.MustCompile(`\s+#\s?rundev(\[(.*)\])?$`) 12 | ) 13 | 14 | // ParseBuildCmds extracts RUN commands from the last dockerfile stage with #rundev or #rundev[PATTERN,..] annotation. 15 | func ParseBuildCmds(d *Dockerfile) types.BuildCmds { 16 | var out types.BuildCmds 17 | for _, stmt := range d.Stmts() { 18 | switch stmt.Value { 19 | case "from": 20 | out = nil // reset 21 | case "run": 22 | if !runCmdAnnotationPattern.MatchString(stmt.Original) { 23 | continue 24 | } 25 | 26 | var conditions []string 27 | parts := runCmdAnnotationPattern.FindStringSubmatch(stmt.Original) 28 | if len(parts) > 1 { 29 | condStr := parts[len(parts)-1] 30 | conds := strings.Split(condStr, ",") 31 | if condStr != "" && len(conds) > 0 { 32 | conditions = conds 33 | } 34 | } 35 | 36 | c := parseCommand(stmt.Next, stmt.Attributes["json"]) 37 | cm := Cmd{c[0], c[1:]} 38 | 39 | // trim comment at the end of argv (as dockerfile parser isn't doing so) 40 | if len(cm.Args) > 0 { 41 | v := cm.Args[len(cm.Args)-1] 42 | v = runCmdAnnotationPattern.ReplaceAllString(v, "") 43 | v = strings.TrimRightFunc(v, unicode.IsSpace) 44 | cm.Args[len(cm.Args)-1] = v 45 | } 46 | out = append(out, types.BuildCmd{ 47 | C: cm.Flatten(), 48 | On: conditions, 49 | }) 50 | } 51 | } 52 | return out 53 | } 54 | -------------------------------------------------------------------------------- /hack/make-rundev-client-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -euo pipefail 17 | SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 18 | [[ -n "${DEBUG:-}" ]] && set -x 19 | 20 | cd "${SCRIPTDIR}/.." 
21 | 22 | bucket="${BUCKET:-rundev-test}" 23 | subpath="${SUBPATH:-nightly/client}" 24 | file="rundev-$(date -u +%Y-%m-%d-%H%M%S)-$(git describe --always --dirty)" 25 | file_latest="rundev-latest" 26 | 27 | build_dir="$(mktemp -d)" 28 | trap 'rm -rf -- "${build_dir}"' EXIT 29 | 30 | for os in darwin linux; do 31 | echo >&2 "building $os" 32 | fp="${bucket}/${subpath}/${os}/${file}" 33 | fp_latest="${bucket}/${subpath}/${os}/${file_latest}" 34 | 35 | GOOS="${os}" GOARCH="amd64" go build -o "${build_dir}/out" ./cmd/client 36 | echo >&2 "uploading ${os}" 37 | gsutil -q cp "${build_dir}/out" gs://"${fp}" 1>&2 38 | gsutil -q cp "${build_dir}/out" gs://"${fp_latest}" 1>&2 39 | 40 | echo "-> https://storage.googleapis.com/${fp}" 41 | echo "-> https://storage.googleapis.com/${fp_latest}" 42 | done 43 | -------------------------------------------------------------------------------- /lib/ignore/parse.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package ignore 16 | 17 | import ( 18 | "github.com/docker/docker/builder/dockerignore" 19 | "github.com/pkg/errors" 20 | "io" 21 | "strings" 22 | ) 23 | 24 | // ParseDockerignore returns statements in a .dockerignore file contents 25 | // that can be matched to files with filepath.Match. 26 | // https://docs.docker.com/engine/reference/builder/#dockerignore-file 27 | func ParseDockerignore(r io.Reader) ([]string, error) { 28 | v, err := dockerignore.ReadAll(r) 29 | if err != nil { 30 | return nil, errors.Wrap(err, "failed to parse dockerignore format") 31 | } 32 | 33 | // TODO: it looks like implementing exceptions (!PATTERN) will be difficult for now. they're also rarely used. 34 | for _, p := range v { 35 | if strings.HasPrefix(p, "!") { 36 | return nil, errors.Errorf("exception rules in dockerignores are not yet supported (pattern: %s)", p) 37 | } 38 | } 39 | 40 | // validate paths 41 | for _, p := range v { 42 | if _, err := pathMatch(".", p); err != nil { 43 | return nil, errors.Wrapf(err, "failed to parse dockerignore pattern %s", p) 44 | } 45 | } 46 | return v, nil 47 | } 48 | -------------------------------------------------------------------------------- /lib/ignore/parse_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package ignore 16 | 17 | import ( 18 | "io" 19 | "reflect" 20 | "strings" 21 | "testing" 22 | ) 23 | 24 | func TestParseDockerignore(t *testing.T) { 25 | type args struct { 26 | r io.Reader 27 | } 28 | tests := []struct { 29 | name string 30 | in string 31 | want []string 32 | wantErr bool 33 | }{ 34 | { 35 | name: "empty file", 36 | in: "", 37 | want: nil, 38 | wantErr: false, 39 | }, 40 | { 41 | name: "good parse", 42 | in: `# comment 43 | a 44 | b/c 45 | d/** 46 | e/*f/* 47 | g?`, 48 | want: []string{"a", "b/c", "d/**", "e/*f/*", "g?"}, 49 | wantErr: false, 50 | }, 51 | { 52 | name: "exception rules not supported", 53 | in: `node_modules/** 54 | !node_modules/package.json`, 55 | want: nil, 56 | wantErr: true, 57 | }, 58 | { 59 | name: "bad pattern", 60 | in: "[-]", 61 | want: nil, 62 | wantErr: true, 63 | }, 64 | } 65 | for _, tt := range tests { 66 | t.Run(tt.name, func(t *testing.T) { 67 | got, err := ParseDockerignore(strings.NewReader(tt.in)) 68 | if (err != nil) != tt.wantErr { 69 | t.Errorf("ParseDockerignore() error = %v, wantErr %v, got=%v", err, tt.wantErr, got) 70 | return 71 | } 72 | if !reflect.DeepEqual(got, tt.want) { 73 | t.Errorf("ParseDockerignore() got = %v, want %v", got, tt.want) 74 | } 75 | }) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /cmd/daemon/portcheck_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package main 16 | 17 | import ( 18 | "context" 19 | "github.com/pkg/errors" 20 | "net" 21 | "testing" 22 | "time" 23 | ) 24 | 25 | func Test_checkPortOpen(t *testing.T) { 26 | ok := newTCPPortChecker(9999).checkPort() 27 | if ok { 28 | t.Fatal("port should not be detected as open") 29 | } 30 | 31 | li, err := net.Listen("tcp", "localhost:56771") 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | defer li.Close() 36 | 37 | ok = newTCPPortChecker(56771).checkPort() 38 | if !ok { 39 | t.Fatal("port should be detected as open") 40 | } 41 | } 42 | 43 | func Test_waitPort(t *testing.T) { 44 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) 45 | defer cancel() 46 | err := newTCPPortChecker(9999).waitPort(ctx) 47 | if err == nil { 48 | t.Fatal("should've gotten a context cancellation error") 49 | } 50 | underlying := errors.Cause(err) 51 | if underlying != context.DeadlineExceeded { 52 | t.Fatalf("inner error is not timeline exceeded: %+v", err) 53 | } 54 | 55 | li, err := net.Listen("tcp", "localhost:56771") 56 | if err != nil { 57 | t.Fatal(err) 58 | } 59 | defer li.Close() 60 | 61 | ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second*10) 62 | defer cancel2() 63 | err = newTCPPortChecker(56771).waitPort(ctx2) 64 | if err != nil { 65 | t.Fatalf("got error from open port: %v", err) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /lib/ignore/ignores.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package ignore 16 | 17 | import ( 18 | "github.com/bmatcuk/doublestar" 19 | "path/filepath" 20 | "strings" 21 | ) 22 | 23 | type FileIgnores struct { 24 | patterns []string 25 | } 26 | 27 | // Ignored tests if given relative path is excluded. If FileIgnores is nil, 28 | // it would not ignore any files. 29 | func (f *FileIgnores) Ignored(path string) bool { 30 | if f == nil { 31 | return false 32 | } 33 | return ignored(path, f.patterns) 34 | } 35 | 36 | func NewFileIgnores(rules []string) *FileIgnores { return &FileIgnores{patterns: rules} } 37 | 38 | // ignored checks if the path (OS-dependent file separator) matches one of the exclusion rules (that are in dockerignore format). 39 | func ignored(path string, exclusions []string) bool { 40 | unixPath := filepath.ToSlash(path) 41 | for _, p := range exclusions { 42 | ok, _ := pathMatch(unixPath, p) // ignore error as it's checked as part of parsing the pattern 43 | if ok { 44 | return true 45 | } 46 | } 47 | return false 48 | } 49 | 50 | // pathMatch checks if given path with forward slashes matches the dockerignore pattern. 51 | // This supports double star (**) globbing, and patterns with leading slash (/). 
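// Illustrative behavior, taken from the cases exercised in ignores_test.go:
//
//	pathMatch("a/b", "a/*")              // true
//	pathMatch("a/b/c", "a/*")            // false (a single star does not cross "/")
//	pathMatch("a/b/c", "a/**")           // true
//	pathMatch("a/b", "/a/*")             // true (leading slash is trimmed)
//	pathMatch("a/b/c/foo.py", "**/*.py") // true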
52 | func pathMatch(unixPath string, pattern string) (bool, error) { 53 | pattern = strings.TrimPrefix(pattern, "/") // .dockerignore supports /a/b format, but we just need to make it relative. 54 | return doublestar.Match(pattern, unixPath) 55 | } 56 | -------------------------------------------------------------------------------- /lib/dockerfile/parse.go: -------------------------------------------------------------------------------- 1 | package dockerfile 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/moby/buildkit/frontend/dockerfile/parser" 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | type Dockerfile struct { 11 | syntaxTree *parser.Node 12 | } 13 | 14 | func (d *Dockerfile) Stmts() []*parser.Node { return d.syntaxTree.Children } 15 | 16 | type Cmd struct { 17 | Cmd string 18 | Args []string 19 | } 20 | 21 | func (c Cmd) String() string { 22 | s := c.Cmd 23 | for _, a := range c.Args { 24 | s += fmt.Sprintf(" %q", a) 25 | } 26 | return s 27 | } 28 | 29 | func (c Cmd) Flatten() []string { 30 | if c.Cmd == "" { 31 | return nil 32 | } 33 | return append([]string{c.Cmd}, c.Args...) 34 | } 35 | 36 | func ParseDockerfile(b []byte) (*Dockerfile, error) { 37 | r, err := parser.Parse(bytes.NewReader(b)) 38 | if err != nil { 39 | return nil, errors.Wrap(err, "error parsing dockerfile") 40 | } 41 | if r.AST == nil { 42 | return nil, errors.Wrap(err, "ast was nil") 43 | } 44 | return &Dockerfile{r.AST}, nil 45 | } 46 | 47 | func ParseEntrypoint(d *Dockerfile) (Cmd, error) { 48 | var c Cmd 49 | var epVals, cmdVals []string 50 | for _, stmt := range d.Stmts() { 51 | switch stmt.Value { 52 | case "from": 53 | // reset (new stage) 54 | epVals = nil 55 | cmdVals = nil 56 | case "entrypoint": 57 | epVals = parseCommand(stmt.Next, stmt.Attributes["json"]) 58 | case "cmd": 59 | cmdVals = parseCommand(stmt.Next, stmt.Attributes["json"]) 60 | } 61 | } 62 | if len(epVals) == 0 && len(cmdVals) == 0 { 63 | return c, errors.New("no CMD or ENTRYPOINT values in dockerfile") 64 | } 65 | if len(epVals) == 0 { 66 | // CMD becomes the entrypoint 67 | return Cmd{cmdVals[0], cmdVals[1:]}, nil 68 | } 69 | // merge ENTRYPOINT argv and CMD values 70 | return Cmd{epVals[0], append(epVals[1:], cmdVals...)}, nil 71 | } 72 | 73 | // parseCommand parses CMD and ENTRYPOINT nodes, based on whether they're JSON lists or not. 74 | // Non-JSON values are wrapped in [/bin/sh -c VALUE] 75 | func parseCommand(n *parser.Node, json bool) []string { 76 | var out []string 77 | for n != nil { 78 | if !json { 79 | return []string{"/bin/sh", "-c", n.Value} 80 | } 81 | out = append(out, n.Value) 82 | n = n.Next 83 | } 84 | return out 85 | } 86 | -------------------------------------------------------------------------------- /lib/handlerutil/handlers.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
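// Clarifying sketch (not in the original file): NewFSDebugHandler below backs the
// /rundev/fsz endpoint mounted by the client's local server (and, per the package
// doc, shared with the daemon). Requesting /rundev/fsz?full skips the
// dockerignore-based exclusion rules; the response body is the indented JSON tree
// and the rundev-checksum header carries the tree's root checksum.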
14 | 15 | // Package handlerutil provides common handlers between 16 | // rundev client and daemon. 17 | package handlerutil 18 | 19 | import ( 20 | "bytes" 21 | "encoding/json" 22 | "fmt" 23 | "github.com/ahmetb/rundev/lib/constants" 24 | "github.com/ahmetb/rundev/lib/fsutil" 25 | "github.com/ahmetb/rundev/lib/ignore" 26 | "io" 27 | "net/http" 28 | ) 29 | 30 | func NewFSDebugHandler(dir string, ignores *ignore.FileIgnores) http.HandlerFunc { 31 | return func(w http.ResponseWriter, req *http.Request) { 32 | i := ignores 33 | if _, ok := req.URL.Query()["full"] ; ok{ 34 | i = nil // ?full disables the file exclusion rules 35 | } 36 | 37 | fs, err := fsutil.Walk(dir, i) 38 | if err != nil { 39 | w.WriteHeader(http.StatusInternalServerError) 40 | fmt.Fprintf(w,"failed to fetch local filesystem: %+v", err) 41 | return 42 | } 43 | w.Header().Set(constants.HdrRundevChecksum, fmt.Sprintf("%v", fs.RootChecksum())) 44 | var b bytes.Buffer 45 | enc := json.NewEncoder(&b) 46 | enc.SetIndent("", " ") 47 | if err := enc.Encode(fs); err != nil { 48 | w.WriteHeader(http.StatusInternalServerError) 49 | fmt.Fprintf(w,"failed to encode json: %+v", err) 50 | return 51 | } 52 | io.Copy(w,&b) 53 | } 54 | } 55 | 56 | 57 | // NewUnsupportedDebugEndpointHandler returns a 404 handler for debug 58 | // paths to prevent falling back to reverse proxy. 59 | func NewUnsupportedDebugEndpointHandler() http.HandlerFunc { 60 | return func(w http.ResponseWriter, req *http.Request) { 61 | w.WriteHeader(http.StatusNotFound) 62 | fmt.Fprintf(w, "not found: debug endpoint %s does not exist.", req.URL.Path) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/fsutil/customstat.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "os" 19 | "time" 20 | ) 21 | 22 | // nanosecMaskingStat returns the nanosec portion of ModTime() in the underlying os.FileInfo. 23 | type nanosecMaskingStat struct{ s os.FileInfo } 24 | 25 | func (n nanosecMaskingStat) Name() string { return n.s.Name() } 26 | func (n nanosecMaskingStat) Size() int64 { return n.s.Size() } 27 | func (n nanosecMaskingStat) Mode() os.FileMode { return n.s.Mode() } 28 | func (n nanosecMaskingStat) ModTime() time.Time { return n.s.ModTime().Truncate(time.Second) } 29 | func (n nanosecMaskingStat) IsDir() bool { return n.s.IsDir() } 30 | func (n nanosecMaskingStat) Sys() interface{} { return n.s.Sys() } 31 | 32 | // zeroSizeStat returns zero size for the given os.FileInfo. 
33 | type zeroSizeStat struct{ s os.FileInfo } 34 | 35 | func (c zeroSizeStat) Name() string { return c.s.Name() } 36 | func (c zeroSizeStat) Size() int64 { return 0 } 37 | func (c zeroSizeStat) Mode() os.FileMode { return c.s.Mode() } 38 | func (c zeroSizeStat) ModTime() time.Time { return c.s.ModTime() } 39 | func (c zeroSizeStat) IsDir() bool { return c.s.IsDir() } 40 | func (c zeroSizeStat) Sys() interface{} { return c.s.Sys() } 41 | 42 | type whiteoutStat struct { 43 | name string 44 | sys interface{} 45 | } 46 | 47 | func (w whiteoutStat) Name() string { return w.name } 48 | func (whiteoutStat) Size() int64 { return 0 } 49 | func (whiteoutStat) Mode() os.FileMode { return 0444 } 50 | func (whiteoutStat) ModTime() time.Time { return time.Unix(0, 0) } 51 | func (whiteoutStat) IsDir() bool { return false } 52 | func (w whiteoutStat) Sys() interface{} { return w.sys } 53 | -------------------------------------------------------------------------------- /lib/fsutil/archive_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "github.com/google/go-cmp/cmp" 19 | "io/ioutil" 20 | "os" 21 | "path/filepath" 22 | "strings" 23 | "testing" 24 | ) 25 | 26 | func Test_expandDirEntries(t *testing.T) { 27 | tmp, err := ioutil.TempDir(os.TempDir(), "") 28 | if err != nil { 29 | t.Fatal(err) 30 | } 31 | defer os.RemoveAll(tmp) 32 | 33 | files := []string{ 34 | "empty/", 35 | "foo/", 36 | "foo/file1", 37 | "foo/file2", 38 | "foo/nested/", 39 | "foo/nested/1", 40 | "foo/nested/2", 41 | "foo/nested/empty/", 42 | "zoo1", 43 | "zoo2", 44 | } 45 | for _, f := range files { 46 | fp := filepath.Join(tmp, filepath.FromSlash(f)) 47 | if strings.HasSuffix(f, "/") { 48 | if err := os.MkdirAll(strings.TrimRight(fp, "/"), 0755); err != nil { 49 | t.Fatal(err) 50 | } 51 | } else { 52 | if err := ioutil.WriteFile(fp, []byte{}, 0644); err != nil { 53 | t.Fatal(err) 54 | } 55 | } 56 | } 57 | 58 | expected := []string{ 59 | filepath.Join(tmp, ""), // root entry 60 | filepath.Join(tmp, "empty"), 61 | filepath.Join(tmp, "foo"), 62 | filepath.Join(tmp, "foo/file1"), 63 | filepath.Join(tmp, "foo/file2"), 64 | filepath.Join(tmp, "foo/nested"), 65 | filepath.Join(tmp, "foo/nested/1"), 66 | filepath.Join(tmp, "foo/nested/2"), 67 | filepath.Join(tmp, "foo/nested/empty"), 68 | filepath.Join(tmp, "zoo1"), 69 | filepath.Join(tmp, "zoo2")} 70 | 71 | v, err := expandDirEntries(tmp) 72 | if err != nil { 73 | t.Fatal(err) 74 | } 75 | var got []string 76 | for _, vv := range v { 77 | got = append(got, vv.fullPath) 78 | } 79 | 80 | if diff := cmp.Diff(expected, got); diff != "" { 81 | t.Fatal(diff) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /cmd/daemon/nanny_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 
Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "context" 19 | "testing" 20 | "time" 21 | ) 22 | 23 | func TestExecFails(t *testing.T) { 24 | n := newProcessNanny("non-existing", nil, procOpts{}) 25 | err := n.Restart() 26 | defer n.Kill() 27 | if err == nil { 28 | t.Fatal("no error?") 29 | } 30 | if n.Running() { 31 | t.Fatal("should not be running") 32 | } 33 | } 34 | 35 | func TestExec(t *testing.T) { 36 | n := newProcessNanny("sleep", []string{"100"}, procOpts{}) 37 | defer n.Kill() 38 | if n.Running() { 39 | t.Fatal("not started yet") 40 | } 41 | err := n.Restart() 42 | if err != nil { 43 | t.Fatal(err) 44 | } 45 | if !n.Running() { 46 | t.Fatal("not running") 47 | } 48 | n.Kill() 49 | time.Sleep(time.Millisecond * 100) // replace with a wait loop 50 | if n.Running() { 51 | t.Fatal("killed process still running") 52 | } 53 | } 54 | 55 | func TestExecReplaceRunning(t *testing.T) { 56 | n := newProcessNanny("sleep", []string{"1000"}, procOpts{}) 57 | defer n.Kill() 58 | if err := n.Restart(); err != nil { 59 | t.Fatal(err) 60 | } 61 | for i := 0; i < 100; i++ { 62 | if err := n.Restart(); err != nil { 63 | t.Fatal(err) 64 | } 65 | } 66 | if !n.Running() { 67 | t.Fatal("not running") 68 | } 69 | } 70 | 71 | func TestExecCapturesExit(t *testing.T) { 72 | n := newProcessNanny("sleep", []string{"0.1"}, procOpts{}) 73 | defer n.Kill() 74 | 75 | if err := n.Restart(); err != nil { 76 | t.Fatal() 77 | } 78 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*300) 79 | defer cancel() 80 | i := 0 81 | for n.Running() { 82 | select { 83 | case <-ctx.Done(): 84 | t.Fatalf("cmd did not terminate, iteration:%d", i) 85 | default: 86 | i++ 87 | } 88 | time.Sleep(time.Millisecond * 5) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /cmd/daemon/portcheck.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
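// Illustrative usage, mirroring portcheck_test.go (hypothetical port/timeout):
// block until the supervised process starts listening, or give up when the
// context expires.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := newTCPPortChecker(8080).waitPort(ctx); err != nil {
//		// the port never opened before the deadline
//	}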
14 | 15 | package main 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "github.com/pkg/errors" 21 | "net" 22 | "time" 23 | ) 24 | 25 | const ( 26 | defaultPortRetryInterval = time.Millisecond * 5 27 | defaultPortDialTimeout = time.Millisecond * 40 28 | ) 29 | 30 | type portChecker interface { 31 | checkPort() bool 32 | waitPort(context.Context) error 33 | } 34 | 35 | type tcpPortCheck struct { 36 | portNum int 37 | retryInterval time.Duration 38 | dialTimeout time.Duration 39 | } 40 | 41 | func newTCPPortChecker(port int) portChecker { 42 | return &tcpPortCheck{ 43 | portNum: port, 44 | retryInterval: defaultPortRetryInterval, 45 | dialTimeout: defaultPortDialTimeout} 46 | } 47 | 48 | func (t *tcpPortCheck) checkPort() bool { 49 | addr := net.JoinHostPort("localhost", fmt.Sprintf("%d", t.portNum)) 50 | conn, err := net.DialTimeout("tcp", addr, t.dialTimeout) 51 | defer func() { 52 | if conn != nil { 53 | conn.Close() 54 | } 55 | }() 56 | return err == nil 57 | } 58 | 59 | // waitPort waits for port to be connectable until the specified ctx is cancelled. 60 | func (t *tcpPortCheck) waitPort(ctx context.Context) error { 61 | // TODO: do we need to return error from this method? 62 | ch := make(chan struct{}, 1) 63 | defer close(ch) 64 | 65 | tick := time.NewTicker(t.retryInterval) 66 | defer tick.Stop() 67 | 68 | go func() { 69 | for { 70 | select { 71 | case <-ctx.Done(): 72 | return 73 | case <-tick.C: 74 | if ok := t.checkPort(); ok { 75 | ch <- struct{}{} 76 | return 77 | } 78 | time.Sleep(time.Millisecond * 10) 79 | } 80 | } 81 | }() 82 | 83 | select { 84 | case <-ch: 85 | return nil 86 | case <-ctx.Done(): 87 | return errors.Wrap(ctx.Err(), "quit waiting on port to open") 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /cmd/patchball/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
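// Illustrative invocation (hypothetical paths; flags are defined below). The ops
// file is a JSON array of fsutil.DiffOp values, like example-patch.json in this
// directory, and the resulting patch archive is written to stdout (ApplyPatch on
// the receiving side reads it through a gzip reader, so a .tgz name is assumed):
//
//	patchball -ops-file=example-patch.json -dir=./myapp -dockerignore=./myapp/.dockerignore > patch.tgz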
14 | 15 | package main 16 | 17 | import ( 18 | "encoding/json" 19 | "flag" 20 | "fmt" 21 | "github.com/ahmetb/rundev/lib/fsutil" 22 | "github.com/ahmetb/rundev/lib/ignore" 23 | "io" 24 | "io/ioutil" 25 | "log" 26 | "os" 27 | ) 28 | 29 | var ( 30 | flOps string 31 | flDir string 32 | flDockerignore string 33 | ) 34 | 35 | func init() { 36 | flag.StringVar(&flOps, "ops-file", "", "json array file containing diff ops") 37 | flag.StringVar(&flDir, "dir", ".", "directory to look files for") 38 | flag.StringVar(&flDockerignore, "dockerignore", "", "specify path to parse dockerignore rules") 39 | flag.Parse() 40 | } 41 | 42 | func main() { 43 | log.SetOutput(os.Stderr) 44 | if flOps == "" { 45 | log.Fatal("-ops-file not specified") 46 | } else if flDir == "" { 47 | log.Fatal("-dir is empty") 48 | } 49 | 50 | var ignores *ignore.FileIgnores 51 | if flDockerignore != "" { 52 | f, err := os.Open(flDockerignore) 53 | if err != nil { 54 | log.Fatalf("failed to open -dockerignore: %+v", err) 55 | } 56 | defer f.Close() 57 | r, err := ignore.ParseDockerignore(f) 58 | if err != nil { 59 | log.Fatalf("failed to parse -dockerignore: %+v", err) 60 | } 61 | ignores = ignore.NewFileIgnores(r) 62 | log.Printf("info: parsed %d ignore rules", len(r)) 63 | } 64 | 65 | var ops []fsutil.DiffOp 66 | for _, op := range ops { 67 | fmt.Fprintf(os.Stderr, "%v\n", op) 68 | } 69 | b, err := ioutil.ReadFile(flOps) 70 | if err != nil { 71 | log.Fatalf("failed to open file: %+v", err) 72 | } 73 | if err := json.Unmarshal(b, &ops); err != nil { 74 | log.Fatalf("unmarshal error") 75 | } 76 | 77 | tar, _, err := fsutil.PatchArchive(flDir, ops, ignores) 78 | if err != nil { 79 | log.Fatalf("error creating patch archive: %+v", err) 80 | } 81 | io.Copy(os.Stdout, tar) 82 | } 83 | -------------------------------------------------------------------------------- /lib/dockerfile/buildcmd_test.go: -------------------------------------------------------------------------------- 1 | package dockerfile 2 | 3 | import ( 4 | "github.com/ahmetb/rundev/lib/types" 5 | "github.com/google/go-cmp/cmp" 6 | "testing" 7 | ) 8 | 9 | func TestParseBuildCmds(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | df string 13 | want types.BuildCmds 14 | }{ 15 | { 16 | name: "no run cmds", 17 | df: "FROM scratch", 18 | want: nil, 19 | }, 20 | { 21 | name: "not annotated run cms", 22 | df: `FROM scratch 23 | RUN apt-get install \ 24 | -qqy \ 25 | a b c && rm -rf /tmp/foo`, 26 | want: nil, 27 | }, 28 | { 29 | name: "some annotated run cmds", 30 | df: ` 31 | FROM scratch 32 | #rundev 33 | RUN date # xrundev 34 | RUN date # rundevx 35 | RUN date # rundev x 36 | RUN pip install -r requirements.txt # rundev 37 | RUN ["/src/hack/build.sh"] #rundev 38 | RUN date 39 | `, 40 | want: []types.BuildCmd{ 41 | {C: []string{"/bin/sh", "-c", "pip install -r requirements.txt"}}, 42 | {C: []string{"/src/hack/build.sh"}}, 43 | }, 44 | }, 45 | { 46 | name: "multi-stage", 47 | df: ` 48 | FROM foo 49 | RUN date #rundev 50 | FROM bar 51 | RUN xyz # rundev 52 | RUN ["uname","-a"] #rundev`, 53 | want: []types.BuildCmd{ 54 | {C: []string{"/bin/sh", "-c", "xyz"}}, 55 | {C: []string{"uname", "-a"}}}, 56 | }, 57 | { 58 | name: "multi-line run cmd annotated", 59 | df: `FROM scratch 60 | RUN apt-get -qqy install \ 61 | git \ 62 | libuv \ 63 | psmisc #rundev`, 64 | want: []types.BuildCmd{ 65 | { 66 | C: []string{"/bin/sh", "-c", 67 | "apt-get -qqy install \tgit \tlibuv " + 68 | "\tpsmisc"}}, 69 | }, 70 | }, 71 | { 72 | name: "run cmd with conditions", 73 | df: `RUN ["foo"] 
#rundev[requirements.txt,**.py]`, 74 | want: []types.BuildCmd{ 75 | { 76 | C: []string{"foo"}, 77 | On: []string{"requirements.txt", "**.py"}, 78 | }, 79 | }, 80 | }, 81 | { 82 | name: "run cmd with empty conditions default to nil", 83 | df: `RUN ["date"] #rundev[]`, 84 | want: []types.BuildCmd{{C: []string{"date"}}}, 85 | }, 86 | { 87 | name: "space before conditions", 88 | df: `RUN ["date"] # rundev [foo]`, 89 | want: nil, 90 | }, 91 | } 92 | for _, tt := range tests { 93 | t.Run(tt.name, func(t *testing.T) { 94 | df, err := ParseDockerfile([]byte(tt.df)) 95 | if err != nil { 96 | t.Fatalf("parsing dockerfile failed: %v", err) 97 | } 98 | got := ParseBuildCmds(df) 99 | 100 | if diff := cmp.Diff(tt.want, got); diff != "" { 101 | t.Errorf("ParseBuildCmds() returned unexpected results:\n%s", diff) 102 | } 103 | }) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /cmd/client/reverseproxy_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "fmt" 19 | "github.com/ahmetb/rundev/lib/constants" 20 | "net/http" 21 | "net/http/httptest" 22 | "testing" 23 | ) 24 | 25 | func TestReverseProxy_transmitsChecksum(t *testing.T) { 26 | visits := 0 27 | srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { 28 | v := req.Header.Get(constants.HdrRundevChecksum) 29 | if v == "" { 30 | w.WriteHeader(http.StatusBadRequest) 31 | fmt.Fprint(w, "empty checksum header") 32 | return 33 | } 34 | visits++ 35 | fmt.Fprintf(w, "checksum header: %s", v) 36 | })) 37 | defer srv.Close() 38 | 39 | syncer := newSyncer(syncOpts{ 40 | localDir: "/Users/ahmetb/workspace/junk/py-hello", // TODO(ahmetb) create tempdir for test 41 | }) 42 | 43 | rp, err := newReverseProxyHandler(srv.URL, syncer) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | rs := httptest.NewServer(rp) 48 | defer rs.Close() 49 | 50 | resp, err := http.Get(rs.URL + "/foo") 51 | if err != nil { 52 | t.Fatal(err) 53 | } 54 | defer resp.Body.Close() 55 | if resp.StatusCode != http.StatusOK { 56 | t.Fatalf("got status code: %d", resp.StatusCode) 57 | } 58 | if visits != 1 { 59 | t.Fatalf("%d visits recorded", visits) 60 | } 61 | } 62 | 63 | func TestReverseProxy_repeatsRequest(t *testing.T) { 64 | i := 0 65 | srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { 66 | i++ 67 | if i <= 2 { 68 | w.Header().Set("content-type", constants.MimeDumbRepeat) 69 | return 70 | } 71 | fmt.Fprintf(w, "done") 72 | })) 73 | defer srv.Close() 74 | syncer := newSyncer(syncOpts{ 75 | localDir: "/Users/ahmetb/workspace/junk/py-hello", // TODO(ahmetb) create tempdir for test 76 | }) 77 | rp, err := newReverseProxyHandler(srv.URL, syncer) 78 | if err != nil { 79 | t.Fatal(err) 80 | } 81 | rs := httptest.NewServer(rp) 82 | defer rs.Close() 83 | resp, 
err := http.Get(rs.URL + "/foo") 84 | if err != nil { 85 | t.Fatal(err) 86 | } 87 | defer resp.Body.Close() 88 | if i != 3 { 89 | t.Fatalf("unexpected amount of requests: %d", i) 90 | } 91 | 92 | } 93 | -------------------------------------------------------------------------------- /lib/ignore/ignores_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package ignore 16 | 17 | import "testing" 18 | 19 | func TestIgnored(t *testing.T) { 20 | type args struct { 21 | path string 22 | exclusions []string 23 | } 24 | tests := []struct { 25 | name string 26 | args args 27 | want bool 28 | }{ 29 | { 30 | name: "direct match", 31 | args: args{ 32 | path: "a/b", 33 | exclusions: []string{"a/b"}, 34 | }, 35 | want: true, 36 | }, 37 | { 38 | name: "direct match, single glob", 39 | args: args{ 40 | path: "a/b", 41 | exclusions: []string{"a/*"}, 42 | }, 43 | want: true, 44 | }, 45 | { 46 | name: "direct match, single glob in multi-nest", 47 | args: args{ 48 | path: "a/b/c", 49 | exclusions: []string{"a/*"}, 50 | }, 51 | want: false, 52 | }, 53 | { 54 | name: "direct match, nested glob", 55 | args: args{ 56 | path: "a/b/c", 57 | exclusions: []string{"a/*/*"}, 58 | }, 59 | want: true, 60 | }, 61 | { 62 | name: "direct match, double-star", 63 | args: args{ 64 | path: "a/b/c", 65 | exclusions: []string{"a/**"}, 66 | }, 67 | want: true, 68 | }, 69 | { 70 | name: "leading slash in pattern", 71 | args: args{ 72 | path: "a/b", 73 | exclusions: []string{"/a/*"}, 74 | }, 75 | want: true, 76 | }, 77 | { 78 | name: "sub-path is excluded if dir is excluded without star", 79 | args: args{ 80 | path: "__pycache__/foo", 81 | exclusions: []string{"__pycache__"}, 82 | }, 83 | want: false, 84 | }, 85 | { 86 | name: "extension match with double-star", 87 | args: args{ 88 | path: "a/b/c/foo.py", 89 | exclusions: []string{"**/*.py"}, 90 | }, 91 | want: true, 92 | }, 93 | { 94 | name: "no match", 95 | args: args{ 96 | path: "code.go", 97 | exclusions: []string{"**.py"}, 98 | }, 99 | want: false, 100 | }, 101 | } 102 | for _, tt := range tests { 103 | t.Run(tt.name, func(t *testing.T) { 104 | if got := ignored(tt.args.path, tt.args.exclusions); got != tt.want { 105 | t.Errorf("Ignored() = %v, want %v", got, tt.want) 106 | } 107 | }) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /cmd/client/localserver.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | package main
16 | 
17 | import (
18 |     "fmt"
19 |     "github.com/ahmetb/rundev/lib/handlerutil"
20 |     "github.com/kr/pretty"
21 |     "github.com/pkg/errors"
22 |     "net/http"
23 |     "net/http/httputil"
24 |     "net/url"
25 |     "os"
26 | )
27 | 
28 | type localServerOpts struct {
29 |     sync        *syncer
30 |     proxyTarget string
31 | }
32 | 
33 | type localServer struct {
34 |     opts localServerOpts
35 | }
36 | 
37 | func newLocalServer(opts localServerOpts) (http.Handler, error) {
38 |     ls := &localServer{opts: opts}
39 | 
40 |     reverseProxy, err := newReverseProxyHandler(opts.proxyTarget, ls.opts.sync)
41 |     if err != nil {
42 |         return nil, errors.Wrap(err, "failed to initialize reverse proxy")
43 |     }
44 | 
45 |     mux := http.NewServeMux()
46 |     mux.HandleFunc("/rundev/fsz", handlerutil.NewFSDebugHandler(ls.opts.sync.opts.localDir, ls.opts.sync.opts.ignores))
47 |     mux.HandleFunc("/rundev/debugz", ls.debugHandler)
48 |     mux.HandleFunc("/rundev/", handlerutil.NewUnsupportedDebugEndpointHandler())
49 |     mux.HandleFunc("/favicon.ico", handlerutil.NewUnsupportedDebugEndpointHandler()) // TODO(ahmetb) annoyance during testing on browser
50 |     // TODO(ahmetb) add /rundev/syncz
51 |     mux.Handle("/", reverseProxy)
52 |     return mux, nil
53 | }
54 | 
55 | func newReverseProxyHandler(addr string, sync *syncer) (http.Handler, error) {
56 |     u, err := url.Parse(addr)
57 |     if err != nil {
58 |         return nil, errors.Wrapf(err, "failed to parse remote addr as url %s", addr)
59 |     }
60 |     rp := httputil.NewSingleHostReverseProxy(u)
61 |     rp.Transport = withSyncingRoundTripper(rp.Transport, sync, u.Host)
62 |     return rp, nil
63 | }
64 | 
65 | func (srv *localServer) debugHandler(w http.ResponseWriter, req *http.Request) {
66 |     checksum, err := srv.opts.sync.checksum()
67 |     if err != nil {
68 |         w.WriteHeader(http.StatusInternalServerError)
69 |         fmt.Fprintf(w, "failed to fetch local filesystem: %+v", err)
70 |         return
71 |     }
72 |     fmt.Fprintf(w, "fs checksum: %v\n", checksum)
73 |     fmt.Fprintf(w, "pid: %d\n", os.Getpid())
74 |     wd, _ := os.Getwd()
75 |     fmt.Fprintf(w, "cwd: %s\n", wd)
76 |     fmt.Fprint(w, "sync:\n")
77 |     fmt.Fprintf(w, " dir: %# v\n", pretty.Formatter(srv.opts.sync.opts.localDir))
78 |     fmt.Fprintf(w, " target: %# v\n", pretty.Formatter(srv.opts.sync.opts.targetAddr))
79 |     fmt.Fprintf(w, " ignores: %# v\n", pretty.Formatter(srv.opts.sync.opts.ignores))
80 | }
81 | 
--------------------------------------------------------------------------------
/lib/fsutil/unarchive.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "archive/tar" 19 | "compress/gzip" 20 | "github.com/ahmetb/rundev/lib/constants" 21 | "github.com/pkg/errors" 22 | "io" 23 | "os" 24 | "path/filepath" 25 | "strings" 26 | ) 27 | 28 | func ApplyPatch(dir string, r io.ReadCloser) ([]string, error) { 29 | var out []string 30 | gr, err := gzip.NewReader(r) 31 | if err != nil { 32 | return nil, errors.Wrap(err, "failed to initialize gzip reader") 33 | } 34 | tr := tar.NewReader(gr) 35 | for { 36 | hdr, err := tr.Next() 37 | if err == io.EOF { 38 | break 39 | } else if err != nil { 40 | return nil, errors.Wrap(err, "error reading tar header") 41 | } 42 | 43 | fn := hdr.Name 44 | out = append(out, fn) 45 | fpath := filepath.Join(dir, filepath.FromSlash(fn)) 46 | 47 | if hdr.Typeflag == tar.TypeDir { 48 | if err := os.MkdirAll(fpath, hdr.FileInfo().Mode()); err != nil { 49 | return nil, errors.Wrapf(err, "failed to mkdir for tar dir entry %s", fn) 50 | } 51 | continue 52 | } else if hdr.Typeflag != tar.TypeReg { 53 | return nil, errors.Errorf("found non-regular file entry in tar (type: %v) file: %s", hdr.Typeflag, hdr.Name) 54 | } 55 | 56 | if strings.HasSuffix(fn, constants.WhiteoutDeleteSuffix) { 57 | if err := os.RemoveAll(strings.TrimSuffix(fpath, constants.WhiteoutDeleteSuffix)); err != nil { 58 | return nil, errors.Wrapf(err, "failed to realize delete whiteout file %s", fn) 59 | } 60 | continue 61 | } 62 | 63 | // copy regular file 64 | f, err := os.OpenFile(fpath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, hdr.FileInfo().Mode()) 65 | if err != nil { 66 | return nil, errors.Wrapf(err, "failed to create file for tar entry %s", fn) 67 | } 68 | if _, err := io.Copy(f, tr); err != nil { 69 | return nil, errors.Wrapf(err, "failed to copy file contents for tar entry %s", fn) 70 | } 71 | if err := f.Close(); err != nil { 72 | return nil, errors.Wrapf(err, "failed to close copied file for tar entry %s", fn) 73 | } 74 | if err := os.Chmod(fpath, hdr.FileInfo().Mode()); err != nil { 75 | return nil, errors.Wrapf(err, "failed to chmod file for tar entry %s", fn) 76 | } 77 | if err := os.Chtimes(fpath, hdr.ModTime, hdr.ModTime); err != nil { 78 | return nil, errors.Wrapf(err, "failed to change times of copied file for tar entry %s", fn) 79 | } 80 | } 81 | return out, nil 82 | } 83 | -------------------------------------------------------------------------------- /lib/fsutil/fsdiff.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
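// Clarifying note (not in the original file): with the iota-based constants below,
// DiffOpAdd encodes as 0 and DiffOpDel as 1 in JSON. That is the format used by
// cmd/patchball/example-patch.json: {"Type": 1, "Path": ".git"} means "delete
// .git", while {"Type": 0, "Path": "Dockerfile"} means "add/replace Dockerfile
// from the local directory".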
14 | 15 | package fsutil 16 | 17 | import "path/filepath" 18 | 19 | type DiffType int 20 | 21 | const ( 22 | DiffOpAdd DiffType = iota 23 | DiffOpDel 24 | ) 25 | 26 | type DiffOp struct { 27 | Type DiffType 28 | Path string 29 | } 30 | 31 | func (o DiffOp) String() string { 32 | var s string 33 | switch o.Type { 34 | case DiffOpAdd: 35 | s = "A" 36 | case DiffOpDel: 37 | s = "D" 38 | default: 39 | s = "?" 40 | } 41 | return s + " " + o.Path 42 | } 43 | 44 | // FSDiff returns the operations that needs to be done on n2 to make it look like n1. 45 | func FSDiff(n1, n2 FSNode) []DiffOp { 46 | return fsDiffInner(n1, n2, ".") 47 | } 48 | 49 | func fsDiffInner(n1, n2 FSNode, base string) []DiffOp { 50 | var ops []DiffOp 51 | ln := n1.Nodes 52 | rn := n2.Nodes 53 | for len(ln) > 0 && len(rn) > 0 { 54 | l, r := ln[0], rn[0] 55 | 56 | if l.Name < r.Name { // file doesn't exist in r 57 | ops = append(ops, DiffOp{Type: DiffOpAdd, Path: canonicalPath(base, l.Name)}) 58 | ln = ln[1:] // advance 59 | } else if l.Name > r.Name { // file doesn't exist in l 60 | ops = append(ops, DiffOp{Type: DiffOpDel, Path: canonicalPath(base, r.Name)}) 61 | rn = rn[1:] 62 | } else { // l.Name == r.Name (same item) 63 | if l.Mode.IsDir() != r.Mode.IsDir() { // one of them is a directory 64 | ops = append(ops, DiffOp{Type: DiffOpDel, Path: canonicalPath(base, l.Name)}) 65 | ops = append(ops, DiffOp{Type: DiffOpAdd, Path: canonicalPath(base, l.Name)}) 66 | } else if l.checksum() != r.checksum() { 67 | if !l.Mode.IsDir() && !r.Mode.IsDir() { 68 | // Nodes are not dir, re-upload file 69 | ops = append(ops, DiffOp{Type: DiffOpAdd, Path: canonicalPath(base, l.Name)}) 70 | } else { 71 | // both Nodes are dir, recurse: 72 | ops = append(ops, fsDiffInner(l, r, canonicalPath(base, l.Name))...) 73 | } 74 | } 75 | ln, rn = ln[1:], rn[1:] 76 | } 77 | } 78 | // add remaining 79 | for _, l := range ln { 80 | ops = append(ops, DiffOp{Type: DiffOpAdd, Path: canonicalPath(base, l.Name)}) 81 | } 82 | for _, r := range rn { 83 | ops = append(ops, DiffOp{Type: DiffOpDel, Path: canonicalPath(base, r.Name)}) 84 | } 85 | return ops 86 | } 87 | 88 | // canonicalPath joins base and rel to create a canonical path string with unix path separator (/) independent of 89 | // current platform. 90 | func canonicalPath(base, rel string) string { 91 | return filepath.ToSlash(filepath.Join(base, rel)) 92 | } 93 | -------------------------------------------------------------------------------- /lib/fsutil/fstree_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
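// Clarifying note (not in the original file): as TestChecksumRoot below shows,
// RootChecksum deliberately ignores the root node's own Name, Mode and Size and
// reflects only its child Nodes, so renaming the synced directory itself does not
// by itself change the tree checksum.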
14 | 
15 | package fsutil
16 | 
17 | import (
18 | 	"testing"
19 | 	"time"
20 | )
21 | 
22 | func TestWalk(t *testing.T) {
23 | 	_, err := Walk("../..", nil) // TODO remove
24 | 	if err != nil {
25 | 		t.Fatal(err)
26 | 	}
27 | }
28 | 
29 | func Test_checksum(t *testing.T) {
30 | 	f := func() FSNode {
31 | 		return FSNode{
32 | 			Name:  "n1",
33 | 			Mode:  1,
34 | 			Size:  2,
35 | 			Mtime: time.Unix(1564448884, 0)}
36 | 	}
37 | 
38 | 	n := f()
39 | 
40 | 	m := f()
41 | 	if a, b := n.checksum(), m.checksum(); a != b {
42 | 		t.Fatal("checksums aren't idempotent")
43 | 	}
44 | 
45 | 	m = f()
46 | 	m.Name = "other"
47 | 	if a, b := n.checksum(), m.checksum(); a == b {
48 | 		t.Fatal("Name change didn't trigger Checksum change")
49 | 	}
50 | 
51 | 	m = f()
52 | 	m.Size = 999
53 | 	if a, b := n.checksum(), m.checksum(); a == b {
54 | 		t.Fatal("Size change didn't trigger Checksum change")
55 | 	}
56 | 
57 | 	m = f()
58 | 	m.Mode = 123
59 | 	if a, b := n.checksum(), m.checksum(); a == b {
60 | 		t.Fatal("Mode change didn't trigger Checksum change")
61 | 	}
62 | 
63 | 	m = f()
64 | 	m.Mtime = time.Now()
65 | 	if a, b := n.checksum(), m.checksum(); a == b {
66 | 		t.Fatal("Mtime change didn't trigger Checksum change")
67 | 	}
68 | 
69 | 	m = f()
70 | 	m.Nodes = []FSNode{f()}
71 | 	if a, b := n.checksum(), m.checksum(); a == b {
72 | 		t.Fatal("Nodes change didn't trigger Checksum change")
73 | 	}
74 | 
75 | 	n1, n2 := f(), f()
76 | 	n1.Nodes = []FSNode{f(), f()}
77 | 	n2.Nodes = []FSNode{f(), f()}
78 | 	if a, b := n1.checksum(), n2.checksum(); a != b {
79 | 		t.Fatal("tree checksums are not idempotent")
80 | 	}
81 | 	n2.Nodes[1].Mode |= 0x2
82 | 	if a, b := n1.checksum(), n2.checksum(); a == b {
83 | 		t.Fatal("different child node led to the same Checksum")
84 | 	}
85 | }
86 | 
87 | func TestChecksumRoot(t *testing.T) {
88 | 	fs := FSNode{
89 | 		Name:  "name1",
90 | 		Mode:  1,
91 | 		Size:  1,
92 | 		Mtime: time.Unix(1, 0),
93 | 		Nodes: nil,
94 | 	}
95 | 
96 | 	c := fs.RootChecksum()
97 | 	fs.Name = "name2"
98 | 
99 | 	if fs.RootChecksum() != c {
100 | 		t.Fatal("name change was not supposed to trigger root checksum change")
101 | 	}
102 | 
103 | 	fs.Mode = 2
104 | 	if fs.RootChecksum() != c {
105 | 		t.Fatal("mode change was not supposed to trigger root checksum change")
106 | 	}
107 | 
108 | 	fs.Size = 2
109 | 	if fs.RootChecksum() != c {
110 | 		t.Fatal("size change was not supposed to trigger root checksum change")
111 | 	}
112 | 
113 | 	fs.Nodes = append(fs.Nodes, FSNode{Name: "bar"})
114 | 	if fs.RootChecksum() == c {
115 | 		t.Fatal("nodes was supposed to trigger root checksum change")
116 | 	}
117 | }
118 | 
--------------------------------------------------------------------------------
/cmd/daemon/nanny.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
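The next file implements the process supervisor (`nanny`) that rundevd uses to run and restart the user’s server. A rough, hypothetical sketch of how it is driven follows — the identifiers below exist in this package, but the wiring shown here is illustrative rather than copied from `server.go`:

```go
// Illustrative only; assumes it lives in the same package (cmd/daemon) and
// that "bytes" and "fmt" are imported. Command, args and dir are made up.
func exampleNannyUsage() error {
	logs := new(bytes.Buffer)
	n := newProcessNanny("python3", []string{"app.py"}, procOpts{
		port: 5555,       // exported to the child process as $PORT
		dir:  "/srv/app", // working directory for the child process
		logs: logs,       // captures stdout/stderr; reset on every restart
	})
	if err := n.Restart(); err != nil { // (re)starts the command in a new process group
		return err
	}
	defer n.Kill() // SIGKILLs the whole process group
	if !n.Running() {
		return fmt.Errorf("process exited early, logs:\n%s", logs.String())
	}
	return nil
}
```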
14 | 15 | package main 16 | 17 | import ( 18 | "bytes" 19 | "github.com/pkg/errors" 20 | "io" 21 | "log" 22 | "os" 23 | "os/exec" 24 | "strconv" 25 | "sync" 26 | "syscall" 27 | ) 28 | 29 | type nanny interface { 30 | Running() bool 31 | Restart() error // starts if not running 32 | Kill() 33 | } 34 | 35 | type procNanny struct { 36 | cmd string 37 | args []string 38 | opts procOpts 39 | 40 | mu sync.RWMutex 41 | proc *os.Process 42 | active bool 43 | } 44 | 45 | type procOpts struct { 46 | port int 47 | dir string 48 | logs *bytes.Buffer 49 | } 50 | 51 | func newProcessNanny(cmd string, args []string, opts procOpts) nanny { 52 | return &procNanny{ 53 | cmd: cmd, 54 | args: args, 55 | opts: opts, 56 | } 57 | } 58 | 59 | func (p *procNanny) Running() bool { 60 | p.mu.RLock() 61 | defer p.mu.RUnlock() 62 | return p.active 63 | } 64 | 65 | func (p *procNanny) Kill() { 66 | p.kill() 67 | } 68 | 69 | func (p *procNanny) Restart() error { 70 | return p.replace() 71 | } 72 | 73 | // kill sends a SIGKILL to the process if it's running. 74 | func (p *procNanny) kill() { 75 | p.mu.Lock() 76 | defer p.mu.Unlock() 77 | 78 | if p.proc != nil { 79 | pid := -p.proc.Pid // negative value: ID of process group 80 | log.Printf("killing pid %d", pid) 81 | // TODO using negative PID (pgrp kill) not working on gVisor 82 | if err := syscall.Kill(pid, syscall.SIGKILL); err != nil { 83 | log.Printf("warning: failed to kill: %v", err) 84 | } else { 85 | log.Printf("killed pid %d", pid) 86 | } 87 | 88 | p.proc.Release() 89 | p.proc = nil 90 | } 91 | p.active = false 92 | if p.opts.logs != nil { 93 | p.opts.logs.Reset() 94 | } 95 | } 96 | 97 | func (p *procNanny) replace() error { 98 | p.kill() 99 | 100 | newProc := exec.Command(p.cmd, p.args...) 101 | newProc.SysProcAttr = &syscall.SysProcAttr{ 102 | Setpgid: true} // create a new GID 103 | if p.opts.dir != "" { 104 | newProc.Dir = p.opts.dir 105 | } 106 | newProc.Stdout = io.MultiWriter(p.opts.logs, os.Stdout) 107 | newProc.Stderr = io.MultiWriter(p.opts.logs, os.Stderr) 108 | 109 | if p.opts.port > 0 { 110 | newProc.Env = append(os.Environ(), "PORT="+strconv.Itoa(p.opts.port)) 111 | } 112 | log.Printf("proc start") 113 | if err := newProc.Start(); err != nil { 114 | return errors.Wrap(err, "error starting process") 115 | } 116 | 117 | p.mu.Lock() 118 | p.proc = newProc.Process 119 | p.active = true 120 | p.mu.Unlock() 121 | 122 | go func(origProc *os.Process) { 123 | _ = newProc.Wait() 124 | p.mu.Lock() 125 | if p.proc == origProc { 126 | p.active = false 127 | } 128 | p.mu.Unlock() 129 | }(newProc.Process) 130 | 131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- /lib/dockerfile/parse_test.go: -------------------------------------------------------------------------------- 1 | package dockerfile 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestParseDockerfileEntrypoint(t *testing.T) { 9 | tests := []struct { 10 | name string 11 | df string 12 | want Cmd 13 | wantErr bool 14 | }{ 15 | { 16 | name: "nothing", 17 | df: `MAINTAINER David Bowie`, 18 | wantErr: true, 19 | }, 20 | { 21 | name: "just cmd (non-json)", 22 | df: `CMD /bin/server`, 23 | want: Cmd{"/bin/sh", []string{"-c", "/bin/server"}}, 24 | }, 25 | { 26 | name: "just cmd (json)", 27 | df: `CMD ["/bin/server"]`, 28 | want: Cmd{"/bin/server", []string{}}, 29 | }, 30 | { 31 | name: "just entrypoint (non-json)", 32 | df: `ENTRYPOINT /bin/server`, 33 | want: Cmd{"/bin/sh", []string{"-c", "/bin/server"}}, 34 | }, 35 | { 36 | name: 
"repetitive entrypoint", 37 | df: `ENTRYPOINT ["/bin/date"] 38 | ENTRYPOINT ["/bin/server"]"`, 39 | want: Cmd{"/bin/server", []string{}}, 40 | }, 41 | { 42 | name: "repetitive cmd", 43 | df: `CMD ["/bin/date"] 44 | CMD ["/bin/server"]"`, 45 | want: Cmd{"/bin/server", []string{}}, 46 | }, 47 | { 48 | name: "just entrypoint (json)", 49 | df: `ENTRYPOINT ["/bin/server"]`, 50 | want: Cmd{"/bin/server", []string{}}, 51 | }, 52 | { 53 | name: "mix of entrypoint and cmd (both json)", 54 | df: `ENTRYPOINT ["/bin/server"] 55 | CMD ["arg1", "arg2"]`, 56 | want: Cmd{"/bin/server", []string{"arg1", "arg2"}}, 57 | }, 58 | { 59 | name: "mix of entrypoint and cmd (both json), ordering should not matter", 60 | df: `CMD ["arg1", "arg2"] 61 | ENTRYPOINT ["/bin/server"]`, 62 | want: Cmd{"/bin/server", []string{"arg1", "arg2"}}, 63 | }, 64 | { 65 | name: "mix of entrypoint and cmd (both json), earlier CMDs are ignored", 66 | df: `CMD ["arg0"] 67 | CMD ["arg1", "arg2"] 68 | ENTRYPOINT ["/bin/server"]`, 69 | want: Cmd{"/bin/server", []string{"arg1", "arg2"}}, 70 | }, 71 | { 72 | name: "mix of entrypoint (json) and cmd (non-json)", 73 | df: `ENTRYPOINT ["/bin/server"] 74 | CMD arg1 arg2`, 75 | want: Cmd{"/bin/server", []string{"/bin/sh", "-c", "arg1 arg2"}}, 76 | }, 77 | { 78 | name: "mix of entrypoint (non-json) and cmd (json)", 79 | df: `ENTRYPOINT /bin/server foo bar 80 | CMD ["arg1", "arg2"]`, // bad idea, as /bin/sh won't do anything with args after the main arg (that comes after -c) 81 | want: Cmd{"/bin/sh", []string{"-c", "/bin/server foo bar", "arg1", "arg2"}}, 82 | }, 83 | { 84 | name: "mix of entrypoint and cmd (both non-json)", 85 | df: `ENTRYPOINT /bin/server foo bar 86 | CMD arg1 arg2`, // bad idea, user probably already gets an error from their original dockerfile 87 | want: Cmd{"/bin/sh", []string{"-c", "/bin/server foo bar", "/bin/sh", "-c", "arg1 arg2"}}, 88 | }, 89 | { 90 | name: "multi stage (reset)", 91 | df: `FROM a1 AS c1 92 | ENTRYPOINT b 93 | CMD c d 94 | FROM a2 as c2 95 | ENTRYPOINT /bin/server`, 96 | want: Cmd{"/bin/sh", []string{"-c", "/bin/server"}}, 97 | }, 98 | } 99 | for _, tt := range tests { 100 | t.Run(tt.name, func(t *testing.T) { 101 | df, err := ParseDockerfile([]byte(tt.df)) 102 | if err != nil { 103 | t.Fatalf("parsing dockerfile failed: %v", err) 104 | } 105 | got, err := ParseEntrypoint(df) 106 | if (err != nil) != tt.wantErr { 107 | t.Errorf("ParseEntrypoint() error = %v, wantErr %v", err, tt.wantErr) 108 | return 109 | } 110 | if !reflect.DeepEqual(got, tt.want) { 111 | t.Errorf("ParseEntrypoint() got = %v, want %v", got, tt.want) 112 | } 113 | }) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /cmd/client/docker.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 
15 | package main
16 | 
17 | import (
18 | 	"bytes"
19 | 	"context"
20 | 	"encoding/json"
21 | 	"fmt"
22 | 	"github.com/ahmetb/rundev/lib/types"
23 | 	"github.com/pkg/errors"
24 | 	"io/ioutil"
25 | 	"os"
26 | 	"os/exec"
27 | 	"path/filepath"
28 | 	"strings"
29 | )
30 | 
31 | const (
32 | 	dumbInitURL = `https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64`
33 | 	rundevdURL  = `https://storage.googleapis.com/rundev-test/rundevd-v0.0.0-b0bb9a5`
34 | )
35 | 
36 | type remoteRunOpts struct {
37 | 	syncDir      string
38 | 	runCmd       types.Cmd
39 | 	buildCmds    types.BuildCmds
40 | 	clientSecret string
41 | 	ignoreRules  []string
42 | }
43 | 
44 | type buildOpts struct {
45 | 	dir        string
46 | 	image      string
47 | 	dockerfile []byte
48 | }
49 | 
50 | func dockerBuildPush(ctx context.Context, opts buildOpts) error {
51 | 	b, err := exec.CommandContext(ctx, "docker", "version").CombinedOutput()
52 | 	if err != nil {
53 | 		return errors.Wrapf(err, "local docker engine is unreachable, output=%s", string(b))
54 | 	}
55 | 	args := []string{"build", "--tag=" + opts.image, opts.dir}
56 | 	if len(opts.dockerfile) > 0 {
57 | 		args = append(args, "--file=-")
58 | 	}
59 | 	cmd := exec.CommandContext(ctx,
60 | 		"docker", args...)
61 | 	if len(opts.dockerfile) > 0 {
62 | 		cmd.Stdin = bytes.NewReader(opts.dockerfile)
63 | 	}
64 | 	b, err = cmd.CombinedOutput()
65 | 	if err != nil {
66 | 		return errors.Wrapf(err, "building docker image failed, output=%s", string(b))
67 | 	}
68 | 	b, err = exec.CommandContext(ctx, "docker", "push", opts.image).CombinedOutput()
69 | 	return errors.Wrapf(err, "pushing docker image failed, output=%s", string(b))
70 | }
71 | 
72 | func readDockerfile(dir string) ([]byte, error) {
73 | 	df, err := ioutil.ReadFile(filepath.Join(dir, "Dockerfile"))
74 | 	if err != nil {
75 | 		if os.IsNotExist(err) {
76 | 			return nil, errors.Errorf("Dockerfile not found at directory %s", dir)
77 | 		}
78 | 		return nil, errors.Wrap(err, "error reading Dockerfile")
79 | 	}
80 | 	return df, nil
81 | }
82 | 
83 | func prepEntrypoint(opts remoteRunOpts) string {
84 | 	rc, _ := json.Marshal(opts.runCmd)
85 | 	cmd := []string{"/bin/rundevd",
86 | 		"-client-secret=" + opts.clientSecret,
87 | 		"-run-cmd", string(rc)}
88 | 
89 | 	if len(opts.buildCmds) > 0 {
90 | 		bc, _ := json.Marshal(opts.buildCmds)
91 | 		cmd = append(cmd, "-build-cmds", string(bc))
92 | 	}
93 | 	if len(opts.ignoreRules) > 0 {
94 | 		b, _ := json.Marshal(opts.ignoreRules)
95 | 		cmd = append(cmd, "-ignore-patterns", string(b))
96 | 	}
97 | 	if opts.syncDir != "" {
98 | 		cmd = append(cmd, "-sync-dir="+opts.syncDir)
99 | 	}
100 | 	sw := new(strings.Builder)
101 | 	fmt.Fprintf(sw, "ADD %s /bin/dumb_init\n", dumbInitURL)
102 | 	fmt.Fprintf(sw, "ADD %s /bin/rundevd\n", rundevdURL)
103 | 	fmt.Fprintln(sw, "RUN chmod +x /bin/rundevd /bin/dumb_init")
104 | 	fmt.Fprintln(sw, `ENTRYPOINT ["/bin/dumb_init", "--"]`)
105 | 	fmt.Fprintf(sw, `CMD [`)
106 | 	for i, a := range cmd {
107 | 		fmt.Fprintf(sw, "%q", a)
108 | 		if i != len(cmd)-1 {
109 | 			sw.WriteString(", \\\n\t")
110 | 		}
111 | 	}
112 | 	sw.WriteString(`]`)
113 | 	return sw.String()
114 | }
115 | 
--------------------------------------------------------------------------------
/cmd/client/syncer.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "encoding/json" 19 | "fmt" 20 | "github.com/ahmetb/rundev/lib/constants" 21 | "github.com/ahmetb/rundev/lib/fsutil" 22 | "github.com/ahmetb/rundev/lib/ignore" 23 | "github.com/pkg/errors" 24 | "io" 25 | "io/ioutil" 26 | "log" 27 | "net/http" 28 | ) 29 | 30 | type syncOpts struct { 31 | localDir string 32 | targetAddr string 33 | clientSecret string 34 | ignores *ignore.FileIgnores 35 | } 36 | 37 | type syncer struct { 38 | opts syncOpts 39 | } 40 | 41 | func newSyncer(opts syncOpts) *syncer { 42 | return &syncer{opts: opts} 43 | } 44 | 45 | func (s *syncer) checksum() (uint64, error) { 46 | fs, err := fsutil.Walk(s.opts.localDir, s.opts.ignores) 47 | if err != nil { 48 | return 0, errors.Wrap(err, "failed to walk the local fs") 49 | } 50 | return fs.RootChecksum(), nil 51 | } 52 | 53 | // uploadPatch creates and uploads a patch to remote endpoint to be 54 | // applied if it's currently at the given checksum. 55 | func (s *syncer) uploadPatch(remoteFS fsutil.FSNode, currentRemoteChecksum string) error { 56 | localFS, err := fsutil.Walk(s.opts.localDir, s.opts.ignores) 57 | if err != nil { 58 | return errors.Wrapf(err, "failed to walk local fs dir %s", s.opts.localDir) 59 | } 60 | localChecksum := localFS.RootChecksum() 61 | 62 | log.Printf("checksum mismatch local=%d remote=%s", localChecksum, currentRemoteChecksum) 63 | diff := fsutil.FSDiff(localFS, remoteFS) 64 | log.Printf("diff operations (%d)", len(diff)) 65 | for _, v := range diff { 66 | log.Printf(" %s", v) 67 | } 68 | 69 | tar, n, err := fsutil.PatchArchive(s.opts.localDir, diff, s.opts.ignores) 70 | if err != nil { 71 | return err 72 | } 73 | log.Printf("patch tarball is %d bytes. applying the patch.", n) 74 | 75 | url := s.opts.targetAddr + "/rundevd/patch" 76 | req, err := http.NewRequest(http.MethodPatch, url, tar) 77 | if err != nil { 78 | return errors.Wrap(err, "failed to create patch requeset") 79 | } 80 | req.Header.Set("Content-Type", constants.MimePatch) 81 | req.Header.Set(constants.HdrRundevClientSecret, s.opts.clientSecret) 82 | req.Header.Set(constants.HdrRundevPatchPreconditionSum, currentRemoteChecksum) 83 | req.Header.Set(constants.HdrRundevChecksum, fmt.Sprintf("%d", localChecksum)) 84 | resp, err := http.DefaultClient.Do(req) 85 | if err != nil { 86 | return errors.Wrap(err, "error making patch request") 87 | } 88 | defer resp.Body.Close() 89 | newRemoteChecksum := resp.Header.Get(constants.HdrRundevChecksum) 90 | if expected := http.StatusAccepted; resp.StatusCode != expected { 91 | b, _ := ioutil.ReadAll(resp.Body) 92 | return errors.Errorf("unexpected patch response status=%d (was expecting http %d) (new remote checksum: %s, old remote checksum: %s, local: %d). response body: %s", 93 | resp.StatusCode, expected, newRemoteChecksum, currentRemoteChecksum, localChecksum, string(b)) 94 | } 95 | log.Printf("patch applied (new remote checksum: %d)", localChecksum) 96 | return nil 97 | } 98 | 99 | // parseMismatchResponse decodes checksum mismatch response body which contains remote filesystem root node. 
100 | func parseMismatchResponse(body io.ReadCloser) (fsutil.FSNode, error) { 101 | defer body.Close() 102 | var v fsutil.FSNode 103 | d := json.NewDecoder(body) 104 | d.DisallowUnknownFields() 105 | err := d.Decode(&v) 106 | return v, errors.Wrap(err, "failed to decode checksum mismatch response body") 107 | } 108 | -------------------------------------------------------------------------------- /lib/fsutil/fstree.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "encoding/binary" 19 | "fmt" 20 | "github.com/ahmetb/rundev/lib/ignore" 21 | "hash/fnv" 22 | "io/ioutil" 23 | "os" 24 | "path/filepath" 25 | "time" 26 | 27 | "github.com/pkg/errors" 28 | ) 29 | 30 | type FSNode struct { 31 | Name string `json:"name"` 32 | Mode os.FileMode `json:"mode"` 33 | Size int64 `json:"size,omitempty"` // zero for dirs and whiteout files 34 | Mtime time.Time `json:"mtime"` // in UTC, zero time for dirs 35 | Nodes []FSNode `json:"nodes,omitempty"` 36 | } 37 | 38 | func (f FSNode) String() string { 39 | return "(" + f.Mode.String() + ") " + 40 | f.Name + " (" + fmt.Sprintf("%d", len(f.Nodes)) + ") nodes" 41 | } 42 | 43 | // RootChecksum computes the checksum of the directory through its child nodes. 44 | // It doesn't take f’s own name, mode, size and mtime into account. 45 | func (f FSNode) RootChecksum() uint64 { 46 | return f.childrenChecksum() 47 | } 48 | 49 | // checksum computes the checksum of f based on f itself and its children. 50 | func (f FSNode) checksum() uint64 { 51 | h := fnv.New64() 52 | h.Write([]byte(f.Name)) 53 | a1 := uint64(f.Size) 54 | a2 := uint64(f.Mode) 55 | a3 := uint64(f.Mtime.UnixNano()) 56 | a4 := f.childrenChecksum() 57 | 58 | b := make([]byte, 8) 59 | binary.LittleEndian.PutUint64(b, a1) 60 | h.Write(b) 61 | binary.LittleEndian.PutUint64(b, a2) 62 | h.Write(b) 63 | binary.LittleEndian.PutUint64(b, a3) 64 | h.Write(b) 65 | binary.LittleEndian.PutUint64(b, a4) 66 | h.Write(b) 67 | return h.Sum64() 68 | } 69 | 70 | // childrenChecksum computes the checksum f’s child nodes. 
71 | func (f FSNode) childrenChecksum() uint64 { 72 | h := fnv.New64() 73 | b := make([]byte, 8) 74 | for _, c := range f.Nodes { 75 | v := c.checksum() 76 | binary.LittleEndian.PutUint64(b, v) 77 | h.Write(b) 78 | } 79 | return h.Sum64() 80 | } 81 | 82 | func Walk(dir string, rules *ignore.FileIgnores) (FSNode, error) { 83 | fi, err := os.Stat(dir) 84 | if err != nil { 85 | return FSNode{}, errors.Wrapf(err, "failed to open directory %s", dir) 86 | } 87 | if !fi.IsDir() { 88 | return FSNode{}, errors.Errorf("path %s is not a directory", dir) 89 | } 90 | 91 | n, err := walkFile(dir, dir, fi, rules) 92 | n.Name = "$root" // value doesn't matter, but should be the same on local vs remote as we don't care about dir basename 93 | return n, errors.Wrap(err, "failed to traverse directory tree") 94 | } 95 | 96 | func walkFile(root, path string, fi os.FileInfo, rules *ignore.FileIgnores) (FSNode, error) { 97 | n := FSNode{ 98 | Name: fi.Name(), 99 | Mode: fi.Mode(), 100 | Size: fi.Size(), 101 | Mtime: fi.ModTime().Truncate(time.Second).UTC(), // tarballs don't support nsecs in time spec 102 | } 103 | if !fi.IsDir() { 104 | return n, nil 105 | } 106 | n.Size = 0 // zero size for dirs 107 | n.Mtime = time.Unix(0, 0).UTC() // zero time for dirs 108 | 109 | children, err := ioutil.ReadDir(path) 110 | if err != nil { 111 | return FSNode{}, errors.Wrapf(err, "failed to list files in directory %s", path) 112 | } 113 | if len(children) > 0 { 114 | n.Nodes = make([]FSNode, 0, len(children)) 115 | } 116 | for _, f := range children { 117 | childPath := filepath.Join(path, f.Name()) 118 | rel, _ := filepath.Rel(root, childPath) 119 | if rules.Ignored(rel) { 120 | continue 121 | } 122 | v, err := walkFile(root, childPath, f, rules) 123 | if err != nil { 124 | return FSNode{}, err 125 | } 126 | n.Nodes = append(n.Nodes, v) 127 | } 128 | return n, nil 129 | } 130 | -------------------------------------------------------------------------------- /cmd/client/cloudrun.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package main 16 | 17 | import ( 18 | "bytes" 19 | "context" 20 | "fmt" 21 | "github.com/pkg/errors" 22 | "google.golang.org/api/googleapi" 23 | run "google.golang.org/api/run/v1alpha1" 24 | "log" 25 | "net/http" 26 | "os/exec" 27 | "strings" 28 | "time" 29 | "unicode" 30 | ) 31 | 32 | const ( 33 | cloudRunManagedPlatform = "managed" 34 | ) 35 | 36 | type cloudrunOpts struct { 37 | platform string 38 | project string 39 | 40 | region string // managed only 41 | cluster string // gke only 42 | clusterLocation string // gke only 43 | } 44 | 45 | func deployCloudRun(ctx context.Context, opts cloudrunOpts, appName, image string) (string, error) { 46 | args := []string{ 47 | "--project=" + opts.project, 48 | "--platform=" + opts.platform, 49 | } 50 | deployArgs := []string{ 51 | "--image=" + image, 52 | } 53 | if opts.platform == cloudRunManagedPlatform { 54 | args = append(args, "--region="+opts.region) 55 | deployArgs = append(deployArgs, "--allow-unauthenticated") 56 | } else { 57 | args = append(args, "--cluster="+opts.cluster) 58 | args = append(args, "--cluster-location="+opts.clusterLocation) 59 | } 60 | 61 | b, err := exec.CommandContext(ctx, "gcloud", 62 | append(append([]string{ 63 | "alpha", "run", "deploy", "-q", appName}, args...), deployArgs...)...).CombinedOutput() 64 | if err != nil { 65 | return "", errors.Wrapf(err, "cloud run deployment failed. output:\n%s", string(b)) 66 | } 67 | var stderr bytes.Buffer 68 | cmd := exec.CommandContext(ctx, "gcloud", 69 | append([]string{"beta", "run", "services", "describe", "-q", appName, 70 | "--format=get(status.url)"}, args...)...) 71 | cmd.Stderr = &stderr 72 | b, err = cmd.Output() 73 | if err != nil { 74 | return "", errors.Wrapf(err, "cloud run describe failed. stderr:\n%s", string(stderr.Bytes())) 75 | } 76 | return strings.TrimSpace(string(b)), nil 77 | } 78 | 79 | // cleanupCloudRun fires and forgets a delete request to Cloud Run. 80 | // TODO: make it work with CR-GKE as well. 81 | // TODO: looks like we can shell out to gcloud (will add extra several secs) and handle CR-GKE too. 
82 | func cleanupCloudRun(appName, project, region string, timeout time.Duration) { 83 | log.Printf("cleaning up Cloud Run service %q", appName) 84 | cleanupCtx, cleanupCancel := context.WithTimeout(context.TODO(), timeout) 85 | defer cleanupCancel() 86 | rs, err := run.NewService(cleanupCtx) 87 | if err != nil { 88 | log.Printf("[warn] failed to initialize cloudrun client: %+v", err) 89 | return 90 | } 91 | rs.BasePath = strings.Replace(rs.BasePath, "://", "://"+region+"-", 1) 92 | uri := fmt.Sprintf("namespaces/%s/services/%s", project, appName) 93 | _, err = rs.Namespaces.Services.Delete(uri).Do() 94 | if err == nil { 95 | log.Printf("cleanup successful") 96 | return 97 | } 98 | if v, ok := err.(*googleapi.Error); ok { 99 | if v.Code == http.StatusNotFound { 100 | log.Printf("cloud run app already seems to be gone, that's weird...") 101 | return 102 | } 103 | log.Printf("[warn] run api cleanup call responded with error: %+v\nbody: %s", 104 | v, v.Body) 105 | } else { 106 | log.Printf("[warn] calling run api for cleanup failed: %+v", err) 107 | } 108 | } 109 | 110 | func currentProject(ctx context.Context) (string, error) { 111 | var stderr bytes.Buffer 112 | cmd := exec.CommandContext(ctx, "gcloud", "config", "get-value", "core/project", "-q") 113 | cmd.Stderr = &stderr 114 | b, err := cmd.Output() 115 | return strings.TrimRightFunc(string(b), unicode.IsSpace), 116 | errors.Wrapf(err, "failed to read current GCP project from gcloud: output=%q", stderr.String()) 117 | } 118 | -------------------------------------------------------------------------------- /cmd/client/reverseproxy.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package main 16 | 17 | import ( 18 | "bytes" 19 | "encoding/json" 20 | "fmt" 21 | "github.com/ahmetb/rundev/lib/constants" 22 | "github.com/ahmetb/rundev/lib/types" 23 | "github.com/pkg/errors" 24 | "io/ioutil" 25 | "log" 26 | "net/http" 27 | "strings" 28 | "time" 29 | ) 30 | 31 | type syncingRoundTripper struct { 32 | sync *syncer 33 | next http.RoundTripper 34 | maxRetries int 35 | hostHdr string 36 | } 37 | 38 | func withSyncingRoundTripper(next http.RoundTripper, sync *syncer, host string) http.RoundTripper { 39 | if next == nil { 40 | next = http.DefaultTransport 41 | } 42 | return &syncingRoundTripper{ 43 | next: next, 44 | sync: sync, 45 | maxRetries: 10, 46 | hostHdr: host} 47 | } 48 | 49 | func (s *syncingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 50 | start := time.Now() 51 | log.Printf("[reverse proxy] request received path=%s method=%s", req.URL.Path, req.Method) 52 | localChecksum, err := s.sync.checksum() 53 | if err != nil { 54 | return nil, err 55 | } 56 | req.Header.Set(constants.HdrRundevChecksum, fmt.Sprintf("%d", localChecksum)) 57 | 58 | // save request for repeating 59 | var body []byte 60 | if req.Body != nil { 61 | body, err = ioutil.ReadAll(req.Body) 62 | defer req.Body.Close() 63 | if err != nil { 64 | return nil, errors.Wrap(err, "failed to buffer request body") 65 | } 66 | } 67 | for retry := 0; retry < s.maxRetries; retry++ { 68 | if body != nil { 69 | req.Body = ioutil.NopCloser(bytes.NewReader(body)) 70 | } 71 | req.Host = s.hostHdr 72 | req.Header.Set("Host", s.hostHdr) 73 | 74 | // round-trip the request 75 | if retry != 0 { 76 | log.Printf("[reverse proxy] repeating request n=%d path=%s method=%s", retry, req.URL.Path, req.Method) 77 | } 78 | resp, err := s.next.RoundTrip(req) 79 | if err != nil { 80 | return nil, err // TODO(ahmetb) returning err from roundtrip method is not surfacing the error message in the response body, and prints a log to stderr by net/http's internal logger 81 | } 82 | ct := resp.Header.Get("content-type") 83 | switch ct { 84 | case constants.MimeProcessError: 85 | log.Printf("[reverse proxy] remote responded with process error") 86 | var pe types.ProcError 87 | if err := json.NewDecoder(resp.Body).Decode(&pe); err != nil { 88 | if resp.Body != nil { 89 | resp.Body.Close() 90 | } 91 | return nil, errors.Wrap(err, "failed to parse proc error response body") // TODO ahmetb mkErrorResp here 92 | } 93 | resp.Body.Close() 94 | return &http.Response{ 95 | StatusCode: resp.StatusCode, 96 | Body: ioutil.NopCloser(strings.NewReader(fmt.Sprintf("process error: %s\n\noutput:\n%s", pe.Message, pe.Output))), 97 | }, nil 98 | case constants.MimeDumbRepeat: 99 | // only for testing purposes 100 | log.Printf("[reverse proxy] remote responded with dumb-repeat") 101 | case constants.MimeChecksumMismatch: 102 | remoteSum := resp.Header.Get(constants.HdrRundevChecksum) 103 | log.Printf("[reverse proxy] remote responded with checksum mismatch (%s)", remoteSum) 104 | remoteFS, err := parseMismatchResponse(resp.Body) 105 | if err != nil { 106 | return nil, errors.Wrap(err, "failed to read remote fs in the response") // TODO mkErrorResp here 107 | } 108 | if err := s.sync.uploadPatch(remoteFS, remoteSum); err != nil { 109 | log.Printf("[retry %d] sync was failed: %v", retry, err) 110 | continue 111 | } 112 | default: 113 | log.Printf("[reverse proxy] request completed on retry=%d path=%s status=%d took=%v (%s)", retry, req.URL.Path, resp.StatusCode, time.Since(start), resp.Header.Get("content-type")) 114 | return resp, 
nil 115 | } 116 | } 117 | 118 | return &http.Response{ 119 | StatusCode: http.StatusInternalServerError, 120 | Body: ioutil.NopCloser(strings.NewReader(fmt.Sprintf("rundev tried %d times syncing code, but it was still getting a checksum mismatch.\n"+ 121 | "please report an issue with console logs, /rundev/fsz and /rundevd/fsz responses.", s.maxRetries))), 122 | }, nil 123 | } 124 | -------------------------------------------------------------------------------- /cmd/daemon/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "context" 19 | "encoding/json" 20 | "flag" 21 | "github.com/ahmetb/rundev/lib/ignore" 22 | "github.com/ahmetb/rundev/lib/types" 23 | "log" 24 | "net/http" 25 | "os" 26 | "os/signal" 27 | "time" 28 | ) 29 | 30 | var ( 31 | flRunCmd string 32 | flBuildCmds string 33 | flAddr string 34 | flSyncDir string 35 | flClientSecret string 36 | flIgnorePatterns string 37 | flChildPort int 38 | flProcessListenTimeout time.Duration 39 | ) 40 | 41 | func init() { 42 | log.SetFlags(log.Lmicroseconds) 43 | listenAddr := "localhost:8080" 44 | if p := os.Getenv("PORT"); p != "" { 45 | listenAddr = ":" + p 46 | } 47 | flag.StringVar(&flSyncDir, "sync-dir", ".", "directory to sync") 48 | flag.StringVar(&flClientSecret, "client-secret", "", "(optional) secret to authenticate patches from rundev client") 49 | flag.StringVar(&flAddr, "addr", listenAddr, "network address to start the daemon") 50 | flag.StringVar(&flBuildCmds, "build-cmds", "", "(JSON encoded [][]string) commands to rebuild the user app (inside the container)") 51 | flag.StringVar(&flRunCmd, "run-cmd", "", "(JSON array encoded as string) command to start the user app (inside the container)") 52 | flag.StringVar(&flIgnorePatterns, "ignore-patterns", "", "(JSON array encoded as string) exclusion rules in .dockerignore") 53 | flag.IntVar(&flChildPort, "user-port", 5555, "PORT environment variable passed to the user app") 54 | flag.DurationVar(&flProcessListenTimeout, "process-listen-timeout", time.Second*4, "time to wait for user app to listen on PORT") 55 | flag.Parse() 56 | } 57 | 58 | func main() { 59 | // TODO(ahmetb) instead of crashing the process on flag errors, consider serving error response type so it encourages a redeploy 60 | log.Printf("rundevd running as pid %d", os.Getpid()) 61 | ctx, cancel := context.WithCancel(context.Background()) 62 | defer cancel() 63 | signalCh := make(chan os.Signal, 1) 64 | signal.Notify(signalCh, os.Interrupt) 65 | go func() { 66 | sig := <-signalCh 67 | log.Printf("[debug] termination signal received: %s", sig) 68 | cancel() 69 | }() 70 | 71 | if flSyncDir == "" { 72 | log.Fatal("-sync-dir is empty") 73 | } 74 | // TODO(ahmetb) check if flSyncDir is a directory 75 | if flAddr == "" { 76 | log.Fatal("-addr is empty") 77 | } 78 | if flProcessListenTimeout <= 0 { 79 | 
log.Fatal("-process-listen-timeout must be positive") 80 | } 81 | if flChildPort <= 0 || flChildPort > 65535 { 82 | log.Fatalf("-user-port value (%d) is invalid", flChildPort) 83 | } 84 | if flRunCmd == "" { 85 | log.Fatal("-run-cmd is empty") 86 | } 87 | 88 | var runCmd types.Cmd 89 | if err := json.Unmarshal([]byte(flRunCmd), &runCmd); err != nil { 90 | log.Fatalf("failed to parse -run-cmd: %v", err) 91 | } else if len(runCmd) == 0 { 92 | log.Fatal("-run-cmd was empty (command array parsed into zero elements)") 93 | } 94 | 95 | var buildCmds types.BuildCmds 96 | if flBuildCmds != "" { 97 | if err := json.Unmarshal([]byte(flBuildCmds), &buildCmds); err != nil { 98 | log.Fatalf("failed to parse -build-cmds: %s", err) 99 | } 100 | } 101 | 102 | var ignorePatterns []string 103 | if flIgnorePatterns != "" { 104 | if err := json.Unmarshal([]byte(flIgnorePatterns), &ignorePatterns); err != nil { 105 | log.Fatalf("failed to parse -ignore-patterns: %v", err) 106 | } 107 | } 108 | 109 | handler := newDaemonServer(daemonOpts{ 110 | clientSecret: flClientSecret, 111 | syncDir: flSyncDir, 112 | runCmd: runCmd, 113 | buildCmds: buildCmds, 114 | childPort: flChildPort, 115 | portWaitTimeout: flProcessListenTimeout, 116 | ignores: ignore.NewFileIgnores(ignorePatterns), 117 | }) 118 | 119 | localServer := http.Server{ 120 | Handler: handler, 121 | Addr: flAddr} 122 | go func() { 123 | <-ctx.Done() 124 | log.Println("shutting down daemon server") 125 | localServer.Shutdown(ctx) 126 | }() 127 | log.Printf("daemon server starting at %s", flAddr) 128 | if err := localServer.ListenAndServe(); err != nil { 129 | if err == http.ErrServerClosed { 130 | log.Printf("local server shut down gracefully, exiting") 131 | os.Exit(0) 132 | } 133 | log.Fatalf("local server failed to start: %+v", err) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rundev (αlpha) 2 | 3 | rundev is a tool that provides rapid inner-loop development (save, build, 4 | deploy, browse) cycles for [Cloud Run] and Cloud Run on GKE. 5 | 6 | It syncs code from your development machine to Cloud Run container instances and 7 | circumvents the docker image build and Cloud Run re-deployment steps to provide 8 | rapid feedback loops. 9 | 10 | When you’re using Cloud Run, building a docker image, pushing the image, 11 | redeploying the app, and visiting the URL takes **over a minute**. 12 | 13 | Rundev brings the inner-loop latency to **under a second** for dynamic languages 14 | (like Python, Node.js). For compiled languages (like Go, Java), it shows nearly 15 | identical (or faster) compilation speeds to your development machine: 16 | 17 | | [Sample app][sa] iteration time | Cloud Run | Cloud Run on GKE | 18 | |--|--|--| 19 | | Python | <1s | <1s | 20 | | Node.js | ~1s | <1s | 21 | | Go | ~3s | ~1s | 22 | 23 | [sa]: https://cloud.google.com/run/docs/quickstarts/build-and-deploy 24 | [Cloud Run]: https://cloud.google.com/run 25 | 26 | ## User Guide 27 | 28 | 29 | 30 | - [Limitations](#limitations) 31 | - [Installation](#installation) 32 | - [Start developing](#start-developing) 33 | - [Syncing files](#syncing-files) 34 | - [Debug endpoints](#debug-endpoints) 35 | 36 | 37 | 38 | ### Limitations 39 | 40 | - Supports only [Cloud Run], and Cloud Run on GKE ([only][contract] HTTP apps 41 | listening on `$PORT`) 42 | - Requires your app to have a Dockerfile (Jib, pack etc. 
not supported)
43 | - Requires a local `docker` daemon (only to build/push the image once)
44 | - For compiled languages, the compiler/SDK must be present in the final stage
45 | of the image (e.g. `javac` or `go` compiler)
46 | 
47 | [contract]: https://cloud.google.com/run/docs/reference/container-contract
48 | 
49 | ### Installation
50 | 
51 | 
52 | - Install a local Docker engine (e.g. Docker Desktop)
53 | - Install `gcloud` CLI
54 | 
55 | Install the nightly build of the `rundev` client on your developer machine.
56 | 
57 | Currently only [macOS][darwin] and [Linux][linux] are supported:
58 | 
59 | ```sh
60 | install_dir="/usr/local/bin" && \
61 | curl -sSLfo "${install_dir}/rundev" \
62 | "https://storage.googleapis.com/rundev-test/nightly/client/$(uname | tr '[:upper:]' '[:lower:]')/rundev-latest" && \
63 | chmod +x "${install_dir}/rundev"
64 | ```
65 | 
66 | [darwin]: https://storage.googleapis.com/rundev-test/nightly/client/darwin/rundev-latest
67 | [linux]: https://storage.googleapis.com/rundev-test/nightly/client/linux/rundev-latest
68 | 
69 | ### Start developing
70 | 
71 | For an application that picks up new source code by restarting, just run:
72 | 
73 | ```sh
74 | rundev
75 | ```
76 | 
77 | If you have a compiled application, or an app that needs explicit build steps,
78 | annotate the `RUN` directives in the Dockerfile with `# rundev` comments:
79 | 
80 | ```sh
81 | RUN go build -o /out/server . # rundev
82 | RUN npm install --production # rundev
83 | ```
84 | 
85 | You can use the `# rundev[...]` form to run commands only when certain files are updated:
86 | 
87 | ```sh
88 | RUN go build -o /out/server . # rundev[**/**.go, go.*]
89 | RUN pip install -r requirements.txt # rundev[requirements.txt]
90 | ```
91 | 
92 | After the `rundev` command deploys an app to Cloud Run for development, you will see a
93 | log line as follows:
94 | 
95 | ```text
96 | local proxy server starting at http://localhost:8080 (proxying to https://...
97 | ```
98 | 
99 | Visit http://localhost:8080 to access your application with live code syncing.
100 | 
101 | Every time you visit this local server, `rundev` will ensure your local
102 | filesystem is in sync with the container’s filesystem:
103 | 
104 | - if necessary, the modified files will be synced to the Cloud Run app
105 | - your app will be rebuilt (if there are any build steps) and restarted
106 | - your request will be proxied to Cloud Run container instances.
107 | 
108 | Try changing the code, then visit the address again to see the updated
109 | application.
110 | 
111 | When you're done developing, hit Ctrl+C once to clean up and exit.
112 | 
113 | ### Syncing files
114 | 
115 | Rundev uses a file-sync-over-HTTP protocol to securely sync files between the
116 | `rundev` client (running on your developer machine) and the `rundevd` daemon
117 | (running inside the container on Cloud Run).
118 | 
119 | If there are files you don’t want to synchronize to the container (such
120 | as `.pyc` files, `.swp` files, the `node_modules` directory, or the `.git`
121 | directory), use a [.dockerignore
122 | file](https://docs.docker.com/engine/reference/builder/#dockerignore-file) to
123 | specify such files.
124 | 
125 | If you change the `.dockerignore` file or `Dockerfile`, you must restart
126 | the `rundev` session.
127 | 
128 | ### Debug endpoints
129 | 
130 | 
131 | These endpoints are useful for debugging when something goes wrong with fs syncing
132 | or the process lifecycle.
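For example, while a `rundev` session is running you can inspect both sides of the sync from another terminal (a sketch, assuming the default `localhost:8080` proxy address):

```sh
# local (client) view of the file tree being synced
curl -s http://localhost:8080/rundev/fsz

# remote view, as rundevd sees it inside the container
curl -s http://localhost:8080/rundevd/fsz

# force a restart of the user process inside the container
curl -s http://localhost:8080/rundevd/restart
```

The full list of endpoints: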
133 | 134 | ```text 135 | /rundev/debugz : debug data for rundev client 136 | /rundev/fsz : local fs tree (+ ?full) 137 | 138 | /rundevd/fsz : remote fs tree (+ ?full) 139 | /rundevd/debugz : debug data for rundevd daemon 140 | /rundevd/procz : logs of current process 141 | /rundevd/pstree : process tree 142 | /rundevd/restart : restart the user process 143 | /rundevd/kill : kill the user process (or specify ?pid=) 144 | ``` 145 | 146 | --- 147 | 148 | This is not an official Google product. See [LICENSE](./LICENSE). 149 | -------------------------------------------------------------------------------- /lib/fsutil/archive.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "archive/tar" 19 | "bytes" 20 | "compress/gzip" 21 | "github.com/ahmetb/rundev/lib/constants" 22 | "github.com/ahmetb/rundev/lib/ignore" 23 | "github.com/pkg/errors" 24 | "io" 25 | "io/ioutil" 26 | "os" 27 | "path/filepath" 28 | ) 29 | 30 | // PatchArchive creates a tarball for given operations in baseDir and returns its size. 31 | func PatchArchive(baseDir string, ops []DiffOp, ignores *ignore.FileIgnores) (io.Reader, int, error) { 32 | var b bytes.Buffer 33 | gw, err := gzip.NewWriterLevel(&b, gzip.BestSpeed) 34 | if err != nil { 35 | return nil, -1, errors.Wrap(err, "failed to initialize gzip writer") 36 | } 37 | tw := tar.NewWriter(gw) 38 | 39 | files, err := normalizeFiles(baseDir, ops, ignores) 40 | if err != nil { 41 | return nil, -1, errors.Wrap(err, "failed to normalize file list") 42 | } 43 | 44 | for _, v := range files { 45 | if err := addFile(tw, v); err != nil { 46 | return nil, -1, errors.Wrap(err, "tar failure") 47 | } 48 | } 49 | if err := tw.Close(); err != nil { 50 | return nil, -1, errors.Wrap(err, "failed to finalize tarball writer") 51 | } 52 | if err := gw.Close(); err != nil { 53 | return nil, -1, errors.Wrap(err, "failed to finalize gzip writer") 54 | } 55 | return &b, b.Len(), nil 56 | } 57 | 58 | func addFile(tw *tar.Writer, file archiveFile) error { 59 | if file.stat.Mode()&os.ModeSymlink != 0 { 60 | return errors.Errorf("adding symlinks currently not supported, file:%s", file.fullPath) 61 | } 62 | hdr, err := tar.FileInfoHeader(file.stat, "") 63 | if err != nil { 64 | return errors.Wrapf(err, "failed to create tar header for file %s", file.fullPath) 65 | } 66 | hdr.Name = filepath.ToSlash(file.extractPath) // tar paths must be forward slash 67 | if err := tw.WriteHeader(hdr); err != nil { 68 | return errors.Wrap(err, "failed to write tar header") 69 | } 70 | if file.stat.Size() == 0 { 71 | return nil 72 | } 73 | f, err := os.Open(file.fullPath) 74 | if err != nil { 75 | return errors.Wrapf(err, "failed to open file %s for tar-ing", file.fullPath) 76 | } 77 | defer f.Close() 78 | _, err = io.Copy(tw, f) 79 | return errors.Wrapf(err, "failed to copy file %s into tar", hdr.Name) 80 | } 81 
|
82 | type archiveFile struct {
83 | 	fullPath    string
84 | 	extractPath string
85 | 	stat        os.FileInfo
86 | }
87 | 
88 | // normalizeFiles returns the list of all files that should be added to the archive,
89 | // creating whiteout files (indicating deletions and empty dir placeholders)
90 | // and recursively traversing directories that are to be added.
91 | func normalizeFiles(baseDir string, ops []DiffOp, ignores *ignore.FileIgnores) ([]archiveFile, error) {
92 | 	var out []archiveFile
93 | 	for _, op := range ops {
94 | 		fullPath := filepath.Join(baseDir, filepath.FromSlash(op.Path))
95 | 		if op.Type == DiffOpDel {
96 | 			// create a whiteout file
97 | 			out = append(out, archiveFile{
98 | 				fullPath:    fullPath,
99 | 				extractPath: op.Path + constants.WhiteoutDeleteSuffix,
100 | 				stat:        whiteoutStat{name: filepath.Base(fullPath)},
101 | 			})
102 | 		} else if op.Type == DiffOpAdd {
103 | 			fi, err := os.Stat(fullPath)
104 | 			if err != nil {
105 | 				return nil, errors.Wrapf(err, "failed to stat file %s for tar-ing", fullPath)
106 | 			}
107 | 
108 | 			if ignores.Ignored(op.Path) {
109 | 				continue
110 | 			}
111 | 
112 | 			if !fi.IsDir() {
113 | 				out = append(out, archiveFile{
114 | 					fullPath:    fullPath,
115 | 					extractPath: op.Path,
116 | 					stat:        nanosecMaskingStat{fi},
117 | 				})
118 | 			} else {
119 | 				// directories must be traversed recursively
120 | 				files, err := expandDirEntries(fullPath)
121 | 				if err != nil {
122 | 					return nil, err
123 | 				}
124 | 				for _, f := range files {
125 | 					relPath, err := filepath.Rel(baseDir, f.fullPath)
126 | 					if err != nil {
127 | 						return nil, errors.Wrapf(err, "failed to calculate relative path (%s and %s)", baseDir, f.fullPath)
128 | 					}
129 | 					if ignores.Ignored(relPath) {
130 | 						continue
131 | 					}
132 | 					out = append(out, archiveFile{
133 | 						fullPath:    f.fullPath,
134 | 						extractPath: relPath,
135 | 						stat:        nanosecMaskingStat{f.stat},
136 | 					})
137 | 				}
138 | 			}
139 | 		} else {
140 | 			return nil, errors.Errorf("unknown diff operation type (%v)", op.Type)
141 | 		}
142 | 	}
143 | 	return out, nil
144 | }
145 | 
146 | type tarEntry struct {
147 | 	fullPath string
148 | 	stat     os.FileInfo
149 | }
150 | 
151 | // expandDirEntries walks dir recursively to list directory and file entries in sorted order.
152 | func expandDirEntries(dir string) ([]tarEntry, error) {
153 | 	var out []tarEntry
154 | 	stat, err := os.Stat(dir)
155 | 	if err != nil {
156 | 		return nil, errors.Wrapf(err, "failed to read info for dir %s", dir)
157 | 	}
158 | 	ls, err := ioutil.ReadDir(dir)
159 | 	if err != nil {
160 | 		return nil, errors.Wrapf(err, "failed to read dir %s", dir)
161 | 	}
162 | 
163 | 	// add self (dir entry)
164 | 	out = append(out, tarEntry{dir, zeroSizeStat{stat}})
165 | 	// add child entries
166 | 	for _, fi := range ls {
167 | 		fp := filepath.Join(dir, fi.Name())
168 | 		if !fi.IsDir() {
169 | 			v := tarEntry{fp, fi}
170 | 			out = append(out, v)
171 | 		} else {
172 | 			entries, err := expandDirEntries(fp)
173 | 			if err != nil {
174 | 				return nil, err
175 | 			}
176 | 			out = append(out, entries...)
177 | 		}
178 | 	}
179 | 	return out, nil
180 | }
181 | 
182 | type countWriter struct{ n int }
183 | 
184 | func (c *countWriter) Write(p []byte) (n int, err error) {
185 | 	c.n += len(p)
186 | 	return len(p), nil
187 | }
188 | 
--------------------------------------------------------------------------------
/lib/fsutil/fsdiff_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2019 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package fsutil 16 | 17 | import ( 18 | "os" 19 | "testing" 20 | 21 | "github.com/google/go-cmp/cmp" 22 | ) 23 | 24 | func Test_fsDiff_empty(t *testing.T) { 25 | expected := []DiffOp(nil) 26 | got := FSDiff(FSNode{Name: "root1"}, FSNode{Name: "root2"}) 27 | if diff := cmp.Diff(expected, got); diff != "" { 28 | t.Fatalf("diff:\n%s", diff) 29 | } 30 | } 31 | 32 | func Test_fsDiff_rootDirNameNotCompared(t *testing.T) { 33 | fs1 := FSNode{ 34 | Name: "root1", 35 | Mode: os.ModeDir | os.ModePerm, 36 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt"}}} 37 | fs2 := FSNode{ 38 | Name: "root2", 39 | Mode: os.ModeDir | os.ModePerm, 40 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt"}}} 41 | 42 | got := FSDiff(fs1, fs2) 43 | expected := []DiffOp(nil) 44 | if diff := cmp.Diff(expected, got); diff != "" { 45 | t.Fatalf("diff:\n%s", diff) 46 | } 47 | } 48 | 49 | func Test_fsDiff_leftSideEmpty(t *testing.T) { 50 | fs1 := FSNode{ 51 | Name: "root1", 52 | Mode: os.ModeDir | os.ModePerm} 53 | fs2 := FSNode{ 54 | Name: "root2", 55 | Mode: os.ModeDir | os.ModePerm, 56 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt"}}} 57 | 58 | expected := []DiffOp{ 59 | {DiffOpDel, "a.txt"}, 60 | {DiffOpDel, "b.txt"}, 61 | } 62 | got := FSDiff(fs1, fs2) 63 | if diff := cmp.Diff(expected, got); diff != "" { 64 | t.Fatalf("diff:\n%s", diff) 65 | } 66 | } 67 | 68 | func Test_fsDiff_rightSideEmpty(t *testing.T) { 69 | fs1 := FSNode{ 70 | Name: "root2", 71 | Mode: os.ModeDir | os.ModePerm, 72 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt"}}} 73 | fs2 := FSNode{ 74 | Name: "root1", 75 | Mode: os.ModeDir | os.ModePerm} 76 | 77 | expected := []DiffOp{ 78 | {DiffOpAdd, "a.txt"}, 79 | {DiffOpAdd, "b.txt"}, 80 | } 81 | got := FSDiff(fs1, fs2) 82 | if diff := cmp.Diff(expected, got); diff != "" { 83 | t.Fatalf("diff:\n%s", diff) 84 | } 85 | } 86 | 87 | func Test_fsDiff_fileModification(t *testing.T) { 88 | fs1 := FSNode{ 89 | Name: "root1", 90 | Mode: os.ModeDir | os.ModePerm, 91 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt", Mode: 0644}}} 92 | fs2 := FSNode{ 93 | Name: "root2", 94 | Mode: os.ModeDir | os.ModePerm, 95 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt", Mode: 0600}}} 96 | 97 | expected := []DiffOp{ 98 | {DiffOpAdd, "b.txt"}, 99 | } 100 | got := FSDiff(fs1, fs2) 101 | if diff := cmp.Diff(expected, got); diff != "" { 102 | t.Fatalf("diff:\n%s", diff) 103 | } 104 | } 105 | 106 | func Test_fsDiff_fileAddDelete(t *testing.T) { 107 | fs1 := FSNode{ 108 | Name: "root1", 109 | Mode: os.ModeDir | os.ModePerm, 110 | Nodes: []FSNode{{Name: "a.txt"}, {Name: "b.txt"}}} 111 | fs2 := FSNode{ 112 | Name: "root2", 113 | Mode: os.ModeDir | os.ModePerm, 114 | Nodes: []FSNode{{Name: "b.txt"}, {Name: "c.txt"}}} 115 | 116 | expected := []DiffOp{ 117 | {DiffOpAdd, "a.txt"}, 118 | {DiffOpDel, "c.txt"}, 119 | } 120 | got := FSDiff(fs1, fs2) 121 | if diff := cmp.Diff(expected, got); diff != "" { 122 | t.Fatalf("diff:\n%s", diff) 123 | } 124 | } 125 | 126 | func Test_fsDiff_subDirectory(t *testing.T) { 127 | fs1 := FSNode{ 128 | Name: "root1", 129 | Mode: os.ModeDir | os.ModePerm, 
130 | Nodes: []FSNode{ 131 | {Name: "a.txt"}, 132 | {Name: "subdir", 133 | Mode: os.ModeDir | os.ModePerm, 134 | Nodes: []FSNode{{Name: "b.txt"}, {Name: "c.txt"}}, 135 | }}} 136 | fs2 := FSNode{ 137 | Name: "root2", 138 | Mode: os.ModeDir | os.ModePerm, 139 | Nodes: []FSNode{ 140 | {Name: "a.txt"}, 141 | }} 142 | 143 | expected := []DiffOp{ 144 | {DiffOpAdd, "subdir"}, 145 | } 146 | got := FSDiff(fs1, fs2) 147 | if diff := cmp.Diff(expected, got); diff != "" { 148 | t.Fatalf("expected add subdir, got diff:\n%s", diff) 149 | } 150 | 151 | expected = []DiffOp{ 152 | {DiffOpDel, "subdir"}, 153 | } 154 | got = FSDiff(fs2, fs1) 155 | if diff := cmp.Diff(expected, got); diff != "" { 156 | t.Fatalf("expecte del subdir, got diff:\n%s", diff) 157 | } 158 | } 159 | 160 | func Test_fsDiff_directoryChangedToFile(t *testing.T) { 161 | fs1 := FSNode{ 162 | Name: "root1", 163 | Mode: os.ModeDir | os.ModePerm, 164 | Nodes: []FSNode{ 165 | { 166 | Name: "subdir", 167 | Mode: os.ModeDir | os.ModePerm, 168 | Nodes: []FSNode{{Name: "file1"}, {Name: "file2"}}, 169 | }, 170 | }, 171 | } 172 | 173 | fs2 := FSNode{ 174 | Name: "root2", 175 | Mode: os.ModeDir | os.ModePerm, 176 | Nodes: []FSNode{ 177 | { 178 | Name: "subdir", 179 | Mode: 0644, // now a file! 180 | }, 181 | }, 182 | } 183 | 184 | expected := []DiffOp{ 185 | {DiffOpDel, "subdir"}, 186 | {DiffOpAdd, "subdir"}, 187 | } 188 | 189 | got := FSDiff(fs1, fs2) 190 | if diff := cmp.Diff(expected, got); diff != "" { 191 | t.Fatalf("diff:\n%s", diff) 192 | } 193 | 194 | // switching the order shouldn't make any difference in diff ops 195 | got = FSDiff(fs2, fs1) 196 | if diff := cmp.Diff(expected, got); diff != "" { 197 | t.Fatalf("diff:\n%s", diff) 198 | } 199 | } 200 | 201 | func Test_fsDiff_interleaved(t *testing.T) { 202 | fs1 := FSNode{ 203 | Name: "root1", 204 | 205 | Mode: os.ModeDir | os.ModePerm, 206 | Nodes: []FSNode{ 207 | { 208 | Name: "subdir", 209 | Mode: os.ModeDir, 210 | Nodes: []FSNode{{Name: "a0"}, {Name: "a1"}, {Name: "a3"}, {Name: "a7"}}, 211 | }, 212 | }, 213 | } 214 | fs2 := FSNode{ 215 | Name: "root1", 216 | Mode: os.ModeDir | os.ModePerm, 217 | Nodes: []FSNode{ 218 | { 219 | Name: "subdir", 220 | Mode: os.ModeDir, 221 | Nodes: []FSNode{{Name: "a1"}, {Name: "a2"}, {Name: "a4"}, {Name: "a5"}, {Name: "a6"}, {Name: "a8"}}, 222 | }, 223 | }, 224 | } 225 | 226 | expected := []DiffOp{ 227 | {DiffOpAdd, "subdir/a0"}, 228 | {DiffOpDel, "subdir/a2"}, 229 | {DiffOpAdd, "subdir/a3"}, 230 | {DiffOpDel, "subdir/a4"}, 231 | {DiffOpDel, "subdir/a5"}, 232 | {DiffOpDel, "subdir/a6"}, 233 | {DiffOpAdd, "subdir/a7"}, 234 | {DiffOpDel, "subdir/a8"}, 235 | } 236 | got := FSDiff(fs1, fs2) 237 | if diff := cmp.Diff(expected, got); diff != "" { 238 | t.Fatalf("diff:\n%s", diff) 239 | } 240 | } 241 | 242 | func Test_fsDiff(t *testing.T) { 243 | // (fs1) (fs2) 244 | // 245 | // . . 
246 | // ├-- e1 ├-- e1 247 | // | ├-- e1c1 | ├-- e1c1' 248 | // | └-- e1c2 | └–– e1c3 249 | // ├-- e3 ├-- e2 250 | // | └-- e3c1 ├-- e4 251 | // └-- e4 └-- e5 252 | // └–– e5c1 253 | fs1 := FSNode{ 254 | Name: "root1", 255 | Mode: os.ModeDir | os.ModePerm, 256 | Nodes: []FSNode{ 257 | {Name: "e1", 258 | Mode: os.ModeDir | os.ModePerm, 259 | Nodes: []FSNode{ 260 | {Name: "e1c1"}, 261 | {Name: "e1c2"}, 262 | }}, 263 | {Name: "e3", 264 | Mode: os.ModeDir | os.ModePerm, 265 | Nodes: []FSNode{{Name: "e3c1"}}}, 266 | {Name: "e4", 267 | Mode: os.ModeDir | os.ModePerm, 268 | Nodes: nil}, 269 | }, 270 | } 271 | 272 | fs2 := FSNode{ 273 | Name: "root2", 274 | Mode: os.ModeDir | os.ModePerm, 275 | Nodes: []FSNode{ 276 | {Name: "e1", 277 | Mode: os.ModeDir | os.ModePerm, 278 | Nodes: []FSNode{ 279 | {Name: "e1c1", Size: 200}, 280 | {Name: "e1c3"}, 281 | }}, 282 | {Name: "e2", 283 | Mode: os.ModeDir | os.ModePerm}, 284 | {Name: "e4", 285 | Mode: os.ModeDir | os.ModePerm}, 286 | {Name: "e5", 287 | Mode: os.ModeDir | os.ModePerm, 288 | Nodes: []FSNode{{Name: "e5c1"}}}, 289 | }, 290 | } 291 | 292 | expected := []DiffOp{ 293 | {DiffOpAdd, "e1/e1c1"}, 294 | {DiffOpAdd, "e1/e1c2"}, 295 | {DiffOpDel, "e1/e1c3"}, 296 | {DiffOpDel, "e2"}, 297 | {DiffOpAdd, "e3"}, 298 | {DiffOpDel, "e5"}, 299 | } 300 | got := FSDiff(fs1, fs2) 301 | if diff := cmp.Diff(expected, got); diff != "" { 302 | t.Fatalf("diff:\n%s", diff) 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /cmd/client/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 
15 | package main
16 | 
17 | import (
18 | "context"
19 | "flag"
20 | "github.com/ahmetb/rundev/lib/dockerfile"
21 | "github.com/ahmetb/rundev/lib/ignore"
22 | "github.com/ahmetb/rundev/lib/types"
23 | "github.com/google/shlex"
24 | "github.com/google/uuid"
25 | "log"
26 | "net/http"
27 | "os"
28 | "os/signal"
29 | "path/filepath"
30 | "regexp"
31 | "time"
32 | )
33 | 
34 | var (
35 | flLocalDir *string
36 | flRemoteDir *string
37 | flAddr *string
38 | flBuildCmd *string
39 | flRunCmd *string
40 | flNoCloudRun *bool
41 | 
42 | flCloudRunName *string
43 | flCloudRunCluster *string
44 | flCloudRunClusterLocation *string
45 | flCloudRunPlatform *string
46 | )
47 | 
48 | const (
49 | appName = `rundev-app`
50 | runRegion = `us-central1` // TODO(ahmetb) allow user to configure
51 | localRundevdURL = "http://localhost:8888" // TODO(ahmetb) allow user to configure (albeit, just for debugging/dev rundev itself, a.k.a -no-cloudrun)
52 | cleanupDeadline = time.Second * 1
53 | )
54 | 
55 | func init() {
56 | log.SetFlags(log.Lmicroseconds)
57 | 
58 | flLocalDir = flag.String("local-dir", ".", "local directory to sync")
59 | flRemoteDir = flag.String("remote-dir", "", "remote directory to sync (inside the container), defaults to container's WORKDIR")
60 | flAddr = flag.String("addr", "localhost:8080", "network address to start the local proxy server")
61 | flBuildCmd = flag.String("build-cmd", "", "(optional) command to re-build code (inside the container) after syncing, "+
62 | "inferred from Dockerfile by default (add a #rundev comment to RUN directives)")
63 | flRunCmd = flag.String("run-cmd", "", "(optional) command to start application (inside the container) after syncing, inferred from Dockerfile by default")
64 | 
65 | flNoCloudRun = flag.Bool("no-cloudrun", false, "do not deploy to Cloud Run (you should start rundevd on localhost:8888)")
66 | flCloudRunName = flag.String("name", appName, "name of the Cloud Run service")
67 | flCloudRunPlatform = flag.String("platform", "managed", "(passthrough to gcloud) managed or gke")
68 | flCloudRunCluster = flag.String("cluster", "", "(passthrough to gcloud) required when -platform=gke")
69 | flCloudRunClusterLocation = flag.String("cluster-location", "", "(passthrough to gcloud) required when -platform=gke")
70 | flag.Parse()
71 | }
72 | 
73 | func main() {
74 | clientSecret := uuid.New().String()
75 | ctx, cancel := context.WithCancel(context.Background())
76 | defer cancel()
77 | signalCh := make(chan os.Signal, 1)
78 | signal.Notify(signalCh, os.Interrupt)
79 | go func() {
80 | sig := <-signalCh
81 | log.Printf("termination signal received: %s", sig)
82 | cancel()
83 | }()
84 | if fi, err := os.Stat(*flLocalDir); err != nil {
85 | log.Fatalf("cannot open -local-dir: %v", err)
86 | } else if !fi.IsDir() {
87 | log.Fatalf("-local-dir (%s) is not a directory (%s)", *flLocalDir, fi.Mode())
88 | }
89 | 
90 | if *flCloudRunPlatform == "" {
91 | log.Fatal("-platform is empty")
92 | } else if *flCloudRunPlatform != cloudRunManagedPlatform {
93 | if *flCloudRunCluster == "" {
94 | log.Fatal("-cluster is empty, must be supplied when -platform is not 'managed'")
95 | } else if *flCloudRunClusterLocation == "" {
96 | log.Fatal("-cluster-location is empty, must be supplied when -platform is not 'managed'")
97 | }
98 | }
99 | var fileIgnores *ignore.FileIgnores
100 | var ignoreRules []string
101 | if f, err := os.Open(filepath.Join(*flLocalDir, ".dockerignore")); err == nil {
102 | defer f.Close()
103 | ignoreRules, err = ignore.ParseDockerignore(f)
104 | if err != nil {
105
| log.Fatalf("failed to parse .dockerignore: %+v", err) 106 | } 107 | fileIgnores = ignore.NewFileIgnores(ignoreRules) 108 | log.Printf("[info] parsed %d rules from .dockerignore file", len(ignoreRules)) 109 | } else if os.IsNotExist(err) { 110 | log.Printf("if there are files you don't want to sync, you can create a .dockerignore file") 111 | } else { 112 | log.Fatalf("failed attempt to read .dockerignore file: %+v", err) 113 | } 114 | 115 | var rundevdURL string 116 | if *flNoCloudRun { 117 | rundevdURL = localRundevdURL 118 | log.Printf("not deploying to Cloud Run. make sure to start rundevd at %s", rundevdURL) 119 | } else { 120 | if *flCloudRunName == "" { 121 | log.Fatal("-name is empty") 122 | } 123 | log.Printf("starting one-time \"build & push & deploy\" to Cloud Run") 124 | project, err := currentProject(ctx) 125 | if err != nil { 126 | log.Fatalf("error reading current project ID from gcloud: %+v", err) 127 | } 128 | if project == "" { 129 | log.Fatalf("default project not set on gcloud. run: gcloud config set core/project PROJECT_NAME") 130 | } 131 | imageName := `gcr.io/` + project + `/` + *flCloudRunName 132 | 133 | df, err := readDockerfile(*flLocalDir) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | d, err := dockerfile.ParseDockerfile(df) 138 | if err != nil { 139 | log.Fatalf("failed to parse Dockerfile: %+v", err) 140 | } 141 | var runCmd dockerfile.Cmd 142 | if *flRunCmd == "" { 143 | runCmd, err = dockerfile.ParseEntrypoint(d) 144 | if err != nil { 145 | log.Fatalf("failed to parse entrypoint/cmd from dockerfile. try specifying -run-cmd? error: %+v", err) 146 | } 147 | log.Printf("[info] parsed entrypoint as %s", runCmd) 148 | } else { 149 | v, err := shlex.Split(*flRunCmd) 150 | if err != nil { 151 | log.Fatalf("failed to parse -run-cmd into commands and args: %+v", err) 152 | } 153 | runCmd = dockerfile.Cmd{v[0], v[1:]} 154 | } 155 | 156 | var buildCmds types.BuildCmds 157 | if *flBuildCmd == "" { 158 | v := dockerfile.ParseBuildCmds(d) 159 | if len(v) == 0 { 160 | log.Printf("[info] -build-cmd not specified: if you have steps to build your code after syncing, use this flag, or add #rundev comment to RUN statements in your Dockerfile") 161 | } else { 162 | buildCmds = v 163 | } 164 | } else { 165 | argv, err := shlex.Split(*flBuildCmd) 166 | if err != nil { 167 | log.Fatalf("failed to parse -build-cmd into commands and args: %+v", err) 168 | } 169 | log.Printf("[info] parsed -build-cmd as: %s", argv) 170 | buildCmds = []types.BuildCmd{ 171 | { 172 | C: argv, 173 | On: nil, 174 | }, 175 | } 176 | } 177 | 178 | ro := remoteRunOpts{ 179 | syncDir: *flRemoteDir, 180 | runCmd: runCmd.Flatten(), 181 | buildCmds: buildCmds, 182 | clientSecret: clientSecret, 183 | ignoreRules: ignoreRules, // TODO(ahmetb) use this 184 | } 185 | newEntrypoint := prepEntrypoint(ro) 186 | log.Printf("[info] injecting to dockerfile:\n%s", regexp.MustCompile("(?m)^").ReplaceAllString(newEntrypoint, "\t")) 187 | df = append(df, '\n') 188 | df = append(df, []byte(newEntrypoint)...) 
189 | bo := buildOpts{ 190 | dir: *flLocalDir, 191 | image: imageName, 192 | dockerfile: df} 193 | log.Print("building and pushing docker image") 194 | if err := dockerBuildPush(ctx, bo); err != nil { 195 | log.Fatal(err) 196 | } 197 | log.Printf("built and pushed docker image: %s", imageName) 198 | 199 | log.Print("deploying to Cloud Run") 200 | appURL, err := deployCloudRun(ctx, cloudrunOpts{ 201 | platform: *flCloudRunPlatform, 202 | project: project, 203 | region: runRegion, 204 | cluster: *flCloudRunCluster, 205 | clusterLocation: *flCloudRunClusterLocation, 206 | }, *flCloudRunName, imageName) 207 | if err != nil { 208 | log.Fatalf("error deploying to Cloud Run: %+v", err) 209 | } 210 | defer cleanupCloudRun(*flCloudRunName, project, runRegion, cleanupDeadline) 211 | rundevdURL = appURL 212 | } 213 | sync := newSyncer(syncOpts{ 214 | localDir: *flLocalDir, 215 | targetAddr: rundevdURL, 216 | clientSecret: clientSecret, 217 | ignores: fileIgnores, 218 | }) 219 | localServerHandler, err := newLocalServer(localServerOpts{ 220 | proxyTarget: rundevdURL, 221 | sync: sync, 222 | }) 223 | if err != nil { 224 | log.Fatalf("failed to initialize local server: %+v", err) 225 | } 226 | localServer := http.Server{ 227 | Handler: localServerHandler, 228 | Addr: *flAddr} 229 | 230 | go func() { 231 | <-ctx.Done() 232 | log.Println("shutting down server") 233 | _ = localServer.Shutdown(ctx) // TODO(ahmetb) maybe use .Close? 234 | }() 235 | log.Printf("local proxy server starting at http://%s (proxying to %s)", *flAddr, rundevdURL) 236 | if err := localServer.ListenAndServe(); err != nil { 237 | if err == http.ErrServerClosed { 238 | log.Printf("local server shut down gracefully, exiting") 239 | } else { 240 | log.Fatalf("local server failed to start: %+v", err) 241 | } 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= 4 | cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 5 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 6 | github.com/ahmetb/pstree v0.0.0-20190815175305-245b319425b4 h1:rt6ye4Bx0gM2lrsav+MokSnpkvjxPRM/+evMhMDbPJA= 7 | github.com/ahmetb/pstree v0.0.0-20190815175305-245b319425b4/go.mod h1:pWF4KgjwKiPf3g8yn5iGVJ5Fbc6AhyJtRedMcYTkxPc= 8 | github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk= 9 | github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= 10 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 11 | github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= 12 | github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 13 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 14 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 15 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 16 | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 17 | github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 18 | github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= 19 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 20 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 21 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 22 | github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= 23 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 24 | github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 25 | github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 26 | github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg= 27 | github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= 28 | github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= 29 | github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 30 | github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 31 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 32 | github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= 33 | github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 34 | github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 35 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 36 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 37 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 38 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 39 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 40 | github.com/moby/buildkit v0.3.3 h1:7eh9tOdFSuE84Q5wvmUjXhEvqnO7nNiwja45Hr59+uc= 41 | github.com/moby/buildkit v0.3.3/go.mod h1:nnELdKPRkUAQR6pAB3mRU3+IlbqL3SSaAWqQL8k/K+4= 42 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 43 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 44 | go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= 45 | go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 46 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 47 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 48 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 49 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 50 | golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 51 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 52 | golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 53 | golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 54 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 55 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 56 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 57 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 58 | golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= 59 | golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 60 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 61 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 62 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= 63 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 64 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 65 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 66 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 67 | golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 68 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 69 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 70 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 71 | golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= 72 | golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 73 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 74 | golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 75 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 76 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 77 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 78 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 79 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 80 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 81 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 82 | golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 83 | golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 84 | google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 85 | google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= 86 | 
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= 87 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 88 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 89 | google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= 90 | google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 91 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 92 | google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 93 | google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 94 | google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= 95 | google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 96 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 97 | google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= 98 | google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 99 | gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= 100 | gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= 101 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 102 | honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 103 | honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 104 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /cmd/daemon/server.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Google LLC 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "bytes" 19 | "context" 20 | "crypto/subtle" 21 | "encoding/json" 22 | "fmt" 23 | "github.com/ahmetb/pstree" 24 | "github.com/ahmetb/rundev/lib/constants" 25 | "github.com/ahmetb/rundev/lib/fsutil" 26 | "github.com/ahmetb/rundev/lib/handlerutil" 27 | "github.com/ahmetb/rundev/lib/ignore" 28 | "github.com/ahmetb/rundev/lib/types" 29 | "github.com/bmatcuk/doublestar" 30 | "github.com/google/uuid" 31 | "github.com/kr/pretty" 32 | "github.com/pkg/errors" 33 | "io" 34 | "log" 35 | "net/http" 36 | "os" 37 | "os/exec" 38 | "runtime" 39 | "strconv" 40 | "strings" 41 | "sync" 42 | "syscall" 43 | "time" 44 | ) 45 | 46 | type cmd struct { 47 | cmd string 48 | args []string 49 | } 50 | 51 | type daemonOpts struct { 52 | clientSecret string 53 | syncDir string 54 | runCmd types.Cmd 55 | buildCmds types.BuildCmds 56 | childPort int 57 | ignores *ignore.FileIgnores 58 | portWaitTimeout time.Duration 59 | } 60 | 61 | type daemonServer struct { 62 | opts daemonOpts 63 | incarnation string 64 | portCheck portChecker 65 | 66 | procLogs *bytes.Buffer 67 | patchLock sync.RWMutex 68 | lastUpdatedFiles []string 69 | 70 | nannyLock sync.Mutex 71 | procNanny nanny 72 | } 73 | 74 | func newDaemonServer(opts daemonOpts) http.Handler { 75 | logs := new(bytes.Buffer) 76 | r := &daemonServer{ 77 | opts: opts, 78 | incarnation: uuid.New().String(), 79 | procLogs: logs, 80 | portCheck: newTCPPortChecker(opts.childPort), 81 | procNanny: newProcessNanny(opts.runCmd.Command(), opts.runCmd.Args(), procOpts{ 82 | port: opts.childPort, 83 | dir: opts.syncDir, 84 | logs: logs, 85 | }), 86 | } 87 | 88 | mux := http.NewServeMux() 89 | mux.HandleFunc("/rundevd/fsz", handlerutil.NewFSDebugHandler(r.opts.syncDir, r.opts.ignores)) 90 | mux.HandleFunc("/rundevd/debugz", r.statusHandler) 91 | mux.HandleFunc("/rundevd/procz", r.logsHandler) 92 | mux.HandleFunc("/rundevd/pstree", r.psHandler) 93 | mux.HandleFunc("/rundevd/restart", r.restartHandler) 94 | mux.HandleFunc("/rundevd/kill", r.killHandler) 95 | mux.HandleFunc("/rundevd/patch", withClientSecretAuth(opts.clientSecret, r.patch)) 96 | mux.HandleFunc("/rundevd/", handlerutil.NewUnsupportedDebugEndpointHandler()) 97 | mux.HandleFunc("/", r.reverseProxyHandler) 98 | return mux 99 | } 100 | 101 | func withClientSecretAuth(secret string, hand http.HandlerFunc) http.HandlerFunc { 102 | if secret == "" { 103 | return hand 104 | } 105 | return func(w http.ResponseWriter, req *http.Request) { 106 | h := req.Header.Get(constants.HdrRundevClientSecret) 107 | if h == "" { 108 | w.WriteHeader(http.StatusUnauthorized) 109 | fmt.Fprintf(w, "%s header not specified", constants.HdrRundevClientSecret) 110 | return 111 | } else if subtle.ConstantTimeCompare([]byte(secret), []byte(h)) != 1 { 112 | w.WriteHeader(http.StatusForbidden) 113 | fmt.Fprintf(w, "client secret (%s header) on the request not matching the one configured on the daemon", constants.HdrRundevClientSecret) 114 | return 115 | } 116 | hand(w, req) 117 | } 118 | } 119 | 120 | func (srv *daemonServer) reverseProxyHandler(w http.ResponseWriter, 
req *http.Request) {
121 | srv.patchLock.RLock()
122 | defer srv.patchLock.RUnlock()
123 | 
124 | id := uuid.New().String()
125 | rr := &responseRecorder{rw: w}
126 | w = rr
127 | start := time.Now()
128 | log.Printf("[rev proxy] request %s accepted: path=%s method=%s", id, req.URL.Path, req.Method)
129 | defer func() {
130 | log.Printf("[rev proxy] request %s complete: path=%s status=%d took=%v", id, req.URL.Path, rr.statusCode, time.Since(start))
131 | }()
132 | 
133 | reqChecksumHdr := req.Header.Get(constants.HdrRundevChecksum)
134 | if reqChecksumHdr == "" {
135 | writeErrorResp(w, http.StatusBadRequest, errors.Errorf("missing %s header from the client", constants.HdrRundevChecksum))
136 | return
137 | }
138 | reqChecksum, err := strconv.ParseUint(reqChecksumHdr, 10, 64)
139 | if err != nil {
140 | writeErrorResp(w, http.StatusBadRequest, errors.Wrapf(err, "malformed %s", constants.HdrRundevChecksum))
141 | return
142 | }
143 | 
144 | fs, err := fsutil.Walk(srv.opts.syncDir, srv.opts.ignores)
145 | if err != nil {
146 | writeErrorResp(w, http.StatusInternalServerError, errors.Wrap(err, "failed to walk the sync directory"))
147 | return
148 | }
149 | respChecksum := fs.RootChecksum()
150 | w.Header().Set(constants.HdrRundevChecksum, fmt.Sprintf("%d", respChecksum))
151 | 
152 | if respChecksum != reqChecksum {
153 | writeChecksumMismatchResp(w, fs)
154 | return
155 | }
156 | srv.nannyLock.Lock()
157 | if !srv.procNanny.Running() {
158 | log.Printf("[rev proxy] user process not running, restarting")
159 | executed := 0
160 | for i, bc := range srv.opts.buildCmds {
161 | log.Printf("[build] build cmd (%d of %d): %v", i+1, len(srv.opts.buildCmds), bc)
162 | if len(bc.On) > 0 && !matches(srv.lastUpdatedFiles, bc.On) {
163 | log.Println("[build] updated files don't match any patterns, skipping")
164 | continue
165 | }
166 | 
167 | log.Println("[build] executing build command")
168 | cmd := exec.Command(bc.C.Command(), bc.C.Args()...)
169 | cmd.Dir = srv.opts.syncDir
170 | if b, err := cmd.CombinedOutput(); err != nil {
171 | srv.nannyLock.Unlock()
172 | log.Printf("[build] build cmd failure: %s", string(b))
173 | writeProcError(w, fmt.Sprintf("executing -build-cmd (%v) failed: %s", bc, err), b)
174 | return
175 | }
176 | executed++
177 | }
178 | log.Printf("executed %d of %d build cmds", executed, len(srv.opts.buildCmds))
179 | 
180 | if err := srv.procNanny.Restart(); err != nil {
181 | // TODO return structured response for errors
182 | writeProcError(w, fmt.Sprintf("failed to start child process: %+v", err), srv.procLogs.Bytes())
183 | srv.nannyLock.Unlock()
184 | return
185 | }
186 | }
187 | srv.nannyLock.Unlock()
188 | 
189 | // wait for port to open
190 | ctx, cancel := context.WithTimeout(req.Context(), srv.opts.portWaitTimeout)
191 | defer cancel()
192 | if err := srv.portCheck.waitPort(ctx); err != nil {
193 | writeProcError(w, fmt.Sprintf("child process did not start listening on $PORT (%d) in %v", srv.opts.childPort, srv.opts.portWaitTimeout), srv.procLogs.Bytes())
194 | return
195 | }
196 | log.Println("[rev proxy] app port is ready, proxying")
197 | 
198 | req.Host = fmt.Sprintf("localhost:%d", srv.opts.childPort)
199 | 
200 | reqPath := req.URL.Path
201 | if req.URL.RawQuery != "" {
202 | reqPath += "?" + req.URL.RawQuery
203 | }
204 | defer req.Body.Close()
205 | preq, err := http.NewRequest(req.Method, fmt.Sprintf("http://localhost:%d", srv.opts.childPort)+reqPath, req.Body)
206 | if err != nil {
207 | writeProcError(w, fmt.Sprintf("failed to create reverse proxy request in rundevd:\nerror: %+v", err), nil)
208 | return
209 | }
210 | preq.Header = req.Header
211 | // reverse proxy manually
212 | resp, err := http.DefaultClient.Do(preq)
213 | if err != nil {
214 | writeProcError(w, fmt.Sprintf("user process failed while handling the request:\nerror: %+v", err), srv.procLogs.Bytes())
215 | return
216 | }
217 | // copy the response
218 | defer resp.Body.Close()
219 | for k, vals := range resp.Header {
220 | for _, v := range vals {
221 | w.Header().Add(k, v)
222 | }
223 | }
224 | w.WriteHeader(resp.StatusCode)
225 | _, _ = io.Copy(w, resp.Body)
226 | }
227 | 
228 | func (srv *daemonServer) patch(w http.ResponseWriter, req *http.Request) {
229 | if req.Method != http.MethodPatch {
230 | w.WriteHeader(http.StatusMethodNotAllowed)
231 | return
232 | }
233 | if ct := req.Header.Get("content-type"); ct != constants.MimePatch {
234 | w.WriteHeader(http.StatusUnsupportedMediaType)
235 | return
236 | }
237 | 
238 | expectedLocalChecksum := req.Header.Get(constants.HdrRundevPatchPreconditionSum)
239 | if expectedLocalChecksum == "" {
240 | w.WriteHeader(http.StatusBadRequest)
241 | fmt.Fprintf(w, "patch request did not contain %s header", constants.HdrRundevPatchPreconditionSum)
242 | return
243 | }
244 | 
245 | incomingChecksum := req.Header.Get(constants.HdrRundevChecksum)
246 | if incomingChecksum == "" {
247 | w.WriteHeader(http.StatusBadRequest)
248 | fmt.Fprintf(w, "patch request did not contain %s header", constants.HdrRundevChecksum)
249 | return
250 | }
251 | 
252 | // stop accepting new proxy or patch requests while potentially modifying fs
253 | srv.patchLock.Lock()
254 | defer srv.patchLock.Unlock()
255 | 
256 | fs, err := fsutil.Walk(srv.opts.syncDir, srv.opts.ignores)
257 | if err != nil {
258 | w.WriteHeader(http.StatusInternalServerError)
259 | fmt.Fprintf(w, "failed to fetch local filesystem: %+v", err)
260 | return
261 | }
262 | localChecksum := fmt.Sprintf("%d", fs.RootChecksum())
263 | if localChecksum == incomingChecksum {
264 | // no-op, already in sync
265 | w.WriteHeader(http.StatusAccepted)
266 | return
267 | }
268 | if localChecksum != expectedLocalChecksum {
269 | w.Header().Set(constants.HdrRundevChecksum, localChecksum)
270 | w.WriteHeader(http.StatusPreconditionFailed)
271 | return
272 | }
273 | log.Printf("applying patch (%s)", incomingChecksum)
274 | defer req.Body.Close()
275 | updated, err := fsutil.ApplyPatch(srv.opts.syncDir, req.Body)
276 | if err != nil {
277 | w.WriteHeader(http.StatusInternalServerError)
278 | fmt.Fprintf(w, "failed to uncompress patch tar: %+v", err)
279 | return
280 | }
281 | srv.lastUpdatedFiles = updated
282 | 
283 | log.Printf("patch applied, killing process")
284 | 
285 | srv.nannyLock.Lock()
286 | srv.procNanny.Kill() // restart the process on next proxied request
287 | srv.nannyLock.Unlock()
288 | 
289 | w.WriteHeader(http.StatusAccepted)
290 | log.Printf("completed patch (%s), responding with %d %s", incomingChecksum, http.StatusAccepted, http.StatusText(http.StatusAccepted))
291 | return
292 | }
293 | 
294 | func (srv *daemonServer) restartHandler(w http.ResponseWriter, req *http.Request) {
295 | srv.nannyLock.Lock()
296 | defer srv.nannyLock.Unlock()
297 | 
298 | if err := srv.procNanny.Restart(); err != nil {
299 |
w.WriteHeader(http.StatusInternalServerError) 300 | fmt.Fprintf(w, "error restarting process: %+v", err) 301 | return 302 | } 303 | fmt.Fprintf(w, "ok") 304 | } 305 | 306 | func (srv *daemonServer) killHandler(w http.ResponseWriter, req *http.Request) { 307 | if pid := req.URL.Query().Get("pid"); pid != "" { 308 | pp, _ := strconv.Atoi(pid) 309 | if err := syscall.Kill(pp, syscall.SIGKILL); err != nil { 310 | w.WriteHeader(http.StatusInternalServerError) 311 | fmt.Fprintf(w, "failed to kill: %+v", err) 312 | return 313 | } 314 | fmt.Fprintf(w, "killed %d", pp) 315 | return 316 | } 317 | 318 | srv.nannyLock.Lock() 319 | srv.procNanny.Kill() 320 | fmt.Fprintf(w, "killed child process (if it was running)") 321 | srv.nannyLock.Unlock() 322 | } 323 | 324 | func (srv *daemonServer) logsHandler(w http.ResponseWriter, req *http.Request) { 325 | srv.nannyLock.Lock() 326 | defer srv.nannyLock.Unlock() 327 | b := srv.procLogs.Bytes() 328 | _, _ = w.Write(b) 329 | } 330 | 331 | func (srv *daemonServer) psHandler(w http.ResponseWriter, req *http.Request) { 332 | if runtime.GOOS != "linux" { 333 | fmt.Fprintf(w, "pstree not available on %q", runtime.GOOS) 334 | return 335 | } 336 | pids, err := pstree.New() 337 | if err != nil { 338 | fmt.Fprintf(w, "failed to get pstree: %+v", err) 339 | } 340 | var display func(io.Writer, int, int) 341 | display = func(out io.Writer, pid int, indent int) { 342 | proc := pids.Procs[pid] 343 | pp := fmt.Sprintf("pid=%d [ppid=%d,pgrp=%d] (%c) %s", proc.Stat.Pid, proc.Stat.Ppid, proc.Stat.Pgrp, proc.Stat.State, proc.Name) 344 | prefix := strings.Repeat(" ", indent) 345 | fmt.Fprintf(out, "%s%s\n", prefix, pp) 346 | for _, cid := range pids.Procs[pid].Children { 347 | display(out, cid, indent+1) 348 | } 349 | } 350 | display(w, 1, 0) 351 | } 352 | 353 | func (srv *daemonServer) statusHandler(w http.ResponseWriter, req *http.Request) { 354 | fs, err := fsutil.Walk(srv.opts.syncDir, srv.opts.ignores) 355 | if err != nil { 356 | w.WriteHeader(http.StatusInternalServerError) 357 | fmt.Fprintf(w, "failed to fetch local filesystem: %+v", err) 358 | return 359 | } 360 | fmt.Fprintf(w, "fs checksum: %v\n", fs.RootChecksum()) 361 | fmt.Fprintf(w, "pid: %d\n", os.Getpid()) 362 | fmt.Fprintf(w, "incarnation: %s\n", srv.incarnation) 363 | wd, _ := os.Getwd() 364 | fmt.Fprintf(w, "cwd: %s\n", wd) 365 | fmt.Fprintf(w, "child process running: %v\n", srv.procNanny.Running()) 366 | fmt.Fprint(w, "opts:\n") 367 | fmt.Fprintf(w, " ignores: %# v\n", pretty.Formatter(srv.opts.ignores)) 368 | fmt.Fprintf(w, " port wait timeout: %# v\n", pretty.Formatter(srv.opts.portWaitTimeout)) 369 | fmt.Fprintf(w, " run-cmd: %# v\n", pretty.Formatter(srv.opts.runCmd)) 370 | fmt.Fprintln(w, " build-cmds:") 371 | for _, v := range srv.opts.buildCmds { 372 | fmt.Fprintf(w, " -> %s (on: %s)\n", pretty.Formatter(v.C), pretty.Formatter(v.On)) 373 | } 374 | } 375 | 376 | func writeProcError(w http.ResponseWriter, msg string, logs []byte) { 377 | w.Header().Set("Content-Type", constants.MimeProcessError) 378 | w.WriteHeader(http.StatusInternalServerError) 379 | resp := types.ProcError{ 380 | Message: msg, 381 | Output: string(logs), 382 | } 383 | e := json.NewEncoder(w) 384 | e.SetIndent("", " ") 385 | if err := e.Encode(resp); err != nil { 386 | log.Printf("[WARNING] failed to encode process error into response body: %+v", err) 387 | } 388 | } 389 | 390 | func writeErrorResp(w http.ResponseWriter, code int, err error) { 391 | w.WriteHeader(code) 392 | fmt.Fprint(w, err.Error()) 393 | } 394 | 395 | func 
writeChecksumMismatchResp(w http.ResponseWriter, fs fsutil.FSNode) { 396 | w.Header().Set(constants.HdrRundevChecksum, fmt.Sprintf("%d", fs.RootChecksum())) 397 | w.Header().Set("Content-Type", constants.MimeChecksumMismatch) 398 | w.WriteHeader(http.StatusPreconditionFailed) 399 | 400 | var b bytes.Buffer 401 | if err := json.NewEncoder(&b).Encode(fs); err != nil { 402 | log.Printf("WARNING: %+v", errors.Wrap(err, "error while marshaling remote fs")) 403 | } 404 | _, _ = io.Copy(w, &b) 405 | } 406 | 407 | // matches checks any of the files match any of the patterns 408 | func matches(files, patterns []string) bool { 409 | for _, f := range files { 410 | for _, p := range patterns { 411 | if ok, _ := doublestar.Match(p, f); ok { 412 | return true 413 | } 414 | } 415 | } 416 | return false 417 | } 418 | 419 | type responseRecorder struct { 420 | rw http.ResponseWriter 421 | statusCode int 422 | } 423 | 424 | func (rr *responseRecorder) Header() http.Header { return rr.rw.Header() } 425 | func (rr *responseRecorder) Write(b []byte) (int, error) { return rr.rw.Write(b) } 426 | func (rr *responseRecorder) WriteHeader(statusCode int) { 427 | rr.statusCode = statusCode 428 | rr.rw.WriteHeader(statusCode) 429 | } 430 | --------------------------------------------------------------------------------
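Note on the /rundevd/patch contract implemented in server.go above: the request must use the PATCH method, carry Content-Type constants.MimePatch, the client secret header, the checksum of the client's tree (constants.HdrRundevChecksum), and the checksum the client expects the remote tree to currently have (constants.HdrRundevPatchPreconditionSum). The daemon replies 202 Accepted when the patch is applied (or the trees already match) and 412 Precondition Failed when the remote tree has drifted. The sketch below is only an illustration of that contract under those assumptions; the project's real client logic lives in cmd/client/syncer.go (not shown here) and may differ, and the helper name sendPatch is hypothetical.

package example

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/ahmetb/rundev/lib/constants"
)

// sendPatch is a hypothetical helper (not the project's actual syncer) showing how a
// client could call rundevd's /rundevd/patch endpoint as implemented by the handler above.
func sendPatch(daemonURL, clientSecret, localSum, expectedRemoteSum string, patch []byte) error {
	req, err := http.NewRequest(http.MethodPatch, daemonURL+"/rundevd/patch", bytes.NewReader(patch))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", constants.MimePatch)
	req.Header.Set(constants.HdrRundevClientSecret, clientSecret)
	// checksum of the tree the patch was computed from (the client's local tree)
	req.Header.Set(constants.HdrRundevChecksum, localSum)
	// checksum the remote tree must still have for the patch to apply cleanly
	req.Header.Set(constants.HdrRundevPatchPreconditionSum, expectedRemoteSum)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusAccepted:
		return nil // patch applied, or the trees were already in sync
	case http.StatusPreconditionFailed:
		// remote tree changed underneath us; the caller should re-diff against the
		// checksum echoed back by the daemon and retry
		return fmt.Errorf("remote tree drifted (checksum %s)", resp.Header.Get(constants.HdrRundevChecksum))
	default:
		return fmt.Errorf("unexpected status %d from rundevd", resp.StatusCode)
	}
}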