├── .arclint ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── Vagrantfile ├── adapter ├── basic │ └── adapter.go ├── lxc │ ├── adapter.go │ ├── adapter_test.go │ ├── cmd_wrapper.go │ ├── container.go │ ├── container_test.go │ ├── doc.go │ ├── executor.go │ └── flags.go └── util.go ├── ci ├── run_tests.sh └── setup.sh ├── client.go ├── client ├── adapter │ ├── adapter.go │ ├── adapter_test.go │ └── registry.go ├── cmd.go ├── cmd_wrapper.go ├── cmd_wrapper_test.go ├── config.go ├── config_test.go ├── filelog │ ├── filelog.go │ └── filelog_test.go ├── log.go ├── log_test.go ├── metrics.go └── reporter │ ├── defaultreporter.go │ ├── defaultreporter_test.go │ ├── noopreporter.go │ ├── registry.go │ └── reporter.go ├── cmd └── blacklist-remove │ └── blacklist-remove.go ├── common ├── atomicflag │ ├── flag.go │ └── flag_test.go ├── blacklist │ ├── blacklist.go │ ├── blacklist_test.go │ └── match.go ├── glob │ ├── glob.go │ └── glob_test.go ├── lockfile │ ├── lockfile.go │ └── lockfile_test.go ├── scopedlogger │ └── scopedlogger.go ├── sentry │ ├── sentry.go │ └── sentry_test.go ├── taggederr │ ├── taggederr.go │ └── taggederr_test.go └── version │ └── version.go ├── engine ├── engine.go ├── engine_test.go └── upstream_monitor.go ├── reporter ├── artifactstore │ ├── reporter.go │ └── reporter_test.go ├── jenkins │ └── reporter.go ├── mesos │ └── reporter.go └── multireporter │ └── reporter.go └── support ├── bootstrap-ubuntu.sh ├── bootstrap-vagrant.sh └── build.sh /.arclint: -------------------------------------------------------------------------------- 1 | { 2 | "linters": { 3 | "sample": { 4 | "type": "golint" 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | changes-client 2 | .arcconfig 3 | foo.log 4 | *.deb 5 | *.swp 6 | /.vagrant/ 7 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2014 Dropbox, Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | CHANGES_CLIENT_BIN=${GOPATH}/bin/changes-client 3 | BLACKLIST_REMOVE_BIN=${GOPATH}/bin/blacklist-remove 4 | 5 | # changes-client is dynamically linked with lxc-dev installed on the machine producing the binary. 6 | # To avoid version incompatibilities, we force the same version of lxc-dev to be installed on the 7 | # instance running changes-client too. 
8 | LXC_DEV_VERSION=`dpkg-query -W -f='$${Version}\n' lxc-dev` 9 | 10 | # Revision shows date of latest commit and abbreviated commit SHA 11 | # E.g., 1438708515-753e183 12 | REV=`git show -s --format=%ct-%h HEAD` 13 | 14 | all: 15 | @echo "Compiling changes-client" 16 | @make install 17 | @echo "changes-client linked against lxc-dev version:" $(LXC_DEV_VERSION) 18 | 19 | @echo "Setting up temp build folder" 20 | rm -rf /tmp/changes-client-build 21 | mkdir -p /tmp/changes-client-build/usr/bin 22 | cp $(CHANGES_CLIENT_BIN) /tmp/changes-client-build/usr/bin/ 23 | cp $(BLACKLIST_REMOVE_BIN) /tmp/changes-client-build/usr/bin/ 24 | 25 | @echo "Creating .deb file" 26 | fpm -s dir -t deb -n "changes-client" -v "`$(CHANGES_CLIENT_BIN) --version`" -C /tmp/changes-client-build \ 27 | --depends "lxc-dev (=$(LXC_DEV_VERSION))" -m dev-tools@dropbox.com --provides changes-client \ 28 | --description "A build client for Changes" --url https://www.github.com/dropbox/changes-client . 29 | 30 | test: 31 | @echo "==> Running tests" 32 | sudo GOPATH=${GOPATH} `which go` test -v ./... -timeout=120s -race 33 | 34 | 35 | dev: 36 | @make deps 37 | 38 | @echo "==> Building..." 39 | go build -v ./... 40 | 41 | 42 | install: 43 | go clean -i ./... 44 | go install -tags '$(TAGS)' -ldflags "-X github.com/dropbox/changes-client/common/version.gitVersion=$(REV)" -v ./... 45 | 46 | 47 | 48 | deps: 49 | @echo "==> Getting dependencies..." 50 | go get -v -u gopkg.in/lxc/go-lxc.v2 51 | go get -v -t ./... 52 | @echo "==> Caching base LXC image for tests" 53 | sudo lxc-create -n bootstrap -t ubuntu || true 54 | 55 | 56 | fmt: 57 | go fmt ./... 58 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ***NOTICE: THIS REPO IS NO LONGER UPDATED*** 2 | 3 | 4 | Changes Client 5 | ============== 6 | 7 | Can be used to run arbitrary commands and report to changes 8 | or jenkins. 
You will need standard Go code setup to compile this. 9 | 10 | Setup 11 | ----- 12 | 13 | ``` 14 | mkdir -p go/src go/bin go/pkg 15 | cd go 16 | export GOPATH=`pwd` 17 | 18 | go get github.com/dropbox/changes-client 19 | ``` 20 | 21 | Build 22 | ----- 23 | 24 | ``` 25 | go install github.com/dropbox/changes-client/client 26 | ``` 27 | 28 | The binary will be installed at `./bin/client` folder 29 | 30 | 31 | Example Run 32 | ----------- 33 | 34 | 35 | ``` 36 | ./bin/client --server "https://changes.build.itc.dropbox.com/api/0" --jobstep_id "bbc9a199-1b36-4f7d-9072-3974f32fdb1b" 37 | ``` 38 | 39 | > NOTE: There is no `/` at the end of `--server` 40 | 41 | 42 | Development 43 | ----------- 44 | 45 | A Vagrant VM is included to make development easy: 46 | 47 | ``` 48 | $ vagrant up --provision 49 | ``` 50 | 51 | Jump into the VM with `vagrant ssh`, and then use the `work` alias to hop into the environment: 52 | 53 | ``` 54 | $ work 55 | $ make dev 56 | $ make test 57 | ``` 58 | 59 | 60 | Building package 61 | ---------------- 62 | 63 | We use [fpm](https://github.com/jordansissel/fpm) to build our deb file. 64 | 65 | ``` 66 | $ work 67 | $ make 68 | ``` 69 | 70 | Thats it. A `.deb` file should be available as changes-client\_$VERSION\_amd64.deb 71 | 72 | Note that the LXC you build against needs to match prod. 73 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
5 | VAGRANTFILE_API_VERSION = "2" 6 | 7 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 8 | config.vm.box = "ubuntu/trusty64" 9 | 10 | config.vm.provider "virtualbox" do |v| 11 | v.memory = 2048 12 | v.cpus = 4 13 | end 14 | 15 | config.ssh.forward_agent = true 16 | 17 | config.vm.synced_folder "./", "/home/vagrant/src/github.com/dropbox/changes-client", owner: "vagrant", group: "vagrant" 18 | 19 | config.vm.provision :shell, :privileged => false, :path => "support/bootstrap-vagrant.sh" 20 | end 21 | -------------------------------------------------------------------------------- /adapter/basic/adapter.go: -------------------------------------------------------------------------------- 1 | package basic 2 | 3 | import ( 4 | "path/filepath" 5 | 6 | autil "github.com/dropbox/changes-client/adapter" 7 | "github.com/dropbox/changes-client/client" 8 | "github.com/dropbox/changes-client/client/adapter" 9 | ) 10 | 11 | type Adapter struct { 12 | config *client.Config 13 | workspace string 14 | } 15 | 16 | func (a *Adapter) Init(config *client.Config) error { 17 | if workspace, err := filepath.Abs(config.ArtifactSearchPath); err != nil { 18 | return err 19 | } else { 20 | a.workspace = workspace 21 | } 22 | 23 | a.config = config 24 | return nil 25 | } 26 | 27 | // Prepare the environment for future commands. This is run before any 28 | // commands are processed and is run once. 29 | func (a *Adapter) Prepare(clientLog *client.Log) (client.Metrics, error) { 30 | return nil, nil 31 | } 32 | 33 | // Runs a given command. This may be called multiple times depending 34 | func (a *Adapter) Run(cmd *client.Command, clientLog *client.Log) (*client.CommandResult, error) { 35 | cw := client.NewCmdWrapper([]string{cmd.Path}, cmd.Cwd, cmd.Env) 36 | return cw.Run(cmd.CaptureOutput, clientLog) 37 | } 38 | 39 | // Perform any cleanup actions within the environment. 
40 | func (a *Adapter) Shutdown(clientLog *client.Log) (client.Metrics, error) { 41 | return nil, nil 42 | } 43 | 44 | // If applicable, capture a snapshot of the workspace for later re-use 45 | func (a *Adapter) CaptureSnapshot(outputSnapshot string, clientLog *client.Log) error { 46 | return nil 47 | } 48 | 49 | func (a *Adapter) GetRootFs() string { 50 | return "/" 51 | } 52 | 53 | func (a *Adapter) CollectArtifacts(artifacts []string, clientLog *client.Log) ([]string, error) { 54 | return autil.CollectArtifactsIn(a.workspace, artifacts, clientLog) 55 | } 56 | 57 | func (a *Adapter) GetArtifactRoot() string { 58 | return a.workspace 59 | } 60 | 61 | func New() adapter.Adapter { 62 | return &Adapter{} 63 | } 64 | 65 | func init() { 66 | adapter.Register("basic", New) 67 | } 68 | -------------------------------------------------------------------------------- /adapter/lxc/adapter.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | package lxcadapter 4 | 5 | import ( 6 | "fmt" 7 | "io/ioutil" 8 | "log" 9 | "os" 10 | "path/filepath" 11 | "strings" 12 | "time" 13 | 14 | autil "github.com/dropbox/changes-client/adapter" 15 | "github.com/dropbox/changes-client/client" 16 | "github.com/dropbox/changes-client/client/adapter" 17 | "github.com/dropbox/changes-client/common/sentry" 18 | "gopkg.in/lxc/go-lxc.v2" 19 | ) 20 | 21 | type Adapter struct { 22 | config *client.Config 23 | container *Container 24 | artifactSource string 25 | } 26 | 27 | func (a *Adapter) Init(config *client.Config) error { 28 | snapshot := config.Snapshot.ID 29 | if snapshot != "" { 30 | if s3Bucket == "" { 31 | log.Print("[lxc] WARNING: s3bucket is not defined, snapshot ignored") 32 | snapshot = "" 33 | } else { 34 | snapshot = adapter.FormatUUID(snapshot) 35 | } 36 | } 37 | 38 | // In reality our goal is to make a switch completely to lz4, but we need to retain 39 | // compatibility with mesos builds for now, so we default to "xz" 
and also try 40 | // to not uncleanly die if its set to a weird value, also setting it to "xz." 41 | if compression != "xz" && compression != "lz4" { 42 | compression = "xz" 43 | log.Printf("[lxc] Warning: invalid compression %s, defaulting to lzma", compression) 44 | } 45 | if executorName == "" { 46 | // default executorName to process id if none is given 47 | executorName = fmt.Sprintf("pid-%d", os.Getpid()) 48 | } 49 | executor := &Executor{ 50 | Name: executorName, 51 | Directory: executorPath, 52 | } 53 | 54 | inputMountSource, err := ioutil.TempDir("", "changes-client-input-") 55 | if err != nil { 56 | return err 57 | } 58 | if err := os.Chmod(inputMountSource, 0755); err != nil { 59 | return err 60 | } 61 | // Dest must be a relative path 62 | inputMount := &BindMount{Source: inputMountSource, Dest: strings.TrimLeft(containerInputDirectory, "/"), Options: "ro,create=dir"} 63 | mounts := []*BindMount{inputMount} 64 | if bindMounts != "" { 65 | mountStrings := strings.Split(bindMounts, ",") 66 | for _, ms := range mountStrings { 67 | if mount, err := ParseBindMount(ms); err != nil { 68 | return err 69 | } else { 70 | mounts = append(mounts, mount) 71 | } 72 | } 73 | } 74 | 75 | mergeLimits := func(v int, other *int) int { 76 | if other != nil { 77 | if v == 0 || *other < v { 78 | return *other 79 | } 80 | } 81 | return v 82 | } 83 | 84 | cpuLimit := mergeLimits(cpus, config.ResourceLimits.Cpus) 85 | memoryLimit := mergeLimits(memory, config.ResourceLimits.Memory) 86 | 87 | container := &Container{ 88 | Name: config.JobstepID, 89 | Arch: arch, 90 | Dist: dist, 91 | Release: release, 92 | PreLaunch: preLaunch, 93 | PostLaunch: postLaunch, 94 | Snapshot: snapshot, 95 | OutputSnapshot: config.ExpectedSnapshot.ID, 96 | // TODO(dcramer): Move S3 logic into core engine 97 | S3Bucket: s3Bucket, 98 | MemoryLimit: memoryLimit, 99 | CpuLimit: cpuLimit, 100 | Compression: compression, 101 | Executor: executor, 102 | BindMounts: mounts, 103 | InputMountSource: 
inputMountSource, 104 | ImageCacheDir: "/var/cache/lxc/download", 105 | } 106 | 107 | // DebugConfig limits override standard config. 108 | var limits struct { 109 | CpuLimit *int 110 | MemoryLimit *int 111 | } 112 | if ok, err := config.GetDebugConfig("resourceLimits", &limits); err != nil { 113 | log.Printf("[lxc] %s", err) 114 | } else if ok { 115 | if limits.CpuLimit != nil { 116 | container.CpuLimit = *limits.CpuLimit 117 | } 118 | if limits.MemoryLimit != nil { 119 | container.MemoryLimit = *limits.MemoryLimit 120 | } 121 | } 122 | 123 | if _, err := config.GetDebugConfig("prelaunch_env", &container.preLaunchEnv); err != nil { 124 | log.Printf("[lxc] Failed to parse prelaunch_env: %s", err) 125 | } 126 | if _, err := config.GetDebugConfig("postlaunch_env", &container.postLaunchEnv); err != nil { 127 | log.Printf("[lxc] Failed to parse postlaunch_env: %s", err) 128 | } 129 | 130 | a.config = config 131 | a.container = container 132 | 133 | return nil 134 | } 135 | 136 | // Prepare the environment for future commands. This is run before any 137 | // commands are processed and is run once. 138 | func (a *Adapter) Prepare(clientLog *client.Log) (client.Metrics, error) { 139 | clientLog.Printf("LXC version: %s", lxc.Version()) 140 | metrics, err := a.container.Launch(clientLog) 141 | if err != nil { 142 | return metrics, err 143 | } 144 | 145 | containerArtifactSource := a.config.ArtifactSearchPath 146 | // ensure path is absolute 147 | if !filepath.IsAbs(containerArtifactSource) { 148 | containerArtifactSource = filepath.Join("/home/ubuntu", containerArtifactSource) 149 | } 150 | a.artifactSource = filepath.Join(a.container.RootFs(), containerArtifactSource) 151 | return metrics, err 152 | } 153 | 154 | // Runs a given command. 
This may be called multiple times depending 155 | func (a *Adapter) Run(cmd *client.Command, clientLog *client.Log) (*client.CommandResult, error) { 156 | return a.container.RunCommandInContainer(cmd, clientLog, "ubuntu") 157 | } 158 | 159 | // Perform any cleanup actions within the environment. 160 | func (a *Adapter) Shutdown(clientLog *client.Log) (client.Metrics, error) { 161 | const timeout = 30 * time.Second 162 | timer := time.AfterFunc(timeout, func() { 163 | sentry.Message(fmt.Sprintf("Took more than %s to shutdown LXC adapter", timeout), map[string]string{}) 164 | }) 165 | defer timer.Stop() 166 | metrics := a.container.logResourceUsageStats() 167 | if keepContainer || a.container.ShouldKeep() || shouldDebugKeep(clientLog, a.config) { 168 | defer a.container.Executor.Deregister() 169 | 170 | // Create a "named executor" which will never get cleaned 171 | // up by changes-client but allows the outside environment 172 | // to recognize that this container is still associated 173 | // with changes-client. 174 | // 175 | // This executor has the same name as the container rather 176 | // than the executor identifier provided by command-line 177 | // flags. The container name is generally unique as it 178 | // corresponds to a jobstep, unlike the executor identifier 179 | // which is defined to not be unique. 180 | executor := Executor{ 181 | Name: a.container.Name, 182 | Directory: a.container.Executor.Directory, 183 | } 184 | return metrics, executor.Register(a.container.Name) 185 | } 186 | if err := a.container.Destroy(); err != nil { 187 | return metrics, err 188 | } 189 | // remove our input bind mount 190 | return metrics, os.RemoveAll(a.container.InputMountSource) 191 | } 192 | 193 | // Parses debugConfig.lxc_keep_container_end_rfc3339 as an RFC3339 timestamp. 
194 | // Example: "2015-10-08T19:31:56Z" or "2015-10-08T12:32:19-07:00" 195 | func shouldDebugKeep(clientLog *client.Log, cfg *client.Config) bool { 196 | const key = "lxc_keep_container_end_rfc3339" 197 | var keepEndtime string 198 | if ok, err := cfg.GetDebugConfig(key, &keepEndtime); err != nil { 199 | clientLog.Printf("[lxc] %s", err) 200 | return false 201 | } else if !ok { 202 | return false 203 | } 204 | endTime, err := time.Parse(time.RFC3339, keepEndtime) 205 | if err != nil { 206 | clientLog.Printf("[lxc] Couldn't parse %s %q as time: %s", key, keepEndtime, err) 207 | return false 208 | } 209 | return time.Now().Before(endTime) 210 | } 211 | 212 | func (a *Adapter) CaptureSnapshot(outputSnapshot string, clientLog *client.Log) error { 213 | outputSnapshot = adapter.FormatUUID(outputSnapshot) 214 | 215 | // Ensure the new snapshot image is associated with an executor file while 216 | // it is being created and uploaded so that we know it is being used. 217 | executor := Executor{ 218 | Name: a.container.Name + "-snapshot", 219 | Directory: a.container.Executor.Directory, 220 | } 221 | executor.Clean() 222 | if err := executor.Register(outputSnapshot); err != nil { 223 | return err 224 | } 225 | defer executor.Deregister() 226 | 227 | if err := a.container.CreateImage(outputSnapshot, clientLog); err != nil { 228 | return err 229 | } 230 | 231 | if a.container.S3Bucket != "" { 232 | if err := a.container.UploadImage(outputSnapshot, clientLog); err != nil { 233 | return err 234 | } 235 | } else { 236 | log.Printf("[lxc] warning: cannot upload snapshot, no s3 bucket specified") 237 | } 238 | return nil 239 | } 240 | 241 | func (a *Adapter) GetRootFs() string { 242 | return a.container.RootFs() 243 | } 244 | 245 | func (a *Adapter) CollectArtifacts(artifacts []string, clientLog *client.Log) ([]string, error) { 246 | log.Printf("[lxc] Searching for %s in %s", artifacts, a.artifactSource) 247 | return autil.CollectArtifactsIn(a.artifactSource, artifacts, 
clientLog) 248 | } 249 | 250 | func (a *Adapter) GetArtifactRoot() string { 251 | return a.artifactSource 252 | } 253 | 254 | func New() adapter.Adapter { 255 | return &Adapter{} 256 | } 257 | 258 | func init() { 259 | adapter.Register("lxc", New) 260 | } 261 | -------------------------------------------------------------------------------- /adapter/lxc/adapter_test.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | package lxcadapter 4 | 5 | import ( 6 | "io/ioutil" 7 | "log" 8 | "os" 9 | "sync" 10 | "testing" 11 | "time" 12 | 13 | "github.com/dropbox/changes-client/client" 14 | "github.com/dropbox/changes-client/client/adapter" 15 | "github.com/hashicorp/go-version" 16 | "gopkg.in/lxc/go-lxc.v2" 17 | 18 | "github.com/stretchr/testify/assert" 19 | "github.com/stretchr/testify/require" 20 | ) 21 | 22 | // we want to output the log from running the container 23 | func reportLogChunks(clientLog *client.Log) { 24 | for chunk, ok := clientLog.GetChunk(); ok; chunk, ok = clientLog.GetChunk() { 25 | log.Print(string(chunk)) 26 | } 27 | } 28 | 29 | func ensureContainerRemoved(t *testing.T, name string) { 30 | container, err := lxc.NewContainer(name, lxc.DefaultConfigPath()) 31 | require.NoError(t, err) 32 | defer lxc.Release(container) 33 | 34 | if container.Running() { 35 | log.Println("Existing test container running. Executing Stop()") 36 | require.NoError(t, container.Stop()) 37 | } 38 | require.Equal(t, container.Running(), false) 39 | 40 | if container.Defined() { 41 | log.Println("Existing test container present. Executing Destroy()") 42 | require.NoError(t, container.Destroy()) 43 | } 44 | require.False(t, container.Defined()) 45 | } 46 | 47 | // For compatibility with existing deployments, any build of changes-client that uses 48 | // the LXC adapter must use LXC at this version or above. 
49 | const minimumVersion = "1.1.2" 50 | 51 | func TestLxcVersion(t *testing.T) { 52 | minVers, e := version.NewVersion(minimumVersion) 53 | if e != nil { 54 | panic(e) 55 | } 56 | currentVers, e := version.NewVersion(lxc.Version()) 57 | require.Nil(t, e, "Couldn't can't parse LXC version %q; %s", lxc.Version(), e) 58 | require.False(t, currentVers.LessThan(minVers), "Version must be >= %s; was %s", minimumVersion, lxc.Version()) 59 | } 60 | 61 | const containerName = "84e6165919c04514a330fe789f367007" 62 | 63 | func TestCompleteFlow(t *testing.T) { 64 | executorTmpDir, err := ioutil.TempDir("/tmp", "adapter_test_executors") 65 | executorPath = executorTmpDir 66 | require.NoError(t, err) 67 | defer os.RemoveAll(executorPath) 68 | 69 | ensureContainerRemoved(t, containerName) 70 | 71 | clientLog := client.NewLog() 72 | adapter, err := adapter.Create("lxc") 73 | require.NoError(t, err) 74 | 75 | wg := sync.WaitGroup{} 76 | 77 | wg.Add(1) 78 | go func() { 79 | defer wg.Done() 80 | reportLogChunks(clientLog) 81 | }() 82 | 83 | config := &client.Config{ 84 | JobstepID: containerName, 85 | } 86 | 87 | require.NoError(t, adapter.Init(config)) 88 | 89 | // Set CpuLimit and MemoryLimit. 90 | // These values are usually set via flags that set `cpus` and `memory`. 91 | // This is to sanity check that the container doesn't fail to start with 92 | // reasonable values and our code for setting configs doesn't error out. 93 | // TODO: Should have tests that verify that these values have the desired effects. 
94 | lxcAdapter, ok := adapter.(*Adapter) 95 | require.True(t, ok) 96 | lxcAdapter.container.CpuLimit = 1 97 | lxcAdapter.container.MemoryLimit = 512 98 | 99 | _, err = adapter.Prepare(clientLog) 100 | require.NoError(t, err) 101 | 102 | cmd, err := client.NewCommand("test", "#!/bin/bash -e\necho hello > foo.txt\nexit 0") 103 | require.NoError(t, err) 104 | 105 | var result *client.CommandResult 106 | result, err = adapter.Run(cmd, clientLog) 107 | require.NoError(t, err) 108 | require.Equal(t, "", string(result.Output)) 109 | require.True(t, result.Success) 110 | 111 | cmd, err = client.NewCommand("test", "#!/bin/bash -e\necho $HOME\nexit 0") 112 | cmd.CaptureOutput = true 113 | require.NoError(t, err) 114 | 115 | result, err = adapter.Run(cmd, clientLog) 116 | require.NoError(t, err) 117 | require.Equal(t, "/home/ubuntu\n", string(result.Output)) 118 | require.True(t, result.Success) 119 | 120 | cmd, err = client.NewCommand("test", "#!/bin/bash -e\ndd if=/dev/zero of=test.img bs=1M count=10 && mkfs.ext4 -b 1024 -j -F test.img && sudo mount -v -o loop test.img /mnt") 121 | cmd.CaptureOutput = true 122 | require.NoError(t, err) 123 | 124 | result, err = adapter.Run(cmd, clientLog) 125 | require.NoError(t, err) 126 | require.True(t, result.Success) 127 | 128 | // test with a command that expects stdin 129 | cmd, err = client.NewCommand("test", "#!/bin/bash -e\nread foo\nexit 1") 130 | require.NoError(t, err) 131 | 132 | result, err = adapter.Run(cmd, clientLog) 133 | require.NoError(t, err) 134 | require.Equal(t, "", string(result.Output)) 135 | require.False(t, result.Success) 136 | 137 | artifacts, err := adapter.CollectArtifacts([]string{"foo.txt"}, clientLog) 138 | require.NoError(t, err) 139 | require.Equal(t, 1, len(artifacts)) 140 | require.Regexp(t, ".*/home/ubuntu/foo.txt", artifacts[0]) 141 | 142 | // test that blacklist-remove is successfully mounted in the container 143 | // and can be run inside it. 
144 | cmd, err = client.NewCommand("test", "/var/changes/input/blacklist-remove nonexistent.yaml") 145 | require.NoError(t, err) 146 | 147 | result, err = adapter.Run(cmd, clientLog) 148 | require.NoError(t, err) 149 | // running blacklist-remove with a nonexistent yaml file should print 150 | // a message and succeed 151 | require.True(t, result.Success) 152 | 153 | _, shutdownErr := adapter.Shutdown(clientLog) 154 | require.NoError(t, shutdownErr) 155 | 156 | clientLog.Close() 157 | 158 | wg.Wait() 159 | } 160 | 161 | func TestDebugKeep(t *testing.T) { 162 | clientLog := client.NewLog() 163 | go func() { 164 | clientLog.Drain() 165 | }() 166 | { 167 | future := time.Now().Add(10 * time.Minute) 168 | cfg1, e := client.LoadConfig([]byte(`{"debugConfig":{"lxc_keep_container_end_rfc3339": "` + future.Format(time.RFC3339) + `"}}`)) 169 | if e != nil { 170 | panic(e) 171 | } 172 | assert.True(t, shouldDebugKeep(clientLog, cfg1)) 173 | } 174 | 175 | { 176 | past := time.Now().Add(-10 * time.Minute) 177 | cfg2, e := client.LoadConfig([]byte(`{"debugConfig":{"lxc_keep_container_end_rfc3339": "` + past.Format(time.RFC3339) + `"}}`)) 178 | if e != nil { 179 | panic(e) 180 | } 181 | assert.False(t, shouldDebugKeep(clientLog, cfg2)) 182 | } 183 | 184 | assert.False(t, shouldDebugKeep(clientLog, new(client.Config))) 185 | } 186 | 187 | func TestDebugConfigInit(t *testing.T) { 188 | adapter, err := adapter.Create("lxc") 189 | require.NoError(t, err) 190 | 191 | config, e := client.LoadConfig([]byte(`{"debugConfig":{"resourceLimits": {"cpuLimit": 3, "memoryLimit": 9}}}`)) 192 | require.NoError(t, e) 193 | config.JobstepID = containerName 194 | require.NoError(t, adapter.Init(config)) 195 | require.Equal(t, 3, adapter.(*Adapter).container.CpuLimit) 196 | require.Equal(t, 9, adapter.(*Adapter).container.MemoryLimit) 197 | } 198 | 199 | func makeResetFunc(v *int) func() { 200 | val := *v 201 | return func() { 202 | *v = val 203 | } 204 | } 205 | 206 | func TestResourceLimitsInit(t 
*testing.T) { 207 | defer makeResetFunc(&cpus)() 208 | defer makeResetFunc(&memory)() 209 | ptrto := func(v int) *int { return &v } 210 | cases := []struct { 211 | CpusFlag int 212 | MemoryFlag int 213 | ResourceLimits client.ResourceLimits 214 | ExpectedCpus int 215 | ExpectedMemory int 216 | }{ 217 | {CpusFlag: 0, MemoryFlag: 0, ResourceLimits: client.ResourceLimits{}, 218 | ExpectedCpus: 0, ExpectedMemory: 0}, 219 | {CpusFlag: 8, MemoryFlag: 8000, 220 | ResourceLimits: client.ResourceLimits{Cpus: ptrto(4), Memory: ptrto(7000)}, 221 | ExpectedCpus: 4, ExpectedMemory: 7000}, 222 | {CpusFlag: 4, MemoryFlag: 7000, 223 | ResourceLimits: client.ResourceLimits{Cpus: ptrto(8), Memory: ptrto(8000)}, 224 | ExpectedCpus: 4, ExpectedMemory: 7000}, 225 | {CpusFlag: 0, MemoryFlag: 8000, 226 | ResourceLimits: client.ResourceLimits{Cpus: ptrto(4)}, 227 | ExpectedCpus: 4, ExpectedMemory: 8000}, 228 | } 229 | for i, c := range cases { 230 | cpus, memory = c.CpusFlag, c.MemoryFlag 231 | adapter, err := adapter.Create("lxc") 232 | require.NoError(t, err) 233 | 234 | var config client.Config 235 | config.ResourceLimits = c.ResourceLimits 236 | config.JobstepID = containerName 237 | assert.NoError(t, adapter.Init(&config)) 238 | lxcadapter := adapter.(*Adapter) 239 | assert.Equal(t, c.ExpectedCpus, lxcadapter.container.CpuLimit, "CpuLimit (case %v: %+v)", i, c) 240 | assert.Equal(t, c.ExpectedMemory, lxcadapter.container.MemoryLimit, "MemoryLimit (case %v: %+v)", i, c) 241 | } 242 | } 243 | -------------------------------------------------------------------------------- /adapter/lxc/cmd_wrapper.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | package lxcadapter 4 | 5 | import ( 6 | "bytes" 7 | "fmt" 8 | "io" 9 | "log" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/dropbox/changes-client/client" 17 | "gopkg.in/lxc/go-lxc.v2" 18 | ) 19 | 20 | type LxcCommand struct { 21 | 
Args []string 22 | User string 23 | Env []string 24 | Cwd string 25 | } 26 | 27 | func NewLxcCommand(args []string, user string) *LxcCommand { 28 | return &LxcCommand{ 29 | Args: args, 30 | User: user, 31 | } 32 | } 33 | 34 | // timedWait runs fn in a goroutine and returns nil once it completes, or an 35 | // error if it has not completed within timeout (fn is left running). 36 | func timedWait(fn func(), timeout time.Duration) error { 37 | complete := make(chan bool) 38 | 39 | go func() { 40 | fn() 41 | complete <- true 42 | }() 43 | 44 | select { 45 | case <-time.After(timeout): 46 | // Format the timeout, not fn: formatting a function value with %v is the 47 | // vet 'printf' failure that forces -printf=false in ci/run_tests.sh. 48 | return fmt.Errorf("Timed out after %s waiting for function to complete", timeout) 49 | case <-complete: 50 | return nil 51 | } 52 | } 53 | 54 | func (cw *LxcCommand) Run(captureOutput bool, clientLog *client.Log, container *lxc.Container) (*client.CommandResult, error) { 55 | clientLog.Printf("==> Executing %s", strings.Join(cw.Args, " ")) 56 | 57 | inreader, inwriter, err := os.Pipe() 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | cmdreader, cmdwriter, err := os.Pipe() 63 | if err != nil { 64 | return nil, err 65 | } 66 | 67 | var buffer *bytes.Buffer 68 | var reader io.Reader = cmdreader 69 | 70 | // If user has requested to buffer command output, tee output to in memory buffer.
67 | if captureOutput { 68 | buffer = &bytes.Buffer{} 69 | reader = io.TeeReader(cmdreader, buffer) 70 | } 71 | 72 | cmdwriterFd := cmdwriter.Fd() 73 | 74 | inreader.Close() 75 | inwriter.Close() 76 | 77 | cmdAsUser := generateCommand(cw.Args, cw.User) 78 | 79 | homeDir := getHomeDir(cw.User) 80 | 81 | cwd := cw.Cwd 82 | // ensure cwd is an absolute path 83 | if !filepath.IsAbs(cwd) { 84 | cwd = filepath.Join(homeDir, cwd) 85 | } 86 | 87 | env := []string{ 88 | fmt.Sprintf("USER=%s", cw.User), 89 | // TODO(dcramer): HOME is pretty hacky here 90 | fmt.Sprintf("HOME=%s", homeDir), 91 | fmt.Sprintf("PWD=%s", cwd), 92 | "DEBIAN_FRONTEND=noninteractive", 93 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 94 | } 95 | env = append(env, cw.Env...) 96 | 97 | 98 | 99 | var clientLogClosed sync.WaitGroup 100 | clientLogClosed.Add(1) 101 | go func() { 102 | defer clientLogClosed.Done() 103 | clientLog.WriteStream(reader) 104 | }() 105 | 106 | log.Printf("[lxc] Executing %s from [%s]", cmdAsUser, cwd) 107 | exitCode, err := container.RunCommandStatus(cmdAsUser, lxc.AttachOptions{ 108 | StdinFd: inwriter.Fd(), 109 | StdoutFd: cmdwriterFd, 110 | StderrFd: cmdwriterFd, 111 | Env: env, 112 | Cwd: cwd, 113 | Arch: lxc.X86_64, 114 | Namespaces: -1, 115 | UID: -1, 116 | GID: -1, 117 | ClearEnv: true, 118 | }) 119 | if err != nil { 120 | clientLog.Printf("Running the command failed: %s", err) 121 | cmdwriter.Close() 122 | return nil, err 123 | } 124 | 125 | // Wait 10 seconds for the pipe to close. If it doesn't we give up on actually closing 126 | // as a child process might be causing things to stick around. 127 | // XXX: this logic is duplicated in client.CmdWrapper 128 | if timedWait(func() { 129 | if err := cmdwriter.Close(); err != nil { 130 | clientLog.Printf("Error closing writer FD: %s", err) 131 | } 132 | }, 10*time.Second) != nil { 133 | clientLog.Printf("Failed to close all file descriptors! 
Ignoring and moving on..") 134 | } 135 | 136 | // If the container dup'd the file descriptors, closing cmdwriter doesn't close the stream. 137 | // cmdreader (at the other end of the OS pipe) will only close when all duplicates of cmdwriter 138 | // are closed. 139 | // 140 | // We've seen this hang happen just after phantomjs execution in the container - could just be a 141 | // coincidence. 142 | // 143 | // To avoid hanging forever waiting for the reader to close, add a timeout to the waitgroup Wait(). 144 | if timedWait(clientLogClosed.Wait, 5*time.Second) != nil { 145 | clientLog.Printf("Timed out waiting for waitGroup to complete") 146 | } 147 | 148 | clientLog.Printf("Command exited with status %d", exitCode) 149 | 150 | result := &client.CommandResult{ 151 | Success: exitCode == 0, 152 | } 153 | 154 | if captureOutput { 155 | result.Output = buffer.Bytes() 156 | } else { 157 | result.Output = []byte("") 158 | } 159 | return result, nil 160 | } 161 | 162 | func generateCommand(args []string, user string) []string { 163 | if user == "root" { 164 | return args 165 | } 166 | 167 | result := []string{"sudo", "-EHu", user} 168 | result = append(result, args...) 
169 | return result 170 | } 171 | 172 | func getHomeDir(user string) string { 173 | if user == "root" { 174 | return "/root" 175 | } else { 176 | return filepath.Join("/home", user) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /adapter/lxc/container_test.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | package lxcadapter 4 | 5 | import ( 6 | "io/ioutil" 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func ensureFileExists(path string) { 16 | if _, err := os.Stat(path); err != nil { 17 | // 0644 (octal), not 644: decimal 644 is octal 01204, which sets the 18 | // sticky bit and drops the intended rw-r--r-- permissions. 19 | err = ioutil.WriteFile(path, []byte("test\n"), 0644) 20 | if err != nil { 21 | panic("Failed to create file") 22 | } 23 | } 24 | } 25 | 26 | func ensureFileDoesNotExist(path string) { 27 | if _, err := os.Stat(path); err == nil { 28 | err = os.Remove(path) 29 | if err != nil { 30 | panic("Failed to remove file") 31 | } 32 | } 33 | } 34 | 35 | func TestGetImageCompressionType(t *testing.T) { 36 | cacheDir, err := ioutil.TempDir("/tmp", "container_test") 37 | require.Nil(t, err) 38 | container := &Container{ 39 | Name: containerName, 40 | ImageCacheDir: cacheDir, 41 | Snapshot: "test_snapshot", 42 | } 43 | 44 | imagePath := filepath.Join(cacheDir, container.getImagePath("test_snapshot")) 45 | // 0755 (octal), not decimal 755 (= octal 01363, a nonsensical dir mode). 46 | require.NoError(t, os.MkdirAll(imagePath, 0755)) 47 | defer os.RemoveAll(cacheDir) 48 | 49 | lz4Path := filepath.Join(imagePath, "rootfs.tar.lz4") 50 | xzPath := filepath.Join(imagePath, "rootfs.tar.xz") 51 | 52 | // Test case where the image doesn't exist 53 | ensureFileDoesNotExist(lz4Path) 54 | ensureFileDoesNotExist(xzPath) 55 | _, ok := container.getImageCompressionType() 56 | require.False(t, ok) 57 | 58 | // Test lz4 case 59 | ensureFileExists(lz4Path) 60 | compressionType, ok := container.getImageCompressionType() 61 | require.True(t, ok) 62 | require.Equal(t, 
compressionType, "lz4") 60 | 61 | // Test xz case 62 | ensureFileDoesNotExist(lz4Path) 63 | ensureFileExists(xzPath) 64 | compressionType, ok = container.getImageCompressionType() 65 | assert.True(t, ok) 66 | assert.Equal(t, compressionType, "xz") 67 | } 68 | -------------------------------------------------------------------------------- /adapter/lxc/doc.go: -------------------------------------------------------------------------------- 1 | // +build !lxc 2 | 3 | package lxcadapter 4 | 5 | // This file is primarily in place to make sure the 'lxc' package can be build on non-Linux 6 | // machines. LXC is not supported on non-Linux machines. 7 | -------------------------------------------------------------------------------- /adapter/lxc/executor.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | // Executor system, which assigns each container to a specific 4 | // "executor" which is essentially a single thread that runs jobs 5 | // on the host. We kill any container which was associated with 6 | // the same executor before starting our container, which prevents 7 | // hanging containers from changes-client getting SIGKILLed. 8 | package lxcadapter 9 | 10 | import ( 11 | "fmt" 12 | "gopkg.in/lxc/go-lxc.v2" 13 | "io/ioutil" 14 | "log" 15 | "os" 16 | "os/exec" 17 | "path" 18 | ) 19 | 20 | type Executor struct { 21 | Name string 22 | Directory string 23 | } 24 | 25 | // This file is a unique file owned by us and no other changes-client 26 | // process but it may be leftover from another process that has already 27 | // terminated. 28 | func (e *Executor) File() string { 29 | return path.Join(e.Directory, e.Name) 30 | } 31 | 32 | // Before we start a container, we verify that any previous container 33 | // started by the same executor is terminated. 
Since there is no other 34 | // changes-client with the same executor, this won't interfere with any 35 | // running jobs but lets us clean up the environment from previous runs. 36 | func (e *Executor) Clean() { 37 | if e.Name == "" { 38 | return 39 | } 40 | 41 | // An error here is considered to be fine (in fact, normal) 42 | // since it indicates that the executor file doesn't exist, 43 | // which is the normal state if the previous run finished 44 | // execution cleanly. 45 | leftoverNameBytes, err := ioutil.ReadFile(e.File()) 46 | if err == nil { 47 | leftoverName := string(leftoverNameBytes) 48 | log.Printf("[lxc] Detected leftover container: %s", leftoverName) 49 | os.Remove(e.File()) 50 | container, err := lxc.NewContainer(leftoverName, lxc.DefaultConfigPath()) 51 | 52 | // An error here probably indicates that the executor was killed 53 | // in such a bad state that the container was never finished being 54 | // created. We simply warn because this likely won't affect 55 | // the current run from proceeding. 56 | if err != nil { 57 | log.Printf("[lxc] Warning: Could not open leftover container: %s", leftoverName) 58 | return 59 | } 60 | if container.Running() { 61 | // The stop in the go-lxc api is not necessarily forceful 62 | // enough. We wish to guarantee that the container stops 63 | // so we use kill. 64 | // 65 | // XXX this could potentially have problems if there are 66 | // ever any rw-mounts into the container. 67 | log.Printf("[lxc] Killing leftover container: %s", leftoverName) 68 | err = exec.Command("lxc-stop", "-k", "-n", leftoverName).Run() 69 | if err != nil { 70 | log.Printf("[lxc] Error killing container: %s", err.Error()) 71 | return 72 | } 73 | } 74 | // in theory kill should always prevent the container from running - but we check 75 | // and warn just to be sure. 
76 | if container.Running() { 77 | log.Printf("[lxc] Warning: Couldn't kill leftover container: %s", leftoverName) 78 | return 79 | } 80 | 81 | log.Printf("[lxc] Destroying leftover container: %s", leftoverName) 82 | container.Destroy() 83 | if container.Defined() { 84 | log.Printf("[lxc] Warning: Couldn't destroy leftover container: %s", leftoverName) 85 | return 86 | } 87 | log.Printf("[lxc] Successfully cleaned up state for executor %s", e.Name) 88 | } else { 89 | if os.IsNotExist(err) { 90 | log.Printf("[lxc] Executor doesn't exist, pre-existing state appears clean.") 91 | } else { 92 | log.Printf("[lxc] An unexpected io error occurred: %s", err) 93 | } 94 | } 95 | } 96 | 97 | // Create an executor file, registering the current container with the current 98 | // executor. 99 | func (e *Executor) Register(containerName string) error { 100 | if e.Name == "" { 101 | log.Printf("[lxc] Warning: Can't register a container without a name") 102 | return fmt.Errorf("Can't register a container without a name.") 103 | } 104 | 105 | log.Printf("[lxc] Creating executor for %s with container %s", 106 | e.File(), containerName) 107 | err := ioutil.WriteFile(e.File(), []byte(containerName), 0644) 108 | if err != nil { 109 | log.Printf("[lxc] Warning: Couldn't create executor file") 110 | return err 111 | } 112 | return nil 113 | } 114 | 115 | // By removing the executor we indicate that this run was cleanly finished 116 | // and that the container was destroyed. If we are keeping the container, 117 | // then we still remove the executor file to prevent another changes-client 118 | // from forcibly destroying the container. 
119 | func (e *Executor) Deregister() { 120 | if e.Name == "" { 121 | return 122 | } 123 | 124 | log.Printf("[lxc] Removing executor for %s", e.Name) 125 | err := os.Remove(e.File()) 126 | if err != nil { 127 | log.Printf("[lxc] Warning: Unable to remove executor file") 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /adapter/lxc/flags.go: -------------------------------------------------------------------------------- 1 | package lxcadapter 2 | 3 | import ( 4 | "flag" 5 | ) 6 | 7 | // Flags are stored here so they are available even for non-LXC builds. 8 | // Ideally we'd only supply relevant flags, but an unexpectd flag can cause an early failure 9 | // that's awkward to report reliably. 10 | var ( 11 | preLaunch string 12 | postLaunch string 13 | s3Bucket string 14 | release string 15 | arch string 16 | dist string 17 | keepContainer bool 18 | memory int 19 | cpus int 20 | compression string 21 | executorName string 22 | executorPath string 23 | bindMounts string 24 | ) 25 | 26 | func init() { 27 | flag.StringVar(&preLaunch, "pre-launch", "", "Container pre-launch script") 28 | flag.StringVar(&postLaunch, "post-launch", "", "Container post-launch script") 29 | flag.StringVar(&s3Bucket, "s3-bucket", "", "S3 bucket name") 30 | flag.StringVar(&dist, "dist", "ubuntu", "Linux distribution") 31 | flag.StringVar(&release, "release", "trusty", "Distribution release") 32 | flag.StringVar(&arch, "arch", "amd64", "Linux architecture") 33 | // This is the compression algorithm to be used for creating an image. 34 | // The decompression used is determined by whether the image has the "xz" or "lz4" extension. 35 | flag.StringVar(&compression, "compression", "lz4", "compression algorithm (xz,lz4)") 36 | flag.StringVar(&bindMounts, "bind-mounts", "", "bind mounts. ::. 
comma separated.") 37 | 38 | // the executor should have the following properties: 39 | // - the maximum distinct values passed to executor is equal to the maximum 40 | // number of concurrently running jobs. 41 | // - no two changes-client processes should be called with the same 42 | // executor name 43 | // - if any process is calling changes-client with executor specified, then 44 | // all clients should use a specified executor 45 | // 46 | // if not all of these features can be met, then executor should not be specified 47 | // but parallel builds may not work correctly. 48 | // 49 | flag.StringVar(&executorName, "executor", "", "Executor (unique runner id)") 50 | flag.StringVar(&executorPath, "executor-path", "/var/lib/changes-client/executors", "Path to store executors") 51 | flag.IntVar(&memory, "memory", 0, "Memory limit (in MB)") 52 | flag.IntVar(&cpus, "cpus", 0, "CPU limit") 53 | flag.BoolVar(&keepContainer, "keep-container", false, "Do not destroy the container on cleanup") 54 | } 55 | -------------------------------------------------------------------------------- /adapter/util.go: -------------------------------------------------------------------------------- 1 | package adapter 2 | 3 | import ( 4 | "github.com/dropbox/changes-client/client" 5 | "github.com/dropbox/changes-client/common/glob" 6 | ) 7 | 8 | func CollectArtifactsIn(dir string, artifacts []string, clientLog *client.Log) ([]string, error) { 9 | matches, skipped, err := glob.GlobTreeRegular(dir, artifacts) 10 | for i, s := range skipped { 11 | if i == 10 { 12 | clientLog.Printf("And %d more.", len(skipped)-i) 13 | break 14 | } 15 | clientLog.Printf("Skipped matching non-regular file %s", s) 16 | } 17 | return matches, err 18 | } 19 | -------------------------------------------------------------------------------- /ci/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | export GOPATH=~/ 3 | export 
PATH=$GOPATH/bin:/usr/local/go/bin:$PATH 4 | WORKSPACE=$GOPATH/src/github.com/dropbox/changes-client 5 | cd $WORKSPACE 6 | 7 | echo Running vet... 8 | # TODO(anupc): Remove -printf=false 9 | # Fails with 'adapter/lxc/cmd_wrapper.go:44: arg fn in printf call is a function value, not a function call' 10 | go tool vet -all -printf=false . 11 | echo Done. 12 | # report non-'err' shadows 13 | (go tool vet -shadow -shadowstrict . 2>&1 | grep -v "declaration of err") || true 14 | 15 | go get github.com/jstemmer/go-junit-report 16 | sudo CHANGES=1 PATH=$PATH GOPATH=$GOPATH `which go` test -bench . ./... -timeout=120s -v -race | tee test.log 17 | EXIT_CODE=${PIPESTATUS[0]} 18 | echo Generating junit.xml... 19 | go-junit-report < test.log > junit.xml 20 | echo Done. 21 | exit ${EXIT_CODE} 22 | -------------------------------------------------------------------------------- /ci/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | support/bootstrap-ubuntu.sh 4 | export PATH=/usr/local/go/bin:$PATH 5 | export GOPATH=~/ 6 | cd $GOPATH/src/github.com/dropbox/changes-client 7 | PATH=$PATH GOPATH=$GOPATH make dev 8 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "io" 9 | "log" 10 | "os" 11 | 12 | "github.com/dropbox/changes-client/client" 13 | "github.com/dropbox/changes-client/client/adapter" 14 | "github.com/dropbox/changes-client/client/filelog" 15 | "github.com/dropbox/changes-client/client/reporter" 16 | "github.com/dropbox/changes-client/common/sentry" 17 | "github.com/dropbox/changes-client/common/version" 18 | "github.com/dropbox/changes-client/engine" 19 | "github.com/getsentry/raven-go" 20 | ) 21 | 22 | func main() { 23 | log.SetFlags(log.Lmicroseconds | log.Ldate) 24 | var ( 25 | showVersion 
= flag.Bool("version", false, "Prints changes-client version") 26 | exitResult = flag.Bool("exit-result", false, "Determine exit code from result--exit 1 on any execution failure or 99 on any infrastructure failure") 27 | showInfo = flag.Bool("showinfo", false, "Prints basic information about this binary in a stable json format and exits.") 28 | jobstepID = flag.String("jobstep_id", "", "Jobstep ID whose commands are to be executed") 29 | ) 30 | flag.Parse() 31 | 32 | if *showVersion { 33 | fmt.Println(version.GetVersion()) 34 | return 35 | } 36 | if *showInfo { 37 | // This is intended to be a reliable way to externally verify 38 | // the available functionality in this binary; change the format 39 | // only with great care. 40 | if d, e := json.MarshalIndent(map[string]interface{}{ 41 | "adapters": adapter.Names(), 42 | "reporters": reporter.Names(), 43 | "version": version.GetVersion(), 44 | }, "", " "); e != nil { 45 | panic(e) 46 | } else { 47 | fmt.Println(string(d)) 48 | return 49 | } 50 | } 51 | 52 | result := run(*jobstepID) 53 | exitCode := 0 54 | if *exitResult { 55 | switch result { 56 | case engine.RESULT_PASSED: 57 | exitCode = 0 58 | case engine.RESULT_INFRA_FAILED: 59 | // We use exit code 99 to signal to the generic-build script that 60 | // there was an infrastructure failure. Eventually, changes-client 61 | // will probably report infra failures to Changes directly. 62 | exitCode = 99 63 | default: 64 | exitCode = 1 65 | } 66 | } 67 | log.Println("[client] exit:", exitCode) 68 | os.Exit(exitCode) 69 | } 70 | 71 | // Returns whether run was successful. 
72 | func run(jobstepID string) (result engine.Result) { 73 | infraLog, err := filelog.New(jobstepID, "infralog") 74 | if err != nil { 75 | log.Printf("[client] error creating infralog: %s", err) 76 | sentry.Error(err, map[string]string{}) 77 | } else { 78 | log.SetOutput(io.MultiWriter(os.Stderr, infraLog)) 79 | } 80 | 81 | var sentryClient *raven.Client 82 | if sentryClient = sentry.GetClient(); sentryClient != nil { 83 | log.Printf("Using Sentry; ProjectID=%s, URL=%s", sentryClient.ProjectID(), sentryClient.URL()) 84 | // Don't return until we're finished sending to Sentry. 85 | defer sentryClient.Wait() 86 | // Ensure main thread panics are caught and reported. 87 | defer func() { 88 | if p := recover(); p != nil { 89 | var err error 90 | switch rval := p.(type) { 91 | case error: 92 | err = rval 93 | default: 94 | err = errors.New(fmt.Sprint(rval)) 95 | } 96 | packet := raven.NewPacket(err.Error(), raven.NewException(err, raven.NewStacktrace(2, 3, nil))) 97 | log.Printf("[client] Sending panic to Sentry") 98 | _, ch := sentryClient.Capture(packet, map[string]string{}) 99 | if serr := <-ch; serr != nil { 100 | log.Printf("SENTRY ERROR: %s", serr) 101 | } 102 | // We consider panics an infra failure 103 | result = engine.RESULT_INFRA_FAILED 104 | } 105 | }() 106 | } else { 107 | log.Println("Sentry NOT ENABLED.") 108 | } 109 | 110 | // Error handling in place; now we begin. 
111 | 112 | config, err := client.GetConfig(jobstepID) 113 | if err != nil { 114 | log.Printf("[client] error getting config: %s", err) 115 | sentry.Error(err, map[string]string{}) 116 | return engine.RESULT_INFRA_FAILED 117 | } 118 | if sentryClient != nil { 119 | sentryClient.SetTagsContext(map[string]string{ 120 | "projectslug": config.Project.Slug, 121 | "jobstep_id": config.JobstepID, 122 | }) 123 | } 124 | 125 | result, err = engine.RunBuildPlan(config, infraLog) 126 | if err != nil { 127 | sentry.Error(err, map[string]string{}) 128 | result = engine.RESULT_INFRA_FAILED 129 | } 130 | return result 131 | } 132 | -------------------------------------------------------------------------------- /client/adapter/adapter.go: -------------------------------------------------------------------------------- 1 | package adapter 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/dropbox/changes-client/client" 7 | ) 8 | 9 | type Adapter interface { 10 | // Init should be called before any other methods, and no more than once. 11 | Init(*client.Config) error 12 | // Prepare must be called no more than once, and must return successfully 13 | // before any method other than Init is called. 
Any returned metrics 14 | // will be reported via the active Reporter 15 | Prepare(*client.Log) (client.Metrics, error) 16 | Run(*client.Command, *client.Log) (*client.CommandResult, error) 17 | Shutdown(*client.Log) (client.Metrics, error) 18 | CaptureSnapshot(string, *client.Log) error 19 | GetRootFs() string 20 | CollectArtifacts([]string, *client.Log) ([]string, error) 21 | // Get absolute path to directory in which artifacts are searched 22 | GetArtifactRoot() string 23 | } 24 | 25 | func FormatUUID(uuid string) string { 26 | return fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:]) 27 | } 28 | -------------------------------------------------------------------------------- /client/adapter/adapter_test.go: -------------------------------------------------------------------------------- 1 | // +build linux lxc 2 | 3 | package adapter 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | ) 9 | 10 | func TestFormatUUID(t *testing.T) { 11 | res := FormatUUID("a6f70a68e4384cf68bcc2dd1a44b8554") 12 | assert.Equal(t, res, "a6f70a68-e438-4cf6-8bcc-2dd1a44b8554") 13 | } 14 | -------------------------------------------------------------------------------- /client/adapter/registry.go: -------------------------------------------------------------------------------- 1 | package adapter 2 | 3 | import "fmt" 4 | 5 | var registry = make(Registry) 6 | 7 | type Registry map[string]func() Adapter 8 | 9 | func (r Registry) register(name string, ctr func() Adapter) { 10 | r[name] = ctr 11 | } 12 | 13 | func (r Registry) names() []string { 14 | var res []string 15 | for k := range r { 16 | res = append(res, k) 17 | } 18 | return res 19 | } 20 | 21 | func Register(name string, ctr func() Adapter) error { 22 | registry.register(name, ctr) 23 | return nil 24 | } 25 | 26 | // Names returns the names of all registered Adapters. 
27 | func Names() []string { 28 | return registry.names() 29 | } 30 | 31 | func Create(name string) (Adapter, error) { 32 | ctr, present := registry[name] 33 | if present { 34 | return ctr(), nil 35 | } 36 | return nil, fmt.Errorf("Adapter not found: %s", name) 37 | } 38 | -------------------------------------------------------------------------------- /client/cmd.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | ) 7 | 8 | type Command struct { 9 | ID string 10 | Path string 11 | Env []string 12 | Cwd string 13 | CaptureOutput bool 14 | } 15 | 16 | type CommandResult struct { 17 | Output []byte // buffered output if requested 18 | Success bool 19 | } 20 | 21 | // Build a new Command out of an arbitrary script 22 | // The script is written to disk and then executed ensuring that it can 23 | // be fairly arbitrary and provide its own shebang 24 | func NewCommand(id string, script string) (*Command, error) { 25 | f, err := ioutil.TempFile("", "script-") 26 | if err != nil { 27 | return nil, err 28 | } 29 | defer f.Close() 30 | 31 | if _, err := f.WriteString(script); err != nil { 32 | return nil, err 33 | } 34 | 35 | info, err := f.Stat() 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | if err := f.Chmod((info.Mode() & os.ModePerm) | 0111); err != nil { 41 | return nil, err 42 | } 43 | 44 | // TODO(dcramer): generate a better name 45 | return &Command{ 46 | ID: id, 47 | Path: f.Name(), 48 | }, nil 49 | } 50 | -------------------------------------------------------------------------------- /client/cmd_wrapper.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os/exec" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type CmdWrapper struct { 12 | cmd *exec.Cmd 13 | } 14 | 15 | func NewCmdWrapper(command []string, cwd string, env []string) *CmdWrapper { 16 | c := 
exec.Command(command[0], command[1:]...) 17 | c.Env = env 18 | c.Dir = cwd 19 | return &CmdWrapper{ 20 | cmd: c, 21 | } 22 | } 23 | 24 | func (cw *CmdWrapper) StdinPipe() (io.WriteCloser, error) { 25 | return cw.cmd.StdinPipe() 26 | } 27 | 28 | func (cw *CmdWrapper) CombinedOutputPipe() (io.ReadCloser, io.WriteCloser) { 29 | pr, pw := io.Pipe() 30 | 31 | cw.cmd.Stdout = pw 32 | cw.cmd.Stderr = pw 33 | 34 | return pr, pw 35 | } 36 | 37 | func (cw *CmdWrapper) Run(captureOutput bool, clientLog *Log) (*CommandResult, error) { 38 | stdin, err := cw.StdinPipe() 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | cmdreader, cmdwriter := cw.CombinedOutputPipe() 44 | 45 | clientLog.Printf("==> Executing %s", cw.cmd.Args) 46 | 47 | var buffer *bytes.Buffer 48 | var reader io.Reader = cmdreader 49 | 50 | // If user has requested to buffer command output, tee output to in memory buffer. 51 | if captureOutput { 52 | buffer = &bytes.Buffer{} 53 | reader = io.TeeReader(cmdreader, buffer) 54 | } 55 | 56 | err = cw.cmd.Start() 57 | 58 | stdin.Close() 59 | 60 | if err != nil { 61 | clientLog.Printf("Failed to start %s %s", cw.cmd.Args, err) 62 | return nil, err 63 | } 64 | 65 | wg := sync.WaitGroup{} 66 | wg.Add(1) 67 | go func() { 68 | clientLog.WriteStream(reader) 69 | wg.Done() 70 | }() 71 | 72 | err = cw.cmd.Wait() 73 | 74 | // Wait 10 seconds for the pipe to close. If it doesn't we give up on actually closing 75 | // as a child process might be causing things to stick around. 76 | // XXX: this logic is duplicated in lxcadapter.CmdWrapper 77 | timeLimit := time.After(10 * time.Second) 78 | sem := make(chan struct{}) // lol struct{} is cheaper than bool 79 | go func() { 80 | cmdwriter.Close() 81 | sem <- struct{}{} 82 | }() 83 | 84 | select { 85 | case <-timeLimit: 86 | clientLog.Printf("Failed to close all file descriptors! 
Ignoring and moving on..") 87 | break 88 | case <-sem: 89 | break 90 | } 91 | 92 | wg.Wait() 93 | 94 | switch err.(type) { 95 | case *exec.ExitError: 96 | // ExitError means non-zero exit status, handled by CommandResult below. 97 | case nil: 98 | // Success, nothing to do. 99 | default: 100 | // Some failure trying to run the command. 101 | return nil, err 102 | } 103 | 104 | result := &CommandResult{ 105 | Success: cw.cmd.ProcessState.Success(), 106 | } 107 | 108 | if captureOutput { 109 | result.Output = buffer.Bytes() 110 | } else { 111 | result.Output = []byte("") 112 | } 113 | return result, nil 114 | } 115 | -------------------------------------------------------------------------------- /client/cmd_wrapper_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func TestRun(t *testing.T) { 9 | cw := NewCmdWrapper([]string{"/bin/bash", "-c", "echo -n 1"}, "", []string{}) 10 | log := NewLog() 11 | 12 | sem := make(chan bool) 13 | go func() { 14 | log.Drain() 15 | sem <- true 16 | }() 17 | 18 | result, err := cw.Run(true, log) 19 | log.Close() 20 | <-sem 21 | if err != nil { 22 | t.Fatal(err.Error()) 23 | } 24 | 25 | if !bytes.Equal(result.Output, []byte("1")) { 26 | t.Error("Did not buffer output") 27 | } 28 | } 29 | 30 | // if stdin is allowed this test will hang 31 | func TestRunIgnoresStdin(t *testing.T) { 32 | cw := NewCmdWrapper([]string{"/bin/bash", "-c", "read foo"}, "", []string{}) 33 | log := NewLog() 34 | 35 | sem := make(chan bool) 36 | go func() { 37 | log.Drain() 38 | sem <- true 39 | }() 40 | 41 | cw.Run(false, log) 42 | log.Close() 43 | <-sem 44 | } 45 | 46 | func TestRunFailToStart(t *testing.T) { 47 | cw := NewCmdWrapper([]string{"/bin/bash", "-c", "echo -n 1"}, "", []string{}) 48 | log := NewLog() 49 | 50 | sem := make(chan bool) 51 | go func() { 52 | log.Drain() 53 | sem <- true 54 | }() 55 | 56 | _, err := cw.Run(false, log) 57 | 
log.Close() 58 | <-sem 59 | if err != nil { 60 | t.Fatal(err.Error()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /client/config.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "fmt" 7 | "io/ioutil" 8 | "log" 9 | "net/http" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | var ( 15 | server string 16 | artifactSearchPath string 17 | upstreamMonitor bool 18 | debug bool 19 | ignoreSnapshots bool 20 | ) 21 | 22 | type ConfigCmd struct { 23 | ID string 24 | Script string 25 | Env map[string]string 26 | Cwd string 27 | Artifacts []string 28 | CaptureOutput bool 29 | Type struct { 30 | ID string 31 | } 32 | } 33 | 34 | // ResourceLimits describes all specified limits 35 | // that should be applied while executing the JobStep. 36 | type ResourceLimits struct { 37 | // Number of CPUs. 38 | Cpus *int 39 | // Memory limit in megabytes. 40 | Memory *int 41 | } 42 | 43 | type Config struct { 44 | Server string 45 | JobstepID string 46 | ArtifactSearchPath string 47 | UpstreamMonitor bool 48 | Snapshot struct { 49 | ID string 50 | } 51 | Source struct { 52 | Revision struct { 53 | Sha string 54 | } 55 | Patch struct { 56 | ID string 57 | } 58 | } 59 | Repository struct { 60 | URL string 61 | Backend struct { 62 | ID string 63 | } 64 | } 65 | Project struct { 66 | Name string 67 | Slug string 68 | } 69 | Cmds []ConfigCmd `json:"commands"` 70 | ExpectedSnapshot struct { 71 | // If this build is expected to generate a snapshot, this is the snapshot ID. 72 | ID string 73 | } 74 | 75 | ResourceLimits ResourceLimits 76 | 77 | DebugConfig map[string]*json.RawMessage `json:"debugConfig"` 78 | } 79 | 80 | // GetDebugConfig parses the debug config JSON at the given key to dest, returning whether the key 81 | // was present, and if it was, any error that occurred in trying to parse it to dest. 
82 | func (c *Config) GetDebugConfig(key string, dest interface{}) (present bool, err error) { 83 | data, ok := c.DebugConfig[key] 84 | if !ok || data == nil { 85 | return false, nil 86 | } 87 | e := json.Unmarshal([]byte(*data), dest) 88 | if e != nil { 89 | e = fmt.Errorf("Malformed JSON in debug config key %q: %s", key, e) 90 | } 91 | return true, e 92 | } 93 | 94 | // GetDebugConfigBool is a helper to simplify basic gating; it acts just like GetDebugConfig, 95 | // but always uses the fallback value when the value is missing or there is an error. 96 | func (c *Config) GetDebugConfigBool(key string, fallback bool) bool { 97 | var val bool 98 | if ok, e := c.GetDebugConfig(key, &val); !ok || e != nil { 99 | return fallback 100 | } 101 | return val 102 | } 103 | 104 | // Duration is in nanoseconds and is multiplied by 2 on each retry 105 | // 106 | // We need to retry because there is a race condition in interactions 107 | // with Changes where the jenkins job is created before the jobstep 108 | // in Changes. This probably only occurs when there is a long running 109 | // transaction. We don't want to delay too much, so we start with a small 110 | // delay in case the jenkins job just got started very quickly, but then we delay 111 | // longer between each retry in case we have to wait for some long transaction 112 | // to occur. 113 | // 114 | // NOTE: Due to the nature of this race condition we only retry on 404s. 115 | func fetchConfig(url string, retries int, retryDelay time.Duration) (*Config, error) { 116 | resp, err := http.Get(url) 117 | if err != nil { 118 | return nil, err 119 | } 120 | defer resp.Body.Close() 121 | 122 | if resp.StatusCode != 200 { 123 | // The race condition ends up giving us a 404. If we got anything else, its 124 | // a real error and we shouldn't bother retrying. 
125 | if retries == 0 || resp.StatusCode != 404 { 126 | err := fmt.Errorf("Request to fetch config failed with status code: %d", resp.StatusCode) 127 | return nil, err 128 | } else { 129 | log.Printf("Failed to fetch configuration (404). Retries left: %d", retries) 130 | time.Sleep(retryDelay) 131 | return fetchConfig(url, retries-1, retryDelay*2) 132 | } 133 | } 134 | 135 | body, err := ioutil.ReadAll(resp.Body) 136 | if err != nil { 137 | return nil, err 138 | } 139 | 140 | return LoadConfig(body) 141 | } 142 | 143 | func LoadConfig(content []byte) (*Config, error) { 144 | r := &Config{} 145 | err := json.Unmarshal(content, r) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | return r, nil 151 | } 152 | 153 | func GetConfig(jobstepID string) (*Config, error) { 154 | if server == "" { 155 | return nil, fmt.Errorf("Missing required configuration: server") 156 | } 157 | 158 | if jobstepID == "" { 159 | return nil, fmt.Errorf("Missing required configuration: jobstep_id") 160 | } 161 | 162 | server = strings.TrimRight(server, "/") 163 | 164 | url := server + "/jobsteps/" + jobstepID + "/" 165 | conf, err := fetchConfig(url, 8, 250*time.Millisecond) 166 | if err != nil { 167 | return nil, err 168 | } 169 | 170 | conf.Server = server 171 | conf.JobstepID = jobstepID 172 | conf.ArtifactSearchPath = artifactSearchPath 173 | conf.UpstreamMonitor = upstreamMonitor 174 | // deprecated flag 175 | if debug { 176 | conf.UpstreamMonitor = false 177 | } 178 | 179 | if ignoreSnapshots { 180 | conf.Snapshot.ID = "" 181 | } 182 | return conf, nil 183 | } 184 | 185 | func init() { 186 | flag.StringVar(&server, "server", "", "URL to get config from") 187 | flag.StringVar(&artifactSearchPath, "artifact-search-path", ".", "Folder where artifacts will be searched for relative to adapter root") 188 | flag.BoolVar(&upstreamMonitor, "upstream-monitor", true, "Indicates whether the client should monitor upstream for aborts") 189 | flag.BoolVar(&debug, "debug", false, 
"DEPRECATED. debug=true is the same as upstreamMonitor=false.") 190 | flag.BoolVar(&ignoreSnapshots, "no-snapshots", false, "Ignore any existing snapshots, and build a fresh environment") 191 | } 192 | -------------------------------------------------------------------------------- /client/config_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | const jobStepResponse = ` 14 | { 15 | "id": "549db9a70d4d4d258e0a6d475ccd8a15", 16 | "commands": [ 17 | { 18 | "id": "cmd_1", 19 | "script": "#!/bin/bash\necho -n $VAR", 20 | "env": {"VAR": "hello world"}, 21 | "cwd": "/tmp", 22 | "artifacts": ["junit.xml"] 23 | }, 24 | { 25 | "id": "cmd_2", 26 | "script": "#!/bin/bash\necho test", 27 | "cwd": "/tmp" 28 | } 29 | ], 30 | "repository": { 31 | "url": "git@github.com:dropbox/changes.git", 32 | "backend": { 33 | "id": "git" 34 | } 35 | }, 36 | "source": { 37 | "patch": { 38 | "id": "patch_1" 39 | }, 40 | "revision": { 41 | "sha": "aaaaaa" 42 | } 43 | }, 44 | "expectedSnapshot": { 45 | "id": "fed13008d3e94f6bb58e53237ad73f1d" 46 | }, 47 | "resourceLimits": { 48 | "cpus": 4, 49 | "memory": 8127 50 | }, 51 | "debugConfig": { 52 | "some_env": {"Name": "wat", "Val": 4} 53 | } 54 | } 55 | ` 56 | 57 | func TestGetConfig(t *testing.T) { 58 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 59 | w.Header().Set("Content-Type", "application/json") 60 | 61 | // XXX(dcramer): the input URL is the API base so these paths wouldn't include it 62 | if r.Method == "GET" { 63 | switch r.URL.Path { 64 | case "/jobsteps/549db9a70d4d4d258e0a6d475ccd8a15/": 65 | io.WriteString(w, jobStepResponse) 66 | } 67 | } 68 | })) 69 | defer ts.Close() 70 | 71 | server = ts.URL 72 | jobstepID := "549db9a70d4d4d258e0a6d475ccd8a15" 73 | 74 | config, err := 
GetConfig(jobstepID) 75 | assert.NoError(t, err) 76 | 77 | assert.Equal(t, config.Server, strings.TrimRight(ts.URL, "/")) 78 | 79 | assert.Equal(t, config.JobstepID, jobstepID) 80 | 81 | assert.Equal(t, config.Repository.Backend.ID, "git") 82 | 83 | assert.Equal(t, config.Repository.URL, "git@github.com:dropbox/changes.git") 84 | 85 | assert.Equal(t, config.Source.Patch.ID, "patch_1") 86 | 87 | assert.Equal(t, config.Source.Revision.Sha, "aaaaaa") 88 | 89 | assert.Equal(t, config.ExpectedSnapshot.ID, "fed13008d3e94f6bb58e53237ad73f1d") 90 | 91 | assert.Equal(t, len(config.Cmds), 2) 92 | 93 | assert.Equal(t, config.Cmds[0], ConfigCmd{ 94 | ID: "cmd_1", 95 | Script: "#!/bin/bash\necho -n $VAR", 96 | Cwd: "/tmp", 97 | Artifacts: []string{"junit.xml"}, 98 | Env: map[string]string{ 99 | "VAR": "hello world", 100 | }, 101 | }) 102 | 103 | assert.Equal(t, *config.ResourceLimits.Cpus, 4) 104 | assert.Equal(t, *config.ResourceLimits.Memory, 8127) 105 | 106 | type Pair struct { 107 | Name string 108 | Val int 109 | } 110 | var envthing Pair 111 | dok, derr := config.GetDebugConfig("some_env", &envthing) 112 | assert.True(t, dok) 113 | assert.NoError(t, derr) 114 | assert.Equal(t, envthing, Pair{"wat", 4}) 115 | } 116 | 117 | func TestParseResourceLimits(t *testing.T) { 118 | ptrto := func(p int) *int { 119 | return &p 120 | } 121 | cases := []struct { 122 | json string 123 | expected ResourceLimits 124 | }{ 125 | {`{"resourceLimits": {"cpus": 8}}`, ResourceLimits{Cpus: ptrto(8)}}, 126 | {`{"resourceLimits": {"memory": 8000}}`, ResourceLimits{Memory: ptrto(8000)}}, 127 | {`{"resourceLimits": {"cpus": 9, "memory": 8008}}`, 128 | ResourceLimits{Cpus: ptrto(9), Memory: ptrto(8008)}}, 129 | {`{"resourceLimits": {}}`, ResourceLimits{}}, 130 | {`{}`, ResourceLimits{}}, 131 | } 132 | for i, c := range cases { 133 | cfg, e := LoadConfig([]byte(c.json)) 134 | if e != nil { 135 | panic(e) 136 | } 137 | assert.Equal(t, c.expected, cfg.ResourceLimits, "case %v: %v", i, c.json) 138 | } 
139 | 140 | } 141 | 142 | func TestDebugConfig(t *testing.T) { 143 | cases := []struct { 144 | json, key string 145 | dest interface{} 146 | Ok bool 147 | Error bool 148 | }{ 149 | // missing, no error, no ok 150 | {json: "{}", 151 | key: "absent", dest: new(string), 152 | Ok: false, Error: false}, 153 | // type mismatch, ok, but error 154 | {json: `{"debugConfig": {"foo": 44}}`, 155 | key: "foo", dest: new(string), 156 | Ok: true, Error: true}, 157 | // same as above, but proper type. 158 | {json: `{"debugConfig": {"foo": 44}}`, 159 | key: "foo", dest: new(int), 160 | Ok: true, Error: false}, 161 | } 162 | 163 | for _, c := range cases { 164 | cfg, e := LoadConfig([]byte(c.json)) 165 | if e != nil { 166 | panic(e) 167 | } 168 | ok, err := cfg.GetDebugConfig(c.key, c.dest) 169 | if ok != c.Ok { 170 | t.Errorf("For %q, extracting %q to %T, expected ok=%v, but was %v", 171 | c.json, c.key, c.dest, c.Ok, ok) 172 | } 173 | if (err != nil) != c.Error { 174 | msg := "expected" 175 | if !c.Error { 176 | msg = "didn't expect" 177 | } 178 | t.Errorf("For %q, extracting %q to %T, %s error, but got %#v", 179 | c.json, c.key, c.dest, msg, err) 180 | } 181 | } 182 | } 183 | 184 | func TestGetDebugConfigBool(t *testing.T) { 185 | cases := []struct { 186 | json, key string 187 | fallback, result bool 188 | }{ 189 | {json: "{}", 190 | key: "absent", fallback: true, 191 | result: true}, 192 | {json: `{"debugConfig": {"foo": "banana"}}`, 193 | key: "foo", fallback: true, 194 | result: true}, 195 | {json: `{"debugConfig": {"foo": "banana"}}`, 196 | key: "foo", fallback: false, 197 | result: false}, 198 | {json: `{"debugConfig": {"foo": true}}`, 199 | key: "foo", fallback: false, 200 | result: true}, 201 | {json: `{"debugConfig": {"foo": false}}`, 202 | key: "foo", fallback: true, 203 | result: false}, 204 | } 205 | 206 | for i, c := range cases { 207 | cfg, e := LoadConfig([]byte(c.json)) 208 | if e != nil { 209 | panic(e) 210 | } 211 | result := cfg.GetDebugConfigBool(c.key, 
c.fallback) 212 | if result != c.result { 213 | t.Errorf("%v: Expected %v, got %v (%q)", i, c.result, result, c.json) 214 | } 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /client/filelog/filelog.go: -------------------------------------------------------------------------------- 1 | package filelog 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "io/ioutil" 7 | "log" 8 | "os" 9 | "path/filepath" 10 | "sync" 11 | "time" 12 | 13 | "github.com/dropbox/changes-client/client/reporter" 14 | "github.com/dropbox/changes-client/common/sentry" 15 | ) 16 | 17 | const chunkSize = 40960 18 | 19 | // FileLog is an io.Writer that appends to a /tmp file and then periodically 20 | // tails these contents to a given reporter 21 | type FileLog struct { 22 | name string 23 | flushDelay time.Duration 24 | reporter reporter.Reporter 25 | reporterLock *sync.Mutex 26 | readFile *os.File 27 | writeFile *os.File 28 | shutdown chan struct{} 29 | shutdownComplete chan struct{} 30 | } 31 | 32 | // Create a new FileLog. Must use this rather than creating struct directly. 33 | func New(jobstepID, name string) (*FileLog, error) { 34 | return NewWithOptions(jobstepID, name, 4*time.Second, "") 35 | } 36 | 37 | // Same as New() but allows specifying how long to wait between flushing to the 38 | // reporter, and the root directory for the log file (which has a sensible default if empty). 
39 | func NewWithOptions(jobstepID, name string, flushDelay time.Duration, rootDir string) (*FileLog, error) { 40 | if rootDir == "" { 41 | rootDir = filepath.Join(os.TempDir(), "changes-client") 42 | } 43 | if err := os.MkdirAll(rootDir, 0755); err != nil { 44 | return nil, err 45 | } 46 | directory, err := ioutil.TempDir(rootDir, jobstepID+"-") 47 | if err != nil { 48 | return nil, err 49 | } 50 | filename := filepath.Join(directory, fmt.Sprintf("%s.log", name)) 51 | f := &FileLog{name: name, flushDelay: flushDelay, reporterLock: &sync.Mutex{}, 52 | shutdown: make(chan struct{}), shutdownComplete: make(chan struct{})} 53 | f.writeFile, err = os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666) 54 | if err != nil { 55 | return nil, err 56 | } 57 | f.readFile, err = os.Open(filename) 58 | if err != nil { 59 | return nil, err 60 | } 61 | return f, nil 62 | } 63 | 64 | // Writes payload to temp file, which will eventually be sent to a reporter 65 | func (f *FileLog) Write(p []byte) (int, error) { 66 | return f.writeFile.Write(p) 67 | } 68 | 69 | // Begins reporting the contents of the log (as it is appended to) to the 70 | // given reporter. Before this, Write() calls only go into the temp file. 71 | // Should only be called once. 72 | func (f *FileLog) StartReporting(reporter reporter.Reporter) { 73 | f.reporterLock.Lock() 74 | defer f.reporterLock.Unlock() 75 | if f.reporter == nil { 76 | select { 77 | case <-f.shutdownComplete: 78 | // called after shutdown, just exit 79 | return 80 | default: 81 | } 82 | f.reporter = reporter 83 | go f.readFromFile() 84 | } else { 85 | panic("StartReporting called more than once--panicking") 86 | } 87 | } 88 | 89 | // Shutdown the log, blocking until any remaining contents are sent to the 90 | // reporter. Write() still goes to the temp file after this is called. 
91 | func (f *FileLog) Shutdown() { 92 | f.reporterLock.Lock() 93 | defer f.reporterLock.Unlock() 94 | if f.reporter != nil { 95 | select { 96 | case f.shutdown <- struct{}{}: 97 | // block until readFromFile() finishes sending any remaining data 98 | <-f.shutdownComplete 99 | // in case shutdown has already completed 100 | case <-f.shutdownComplete: 101 | } 102 | } else { 103 | // normally readFromFile() closes f.shutdownComplete but if there's 104 | // no reporter/goroutine spawned, we close f.shutdownComplete 105 | // ourselves (if it's not closed already) to mark this filelog as 106 | // closed (e.g. to StartReporting()). 107 | select { 108 | case <-f.shutdownComplete: // already closed 109 | default: 110 | close(f.shutdownComplete) 111 | } 112 | } 113 | f.readFile.Close() 114 | // we don't close writeFile so that any remaining logs still at least go in 115 | // the temp file 116 | } 117 | 118 | // goroutine which periodically reads from the temp file and tails it to the reporter 119 | func (f *FileLog) readFromFile() { 120 | defer close(f.shutdownComplete) 121 | running := true 122 | for running { 123 | select { 124 | case <-time.After(f.flushDelay): 125 | case <-f.shutdown: 126 | running = false 127 | } 128 | var err error 129 | var send []byte 130 | b := make([]byte, chunkSize) 131 | for err == nil { 132 | var n int 133 | n, err = f.readFile.Read(b) 134 | send = append(send, b[:n]...) 
135 | if len(send) >= chunkSize { 136 | if !f.reporter.PushLogChunk(f.name, send) { 137 | // push failed, wait for shutdown 138 | if running { 139 | <-f.shutdown 140 | } 141 | return 142 | } 143 | send = nil 144 | } 145 | } 146 | if len(send) > 0 { 147 | if !f.reporter.PushLogChunk(f.name, send) { 148 | // push failed, wait for shutdown 149 | if running { 150 | <-f.shutdown 151 | } 152 | return 153 | } 154 | } 155 | if err != io.EOF { 156 | log.Printf("Encountered error reading from %s log file: %s", f.name, err) 157 | sentry.Error(err, map[string]string{}) 158 | } 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /client/filelog/filelog_test.go: -------------------------------------------------------------------------------- 1 | package filelog 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/dropbox/changes-client/client/reporter" 13 | ) 14 | 15 | type logChunkReporter struct { 16 | logged chan string 17 | reporter.NoopReporter 18 | } 19 | 20 | func (lcr *logChunkReporter) PushLogChunk(name string, data []byte) bool { 21 | lcr.logged <- string(data) 22 | return true 23 | } 24 | 25 | type failReporter struct { 26 | reporter.NoopReporter 27 | } 28 | 29 | func (fr *failReporter) PushLogChunk(name string, data []byte) bool { 30 | return false 31 | } 32 | 33 | func newTempDir(t *testing.T) string { 34 | tempdir, err := ioutil.TempDir("", "filelog_test") 35 | require.NoError(t, err) 36 | return tempdir 37 | } 38 | 39 | func newLog(t *testing.T, flushDelay time.Duration, testDir string) *FileLog { 40 | log, err := NewWithOptions("1", "infralog", flushDelay, testDir) 41 | require.NoError(t, err) 42 | return log 43 | } 44 | 45 | func TestCreatesTempDir(t *testing.T) { 46 | tempdir := newTempDir(t) 47 | defer os.RemoveAll(tempdir) 48 | // will fail if this directory isn't created 49 | log := newLog(t, 0, 
filepath.Join(tempdir, "doesnt_exist")) 50 | log.Shutdown() 51 | } 52 | 53 | func TestWriteAfterReporter(t *testing.T) { 54 | tempdir := newTempDir(t) 55 | defer os.RemoveAll(tempdir) 56 | log := newLog(t, 0, tempdir) 57 | 58 | msg := "foo" 59 | reporter := &logChunkReporter{logged: make(chan string)} 60 | log.StartReporting(reporter) 61 | log.Write([]byte(msg)) 62 | require.Equal(t, msg, <-reporter.logged) 63 | log.Shutdown() 64 | } 65 | 66 | func TestWriteBeforeReporter(t *testing.T) { 67 | tempdir := newTempDir(t) 68 | defer os.RemoveAll(tempdir) 69 | log := newLog(t, 0, tempdir) 70 | 71 | bufferMsg1 := "foo" 72 | bufferMsg2 := "bar" 73 | noBufferMsg := "baz" 74 | reporter := &logChunkReporter{logged: make(chan string)} 75 | log.Write([]byte(bufferMsg1)) 76 | log.Write([]byte(bufferMsg2)) 77 | 78 | log.StartReporting(reporter) 79 | require.Equal(t, bufferMsg1+bufferMsg2, <-reporter.logged) 80 | 81 | log.Write([]byte(noBufferMsg)) 82 | require.Equal(t, noBufferMsg, <-reporter.logged) 83 | log.Shutdown() 84 | } 85 | 86 | func TestDelay(t *testing.T) { 87 | const delay = 20 * time.Millisecond 88 | tempdir := newTempDir(t) 89 | defer os.RemoveAll(tempdir) 90 | log := newLog(t, delay, tempdir) 91 | 92 | msg := "foo" 93 | reporter := &logChunkReporter{logged: make(chan string, 1)} 94 | log.StartReporting(reporter) 95 | log.Write([]byte(msg)) 96 | select { 97 | case <-reporter.logged: 98 | t.Fatalf("Should be waiting %s to log", delay) 99 | case <-time.After(delay / 2): 100 | } 101 | require.Equal(t, msg, <-reporter.logged) 102 | log.Shutdown() 103 | } 104 | 105 | func TestShutdownFlushes(t *testing.T) { 106 | tempdir := newTempDir(t) 107 | defer os.RemoveAll(tempdir) 108 | log := newLog(t, 1*time.Hour, tempdir) 109 | 110 | msg := "foo" 111 | reporter := &logChunkReporter{logged: make(chan string, 1)} 112 | log.StartReporting(reporter) 113 | log.Write([]byte(msg)) 114 | log.Shutdown() 115 | require.Equal(t, msg, <-reporter.logged) 116 | } 117 | 118 | func 
TestLateWrite(t *testing.T) { 119 | tempdir := newTempDir(t) 120 | defer os.RemoveAll(tempdir) 121 | log := newLog(t, 0, tempdir) 122 | 123 | log.Shutdown() 124 | _, err := log.Write([]byte("foo")) 125 | require.NoError(t, err) 126 | } 127 | 128 | func TestShutdownWaitsForSend(t *testing.T) { 129 | tempdir := newTempDir(t) 130 | defer os.RemoveAll(tempdir) 131 | log := newLog(t, 0, tempdir) 132 | rendez := make(chan bool) 133 | reporter := &logChunkReporter{logged: make(chan string)} 134 | log.StartReporting(reporter) 135 | 136 | didWait := false 137 | go func() { 138 | <-rendez 139 | _, err := log.Write([]byte("foo")) 140 | require.NoError(t, err) 141 | time.Sleep(30 * time.Millisecond) 142 | // if log.Shutdown() isn't waiting for the reporter to finish (and send to reporter.logged) 143 | // we should get a race detection and/or didWait will be false 144 | didWait = true 145 | <-reporter.logged 146 | }() 147 | rendez <- true 148 | time.Sleep(20 * time.Millisecond) 149 | log.Shutdown() 150 | require.True(t, didWait) 151 | } 152 | 153 | func TestStartReportingShutdownRace(t *testing.T) { 154 | tempdir := newTempDir(t) 155 | defer os.RemoveAll(tempdir) 156 | log := newLog(t, 0, tempdir) 157 | reporter := &logChunkReporter{logged: make(chan string, 1)} 158 | 159 | go log.StartReporting(reporter) 160 | log.Shutdown() 161 | } 162 | 163 | func TestStartReportingAfterShutdown(t *testing.T) { 164 | tempdir := newTempDir(t) 165 | defer os.RemoveAll(tempdir) 166 | log := newLog(t, 0, tempdir) 167 | reporter := &logChunkReporter{logged: make(chan string, 1)} 168 | 169 | log.Shutdown() 170 | log.Shutdown() 171 | log.StartReporting(reporter) 172 | require.Nil(t, log.reporter) 173 | } 174 | 175 | func TestShutdownRace(t *testing.T) { 176 | tempdir := newTempDir(t) 177 | defer os.RemoveAll(tempdir) 178 | log := newLog(t, 0, tempdir) 179 | reporter := &logChunkReporter{logged: make(chan string, 1)} 180 | log.StartReporting(reporter) 181 | 182 | go log.Shutdown() 183 | 
log.Shutdown() 184 | } 185 | 186 | func TestFailedReporter(t *testing.T) { 187 | tempdir := newTempDir(t) 188 | defer os.RemoveAll(tempdir) 189 | log := newLog(t, 0, tempdir) 190 | log.Write([]byte("foo")) 191 | reporter := &failReporter{} 192 | log.StartReporting(reporter) 193 | log.Shutdown() 194 | } 195 | 196 | func TestNoConflict(t *testing.T) { 197 | tempdir := newTempDir(t) 198 | defer os.RemoveAll(tempdir) 199 | log1, err1 := NewWithOptions("1", "infralog", 0, tempdir) 200 | require.NoError(t, err1) 201 | log1.Shutdown() 202 | log2, err2 := NewWithOptions("1", "infralog", 0, tempdir) 203 | require.NoError(t, err2) 204 | log2.Shutdown() 205 | } 206 | -------------------------------------------------------------------------------- /client/log.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "io" 9 | "log" 10 | "time" 11 | ) 12 | 13 | // After this many bytes are buffered, the buffered log data will be flushed. 14 | var byteFlushThreshold = flag.Int("log_chunk_size", 40960, "Size of log chunks to send to http server") 15 | 16 | // After this much time has elapsed, buffered log data will be flushed. 17 | const timeFlushThreshold = 4 * time.Second 18 | 19 | type Log struct { 20 | chunkchan chan []byte 21 | closed chan struct{} 22 | } 23 | 24 | type LogLine struct { 25 | line []byte 26 | err error 27 | } 28 | 29 | func NewLog() *Log { 30 | return &Log{ 31 | chunkchan: make(chan []byte), 32 | closed: make(chan struct{}), 33 | } 34 | } 35 | 36 | func (l *Log) Close() { 37 | close(l.closed) 38 | } 39 | 40 | // Sends the payload to the log, blocking until it is handled, and 41 | // returning an error only if it can't be (such as after 42 | // the log is closed). 43 | func (l *Log) write(payload []byte) error { 44 | log.Print(string(payload)) 45 | select { 46 | case <-l.closed: 47 | // TODO: Too noisy? 
48 | log.Printf("WRITE AFTER CLOSE: %s", payload) 49 | return errors.New("Write after close") 50 | case l.chunkchan <- payload: 51 | return nil 52 | } 53 | } 54 | 55 | // Writes the payload (with a newline appended) to the console, and 56 | // uses Write to send it to the log. 57 | func (l *Log) Writeln(payload string) error { 58 | e := l.write([]byte(payload + "\n")) 59 | return e 60 | } 61 | 62 | // Repeatedly calls GetChunk() until Close is called. 63 | // Mostly useful for tests. 64 | func (l *Log) Drain() { 65 | for _, ok := l.GetChunk(); ok; _, ok = l.GetChunk() { 66 | } 67 | } 68 | 69 | // Returns the next log chunk, or a nil slice and false if 70 | // Close was called. 71 | func (l *Log) GetChunk() ([]byte, bool) { 72 | select { 73 | case ch := <-l.chunkchan: 74 | return ch, true 75 | case <-l.closed: 76 | return nil, false 77 | } 78 | } 79 | 80 | // Printf calls l.Writeln to print to the log. Arguments are handled in 81 | // the manner of fmt.Printf. 82 | // The output is guaranteed to be newline-terminated. 83 | func (l *Log) Printf(format string, v ...interface{}) error { 84 | return l.Writeln(fmt.Sprintf(format, v...)) 85 | } 86 | 87 | func (l *Log) WriteStream(pipe io.Reader) { 88 | lines := newLogLineReader(pipe) 89 | 90 | finished := false 91 | for !finished { 92 | var payload []byte 93 | timeLimit := time.After(timeFlushThreshold) 94 | 95 | for len(payload) < *byteFlushThreshold { 96 | var logLine *LogLine 97 | timeLimitExceeded := false 98 | 99 | select { 100 | case logLine = <-lines: 101 | case <-timeLimit: 102 | timeLimitExceeded = true 103 | } 104 | 105 | if timeLimitExceeded { 106 | break 107 | } 108 | 109 | payload = append(payload, logLine.line...) 110 | if logLine.err == io.EOF { 111 | finished = true 112 | break 113 | } 114 | 115 | if logLine.err != nil { 116 | finished = true 117 | payload = append(payload, logLine.err.Error()...) 
118 | break 119 | } 120 | } 121 | 122 | if len(payload) > 0 { 123 | l.write(payload) 124 | } 125 | } 126 | } 127 | 128 | func newLogLineReader(pipe io.Reader) <-chan *LogLine { 129 | r := bufio.NewReader(pipe) 130 | ch := make(chan *LogLine) 131 | 132 | go func() { 133 | for { 134 | line, err := r.ReadBytes('\n') 135 | l := &LogLine{line: line, err: err} 136 | ch <- l 137 | 138 | if err != nil { 139 | return 140 | } 141 | } 142 | }() 143 | 144 | return ch 145 | } 146 | -------------------------------------------------------------------------------- /client/log_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestWriteStream(t *testing.T) { 11 | flag.Set("log_chunk_size", "3") 12 | 13 | in := []byte("aaa\naaa\naaa\n") 14 | log := NewLog() 15 | 16 | go func() { 17 | log.WriteStream(bytes.NewReader(in)) 18 | log.Close() 19 | }() 20 | 21 | cnt := 0 22 | for _, ok := log.GetChunk(); ok; _, ok = log.GetChunk() { 23 | cnt++ 24 | } 25 | 26 | if cnt != 3 { 27 | t.Fail() 28 | } 29 | } 30 | 31 | func TestWriteln(t *testing.T) { 32 | in := "aaa\naaa\naaa\n" 33 | log := NewLog() 34 | 35 | go func() { 36 | log.Writeln(in) 37 | log.Close() 38 | }() 39 | 40 | var out [][]byte 41 | for c, ok := log.GetChunk(); ok; c, ok = log.GetChunk() { 42 | out = append(out, c) 43 | } 44 | 45 | if len(out) != 1 { 46 | t.Fail() 47 | } 48 | 49 | if bytes.Equal(out[0], []byte(in)) { 50 | t.Fail() 51 | } 52 | } 53 | 54 | func drain(l *Log) string { 55 | var out []byte 56 | for c, ok := l.GetChunk(); ok; c, ok = l.GetChunk() { 57 | out = append(out, c...) 
58 | } 59 | return string(out) 60 | } 61 | 62 | func TestPrintf(t *testing.T) { 63 | log := NewLog() 64 | 65 | const expected = "Hello 4 Worlds!\n" 66 | go func() { 67 | log.Printf("Hello %d %s!", 4, "Worlds") 68 | log.Close() 69 | }() 70 | 71 | result := drain(log) 72 | if result != expected { 73 | t.Fatalf("Expected %q, got %q", expected, result) 74 | } 75 | } 76 | 77 | func TestLateSend(t *testing.T) { 78 | log := NewLog() 79 | log.Close() 80 | e := log.Writeln("Hello!") 81 | if e == nil { 82 | t.Fatalf("Expected error on Writeln") 83 | } 84 | } 85 | 86 | func TestCloseUnblocks(t *testing.T) { 87 | log := NewLog() 88 | rendez := make(chan bool) 89 | go func() { 90 | <-rendez 91 | t.Log(log.Writeln("Late")) 92 | rendez <- true 93 | }() 94 | // Racy validation, but make sure that the writer can run before we close 95 | rendez <- true 96 | // Sleep here, because we'd like the Writeln call in the other goroutine to be 97 | // blocked when we call Close. We can't easily guarantee it, but with rendez and 98 | // the sleep, it's really likely. 99 | time.Sleep(20 * time.Millisecond) 100 | log.Close() 101 | <-rendez 102 | } 103 | -------------------------------------------------------------------------------- /client/metrics.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "log" 5 | "time" 6 | ) 7 | 8 | // type for recording metrics we might want to report (e.g. to Changes) 9 | type Metrics map[string]float64 10 | 11 | // convenience object for timing functions/sections. 12 | // See StartTimer() and Record() for usage. 
13 | type Timer struct { 14 | startTime time.Time 15 | m Metrics 16 | } 17 | 18 | // set the duration for the given key 19 | func (m Metrics) SetDuration(key string, value time.Duration) { 20 | log.Printf("==> %q = %s", key, value) 21 | m[key] = value.Seconds() 22 | } 23 | 24 | // calculate duration since `start` and use that as duration for given key 25 | func (m Metrics) SetDurationSince(key string, start time.Time) time.Duration { 26 | dur := time.Since(start) 27 | m.SetDuration(key, dur) 28 | return dur 29 | } 30 | 31 | // return true if there are no actual metrics recorded 32 | func (m Metrics) Empty() bool { 33 | return len(m) == 0 34 | } 35 | 36 | // create and return a timer starting now 37 | func (m Metrics) StartTimer() Timer { 38 | return Timer{time.Now(), m} 39 | } 40 | 41 | // record a metric with the given key of the duration since this timer was created 42 | func (t Timer) Record(key string) time.Duration { 43 | return t.m.SetDurationSince(key, t.startTime) 44 | } 45 | -------------------------------------------------------------------------------- /client/reporter/defaultreporter.go: -------------------------------------------------------------------------------- 1 | package reporter 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "io" 9 | "log" 10 | "mime/multipart" 11 | "net/http" 12 | "os" 13 | "path/filepath" 14 | "time" 15 | 16 | "github.com/dropbox/changes-client/client" 17 | "github.com/dropbox/changes-client/common/sentry" 18 | ) 19 | 20 | var ( 21 | // The size of the payload queue. Once it reaches this size, 22 | // all writes (and thus all of the exported functions from this 23 | // module) will become blocking. 24 | maxPendingReports = 64 25 | 26 | // Maximum number of times we retry a payload until we give up 27 | // and panic. 28 | // 29 | // XXX the panic occurs during the publish goroutine, which might 30 | // not be well characterized for properly handling the error. 
31 | numPublishRetries = 8 32 | 33 | // How long we wait before retrying a payload. We always wait 34 | // this amount of time so the name here is a bit of a misnomer. 35 | backoffTimeMs = 1000 36 | ) 37 | 38 | // With each reporter there is a goroutine associated with it that 39 | // listens to PublishChannel and shutdownChannel, publishing all 40 | // data from PublishChannel to the publishUri for the jobstepID 41 | // associated with the current build. Sending any information 42 | // to shutdownChannel causes the goroutine to stop. 43 | // 44 | // Notably this means that all of the methods in this module 45 | // are asynchronous and as a result there is a delay between 46 | // them successfully finishing and Changes actually acknowledging 47 | // them at the endpoint. More importantly, however, because the 48 | // requests are sent in a separate goroutine, the methods here may 49 | // succeed even when the endpoing requests fail. 50 | type DefaultReporter struct { 51 | // Note that this is not safe to send to after Shutdown() is called. 52 | PublishChannel chan ReportPayload 53 | jobstepID string 54 | publishUri string 55 | shutdownChannel chan struct{} 56 | } 57 | 58 | // All data that goes to the server is encompassed in a payload. 59 | type ReportPayload struct { 60 | Path string 61 | // A map of fields to their values. Note that the date field 62 | // will be automatically set when the data is sent. 63 | Data map[string]string 64 | Filename string 65 | } 66 | 67 | // Utility function for posting a request to a uri. The parameters 68 | // here, which more or less correspond to ReportPayload.Data, 69 | // are serialized to form the request body. The body is encoded 70 | // as a MIME multipart (see RFC 2338). 71 | // 72 | // The file is also added a as field in the request body. 
func httpPost(uri string, params map[string]string, file string) (resp *http.Response, err error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)

	// Serialize each parameter as an ordinary form field.
	for key, val := range params {
		if err = writer.WriteField(key, val); err != nil {
			log.Printf("[reporter] Couldn't write field %s", key)
			return nil, err
		}
	}

	if len(file) > 0 {
		handle, err := os.Open(file)
		if err != nil {
			return nil, err
		}
		// BUG FIX: the handle was previously never closed, leaking one file
		// descriptor per uploaded artifact.
		defer handle.Close()

		// If the caller didn't supply a "name" field, default it to the
		// file's base name.
		if _, ok := params["name"]; !ok {
			if err := writer.WriteField("name", filepath.Base(file)); err != nil {
				return nil, err
			}
		}

		fileField, err := writer.CreateFormFile("file", filepath.Base(file))
		if err != nil {
			return nil, err
		}

		if _, err := io.Copy(fileField, handle); err != nil {
			return nil, err
		}
	}

	// Closing the writer appends the terminating multipart boundary; an
	// error here means a truncated body, so don't discard it (the error
	// was previously ignored with `_ =`).
	if err := writer.Close(); err != nil {
		return nil, err
	}

	resp, err = http.Post(uri, writer.FormDataContentType(), body)
	if err != nil {
		return nil, err
	}

	// Close the Body channel immediately as we don't use it
	// and this loop can stay open for an extremely long period
	// of time.
	//
	// NOTE(review): callers that try to read resp.Body after this call
	// (e.g. for error diagnostics) will find it already closed.
	resp.Body.Close()

	return resp, nil
}

// Utility method for sending a payload. This wraps httpPost in a framework
// nicer for the Reporter itself, as it turns the ReportPayload into its
// associated params (which corresponds to its data). We also attempt
// httpPost multiple times in order to account for flakiness in the
// network connection. This function is synchronous.
130 | func (r *DefaultReporter) SendPayload(rp ReportPayload) error { 131 | var ( 132 | resp *http.Response 133 | err error 134 | status string 135 | ) 136 | 137 | path := r.publishUri + rp.Path 138 | if rp.Data == nil { 139 | rp.Data = make(map[string]string) 140 | } 141 | 142 | rp.Data["date"] = time.Now().UTC().Format("2006-01-02T15:04:05.0Z") 143 | for tryCnt := 1; tryCnt <= numPublishRetries; tryCnt++ { 144 | log.Printf("[reporter] POST %s try: %d", path, tryCnt) 145 | resp, err = httpPost(path, rp.Data, rp.Filename) 146 | 147 | if resp != nil { 148 | status = resp.Status 149 | } else { 150 | status = "-1" 151 | } 152 | 153 | if resp != nil && resp.StatusCode/100 == 2 { 154 | break 155 | } 156 | 157 | var errmsg string 158 | if err != nil { 159 | errmsg = err.Error() 160 | } else { 161 | // If there wasn't an IO error, use the response body as the error message. 162 | var bodyData bytes.Buffer 163 | if _, e := bodyData.ReadFrom(resp.Body); e != nil { 164 | log.Printf("[reporter] Error reading POST %s response body: %s", path, e) 165 | } 166 | errmsg = bodyData.String() 167 | if len(errmsg) > 140 { 168 | // Keep it a reasonable length. 169 | errmsg = errmsg[:137] + "..." 170 | } 171 | } 172 | log.Printf("[reporter] POST %s failed, try: %d, resp: %s, err: %s", 173 | path, tryCnt, status, errmsg) 174 | 175 | /* We are unable to publish to the endpoint. 
176 | * Fail fast and let the above layers handle the outage */ 177 | if tryCnt == numPublishRetries { 178 | return fmt.Errorf("reporter couldn't connect to publish endpoint %s; %s", path, errmsg) 179 | } 180 | log.Printf("[reporter] Sleep for %d ms", backoffTimeMs) 181 | time.Sleep(time.Duration(backoffTimeMs) * time.Millisecond) 182 | } 183 | return nil 184 | } 185 | 186 | // Continually listens to the publish channel and sends the payloads 187 | func transportSend(r *DefaultReporter) { 188 | for rp := range r.PublishChannel { 189 | r.SendPayload(rp) 190 | } 191 | r.shutdownChannel <- struct{}{} 192 | } 193 | 194 | func (r *DefaultReporter) JobstepAPIPath() string { 195 | return "/jobsteps/" + r.jobstepID + "/" 196 | } 197 | 198 | func (r *DefaultReporter) Init(c *client.Config) { 199 | log.Printf("[reporter] Construct reporter with publish uri: %s", c.Server) 200 | r.publishUri = c.Server 201 | r.shutdownChannel = make(chan struct{}) 202 | r.jobstepID = c.JobstepID 203 | r.PublishChannel = make(chan ReportPayload, maxPendingReports) 204 | // Initialize the goroutine that actually sends the requests. 205 | go transportSend(r) 206 | } 207 | 208 | // Close the publish and shutdown channels, which causes the inner goroutines to 209 | // terminate, thus cleaning up what is created by Init. 
210 | func (r *DefaultReporter) Shutdown() { 211 | close(r.PublishChannel) 212 | <-r.shutdownChannel 213 | close(r.shutdownChannel) 214 | log.Print("[reporter] Shutdown complete") 215 | } 216 | 217 | func (r *DefaultReporter) PushSnapshotImageStatus(imgID string, status string) error { 218 | return r.SendPayload(ReportPayload{ 219 | Path: "/snapshotimages/" + imgID + "/", 220 | Data: map[string]string{ 221 | "status": status, 222 | }, 223 | }) 224 | } 225 | 226 | func (r *DefaultReporter) ReportMetrics(metrics client.Metrics) { 227 | if metrics.Empty() { 228 | return 229 | } 230 | data, err := json.Marshal(metrics) 231 | if err != nil { 232 | log.Printf("[reporter] Error encoding metrics: %s", err) 233 | sentry.Error(err, map[string]string{}) 234 | return 235 | } 236 | r.PublishChannel <- ReportPayload{Path: r.JobstepAPIPath(), Data: map[string]string{ 237 | "metrics": string(data), 238 | }} 239 | } 240 | 241 | func init() { 242 | flag.IntVar(&maxPendingReports, "max_pending_reports", 64, "Backlog size") 243 | flag.IntVar(&numPublishRetries, "num_publish_retries", 8, 244 | "Number of times to retry") 245 | flag.IntVar(&backoffTimeMs, "backoff_time_ms", 1000, 246 | "Time to wait between two consecutive retries") 247 | } 248 | -------------------------------------------------------------------------------- /client/reporter/defaultreporter_test.go: -------------------------------------------------------------------------------- 1 | package reporter 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/dropbox/changes-client/client" 10 | ) 11 | 12 | func TestSnapshotImageArtifact(t *testing.T) { 13 | const ( 14 | imageID = "testimage" 15 | status = "Active" 16 | ) 17 | err := fmt.Errorf("No request made") 18 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 19 | expectedPath := "/snapshotimages/" + imageID + "/" 20 | if r.URL.Path != expectedPath { 21 | err = 
fmt.Errorf("Incorrect path, expected %q got %q", expectedPath, r.URL.Path) 22 | } else if r.FormValue("status") != status { 23 | err = fmt.Errorf("Incorrect status, expected %q got %q", status, r.FormValue("status")) 24 | } else { 25 | err = nil 26 | } 27 | })) 28 | defer server.Close() 29 | 30 | r := DefaultReporter{} 31 | r.Init(&client.Config{Server: server.URL}) 32 | r.PushSnapshotImageStatus(imageID, status) 33 | 34 | // this won't return until the snapshot image status has been pushed 35 | // (or we've given up from too many retries) 36 | r.Shutdown() 37 | 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /client/reporter/noopreporter.go: -------------------------------------------------------------------------------- 1 | package reporter 2 | 3 | import ( 4 | "github.com/dropbox/changes-client/client" 5 | "github.com/dropbox/changes-client/client/adapter" 6 | ) 7 | 8 | type NoopReporter struct{} 9 | 10 | func (noop *NoopReporter) Init(_ *client.Config) {} 11 | func (noop *NoopReporter) PublishArtifacts(_ client.ConfigCmd, _ adapter.Adapter, _ *client.Log) error { 12 | return nil 13 | } 14 | func (noop *NoopReporter) PushCommandOutput(_, _ string, _ int, _ []byte) {} 15 | func (noop *NoopReporter) PushCommandStatus(_, _ string, _ int) {} 16 | func (noop *NoopReporter) PushJobstepStatus(_, _ string) {} 17 | func (noop *NoopReporter) PushLogChunk(_ string, _ []byte) bool { return true } 18 | func (noop *NoopReporter) PushSnapshotImageStatus(_, _ string) error { return nil } 19 | func (noop *NoopReporter) ReportMetrics(_ client.Metrics) {} 20 | func (noop *NoopReporter) Shutdown() {} 21 | 22 | var _ Reporter = (*NoopReporter)(nil) 23 | -------------------------------------------------------------------------------- /client/reporter/registry.go: -------------------------------------------------------------------------------- 1 | package reporter 2 | 3 | import "fmt" 4 | 5 | 
var registry = make(Registry) 6 | 7 | type Registry map[string]func() Reporter 8 | 9 | func (r Registry) register(name string, ctr func() Reporter) error { 10 | r[name] = ctr 11 | return nil 12 | } 13 | 14 | func (r Registry) names() []string { 15 | var res []string 16 | for k := range r { 17 | res = append(res, k) 18 | } 19 | return res 20 | } 21 | 22 | func Register(name string, ctr func() Reporter) error { 23 | return registry.register(name, ctr) 24 | } 25 | 26 | // Names returns the names of all registered Reporters. 27 | func Names() []string { 28 | return registry.names() 29 | } 30 | 31 | func Create(name string) (Reporter, error) { 32 | ctr, present := registry[name] 33 | if present { 34 | return ctr(), nil 35 | } 36 | return nil, fmt.Errorf("Reporter not found: %s", name) 37 | } 38 | -------------------------------------------------------------------------------- /client/reporter/reporter.go: -------------------------------------------------------------------------------- 1 | package reporter 2 | 3 | import ( 4 | "github.com/dropbox/changes-client/client" 5 | "github.com/dropbox/changes-client/client/adapter" 6 | ) 7 | 8 | // An abstract way of communicating things to Changes. 9 | type Reporter interface { 10 | Init(config *client.Config) 11 | Shutdown() 12 | 13 | // This function is not required to be synchronous, but it must do 14 | // something that will cause the artifacts to be published in the future. 15 | // In the case of Jenkins reporter builds, it moves the artifacts to a 16 | // location known by Jenkins, and considers these artifacts to be reported 17 | // as it relies on Jenkins to later pull those artifacts and send them to 18 | // Changes. Mesos sends the artifacts in a separate goroutine, so neither 19 | // reporter immediately publishes the artifacts. 20 | // 21 | // Jenkins and Mesos also take different approaches to detecting artifacts, 22 | // so this function is responsible for this as well. 
For Mesos builds, each 23 | // command lists the artifacts it is expected to return, but Jenkins builds 24 | // are expected to return any artifact within a folder. Since the detection 25 | // is different for each reporter and each detection relies on the adapter 26 | // to figure out where to actually look for files, a reference to the adapter 27 | // is required here. 28 | PublishArtifacts(cmd client.ConfigCmd, adapter adapter.Adapter, clientLog *client.Log) error 29 | 30 | PushSnapshotImageStatus(imgID string, status string) error 31 | 32 | // These are optional, implement empty functions to just not provide 33 | // this functionality as a reporter (ie, Jenkins). However it should 34 | // be noted that if no other machinery provides this functionality 35 | // (as is the case for Mesos builds) then these are absolutely required 36 | // as without them Changes will never receive updates. 37 | PushCommandStatus(cID string, status string, retCode int) 38 | PushCommandOutput(cID string, status string, retCode int, output []byte) 39 | PushJobstepStatus(status string, result string) 40 | // returns false if pushing the log chunk failed 41 | PushLogChunk(source string, payload []byte) bool 42 | 43 | // Report any collected metrics. This is optional, but can be used to e.g. 44 | // send metrics to Changes. 
45 | ReportMetrics(metrics client.Metrics) 46 | } 47 | -------------------------------------------------------------------------------- /cmd/blacklist-remove/blacklist-remove.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | 7 | "github.com/dropbox/changes-client/common/blacklist" 8 | ) 9 | 10 | func main() { 11 | flag.Parse() 12 | args := flag.Args() 13 | if len(args) != 1 { 14 | log.Fatalf("Must provide a yaml file to parse") 15 | } 16 | 17 | yamlFile := args[0] 18 | if err := blacklist.RemoveBlacklistedFiles(".", yamlFile); err != nil { 19 | // will exit non-zero and thus lead to an infra fail 20 | log.Fatalf("[blacklist] Error removing blacklisted files: %s", err) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /common/atomicflag/flag.go: -------------------------------------------------------------------------------- 1 | package atomicflag 2 | 3 | import "sync/atomic" 4 | 5 | type AtomicFlag struct { 6 | val uint32 7 | } 8 | 9 | func (af *AtomicFlag) Set(val bool) { 10 | var intToSet uint32 = 0 11 | if val { 12 | intToSet = 1 13 | } 14 | atomic.StoreUint32(&af.val, intToSet) 15 | } 16 | 17 | func (af *AtomicFlag) Get() bool { 18 | return atomic.LoadUint32(&af.val) != 0 19 | } 20 | -------------------------------------------------------------------------------- /common/atomicflag/flag_test.go: -------------------------------------------------------------------------------- 1 | package atomicflag 2 | 3 | import "testing" 4 | 5 | func TestInitialValue(t *testing.T) { 6 | x := &AtomicFlag{} 7 | 8 | if x.Get() { 9 | t.Error("Initial value of flag should be false") 10 | } 11 | } 12 | 13 | func TestSetFalse(t *testing.T) { 14 | x := &AtomicFlag{} 15 | 16 | x.Set(false) 17 | 18 | if x.Get() { 19 | t.Error("Value after setting false should be false") 20 | } 21 | } 22 | 23 | func TestSetTrue(t *testing.T) { 24 | x := 
&AtomicFlag{} 25 | 26 | x.Set(true) 27 | 28 | if !x.Get() { 29 | t.Error("Value after setting false should be true") 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /common/blacklist/blacklist.go: -------------------------------------------------------------------------------- 1 | package blacklist 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path/filepath" 7 | "time" 8 | 9 | "gopkg.in/yaml.v2" 10 | 11 | "github.com/dropbox/changes-client/common/scopedlogger" 12 | "strings" 13 | ) 14 | 15 | type changesLocalConfig struct { 16 | RemoveBlacklistFiles bool `yaml:"build.remove-blacklisted-files"` 17 | FileBlacklist []string `yaml:"build.file-blacklist"` 18 | } 19 | 20 | func parseYaml(filename string) (changesLocalConfig, error) { 21 | var config changesLocalConfig 22 | data, err := ioutil.ReadFile(filename) 23 | if err != nil { 24 | return config, err 25 | } 26 | err = yaml.Unmarshal(data, &config) 27 | return config, err 28 | } 29 | 30 | var blacklistLog = scopedlogger.ScopedLogger{Scope: "blacklist"} 31 | 32 | type blentry struct { 33 | // The full blacklist pattern. 34 | full string 35 | // The prefix of the blacklist pattern containing no meta-characters. 36 | plainPrefix string 37 | } 38 | 39 | type blacklistMatcher struct { 40 | entries []blentry 41 | } 42 | 43 | func newMatcher(entries []string) blacklistMatcher { 44 | blentries := make([]blentry, 0, len(entries)) 45 | for _, ent := range entries { 46 | const metaChars = `*[?\` 47 | plainEnd := strings.IndexAny(ent, metaChars) 48 | if plainEnd == -1 { 49 | plainEnd = len(ent) 50 | } 51 | blentries = append(blentries, blentry{plainPrefix: ent[:plainEnd], full: ent}) 52 | } 53 | return blacklistMatcher{blentries} 54 | } 55 | 56 | func (blm blacklistMatcher) Match(relpath string) (bool, error) { 57 | for _, pattern := range blm.entries { 58 | // Fast-path for the common case (/foo/bar/*); if there isn't 59 | // a prefix match, fnMatch can't match. 
60 | if !strings.HasPrefix(relpath, pattern.plainPrefix) { 61 | continue 62 | } 63 | if m, e := fnMatch(pattern.full, relpath); e != nil || m { 64 | return m, e 65 | } 66 | } 67 | return false, nil 68 | } 69 | 70 | // RemoveBlacklistedFiles parses the given yaml file and removes any blacklisted files in the yaml file from rootDir 71 | func RemoveBlacklistedFiles(rootDir string, yamlFile string) error { 72 | if _, err := os.Stat(yamlFile); os.IsNotExist(err) { 73 | // non-existent yaml file isn't an error 74 | blacklistLog.Printf("Project config doesn't exist. Not removing any files.") 75 | return nil 76 | } 77 | 78 | config, err := parseYaml(yamlFile) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | if !config.RemoveBlacklistFiles { 84 | blacklistLog.Printf("Build not configured to remove blacklisted files") 85 | return nil 86 | } 87 | 88 | if len(config.FileBlacklist) == 0 { 89 | blacklistLog.Printf("No blacklist entries.") 90 | return nil 91 | } 92 | 93 | blmatcher := newMatcher(config.FileBlacklist) 94 | walkStart := time.Now() 95 | total := 0 96 | var matches []string 97 | visit := func(path string, f os.FileInfo, err error) error { 98 | // error visiting this path 99 | if err != nil { 100 | blacklistLog.Printf("Error walking path %s: %s", path, err) 101 | return err 102 | } 103 | relpath, err := filepath.Rel(rootDir, path) 104 | if err != nil { 105 | return err 106 | } 107 | total++ 108 | if m, e := blmatcher.Match(relpath); e != nil { 109 | return e 110 | } else if m { 111 | matches = append(matches, path) 112 | } 113 | return nil 114 | } 115 | 116 | if err := filepath.Walk(rootDir, visit); err != nil { 117 | return err 118 | } 119 | 120 | blacklistLog.Printf("Examined %v files in %s", total, time.Since(walkStart)) 121 | blacklistLog.Printf("Removing %d files", len(matches)) 122 | for _, match := range matches { 123 | if fi, e := os.Stat(match); e != nil { 124 | // don't error if file doesn't exist (we might've e.g. 
removed it's underlying directory) 125 | if !os.IsNotExist(e) { 126 | return e 127 | } 128 | } else if fi.IsDir() { 129 | if e := os.RemoveAll(match); e != nil { 130 | return e 131 | } 132 | } else { 133 | if e := os.Remove(match); e != nil { 134 | return e 135 | } 136 | } 137 | } 138 | blacklistLog.Printf("Success") 139 | return nil 140 | } 141 | -------------------------------------------------------------------------------- /common/blacklist/blacklist_test.go: -------------------------------------------------------------------------------- 1 | package blacklist 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "testing" 10 | ) 11 | 12 | func sameslice(s1 []string, s2 []string) bool { 13 | if len(s1) != len(s2) { 14 | return false 15 | } 16 | for idx, elem := range s1 { 17 | if elem != s2[idx] { 18 | return false 19 | } 20 | } 21 | return true 22 | } 23 | 24 | func makeyaml(path string, remove bool) error { 25 | template := ` 26 | build.remove-blacklisted-files: %t 27 | build.file-blacklist: 28 | - dir1/* 29 | - dir2/dir3/* 30 | - dir2/other.txt 31 | - dir2/*/baz.py 32 | - "[!a-z].txt" 33 | - toplevelfile.txt 34 | - nonexistent.txt 35 | ` 36 | contents := fmt.Sprintf(template, remove) 37 | return ioutil.WriteFile(path, []byte(contents), 0777) 38 | } 39 | 40 | func newfile(root, name string) error { 41 | path := filepath.Join(root, name) 42 | // we use trailing slash to signify a directory 43 | if strings.HasSuffix(name, "/") { 44 | if e := os.MkdirAll(path, 0777); e != nil { 45 | return e 46 | } 47 | return nil 48 | } 49 | if filepath.Dir(path) != root { 50 | if e := os.MkdirAll(filepath.Dir(path), 0777); e != nil { 51 | return e 52 | } 53 | } 54 | return ioutil.WriteFile(path, []byte("test"), 0777) 55 | } 56 | 57 | func newfiles(root string, names ...string) error { 58 | for _, n := range names { 59 | if e := newfile(root, n); e != nil { 60 | return e 61 | } 62 | } 63 | return nil 64 | } 65 | 66 | func TestParseYaml(t 
*testing.T) { 67 | dirname, err := ioutil.TempDir("", "parseyaml") 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | defer os.RemoveAll(dirname) 72 | yamlfile := filepath.Join(dirname, "foo.yaml") 73 | if err = makeyaml(yamlfile, true); err != nil { 74 | t.Fatal(err) 75 | } 76 | config, err := parseYaml(yamlfile) 77 | if err != nil { 78 | t.Fatal(err) 79 | } 80 | if !config.RemoveBlacklistFiles { 81 | t.Error("Config incorrectly read RemoveBlacklistFiles as false") 82 | } 83 | 84 | expected := []string{"dir1/*", "dir2/dir3/*", "dir2/other.txt", "dir2/*/baz.py", "[!a-z].txt", "toplevelfile.txt", "nonexistent.txt"} 85 | if !sameslice(config.FileBlacklist, expected) { 86 | t.Errorf("Config incorrectly parsed blacklisted files. Actual: %v, Expected: %v", config.FileBlacklist, expected) 87 | } 88 | } 89 | 90 | func removeBlacklistFilesHelper(t *testing.T, tempDirName string, remove bool) { 91 | dirname, err := ioutil.TempDir("", tempDirName) 92 | if err != nil { 93 | t.Fatal(err) 94 | } 95 | defer os.RemoveAll(dirname) 96 | cwd, err := os.Getwd() 97 | if err != nil { 98 | t.Fatal(err) 99 | } 100 | os.Chdir(dirname) 101 | defer os.Chdir(cwd) 102 | yamlfile := filepath.Join(dirname, "foo.yaml") 103 | if err = makeyaml(yamlfile, remove); err != nil { 104 | t.Fatal(err) 105 | } 106 | 107 | dontMatchBlacklist := []string{"dir1/", "dir2/", "dir2/dir3/", "dir2/foo.txt", "foo/toplevelfile.txt", "a.txt"} 108 | matchBlacklist := []string{"dir1/foo.txt", "dir1/other/", "dir1/other/bar.txt", "dir2/dir3/baz.yaml", "dir2/other.txt", "dir2/foo/bar/baz.py", "0.txt", "toplevelfile.txt"} 109 | 110 | var shouldExist []string 111 | var shouldntExist []string 112 | if remove { 113 | shouldExist = dontMatchBlacklist 114 | shouldntExist = matchBlacklist 115 | } else { 116 | // if yaml file says not to remove blacklisted files everything should still exist 117 | shouldExist = append(dontMatchBlacklist, matchBlacklist...) 
118 | shouldntExist = []string{} 119 | } 120 | 121 | if err = newfiles(".", append(shouldExist, shouldntExist...)...); err != nil { 122 | t.Fatal(err) 123 | } 124 | 125 | err = RemoveBlacklistedFiles(".", "foo.yaml") 126 | if err != nil { 127 | t.Fatal(err) 128 | } 129 | 130 | for _, file := range shouldExist { 131 | if _, err := os.Stat(file); err != nil { 132 | if os.IsNotExist(err) { 133 | t.Errorf("File %s shouldn't have been removed but was", file) 134 | } else { 135 | t.Errorf("Error checking existence of %s: %s", file, err) 136 | } 137 | } 138 | } 139 | 140 | for _, file := range shouldntExist { 141 | if _, err := os.Stat(file); err != nil && !os.IsNotExist(err) { 142 | t.Errorf("Error checking non-existence of %s: %s", file, err) 143 | } else if err == nil { 144 | t.Errorf("File %s should have been removed but wasn't", file) 145 | } 146 | } 147 | } 148 | 149 | func TestRemoveBlacklistFilesTrue(t *testing.T) { 150 | removeBlacklistFilesHelper(t, "removeblacklistfilestrue", true) 151 | } 152 | 153 | func TestRemoveBlacklistFilesFalse(t *testing.T) { 154 | removeBlacklistFilesHelper(t, "removeblacklistfilesfalse", false) 155 | } 156 | 157 | func TestBlacklistNoYamlFile(t *testing.T) { 158 | dirname, err := ioutil.TempDir("", "blacklistnoyamlfile") 159 | if err != nil { 160 | t.Fatal(err) 161 | } 162 | defer os.RemoveAll(dirname) 163 | cwd, err := os.Getwd() 164 | if err != nil { 165 | t.Fatal(err) 166 | } 167 | os.Chdir(dirname) 168 | defer os.Chdir(cwd) 169 | err = RemoveBlacklistedFiles(".", "bar.yaml") 170 | if err != nil { 171 | t.Errorf("Encountered error when yaml file didn't exist: %s", err) 172 | } 173 | } 174 | 175 | func BenchmarkMatch(b *testing.B) { 176 | const matches = 4 177 | files := []string{ 178 | "rlfiltr/bootstrap/foo.txt", 179 | "go/src/fastcar/search/test.json", 180 | "ixvtn/pinsot/cabernet.xml", 181 | "dpkg/install_me.deb", 182 | "meatserver/meatserver/internal/dirty/olive.py", 183 | } 184 | matcher := newMatcher(bigPatternList) 185 | 
b.ResetTimer() 186 | for i := 0; i < b.N; i++ { 187 | matchcount := 0 188 | for _, fname := range files { 189 | if m, e := matcher.Match(fname); e != nil { 190 | panic(e) 191 | } else if m { 192 | matchcount++ 193 | } 194 | } 195 | if matchcount != matches { 196 | b.Fatalf("Expected %v matches, got %v", matches, matchcount) 197 | } 198 | } 199 | } 200 | 201 | // Blacklist that is remarkably similar to a large one in use in the wild. 202 | var bigPatternList = []string{ 203 | ".art_lib/*", 204 | "datalytics/*", 205 | "build_tools/*", 206 | "crime/*", 207 | "ci/iso/*", 208 | "ci/maps-*", 209 | "codesearch/*", 210 | "canardcenter/*", 211 | "configs/banddad/*", 212 | "configs/ci/*", 213 | "configs/config/global_abc.yaml", 214 | "configs/graphene-api-forte4/*", 215 | "configs/cribe-config/*", 216 | "configs/kaka/cfg_prod.yaml", 217 | "configs/kaka/config/*", 218 | "configs/kaka/supervisor.d/*", 219 | "configs/monitoring/*", 220 | "configs/register/abc_config_master.txt", 221 | "configs/register/abc_config_proxies.txt", 222 | "configs/register/abc_config_saves.txt", 223 | "configs/cribe/*", 224 | "configs/forte4_alerts/*", 225 | "cpp/blockvat/*", 226 | "cpp/fastcar/vacuut/*", 227 | "dpkg/*", 228 | "ixvtn.yaml", 229 | "ixvtn/allocation/*", 230 | "ixvtn/analytics/*", 231 | "ixvtn/analyticswebserver/*", 232 | "ixvtn/apx/*", 233 | "ixvtn/artifactory/*", 234 | "ixvtn/backups/*", 235 | "ixvtn/binbox/*", 236 | "ixvtn/blockvat/*", 237 | "ixvtn/gruemail/*", 238 | "ixvtn/volt/*", 239 | "ixvtn/BUILD", 240 | "ixvtn/capacity-dashboard/*", 241 | "ixvtn/cpnts/*", 242 | "ixvtn/celery/*", 243 | "ixvtn/ci/*", 244 | "ixvtn/commandcenter/*", 245 | "ixvtn/conman/*", 246 | "ixvtn/container/*", 247 | "ixvtn/contbun/*", 248 | "ixvtn/crashmash_service/*", 249 | "ixvtn/crons/*", 250 | "ixvtn/dashboards/*", 251 | "ixvtn/datastorks/*", 252 | "ixvtn/db/*", 253 | "ixvtn/dctools/*", 254 | "ixvtn/debshop/*", 255 | "ixvtn/desktop_notifier/*", 256 | "ixvtn/dns/*", 257 | "ixvtn/doptrack/*", 258 | 
"ixvtn/drraw/*", 259 | "ixvtn/dsh/*", 260 | "ixvtn/ecadmin/*", 261 | "ixvtn/mallstore/*", 262 | "ixvtn/email/*", 263 | "ixvtn/eventbot/*", 264 | "ixvtn/chexlog/*", 265 | "ixvtn/exits/*", 266 | "ixvtn/fio/*", 267 | "ixvtn/flanket/*", 268 | "ixvtn/flo/*", 269 | "ixvtn/ganglala/*", 270 | "ixvtn/code-actual/*", 271 | "ixvtn/git/*", 272 | "ixvtn/trouper/*", 273 | "ixvtn/gish/*", 274 | "ixvtn/hadroop/*", 275 | "ixvtn/snappypears/*", 276 | "ixvtn/haprolly/*", 277 | "ixvtn/hardware/*", 278 | "ixvtn/hegwig/*", 279 | "ixvtn/hermet/*", 280 | "ixvtn/hg/*", 281 | "ixvtn/hype/*", 282 | "ixvtn/hwmaint/*", 283 | "ixvtn/hyades/*", 284 | "ixvtn/install/*", 285 | "ixvtn/interviews/*", 286 | "ixvtn/irc/*", 287 | "ixvtn/mira/*", 288 | "ixvtn/kaka/*", 289 | "ixvtn/lifecycle/*", 290 | "ixvtn/mdb/*", 291 | "ixvtn/ramcached/*", 292 | "ixvtn/verot/*", 293 | "ixvtn/mibs/*", 294 | "ixvtn/misc/*", 295 | "ixvtn/mist/*", 296 | "ixvtn/mobilebot/*", 297 | "ixvtn/monit/*", 298 | "ixvtn/monitoring/*", 299 | "ixvtn/pysqlproxy/*", 300 | "ixvtn/nabios3/*", 301 | "ixvtn/net/*", 302 | "ixvtn/meninx/*", 303 | "ixvtn/rotserver/*", 304 | "ixvtn/nsot/*", 305 | "ixvtn/oncall/*", 306 | "ixvtn/opslog/*", 307 | "ixvtn/ops_server/*", 308 | "ixvtn/paperduly/*", 309 | "ixvtn/payments/*", 310 | "ixvtn/photosnot/*", 311 | "ixvtn/kingdom/*", 312 | "ixvtn/pollen/*", 313 | "ixvtn/pp/*", 314 | "ixvtn/presence/*", 315 | "ixvtn/presto/*", 316 | "ixvtn/proxyproxy/*", 317 | "ixvtn/puppet/*", 318 | "ixvtn/pinsot/*", 319 | "ixvtn/python/momra/*", 320 | "ixvtn/python/netdb/*", 321 | "ixvtn/python/syselg/*", 322 | "ixvtn/tabbotmq/*", 323 | "ixvtn/radar/*", 324 | "ixvtn/README", 325 | "ixvtn/redis/*", 326 | "ixvtn/reminders/*", 327 | "ixvtn/sbc_service/*", 328 | "ixvtn/scribe/*", 329 | "ixvtn/security/*", 330 | "ixvtn/sentry/*", 331 | "ixvtn/shortserver/*", 332 | "ixvtn/skybot/*", 333 | "ixvtn/soloma-server/*", 334 | "ixvtn/spark/*", 335 | "ixvtn/statsclerk/*", 336 | "ixvtn/system/*", 337 | "ixvtn/taskrunner/*", 338 | 
"ixvtn/task_worker/*", 339 | "ixvtn/tests/*", 340 | "ixvtn/texter/*", 341 | "ixvtn/thumbservice/*", 342 | "ixvtn/trac/*", 343 | "ixvtn/traffic/configs/*", 344 | "ixvtn/traffic/tests/*", 345 | "ixvtn/traffic/tools/*", 346 | "ixvtn/trapperkeeper/*", 347 | "ixvtn/utilization-dashboard/*", 348 | "ixvtn/venus/*", 349 | "ixvtn/verwatch/*", 350 | "ixvtn/wopiserver/*", 351 | "ixvtn/WORKSPACE", 352 | "ixvtn/maps/*", 353 | "fastcar/chef/*", 354 | "fastcar/cloudbin2/*", 355 | "fastcar/drtd/*", 356 | "fastcar/ipvs/*", 357 | "fastcar/kaka/bin/*", 358 | "fastcar/magic_mirror/configs/*", 359 | "fastcar/jc/build/*", 360 | "fastcar/jc/tests/*", 361 | "fastcar/noru/*", 362 | "fastcar/pp/*", 363 | "fastcar/proto/maps/*", 364 | "fastcar/racknrow/*", 365 | "fastcar/forte4/tools/singlenodesetup/*", 366 | "fastcar/maps/*", 367 | "go/src/fastcar/antenna/*", 368 | "go/src/fastcar/bandaid/*", 369 | "go/src/fastcar/build_tools/*", 370 | "go/src/fastcar/contbin/*", 371 | "go/src/fastcar/dbxinit/*", 372 | "go/src/fastcar/hack-week-recents/*", 373 | "go/src/fastcar/ipvs/*", 374 | "go/src/fastcar/isotester/*", 375 | "go/src/fastcar/jetstream/*", 376 | "go/src/fastcar/jc/bdb/*", 377 | "go/src/fastcar/jc/frontend/isotest.yaml", 378 | "go/src/fastcar/jc/frontend/lib/*", 379 | "go/src/fastcar/jc/frontend/jc_fe/*", 380 | "go/src/fastcar/jc/frontend/wrappers/*", 381 | "go/src/fastcar/jc/hdb/*", 382 | "go/src/fastcar/jc/hdb_scanner/*", 383 | "go/src/fastcar/jc/master/*", 384 | "go/src/fastcar/jc/mpzk/*", 385 | "go/src/fastcar/jc/osd/*", 386 | "go/src/fastcar/jc/reliability_sim/*", 387 | "go/src/fastcar/riviera/*", 388 | "go/src/fastcar/jc/size_estimator/*", 389 | "go/src/fastcar/jc/test_utils/*", 390 | "go/src/fastcar/jc/trash_inspector/*", 391 | "go/src/fastcar/jc/volmgr/*", 392 | "go/src/fastcar/jc/xzr/*", 393 | "go/src/fastcar/jc/xzv/*", 394 | "go/src/fastcar/jc/zfec/*", 395 | "go/src/fastcar/jc/zfec2/*", 396 | "go/src/fastcar/netflow/*", 397 | "go/src/fastcar/offline_indexer/*", 398 | 
"go/src/fastcar/s3/s3-proxy/*", 399 | "go/src/fastcar/cribe/*", 400 | "go/src/fastcar/cribe_shim/*", 401 | "go/src/fastcar/search/*", 402 | "go/src/fastcar/sonola/*", 403 | "go/src/fastcar/util/mock_rpc/*", 404 | "go/src/fastcar/forte4/*", 405 | "go/src/fastcar/maps/*", 406 | "java/*", 407 | "lint_plugins/*", 408 | "meatserver/meatserver/scripts/jc/*", 409 | "pip/*", 410 | "repo_migrations/*", 411 | "rust/*", 412 | "server-selenium.yaml", 413 | "server-static-analysis.yaml", 414 | "spark-submission/*", 415 | "static-analysis/*", 416 | "tests/fastcar/maps/*", 417 | "thirdparty/*", 418 | "rlfiltr/bootstrap/*", 419 | "rlfiltr/data/configs/remote_vms.yaml", 420 | "rlfiltr/puppet/modules/user/files/*", 421 | } 422 | -------------------------------------------------------------------------------- /common/blacklist/match.go: -------------------------------------------------------------------------------- 1 | package blacklist 2 | 3 | // fork of https://golang.org/src/path/filepath/match.go in which * matches separator (e.g. "/") characters 4 | // also uses ! instead of ^ to negate a character class (basically, tries to match Python fnmatch semantics) 5 | 6 | import ( 7 | "errors" 8 | "runtime" 9 | "strings" 10 | "unicode/utf8" 11 | ) 12 | 13 | // ErrBadPattern indicates a globbing pattern was malformed. 14 | var ErrBadPattern = errors.New("syntax error in pattern") 15 | 16 | // fnMatch reports whether name matches the shell file name pattern. 17 | // The pattern syntax is: 18 | // 19 | // pattern: 20 | // { term } 21 | // term: 22 | // '*' matches any sequence of characters 23 | // '?' matches any single character 24 | // '[' [ '!' 
] { character-range } ']'
//		            character class (must be non-empty)
//		c           matches character c (c != '*', '?', '\\', '[')
//		'\\' c      matches character c
//
//	character-range:
//		c           matches character c (c != '\\', '-', ']')
//		'\\' c      matches character c
//		lo '-' hi   matches character c for lo <= c <= hi
//
// fnMatch requires pattern to match all of name, not just a substring.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// On Windows, escaping is disabled. Instead, '\\' is treated as
// path separator.
//
func fnMatch(pattern, name string) (matched bool, err error) {
Pattern:
	for len(pattern) > 0 {
		var star bool
		var chunk string
		star, chunk, pattern = scanChunk(pattern)
		if star && chunk == "" {
			// Trailing * matches rest of string (including /)
			return true, nil
		}
		// Look for match at current position.
		t, ok, err := matchChunk(chunk, name)
		// if we're the last chunk, make sure we've exhausted the name
		// otherwise we'll give a false result even if we could still match
		// using the star
		if ok && (len(t) == 0 || len(pattern) > 0) {
			name = t
			continue
		}
		if err != nil {
			return false, err
		}
		if star {
			// Backtrack: retry the chunk after skipping 1..len(name) bytes,
			// letting the preceding star absorb them. Unlike the stdlib
			// filepath.Match this fork was taken from, the loop does NOT
			// stop at '/' — a '*' here intentionally matches separators.
			for i := 0; i < len(name); i++ {
				t, ok, err := matchChunk(chunk, name[i+1:])
				if ok {
					// if we're the last chunk, make sure we exhausted the name
					if len(pattern) == 0 && len(t) > 0 {
						continue
					}
					name = t
					continue Pattern
				}
				if err != nil {
					return false, err
				}
			}
		}
		return false, nil
	}
	// Pattern exhausted: a match only if the name was fully consumed too.
	return len(name) == 0, nil
}

// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
	// Any run of leading stars collapses into a single star flag.
	for len(pattern) > 0 && pattern[0] == '*' {
		pattern = pattern[1:]
		star = true
	}
	inrange := false
	var i int
Scan:
	for i = 0; i < len(pattern); i++ {
		switch pattern[i] {
		case '\\':
			if runtime.GOOS != "windows" {
				// error check handled in matchChunk: bad pattern.
				if i+1 < len(pattern) {
					i++
				}
			}
		case '[':
			inrange = true
		case ']':
			inrange = false
		case '*':
			// A star inside a [...] class is a literal; outside, it ends the chunk.
			if !inrange {
				break Scan
			}
		}
	}
	return star, pattern[0:i], pattern[i:]
}

// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
	for len(chunk) > 0 {
		if len(s) == 0 {
			return
		}
		switch chunk[0] {
		case '[':
			// character class
			r, n := utf8.DecodeRuneInString(s)
			s = s[n:]
			chunk = chunk[1:]
			// We can't end right after '[', we're expecting at least
			// a closing bracket and possibly a '!' (this fork negates
			// classes with '!' rather than '^', per Python fnmatch).
			if len(chunk) == 0 {
				err = ErrBadPattern
				return
			}
			// possibly negated
			negated := chunk[0] == '!'
			if negated {
				chunk = chunk[1:]
			}
			// parse all ranges; a ']' as the very first class character is a
			// literal (nrange == 0), matching shell semantics.
			match := false
			nrange := 0
			for {
				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
					chunk = chunk[1:]
					break
				}
				var lo, hi rune
				if lo, chunk, err = getEsc(chunk); err != nil {
					return
				}
				hi = lo
				if chunk[0] == '-' {
					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
						return
					}
				}
				if lo <= r && r <= hi {
					match = true
				}
				nrange++
			}
			if match == negated {
				return
			}

		case '?':
			// '?' consumes exactly one rune, whatever it is.
			_, n := utf8.DecodeRuneInString(s)
			s = s[n:]
			chunk = chunk[1:]

		case '\\':
			if runtime.GOOS != "windows" {
				chunk = chunk[1:]
				if len(chunk) == 0 {
					err = ErrBadPattern
					return
				}
			}
			// Fall through so the escaped character is compared literally.
			fallthrough

		default:
			if chunk[0] != s[0] {
				return
			}
			s = s[1:]
			chunk = chunk[1:]
		}
	}
	return s, true, nil
}

// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
		err = ErrBadPattern
		return
	}
	if chunk[0] == '\\' && runtime.GOOS != "windows" {
		chunk = chunk[1:]
		if len(chunk) == 0 {
			err = ErrBadPattern
			return
		}
	}
	r, n := utf8.DecodeRuneInString(chunk)
	if r == utf8.RuneError && n == 1 {
		err = ErrBadPattern
	}
	nchunk = chunk[n:]
	if len(nchunk) == 0 {
		// A class character must be followed by at least the closing ']'.
		err = ErrBadPattern
	}
	return
}

// hasMeta reports whether path contains any of the magic characters
// recognized by fnMatch.
func hasMeta(path string) bool {
	// TODO(niemeyer): Should other magic characters be added here?
	return strings.IndexAny(path, "*?[") >= 0
}
--------------------------------------------------------------------------------
/common/glob/glob.go:
--------------------------------------------------------------------------------
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package glob

import (
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/dropbox/changes-client/common/sentry"
)

// GlobTreeRegular walks root looking for regular (non-dir, non-device) files
// that match the provided glob patterns and returns them in matches.
// If a pattern contains a /, it is matched against the path relative to root
// (ignoring leading slashes). Otherwise it is matched only against the basename.
// Any non-regular files that match will be returned in skipped.
// If there is an error, matches and skipped may be incomplete or empty.
22 | func GlobTreeRegular(root string, patterns []string) (matches []string, skipped []string, err error) { 23 | visit := func(path string, f os.FileInfo, err error) error { 24 | // error in visiting this path 25 | if err != nil { 26 | log.Printf("[glob] Error walking path %s: %s", path, err) 27 | sentry.Error(err, map[string]string{}) 28 | // log to sentry but continue walking the tree 29 | return nil 30 | } 31 | basename := filepath.Base(path) 32 | relpath, err := filepath.Rel(root, path) 33 | if err != nil { 34 | return err 35 | } 36 | for _, pattern := range patterns { 37 | strToMatch := basename 38 | if strings.Contains(pattern, "/") { 39 | if strings.HasPrefix(pattern, "/") { 40 | pattern = pattern[1:] 41 | } 42 | strToMatch = relpath 43 | } 44 | if m, e := filepath.Match(pattern, strToMatch); e != nil { 45 | return e 46 | } else if m { 47 | if f == nil || !f.Mode().IsRegular() { 48 | skipped = append(skipped, path) 49 | } else { 50 | matches = append(matches, path) 51 | } 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | err = filepath.Walk(root, visit) 58 | return matches, skipped, err 59 | } 60 | -------------------------------------------------------------------------------- /common/glob/glob_test.go: -------------------------------------------------------------------------------- 1 | package glob 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "syscall" 9 | "testing" 10 | ) 11 | 12 | func newfile(root, name string) error { 13 | path := filepath.Join(root, name) 14 | if filepath.Dir(path) != root { 15 | if e := os.MkdirAll(filepath.Dir(path), 0777); e != nil { 16 | return e 17 | } 18 | } 19 | return ioutil.WriteFile(path, []byte("test"), 0777) 20 | } 21 | 22 | func newfiles(root string, names ...string) error { 23 | for _, n := range names { 24 | if e := newfile(root, n); e != nil { 25 | return e 26 | } 27 | } 28 | return nil 29 | } 30 | 31 | func TestGlobRegular(t *testing.T) { 32 | dirname, e := ioutil.TempDir("", "globtree") 33 
| if e != nil { 34 | t.Fatal(e) 35 | } 36 | defer os.RemoveAll(dirname) 37 | if e := newfiles(dirname, "base.xml", "foo/test.xml", "coverage.xml/ohmy.txt", "bar/ohsnap.xml", "tests.json", "foo/tests.json", "foo/bar/weird.json", "foo/bar/baz/weird.json", "bar/foo/weird.json"); e != nil { 38 | t.Fatal(e) 39 | } 40 | // TODO: Move this to a build tagged file once we have builders that don't support Mkfifo. 41 | if e := syscall.Mkfifo(filepath.Join(dirname, "special.xml"), 0777); e != nil { 42 | t.Fatal(e) 43 | } 44 | matches, skipped, e := GlobTreeRegular(dirname, []string{"*.xml", "/tests.json", "foo/*/weird.json"}) 45 | if e != nil { 46 | t.Fatal(e) 47 | } 48 | matches = stripPrefix(t, dirname, matches) 49 | skipped = stripPrefix(t, dirname, skipped) 50 | expected := []string{"base.xml", "foo/test.xml", "bar/ohsnap.xml", "tests.json", "foo/bar/weird.json"} 51 | if e := equalAnyOrder(expected, matches); e != nil { 52 | t.Errorf("GlobTreeRegular had unexpected matches: %s", e) 53 | } 54 | 55 | shouldSkip := []string{"coverage.xml", "special.xml"} 56 | if e := equalAnyOrder(shouldSkip, skipped); e != nil { 57 | t.Errorf("GlobTreeRegular had unexpected skips: %s", e) 58 | } 59 | } 60 | 61 | func stripPrefix(t *testing.T, prefix string, slice []string) []string { 62 | var strippedArray []string 63 | for _, elem := range slice { 64 | if rel, err := filepath.Rel(prefix, elem); err != nil { 65 | t.Fatal(err) 66 | } else { 67 | strippedArray = append(strippedArray, rel) 68 | } 69 | } 70 | return strippedArray 71 | } 72 | 73 | func equalAnyOrder(expected, actual []string) error { 74 | errMsg := "" 75 | for _, elem := range expected { 76 | if !contains(actual, elem) { 77 | errMsg += fmt.Sprintf("Expected %q but not found\n", elem) 78 | } 79 | } 80 | for _, elem := range actual { 81 | if !contains(expected, elem) { 82 | errMsg += fmt.Sprintf("Didn't expect %q but it was present\n", elem) 83 | } 84 | } 85 | if errMsg != "" { 86 | return fmt.Errorf(errMsg) 87 | } 88 | return nil 
89 | } 90 | 91 | func contains(l []string, s string) bool { 92 | for _, v := range l { 93 | if s == v { 94 | return true 95 | } 96 | } 97 | return false 98 | } 99 | -------------------------------------------------------------------------------- /common/lockfile/lockfile.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 Ingo Oeser 2 | 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 

package lockfile

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
)

// Lockfile is a pid-file based lock: the file at Path holds the pid of the
// process currently owning the lock.
type Lockfile struct {
	Path string
}

var (
	ErrBusy        = errors.New("Locked by other process")                                        // If you get this, retry after a short sleep might help
	ErrNeedAbsPath = errors.New("Lockfiles must be given as absolute path names")
	ErrInvalidPid  = errors.New("Lockfile contains invalid pid for system")
	ErrDeadOwner   = errors.New("Lockfile contains pid of process not existent on this system anymore")
)

// Describe a new filename located at path. It is expected to be an absolute path
func New(path string) (*Lockfile, error) {
	if !filepath.IsAbs(path) {
		return nil, ErrNeedAbsPath
	}
	return &Lockfile{path}, nil
}

// Who owns the lockfile?
func (l *Lockfile) GetOwner() (*os.Process, error) {
	name := l.Path

	// Ok, see, if we have a stale lockfile here
	content, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}

	var pid int
	if _, err := fmt.Sscanln(string(content), &pid); err != nil {
		return nil, ErrInvalidPid
	}

	// try hard for pids. If no pid, the lockfile is junk anyway and we delete it.
	if pid > 0 {
		p, err := os.FindProcess(pid)
		if err != nil {
			return nil, err
		}
		// Signal 0 probes for process existence without delivering anything.
		err = p.Signal(os.Signal(syscall.Signal(0)))
		if err == nil {
			return p, nil
		}
		errno, ok := err.(syscall.Errno)
		if !ok {
			return nil, err
		}

		switch errno {
		case syscall.ESRCH:
			// No such process: the recorded owner is dead.
			return nil, ErrDeadOwner
		case syscall.EPERM:
			// Process exists but belongs to another user: still a live owner.
			return p, nil
		default:
			return nil, err
		}
	} else {
		return nil, ErrInvalidPid
	}
}

// Try to get Lockfile lock. Returns nil if successful, and an error describing the reason it didn't work out.
// Please note, that existing lockfiles containing pids of dead processes and lockfiles containing no pid at all
// are deleted.
func (l *Lockfile) TryLock() error {
	name := l.Path

	// This has been checked by New already. If we trigger here,
	// the caller didn't use New and re-implemented its functionality badly.
	// So panic, that he might find this easily during testing.
	if !filepath.IsAbs(name) {
		panic(ErrNeedAbsPath)
	}

	// Write our pid to a temp file in the same directory, then try to
	// hard-link it to the lock path; link(2) is atomic, so exactly one
	// contender can win.
	tmplock, err := ioutil.TempFile(filepath.Dir(name), "")
	if err != nil {
		return err
	} else {
		defer tmplock.Close()
		defer os.Remove(tmplock.Name())
	}

	_, err = tmplock.WriteString(fmt.Sprintf("%d\n", os.Getpid()))
	if err != nil {
		return err
	}

	// return value intentionally ignored, as ignoring it is part of the algorithm
	_ = os.Link(tmplock.Name(), name)

	fiTmp, err := os.Lstat(tmplock.Name())
	if err != nil {
		return err
	}
	fiLock, err := os.Lstat(name)
	if err != nil {
		return err
	}

	// Success
	if os.SameFile(fiTmp, fiLock) {
		return nil
	}

	// Someone else holds (or held) the lock; decide whether it is stale.
	_, err = l.GetOwner()
	switch err {
	default:
		// Other errors -> defensively fail and let caller handle this
		return err
	case nil:
		return ErrBusy
	case ErrDeadOwner, ErrInvalidPid:
		// cases we can fix below
	}

	// clean stale/invalid lockfile
	err = os.Remove(name)
	if err != nil {
		return err
	}

	// now that we cleaned up the stale lockfile, let's recurse
	return l.TryLock()
}

// Release a lock again. Returns any error that happened during release of lock.
func (l *Lockfile) Unlock() error {
	return os.Remove(l.Path)
}
--------------------------------------------------------------------------------
/common/lockfile/lockfile_test.go:
--------------------------------------------------------------------------------
// Copyright (c) 2012 Ingo Oeser

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package lockfile_test

import (
	"fmt"
	"github.com/dropbox/changes-client/common/lockfile"
)

// ExampleLockfile demonstrates the acquire/release cycle and doubles as a
// runnable example test (checked against the // Output: line).
func ExampleLockfile() {
	lock, err := lockfile.New("/tmp/lock.me.now.lck")
	if err != nil {
		fmt.Printf("Cannot init lock. reason: %s\n", err)
		panic(err)
	}
	err = lock.TryLock()

	// Error handling is essential, as we only try to get the lock.
	if err != nil {
		fmt.Printf("Cannot lock \"%v\", reason: %s\n", lock, err)
		panic(err)
	}

	defer lock.Unlock()

	fmt.Println("Do stuff under lock")
	// Output: Do stuff under lock
}
--------------------------------------------------------------------------------
/common/scopedlogger/scopedlogger.go:
--------------------------------------------------------------------------------
package scopedlogger

import "log"

// ScopedLogger prefixes any prints with [Scope]
type ScopedLogger struct {
	Scope string
}

// Printf is a normal printf but with a scoped prefix
func (sl ScopedLogger) Printf(format string, v ...interface{}) {
	log.Printf("["+sl.Scope+"] "+format, v...)
}

// Println is a normal println but with a scoped prefix
func (sl ScopedLogger) Println(v ...interface{}) {
	log.Println(append([]interface{}{"[" + sl.Scope + "] "}, v...)...)
}

// Sub returns a ScopedLogger that is sub-scoped: it will prefix any prints with [Scope:name]
func (sl ScopedLogger) Sub(name string) ScopedLogger {
	return ScopedLogger{sl.Scope + ":" + name}
}
--------------------------------------------------------------------------------
/common/sentry/sentry.go:
--------------------------------------------------------------------------------
package sentry

import (
	"bytes"
	"flag"
	"fmt"
	"log"
	"strings"

	"sync"

	"github.com/dropbox/changes-client/common/taggederr"
	"github.com/dropbox/changes-client/common/version"
	"github.com/getsentry/raven-go"
)

var (
	// sentryDsn is populated by the -sentry-dsn flag (see init below);
	// empty means Sentry reporting is disabled.
	sentryDsn       = ""
	sentryClient    *raven.Client
	sentryClientMux sync.Mutex
)

// NoisyTransport wraps a raven.Transport so send failures get logged.
type NoisyTransport struct {
	inner raven.Transport
}

// By default Sentry quietly drops errors and it is awkward to retrieve them at reporting sites,
// so this wraps the Transport to log any errors encountered.
func (nt *NoisyTransport) Send(url, authHeader string, packet *raven.Packet) error {
	err := nt.inner.Send(url, authHeader, packet)
	if err != nil {
		log.Printf("Error reporting to Sentry: %s (%s)", err, url)
	}
	return err
}

// Return our global Sentry client, or nil if none was configured.
// The client is created lazily on first use and cached; access is
// serialized by sentryClientMux.
func GetClient() *raven.Client {
	sentryClientMux.Lock()
	defer sentryClientMux.Unlock()
	if sentryClient != nil {
		return sentryClient
	}

	if sentryDsn == "" {
		return nil
	}

	if client, err := raven.NewClient(sentryDsn, map[string]string{
		"version": version.GetVersion(),
	}); err != nil {
		// TODO: Try to avoid potentially dying fatally in a getter;
		// we may want to log an error and move on, we might want defers
		// to fire, etc. This will probably mean not creating the client
		// lazily.
		log.Fatal(err)
	} else {
		sentryClient = client
	}
	sentryClient.Transport = &NoisyTransport{sentryClient.Transport}

	return sentryClient
}

// extractFromTagged unwraps a TaggedErr, merging its tags with the supplied
// ones; explicitly supplied tags win on conflict.
func extractFromTagged(terr taggederr.TaggedErr, tags map[string]string) (error, map[string]string) {
	result := terr.GetTags()
	for k, v := range tags {
		result[k] = v
	}
	return terr.GetInner(), result
}

// Error reports err (with tags) to Sentry if configured, always logging locally.
func Error(err error, tags map[string]string) {
	if sentryClient := GetClient(); sentryClient != nil {
		log.Printf("[Sentry Error] %s", err)
		if terr, ok := err.(taggederr.TaggedErr); ok {
			err, tags = extractFromTagged(terr, tags)
		}
		sentryClient.CaptureError(err, tags)
	} else {
		log.Printf("[Sentry Error Unsent] %s", err)
	}
}

// Message reports an informational message to Sentry if configured,
// always logging locally.
func Message(str string, tags map[string]string) {
	if sentryClient := GetClient(); sentryClient != nil {
		log.Printf("[Sentry Message] %s", str)
		sentryClient.CaptureMessage(str, tags)
	} else {
		log.Printf("[Sentry Message Unsent] %s", str)
	}
}

// Warningf takes a format string and arguments with the same meaning as with fmt.Printf and
// sends the message as a warning to Sentry if Sentry is configured.
// It makes a best-effort attempt to process the arguments to ensure that Sentry buckets the
// warning by the format string rather than by the specific values of the arguments.
func Warningf(msgfmt string, args ...interface{}) {
	if sentryClient := GetClient(); sentryClient != nil {
		msg := fmt.Sprintf(msgfmt, args...)
		packet := makePacket(raven.WARNING, msg, fmtSanitize(msgfmt, args))
		log.Printf("[Sentry Warning] %s", msg)
		sentryClient.Capture(packet, map[string]string{})
	} else {
		log.Printf("[Sentry Warning Unsent] %s", fmt.Sprintf(msgfmt, args...))
	}
}

// makePacket builds a raven.Packet at the given severity; ravenMsg may be nil
// when no structured message (format + params) is available.
func makePacket(severity raven.Severity, message string, ravenMsg *raven.Message) *raven.Packet {
	var ifaces []raven.Interface
	if ravenMsg != nil {
		ifaces = append(ifaces, ravenMsg)
	}
	p := raven.NewPacket(message, ifaces...)
	p.Level = severity
	return p
}

// The Sentry server doesn't speak Go fmt strings, so this tries to translate,
// or if it can't, return nil so we can just fall back to the locally fmt'd version.
// Only the verbs in supportedVerbs are handled; each is pre-rendered to a
// string and the verb rewritten to %s.
func fmtSanitize(msg string, args []interface{}) *raven.Message {
	var newmsg bytes.Buffer
	newargs := append([]interface{}(nil), args...)
	argidx := 0
	const supportedVerbs = "qvds"
	lastpct := false
	for _, m := range msg {
		if lastpct {
			if strings.ContainsRune(supportedVerbs, m) {
				// Bail out if there are more verbs than arguments.
				if argidx >= len(args) {
					return nil
				}
				newargs[argidx] = fmt.Sprintf("%"+string(m), args[argidx])
				newmsg.WriteRune('s')
				argidx++
			} else if m == '%' {
				newmsg.WriteRune('%')
			} else {
				// Unsupported verb: give up on translation.
				return nil
			}
			lastpct = false
		} else {
			newmsg.WriteRune(m)
			lastpct = m == '%'
		}
	}
	// A dangling '%' or unused arguments also defeat translation.
	if lastpct || argidx < len(args) {
		return nil
	}
	return &raven.Message{Message: newmsg.String(), Params: newargs}
}

func init() {
	flag.StringVar(&sentryDsn, "sentry-dsn", "", "Sentry DSN for reporting errors")
}
--------------------------------------------------------------------------------
/common/sentry/sentry_test.go:
--------------------------------------------------------------------------------
package sentry

import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/dropbox/changes-client/common/taggederr"
	raven "github.com/getsentry/raven-go"
	"reflect"
	"testing"
	"time"
)

func TestExtractFromTagged(t *testing.T) {
	type M map[string]string
	base := errors.New("Test Error")
	terr := taggederr.Wrap(base).AddTag("x", "1").AddTag("y", "2")
	cases := []struct {
		override map[string]string
		result   map[string]string
	}{
		{override: nil, result: M{"x": "1", "y": "2"}},
		{override: M{}, result: M{"x": "1", "y": "2"}},
		{override: M{"x": "4"}, result: M{"x": "4", "y": "2"}},
		{override: M{"z": "4"}, result: M{"x": "1", "y": "2", "z": "4"}},
	}
	for i, c := range cases {
		// Each case merges terr's tags with c.override; overrides win.
		oute, outt := extractFromTagged(terr, c.override)
		if oute != base {
			t.Errorf("%v: Expected base error, got %v", i, oute)
		}
		if !reflect.DeepEqual(outt, c.result) {
			t.Errorf("%v: Expected %v, got %v", i, c.result, outt)
		}
	}
}

func TestMakePacket(t *testing.T) {
	pkt := makePacket(raven.ERROR, "Hello World", &raven.Message{Message: "Hello %s", Params: []interface{}{"World"}})
	// Wire mirrors the subset of the Sentry JSON payload we assert on.
	type Wire struct {
		Message  string
		Level    string
		LogEntry struct {
			Message string
			Params  []interface{}
		}
	}
	var out Wire
	if e := json.Unmarshal(pkt.JSON(), &out); e != nil {
		t.Fatal(e)
	}
	if out.Message != "Hello World" {
		t.Errorf("Bad message: %v", out.Message)
	}

	if fmt.Sprintf(out.LogEntry.Message, out.LogEntry.Params...) != "Hello World" {
		t.Errorf("Bad log entry: %#v", out.LogEntry)
	}
}

func TestFmtSanitize(t *testing.T) {
	mkargs := func(args ...interface{}) []interface{} {
		return args
	}
	result := func(fmtstr string, args ...interface{}) *raven.Message {
		return &raven.Message{Message: fmtstr, Params: args}
	}
	cases := []struct {
		Msg    string
		Args   []interface{}
		result *raven.Message
	}{
		{"Hello %s", mkargs("World"), result("Hello %s", "World")},
		{"Hello %q", mkargs("World"), result("Hello %s", `"World"`)},
		{"Hello %v", mkargs("World"), result("Hello %s", "World")},
		{"Hello %v", mkargs(1i), result("Hello %s", "(0+1i)")},
		{"From %s to %s!", mkargs("Justin", "Kelly"), result("From %s to %s!", "Justin", "Kelly")},
		{"Hello %%", mkargs(), result("Hello %%")},
		{"Hello %q", mkargs(), nil},
		{"Ran %v times", mkargs(4), result("Ran %s times", "4")},
		{"Ran %q times", mkargs(4.5), result("Ran %s times", "%!q(float64=4.5)")},
		{"Gone in %s", mkargs(59 * time.Second), result("Gone in %s", "59s")},
		{"If %d then %t", mkargs(5, false), nil},
		{"But %%v ", mkargs("what"), nil},
		{"%%%", mkargs(), nil},
		{"Hello there.", mkargs(), result("Hello there.")},
	}

	for i, c := range cases {
		ravenMsg := fmtSanitize(c.Msg, c.Args)
		if !reflect.DeepEqual(ravenMsg, c.result) {
			t.Errorf("%v: Expected: %v but got %v", i, c.result, ravenMsg)
		}
	}
}
--------------------------------------------------------------------------------
/common/taggederr/taggederr.go:
--------------------------------------------------------------------------------
package taggederr

import (
	"bytes"
	"errors"
	"fmt"
	"sort"
)

// Tags are stored internally as an immutable linked list
// with a shared tail rather than a map or slice to make adding
// tags cheap and thread-safe. This could behave badly if we had
// many duplicate tags, but that isn't expected.
type tag struct {
	k, v string
	next *tag
}

// TaggedErr wraps an error together with a list of key=value tags.
type TaggedErr struct {
	inner error
	tags  *tag
}

// New creates a new TaggedErr from a string describing the issue.
// This is a convenience wrapper around errors.New.
func New(msg string) TaggedErr {
	return Wrap(errors.New(msg))
}

// Newf creates a new TaggedErr from a format string and arguments
// in the manner of fmt.Printf.
// Currently, there is no benefit over fmt.Errorf aside from convenience,
// but in the future the format string and arguments may be retained to
// assist in error grouping in backends such as Sentry.
func Newf(fmtstr string, args ...interface{}) TaggedErr {
	// TODO: When possible, retain the format string and args
	// so they can be supplied independent to backends such as Sentry
	// for improved grouping.
	return Wrap(fmt.Errorf(fmtstr, args...))
}

// GetTags returns a new map with tags, preferring the value most recently
// applied when a tag has been set more than once.
func (t TaggedErr) GetTags() map[string]string {
	m := make(map[string]string)
	// The list is ordered newest-first, so keeping only the first
	// occurrence of each key yields the most recently applied value.
	for tt := t.tags; tt != nil; tt = tt.next {
		if _, ok := m[tt.k]; !ok {
			m[tt.k] = tt.v
		}
	}
	return m
}

// GetInner returns the wrapped error value, which will never be nil.
func (t TaggedErr) GetInner() error {
	return t.inner
}

// Error renders the tags (sorted by key) in a "[k=v,...]: " prefix followed
// by the wrapped error's message.
func (t TaggedErr) Error() string {
	if t.tags != nil {
		var keys []string
		m := make(map[string]string)
		// We ignore all but the first occurrence of a tag;
		// they are ordered from most to least recently tagged, and
		// newer tags override older.
		for tt := t.tags; tt != nil; tt = tt.next {
			if _, ok := m[tt.k]; !ok {
				m[tt.k] = tt.v
				keys = append(keys, tt.k)
			}
		}
		sort.Strings(keys)
		var buf bytes.Buffer
		buf.WriteByte('[')
		for i, k := range keys {
			if i != 0 {
				buf.WriteByte(',')
			}
			buf.WriteString(k)
			buf.WriteByte('=')
			buf.WriteString(m[k])
		}
		buf.WriteString("]: ")
		buf.WriteString(t.inner.Error())
		return buf.String()
	}
	return t.inner.Error()
}

// AddTag creates a new TaggedErr value with the specified tag from the
// existing TaggedErr, keeping the wrapped value and all previous tags.
// If a tag already exists with the given key, it will be ignored in the new
// value.
// No operations on the new TaggedErr will impact the parent value.
func (t TaggedErr) AddTag(k, v string) TaggedErr {
	return TaggedErr{t.inner, &tag{k, v, t.tags}}
}

// Wrap wraps an error value to make it a TaggedErr, or exposes the
// TaggedErr if the parameter is already a TaggedErr.
// Passing nil will result in a panic.
func Wrap(e error) TaggedErr {
	if e == nil {
		panic("taggederr.Wrap called with nil error")
	}
	if te, ok := e.(TaggedErr); ok {
		return te
	}
	return TaggedErr{inner: e, tags: nil}
}
--------------------------------------------------------------------------------
/common/taggederr/taggederr_test.go:
--------------------------------------------------------------------------------
package taggederr_test

import (
	"errors"
	"github.com/dropbox/changes-client/common/taggederr"
	"reflect"
	"testing"
)

func TestMsg(t *testing.T) {
	e := taggederr.Wrap(errors.New("ERROR")).
		AddTag("severity", "hilarity").
		AddTag("death", "smoochy")
	// Tags render sorted by key regardless of insertion order.
	expected := "[death=smoochy,severity=hilarity]: ERROR"
	if msg := e.Error(); msg != expected {
		t.Errorf("Got %v, expected %v", msg, expected)
	}
}

func TestFmtMsg(t *testing.T) {
	e := taggederr.Newf("Experienced %d sadnesses", 4)
	expected := "Experienced 4 sadnesses"
	if msg := e.Error(); msg != expected {
		t.Errorf("Got %v, expected %v", msg, expected)
	}
}

func TestGetTags(t *testing.T) {
	e := taggederr.New("ERROR")
	if len(e.GetTags()) != 0 {
		t.Error("Expected empty tags")
	}
	e2 := e.AddTag("a", "1").AddTag("b", "2")
	expected := map[string]string{
		"a": "1",
		"b": "2",
	}
	if !reflect.DeepEqual(e2.GetTags(), expected) {
		t.Errorf("Expected %v, got %v", expected, e2.GetTags())
	}
}

// TestTaggedErrSafeReuse checks that branching off a shared TaggedErr does not
// let the two branches' tags leak into each other.
func TestTaggedErrSafeReuse(t *testing.T) {
	common := taggederr.New("Hello world")
	te1 := common.
		AddTag("name", "me").
		AddTag("cat", "food")
	te2 := common.
		AddTag("name", "you").
		AddTag("dog", "food")
	if te1.Error() == te2.Error() {
		t.Error("Messages shouldn't be the same")
	}
}
--------------------------------------------------------------------------------
/common/version/version.go:
--------------------------------------------------------------------------------
package version

const (
	version = "0.1.1"
)

// gitVersion is not assigned anywhere in this file; presumably it is injected
// at link time (e.g. -ldflags -X) — TODO confirm against the build scripts.
var gitVersion string

// GetVersion returns the release version joined with the git suffix.
func GetVersion() string {
	return version + "-" + gitVersion
}
--------------------------------------------------------------------------------
/engine/engine.go:
--------------------------------------------------------------------------------
package engine

import (
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/dropbox/changes-client/client"
	"github.com/dropbox/changes-client/client/adapter"
	"github.com/dropbox/changes-client/client/filelog"
	"github.com/dropbox/changes-client/client/reporter"
	"github.com/dropbox/changes-client/common/sentry"
	"github.com/dropbox/changes-client/common/version"

	// Blank imports register the concrete adapters/reporters with their registries.
	_ "github.com/dropbox/changes-client/adapter/basic"
	_ "github.com/dropbox/changes-client/adapter/lxc"
	_ "github.com/dropbox/changes-client/reporter/artifactstore"
	_ "github.com/dropbox/changes-client/reporter/jenkins"
	_ "github.com/dropbox/changes-client/reporter/mesos"
	_ "github.com/dropbox/changes-client/reporter/multireporter"
)

const (
	STATUS_QUEUED      = "queued"
	STATUS_IN_PROGRESS = "in_progress"
	STATUS_FINISHED    = "finished"

	RESULT_PASSED  Result = "passed"
	RESULT_FAILED  Result = "failed"
	RESULT_ABORTED Result = "aborted"
	// Test results unreliable or unavailable due to infrastructure
	// issues.
41 | RESULT_INFRA_FAILED Result = "infra_failed" 42 | 43 | SNAPSHOT_ACTIVE = "active" 44 | SNAPSHOT_FAILED = "failed" 45 | ) 46 | 47 | type Result string 48 | 49 | func (r Result) String() string { 50 | return string(r) 51 | } 52 | 53 | // Convenience method to check for all types of failure. 54 | func (r Result) IsFailure() bool { 55 | switch r { 56 | case RESULT_FAILED, RESULT_INFRA_FAILED: 57 | return true 58 | } 59 | return false 60 | } 61 | 62 | var ( 63 | selectedAdapterFlag string 64 | selectedReporterFlag string 65 | outputSnapshotFlag string 66 | useExternalEnvFlag bool 67 | ) 68 | 69 | type Engine struct { 70 | config *client.Config 71 | clientLog *client.Log 72 | adapter adapter.Adapter 73 | reporter reporter.Reporter 74 | } 75 | 76 | func RunBuildPlan(config *client.Config, infraLog *filelog.FileLog) (Result, error) { 77 | currentReporter, err := reporter.Create(selectedReporterFlag) 78 | if err != nil { 79 | log.Printf("[engine] failed to initialize reporter: %s", selectedReporterFlag) 80 | return RESULT_INFRA_FAILED, err 81 | } 82 | currentReporter.Init(config) 83 | defer currentReporter.Shutdown() 84 | if infraLog != nil { 85 | infraLog.StartReporting(currentReporter) 86 | defer infraLog.Shutdown() 87 | } 88 | 89 | currentAdapter, err := adapter.Create(selectedAdapterFlag) 90 | if err != nil { 91 | log.Printf("[engine] failed to initialize adapter: %s", selectedAdapterFlag) 92 | return RESULT_INFRA_FAILED, err 93 | } 94 | 95 | log.Printf("[engine] started with reporter %s, adapter %s", selectedReporterFlag, selectedAdapterFlag) 96 | 97 | engine := &Engine{ 98 | config: config, 99 | clientLog: client.NewLog(), 100 | adapter: currentAdapter, 101 | reporter: currentReporter, 102 | } 103 | 104 | return engine.Run() 105 | } 106 | 107 | // Returns the ID to use for the generated snapshot, or an empty string if no 108 | // snapshot should be generated. Use this instead of the flag or config value. 
109 | func (e *Engine) outputSnapshotID() string { 110 | // Until we're confident that the config always matches the flag, use the 111 | // flag to preserve behavior. 112 | return outputSnapshotFlag 113 | } 114 | 115 | // checkForSnapshotInconsistency returns an error if the output snapshot specified via flag 116 | // appears inconsistent with the value from the JobStep config. 117 | func (e *Engine) checkForSnapshotInconsistency() error { 118 | if outputSnapshotFlag != e.config.ExpectedSnapshot.ID { 119 | return fmt.Errorf("Output snapshot mismatch; flag was %q, but config was %q", 120 | outputSnapshotFlag, e.config.ExpectedSnapshot.ID) 121 | } 122 | return nil 123 | } 124 | 125 | func (e *Engine) Run() (Result, error) { 126 | var wg sync.WaitGroup 127 | wg.Add(1) 128 | go func() { 129 | reportLogChunks("console", e.clientLog, e.reporter) 130 | wg.Done() 131 | }() 132 | 133 | e.clientLog.Printf("changes-client version: %s", version.GetVersion()) 134 | e.clientLog.Printf("Running jobstep %s for %s (%s)", e.config.JobstepID, e.config.Project.Name, e.config.Project.Slug) 135 | if e.config.DebugConfig != nil { 136 | if jsout, err := json.MarshalIndent(e.config.DebugConfig, "", " "); err != nil { 137 | // Should never happen, but no use crashing about it. 138 | sentry.Error(err, map[string]string{}) 139 | } else { 140 | e.clientLog.Printf("Debug config: %s", jsout) 141 | } 142 | } 143 | 144 | if err := e.checkForSnapshotInconsistency(); err != nil { 145 | sentry.Error(err, map[string]string{}) 146 | // Ugly, but better to be consistent. 147 | // TODO(kylec): Remove this once we're confident in the config value. 148 | e.config.ExpectedSnapshot.ID = e.outputSnapshotID() 149 | } 150 | 151 | e.reporter.PushJobstepStatus(STATUS_IN_PROGRESS, "") 152 | 153 | result, err := e.runBuildPlan() 154 | 155 | e.clientLog.Printf("==> Build finished! 
Recorded result as %s", result) 156 | if err != nil { 157 | e.clientLog.Printf("==> Error: %s", err) 158 | } 159 | 160 | e.reporter.PushJobstepStatus(STATUS_FINISHED, result.String()) 161 | 162 | e.clientLog.Close() 163 | wg.Wait() 164 | 165 | return result, err 166 | } 167 | 168 | func (e *Engine) executeCommands() (Result, error) { 169 | for _, cmdConfig := range e.config.Cmds { 170 | e.clientLog.Printf("==> Running command %s", cmdConfig.ID) 171 | e.clientLog.Printf("==> with script %s", cmdConfig.Script) 172 | cmd, err := client.NewCommand(cmdConfig.ID, cmdConfig.Script) 173 | if err != nil { 174 | e.reporter.PushCommandStatus(cmd.ID, STATUS_FINISHED, 255) 175 | e.clientLog.Printf("==> Error creating command script: %s", err) 176 | return RESULT_INFRA_FAILED, err 177 | } 178 | e.reporter.PushCommandStatus(cmd.ID, STATUS_IN_PROGRESS, -1) 179 | 180 | cmd.CaptureOutput = cmdConfig.CaptureOutput 181 | 182 | var env []string 183 | // Some of our setups rely on external environment 184 | // variables, in which case we pass through our 185 | // entire environment to any commands we run. 
186 | if useExternalEnvFlag { 187 | env = os.Environ() 188 | } 189 | for k, v := range cmdConfig.Env { 190 | env = append(env, k+"="+v) 191 | } 192 | cmd.Env = env 193 | 194 | if len(cmdConfig.Cwd) > 0 { 195 | cmd.Cwd = cmdConfig.Cwd 196 | } 197 | 198 | cmdResult, err := e.adapter.Run(cmd, e.clientLog) 199 | 200 | if err != nil { 201 | e.reporter.PushCommandStatus(cmd.ID, STATUS_FINISHED, 255) 202 | e.clientLog.Printf("==> Error running command: %s", err) 203 | return RESULT_INFRA_FAILED, err 204 | } 205 | result := RESULT_FAILED 206 | if cmdResult.Success { 207 | result = RESULT_PASSED 208 | if cmd.CaptureOutput { 209 | e.reporter.PushCommandOutput(cmd.ID, STATUS_FINISHED, 0, cmdResult.Output) 210 | } else { 211 | e.reporter.PushCommandStatus(cmd.ID, STATUS_FINISHED, 0) 212 | } 213 | } else { 214 | e.reporter.PushCommandStatus(cmd.ID, STATUS_FINISHED, 1) 215 | // infra_setup commands are generated and owned by Changes, so when they fail, 216 | // it is an infrastructural failure. 217 | if cmdConfig.Type.ID == "infra_setup" { 218 | return RESULT_INFRA_FAILED, 219 | fmt.Errorf("Failure while executing infrastructural setup command %s", cmdConfig.ID) 220 | } 221 | } 222 | 223 | t0 := time.Now() 224 | if err := e.reporter.PublishArtifacts(cmdConfig, e.adapter, e.clientLog); err != nil { 225 | e.clientLog.Printf("==> PublishArtifacts Error: %s after %s", err, time.Since(t0)) 226 | return RESULT_INFRA_FAILED, err 227 | } 228 | log.Printf("Took %s to publish artifacts.", time.Since(t0)) 229 | 230 | if result.IsFailure() { 231 | return result, nil 232 | } 233 | } 234 | 235 | // Made it through all commands without failure. Success. 
236 | return RESULT_PASSED, nil 237 | } 238 | 239 | func (e *Engine) captureSnapshot() error { 240 | log.Printf("[adapter] Capturing snapshot %s", e.outputSnapshotID()) 241 | err := e.adapter.CaptureSnapshot(e.outputSnapshotID(), e.clientLog) 242 | if err != nil { 243 | log.Printf("[adapter] Failed to capture snapshot: %s", err) 244 | return err 245 | } 246 | return nil 247 | } 248 | 249 | func (e *Engine) runBuildPlan() (Result, error) { 250 | forceInfraFailure := false 251 | if _, err := e.config.GetDebugConfig("forceInfraFailure", &forceInfraFailure); err != nil { 252 | return RESULT_INFRA_FAILED, err 253 | } else if forceInfraFailure { 254 | return RESULT_INFRA_FAILED, errors.New("Infra failure forced for debugging") 255 | } 256 | 257 | ctx, cancelFunc := context.WithCancel(context.Background()) 258 | 259 | // capture ctrl+c and enforce a clean shutdown 260 | sigchan := make(chan os.Signal, 1) 261 | signal.Notify(sigchan, os.Interrupt) 262 | go func() { 263 | shuttingDown := false 264 | for _ = range sigchan { 265 | if shuttingDown { 266 | log.Printf("Second interrupt received. Terminating!") 267 | os.Exit(1) 268 | } 269 | 270 | shuttingDown = true 271 | 272 | log.Printf("Interrupted! Cancelling execution and cleaning up..") 273 | cancelFunc() 274 | } 275 | }() 276 | 277 | // We need to ensure that we're able to abort the build if upstream suggests 278 | // that it's been cancelled. 
279 | if e.config.UpstreamMonitor { 280 | go func() { 281 | um := &UpstreamMonitor{ 282 | Config: e.config, 283 | } 284 | um.WaitUntilAbort() 285 | cancelFunc() 286 | }() 287 | } 288 | 289 | if err := e.adapter.Init(e.config); err != nil { 290 | log.Print(fmt.Sprintf("[adapter] %s", err)) 291 | e.clientLog.Printf("==> ERROR: Failed to initialize %s adapter", selectedAdapterFlag) 292 | return RESULT_INFRA_FAILED, err 293 | } 294 | 295 | metrics, err := e.adapter.Prepare(e.clientLog) 296 | if err != nil { 297 | log.Printf("[adapter] %s", err) 298 | e.clientLog.Printf("==> ERROR: %s adapter failed to prepare: %s", selectedAdapterFlag, err) 299 | return RESULT_INFRA_FAILED, err 300 | } 301 | defer func(engine *Engine) { 302 | shutdownMetrics, shutdownErr := engine.adapter.Shutdown(engine.clientLog) 303 | if shutdownErr != nil { 304 | log.Printf("[adapter] Error during shutdown: %s", err) 305 | } 306 | engine.reporter.ReportMetrics(shutdownMetrics) 307 | }(e) 308 | e.reporter.ReportMetrics(metrics) 309 | 310 | type cmdResult struct { 311 | result Result 312 | err error 313 | } 314 | // actually begin executing the build plan 315 | finished := make(chan cmdResult) 316 | go func() { 317 | r, cmderr := e.executeCommands() 318 | finished <- cmdResult{r, cmderr} 319 | }() 320 | 321 | var result Result 322 | select { 323 | case cmdresult := <-finished: 324 | if cmdresult.err != nil { 325 | return cmdresult.result, cmdresult.err 326 | } 327 | result = cmdresult.result 328 | case <-ctx.Done(): 329 | e.clientLog.Printf("==> ERROR: Build was aborted by upstream") 330 | return RESULT_ABORTED, nil 331 | } 332 | 333 | if result == RESULT_PASSED && e.outputSnapshotID() != "" { 334 | var snapshotStatus string 335 | sserr := e.captureSnapshot() 336 | if sserr != nil { 337 | snapshotStatus = SNAPSHOT_FAILED 338 | } else { 339 | snapshotStatus = SNAPSHOT_ACTIVE 340 | } 341 | if err := e.reporter.PushSnapshotImageStatus(e.outputSnapshotID(), snapshotStatus); err != nil { 342 | 
log.Printf("Failed to push snapshot image status: %s", err) 343 | if sserr == nil { 344 | sserr = err 345 | } 346 | } 347 | if sserr != nil { 348 | return RESULT_INFRA_FAILED, sserr 349 | } 350 | } 351 | return result, nil 352 | } 353 | 354 | func reportLogChunks(name string, clientLog *client.Log, r reporter.Reporter) { 355 | for ch, ok := clientLog.GetChunk(); ok; ch, ok = clientLog.GetChunk() { 356 | r.PushLogChunk(name, ch) 357 | } 358 | } 359 | 360 | func init() { 361 | flag.StringVar(&selectedAdapterFlag, "adapter", "basic", "Adapter to run build against") 362 | flag.StringVar(&selectedReporterFlag, "reporter", "multireporter", "Reporter to send results to") 363 | flag.StringVar(&outputSnapshotFlag, "save-snapshot", "", "Save the resulting container snapshot") 364 | flag.BoolVar(&useExternalEnvFlag, "use-external-env", true, "Whether to pass through changes-client's external environment to the commands it runs") 365 | } 366 | -------------------------------------------------------------------------------- /engine/engine_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/dropbox/changes-client/client" 8 | "github.com/dropbox/changes-client/client/adapter" 9 | "github.com/dropbox/changes-client/client/reporter" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | type noartReporter struct { 15 | reporter.NoopReporter 16 | } 17 | 18 | func (nar *noartReporter) PublishArtifacts(_ client.ConfigCmd, _ adapter.Adapter, _ *client.Log) error { 19 | return errors.New("Couldn't publish artifacts somehow") 20 | } 21 | 22 | type noopAdapter struct { 23 | cmdIdsToFail map[string]bool 24 | } 25 | 26 | // FailCommandForTest takes a Command ID, and ensures that any Command with that 27 | // id passed to Run will fail. Not quite so noop with this. 
func (na *noopAdapter) FailCommandForTest(id string) {
	// Lazily allocate so the zero-value adapter still works.
	if na.cmdIdsToFail == nil {
		na.cmdIdsToFail = make(map[string]bool)
	}
	na.cmdIdsToFail[id] = true
}

func (_ *noopAdapter) Init(*client.Config) error { return nil }
func (_ *noopAdapter) Prepare(*client.Log) (client.Metrics, error) { return nil, nil }
func (na *noopAdapter) Run(cmd *client.Command, _ *client.Log) (*client.CommandResult, error) {
	fail := na.cmdIdsToFail[cmd.ID]
	return &client.CommandResult{
		Success: !fail,
	}, nil
}
func (_ *noopAdapter) Shutdown(*client.Log) (client.Metrics, error) { return nil, nil }
func (_ *noopAdapter) CaptureSnapshot(string, *client.Log) error { return nil }
func (_ *noopAdapter) GetRootFs() string {
	return "/"
}
func (_ *noopAdapter) GetArtifactRoot() string {
	return "/"
}
func (_ *noopAdapter) CollectArtifacts([]string, *client.Log) ([]string, error) {
	return nil, nil
}

// A failing PublishArtifacts must surface as an infra failure.
func TestFailedArtifactInfraFails(t *testing.T) {
	nar := new(noartReporter)
	log := client.NewLog()
	defer log.Close()
	go log.Drain()
	eng := Engine{reporter: nar,
		clientLog: log,
		adapter:   &noopAdapter{},
		config: &client.Config{Cmds: []client.ConfigCmd{
			{Artifacts: []string{"result.xml"}},
		}}}
	r, e := eng.executeCommands()
	assert.Equal(t, r, RESULT_INFRA_FAILED)
	assert.Error(t, e)
}

// The forceInfraFailure debug knob must short-circuit the whole build plan.
func TestDebugForceInfraFailure(t *testing.T) {
	config, err := client.LoadConfig([]byte(`{"debugConfig": {"forceInfraFailure": true}}`))
	assert.NoError(t, err)
	log := client.NewLog()
	defer log.Close()
	eng := Engine{reporter: &reporter.NoopReporter{},
		clientLog: log,
		adapter:   &noopAdapter{},
		config:    config,
	}

	result, err := eng.runBuildPlan()
	assert.Equal(t, result, RESULT_INFRA_FAILED)
	assert.Error(t, err)
}

// A failing command of type infra_setup must be reported as infra failure,
// not an ordinary test failure.
func TestInfraSetupCommandFailsInfra(t *testing.T) {
	cmd := client.ConfigCmd{ID: "failme1234", Script: "exit 1"}
	cmd.Type.ID = "infra_setup"
	adapter := &noopAdapter{}
	adapter.FailCommandForTest(cmd.ID)
	log := client.NewLog()
	defer log.Close()
	go log.Drain()
	eng := Engine{reporter: &reporter.NoopReporter{},
		clientLog: log,
		adapter:   adapter,
		config: &client.Config{
			Cmds: []client.ConfigCmd{cmd},
		},
	}

	result, err := eng.runBuildPlan()
	assert.Equal(t, RESULT_INFRA_FAILED, result)
	assert.Error(t, err)
}

// makeResetFunc snapshots *s and returns a closure restoring it; used to
// save/restore global flag values around a test.
func makeResetFunc(s *string) func() {
	previous := *s
	return func() {
		*s = previous
	}
}

func TestOutputSnapshotID(t *testing.T) {
	// Leave things as we found them.
	defer makeResetFunc(&outputSnapshotFlag)()

	type testcase struct {
		Flag, Config string
		// Whether we find an inconsistency.
		Error bool
	}
	cases := []testcase{
		{Flag: "", Config: "1234", Error: true},
		{Flag: "", Config: "", Error: false},
		{Flag: "abcd", Config: "", Error: true},
		{Flag: "abcd", Config: "abcd", Error: false},
		{Flag: "abcd", Config: "1234", Error: true},
	}
	for _, c := range cases {
		var cfg client.Config
		cfg.ExpectedSnapshot.ID = c.Config
		outputSnapshotFlag = c.Flag

		eng := Engine{config: &cfg}
		// For now, flag always wins.
		assert.Equal(t, eng.outputSnapshotID(), c.Flag, "For outputSnapshotID() with %#v", c)
		err := eng.checkForSnapshotInconsistency()
		if c.Error {
			assert.Error(t, err, "%#v", c)
		} else {
			assert.NoError(t, err, "%#v", c)
		}
	}
}
-------------------------------------------------------------------------------- /engine/upstream_monitor.go: --------------------------------------------------------------------------------
package engine

import (
	"encoding/json"
	"fmt"
	"github.com/dropbox/changes-client/client"
	"hash/fnv"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"time"
)

// JobStep mirrors the subset of the Changes jobstep JSON payload we read.
type JobStep struct {
	Status struct {
		ID string
	}
	Result struct {
		ID string
	}
}

// UpstreamMonitor polls the Changes server to detect jobstep abortion.
type UpstreamMonitor struct {
	Config *client.Config
}

type HeartbeatResponse struct {
	Finished bool
	Aborted  bool
}

// WaitUntilAbort posts heartbeats forever, returning nil once upstream
// reports the JobStep aborted, or an error if it finished without aborting.
func (um *UpstreamMonitor) WaitUntilAbort() error {
	// NOTE(review): this local shadows the imported `client` package within
	// the function body; postHeartbeat takes it as a parameter so the shadow
	// is contained.
	client := &http.Client{}

	h := fnv.New64()
	// seed with JobstepID so each jobstep hits Changes at slightly
	// different times
	h.Write([]byte(um.Config.JobstepID))
	// make our own random generator so that heartbeat variance is
	// unaffected by other interspersed calls to math/rand
	randGen := rand.New(rand.NewSource(int64(h.Sum64())))
	for {
		log.Printf("[upstream] sending heartbeat")

		hr, err := um.postHeartbeat(client)
		if err != nil {
			// Transient errors are logged and retried on the next tick.
			log.Printf("[upstream] %s", err)
		} else if hr.Finished {
			if hr.Aborted {
				log.Print("[upstream] JobStep was aborted")
				return nil
			} else {
				log.Print("[upstream] WARNING: JobStep marked as finished, but not aborted")
				return fmt.Errorf("JobStep marked as finished, but not aborted.")
			}
		}

		// vary sleep time by up to 10 seconds to avoid all shards sending
		// heartbeats at the same time
		time.Sleep(time.Duration(20+randGen.Intn(10)) * time.Second)
	}
}

// postHeartbeat POSTs a heartbeat for this jobstep and interprets the
// response; HTTP 410 is treated as finished+aborted.
func (um *UpstreamMonitor) postHeartbeat(client *http.Client) (*HeartbeatResponse, error) {
	url := um.Config.Server + "/jobsteps/" + um.Config.JobstepID + "/heartbeat/"

	req, err := http.NewRequest("POST", url, nil)
	if err != nil {
		return nil, err
	}

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode == 410 {
		return &HeartbeatResponse{
			Finished: true,
			Aborted:  true,
		}, nil
	}

	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("Request to fetch JobStep failed with status code: %d", resp.StatusCode)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	r := &JobStep{}
	if err := json.Unmarshal(body, r); err != nil {
		return nil, err
	}

	hr := &HeartbeatResponse{
		Finished: r.Status.ID == STATUS_FINISHED,
		Aborted:  r.Result.ID == RESULT_ABORTED.String(),
	}

	return hr, nil
}
-------------------------------------------------------------------------------- /reporter/artifactstore/reporter.go: --------------------------------------------------------------------------------
package artifactstorereporter

import (
	"bytes"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sync"
	"time"

	artifacts "github.com/dropbox/changes-artifacts/client"
	"github.com/dropbox/changes-client/client"
	"github.com/dropbox/changes-client/client/adapter"
	"github.com/dropbox/changes-client/client/reporter"
	"github.com/dropbox/changes-client/common/atomicflag"
	"github.com/dropbox/changes-client/common/sentry"
)

var (
	// Artifact server endpoint, like http://localhost:8001/
	artifactServer string

	// Bucket in the artifact server where content is being stored.
	// Defaults to jobstepID if blank string is given.
	artifactBucketId string

	// Keep errors from escaping the Reporter.
	discardErrors bool
)

// DefaultDeadline bounds every individual reporter operation.
const DefaultDeadline time.Duration = 30 * time.Second

// Reporter instance to interact with artifact store API to post console logs and artifact files to
// the artifact store. It uses the artifact store client to perform most operations.
type Reporter struct {
	client           *artifacts.ArtifactStoreClient
	bucket           *artifacts.Bucket
	chunkedArtifacts map[string]*artifacts.ChunkedArtifact
	// disabled latches permanently once any operation misses its deadline.
	disabled atomicflag.AtomicFlag
	deadline time.Duration
	// If unset, falls back to artifactServer.
	serverURL string
}

func (r *Reporter) markDeadlineExceeded() {
	r.disabled.Set(true)
}

func (r *Reporter) isDisabled() bool {
	return r.disabled.Get()
}

// Init connects to the artifact server (flag or serverURL) and creates the
// bucket for this jobstep, all under the reporter deadline.
func (r *Reporter) Init(c *client.Config) {
	server := r.serverURL
	if server == "" {
		server = artifactServer
	}
	r.runWithDeadline(r.deadline, func() {
		if server == "" {
			log.Printf("[artifactstorereporter] No artifact server url provided. Disabling reporter.")
			return
		}

		log.Printf("[artifactstorereporter] Setting up artifact store client: %s\n", server)
		r.client = artifacts.NewArtifactStoreClient(server)

		// NOTE(review): this mutates the package-level artifactBucketId as a
		// side effect when the flag was left blank.
		if len(artifactBucketId) == 0 {
			artifactBucketId = c.JobstepID
		}

		// TODO(anupc): At some point in the future, creating a new bucket should be driven by Changes
		// server, rather than being done by the test itself. It makes the process of integrating with
		// Changes common across both Mesos and Jenkins builds.
		//
		// TODO retry
		if bucket, err := r.client.NewBucket(artifactBucketId, "changes", 60); err != nil {
			sentry.Error(err, map[string]string{})
			log.Printf("Error creating new bucket '%s' on artifact server: %s\n", artifactBucketId, err)
			return
		} else {
			log.Printf("Created new bucket %s\n", artifactBucketId)
			r.bucket = bucket
		}
	})
}

func (r *Reporter) PushJobstepStatus(status string, result string) {
	// IGNORED - Not relevant
}

func (r *Reporter) PushCommandStatus(cID string, status string, retCode int) {
	// IGNORED - Not relevant
}

func (r *Reporter) PushSnapshotImageStatus(iID string, status string) error {
	// IGNORED - Not relevant
	return nil
}

func (r *Reporter) ReportMetrics(metrics client.Metrics) {
	// IGNORED - Not relevant
}

// source: Name of the log stream. Usually, differentiates between stdout and stderr streams.
// payload: Stream of bytes to append to this stream.
func (r *Reporter) PushLogChunk(source string, payload []byte) bool {
	retch := make(chan bool, 1)
	r.runWithDeadline(r.deadline, func() {
		if r.bucket == nil {
			retch <- false
			return
		}

		// Create the chunked artifact lazily on first use of this stream.
		if _, ok := r.chunkedArtifacts[source]; !ok {
			if artifact, err := r.bucket.NewChunkedArtifact(source); err != nil {
				sentry.Error(err, map[string]string{})
				log.Printf("Error creating log artifact for %s: %s", source, err)
				retch <- false
				return
			} else {
				log.Printf("Created new artifact with name %s", source)
				r.chunkedArtifacts[source] = artifact
			}
		}

		logstream := r.chunkedArtifacts[source]
		logstream.AppendLog(string(payload[:]))
		retch <- true
	})
	// On deadline timeout nothing was sent to retch, so fall through to false.
	select {
	case ret := <-retch:
		return ret
	default:
		return false
	}
}

func (r *Reporter) PushCommandOutput(cID string, status string, retCode int, output []byte) {
	// IGNORED - We don't support command level outputs yet.
	// TODO: At some point in the future, we can add a per-command artifact to track output of each different command.
}

// Transforms a function returning an error into a function with no return value,
// with the error instead sent to a channel if not nil.
// If the channel send can't immediately succeed, the error will be discarded.
// The intended use is to allow multiple operations to run (potentially concurrently) and
// to capture only the first non-nil error.
151 | func captureError(errch chan error, fn func() error) func() { 152 | return func() { 153 | if err := fn(); err != nil { 154 | select { 155 | case errch <- err: 156 | default: 157 | } 158 | } 159 | } 160 | } 161 | 162 | func constructArtifactRelativePath(absoluteArtifactPath string, artifactSourceDir string) string { 163 | if relativeName, err := filepath.Rel(artifactSourceDir, absoluteArtifactPath); err == nil { 164 | // If relative path was calculated without issues, use it to construct artifact name. 165 | return relativeName 166 | } 167 | 168 | // This will only happen if the artifact path was not absolute (we expect result from 169 | // CollectArtifacts to only have absolute file paths). 170 | log.Printf("[artifactstore] Could not determine relative file path. Using base file name as artifact name.") 171 | return absoluteArtifactPath 172 | } 173 | 174 | func (r *Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) error { 175 | // first non-nil error 176 | firstError := make(chan error, 1) 177 | r.runWithDeadline(r.deadline, captureError(firstError, func() error { 178 | if r.bucket == nil { 179 | return nil 180 | } 181 | 182 | if len(cmdCnf.Artifacts) == 0 { 183 | return nil 184 | } 185 | 186 | matches, err := a.CollectArtifacts(cmdCnf.Artifacts, clientLog) 187 | if err != nil { 188 | clientLog.Printf("[artifactstore] ERROR filtering artifacts: %s", err) 189 | return err 190 | } 191 | 192 | var wg sync.WaitGroup 193 | for _, artifact := range matches { 194 | wg.Add(1) 195 | go func(artifact string) { 196 | defer wg.Done() 197 | artifactName := filepath.Base(artifact) 198 | 199 | log.Printf("[artifactstore] Uploading %s (from %s)", artifactName, artifact) 200 | send := func() error { 201 | if f, err := os.Open(artifact); err != nil { 202 | clientLog.Printf("[artifactstore] Error opening file for streaming %s: %s", artifact, err) 203 | return err 204 | } else if stat, err := f.Stat(); err != nil { 205 | 
clientLog.Printf("[artifactstore] Error stat'ing file for streaming %s: %s", artifact, err) 206 | return err 207 | } else if stat.Size() == 0 { 208 | clientLog.Printf("[artifactstore] Ignoring zero-length artifact %s", artifact) 209 | return nil 210 | } else if sAfct, err := r.bucket.NewStreamedArtifact(constructArtifactRelativePath(artifact, a.GetArtifactRoot()), stat.Size()); err != nil { 211 | clientLog.Printf("[artifactstore] Error creating streaming artifact for %s: %s", artifact, err) 212 | return err 213 | } else { 214 | // TODO: If possible, avoid reading entire contents of the file into memory, and pass the 215 | // file io.Reader directly to http.Post. 216 | // 217 | // The reason it is done this way is because, using bytes.NewReader() ensures that 218 | // Content-Length header is set to a correct value. If not, it is left blank. Alternately, 219 | // we could remove this requirement from the server where Content-Length is verified before 220 | // starting upload to S3. 221 | if contents, err := ioutil.ReadAll(f); err != nil { 222 | clientLog.Printf("[artifactstore] Error reading file for streaming %s: %s", artifact, err) 223 | return err 224 | } else if err := sAfct.UploadArtifact(bytes.NewReader(contents)); err != nil { 225 | // TODO retry if not a terminal error 226 | clientLog.Printf("[artifactstore] Error uploading contents of %s: %s", artifact, err) 227 | return err 228 | } else { 229 | clientLog.Printf("[artifactstore] Successfully uploaded artifact %s to %s", artifact, sAfct.GetContentURL()) 230 | return nil 231 | } 232 | } 233 | } 234 | captureError(firstError, send)() 235 | }(artifact) 236 | } 237 | 238 | wg.Wait() 239 | // Any async errors will be sent to the channel 240 | return nil 241 | })) 242 | select { 243 | case err := <-firstError: 244 | if !discardErrors { 245 | return err 246 | } 247 | default: 248 | } 249 | return nil 250 | } 251 | 252 | func (r *Reporter) Shutdown() { 253 | r.runWithDeadline(r.deadline, r.shutdown) 254 | } 255 | 
// shutdown flushes all chunked (console log) artifacts and closes the bucket.
func (r *Reporter) shutdown() {
	if r.bucket == nil {
		return
	}

	// Wait for queued uploads to complete.
	log.Printf("[artifactstore] Waiting for artifacts to upload...")
	for _, cArt := range r.chunkedArtifacts {
		if err := cArt.Flush(); err != nil {
			sentry.Error(err, map[string]string{})
		}
	}
	log.Printf("[artifactstore] Artifacts finished uploading.")

	// Close the bucket. This implicitly closes all artifacts in the bucket.
	// TODO retry
	err := r.bucket.Close()
	if err != nil {
		sentry.Error(err, map[string]string{})
	}
}

// runWithDeadline runs f, permanently disabling the reporter if f does not
// complete within t (and reporting the timeout to sentry).
// NOTE(review): on timeout the goroutine running f is not cancelled and may
// keep running in the background.
func (r *Reporter) runWithDeadline(t time.Duration, f func()) {
	if r.isDisabled() {
		log.Println("Reporter is disabled. Not calling method")
		return
	}

	// Buffered so the goroutine can always finish its send even after a timeout.
	done := make(chan bool, 1)
	go func() {
		f()
		done <- true
	}()

	select {
	case <-time.After(t):
		sentry.Error(fmt.Errorf("Timed out after %s\n", t), map[string]string{})
		r.markDeadlineExceeded()
		return
	case <-done:
		return
	}
}

// New constructs a Reporter with the default per-operation deadline.
func New() reporter.Reporter {
	return &Reporter{chunkedArtifacts: make(map[string]*artifacts.ChunkedArtifact), deadline: DefaultDeadline}
}

func init() {
	reporter.Register("artifactstore", New)
	flag.StringVar(&artifactServer, "artifacts-server", "", "Artifacts server URL. If blank, this reporter is disabled.")
	flag.StringVar(&artifactBucketId, "artifacts-bucket-id", "", "Artifacts Bucket ID (inside the main bucket; not a real s3 bucket; must not exist)")
	flag.BoolVar(&discardErrors, "discard-artifactstore-errors", true, "Whether to keep ArtifactStore reporter errors from escaping and potentially triggering a failure")
}
-------------------------------------------------------------------------------- /reporter/artifactstore/reporter_test.go: --------------------------------------------------------------------------------
package artifactstorereporter

import (
	"testing"
	"time"

	"github.com/dropbox/changes-artifacts/client/testserver"
	"github.com/dropbox/changes-client/client"
	"github.com/dropbox/changes-client/client/adapter"
)

func TestRunWithDeadline(t *testing.T) {
	r := &Reporter{}
	r.runWithDeadline(20*time.Millisecond, func() {
		time.Sleep(5 * time.Second)
	})
	if !r.isDisabled() {
		t.Error("runWithDeadline did not intercept long running method")
	}
}

func TestInitTimeout(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	r := &Reporter{deadline: 100 * time.Millisecond,
		serverURL: ts.URL}
	ts.ExpectAndHang("POST", "/buckets/")

	r.Init(&client.Config{JobstepID: "jobstep"})

	if !r.isDisabled() {
		t.Error("Init did not fail with deadline exceeded")
	}
}

// mockAdapter embeds the Adapter interface and overrides only artifact
// collection, returning a single well-known file.
type mockAdapter struct {
	adapter.Adapter
}

func (m *mockAdapter) CollectArtifacts([]string, *client.Log) ([]string, error) {
	return []string{"/etc/hosts"}, nil
}

func (m *mockAdapter) GetArtifactRoot() string {
	return "/"
}

func TestPublishArtifactsTimeout(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	r := &Reporter{deadline: 100 * time.Millisecond,
		serverURL: ts.URL}
	ts.ExpectAndRespond("POST", "/buckets/", 200, `{"Id": "jobstep"}`)

	r.Init(&client.Config{JobstepID: "jobstep"})
	if r.isDisabled() {
		t.Error("Init should not fail with deadline exceeded")
	}

	ma := &mockAdapter{}
	ts.ExpectAndHang("POST", "/buckets/jobstep/artifacts")
	l := client.NewLog()
	go l.Drain()
	defer l.Close()
	r.PublishArtifacts(client.ConfigCmd{Artifacts: []string{"*hosts*"}}, ma, l)

	if !r.isDisabled() {
		t.Error("PublishArtifacts did not fail with deadline exceeded")
	}
}

func TestPublishArtifactsDoesntHang(t *testing.T) {
	r := &Reporter{deadline: 100 * time.Millisecond}
	r.Init(&client.Config{JobstepID: "jobstep"})
	l := client.NewLog()
	go l.Drain()
	defer l.Close()
	var ma mockAdapter
	// No artifacts means it should finish immediately.
	r.PublishArtifacts(client.ConfigCmd{Artifacts: []string{}}, &ma, l)
	// There's no assertion here because we're implicitly verifying that even in the
	// absence of errors, PublishArtifacts terminates.
	// This may sound silly, but that was a bug we had, and this makes sure we won't again.
}

func TestShutdownTimeout(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	r := &Reporter{deadline: 100 * time.Millisecond, serverURL: ts.URL}
	ts.ExpectAndRespond("POST", "/buckets/", 200, `{"Id": "jobstep"}`)

	r.Init(&client.Config{JobstepID: "jobstep"})
	if r.isDisabled() {
		t.Error("Init should not fail with deadline exceeded")
	}

	ts.ExpectAndHang("POST", "/buckets/jobstep/close")
	r.Shutdown()
	if !r.isDisabled() {
		t.Error("Shutdown did not fail with deadline exceeded")
	}
}

func TestPushLogChunkTimeout(t *testing.T) {
	ts := testserver.NewTestServer(t)
	defer ts.CloseAndAssertExpectations()

	r := &Reporter{deadline: 150 * time.Millisecond, serverURL: ts.URL}
	ts.ExpectAndRespond("POST", "/buckets/", 200, `{"Id": "jobstep"}`)

	r.Init(&client.Config{JobstepID: "jobstep"})
	if r.isDisabled() {
		t.Error("Init should not fail with deadline exceeded")
	}

	ts.ExpectAndHang("POST", "/buckets/jobstep/artifacts")
	r.PushLogChunk("console", []byte("console contents"))
	if !r.isDisabled() {
		t.Error("PushLogChunk did not fail with deadline exceeded")
	}

	// This call should not even create a new request. If it does, testserver will throw an error
	// about an unexpected request.
	// NOTE(review): the original file continues beyond this excerpt.
	r.PushLogChunk("console", []byte("console contents"))
}
--------------------------------------------------------------------------------
/reporter/jenkins/reporter.go:
--------------------------------------------------------------------------------
package jenkinsreporter

import (
	"flag"
	"log"
	"os/exec"
	"path"

	"github.com/dropbox/changes-client/client"
	"github.com/dropbox/changes-client/client/adapter"
	"github.com/dropbox/changes-client/client/reporter"
	"github.com/dropbox/changes-client/common/taggederr"
)

var (
	// Filesystem path artifacts are copied to; set via the
	// -artifact-destination flag.
	artifactDestination string
)

// Reporter for builds driven by Jenkins. The status/log push methods below
// are intentionally empty overrides of DefaultReporter — this reporter's only
// real work is moving collected artifacts into the Jenkins artifact
// destination (presumably Jenkins itself observes status another way — see
// callers to confirm).
type Reporter struct {
	reporter.DefaultReporter
	// Copy of the flag value, captured at Init time.
	artifactDestination string
}

// Init captures the flag-configured artifact destination on the receiver and
// delegates the rest of the setup to DefaultReporter.
func (r *Reporter) Init(c *client.Config) {
	log.Printf("[reporter] Construct reporter with artifact destination: %s", artifactDestination)
	r.artifactDestination = artifactDestination
	r.DefaultReporter.Init(c)
}

// No-op: this reporter does not push jobstep status anywhere.
func (r *Reporter) PushJobstepStatus(status string, result string) {
}

// No-op: this reporter does not push per-command status anywhere.
func (r *Reporter) PushCommandStatus(cID string, status string, retCode int) {
}

// No-op that always reports success, so callers never treat the dropped
// chunk as a delivery failure.
func (r *Reporter) PushLogChunk(source string, payload []byte) bool {
	return true
}

// No-op: this reporter does not push command output anywhere.
func (r *Reporter) PushCommandOutput(cID string, status string, retCode int, output []byte) {
}

// If we were running in an lxc container, the artifacts are already grouped
// but they need to be copied from the container to the actual artifact
// destination. Because we pass through the Jenkins environment variables
// to the commands inside of the container, we expect that they be in the
// same location as we expect them to be, except nested within the mounted filesystem.
48 | func (r *Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) error { 49 | if a.GetRootFs() == "/" { 50 | log.Printf("[reporter] RootFs is /, no need to move artifacts") 51 | return nil 52 | } 53 | 54 | // TODO: Create and use a.GetWorkspace() as artifactSource instead of double using 55 | // artifactDestination. 56 | artifactSource := path.Join(a.GetRootFs(), r.artifactDestination) 57 | log.Printf("[reporter] Copying artifacts from %s to: %s\n", artifactSource, r.artifactDestination) 58 | mkdircmd := exec.Command("mkdir", "-p", artifactDestination) 59 | if output, err := mkdircmd.CombinedOutput(); err != nil { 60 | log.Printf("[reporter] Failed to create artifact destination: %s", output) 61 | return tagged(err).AddTag("cmd", "mkdir") 62 | } 63 | 64 | // path.Join is not used here because path.Join(artifactSource, ".") results in just artifactSource. 65 | // The source needs to end in '/.' for cp to copy the directory contents and not the directory itself. 66 | cpcmd := exec.Command("cp", "-f", "-r", artifactSource+"/.", r.artifactDestination) 67 | if output, err := cpcmd.CombinedOutput(); err != nil { 68 | log.Printf("[reporter] Failed to push artifacts; possibly the source artifact folder did not exist: %s", output) 69 | return tagged(err).AddTag("cmd", "cp") 70 | } 71 | return nil 72 | } 73 | 74 | func tagged(e error) taggederr.TaggedErr { 75 | return taggederr.Wrap(e).AddTag("reporter", "jenkins") 76 | } 77 | 78 | func New() reporter.Reporter { 79 | return &Reporter{} 80 | } 81 | 82 | func init() { 83 | // XXX figure out a reasonable default for this value or default to "" 84 | // and sanity-check the reporter during Init. If this value is invalid 85 | // we should trigger an infastracture failure. 
86 | flag.StringVar(&artifactDestination, "artifact-destination", "/dev/null", "Jenkins artifact destination") 87 | 88 | reporter.Register("jenkins", New) 89 | } 90 | -------------------------------------------------------------------------------- /reporter/mesos/reporter.go: -------------------------------------------------------------------------------- 1 | package mesosreporter 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os/exec" 7 | "strconv" 8 | 9 | "github.com/dropbox/changes-client/client" 10 | "github.com/dropbox/changes-client/client/adapter" 11 | "github.com/dropbox/changes-client/client/reporter" 12 | "github.com/dropbox/changes-client/common/sentry" 13 | ) 14 | 15 | // A reporter that connects and reports to a specific jobstep id. 16 | // Each jobstep id has a number of endpoints associated with it that 17 | // allows the reporter to update the status of logs, snapshots, etc. 18 | type Reporter struct { 19 | reporter.DefaultReporter 20 | dontPushLogChunks bool 21 | } 22 | 23 | func (r *Reporter) Init(c *client.Config) { 24 | r.dontPushLogChunks = c.GetDebugConfigBool("mesosDontPushLogChunks", true) 25 | r.DefaultReporter.Init(c) 26 | } 27 | 28 | func (r *Reporter) PushJobstepStatus(status string, result string) { 29 | log.Printf("[reporter] Pushing status %s", status) 30 | form := make(map[string]string) 31 | form["status"] = status 32 | if len(result) > 0 { 33 | form["result"] = result 34 | } 35 | 36 | if out, err := exec.Command("/bin/hostname", "-f").Output(); err != nil { 37 | sentry.Message(fmt.Sprintf("[reporter] Unable to detect hostname: %v", err), map[string]string{}) 38 | } else { 39 | form["node"] = string(out) 40 | } 41 | r.PublishChannel <- reporter.ReportPayload{Path: r.JobstepAPIPath(), Data: form, Filename: ""} 42 | } 43 | 44 | func (r *Reporter) PushCommandStatus(cID string, status string, retCode int) { 45 | form := make(map[string]string) 46 | form["status"] = status 47 | if retCode >= 0 { 48 | form["return_code"] = 
strconv.Itoa(retCode)
	}
	r.PublishChannel <- reporter.ReportPayload{Path: "/commands/" + cID + "/", Data: form, Filename: ""}
}

// PushLogChunk forwards a chunk of the console log to the jobstep's
// logappend endpoint. Chunks are silently accepted (returning true) when
// pushing is disabled or the source is anything other than the console log.
func (r *Reporter) PushLogChunk(source string, payload []byte) bool {
	if r.dontPushLogChunks {
		return true
	}
	// logappend endpoint only works for console logs
	if source != "console" {
		return true
	}
	chunkForm := map[string]string{
		"source": source,
		"text":   string(payload),
	}
	r.PublishChannel <- reporter.ReportPayload{Path: r.JobstepAPIPath() + "logappend/", Data: chunkForm, Filename: ""}
	return true
}

// PushCommandOutput queues a command's final status and captured output;
// retCode is only included when non-negative.
func (r *Reporter) PushCommandOutput(cID string, status string, retCode int, output []byte) {
	outputForm := map[string]string{
		"status": status,
		"output": string(output),
	}
	if retCode >= 0 {
		outputForm["return_code"] = strconv.Itoa(retCode)
	}
	r.PublishChannel <- reporter.ReportPayload{Path: "/commands/" + cID + "/", Data: outputForm, Filename: ""}
}

func (r *Reporter) PublishArtifacts(cmd client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) error {
	// The artifactstore reporter should handle all artifact publishing, so this does nothing.
80 | return nil 81 | } 82 | 83 | func New() reporter.Reporter { 84 | return &Reporter{} 85 | } 86 | 87 | func init() { 88 | reporter.Register("mesos", New) 89 | } 90 | -------------------------------------------------------------------------------- /reporter/multireporter/reporter.go: -------------------------------------------------------------------------------- 1 | package multireporter 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | "strings" 7 | 8 | "github.com/dropbox/changes-client/client" 9 | "github.com/dropbox/changes-client/client/adapter" 10 | "github.com/dropbox/changes-client/client/reporter" 11 | "github.com/dropbox/changes-client/common/sentry" 12 | ) 13 | 14 | // Colon-separated list of downstream reporters to multiplex all Reporter operations to 15 | var reporterDestinations string 16 | 17 | // Sends out notifications to multiple reporters. 18 | // Currently used to dual-write logs and artifacts to both Changes DB and Artifact Store, while the 19 | // store is being evaluated for stability and performance. 
20 | type Reporter struct { 21 | reporterDestinations []reporter.Reporter 22 | } 23 | 24 | func (r *Reporter) Init(c *client.Config) { 25 | reporters := strings.Split(reporterDestinations, ":") 26 | for _, rep := range reporters { 27 | if newRep, err := reporter.Create(rep); err != nil { 28 | if sentryClient := sentry.GetClient(); sentryClient != nil { 29 | sentryClient.CaptureError(err, map[string]string{}) 30 | } 31 | 32 | // Allow other reporters to proceed 33 | continue 34 | } else { 35 | log.Printf("[multireporter] Initialization successful: %s", rep) 36 | r.reporterDestinations = append(r.reporterDestinations, newRep) 37 | } 38 | } 39 | 40 | log.Printf("[multireporter] Setting up multiple client reporters: %s\n", reporters) 41 | 42 | for _, rep := range r.reporterDestinations { 43 | rep.Init(c) 44 | } 45 | } 46 | 47 | func (r *Reporter) PushJobstepStatus(status string, result string) { 48 | for _, r := range r.reporterDestinations { 49 | r.PushJobstepStatus(status, result) 50 | } 51 | } 52 | 53 | func (r *Reporter) PushCommandStatus(cID string, status string, retCode int) { 54 | for _, r := range r.reporterDestinations { 55 | r.PushCommandStatus(cID, status, retCode) 56 | } 57 | } 58 | 59 | func (r *Reporter) PushSnapshotImageStatus(iID string, status string) error { 60 | var firstError error 61 | for _, r := range r.reporterDestinations { 62 | if e := r.PushSnapshotImageStatus(iID, status); e != nil && firstError == nil { 63 | firstError = e 64 | } 65 | } 66 | return firstError 67 | } 68 | 69 | func (r *Reporter) PushLogChunk(source string, payload []byte) bool { 70 | success := true 71 | for _, r := range r.reporterDestinations { 72 | success = success && r.PushLogChunk(source, payload) 73 | } 74 | return success 75 | } 76 | 77 | func (r *Reporter) PushCommandOutput(cID string, status string, retCode int, output []byte) { 78 | for _, r := range r.reporterDestinations { 79 | r.PushCommandOutput(cID, status, retCode, output) 80 | } 81 | } 82 | 83 | func (r 
*Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) error {
	// Fan out to every destination; return the first error encountered but
	// still attempt all destinations.
	var firstError error
	for _, dest := range r.reporterDestinations {
		if err := dest.PublishArtifacts(cmdCnf, a, clientLog); err != nil && firstError == nil {
			firstError = err
		}
	}
	return firstError
}

// ReportMetrics fans the metrics out to every destination.
func (r *Reporter) ReportMetrics(metrics client.Metrics) {
	for _, dest := range r.reporterDestinations {
		dest.ReportMetrics(metrics)
	}
}

// Shutdown shuts down every destination in configuration order.
func (r *Reporter) Shutdown() {
	for _, dest := range r.reporterDestinations {
		dest.Shutdown()
	}
}

func New() reporter.Reporter {
	return &Reporter{}
}

func init() {
	flag.StringVar(&reporterDestinations, "reporter-destinations", "mesos:artifactstore", "Colon-separated list of reporter destinations")

	reporter.Register("multireporter", New)
}
--------------------------------------------------------------------------------
/support/bootstrap-ubuntu.sh:
--------------------------------------------------------------------------------
#!/bin/bash -ex

export DEBIAN_FRONTEND=noninteractive

GO_VERSION=1.6

sudo apt-get install -y python-software-properties software-properties-common
sudo add-apt-repository -y ppa:awstools-dev/awstools

# If we're running on Changes, don't pick up LXC ppa
# (expansion quoted so the test is well-formed even if CHANGES is unset or
# contains whitespace — see ShellCheck SC2086)
if [ -z "$CHANGES" ]
then
	sudo add-apt-repository -y ppa:ubuntu-lxc/stable
fi

sudo apt-get update -y

# Install basic requirements
sudo apt-get install -y git mercurial pkg-config wget

# Install aws cli tools
sudo apt-get install -y awscli

# Install go
re=\\bgo$GO_VERSION\\b
if [ -x /usr/local/go/bin/go ] && [[ `/usr/local/go/bin/go version` =~ $re ]]
then
	echo "Go binary already installed"
else
	echo "Installing Go binary...."
31 | cd /tmp 32 | wget "http://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" 33 | sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" 34 | echo "Installed Go binary...." 35 | fi 36 | 37 | /usr/local/go/bin/go version 38 | 39 | # Install lxc 40 | sudo apt-get install -y libcgmanager0 lxc lxc-dev 41 | 42 | # Install fpm 43 | sudo apt-get install -y ruby-dev gcc 44 | fpm -h > /dev/null || sudo gem install fpm --no-ri --no-rdoc 45 | -------------------------------------------------------------------------------- /support/bootstrap-vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | cd /vagrant/ 4 | 5 | support/bootstrap-ubuntu.sh 6 | 7 | echo "alias work='cd \$GOPATH/src/github.com/dropbox/changes-client'" | sudo tee /etc/profile.d/work-alias.sh 8 | sudo chown -R `whoami` ~/src 9 | -------------------------------------------------------------------------------- /support/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | echo "==> Getting dependencies..." 4 | go get -v ./... 5 | 6 | echo "==> Building..." 7 | go build -v ./... 8 | --------------------------------------------------------------------------------