├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── COPYING ├── Makefile ├── README.md ├── cmd ├── mistry │ ├── README.md │ ├── main.go │ ├── main_test.go │ └── transport.go └── mistryd │ ├── config.go │ ├── config.sample.json │ ├── config.test.json │ ├── end_to_end_test.go │ ├── job.go │ ├── job_queue.go │ ├── job_test.go │ ├── main.go │ ├── metrics │ └── metrics.go │ ├── mistryd_test.go │ ├── project_queue.go │ ├── public │ ├── css │ │ └── foundation.min.css │ ├── index.html │ ├── js │ │ └── index.js │ └── templates │ │ └── show.html │ ├── server.go │ ├── server_test.go │ ├── testdata │ └── projects │ │ ├── bad_entrypoint │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── bootstrap-concurrent │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── bootstrap-twice │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── build-cache │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── build-coalescing-exitcode │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── build-coalescing │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── concurrent │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── concurrent2 │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── concurrent3 │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── concurrent4 │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── copy-folder │ │ ├── Dockerfile │ │ ├── docker-entrypoint.sh │ │ └── koko │ │ │ └── lala.txt │ │ ├── exit-code │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── failed-build-cleanup │ │ └── Dockerfile │ │ ├── failed-build-link │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── hanging-pending │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── image-build-failure │ │ └── Dockerfile │ │ ├── job-id-seeding │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── params │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── result-cache │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ ├── simple │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ │ 
└── sleep │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ ├── worker.go │ ├── worker_pool.go │ ├── worker_pool_test.go │ └── worker_test.go ├── contrib ├── fabfile.py.sample └── mistry-purge-builds ├── go.mod ├── go.sum ├── logo.png └── pkg ├── broker └── broker.go ├── filesystem ├── btrfs │ └── btrfs.go ├── filesystem.go └── plainfs │ └── plainfs.go ├── tailer └── tailer.go ├── types ├── build_info.go ├── doc.go ├── errors.go ├── job_request.go ├── params.go └── transport_method.go └── utils └── utils.go /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.test 3 | *.prof 4 | *.pyc 5 | 6 | /mistry 7 | /mistryd 8 | 9 | cmd/mistry/mistry 10 | cmd/mistryd/mistryd 11 | cmd/mistryd/statik 12 | config.json 13 | fabfile.py 14 | vendor/ 15 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: go 3 | before_install: 4 | - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh 5 | - yes id_rsa | ssh-keygen -t rsa -N "" 6 | - cat id_rsa.pub > ~/.ssh/authorized_keys 7 | - mv id_rsa* /home/travis/.ssh/ 8 | - chmod g-rw,o-rw /home/travis/.ssh/* 9 | go: 10 | - 1.10.x 11 | services: 12 | - docker 13 | install: 14 | - make deps 15 | - go get github.com/rakyll/statik 16 | script: 17 | - make 18 | addons: 19 | ssh_known_hosts: 20 | - localhost 21 | - 127.0.0.1 22 | - 0.0.0.0 23 | branches: 24 | only: 25 | - master 26 | - debian/stretch 27 | notifications: 28 | slack: 29 | secure: 
KuxnK08UM90p7fyK8SoqO7jjs+18b1UNRMcPgQvtZ1x4hWOEeEXt+azrDpU6ntWvzTrO8ow5azgYHpKcw2onM/XL4Ic2GQMGiWY+8qsnzr2G/wiuj4Lbq0BuQApal6/XEM8UH5yXundosokgqCD9iapCgr3umGGPkO1qYAEDnXJhZjhv4NhJ3Rn+4zBCmmsH4XkVtDvMueLaMeojofzpj4DvHAPQ4j6UmdaU37JjKI8tYlVt5PzRLu1fKEgn6QDTVjdryew441ubvfxzE1iE+wu/9Xqi9+rihCWfEzku3rK38XkIT3m8dLZmUINald03QV3B1inOBy9pHSpeV/S1XLCy65Bq84gjKzADPCqaAJCoBPKu7xJ7aOBJSmqDBHFbGJcfAJIQP+y3r4svBk+rU6hgt411ySoO8bOkDFR6Mf7W7rQw3Vds0S4n+i5KDO5m6fPmrPAI5kLmXNmAr9ipRQW8OfTPzZyqjBVZ5KYrWZUPPUJGURsCcy3iv3KcxadQIKntcScFy5bqdepR/xQd8cbJB9mu2ioDswsmhZvYZmxnWe9QSmkVuFdg2iiYQ0EVXZmH9Dpo4mcYRL8F3hFJ3D8l0QauPwaleFG+59CwfqY0Di1IH08UnH6x6MbnIF0PnEAV1lYo3jj8d98u3l0kaYJrfQDwQpLxmW05bzuUHA4= 30 | after_success: 31 | - bash <(curl -s https://codecov.io/bash) 32 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | Breaking changes are prefixed with a "[BREAKING]" label. 4 | 5 | ## master (unreleased) 6 | 7 | ### Added 8 | 9 | - [server] `mistryd --help` now displays the available filesystem adapters [[628ff12](https://github.com/skroutz/mistry/commit/628ff120062599ddb5bb0f2d41cc4d2ae47890ab)] 10 | 11 | ### Changed 12 | 13 | - [server] Removed debug logs coming from the web view [[28e9743](https://github.com/skroutz/mistry/commit/28e97433293fdddbf62089c1514bb15c7efbd829)] 14 | 15 | 16 | ### Fixed 17 | 18 | - [server] We would erroneously consider failed builds as successful, which resulted in some builds starting with cold caches instead of being incremental [[ab5ba18](https://github.com/skroutz/mistry/commit/ab5ba18b59ffd579834abd69e83c756263e4c858)] 19 | - [client] The client now accepts dynamic arguments in the form of `--foo bar` (in addition to `--foo=bar`). 
Previously, it would panic [[f209061](https://github.com/skroutz/mistry/commit/f209061cd16274e4a198ec7d3c8be05718874b93)] 20 | - [client] If the path passed to `--target` did not exist, it was erroneously created as a file [[1bfdeb4](https://github.com/skroutz/mistry/commit/1bfdeb4fccab06910be760d90d8bdef246fb4a3f)] 21 | - [server] Preserve directory structure inside the Docker images built by the server [[#125](https://github.com/skroutz/mistry/pull/125)] 22 | 23 | 24 | 25 | 26 | 27 | ## 0.1.0 (2018-10-01) 28 | 29 | ### Added 30 | 31 | - Support for opaque parameters [[#97](https://github.com/skroutz/mistry/pull/97)] 32 | - server: Version flag `--version/-v` [[5c20927](https://github.com/skroutz/mistry/commit/5c209278bd6bf1032a1958eb252098b9e1ae228a)] 33 | 34 | 35 | ### Fixed 36 | 37 | - server: Synchronize filesystem operations when symlinking [[502a42b](https://github.com/skroutz/mistry/commit/502a42b)] 38 | - server: Errors on the build bootstrap phase would not abort the build [[828eddc](https://github.com/skroutz/mistry/commit/828eddc)] 39 | - server: Socket FDs to docker were never closed [[b079128](https://github.com/skroutz/mistry/commit/b079128c018f145f013a5a2f2e3a51cfe37926e3)] 40 | - webview: improve render performance [[#76](https://github.com/skroutz/mistry/issues/76)] 41 | 42 | ### Changed 43 | 44 | - server: build info contains information about build errors [[7a3427](https://github.com/skroutz/mistry/commit/7a3427)] 45 | - server: build info contains information about build cache usage [[93fd733](https://github.com/skroutz/mistry/commit/93fd733)] 46 | - server: build info contains information about group [[5ff4cb1](https://github.com/skroutz/mistry/commit/5ff4cb1)] 47 | - server: build info contains information about build time [[65b3ef2](https://github.com/skroutz/mistry/commit/65b3ef2)] 48 | 49 | 50 | 51 | 52 | 53 | ## 0.0.2 (2018-05-15) 54 | 55 | ### Added 56 | 57 | - client: Output container stderr on non-zero exit code [[#85](https://github.com/skroutz/mistry/pull/85)] 58 | - 
client: Add a `--timeout` option to specify maximum time to wait for a job [[#81](https://github.com/skroutz/mistry/pull/81)] 59 | - server: Introduced a configuration option to limit the number of concurrent builds [[73c44ec](https://github.com/skroutz/mistry/commit/73c44ecc924260ccf61bad220eb26cd51a1f30d6)] 60 | - server: Add `--rebuild` option to rebuild the docker images of a selection of projects ignoring the image cache [[#70](https://github.com/skroutz/mistry/pull/70)] 61 | - client: Add `--rebuild` option to rebuild the docker image ignoring the image cache [[#70](https://github.com/skroutz/mistry/pull/70)] 62 | - client: Add `--clear-target` option to clear target path before fetching 63 | artifacts [[#63](https://github.com/skroutz/mistry/pull/63)] 64 | - client: Build logs are now displayed when in verbose mode [[#65](https://github.com/skroutz/mistry/pull/65)] 65 | - Asynchronous job scheduling [[#61](https://github.com/skroutz/mistry/pull/61)] 66 | - Web view [[#17](https://github.com/skroutz/mistry/pull/17)] 67 | 68 | ### Changed 69 | 70 | - **[BREAKING]** server: failed image builds are now always visible as ready [[#75](https://github.com/skroutz/mistry/issues/75)] 71 | - server: Job parameters are not logged, making the logs less verbose 72 | - **[BREAKING]** Failed build results are no longer cached [[#62](https://github.com/skroutz/mistry/pull/62)] 73 | - **[BREAKING]** client/server: Client and server binaries are renamed to "mistry" and "mistryd" respectively. 74 | Also project is now go-gettable. 
[[abbfb58](https://github.com/skroutz/mistry/commit/abbfb58d5a2aaf3eaebf9408d81ec7d459326416)] 75 | - client: default host is now 0.0.0.0 76 | 77 | ### Fixed 78 | 79 | - Don't delete build results on docker image build failure [[#75](https://github.com/skroutz/mistry/issues/75)] 80 | - If a container with the same name exists, we remove it so that the new container 81 | can run [[#20](https://github.com/skroutz/mistry/issues/20)] 82 | - Streaming log output in web view might occasionally hang [[7c07ca1](https://github.com/skroutz/mistry/commit/7c07ca177639cd6be7f9a860fb39c01370f35779)] 83 | 84 | ## 0.0.1 (2018-04-12) 85 | 86 | First release! 87 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. 
Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. 
Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. 
Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. 
A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 
163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 
229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | {one line to give the program's name and a brief idea of what it does.} 635 | Copyright (C) {year} {name of author} 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | {project} Copyright (C) {year} {fullname} 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install build mistryd mistry test testall lint fmt clean 2 | 3 | CLIENT=mistry 4 | SERVER=mistryd 5 | BUILDCMD=go build -v 6 | TESTCMD=MISTRY_CLIENT_PATH="$(shell pwd)/$(CLIENT)" go test -v -race cmd/mistryd/*.go 7 | TESTCLICMD=go test -v -race cmd/mistry/*.go 8 | 9 | install: fmt test 10 | go install -v ./... 
11 | 12 | build: mistryd mistry 13 | 14 | mistryd: generate 15 | $(BUILDCMD) -ldflags '-X main.VersionSuffix=$(shell git rev-parse HEAD)' -o $(SERVER) cmd/mistryd/*.go 16 | 17 | mistry: 18 | $(BUILDCMD) -o $(CLIENT) cmd/mistry/*.go 19 | 20 | test: generate mistry 21 | $(TESTCMD) --filesystem plain 22 | $(TESTCLICMD) 23 | 24 | testall: test 25 | $(TESTCMD) --filesystem btrfs 26 | 27 | test-cli: 28 | $(TESTCLICMD) 29 | 30 | deps: 31 | dep ensure -v 32 | 33 | lint: 34 | golint `go list ./... | grep -v /vendor/` 35 | 36 | fmt: 37 | ! go fmt ./... 2>&1 | tee /dev/tty | read 38 | 39 | clean: 40 | go clean ./... 41 | 42 | generate: 43 | go generate ./... 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![mistry logo](logo.png) 2 | 3 | ------------------------------------------------------------------------------ 4 | 5 | [![Build Status](https://api.travis-ci.org/skroutz/mistry.svg?branch=master)](https://travis-ci.org/skroutz/mistry) 6 | [![Go report](https://goreportcard.com/badge/github.com/skroutz/mistry)](https://goreportcard.com/report/github.com/skroutz/mistry) 7 | [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) 8 | 9 | *mistry* is a general-purpose build server that enables fast workflows by 10 | employing artifact caching and incremental building techniques. 11 | 12 | mistry executes user-defined build steps inside isolated environments 13 | and saves build artifacts for later consumption. 14 | 15 | Refer to the introductory blog post *[Speeding Up Our Build Pipelines](https://engineering.skroutz.gr/blog/speeding-up-build-pipelines-with-mistry/)* 16 | for more information. 
17 | 18 | At Skroutz we use mistry to speed our development and deployment 19 | processes: 20 | 21 | - Rails asset compilation (`rails assets:precompile`) 22 | - Bundler dependency resolution and download (`bundle install`) 23 | - Yarn dependency resolution and download (`yarn install`) 24 | 25 | In the above use cases, mistry executes these commands once they are needed for 26 | the first time and caches the results. Then, when anyone else executes the same 27 | commands (i.e. application servers, developer workstations, CI server etc.) 28 | they instantly get the results back. 29 | 30 | 31 | 32 | 33 | Features 34 | ------------------------------------------------------------------------------ 35 | 36 | - execute user-defined build steps in pre-defined environments, provided as Docker images 37 | - build artifact caching 38 | - incremental building (see [*"Build cache"*](https://github.com/skroutz/mistry/wiki/Build-cache)) 39 | - [CLI client](cmd/mistry/README.md) for interacting with the server (scheduling jobs etc.) 40 | via a JSON API 41 | - a web view for inspecting the progress of builds (see [*"Web view"*](#web-view)) 42 | - efficient use of disk space due to copy-on-write semantics (using [Btrfs snapshotting](https://en.wikipedia.org/wiki/Btrfs#Subvolumes_and_snapshots)) 43 | 44 | 45 | 46 | For more information visit the [wiki](https://github.com/skroutz/mistry/wiki). 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | Getting started 59 | ------------------------------------------------- 60 | You can get the binaries from the 61 | [latest releases](https://github.com/skroutz/mistry/releases). 62 | 63 | Alternatively, if you have Go 1.10 or later you can get the 64 | latest development version. 65 | 66 | NOTE: [statik](https://github.com/rakyll/statik) is a build-time dependency, 67 | so it should be installed in your system and present in your PATH. 
68 | 69 | ```shell 70 | $ go get github.com/rakyll/statik 71 | 72 | # server 73 | $ go get -u github.com/skroutz/mistry/cmd/mistryd 74 | 75 | # client 76 | $ go get -u github.com/skroutz/mistry/cmd/mistry 77 | ``` 78 | 79 | 80 | 81 | 82 | 83 | Usage 84 | -------------------------------------------------- 85 | To boot the server a configuration file is needed: 86 | 87 | ```shell 88 | $ mistryd --config config.json 89 | ``` 90 | 91 | You can use the [sample config](cmd/mistryd/config.sample.json) as a starting 92 | point. 93 | 94 | Use `mistryd --help` for more info. 95 | 96 | 97 | 98 | ### Adding projects 99 | 100 | Projects are essentially directories with at minimum a `Dockerfile` at their 101 | root. Each project directory should be placed in the path denoted by 102 | `projects_path` (see [*Configuration*](#configuration)). 103 | 104 | Refer to [*File system layout - Projects directory*](https://github.com/skroutz/mistry/wiki/File-system-layout#projects-directory) 105 | for more info. 106 | 107 | 108 | 109 | 110 | 111 | ### API 112 | 113 | Interacting with mistry (scheduling builds etc.) can be done in two ways: 114 | (1) using the [client](cmd/mistry/README.md) and (2) 115 | using the HTTP API directly (see below). 116 | 117 | We recommend using the client whenever possible. 118 | 119 | #### Client 120 | 121 | Schedule a build for project *foo* and download the artifacts: 122 | 123 | ```sh 124 | $ mistry build --project foo --target /tmp/foo 125 | ``` 126 | 127 | The above command will block until the build is complete and then download the 128 | resulting artifacts to `/tmp/foo/`. 129 | 130 | Schedule a build without fetching the artifacts: 131 | 132 | ```sh 133 | $ mistry build --project foo --no-wait 134 | ``` 135 | 136 | The above will just schedule the build and return immediately - it will not 137 | wait for it to complete and will not fetch the artifacts. 138 | 139 | For more info refer to the client's [README](cmd/mistry/README.md). 
140 | 141 | #### HTTP Endpoints 142 | 143 | Schedule a new build without fetching artifacts (this is equivalent to passing 144 | `--no-wait` when using the client): 145 | 146 | ```shell 147 | $ curl -X POST /jobs \ 148 | -H 'Accept: application/json' \ 149 | -H 'Content-Type: application/json' \ 150 | -d '{"project": "foo"}' 151 | { 152 | "Params": {"foo": "xzv"}, 153 | "Path": "", 154 | "Cached": true, 155 | "Coalesced": false, 156 | "ExitCode": 0, 157 | "Err": null, 158 | "TransportMethod": "rsync" 159 | } 160 | ``` 161 | 162 | 163 | ### Web view 164 | 165 | mistry comes with a web view where progress and logs of each build can be 166 | inspected. 167 | 168 | Browse to http://0.0.0.0:8462 (or whatever address the server listens to). 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | Configuration 179 | ------------------------------------------------- 180 | Configuration is provided in JSON format. The following settings are currently 181 | supported: 182 | 183 | | Setting | Description | Default | 184 | | ------------- |:-------------:| -----:| 185 | | `projects_path` (string) | The path where project folders are located | "" | 186 | | `build_path` (string) | The root path where artifacts will be placed | "" | 187 | | `mounts` (object{string:string}) | The paths from the host machine that should be mounted inside the execution containers | {} | 188 | | `job_concurrency` (int) | Maximum number of builds that may run in parallel | (logical-cpu-count) | 189 | | `job_backlog` (int) | Used for back-pressure - maximum number of outstanding build requests. If exceeded subsequent build requests will fail | (job_concurrency * 2) | 190 | 191 | The paths denoted by `projects_path` and `build_path` should be 192 | present and writable by the user running the server. 193 | 194 | For an example refer to the [sample config](cmd/mistryd/config.sample.json). 
195 | 196 | 197 | 198 | 199 | 200 | Development 201 | --------------------------------------------------- 202 | 203 | Before anything, make sure you install the dependencies: 204 | ```shell 205 | make deps 206 | ``` 207 | 208 | The tests will attempt to ssh to localhost. You will need to add your 209 | public key to the authorized keys as if you were setting this up for a remote 210 | host. 211 | 212 | ```shell 213 | cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys 214 | ``` 215 | 216 | To run the tests, the [Docker daemon](https://docs.docker.com/install/) should 217 | be running and SSH access to localhost should be configured. 218 | 219 | ```shell 220 | $ make test 221 | ``` 222 | 223 | Note: the command above may take more time the first time it's run, 224 | since some Docker images will have to be fetched from the internet. 225 | 226 | 227 | License 228 | ------------------------------------------------- 229 | mistry is released under the GNU General Public License version 3. See [COPYING](COPYING). 230 | 231 | mistry [logo](logo.png) contributed by @cyfugr 232 | -------------------------------------------------------------------------------- /cmd/mistry/README.md: -------------------------------------------------------------------------------- 1 | mistry client 2 | ==================================================== 3 | 4 | `mistry` is a CLI for interacting with the mistry server, `mistryd`, via its 5 | HTTP API. It can schedule builds and download the resulting build artifacts. 6 | 7 | It supports blocking and non-blocking operation modes. 8 | 9 | For usage examples and information, use `mistry build -h`. 
10 | 11 | 12 | ## Development 13 | 14 | Before anything, make sure you install the dependencies: 15 | ```shell 16 | make deps 17 | ``` 18 | 19 | To build the client, execute the following from the repository root: 20 | ```sh 21 | $ make mistry 22 | ``` 23 | 24 | Likewise, to run the tests: 25 | ```sh 26 | $ make test-cli 27 | ``` 28 | 29 | 30 | License 31 | ------------------------------------------------- 32 | mistry is released under the GNU General Public License version 3. See [COPYING](/COPYING). 33 | 34 | 35 | -------------------------------------------------------------------------------- /cmd/mistry/main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018-present Skroutz S.A. 2 | // 3 | // This program is free software: you can redistribute it and/or modify 4 | // it under the terms of the GNU General Public License as published by 5 | // the Free Software Foundation, either version 3 of the License, or 6 | // (at your option) any later version. 7 | // 8 | // This program is distributed in the hope that it will be useful, 9 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | // GNU General Public License for more details. 12 | // 13 | // You should have received a copy of the GNU General Public License 14 | // along with this program. If not, see . 
15 | package main 16 | 17 | import ( 18 | "bytes" 19 | "encoding/json" 20 | "errors" 21 | "fmt" 22 | "io/ioutil" 23 | "net/http" 24 | "net/url" 25 | "os" 26 | "os/user" 27 | "strings" 28 | "time" 29 | 30 | "github.com/skroutz/mistry/pkg/types" 31 | "github.com/urfave/cli" 32 | ) 33 | 34 | var transports = make(map[types.TransportMethod]Transport) 35 | 36 | func init() { 37 | transports[types.Scp] = Scp{} 38 | transports[types.Rsync] = Rsync{} 39 | } 40 | 41 | func main() { 42 | const JobsPath = "jobs" 43 | 44 | var ( 45 | project string 46 | group string 47 | target string 48 | host string 49 | port string 50 | transportUser string 51 | transport string 52 | verbose bool 53 | jsonResult bool 54 | noWait bool 55 | clearTarget bool 56 | rebuild bool 57 | timeout string 58 | ) 59 | 60 | currentUser, err := user.Current() 61 | if err != nil { 62 | fmt.Fprintln(os.Stderr, "Cannot fetch current user:", err) 63 | os.Exit(1) 64 | } 65 | 66 | cli.AppHelpTemplate = fmt.Sprintf(`%s 67 | WEBSITE: https://github.com/skroutz/mistry 68 | `, cli.AppHelpTemplate) 69 | 70 | cli.CommandHelpTemplate = fmt.Sprintf(`%s 71 | JOB PARAMETERS: 72 | -- dynamic options for the command 73 | 74 | EXAMPLES: 75 | 1. Schedule a job with a group and some parameters and put artifacts under 76 | /tmp/yarn using rsync. Prefixing a file name with @ will cause the contents 77 | of yarn.lock to be sent as parameters. Parameters prepended with '_' are opaque 78 | and do not affect the build result. 79 | 80 | $ {{.HelpName}} --host example.org --port 9090 --project yarn \ 81 | --group group_name --transport rsync --target /tmp/yarn \ 82 | -- --lockfile=@yarn.lock --foo=bar --_ignored=true 83 | 84 | 2. Schedule a build and exit early without waiting for the result by setting 85 | the no-wait flag. 
86 | 87 | $ {{.HelpName}} --host example.org --port 9090 --project yarn --no-wait 88 | `, cli.CommandHelpTemplate) 89 | 90 | app := cli.NewApp() 91 | app.Usage = "schedule build jobs at the mistry service" 92 | app.HideVersion = true 93 | app.Commands = []cli.Command{ 94 | { 95 | Name: "build", 96 | Usage: "Schedule jobs and retrieve artifacts.", 97 | Flags: []cli.Flag{ 98 | cli.StringFlag{ 99 | Name: "host", 100 | Usage: "host to connect to", 101 | Destination: &host, 102 | Value: "0.0.0.0", 103 | }, 104 | cli.StringFlag{ 105 | Name: "port, p", 106 | Usage: "port to connect to", 107 | Destination: &port, 108 | Value: "8462", 109 | }, 110 | cli.StringFlag{ 111 | Name: "project", 112 | Usage: "job's project", 113 | Destination: &project, 114 | }, 115 | cli.StringFlag{ 116 | Name: "group, g", 117 | Usage: "group project builds (optional)", 118 | Destination: &group, 119 | }, 120 | cli.BoolFlag{ 121 | Name: "verbose, v", 122 | Destination: &verbose, 123 | }, 124 | cli.BoolFlag{ 125 | Name: "json-result", 126 | Usage: "output the build result in JSON format to STDOUT (implies verbose: false)", 127 | Destination: &jsonResult, 128 | }, 129 | cli.BoolFlag{ 130 | Name: "rebuild", 131 | Usage: "rebuild the docker image", 132 | Destination: &rebuild, 133 | }, 134 | cli.StringFlag{ 135 | Name: "timeout", 136 | Usage: "time to wait for the build to finish, accepts values as defined at https://golang.org/pkg/time/#ParseDuration", 137 | Destination: &timeout, 138 | Value: "60m", 139 | }, 140 | 141 | // transport flags 142 | cli.BoolFlag{ 143 | Name: "no-wait", 144 | Usage: "if set, schedule the job but don't fetch the artifacts", 145 | Destination: &noWait, 146 | }, 147 | cli.StringFlag{ 148 | Name: "transport", 149 | Usage: "the method to use for fetching artifacts", 150 | Destination: &transport, 151 | Value: types.Scp, 152 | }, 153 | cli.StringFlag{ 154 | Name: "transport-user", 155 | Usage: "user to fetch the artifacts with", 156 | Destination: &transportUser, 157 | Value: 
currentUser.Username, 158 | }, 159 | cli.StringFlag{ 160 | Name: "target, t", 161 | Usage: "the local directory where the artifacts will be saved", 162 | Destination: &target, 163 | Value: ".", 164 | }, 165 | cli.BoolFlag{ 166 | Name: "clear-target", 167 | Usage: "remove contents of the target directory before fetching artifacts", 168 | Destination: &clearTarget, 169 | }, 170 | }, 171 | Action: func(c *cli.Context) error { 172 | // Validate existence of mandatory arguments 173 | if host == "" { 174 | return errors.New("host cannot be empty") 175 | } 176 | if project == "" { 177 | return errors.New("project cannot be empty") 178 | } 179 | if target == "" { 180 | return errors.New("target cannot be empty") 181 | } 182 | if !noWait && transport == "" { 183 | return errors.New("you need to either specify a transport or use the async flag") 184 | } 185 | 186 | var ( 187 | clientTimeout time.Duration 188 | err error 189 | ) 190 | if timeout != "" { 191 | clientTimeout, err = time.ParseDuration(timeout) 192 | if err != nil { 193 | return err 194 | } 195 | } 196 | 197 | if jsonResult { 198 | verbose = false 199 | } 200 | 201 | var ( 202 | ts Transport 203 | tsExists bool 204 | ) 205 | 206 | ts, tsExists = transports[types.TransportMethod(transport)] 207 | if !tsExists { 208 | return fmt.Errorf("invalid transport argument (%v)", transport) 209 | } 210 | 211 | params := parseDynamicArgs(c.Args()) 212 | 213 | // Dynamic arguments starting with `@` are considered actual 214 | // files in the filesystem. 215 | // 216 | // For these arguments the params map contains the file 217 | // content. 
218 | for k, v := range params { 219 | if strings.HasPrefix(v, "@") { 220 | data, err := ioutil.ReadFile(strings.TrimPrefix(v, "@")) 221 | if err != nil { 222 | return err 223 | } 224 | params[k] = string(data) 225 | } 226 | } 227 | 228 | // Ensure target exists and is a directory 229 | // 230 | // strip trailing slashes 231 | if os.IsPathSeparator(target[len(target)-1]) { 232 | target = target[:len(target)-1] 233 | } 234 | fi, err := os.Stat(target) 235 | if err != nil { 236 | if os.IsNotExist(err) { 237 | if verbose { 238 | fmt.Printf("Target path (%s) does not exist. Creating it..", target) 239 | } 240 | err = os.MkdirAll(target, os.ModePerm) 241 | if err != nil { 242 | return fmt.Errorf("Error creating target path (%s): %s", target, err) 243 | } 244 | } else { 245 | return fmt.Errorf("Error checking for target path (%s): %s", target, err) 246 | } 247 | } else if !fi.IsDir() { 248 | return fmt.Errorf("Target path (%s) is not a directory. Aborting.", target) 249 | 250 | } 251 | 252 | baseURL := fmt.Sprintf("http://%s:%s", host, port) 253 | url := baseURL + "/" + JobsPath 254 | if noWait { 255 | url += "?async" 256 | } 257 | 258 | jr := types.JobRequest{Project: project, Group: group, Params: params, Rebuild: rebuild} 259 | jrJSON, err := json.Marshal(jr) 260 | if err != nil { 261 | return err 262 | } 263 | 264 | if verbose { 265 | fmt.Printf("Scheduling %#v...\n", jr) 266 | } 267 | 268 | body, err := sendRequest(url, jrJSON, verbose, clientTimeout) 269 | if err != nil { 270 | if isTimeout(err) { 271 | return fmt.Errorf("The build did not finish after %s, %s", clientTimeout, err) 272 | } 273 | return err 274 | } 275 | 276 | if noWait { 277 | if verbose { 278 | fmt.Println("Build scheduled successfully") 279 | } 280 | return nil 281 | } 282 | 283 | // Transfer the result 284 | bi := types.NewBuildInfo() 285 | err = json.Unmarshal([]byte(body), bi) 286 | if err != nil { 287 | return err 288 | } 289 | 290 | if !jsonResult { 291 | fmt.Println("Logs can be found at", 
// sendRequest schedules a build by POSTing the JSON-encoded job request
// to the given URL and returns the raw response body.
//
// If verbose is true, the raw HTTP response is printed to stdout.
// timeout bounds the whole request/response cycle; a zero timeout means
// no timeout. Non-201 responses are converted into errors; callers can
// detect client timeouts with isTimeout.
func sendRequest(url string, reqBody []byte, verbose bool, timeout time.Duration) ([]byte, error) {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Post(url, "application/json", bytes.NewBuffer(reqBody))
	if err != nil {
		return nil, err
	}
	// The body was previously never closed, leaking the underlying
	// connection on every request.
	defer resp.Body.Close()

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	if verbose {
		fmt.Printf("Server response: %#v\n", resp)
	}

	if resp.StatusCode == http.StatusServiceUnavailable {
		return nil, fmt.Errorf("(error: %d) Server is overloaded; try again later", resp.StatusCode)
	} else if resp.StatusCode != http.StatusCreated {
		return nil, fmt.Errorf("(error: %d) Error scheduling build: %s", resp.StatusCode, respBody)
	}

	return respBody, nil
}

// isTimeout reports whether err is a client timeout, as returned by
// net/http when the request exceeded http.Client.Timeout.
func isTimeout(err error) bool {
	urlErr, ok := err.(*url.Error)
	return ok && urlErr.Timeout()
}

// parseDynamicArgs converts the free-form CLI arguments into build
// parameters. Both `--key value` and `key=value` forms are supported
// (dashes before the key are stripped) and may be mixed. A trailing key
// with no value is ignored.
//
// The parameter is a plain []string so the function does not depend on
// the CLI library; cli.Args is assignable to it.
func parseDynamicArgs(args []string) map[string]string {
	parsed := make(map[string]string)

	for i := 0; i < len(args); {
		// TrimLeft with a cutset strips any number of leading dashes.
		arg := strings.TrimLeft(args[i], "-")

		if strings.Contains(arg, "=") {
			// Split on the first '=' only, so values may themselves
			// contain '=' characters (e.g. `--k=a=b` -> k: "a=b").
			parts := strings.SplitN(arg, "=", 2)
			parsed[parts[0]] = parts[1]
			i++
		} else if i+1 < len(args) {
			parsed[arg] = args[i+1]
			i = i + 2
		} else {
			// Key without a value at the end of the argument list.
			i++
		}
	}

	return parsed
}
-------------------------------------------------------------------------------- /cmd/mistry/transport.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "log" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/skroutz/mistry/pkg/utils" 12 | ) 13 | 14 | // Transport is the interface that wraps the basic Copy method, facilitating 15 | // downloading build artifacts from a mistry server. 16 | type Transport interface { 17 | // Copy downloads to dst the build artifacts from src. The value of 18 | // src depends on the underlying implementation. dst denotes a path 19 | // on the local filesystem. host is the hostname of the server. user 20 | // is an opaque field that depends on the underlying implementation. 21 | // 22 | // If clearDst is true the contents of dst (if any) should be removed 23 | // before downloading artifacts. 24 | Copy(user, host, project, src, dst string, clearDst bool) (string, error) 25 | } 26 | 27 | // Scp uses scp(1) to fetch build artifacts from the server via SSH. 28 | // 29 | // See man 1 scp. 30 | type Scp struct{} 31 | 32 | // Copy runs 'scp user@host:src dst'. 
If clearDst is set, all contents of dst will be 33 | // removed before the scp 34 | func (ts Scp) Copy(user, host, project, src, dst string, clearDst bool) (string, error) { 35 | if clearDst { 36 | err := removeDirContents(dst) 37 | if err != nil { 38 | return "", err 39 | } 40 | } 41 | return utils.RunCmd([]string{"scp", "-r", fmt.Sprintf("%s@%s:%s", user, host, src), dst}) 42 | } 43 | 44 | func removeDirContents(dir string) error { 45 | items, err := ioutil.ReadDir(dir) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | for _, item := range items { 51 | err = os.RemoveAll(filepath.Join(dir, item.Name())) 52 | if err != nil { 53 | return err 54 | } 55 | } 56 | return nil 57 | } 58 | 59 | // Rsync uses rsync(1) and the rsync protocol to fetch build artifacts from 60 | // the server. It is more efficient than Scp and the recommended transport 61 | // for production systems. 62 | // 63 | // See man 1 rsync. 64 | type Rsync struct{} 65 | 66 | // Copy runs 'rsync -rtlp user@host::mistry/src dst'. 
If clearDst is true, the --delete flag 67 | // will be set 68 | func (ts Rsync) Copy(user, host, project, src, dst string, clearDst bool) (string, error) { 69 | module := "mistry" 70 | 71 | idx := strings.Index(src, project) 72 | if idx == -1 { 73 | log.Fatalf("Expected '%s' to contain '%s'", src, project) 74 | } 75 | src = src[idx:] 76 | cmd := []string{"rsync", "-rtlp"} 77 | if clearDst { 78 | cmd = append(cmd, "--delete") 79 | } 80 | cmd = append(cmd, fmt.Sprintf("%s@%s::%s/%s", user, host, module, src), dst) 81 | 82 | return utils.RunCmd(cmd) 83 | } 84 | -------------------------------------------------------------------------------- /cmd/mistryd/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "io" 7 | "os" 8 | "runtime" 9 | "strconv" 10 | 11 | "github.com/skroutz/mistry/pkg/filesystem" 12 | "github.com/skroutz/mistry/pkg/utils" 13 | ) 14 | 15 | // Config holds the configuration values that the Server needs in order to 16 | // function. 17 | type Config struct { 18 | Addr string 19 | FileSystem filesystem.FileSystem 20 | UID string 21 | 22 | ProjectsPath string `json:"projects_path"` 23 | BuildPath string `json:"build_path"` 24 | Mounts map[string]string `json:"mounts"` 25 | 26 | Concurrency int `json:"job_concurrency"` 27 | Backlog int `json:"job_backlog"` 28 | } 29 | 30 | // ParseConfig accepts the listening address, a filesystem adapter and a 31 | // reader from which to parse the configuration, and returns a valid 32 | // Config or an error. 
33 | func ParseConfig(addr string, fs filesystem.FileSystem, r io.Reader) (*Config, error) { 34 | if addr == "" { 35 | return nil, errors.New("addr must be provided") 36 | } 37 | 38 | cfg := new(Config) 39 | cfg.Addr = addr 40 | cfg.FileSystem = fs 41 | 42 | dec := json.NewDecoder(r) 43 | err := dec.Decode(cfg) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | if cfg.UID == "" { 49 | cfg.UID = strconv.Itoa(os.Getuid()) 50 | } 51 | 52 | err = utils.PathIsDir(cfg.ProjectsPath) 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | err = utils.PathIsDir(cfg.BuildPath) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | if cfg.Concurrency == 0 { 63 | // our work is CPU bound so number of cores is OK 64 | cfg.Concurrency = runtime.NumCPU() 65 | } 66 | 67 | if cfg.Backlog == 0 { 68 | // by default allow a request spike double the worker capacity 69 | cfg.Backlog = cfg.Concurrency * 2 70 | } 71 | 72 | return cfg, nil 73 | } 74 | -------------------------------------------------------------------------------- /cmd/mistryd/config.sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "projects_path": "/var/lib/mistry/projects", 3 | "build_path": "/var/lib/mistry/data", 4 | "job_concurrency": 5, 5 | "job_backlog": 20 6 | } 7 | -------------------------------------------------------------------------------- /cmd/mistryd/config.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "projects_path": "testdata/projects", 3 | "build_path": "/tmp", 4 | "mounts": { 5 | "/tmp": "/tmp" 6 | }, 7 | "job_concurrency": 5, 8 | "job_backlog": 100 9 | } 10 | -------------------------------------------------------------------------------- /cmd/mistryd/end_to_end_test.go: -------------------------------------------------------------------------------- 1 | // Tests here verify that all components (CLI <-> Server <-> Worker) 2 | // interact together as expected. 
3 | package main 4 | 5 | import ( 6 | "bufio" 7 | "bytes" 8 | "context" 9 | "encoding/json" 10 | "fmt" 11 | "io/ioutil" 12 | "os" 13 | "path/filepath" 14 | "strings" 15 | "sync" 16 | "testing" 17 | "time" 18 | 19 | "github.com/docker/docker/api/types/container" 20 | docker "github.com/docker/docker/client" 21 | "github.com/skroutz/mistry/pkg/types" 22 | ) 23 | 24 | func TestSimpleBuild(t *testing.T) { 25 | cmdout, cmderr, err := cliBuildJob("--project", "simple") 26 | if err != nil { 27 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 28 | } 29 | } 30 | 31 | func TestContainerErrorLogs(t *testing.T) { 32 | _, cmderr, err := cliBuildJob("--project", "bad_entrypoint", "--", "--test=error-logs") 33 | if err == nil { 34 | t.Fatal("Expected mistry-cli error") 35 | } 36 | expecteds := [2]string{"this is stderr", "missing_command: command not found"} 37 | 38 | for _, expected := range expecteds { 39 | if !strings.Contains(cmderr, expected) { 40 | t.Fatalf("Expected stderr to contain '%s'", expected) 41 | } 42 | } 43 | } 44 | 45 | func toCli(p types.Params) []string { 46 | cliParams := make([]string, len(p)) 47 | i := 0 48 | for k, v := range p { 49 | cliParams[i] = fmt.Sprintf("--%s=%s", k, v) 50 | i++ 51 | } 52 | return cliParams 53 | } 54 | 55 | func TestSimpleRebuild(t *testing.T) { 56 | // run a job, fetch its build time 57 | params := types.Params{"test": "rebuild-cli"} 58 | cmdout, cmderr, err := cliBuildJob("--project", "simple", "--", toCli(params)[0]) 59 | if err != nil { 60 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 61 | } 62 | 63 | j, err := NewJob("simple", params, "", testcfg) 64 | if err != nil { 65 | t.Fatalf("%s", err) 66 | } 67 | 68 | client, err := docker.NewEnvClient() 69 | if err != nil { 70 | t.Fatal(err) 71 | } 72 | 73 | i, _, err := client.ImageInspectWithRaw(context.Background(), j.Image) 74 | if err != nil { 75 | t.Fatal(err) 76 | } 77 | 78 | // remove the build directory for the 
job to run again 79 | err = testcfg.FileSystem.Remove(j.ReadyBuildPath) 80 | if err != nil { 81 | t.Fatal(err) 82 | } 83 | 84 | cmdout, cmderr, err = cliBuildJob("--project", "simple", "--rebuild", "--", toCli(params)[0]) 85 | if err != nil { 86 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 87 | } 88 | // fetch last build time, make sure it is different 89 | i2, _, err := client.ImageInspectWithRaw(context.Background(), j.Image) 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | assertNotEq(i.Created, i2.Created, t) 94 | } 95 | 96 | func TestNonGroupSubsequentInvocation(t *testing.T) { 97 | cmdout, cmderr, err := cliBuildJob("--project", "bootstrap-twice") 98 | if err != nil { 99 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 100 | } 101 | // invoke the 2nd job with different params to trigger the bug 102 | cmdout, cmderr, err = cliBuildJob("--project", "bootstrap-twice", "--", "--foo=zxc") 103 | if err != nil { 104 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 105 | } 106 | } 107 | 108 | func TestBuildTimeout(t *testing.T) { 109 | timeout := "1s" 110 | _, stderr, err := cliBuildJob("--project", "sleep", "--timeout", timeout, "--", "--test=client-timeout") 111 | if err == nil { 112 | t.Fatalf("Expected timeout error") 113 | } 114 | assertEq(strings.Contains(stderr, "The build did not finish after "+timeout), true, t) 115 | } 116 | 117 | func TestAsyncSimpleBuild(t *testing.T) { 118 | cmdout, cmderr, err := cliBuildJob("--json-result", "--project", "simple", "--no-wait", "--", "--test=async") 119 | if err != nil { 120 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 121 | } 122 | assertEq(cmdout, "", t) 123 | assertEq(cmderr, "", t) 124 | 125 | // wait until the build is done and verify the result 126 | 127 | j, err := NewJob("simple", types.Params{"test": "async"}, "", testcfg) 128 | if err != nil { 129 | t.Fatalf("%s", err) 130 | } 131 | 
132 | buildInfoPath := filepath.Join(j.ReadyBuildPath, BuildInfoFname) 133 | 134 | err = waitUntilExists(buildInfoPath) 135 | if err != nil { 136 | t.Fatalf("failed to find job build info at %s: %s", buildInfoPath, err) 137 | } 138 | 139 | bi := types.NewBuildInfo() 140 | biBlob, err := ioutil.ReadFile(buildInfoPath) 141 | if err != nil { 142 | t.Fatalf("%s", err) 143 | } 144 | err = json.Unmarshal(biBlob, bi) 145 | if err != nil { 146 | t.Fatalf("%s", err) 147 | } 148 | assertEq(bi.ExitCode, 0, t) 149 | } 150 | 151 | func waitUntilExists(path string) error { 152 | maxElapsed := 10 * time.Second 153 | start := time.Now() 154 | for { 155 | _, err := os.Stat(path) 156 | if os.IsNotExist(err) { 157 | elapsed := time.Since(start) 158 | if elapsed > maxElapsed { 159 | return fmt.Errorf("file was not found at %s after %s", path, maxElapsed) 160 | } 161 | time.Sleep(500 * time.Millisecond) 162 | } else if err != nil { 163 | return err 164 | } else { 165 | return nil 166 | } 167 | } 168 | } 169 | 170 | func TestExistingContainer(t *testing.T) { 171 | client, err := docker.NewEnvClient() 172 | failIfError(err, t) 173 | 174 | project := "simple" 175 | // randomize params to avoid name conflict errors 176 | // in the manual ContainerCreate called in the test 177 | params := types.Params{"testing": "existing-container-" + randomHexString()} 178 | 179 | j, err := NewJob(project, params, "", testcfg) 180 | failIfError(err, t) 181 | 182 | _, err = client.ContainerCreate( 183 | context.TODO(), 184 | &container.Config{User: testcfg.UID, Image: j.Image}, 185 | &container.HostConfig{NetworkMode: "host"}, 186 | nil, nil, j.Container) 187 | failIfError(err, t) 188 | 189 | cmdout, cmderr, err := cliBuildJob("--project", project, "--", toCli(params)[0]) 190 | if err != nil { 191 | t.Fatalf("Unexpected error %s, stdout: %s, stderr %s", err, cmdout, cmderr) 192 | } 193 | } 194 | 195 | func TestBuildRemoveTarget(t *testing.T) { 196 | // create a new temp target dir 197 | target, err := 
ioutil.TempDir("", "test-remove-target") 198 | failIfError(err, t) 199 | defer os.RemoveAll(target) 200 | 201 | // create 2 files: a /target/file.txt and a /target/dir/file2.txt 202 | dirName := filepath.Join(target, "dir") 203 | fileNames := []string{filepath.Join(target, "file.txt"), filepath.Join(dirName, "file2.txt")} 204 | 205 | err = os.Mkdir(dirName, 0755) 206 | failIfError(err, t) 207 | 208 | for _, filepath := range fileNames { 209 | f, err := os.Create(filepath) 210 | failIfError(err, t) 211 | f.Close() 212 | } 213 | cliArgs := cliDefaultArgs 214 | cliArgs.target = target 215 | 216 | // run the job with remove-target 217 | cmdout, cmderr, err := cliBuildJobArgs(cliArgs, "--project", "simple", "--clear-target") 218 | if err != nil { 219 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 220 | } 221 | // verify the files and directory have been deleted 222 | for _, path := range append(fileNames, dirName) { 223 | _, err = os.Stat(path) 224 | if err == nil { 225 | t.Fatalf("unexpected file found at %s", path) 226 | } else if !os.IsNotExist(err) { 227 | t.Fatalf("error when trying to check target file %s: %s", path, err) 228 | } 229 | } 230 | } 231 | 232 | func failIfError(err error, t *testing.T) { 233 | if err != nil { 234 | t.Fatalf("%s", err) 235 | } 236 | } 237 | 238 | func TestUnknownProject(t *testing.T) { 239 | expected := "Unknown project 'Idontexist'" 240 | 241 | _, cmderr, err := cliBuildJob("--project", "Idontexist") 242 | if err == nil { 243 | t.Fatal("Expected error") 244 | } 245 | if !strings.Contains(cmderr, expected) { 246 | t.Fatalf("Expected '%s' to contain '%s'", cmderr, expected) 247 | } 248 | } 249 | 250 | func TestJobParams(t *testing.T) { 251 | cmdout, cmderr, err := cliBuildJob("--project", "params", "--", "--foo=zxc") 252 | if err != nil { 253 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 254 | } 255 | 256 | out, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, 
"out.txt")) 257 | if err != nil { 258 | t.Fatal(err) 259 | } 260 | 261 | assert(string(out), "zxc", t) 262 | } 263 | 264 | func TestImageBuildFailure(t *testing.T) { 265 | expErr := "could not build docker image" 266 | 267 | _, cmderr, err := cliBuildJob("--project", "image-build-failure") 268 | if err == nil { 269 | t.Fatal("expected error") 270 | } 271 | 272 | if !strings.Contains(cmderr, expErr) { 273 | t.Fatalf("Expected '%s' to contain '%s'", cmderr, expErr) 274 | } 275 | j, err := NewJob("image-build-failure", types.Params{}, "", testcfg) 276 | if err != nil { 277 | t.Fatalf("failed to create job; %s", err) 278 | } 279 | 280 | errDockercmd := "apt-get install -y fofkoeakodksao" 281 | log, err := ReadJobLogs(j.ReadyBuildPath) 282 | if err != nil { 283 | t.Fatalf("failed to read job log; %s", err) 284 | } 285 | 286 | if !strings.Contains(string(log), errDockercmd) { 287 | t.Fatalf("Expected out log '%s' to contain '%s'", string(log), cmderr) 288 | } 289 | 290 | buildInfoPath := filepath.Join(j.ReadyBuildPath, BuildInfoFname) 291 | bi := types.NewBuildInfo() 292 | biBlob, err := ioutil.ReadFile(buildInfoPath) 293 | if err != nil { 294 | t.Fatalf("%s", err) 295 | } 296 | err = json.Unmarshal(biBlob, bi) 297 | if err != nil { 298 | t.Fatalf("%s", err) 299 | } 300 | assertEq(bi.ExitCode, types.ContainerPendingExitCode, t) 301 | } 302 | 303 | func TestLogs(t *testing.T) { 304 | // trigger a job 305 | cmdout, cmderr, err := cliBuildJob("--json-result", "--project", "simple", "--", "--testing=logs") 306 | if err != nil { 307 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 308 | } 309 | 310 | br, err := parseClientJSON(cmdout) 311 | if err != nil { 312 | t.Fatal(err) 313 | } 314 | 315 | // find the log file 316 | j, err := NewJob("simple", types.Params{"testing": "logs"}, "", testcfg) 317 | if err != nil { 318 | t.Fatalf("failed to create job: err: %#v", err) 319 | } 320 | log, err := ReadJobLogs(j.ReadyBuildPath) 321 | if err != nil { 
322 | t.Fatalf("failed to read job log: err: %#v", err) 323 | } 324 | 325 | assertEq(br.ContainerStdouterr, string(log), t) 326 | } 327 | 328 | func TestLogsNotJson(t *testing.T) { 329 | // trigger a job and grab the logs 330 | cmdout, cmderr, err := cliBuildJob("--json-result", "--project", "simple", "--", "--testing=logsnotjson") 331 | if err != nil { 332 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 333 | } 334 | j, err := NewJob("simple", types.Params{"testing": "logsnotjson"}, "", testcfg) 335 | if err != nil { 336 | t.Fatalf("failed to create job: err: %#v", err) 337 | } 338 | logs, err := ReadJobLogs(j.ReadyBuildPath) 339 | if err != nil { 340 | t.Fatalf("failed to read job log: err: %#v", err) 341 | } 342 | 343 | // if any line in the log file can be parsed into a JSON, fail 344 | scanner := bufio.NewScanner(bytes.NewReader(logs)) 345 | for scanner.Scan() { 346 | line := scanner.Bytes() 347 | var v interface{} 348 | 349 | err := json.Unmarshal(line, &v) 350 | if err == nil { 351 | t.Fatalf("found JSON line in the logs: %s", line) 352 | } 353 | } 354 | } 355 | 356 | func TestExitCode(t *testing.T) { 357 | cmdout, _, err := cliBuildJob("--json-result", "--project", "exit-code") 358 | if err == nil { 359 | t.Fatal("expected error") 360 | } 361 | 362 | br, err := parseClientJSON(cmdout) 363 | if err != nil { 364 | t.Fatal(err) 365 | } 366 | 367 | assert(br.ExitCode, 77, t) 368 | } 369 | 370 | func TestCopyDir(t *testing.T) { 371 | cmdout, cmderr, err := cliBuildJob("--json-result", "--project", "copy-folder") 372 | if err != nil { 373 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 374 | } 375 | br, err := parseClientJSON(cmdout) 376 | if err != nil { 377 | panic(err) 378 | } 379 | 380 | assert(br.ExitCode, 0, t) 381 | } 382 | 383 | func TestSameGroupDifferentParams(t *testing.T) { 384 | cmdout1, cmderr1, err := cliBuildJob("--project", "result-cache", "--group", "foo", "--", "--foo=bar") 385 | if 
err != nil { 386 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout1, cmderr1, err) 387 | } 388 | out1, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, "out.txt")) 389 | if err != nil { 390 | t.Fatal(err) 391 | } 392 | 393 | cmdout2, cmderr2, err := cliBuildJob("--project", "result-cache", "--group", "foo", "--", "--foo=bar2") 394 | if err != nil { 395 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout2, cmderr2, err) 396 | } 397 | out2, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, "out.txt")) 398 | if err != nil { 399 | t.Fatal(err) 400 | } 401 | 402 | assertNotEq(out1, out2, t) 403 | } 404 | 405 | func TestResultCache(t *testing.T) { 406 | cmdout1, cmderr1, err := cliBuildJob("--json-result", "--project", "result-cache", "--group", "foo") 407 | if err != nil { 408 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout1, cmderr1, err) 409 | } 410 | out1, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, "out.txt")) 411 | if err != nil { 412 | t.Fatal(err) 413 | } 414 | br1, err := parseClientJSON(cmdout1) 415 | if err != nil { 416 | t.Fatal(err) 417 | } 418 | 419 | cmdout2, cmderr2, err := cliBuildJob("--json-result", "--project", "result-cache", "--group", "foo") 420 | if err != nil { 421 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout2, cmderr2, err) 422 | } 423 | out2, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, "out.txt")) 424 | if err != nil { 425 | t.Fatal(err) 426 | } 427 | br2, err := parseClientJSON(cmdout2) 428 | if err != nil { 429 | t.Fatal(err) 430 | } 431 | 432 | assertEq(out1, out2, t) 433 | assert(br1.Cached, false, t) 434 | assert(br2.Cached, true, t) 435 | assert(br1.ExitCode, 0, t) 436 | assert(br2.ExitCode, 0, t) 437 | } 438 | 439 | func TestRerunFailedBuild(t *testing.T) { 440 | // schedule a build that fails (non zero exit code) 441 | cmdout, _, err := cliBuildJob("--json-result", "--project", "exit-code") 442 | if err == nil { 443 | 
t.Fatal("expected error") 444 | } 445 | 446 | br, err := parseClientJSON(cmdout) 447 | if err != nil { 448 | t.Fatal(err) 449 | } 450 | 451 | assertNotEq(br.ExitCode, 0, t) 452 | 453 | // schedule it again, verify it ran a 2nd time by checking the start timestamp 454 | cmdout, _, err = cliBuildJob("--json-result", "--project", "exit-code") 455 | if err == nil { 456 | t.Fatal("expected error") 457 | } 458 | 459 | br2, err := parseClientJSON(cmdout) 460 | if err != nil { 461 | t.Fatal(err) 462 | } 463 | 464 | assert(br2.ExitCode, br.ExitCode, t) 465 | assertNotEq(br2.StartedAt, br.StartedAt, t) 466 | } 467 | 468 | func TestBuildCoalescingExitCode(t *testing.T) { 469 | var wg sync.WaitGroup 470 | var bi1, bi2 *types.BuildInfo 471 | 472 | wg.Add(1) 473 | go func() { 474 | defer wg.Done() 475 | cmdout, _, err := cliBuildJob("--json-result", "--project", "build-coalescing-exitcode") 476 | if err == nil { 477 | panic("Expected error") 478 | } 479 | bi1, err = parseClientJSON(cmdout) 480 | if err != nil { 481 | panic(err) 482 | } 483 | }() 484 | 485 | cmdout, _, err := cliBuildJob("--json-result", "--project", "build-coalescing-exitcode") 486 | if err == nil { 487 | t.Fatal("Expected error") 488 | } 489 | bi2, err = parseClientJSON(cmdout) 490 | if err != nil { 491 | t.Fatal(err) 492 | } 493 | 494 | wg.Wait() 495 | 496 | assert(bi1.ExitCode, 35, t) 497 | assertEq(bi1.ExitCode, bi2.ExitCode, t) 498 | 499 | assert(bi1.Cached, false, t) 500 | assertEq(bi1.Cached, bi2.Cached, t) 501 | 502 | assertNotEq(bi1.Coalesced, bi2.Coalesced, t) 503 | } 504 | 505 | func TestBuildCoalescing(t *testing.T) { 506 | var wg sync.WaitGroup 507 | var bi1, bi2 *types.BuildInfo 508 | 509 | wg.Add(1) 510 | go func() { 511 | defer wg.Done() 512 | cmdout, _, err := cliBuildJob("--json-result", "--project", "build-coalescing", "--group", "foo") 513 | if err != nil { 514 | panic(err) 515 | } 516 | bi1, err = parseClientJSON(cmdout) 517 | if err != nil { 518 | panic(err) 519 | } 520 | }() 521 | 522 | 
cmdout, _, err := cliBuildJob("--json-result", "--project", "build-coalescing", "--group", "foo") 523 | if err != nil { 524 | t.Fatal(err) 525 | } 526 | bi2, err = parseClientJSON(cmdout) 527 | if err != nil { 528 | t.Fatal(err) 529 | } 530 | 531 | wg.Wait() 532 | 533 | out, err := ioutil.ReadFile(filepath.Join(cliDefaultArgs.target, "out.txt")) 534 | if err != nil { 535 | t.Fatal(err) 536 | } 537 | 538 | assertEq(string(out), "coalescing!\n", t) 539 | 540 | assertNotEq(bi1.Coalesced, bi2.Coalesced, t) 541 | assert(bi1.ExitCode, 0, t) 542 | assertEq(bi1.ExitCode, bi2.ExitCode, t) 543 | } 544 | -------------------------------------------------------------------------------- /cmd/mistryd/job.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/rand" 7 | "crypto/sha256" 8 | "encoding/hex" 9 | "encoding/json" 10 | "errors" 11 | "fmt" 12 | "io" 13 | "io/ioutil" 14 | "log" 15 | "os" 16 | "path/filepath" 17 | "sort" 18 | "strings" 19 | "time" 20 | 21 | dockertypes "github.com/docker/docker/api/types" 22 | "github.com/docker/docker/api/types/container" 23 | "github.com/docker/docker/api/types/filters" 24 | "github.com/docker/docker/api/types/mount" 25 | docker "github.com/docker/docker/client" 26 | "github.com/docker/docker/pkg/jsonmessage" 27 | "github.com/docker/docker/pkg/stdcopy" 28 | "github.com/skroutz/mistry/pkg/filesystem" 29 | "github.com/skroutz/mistry/pkg/types" 30 | "github.com/skroutz/mistry/pkg/utils" 31 | ) 32 | 33 | // Job is the core unit of work. It is essentially something that needs to 34 | // be executed in order to produce the desired artifacts. 35 | type Job struct { 36 | ID string 37 | 38 | // user-provided 39 | Project string 40 | Params types.Params 41 | Group string 42 | 43 | // Rebuild indicates if Docker image cache will be bypassed. 
44 | Rebuild bool 45 | 46 | RootBuildPath string 47 | PendingBuildPath string 48 | ReadyBuildPath string 49 | LatestBuildPath string 50 | ReadyDataPath string 51 | 52 | ProjectPath string 53 | 54 | // NOTE: after a job is complete, this points to an invalid (pending) 55 | // path 56 | BuildLogPath string 57 | BuildInfoFilePath string 58 | 59 | // docker-related 60 | Image string 61 | ImageTar []byte 62 | Container string 63 | 64 | StartedAt time.Time 65 | 66 | BuildInfo *types.BuildInfo 67 | State string 68 | 69 | Log *log.Logger 70 | } 71 | 72 | // NewJob returns a new Job for the given project. project and cfg cannot be 73 | // empty. 74 | func NewJob(project string, params types.Params, group string, cfg *Config) (*Job, error) { 75 | var err error 76 | 77 | if project == "" { 78 | return nil, errors.New("no project given") 79 | } 80 | 81 | if cfg == nil { 82 | return nil, errors.New("invalid configuration") 83 | } 84 | 85 | j := new(Job) 86 | j.Project = project 87 | j.Group = group 88 | j.Params = params 89 | j.ProjectPath = filepath.Join(cfg.ProjectsPath, j.Project) 90 | j.RootBuildPath = filepath.Join(cfg.BuildPath, j.Project) 91 | 92 | if j.Group == "" { 93 | j.LatestBuildPath = filepath.Join(j.RootBuildPath, "latest") 94 | } else { 95 | j.LatestBuildPath = filepath.Join(j.RootBuildPath, "groups", j.Group) 96 | } 97 | 98 | j.ImageTar, err = utils.Tar(j.ProjectPath) 99 | if err != nil { 100 | if os.IsNotExist(err) { 101 | return nil, fmt.Errorf("Unknown project '%s'", j.Project) 102 | } 103 | return nil, err 104 | } 105 | 106 | // compute ID 107 | keys := []string{} 108 | for k := range params { 109 | // params opaque to the build are not taken into account 110 | // when calculating a job's ID 111 | if strings.HasPrefix(k, "_") { 112 | continue 113 | } 114 | 115 | keys = append(keys, k) 116 | } 117 | sort.Strings(keys) 118 | 119 | seed := project + group 120 | for _, v := range keys { 121 | seed += v + params[v] 122 | } 123 | seed += string(j.ImageTar) 124 | 
125 | j.ID = fmt.Sprintf("%x", sha256.Sum256([]byte(seed))) 126 | 127 | j.PendingBuildPath = filepath.Join(j.RootBuildPath, "pending", j.ID) 128 | j.ReadyBuildPath = filepath.Join(j.RootBuildPath, "ready", j.ID) 129 | j.ReadyDataPath = filepath.Join(j.ReadyBuildPath, DataDir) 130 | j.BuildLogPath = BuildLogPath(j.PendingBuildPath) 131 | j.BuildInfoFilePath = filepath.Join(j.PendingBuildPath, BuildInfoFname) 132 | 133 | j.Image = ImgCntPrefix + j.Project 134 | j.Container = ImgCntPrefix + j.ID 135 | 136 | j.StartedAt = time.Now() 137 | j.BuildInfo = types.NewBuildInfo() 138 | j.State = "pending" 139 | j.Log = log.New(os.Stderr, fmt.Sprintf("[%s] ", j), log.Ldate|log.Ltime) 140 | 141 | return j, nil 142 | } 143 | 144 | // BuildImage builds the Docker image denoted by j.Image. If there is an 145 | // error, it will be of type types.ErrImageBuild. 146 | func (j *Job) BuildImage(ctx context.Context, uid string, c *docker.Client, out io.Writer, pullParent, noCache bool) error { 147 | buildArgs := make(map[string]*string) 148 | buildArgs["uid"] = &uid 149 | buildOpts := dockertypes.ImageBuildOptions{ 150 | Tags: []string{j.Image}, 151 | BuildArgs: buildArgs, 152 | NetworkMode: "host", 153 | PullParent: pullParent, 154 | NoCache: noCache, 155 | ForceRemove: true, 156 | } 157 | resp, err := c.ImageBuild(context.Background(), bytes.NewBuffer(j.ImageTar), buildOpts) 158 | if err != nil { 159 | return types.ErrImageBuild{Image: j.Image, Err: err} 160 | } 161 | defer resp.Body.Close() 162 | 163 | err = jsonmessage.DisplayJSONMessagesStream(resp.Body, out, 0, false, nil) 164 | if err != nil { 165 | return types.ErrImageBuild{Image: j.Image, Err: err} 166 | } 167 | 168 | _, _, err = c.ImageInspectWithRaw(context.Background(), j.Image) 169 | if err != nil { 170 | return types.ErrImageBuild{Image: j.Image, Err: err} 171 | } 172 | 173 | return nil 174 | } 175 | 176 | // StartContainer creates and runs the container. 
It blocks until the container 177 | // exits and returns the exit code of the container command. If there was an error 178 | // starting the container, the exit code is irrelevant. 179 | // 180 | // NOTE: If there was an error with the user's dockerfile, the returned exit 181 | // code will be 1 and the error nil. 182 | func (j *Job) StartContainer(ctx context.Context, cfg *Config, c *docker.Client, out, outErr io.Writer) (int, error) { 183 | config := container.Config{User: cfg.UID, Image: j.Image} 184 | 185 | mnts := []mount.Mount{{Type: mount.TypeBind, Source: filepath.Join(j.PendingBuildPath, DataDir), Target: DataDir}} 186 | for src, target := range cfg.Mounts { 187 | mnts = append(mnts, mount.Mount{Type: mount.TypeBind, Source: src, Target: target}) 188 | } 189 | 190 | hostConfig := container.HostConfig{Mounts: mnts, AutoRemove: false, NetworkMode: "host"} 191 | 192 | err := renameIfExists(ctx, c, j.Container) 193 | res, err := c.ContainerCreate(ctx, &config, &hostConfig, nil, nil, j.Container) 194 | if err != nil { 195 | return 0, err 196 | } 197 | 198 | err = c.ContainerStart(ctx, res.ID, dockertypes.ContainerStartOptions{}) 199 | if err != nil { 200 | return 0, err 201 | } 202 | 203 | defer func(id string) { 204 | err = c.ContainerRemove(ctx, id, dockertypes.ContainerRemoveOptions{}) 205 | if err != nil { 206 | log.Printf("[%s] cannot remove container: %s", j, err) 207 | } 208 | }(res.ID) 209 | 210 | logs, err := c.ContainerLogs(ctx, res.ID, 211 | dockertypes.ContainerLogsOptions{Follow: true, ShowStdout: true, ShowStderr: true, 212 | Details: true}) 213 | if err != nil { 214 | return 0, err 215 | } 216 | defer logs.Close() 217 | 218 | _, err = stdcopy.StdCopy(out, io.MultiWriter(out, outErr), logs) 219 | if err != nil { 220 | return 0, err 221 | } 222 | 223 | var result struct { 224 | State struct { 225 | ExitCode int 226 | } 227 | } 228 | 229 | _, inspect, err := c.ContainerInspectWithRaw(ctx, res.ID, false) 230 | if err != nil { 231 | return 0, err 232 
| } 233 | 234 | err = json.Unmarshal(inspect, &result) 235 | if err != nil { 236 | return 0, err 237 | } 238 | 239 | return result.State.ExitCode, nil 240 | } 241 | 242 | // renameIfExists searches for containers with the passed name and renames them 243 | // by appending a random suffix to their name 244 | func renameIfExists(ctx context.Context, c *docker.Client, name string) error { 245 | filter := filters.NewArgs() 246 | filter.Add("name", name) 247 | containers, err := c.ContainerList(ctx, dockertypes.ContainerListOptions{ 248 | Quiet: true, 249 | All: true, 250 | Limit: -1, 251 | Filters: filter, 252 | }) 253 | if err != nil { 254 | return err 255 | } 256 | for _, container := range containers { 257 | err := c.ContainerRename(ctx, container.ID, name+"-renamed-"+randomHexString()) 258 | if err != nil { 259 | return err 260 | } 261 | } 262 | return nil 263 | } 264 | 265 | func randomHexString() string { 266 | buf := make([]byte, 16) 267 | rand.Read(buf) 268 | return hex.EncodeToString(buf) 269 | } 270 | 271 | func (j *Job) String() string { 272 | return fmt.Sprintf( 273 | "{project=%s group=%s id=%s}", 274 | j.Project, j.Group, j.ID[:7]) 275 | } 276 | 277 | // MarshalJSON serializes the Job to JSON 278 | func (j *Job) MarshalJSON() ([]byte, error) { 279 | return json.Marshal(struct { 280 | ID string `json:"id"` 281 | Project string `json:"project"` 282 | StartedAt string `json:"startedAt"` 283 | BuildInfo types.BuildInfo `json:"buildInfo"` 284 | State string `json:"state"` 285 | }{ 286 | ID: j.ID, 287 | Project: j.Project, 288 | StartedAt: j.StartedAt.Format(DateFmt), 289 | BuildInfo: *j.BuildInfo, 290 | State: j.State, 291 | }) 292 | } 293 | 294 | // UnmarshalJSON deserializes JSON data and updates the Job 295 | // with them 296 | func (j *Job) UnmarshalJSON(data []byte) error { 297 | jData := &struct { 298 | ID string `json:"id"` 299 | Project string `json:"project"` 300 | StartedAt string `json:"startedAt"` 301 | BuildInfo types.BuildInfo `json:"buildInfo"` 
302 | State string `json:"state"` 303 | }{} 304 | err := json.Unmarshal(data, &jData) 305 | if err != nil { 306 | return err 307 | } 308 | j.ID = jData.ID 309 | j.Project = jData.Project 310 | j.StartedAt, err = time.Parse(DateFmt, jData.StartedAt) 311 | if err != nil { 312 | return err 313 | } 314 | j.BuildInfo = &jData.BuildInfo 315 | j.State = jData.State 316 | 317 | return nil 318 | } 319 | 320 | // GetState determines the job's current state by using it's path in the filesystem. 321 | func GetState(path, project, id string) (string, error) { 322 | pPath := filepath.Join(path, project, "pending", id) 323 | rPath := filepath.Join(path, project, "ready", id) 324 | _, err := os.Stat(pPath) 325 | if err == nil { 326 | return "pending", nil 327 | } 328 | _, err = os.Stat(rPath) 329 | if err == nil { 330 | return "ready", nil 331 | } 332 | return "", fmt.Errorf("job with id=%s not found error", id) 333 | } 334 | 335 | // CloneSrcPath returns the build path that should be used as the base 336 | // point for j (ie. incremental building) or an empty string if none should 337 | // be used. 338 | func (j *Job) CloneSrcPath() string { 339 | cloneSrc := "" 340 | if j.Group != "" { 341 | var symlinkErr error 342 | cloneSrc, symlinkErr = filepath.EvalSymlinks(j.LatestBuildPath) 343 | if symlinkErr != nil { 344 | cloneSrc = "" 345 | s := "skipping build cache" 346 | if os.IsNotExist(symlinkErr) { 347 | j.Log.Printf("latest link doesn't exist, %s", s) 348 | } else { 349 | j.Log.Printf("error reading latest link: %s, %s", symlinkErr, s) 350 | } 351 | } 352 | } 353 | return cloneSrc 354 | } 355 | 356 | // BootstrapBuildDir creates all required build directories. Cleans the 357 | // pending directory if there were any errors. 
358 | func (j *Job) BootstrapBuildDir(fs filesystem.FileSystem) error { 359 | var err error 360 | 361 | cloneSrc := j.CloneSrcPath() 362 | 363 | if cloneSrc == "" { 364 | err = fs.Create(j.PendingBuildPath) 365 | } else { 366 | err = fs.Clone(cloneSrc, j.PendingBuildPath) 367 | j.BuildInfo.Incremental = true 368 | } 369 | if err != nil { 370 | return workErr("could not create pending build path", err) 371 | } 372 | 373 | // if we cloned, empty the params dir 374 | if cloneSrc != "" { 375 | err = os.RemoveAll(filepath.Join(j.PendingBuildPath, DataDir, ParamsDir)) 376 | if err != nil { 377 | return workErr("could not remove params dir", err) 378 | } 379 | } 380 | 381 | dirs := [4]string{ 382 | filepath.Join(j.PendingBuildPath, DataDir), 383 | filepath.Join(j.PendingBuildPath, DataDir, CacheDir), 384 | filepath.Join(j.PendingBuildPath, DataDir, ArtifactsDir), 385 | filepath.Join(j.PendingBuildPath, DataDir, ParamsDir), 386 | } 387 | 388 | for _, dir := range dirs { 389 | err = utils.EnsureDirExists(dir) 390 | if err != nil { 391 | return workErr("could not ensure directory exists", err) 392 | } 393 | } 394 | return err 395 | } 396 | 397 | // BuildLogPath returns the path of the job logs found at jobPath 398 | func BuildLogPath(jobPath string) string { 399 | return filepath.Join(jobPath, BuildLogFname) 400 | } 401 | 402 | // ReadJobLogs returns the job logs found at jobPath 403 | func ReadJobLogs(jobPath string) ([]byte, error) { 404 | buildLogPath := BuildLogPath(jobPath) 405 | 406 | log, err := ioutil.ReadFile(buildLogPath) 407 | if err != nil { 408 | return nil, err 409 | } 410 | return log, nil 411 | } 412 | 413 | // ReadJobBuildInfo returns a BuildInfo from the given path. If logs is true, 414 | // BuildInfo.Log will contain the build logs. 
415 | func ReadJobBuildInfo(path string, logs bool) (*types.BuildInfo, error) { 416 | buildInfoPath := filepath.Join(path, BuildInfoFname) 417 | buildInfo := types.NewBuildInfo() 418 | 419 | buildInfoBytes, err := ioutil.ReadFile(buildInfoPath) 420 | if err != nil { 421 | return nil, err 422 | } 423 | 424 | err = json.Unmarshal(buildInfoBytes, &buildInfo) 425 | if err != nil { 426 | return nil, err 427 | } 428 | 429 | if logs { 430 | log, err := ReadJobLogs(path) 431 | if err != nil { 432 | return nil, err 433 | } 434 | buildInfo.ContainerStdouterr = string(log) 435 | } 436 | 437 | return buildInfo, nil 438 | } 439 | -------------------------------------------------------------------------------- /cmd/mistryd/job_queue.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // JobQueue holds the jobs that are enqueued currently in the server. It allows 8 | // used as a means to do build coalescing. 9 | type JobQueue struct { 10 | sync.Mutex 11 | jobs map[string]bool 12 | } 13 | 14 | // NewJobQueue returns a new JobQueue ready for use. 15 | func NewJobQueue() *JobQueue { 16 | return &JobQueue{jobs: make(map[string]bool)} 17 | } 18 | 19 | // Add registers j to the list of pending jobs currently in the queue. 20 | // It returns false if an identical job is already enqueued. 21 | func (q *JobQueue) Add(j *Job) bool { 22 | q.Lock() 23 | defer q.Unlock() 24 | 25 | if q.jobs[j.ID] { 26 | return false 27 | } 28 | 29 | q.jobs[j.ID] = true 30 | return true 31 | } 32 | 33 | // Delete removes j from q. 
func (q *JobQueue) Delete(j *Job) {
	q.Lock()
	defer q.Unlock()

	// Deleting a missing key is a no-op, so this is safe to call twice.
	delete(q.jobs, j.ID)
}
-------------------------------------------------------------------------------- /cmd/mistryd/job_test.go: --------------------------------------------------------------------------------
package main

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/skroutz/mistry/pkg/types"
)

// TestJobID verifies that job IDs are deterministically seeded by project
// contents, params and group: identical inputs yield identical IDs, and
// changing any one of them yields a different ID.
func TestJobID(t *testing.T) {
	project := "job-id-seeding"
	params := types.Params{"foo": "bar"}
	group := "zzz"

	j1, err := NewJob(project, params, group, testcfg)
	if err != nil {
		t.Fatal(err)
	}

	// Same inputs -> same ID.
	j2, err := NewJob(project, params, group, testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertEq(j1.ID, j2.ID, t)

	// params seeding
	j3, err := NewJob(project, make(types.Params), group, testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertNotEq(j1.ID, j3.ID, t)

	// group seeding
	j4, err := NewJob(project, params, "c", testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertNotEq(j1.ID, j4.ID, t)

	// project seeding (new empty file)
	path := filepath.Join("testdata", "projects", project, "foo")
	os.Remove(path) // in case there's a leftover from a previous run
	f, err := os.Create(path)
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(path)
	j5, err := NewJob(project, params, group, testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertNotEq(j1.ID, j5.ID, t)

	// project seeding (new non-empty file)
	_, err = f.Write([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	}
	j6, err := NewJob(project, params, group, testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertNotEq(j5.ID, j6.ID, t)
	assertNotEq(j1.ID, j6.ID, t)

	err = f.Close()
	if err != nil {
		t.Fatal(err)
	}

	opqParams := params
	opqParams["_production"] = "ignored"

	// check that params prepended with _ are ignored for ID creation
	j7, err := NewJob(project, opqParams, group, testcfg)
	if err != nil {
		t.Fatal(err)
	}
	assertEq(j6.ID, j7.ID, t)

}
-------------------------------------------------------------------------------- /cmd/mistryd/main.go: --------------------------------------------------------------------------------
// Copyright 2018-present Skroutz S.A.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"fmt"
	"log"
	"math/rand"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/skroutz/mistry/pkg/filesystem"
	_ "github.com/skroutz/mistry/pkg/filesystem/btrfs"
	_ "github.com/skroutz/mistry/pkg/filesystem/plainfs"
	"github.com/urfave/cli"
)

const (
	// DataDir is the root path where all the data of a given project
	// are placed.
	DataDir = "/data"

	// CacheDir is the directory inside DataDir, containing
	// user-generated files that should be persisted between builds.
	CacheDir = "/cache"

	// ArtifactsDir is the directory inside DataDir, containing the build
	// artifacts.
	ArtifactsDir = "/artifacts"

	// ParamsDir is the directory inside Datadir, containing the job
	// parameters of the build.
	ParamsDir = "/params"

	// BuildLogFname is the file inside DataDir, containing the build log.
	BuildLogFname = "out.log"

	// BuildInfoFname is the file inside DataDir, containing the build
	// info.
	BuildInfoFname = "build_info.json"

	// ImgCntPrefix is the common prefix added to the names of all
	// Docker images/containers created by mistry.
	ImgCntPrefix = "mistry-"

	// DateFmt is the date format used throughout build dates.
	DateFmt = "Mon, 02 Jan 2006 15:04:05"
)

// Version contains the release version of the server, adhering to SemVer.
const Version = "0.1.0"

// VersionSuffix is populated at build-time with -ldflags and typically
// contains the Git SHA1 of the tip that the binary is build from. It is then
// appended to Version.
var VersionSuffix string

func init() {
	// Seed the global math/rand source; randomHexString (job.go)
	// depends on it.
	rand.Seed(time.Now().UnixNano())
}

// main wires up the CLI: the default action starts the server, while the
// "rebuild" subcommand rebuilds project Docker images.
func main() {
	// Collect the registered filesystem adapters for the help text.
	availableFS := []string{}
	for fs := range filesystem.Registry {
		availableFS = append(availableFS, fs)
	}
	fs := "[" + strings.Join(availableFS, ", ") + "]"

	app := cli.NewApp()
	app.Name = "mistry"
	app.Usage = "A powerful building service"
	app.HideVersion = false
	app.Version = Version
	if VersionSuffix != "" {
		app.Version = Version + "-" + VersionSuffix[:7]
	}
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "addr, a",
			Value: "0.0.0.0:8462",
			Usage: "Host and port to listen to",
		},
		cli.StringFlag{
			Name:  "config, c",
			Value: "config.json",
			Usage: "Load configuration from `FILE`",
		},
		cli.StringFlag{
			Name:  "filesystem",
			Value: "plain",
			Usage: "Which filesystem adapter to use. Options: " + fs,
		},
	}
	app.Action = func(c *cli.Context) error {
		cfg, err := parseConfigFromCli(c)
		if err != nil {
			return err
		}
		err = SetUp(cfg)
		if err != nil {
			return err
		}
		return StartServer(cfg)
	}
	app.Commands = []cli.Command{
		{
			Name:  "rebuild",
			Usage: "Rebuild docker images for all projects.",
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name:  "fail-fast",
					Usage: "exit immediately on first error",
				},
				cli.StringSliceFlag{
					Name:  "project, p",
					Usage: "the project to build. Multiple projects can be specified. If not passed, all projects are built",
				},
				cli.BoolFlag{
					Name:  "verbose, v",
					Usage: "print logs from docker build and run",
				},
			},
			Action: func(c *cli.Context) error {
				// Global flags live on the parent context.
				cfg, err := parseConfigFromCli(c.Parent())
				if err != nil {
					return err
				}

				logger := log.New(os.Stdout, "", 0)
				r, err := RebuildImages(cfg, logger, c.StringSlice("project"), c.Bool("fail-fast"), c.Bool("verbose"))
				if err != nil {
					return err
				}
				if len(r.failed) > 0 {
					return fmt.Errorf("%s", r)
				}
				fmt.Printf("Finished. %s\n", r)
				return nil
			},
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

// SetUp accepts a Config and performs necessary initialization tasks.
165 | func SetUp(cfg *Config) error { 166 | err := PruneZombieBuilds(cfg) 167 | if err != nil { 168 | return err 169 | } 170 | 171 | return nil 172 | } 173 | 174 | func parseConfigFromCli(c *cli.Context) (*Config, error) { 175 | fs, err := filesystem.Get(c.String("filesystem")) 176 | if err != nil { 177 | return nil, err 178 | } 179 | f, err := os.Open(c.String("config")) 180 | if err != nil { 181 | return nil, fmt.Errorf("cannot parse configuration; %s", err) 182 | } 183 | cfg, err := ParseConfig(c.String("addr"), fs, f) 184 | if err != nil { 185 | return nil, err 186 | } 187 | return cfg, nil 188 | } 189 | 190 | // StartServer sets up and spawns starts the HTTP server 191 | func StartServer(cfg *Config) error { 192 | var wg sync.WaitGroup 193 | s, err := NewServer(cfg, log.New(os.Stderr, "[http] ", log.LstdFlags), true) 194 | if err != nil { 195 | return err 196 | } 197 | 198 | wg.Add(1) 199 | go func() { 200 | defer wg.Done() 201 | err := s.ListenAndServe() 202 | if err != nil { 203 | log.Fatal(err) 204 | } 205 | }() 206 | s.Log.Printf("Listening on %s...", cfg.Addr) 207 | wg.Wait() 208 | s.workerPool.Stop() 209 | return nil 210 | } 211 | -------------------------------------------------------------------------------- /cmd/mistryd/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "log" 7 | "time" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promauto" 11 | ) 12 | 13 | // Recorder holds the collectors used by mistry to export data to prometheus. 
type Recorder struct {
	Log *log.Logger

	// Gauge: finished builds currently hosted per project.
	BuildsHosted *prometheus.GaugeVec
	// Counters per project.
	BuildsStarted                *prometheus.CounterVec
	BuildsFinished               *prometheus.CounterVec
	BuildsCoalesced              *prometheus.CounterVec
	BuildsProcessedIncrementally *prometheus.CounterVec
	// Histograms of build duration, split by outcome.
	BuildsSucceeded  *prometheus.HistogramVec
	BuildsFailed     *prometheus.HistogramVec
	CacheUtilization *prometheus.CounterVec
}

// namespace is the prefix for all exported metric names.
const namespace = "mistry"

// NewRecorder initializes a Recorder and sets up the collectors.
// NOTE(review): promauto registers with the default registry, so calling
// this twice in one process would panic on duplicate registration.
func NewRecorder(logger *log.Logger) *Recorder {
	r := new(Recorder)
	r.Log = logger

	r.BuildsHosted = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "builds_hosted",
			Help:      "The number of finished build hosted currently in the server",
		},
		[]string{"project"},
	)

	r.BuildsStarted = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "builds_started",
			Help:      "The total number builds started by the server",
		},
		[]string{"project"},
	)

	r.BuildsFinished = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "builds_finished",
			Help:      "The number of builds finished.",
		},
		[]string{"project"},
	)

	r.BuildsCoalesced = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "builds_coalesced",
			Help:      "The number of builds that coalesced and were not processed",
		},
		[]string{"project"},
	)

	r.BuildsProcessedIncrementally = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "builds_processed_incrementally",
			Help:      "The number builds processed incrementally by the server",
		},
		[]string{"project"},
	)

	// The buckets we create start at 2 minutes and we create 3 buckets of
	// 2 minute intervals.
	buildTimeBuckets := prometheus.LinearBuckets(120, 120, 3)

	r.BuildsSucceeded = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      "builds_succeeded_seconds",
			Help:      "Build duration and count for successful results.",
			Buckets:   buildTimeBuckets,
		},
		[]string{"project"},
	)

	r.BuildsFailed = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      "builds_failed_seconds",
			Help:      "Build duration and count for failed results.",
			Buckets:   buildTimeBuckets,
		},
		[]string{"project"},
	)

	r.CacheUtilization = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "cache_utilization",
			Help:      "Build result cache utilization",
		},
		[]string{"project"},
	)

	return r
}

// RecordHostedBuilds reads the number of builds for the project by counting
// the folders under its directories.
func (r *Recorder) RecordHostedBuilds(buildPath, projectsPath string) {
	projects, err := ioutil.ReadDir(projectsPath)
	if err != nil {
		// Log and bail; an unreadable directory should not crash metrics
		// collection.
		r.Log.Printf("Failed to read project directory: %s", projectsPath)

		return
	}

	for _, project := range projects {
		// Only "ready" (finished) builds are counted.
		buildDir := fmt.Sprintf("%s/%s/ready", buildPath, project.Name())
		builds, err := ioutil.ReadDir(buildDir)
		if err != nil {
			r.Log.Printf("Failed to read data directory: %s", buildDir)

			continue
		}

		labels := prometheus.Labels{"project": project.Name()}
		r.BuildsHosted.With(labels).Set(float64(len(builds)))
	}
}

// RecordBuildStarted records a build started, independently from its outcome.
func (r *Recorder) RecordBuildStarted(project string) {
	r.BuildsStarted.With(prometheus.Labels{"project": project}).Inc()
}

// RecordBuildCoalesced records a project's build when coalesced.
func (r *Recorder) RecordBuildCoalesced(project string) {
	r.BuildsCoalesced.With(prometheus.Labels{"project": project}).Inc()
}

// RecordBuildFinished records a finished build: its outcome, whether it was
// built incrementally and its duration.
func (r *Recorder) RecordBuildFinished(
	project string,
	success bool,
	incremental bool,
	duration time.Duration,
) {
	labels := prometheus.Labels{"project": project}

	r.BuildsFinished.With(labels).Inc()

	if success {
		if incremental {
			r.BuildsProcessedIncrementally.With(labels).Inc()
		}

		r.BuildsSucceeded.With(labels).Observe(duration.Seconds())
	} else {
		r.BuildsFailed.With(labels).Observe(duration.Seconds())
	}
}

// RecordCacheUtilization records a project build's cache utilization.
func (r *Recorder) RecordCacheUtilization(project string) {
	r.CacheUtilization.With(prometheus.Labels{"project": project}).Inc()
}
-------------------------------------------------------------------------------- /cmd/mistryd/mistryd_test.go: --------------------------------------------------------------------------------
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"os/exec"
	"os/user"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
	"time"

	docker "github.com/docker/docker/client"
	"github.com/skroutz/mistry/pkg/filesystem"
	"github.com/skroutz/mistry/pkg/types"
)

// Shared state for the whole test suite, initialized in TestMain.
var (
	testcfg *Config
	server  *Server
	logger  *log.Logger
	params  = make(types.Params)

	// mistry-cli args
	cliDefaultArgs CliCommonArgs

	addrFlag       string
	configFlag     string
	filesystemFlag string
)

// CliCommonArgs holds the connection/transport arguments passed to the
// mistry CLI binary in the end-to-end tests.
type CliCommonArgs struct {
	host     string
	port     string
	username string
	target   string
}

// TestMain boots a real server (configured via -addr/-config/-filesystem
// test flags) before running the suite, and cleans up on success.
func TestMain(m *testing.M) {
	flag.StringVar(&addrFlag, "addr", "127.0.0.1:8462", "")
	flag.StringVar(&configFlag, "config", "config.test.json", "")
	flag.StringVar(&filesystemFlag, "filesystem", "plain", "")
	flag.Parse()

	parts := strings.Split(addrFlag, ":")
	if len(parts) != 2 {
		panic("invalid addr argument")
	}
	cliDefaultArgs.host = parts[0]
	cliDefaultArgs.port = parts[1]

	fs, err := filesystem.Get(filesystemFlag)
	if err != nil {
		panic(err)
	}
	f, err := os.Open(configFlag)
	if err != nil {
		panic(err)
	}
	testcfg, err = ParseConfig(addrFlag, fs, f)
	if err != nil {
		panic(err)
	}

	tmpdir, err := ioutil.TempDir("", "mistry-tests")
	if err != nil {
		panic(err)
	}
	// on macOS '/tmp' is a symlink to '/private/tmp'
	testcfg.BuildPath, err = filepath.EvalSymlinks(tmpdir)
	if err != nil {
		panic(err)
	}

	user, err := user.Current()
	if err != nil {
		panic(err)
	}
	cliDefaultArgs.username = user.Username

	logger = log.New(os.Stderr, "[http] ", log.LstdFlags)

	server, err = NewServer(testcfg, logger, false)
	if err != nil {
		panic(err)
	}

	// Start the real server in the background and wait until it accepts
	// connections.
	go func() {
		err := SetUp(testcfg)
		if err != nil {
			panic(err)
		}
		err = StartServer(testcfg)
		if err != nil {
			panic(err)
		}

	}()
	waitForServer(cliDefaultArgs.port)

	cliDefaultArgs.target, err = ioutil.TempDir("", "mistry-test-artifacts")
	if err != nil {
		panic(err)
	}

	result := m.Run()
	// Only clean up on success so failures can be inspected.
	if result == 0 {
		err = os.RemoveAll(testcfg.BuildPath)
		if err != nil {
			panic(err)
		}
		err = os.RemoveAll(cliDefaultArgs.target)
		if err != nil {
			panic(err)
		}
	}

	os.Exit(result)
}

// TestPruneZombieBuilds verifies that leftover pending build directories are
// removed by PruneZombieBuilds.
func TestPruneZombieBuilds(t *testing.T) {
	project := "hanging-pending"
	cmdout, cmderr, err := cliBuildJob("--project", project)
	if err != nil {
		t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err)
	}
	path := filepath.Join(testcfg.BuildPath, project, "pending")
	err = testcfg.FileSystem.Create(filepath.Join(path, "foo"))
	if err != nil {
		t.Fatal(err)
	}
	err = testcfg.FileSystem.Create(filepath.Join(path, "bar"))
	if err != nil {
		t.Fatal(err)
	}

	err = PruneZombieBuilds(testcfg)
	if err != nil {
		t.Fatal(err)
	}

	hangingPendingBuilds, err := ioutil.ReadDir(path)
	if err != nil {
		t.Fatal(err)
	}
	if len(hangingPendingBuilds) != 0 {
		t.Fatalf("Expected to have cleaned up all zombie pending builds, found %d", len(hangingPendingBuilds))
	}
}

// TestRebuildImages checks that RebuildImages actually produces a fresh
// Docker image (different creation time) for an existing project.
func TestRebuildImages(t *testing.T) {
	// run a job, fetch its build time
	params := types.Params{"test": "rebuild-server"}
	cmdout, cmderr, err := cliBuildJob("--project", "simple", "--", toCli(params)[0])
	if err != nil {
		t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err)
	}

	j, err := NewJob("simple", params, "", testcfg)
	if err != nil {
		t.Fatalf("%s", err)
	}

	client, err := docker.NewEnvClient()
	if err != nil {
		t.Fatal(err)
	}

	i, _, err := client.ImageInspectWithRaw(context.Background(), j.Image)
	if err != nil {
		t.Fatal(err)
	}

	r, err := RebuildImages(testcfg, logger, []string{"simple"}, true, true)
	failIfError(err, t)
	assertEq(r.successful, 1, t)
	assertEq(len(r.failed), 0, t)

	// fetch last build time, make sure it is different
	i2, _, err := client.ImageInspectWithRaw(context.Background(), j.Image)
	if err != nil {
		t.Fatal(err)
	}
	assertNotEq(i.Created, i2.Created, t)
}

// TestRebuildImagesNonExistingProject expects a failure result and an error
// for unknown projects.
func TestRebuildImagesNonExistingProject(t *testing.T) {
	r, err := RebuildImages(testcfg, logger, []string{"shouldnotexist"}, true, true)
	assertEq(r.successful, 0, t)
	assertEq(r.failed, []string{"shouldnotexist"}, t)
	if err == nil {
		t.Fatal("Expected unknown project error")
	}
}

// readOut reads the out.txt artifact of a finished build, resolving the
// host-side path from the build's container path.
func readOut(bi *types.BuildInfo, path string) (string, error) {
	s := strings.Replace(bi.Path, "/data/artifacts", "", -1)
	out, err := ioutil.ReadFile(filepath.Join(s, "data", path, "out.txt"))
	if err != nil {
		return "", err
	}
	return string(out), nil
}

// assertEq fails the test unless a and b are deeply equal.
func assertEq(a, b interface{}, t *testing.T) {
	if !reflect.DeepEqual(a, b) {
		t.Fatalf("Expected %#v and %#v to be equal", a, b)
	}
}

// assert fails the test unless act deeply equals exp.
func assert(act, exp interface{}, t *testing.T) {
	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Expected %#v to be %#v", act, exp)
	}
}

// assertNotEq fails the test if a and b are deeply equal.
func assertNotEq(a, b interface{}, t *testing.T) {
	if reflect.DeepEqual(a, b) {
		t.Fatalf("Expected %#v and %#v to not be equal", a, b)
	}
}

// postJob issues an HTTP request with jr to the server. It returns an error if
// the request was not successful.
func postJob(jr types.JobRequest) (*types.BuildInfo, error) {
	body, err := json.Marshal(jr)
	if err != nil {
		return nil, fmt.Errorf("cannot marshal %#v; %s", jr, err)
	}

	// Exercise the handler directly via httptest instead of the network.
	req := httptest.NewRequest("POST", "http://example.com/foo", bytes.NewReader(body))
	w := httptest.NewRecorder()
	server.HandleNewJob(w, req)

	resp := w.Result()
	if resp.StatusCode != http.StatusCreated {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatal("Could not read response body + ", err.Error())
		}
		return nil, fmt.Errorf("Expected status=201, got %d | body: %s", resp.StatusCode, body)
	}
	body, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	buildInfo := new(types.BuildInfo)
	err = json.Unmarshal(body, buildInfo)
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal %#v; %s", string(body), err)
	}

	return buildInfo, nil
}

// waitForServer polls the TCP port until the server accepts connections,
// giving up (fatally) after 10 attempts.
func waitForServer(port string) {
	backoff := 50 * time.Millisecond

	for i := 0; i < 10; i++ {
		conn, err := net.DialTimeout("tcp", ":"+port, 1*time.Second)
		if err != nil {
			time.Sleep(backoff)
			continue
		}
		err = conn.Close()
		if err != nil {
			log.Fatal(err)
		}
		return
	}
	log.Fatalf("Server on port %s not up after 10 retries", port)
}

// parseClientJSON decodes the CLI's JSON output into a BuildInfo.
func parseClientJSON(s string) (*types.BuildInfo, error) {
	bi := new(types.BuildInfo)
	err := json.Unmarshal([]byte(s), bi)
	if err != nil {
		return nil, fmt.Errorf("Couldn't unmarshall '%s'", s)
	}
	return bi, nil
}

// cliBuildJob uses the CLI binary to issue a new job request to the server.
// It returns an error if the request could not be issued or if the job
// failed to build.
//
// NOTE: The CLI binary is expected to be present in the directory denoted by
// MISTRY_CLIENT_PATH environment variable or, if empty, from the current
// working directory where the tests are ran from.
func cliBuildJob(args ...string) (string, string, error) {
	return cliBuildJobArgs(cliDefaultArgs, args...)
}

// cliBuildJobArgs runs the CLI binary with the given common args prepended
// and returns its stdout, stderr and exit error.
func cliBuildJobArgs(cliArgs CliCommonArgs, args ...string) (string, string, error) {
	clientPath := os.Getenv("MISTRY_CLIENT_PATH")
	if clientPath == "" {
		clientPath = "./mistry"
	}
	args = append([]string{
		clientPath, "build",
		"--verbose",
		"--host", cliArgs.host,
		"--port", cliArgs.port,
		"--target", cliArgs.target,
		"--transport-user", cliArgs.username},
		args...)

	stdout := new(bytes.Buffer)
	stderr := new(bytes.Buffer)

	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr

	err := cmd.Run()
	return stdout.String(), stderr.String(), err
}
-------------------------------------------------------------------------------- /cmd/mistryd/project_queue.go: --------------------------------------------------------------------------------
package main

import "sync"

// ProjectQueue provides a per-project mutex.
6 | type ProjectQueue struct { 7 | mu sync.Mutex 8 | p map[string]*sync.Mutex 9 | } 10 | 11 | // NewProjectQueue creates a new empty struct 12 | func NewProjectQueue() *ProjectQueue { 13 | return &ProjectQueue{p: make(map[string]*sync.Mutex)} 14 | } 15 | 16 | // Lock locks the project's mutex 17 | func (q *ProjectQueue) Lock(project string) { 18 | q.mu.Lock() 19 | plock, ok := q.p[project] 20 | if !ok { 21 | q.p[project] = new(sync.Mutex) 22 | plock = q.p[project] 23 | } 24 | q.mu.Unlock() 25 | plock.Lock() 26 | } 27 | 28 | // Unlock unlocks the project's mutex 29 | func (q *ProjectQueue) Unlock(project string) { 30 | q.mu.Lock() 31 | q.p[project].Unlock() 32 | q.mu.Unlock() 33 | } 34 | -------------------------------------------------------------------------------- /cmd/mistryd/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | The mistry jobs 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 |
17 |
18 |

mistry

19 |
20 |
21 | 22 |
23 |
24 | 25 |
26 |

Jobs

27 |
28 | 29 |
30 |
31 |
32 |
33 | 34 | 35 | -------------------------------------------------------------------------------- /cmd/mistryd/public/js/index.js: -------------------------------------------------------------------------------- 1 | const JobsRoot = document.getElementById('js-jobs') 2 | 3 | class Jobs extends React.Component { 4 | constructor(props) { 5 | super(props) 6 | this.every = props.every 7 | this.state = {} 8 | }; 9 | 10 | fetchJobs() { 11 | fetch("/index"). 12 | then(response => response.json()). 13 | then(data => this.setState({ jobs: data })); 14 | }; 15 | 16 | componentDidMount() { 17 | this.fetchJobs(); 18 | this.interval = setInterval(() => this.fetchJobs(), this.every); 19 | }; 20 | 21 | componentWillUnmount() { 22 | clearInterval(this.interval); 23 | }; 24 | 25 | render() { 26 | if (this.state.jobs == undefined) { 27 | return ( 28 |
29 |

No jobs...

30 |
31 | ); 32 | } 33 | 34 | let jobs = this.state.jobs; 35 | return ( 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | {jobs.map(function(j, idx){ 47 | return ( 48 | 49 | 50 | 51 | 52 | 53 | 54 | ) 55 | })} 56 | 57 |
IDProjectStarted AtState
{j.id} {j.project}{j.startedAt}{j.state}
58 | ); 59 | } 60 | } 61 | 62 | ReactDOM.render(, JobsRoot) 63 | -------------------------------------------------------------------------------- /cmd/mistryd/public/templates/show.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Job Logs 8 | 9 | 10 | 11 | 12 |
13 |
14 |

mistry

15 |
16 |
17 | 18 |
19 |
20 |
21 |

Job: {{.ID}}

22 |
23 |
24 |
25 |
26 |
27 |

Details

28 |
29 |

30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |

Logs

38 |
39 |

40 |
41 |
42 |
43 |
44 | 45 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /cmd/mistryd/server.go: -------------------------------------------------------------------------------- 1 | //go:generate statik -src=./public -f 2 | package main 3 | 4 | import ( 5 | "bufio" 6 | "bytes" 7 | "context" 8 | "encoding/json" 9 | "errors" 10 | "fmt" 11 | "html/template" 12 | "io" 13 | "io/ioutil" 14 | "log" 15 | "net/http" 16 | "os" 17 | "path/filepath" 18 | "sort" 19 | "strings" 20 | "time" 21 | 22 | "github.com/docker/docker/api/types/filters" 23 | docker "github.com/docker/docker/client" 24 | units "github.com/docker/go-units" 25 | "github.com/rakyll/statik/fs" 26 | "github.com/skroutz/mistry/cmd/mistryd/metrics" 27 | _ "github.com/skroutz/mistry/cmd/mistryd/statik" 28 | "github.com/skroutz/mistry/pkg/broker" 29 | "github.com/skroutz/mistry/pkg/types" 30 | 31 | "github.com/prometheus/client_golang/prometheus/promhttp" 32 | ) 33 | 34 | // Server is the component that performs the actual work (builds images, runs 35 | // commands etc.). It also exposes the JSON API by which users interact with 36 | // mistry. 37 | type Server struct { 38 | Log *log.Logger 39 | 40 | fs http.FileSystem 41 | srv *http.Server 42 | jq *JobQueue 43 | cfg *Config 44 | workerPool *WorkerPool 45 | 46 | // synchronizes access to the filesystem on a per-project basis 47 | pq *ProjectQueue 48 | 49 | // web-view related 50 | br *broker.Broker 51 | 52 | // related to prometheus 53 | metrics *metrics.Recorder 54 | } 55 | 56 | // NewServer accepts a non-nil configuration and an optional logger, and 57 | // returns a new Server. 58 | // If logger is nil, server logs are disabled. 
59 | func NewServer(cfg *Config, logger *log.Logger, enableMetrics bool) (*Server, error) { 60 | var err error 61 | 62 | if cfg == nil { 63 | return nil, errors.New("config cannot be nil") 64 | } 65 | 66 | if logger == nil { 67 | logger = log.New(ioutil.Discard, "", 0) 68 | } 69 | 70 | s := new(Server) 71 | mux := http.NewServeMux() 72 | 73 | s.fs, err = fs.New() 74 | if err != nil { 75 | logger.Fatal(err) 76 | } 77 | 78 | mux.Handle("/", http.StripPrefix("/", http.FileServer(s.fs))) 79 | mux.HandleFunc("/jobs", s.HandleNewJob) 80 | mux.HandleFunc("/index/", s.HandleIndex) 81 | mux.HandleFunc("/job/", s.HandleShowJob) 82 | mux.HandleFunc("/log/", s.HandleServerPush) 83 | mux.Handle("/metrics", promhttp.Handler()) 84 | 85 | s.srv = &http.Server{Handler: mux, Addr: cfg.Addr} 86 | s.cfg = cfg 87 | s.Log = logger 88 | s.jq = NewJobQueue() 89 | s.pq = NewProjectQueue() 90 | s.br = broker.NewBroker(s.Log) 91 | s.workerPool = NewWorkerPool(s, cfg.Concurrency, cfg.Backlog, logger) 92 | 93 | if enableMetrics { 94 | s.metrics = metrics.NewRecorder(logger) 95 | } 96 | 97 | return s, nil 98 | } 99 | 100 | // HandleNewJob receives requests for new jobs and builds them. 
101 | func (s *Server) HandleNewJob(w http.ResponseWriter, r *http.Request) { 102 | if r.Method != "POST" { 103 | http.Error(w, "Expected POST, got "+r.Method, http.StatusMethodNotAllowed) 104 | return 105 | } 106 | 107 | body, err := ioutil.ReadAll(r.Body) 108 | if err != nil { 109 | http.Error(w, "Error reading request body: "+err.Error(), http.StatusBadRequest) 110 | return 111 | } 112 | r.Body.Close() 113 | 114 | jr := types.JobRequest{} 115 | err = json.Unmarshal(body, &jr) 116 | if err != nil { 117 | http.Error(w, fmt.Sprintf("Error unmarshalling body '%s' to Job: %s", body, err), 118 | http.StatusBadRequest) 119 | return 120 | } 121 | j, err := NewJob(jr.Project, jr.Params, jr.Group, s.cfg) 122 | if err != nil { 123 | http.Error(w, fmt.Sprintf("Error creating new job %v: %s", jr, err), 124 | http.StatusInternalServerError) 125 | return 126 | } 127 | j.Rebuild = jr.Rebuild 128 | 129 | // send the work item to the worker pool 130 | future, err := s.workerPool.SendWork(j) 131 | if err != nil { 132 | // the in-memory queue is overloaded, we have to wait for the workers to pick 133 | // up new items. 
134 | // return a 503 to signal that the server is overloaded and for clients to try 135 | // again later 136 | // 503 is an appropriate status code to signal that the server is overloaded 137 | // for all users, while 429 would have been used if we implemented user-specific 138 | // throttling 139 | s.Log.Print("Failed to send message to work queue") 140 | w.WriteHeader(http.StatusServiceUnavailable) 141 | return 142 | } 143 | 144 | // if async, we're done, otherwise wait for the result in the result channel 145 | _, async := r.URL.Query()["async"] 146 | if async { 147 | s.Log.Printf("Scheduled %s", j) 148 | w.WriteHeader(http.StatusCreated) 149 | } else { 150 | s.Log.Printf("Scheduled %s and waiting for result...", j) 151 | s.writeWorkResult(j, future.Wait(), w) 152 | } 153 | } 154 | 155 | func (s *Server) writeWorkResult(j *Job, r WorkResult, w http.ResponseWriter) { 156 | if r.Err != nil { 157 | http.Error(w, fmt.Sprintf("Error building %s: %s", j, r.Err), 158 | http.StatusInternalServerError) 159 | return 160 | } 161 | 162 | w.WriteHeader(http.StatusCreated) 163 | w.Header().Set("Content-Type", "application/json") 164 | 165 | resp, err := json.Marshal(r.BuildInfo) 166 | if err != nil { 167 | s.Log.Print(err) 168 | } 169 | _, err = w.Write([]byte(resp)) 170 | if err != nil { 171 | s.Log.Printf("Error writing response for %s: %s", j, err) 172 | } 173 | } 174 | 175 | // HandleIndex returns all available jobs. 
176 | func (s *Server) HandleIndex(w http.ResponseWriter, r *http.Request) { 177 | if r.Method != "GET" { 178 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed) 179 | return 180 | } 181 | 182 | jobs, err := s.getJobs() 183 | if err != nil { 184 | s.Log.Printf("cannot get jobs for path %s; %s", s.cfg.BuildPath, err) 185 | w.WriteHeader(http.StatusInternalServerError) 186 | return 187 | } 188 | sort.Slice(jobs, func(i, j int) bool { 189 | return jobs[j].StartedAt.Before(jobs[i].StartedAt) 190 | }) 191 | 192 | resp, err := json.Marshal(jobs) 193 | if err != nil { 194 | s.Log.Printf("cannot marshal jobs '%#v'; %s", jobs, err) 195 | w.WriteHeader(http.StatusInternalServerError) 196 | return 197 | } 198 | 199 | w.WriteHeader(http.StatusOK) 200 | w.Header().Set("Content-Type", "application/json") 201 | 202 | _, err = w.Write(resp) 203 | if err != nil { 204 | s.Log.Printf("cannot write response %s", err) 205 | w.WriteHeader(http.StatusInternalServerError) 206 | return 207 | } 208 | } 209 | 210 | // HandleShowJob receives requests for a job and produces the appropriate output 211 | // based on the content type of the request. 
212 | func (s *Server) HandleShowJob(w http.ResponseWriter, r *http.Request) { 213 | if r.Method != "GET" { 214 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed) 215 | return 216 | } 217 | 218 | parts := strings.Split(r.URL.Path, "/") 219 | if len(parts) != 4 { 220 | w.WriteHeader(http.StatusBadRequest) 221 | return 222 | } 223 | project := parts[2] 224 | id := parts[3] 225 | 226 | state, err := GetState(s.cfg.BuildPath, project, id) 227 | if err != nil { 228 | s.Log.Print(err) 229 | w.WriteHeader(http.StatusInternalServerError) 230 | return 231 | } 232 | jPath := filepath.Join(s.cfg.BuildPath, project, state, id) 233 | 234 | buildInfo, err := ReadJobBuildInfo(jPath, true) 235 | if err != nil { 236 | s.Log.Print(err) 237 | w.WriteHeader(http.StatusInternalServerError) 238 | return 239 | } 240 | 241 | j := Job{ 242 | BuildInfo: buildInfo, 243 | ID: id, 244 | Project: project, 245 | State: state, 246 | } 247 | 248 | if r.Header.Get("Content-type") == "application/json" { 249 | jData, err := json.Marshal(j) 250 | if err != nil { 251 | s.Log.Print(err) 252 | w.WriteHeader(http.StatusInternalServerError) 253 | return 254 | } 255 | 256 | w.Header().Set("Content-Type", "application/json") 257 | _, err = w.Write(jData) 258 | if err != nil { 259 | s.Log.Printf("HandleShowJob: error writing Content-Type header: %s", err) 260 | } 261 | return 262 | } 263 | 264 | f, err := s.fs.Open("/templates/show.html") 265 | if err != nil { 266 | s.Log.Print(err) 267 | w.WriteHeader(http.StatusInternalServerError) 268 | return 269 | } 270 | 271 | tmplBody, err := ioutil.ReadAll(f) 272 | if err != nil { 273 | s.Log.Print(err) 274 | w.WriteHeader(http.StatusInternalServerError) 275 | return 276 | } 277 | 278 | tmpl := template.New("jobshow") 279 | tmpl, err = tmpl.Parse(string(tmplBody)) 280 | if err != nil { 281 | s.Log.Print(err) 282 | w.WriteHeader(http.StatusInternalServerError) 283 | return 284 | } 285 | 286 | buf := new(bytes.Buffer) 287 | err = 
tmpl.Execute(buf, j) 288 | if err != nil { 289 | s.Log.Print(err) 290 | w.WriteHeader(http.StatusInternalServerError) 291 | return 292 | } 293 | _, err = buf.WriteTo(w) 294 | if err != nil { 295 | s.Log.Print(err) 296 | w.WriteHeader(http.StatusInternalServerError) 297 | return 298 | } 299 | } 300 | 301 | func getJobURL(j *Job) string { 302 | return strings.Join([]string{"job", j.Project, j.ID}, "/") 303 | } 304 | 305 | // HandleServerPush emits build logs as Server-SentEvents (SSE). 306 | func (s *Server) HandleServerPush(w http.ResponseWriter, r *http.Request) { 307 | if r.Method != "GET" { 308 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed) 309 | return 310 | } 311 | 312 | parts := strings.Split(r.URL.Path, "/") 313 | if len(parts) != 4 { 314 | w.WriteHeader(http.StatusBadRequest) 315 | return 316 | } 317 | project := parts[2] 318 | id := parts[3] 319 | 320 | state, err := GetState(s.cfg.BuildPath, project, id) 321 | if err != nil { 322 | s.Log.Print(err) 323 | w.WriteHeader(http.StatusInternalServerError) 324 | return 325 | } 326 | 327 | // Decide whether to tail the log file and keep the connection alive for 328 | // sending server side events. 
329 | if state != "pending" { 330 | w.WriteHeader(http.StatusNoContent) 331 | return 332 | } 333 | 334 | flusher, ok := w.(http.Flusher) 335 | if !ok { 336 | http.Error(w, "Streaming unsupported!", http.StatusInternalServerError) 337 | return 338 | } 339 | 340 | w.Header().Set("Content-Type", "text/event-stream") 341 | w.Header().Set("Cache-Control", "no-cache") 342 | w.Header().Set("Connection", "keep-alive") 343 | w.Header().Set("Access-Control-Allow-Origin", "*") 344 | 345 | jPath := filepath.Join(s.cfg.BuildPath, project, state, id) 346 | buildLogPath := filepath.Join(jPath, BuildLogFname) 347 | client := &broker.Client{ID: id, Data: make(chan []byte), Extra: buildLogPath} 348 | s.br.NewClients <- client 349 | 350 | go func() { 351 | <-w.(http.CloseNotifier).CloseNotify() 352 | s.br.ClosingClients <- client 353 | }() 354 | 355 | for { 356 | msg, ok := <-client.Data 357 | if !ok { 358 | break 359 | } 360 | _, err := fmt.Fprintf(w, "data: %s\n\n", msg) 361 | if err != nil { 362 | s.Log.Printf("HandleServerPush: error writing log data to client: %s", err) 363 | } 364 | 365 | flusher.Flush() 366 | } 367 | } 368 | 369 | // ListenAndServe listens on the TCP network address s.srv.Addr and handle 370 | // requests on incoming connections. ListenAndServe always returns a 371 | // non-nil error. 
372 | func (s *Server) ListenAndServe() error { 373 | s.Log.Printf("Configuration: %#v", s.cfg) 374 | go s.br.ListenForClients() 375 | 376 | go func() { 377 | for { 378 | s.metrics.RecordHostedBuilds(s.cfg.BuildPath, s.cfg.ProjectsPath) 379 | time.Sleep(5 * time.Minute) 380 | } 381 | }() 382 | 383 | return s.srv.ListenAndServe() 384 | } 385 | 386 | type pruneResult struct { 387 | prunedImages int 388 | prunedContainers int 389 | reclaimedSpace uint64 390 | } 391 | 392 | // RebuildResult contains result data on the rebuild operation 393 | type RebuildResult struct { 394 | successful int 395 | failed []string 396 | pruneResult 397 | } 398 | 399 | func (r RebuildResult) String() string { 400 | var failedNames string 401 | if len(r.failed) > 0 { 402 | failedNames = ", Failed names: " + strings.Join(r.failed, ", ") 403 | } 404 | 405 | return fmt.Sprintf( 406 | "Rebuilt: %d, Pruned images: %d, Pruned containers: %d, Reclaimed: %s, Failed: %d%s", 407 | r.successful, r.prunedImages, r.prunedContainers, units.HumanSize(float64(r.reclaimedSpace)), 408 | len(r.failed), failedNames) 409 | } 410 | 411 | // RebuildImages rebuilds images for all projects, and prunes any dangling images 412 | func RebuildImages(cfg *Config, log *log.Logger, projects []string, stopErr, verbose bool) (RebuildResult, error) { 413 | var err error 414 | r := RebuildResult{} 415 | if len(projects) == 0 { 416 | projects, err = getProjects(cfg) 417 | if err != nil { 418 | return r, err 419 | } 420 | } 421 | 422 | client, err := docker.NewEnvClient() 423 | if err != nil { 424 | return r, err 425 | } 426 | 427 | ctx := context.Background() 428 | for _, project := range projects { 429 | start := time.Now() 430 | log.Printf("Rebuilding %s...\n", project) 431 | j, err := NewJob(project, types.Params{}, "", cfg) 432 | if err != nil { 433 | r.failed = append(r.failed, project) 434 | if stopErr { 435 | return r, err 436 | } 437 | log.Printf("Failed to instantiate %s job with error: %s\n", project, err) 438 | } 
else { 439 | var buildErr error 440 | if verbose { 441 | // pipe image build logs to the logger 442 | pr, pw := io.Pipe() 443 | buildResult := make(chan error) 444 | 445 | go func() { 446 | err := j.BuildImage(ctx, cfg.UID, client, pw, true, true) 447 | pErr := pw.Close() 448 | if pErr != nil { 449 | // as of Go 1.10 this is never non-nil 450 | log.Printf("Unexpected PipeWriter.Close() error: %s\n", pErr) 451 | } 452 | buildResult <- err 453 | }() 454 | 455 | scanner := bufio.NewScanner(pr) 456 | for scanner.Scan() { 457 | log.Print(scanner.Text()) 458 | } 459 | buildErr = <-buildResult 460 | } else { 461 | // discard image build logs 462 | buildErr = j.BuildImage(ctx, cfg.UID, client, ioutil.Discard, true, true) 463 | } 464 | 465 | if buildErr != nil { 466 | r.failed = append(r.failed, project) 467 | if stopErr { 468 | return r, buildErr 469 | } 470 | log.Printf("Failed to build %s job %s with error: %s\n", project, j.ID, buildErr) 471 | } else { 472 | log.Printf("Rebuilt %s in %s\n", project, time.Now().Sub(start).Truncate(time.Millisecond)) 473 | r.successful++ 474 | } 475 | } 476 | } 477 | r.pruneResult, err = dockerPruneUnused(ctx, client) 478 | if err != nil { 479 | return r, err 480 | } 481 | return r, nil 482 | } 483 | 484 | // dockerPruneUnused prunes stopped containers and unused images 485 | func dockerPruneUnused(ctx context.Context, c *docker.Client) (pruneResult, error) { 486 | // prune containers before images, this will allow more images to be eligible for clean up 487 | noFilters := filters.NewArgs() 488 | cr, err := c.ContainersPrune(ctx, noFilters) 489 | if err != nil { 490 | return pruneResult{}, err 491 | } 492 | ir, err := c.ImagesPrune(ctx, noFilters) 493 | if err != nil { 494 | return pruneResult{}, err 495 | } 496 | return pruneResult{ 497 | prunedImages: len(ir.ImagesDeleted), 498 | prunedContainers: len(cr.ContainersDeleted), 499 | reclaimedSpace: ir.SpaceReclaimed + cr.SpaceReclaimed}, nil 500 | } 501 | 502 | // PruneZombieBuilds removes 
any pending builds from the filesystem. 503 | func PruneZombieBuilds(cfg *Config) error { 504 | projects, err := getProjects(cfg) 505 | if err != nil { 506 | return err 507 | } 508 | l := log.New(os.Stderr, "[cleanup] ", log.LstdFlags) 509 | 510 | for _, p := range projects { 511 | pendingPath := filepath.Join(cfg.BuildPath, p, "pending") 512 | pendingBuilds, err := ioutil.ReadDir(pendingPath) 513 | if err != nil { 514 | l.Printf("error reading pending builds; skipping project (%s): %s", p, err) 515 | continue 516 | } 517 | 518 | for _, pending := range pendingBuilds { 519 | pendingBuildPath := filepath.Join(pendingPath, pending.Name()) 520 | err = cfg.FileSystem.Remove(pendingBuildPath) 521 | if err != nil { 522 | return fmt.Errorf("Error pruning zombie build '%s' of project '%s'", pending.Name(), p) 523 | } 524 | l.Printf("Pruned zombie build '%s' of project '%s'", pending.Name(), p) 525 | } 526 | } 527 | return nil 528 | } 529 | 530 | func getProjects(cfg *Config) ([]string, error) { 531 | root := cfg.ProjectsPath 532 | folders, err := ioutil.ReadDir(root) 533 | if err != nil { 534 | return nil, err 535 | } 536 | 537 | projects := []string{} 538 | 539 | for _, f := range folders { 540 | if !f.IsDir() { 541 | continue 542 | } 543 | 544 | _, err := os.Stat(filepath.Join(root, f.Name(), "Dockerfile")) 545 | if err != nil { 546 | if os.IsNotExist(err) { 547 | fmt.Println(filepath.Join(root, f.Name(), "Dockerfile"), "doesn't exist") 548 | continue 549 | } 550 | return nil, err 551 | } 552 | 553 | projects = append(projects, f.Name()) 554 | } 555 | 556 | return projects, nil 557 | } 558 | 559 | // getJobs returns all pending and ready jobs. 
560 | func (s *Server) getJobs() ([]Job, error) { 561 | var pendingJobs, readyJobs []os.FileInfo 562 | jobs := []Job{} 563 | projects := []string{} 564 | 565 | // find projects 566 | folders, err := ioutil.ReadDir(s.cfg.BuildPath) 567 | if err != nil { 568 | return nil, fmt.Errorf("cannot scan projects; %s", err) 569 | } 570 | for _, f := range folders { 571 | if f.IsDir() { 572 | projects = append(projects, f.Name()) 573 | } 574 | } 575 | 576 | for _, p := range projects { 577 | pendingPath := filepath.Join(s.cfg.BuildPath, p, "pending") 578 | _, err := os.Stat(pendingPath) 579 | pendingExists := !os.IsNotExist(err) 580 | if err != nil && !os.IsNotExist(err) { 581 | return nil, fmt.Errorf("cannot check if pending path exists; %s", err) 582 | } 583 | readyPath := filepath.Join(s.cfg.BuildPath, p, "ready") 584 | _, err = os.Stat(readyPath) 585 | readyExists := !os.IsNotExist(err) 586 | if err != nil && !os.IsNotExist(err) { 587 | return nil, fmt.Errorf("cannot check if ready path exists; %s", err) 588 | } 589 | 590 | if pendingExists { 591 | pendingJobs, err = ioutil.ReadDir(pendingPath) 592 | if err != nil { 593 | return nil, fmt.Errorf("cannot scan pending jobs of project %s; %s", p, err) 594 | } 595 | } 596 | if readyExists { 597 | readyJobs, err = ioutil.ReadDir(readyPath) 598 | if err != nil { 599 | return nil, fmt.Errorf("cannot scan ready jobs of project %s; %s", p, err) 600 | } 601 | } 602 | 603 | getJob := func(path, jobID, project, state string) (Job, error) { 604 | bi, err := ReadJobBuildInfo(filepath.Join(path, jobID), false) 605 | if err != nil { 606 | return Job{}, err 607 | } 608 | 609 | return Job{ 610 | ID: jobID, 611 | Project: project, 612 | StartedAt: bi.StartedAt, 613 | State: state, 614 | BuildInfo: bi}, nil 615 | } 616 | 617 | for _, j := range pendingJobs { 618 | job, err := getJob(pendingPath, j.Name(), p, "pending") 619 | if err != nil { 620 | return nil, fmt.Errorf("cannot find job %s; %s", j.Name(), err) 621 | } 622 | jobs = append(jobs, 
job) 623 | } 624 | 625 | for _, j := range readyJobs { 626 | job, err := getJob(readyPath, j.Name(), p, "ready") 627 | if err != nil { 628 | return nil, fmt.Errorf("cannot find job %s; %s", j.Name(), err) 629 | } 630 | jobs = append(jobs, job) 631 | } 632 | } 633 | 634 | return jobs, nil 635 | } 636 | -------------------------------------------------------------------------------- /cmd/mistryd/server_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "math/rand" 8 | "net/http" 9 | "net/http/httptest" 10 | "path" 11 | "strings" 12 | "sync" 13 | "testing" 14 | "time" 15 | 16 | "github.com/skroutz/mistry/pkg/types" 17 | ) 18 | 19 | func TestBootstrapProjectRace(t *testing.T) { 20 | n := 10 21 | project := "bootstrap-concurrent" 22 | jobs := []*Job{} 23 | var wg sync.WaitGroup 24 | 25 | for i := 0; i < n; i++ { 26 | j, err := NewJob(project, params, "", testcfg) 27 | if err != nil { 28 | t.Fatal(err) 29 | } 30 | jobs = append(jobs, j) 31 | } 32 | 33 | for _, j := range jobs { 34 | wg.Add(1) 35 | go func(j *Job) { 36 | defer wg.Done() 37 | err := server.BootstrapProject(j) 38 | if err != nil { 39 | panic(err) 40 | } 41 | }(j) 42 | } 43 | wg.Wait() 44 | } 45 | 46 | func TestLoad(t *testing.T) { 47 | n := 100 48 | results := make(chan *types.BuildInfo, n) 49 | rand.Seed(time.Now().UnixNano()) 50 | 51 | projects := []string{"concurrent", "concurrent2", "concurrent3", "concurrent4"} 52 | params := []types.Params{{}, {"foo": "bar"}, {"abc": "efd", "zzz": "xxx"}} 53 | groups := []string{"", "foo", "abc"} 54 | 55 | for i := 0; i < n; i++ { 56 | go func() { 57 | project := projects[rand.Intn(len(projects))] 58 | params := params[rand.Intn(len(params))] 59 | group := groups[rand.Intn(len(groups))] 60 | 61 | jr := types.JobRequest{Project: project, Params: params, Group: group} 62 | time.Sleep(time.Duration(rand.Intn(200)) * time.Millisecond) 63 | br, err := 
postJob(jr) 64 | if err != nil { 65 | panic(err) 66 | } 67 | results <- br 68 | }() 69 | } 70 | 71 | for i := 0; i < n; i++ { 72 | <-results 73 | } 74 | } 75 | 76 | func TestHandleIndex(t *testing.T) { 77 | cmdout, cmderr, err := cliBuildJob("--project", "simple") 78 | if err != nil { 79 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 80 | } 81 | 82 | req, err := http.NewRequest("GET", "/index", nil) 83 | if err != nil { 84 | t.Fatal(err) 85 | } 86 | 87 | rr := httptest.NewRecorder() 88 | handler := http.HandlerFunc(server.HandleIndex) 89 | handler.ServeHTTP(rr, req) 90 | result := rr.Result() 91 | 92 | if result.StatusCode != http.StatusOK { 93 | t.Errorf("Expected status code %d, got %d", http.StatusOK, result.StatusCode) 94 | } 95 | 96 | expected := `"state":"ready"` 97 | body, err := ioutil.ReadAll(result.Body) 98 | if err != nil { 99 | t.Fatal(err) 100 | } 101 | if !strings.Contains(string(body), expected) { 102 | t.Errorf("Expected body to contain %v, got %v", expected, string(body)) 103 | } 104 | } 105 | 106 | func TestHandleShowJob(t *testing.T) { 107 | cmdout, cmderr, err := cliBuildJob("--project", "simple") 108 | if err != nil { 109 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err) 110 | } 111 | 112 | // Get a job id and project from the index page. 113 | req, err := http.NewRequest("GET", "/index", nil) 114 | if err != nil { 115 | t.Fatal(err) 116 | } 117 | rr := httptest.NewRecorder() 118 | handler := http.HandlerFunc(server.HandleIndex) 119 | handler.ServeHTTP(rr, req) 120 | result := rr.Result() 121 | body, err := ioutil.ReadAll(result.Body) 122 | if err != nil { 123 | t.Fatal(err) 124 | } 125 | job := make([]Job, 0) 126 | err = json.Unmarshal([]byte(body), &job) 127 | if err != nil { 128 | t.Fatal(err) 129 | } 130 | jobID := job[0].ID 131 | project := job[0].Project 132 | 133 | // Request the show page of the job selected from the index page. 
134 | showPath := path.Join("/job", project, jobID) 135 | req, err = http.NewRequest("GET", showPath, nil) 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | req.Header.Set("Content-type", "application/json") 140 | rr = httptest.NewRecorder() 141 | handler = http.HandlerFunc(server.HandleShowJob) 142 | handler.ServeHTTP(rr, req) 143 | result = rr.Result() 144 | 145 | if result.StatusCode != http.StatusOK { 146 | t.Errorf("Expected status code %d, got %d", http.StatusOK, result.StatusCode) 147 | } 148 | 149 | expected := fmt.Sprintf(`"ID":"%s"`, jobID) 150 | body, err = ioutil.ReadAll(result.Body) 151 | if err != nil { 152 | t.Fatal(err) 153 | } 154 | if !strings.Contains(string(body), expected) { 155 | t.Errorf("Expected body to contain %v, got %v", expected, string(body)) 156 | } 157 | } 158 | 159 | func TestNewJobAsync(t *testing.T) { 160 | rec := httptest.NewRecorder() 161 | req := httptest.NewRequest("POST", "/jobs?async", strings.NewReader("{\"project\": \"simple\"}")) 162 | server.srv.Handler.ServeHTTP(rec, req) 163 | resp := rec.Result() 164 | body, err := ioutil.ReadAll(resp.Body) 165 | if err != nil { 166 | t.Errorf("Error in reading response body: %s", err) 167 | } 168 | assertEq(resp.StatusCode, 201, t) 169 | assertEq(string(body), "", t) 170 | } 171 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bad_entrypoint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bad_entrypoint/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 
| 4 | >&2 echo "this is stderr" 5 | echo "this is stdout" 6 | missing_command 7 | 8 | exit 0 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bootstrap-concurrent/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bootstrap-concurrent/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | exit 0 3 | 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bootstrap-twice/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/bootstrap-twice/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | touch artifacts/out.txt 5 | 6 | exit 0 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-cache/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT 
["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-cache/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -f cache/out.txt ]; then 5 | date +%S%N > artifacts/out.txt 6 | else 7 | date +%S%N | tee cache/out.txt > artifacts/out.txt 8 | fi 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-coalescing-exitcode/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-coalescing-exitcode/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | sleep 2 5 | 6 | echo "coalescing!" 
> artifacts/out.txt 7 | 8 | exit 35 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-coalescing/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/build-coalescing/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | sleep 2 5 | 6 | echo "coalescing!" > artifacts/out.txt 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 0 3 | exit 0 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | 
-------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent2/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 2 3 | exit 0 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent3/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent3/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 3 3 | exit 0 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent4/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/concurrent4/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 4 3 | exit 0 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/copy-folder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh 
/usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | COPY koko/ /koko/ 7 | WORKDIR /data 8 | 9 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 10 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/copy-folder/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #HACK: mistry will attempt to copy all artifacts out of the container 3 | # Therefore we need to leave something behind. This might or might not be 4 | # the intented behaviour. Revisit in the future. 5 | touch /data/artifacts/foo 6 | stat /koko/lala.txt 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/copy-folder/koko/lala.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skroutz/mistry/f7b47b936e29985b63a693c9713e6c821e9a01f3/cmd/mistryd/testdata/projects/copy-folder/koko/lala.txt -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/exit-code/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/exit-code/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | exit 77 3 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/failed-build-cleanup/Dockerfile: -------------------------------------------------------------------------------- 
1 | FROM debian:stretch 2 | 3 | INVALIDCOMMAND 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/failed-build-link/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/failed-build-link/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | exit `cat params/_exitcode` 3 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/hanging-pending/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/hanging-pending/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | touch artifacts/out.txt 5 | 6 | exit 0 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/image-build-failure/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | RUN apt-get install -y fofkoeakodksao 4 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/job-id-seeding/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/job-id-seeding/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | sleep 2 5 | 6 | echo "coalescing!" > artifacts/out.txt 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/params/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/params/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cat params/foo > artifacts/out.txt 5 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/result-cache/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/result-cache/docker-entrypoint.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | date +%S%N > artifacts/out.txt 5 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/simple/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/simple/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | touch artifacts/out.txt 5 | 6 | exit 0 7 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/sleep/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh 5 | 6 | WORKDIR /data 7 | 8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /cmd/mistryd/testdata/projects/sleep/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | sleep 10 5 | -------------------------------------------------------------------------------- /cmd/mistryd/worker.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io/ioutil" 9 | "log" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | "time" 14 | 15 | _ 
"github.com/docker/distribution"
	docker "github.com/docker/docker/client"
	"github.com/skroutz/mistry/pkg/types"
	"github.com/skroutz/mistry/pkg/utils"
)

// Work performs the work denoted by j and returns a BuildInfo upon
// successful completion, or an error.
//
// The overall flow is: coalesce with an identical in-flight build if one
// exists; otherwise serve a cached successful result if present; otherwise
// bootstrap the build directory, build the image, run the container and
// persist the outcome. Several deferred functions mutate the named return
// value err on the way out; their registration order is significant (defers
// run LIFO), so the final build_info persist runs BEFORE the pending->ready
// rename.
func (s *Server) Work(ctx context.Context, j *Job) (buildInfo *types.BuildInfo, err error) {
	// Deliberately shadows the package-level log with a per-job logger.
	log := log.New(os.Stderr, fmt.Sprintf("[worker] [%s] ", j), log.LstdFlags)
	start := time.Now()

	// Pre-populate BuildInfo up-front so that coalesced/cached early
	// returns still carry the job metadata.
	buildInfo = types.NewBuildInfo()
	j.BuildInfo = buildInfo
	j.BuildInfo.Path = filepath.Join(j.ReadyBuildPath, DataDir, ArtifactsDir)
	j.BuildInfo.TransportMethod = types.Rsync
	j.BuildInfo.Params = j.Params
	j.BuildInfo.StartedAt = j.StartedAt
	j.BuildInfo.URL = getJobURL(j)
	j.BuildInfo.Group = j.Group

	if s.metrics != nil {
		s.metrics.RecordBuildStarted(j.Project)
	}

	// build coalescing: if an identical job is already running, do not
	// build again — poll until its ready path appears and reuse its result.
	added := s.jq.Add(j)
	if added {
		defer s.jq.Delete(j)
	} else {
		t := time.NewTicker(2 * time.Second)
		defer t.Stop()
		log.Printf("Coalescing with %s...", j.PendingBuildPath)
		for {
			select {
			case <-ctx.Done():
				err = workErr("context cancelled while coalescing", nil)
				return
			case <-t.C:
				_, err = os.Stat(j.ReadyBuildPath)
				if err == nil {
					// Inner err deliberately shadows the named return.
					i, err := ExitCode(j)
					if err != nil {
						return j.BuildInfo, err
					}
					j.BuildInfo.ExitCode = i
					j.BuildInfo.Coalesced = true

					if s.metrics != nil {
						s.metrics.RecordBuildCoalesced(j.Project)
					}

					// err is nil here (both Stat and ExitCode succeeded).
					return j.BuildInfo, err
				}

				if os.IsNotExist(err) {
					// Original build not finished yet; keep polling.
					continue
				} else {
					err = workErr("could not coalesce", err)
					return
				}
			}
		}
	}

	// build result cache: a pre-existing ready dir is either a reusable
	// success or a failed build that must be wiped and retried.
	_, err = os.Stat(j.ReadyBuildPath)
	if err == nil {
		buildInfo, err := ReadJobBuildInfo(j.ReadyBuildPath, true)
		if err != nil {
			return nil, err
		} else if buildInfo.ExitCode != 0 {
			// Previous build failed, remove its build dir to
			// restart it. We know it's not pointed to by a
			// latest link since we only symlink successful builds
			err = s.cfg.FileSystem.Remove(j.ReadyBuildPath)
			if err != nil {
				return buildInfo, workErr("could not remove existing failed build", err)
			}
		} else { // if a successful result already exists, use that
			buildInfo.Cached = true

			if s.metrics != nil {
				s.metrics.RecordCacheUtilization(j.Project)
			}

			return buildInfo, err
		}
	} else if !os.IsNotExist(err) {
		err = workErr("could not check for ready path", err)
		return
	}

	// Reject jobs for projects that have no directory on disk.
	_, err = os.Stat(filepath.Join(s.cfg.ProjectsPath, j.Project))
	if err != nil {
		if os.IsNotExist(err) {
			err = workErr("Unknown project", nil)
			return
		}
		err = workErr("could not check for project", err)
		return
	}

	err = s.BootstrapProject(j)
	if err != nil {
		err = workErr("could not bootstrap project", err)
		return
	}

	err = j.BootstrapBuildDir(s.cfg.FileSystem)
	if err != nil {
		err = workErr("could not bootstrap build dir", err)
		return
	}

	// First persist: make the build visible on disk even if we crash later.
	err = persistBuildInfo(j)
	if err != nil {
		err = workErr("could not persist build info", err)
		return
	}

	// move from pending to ready when finished
	// (registered FIRST, so it runs LAST — after build_info has been
	// re-persisted by the defer below)
	defer func() {
		rerr := os.Rename(j.PendingBuildPath, j.ReadyBuildPath)
		if rerr != nil {
			errstr := "could not move pending path"
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, rerr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, rerr, err)
			}
		}

		// if build was successful, point 'latest' link to it
		if err == nil && j.BuildInfo.ExitCode == types.ContainerSuccessExitCode {
			// eliminate concurrent filesystem operations since
			// they could result in a corrupted state (eg. if
			// jobs of the same project simultaneously finish
			// successfully)
			s.pq.Lock(j.Project)
			defer s.pq.Unlock(j.Project)

			_, err = os.Lstat(j.LatestBuildPath)
			if err == nil {
				err = os.Remove(j.LatestBuildPath)
				if err != nil {
					err = workErr("could not remove latest build link", err)
					return
				}
			} else if !os.IsNotExist(err) {
				err = workErr("could not stat the latest build link", err)
				return
			}

			err = os.Symlink(j.ReadyBuildPath, j.LatestBuildPath)
			if err != nil {
				err = workErr("could not create latest build link", err)
			}
		}
	}()

	// populate j.BuildInfo.Err and persist build_info file one last
	// time
	defer func() {
		if err != nil {
			j.BuildInfo.ErrBuild = err.Error()
		}

		biErr := persistBuildInfo(j)
		if biErr != nil {
			err = workErr("could not persist build info", biErr)
			return
		}
	}()

	// Write each build parameter to its own file under params/ so the
	// container can read them.
	for k, v := range j.Params {
		err = ioutil.WriteFile(filepath.Join(j.PendingBuildPath, DataDir, ParamsDir, k), []byte(v), 0644)
		if err != nil {
			err = workErr("could not write param file", err)
			return
		}
	}

	out, err := os.Create(j.BuildLogPath)
	if err != nil {
		err = workErr("could not create build log file", err)
		return
	}
	defer func() {
		ferr := out.Close()
		errstr := "could not close build log file"
		if ferr != nil {
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, ferr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, ferr, err)
			}
		}
	}()

	client, err := docker.NewEnvClient()
	if err != nil {
		err = workErr("could not create docker client", err)
		return
	}
	defer func() {
		derr := client.Close()
		errstr := "could not close docker client"
		if derr != nil {
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, derr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, derr, err)
			}
		}
	}()

	// NOTE(review): j.Rebuild is passed for both of the last two
	// arguments — presumably separate pull/no-cache flags; confirm
	// against the BuildImage signature.
	err = j.BuildImage(ctx, s.cfg.UID, client, out, j.Rebuild, j.Rebuild)
	if err != nil {
		err = workErr("could not build docker image", err)
		return
	}

	var outErr strings.Builder
	j.BuildInfo.ExitCode, err = j.StartContainer(ctx, s.cfg, client, out, &outErr)
	if err != nil {
		err = workErr("could not start docker container", err)
		return
	}

	// Flush the log to disk before reading it back below.
	err = out.Sync()
	if err != nil {
		err = workErr("could not flush the output log", err)
		return
	}

	stdouterr, err := ReadJobLogs(j.PendingBuildPath)
	if err != nil {
		err = workErr("could not read the job logs", err)
		return
	}

	j.BuildInfo.ContainerStdouterr = string(stdouterr)
	j.BuildInfo.ContainerStderr = outErr.String()
	j.BuildInfo.Duration = time.Now().Sub(start).Truncate(time.Millisecond)

	if s.metrics != nil {
		s.metrics.RecordBuildFinished(
			j.Project,
			j.BuildInfo.ExitCode == types.ContainerSuccessExitCode,
			j.BuildInfo.Incremental,
			j.BuildInfo.Duration,
		)
	}

	log.Println("Finished after", j.BuildInfo.Duration)
	return
}

// BootstrapProject bootstraps j's project if needed. BootstrapProject is
// idempotent.
276 | func (s *Server) BootstrapProject(j *Job) error { 277 | s.pq.Lock(j.Project) 278 | defer s.pq.Unlock(j.Project) 279 | 280 | err := utils.EnsureDirExists(j.RootBuildPath) 281 | if err != nil { 282 | return err 283 | } 284 | 285 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "pending")) 286 | if err != nil { 287 | return err 288 | } 289 | 290 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "ready")) 291 | if err != nil { 292 | return err 293 | } 294 | 295 | if j.Group != "" { 296 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "groups")) 297 | if err != nil { 298 | return err 299 | } 300 | } 301 | 302 | return nil 303 | } 304 | 305 | // ExitCode returns the exit code of the job's container build. 306 | // If an error is returned, the exit code is irrelevant. 307 | func ExitCode(j *Job) (int, error) { 308 | buildInfo, err := ReadJobBuildInfo(j.ReadyBuildPath, false) 309 | if err != nil { 310 | return types.ContainerPendingExitCode, err 311 | } 312 | return buildInfo.ExitCode, nil 313 | } 314 | 315 | func workErr(s string, e error) error { 316 | s = "work: " + s 317 | if e != nil { 318 | s += "; " + e.Error() 319 | } 320 | return errors.New(s) 321 | } 322 | 323 | // persistBuildInfo persists the JSON-serialized version of j.BuildInfo 324 | // to disk. 
325 | func persistBuildInfo(j *Job) error { 326 | // we don't want to persist the whole build logs in the build_info file 327 | bi := *j.BuildInfo 328 | bi.ContainerStdouterr = "" 329 | bi.ContainerStderr = "" 330 | 331 | out, err := json.Marshal(bi) 332 | if err != nil { 333 | return err 334 | } 335 | 336 | return ioutil.WriteFile(j.BuildInfoFilePath, out, 0666) 337 | } 338 | -------------------------------------------------------------------------------- /cmd/mistryd/worker_pool.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "sync" 9 | 10 | "github.com/skroutz/mistry/pkg/types" 11 | ) 12 | 13 | // WorkResult contains the result of a build, either a BuildInfo or an error 14 | type WorkResult struct { 15 | BuildInfo *types.BuildInfo 16 | Err error 17 | } 18 | 19 | // FutureWorkResult is a WorkResult that may not yet have become available and 20 | // can be Wait()'ed on 21 | type FutureWorkResult struct { 22 | result <-chan WorkResult 23 | } 24 | 25 | // Wait waits for WorkResult to become available and returns it 26 | func (f FutureWorkResult) Wait() WorkResult { 27 | r, ok := <-f.result 28 | if !ok { 29 | // this should never happen, reading from the result channel 30 | // is exclusive to this future 31 | panic("Failed to read from result channel") 32 | } 33 | return r 34 | } 35 | 36 | // workItem contains a job and a channel to place the job result. struct 37 | // used in the internal work queue 38 | type workItem struct { 39 | job *Job 40 | result chan<- WorkResult 41 | } 42 | 43 | // WorkerPool implements a fixed-size pool of workers that build jobs 44 | // build jobs and communicate their result 45 | type WorkerPool struct { 46 | // the fixed amount of goroutines that will be handling running jobs 47 | concurrency int 48 | 49 | // the maximum backlog of pending requests. 
if exceeded, sending new work 50 | // to the pool will return an error 51 | backlogSize int 52 | 53 | queue chan workItem 54 | wg sync.WaitGroup 55 | } 56 | 57 | // NewWorkerPool initializes and starts a new worker pool, waiting for incoming 58 | // jobs. 59 | func NewWorkerPool(s *Server, concurrency, backlog int, logger *log.Logger) *WorkerPool { 60 | p := new(WorkerPool) 61 | p.concurrency = concurrency 62 | p.backlogSize = backlog 63 | p.queue = make(chan workItem, backlog) 64 | 65 | for i := 0; i < concurrency; i++ { 66 | go work(s, i, p.queue, &p.wg) 67 | p.wg.Add(1) 68 | } 69 | logger.Printf("Set up %d workers", concurrency) 70 | return p 71 | } 72 | 73 | // Stop signals the workers to close and blocks until they are closed. 74 | func (p *WorkerPool) Stop() { 75 | close(p.queue) 76 | p.wg.Wait() 77 | } 78 | 79 | // SendWork schedules the work j on p and returns a FutureWorkResult. 80 | // The actual result can be obtained by calling FutureWorkResult.Wait(). 81 | // 82 | // An error is returned if the work backlog is full. 
83 | func (p *WorkerPool) SendWork(j *Job) (FutureWorkResult, error) { 84 | resultQueue := make(chan WorkResult, 1) 85 | wi := workItem{j, resultQueue} 86 | result := FutureWorkResult{resultQueue} 87 | 88 | select { 89 | case p.queue <- wi: 90 | return result, nil 91 | default: 92 | return result, errors.New("queue is full") 93 | } 94 | } 95 | 96 | // work listens to the workQueue, runs Work() on any incoming work items, and 97 | // sends the result through the result queue 98 | func work(s *Server, id int, queue <-chan workItem, wg *sync.WaitGroup) { 99 | defer wg.Done() 100 | logPrefix := fmt.Sprintf("[worker %d]", id) 101 | for item := range queue { 102 | buildInfo, err := s.Work(context.Background(), item.job) 103 | 104 | select { 105 | case item.result <- WorkResult{buildInfo, err}: 106 | default: 107 | // this should never happen, the result chan should be unique for this worker 108 | s.Log.Panicf("%s failed to write result to the result channel", logPrefix) 109 | } 110 | close(item.result) 111 | } 112 | s.Log.Printf("%s exiting...", logPrefix) 113 | } 114 | -------------------------------------------------------------------------------- /cmd/mistryd/worker_pool_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/skroutz/mistry/pkg/types" 8 | ) 9 | 10 | func TestBacklogLimit(t *testing.T) { 11 | wp, cfg := setupQueue(t, 0, 1) 12 | defer wp.Stop() 13 | 14 | params := types.Params{"test": "pool-backlog-limit"} 15 | params2 := types.Params{"test": "pool-backlog-limit2"} 16 | project := "simple" 17 | 18 | sendWorkNoErr(wp, project, params, cfg, t) 19 | _, _, err := sendWork(wp, project, params2, cfg, t) 20 | 21 | if err == nil { 22 | t.Fatal("Expected error") 23 | } 24 | } 25 | 26 | func TestConcurrency(t *testing.T) { 27 | // instatiate server with 1 worker 28 | wp, cfg := setupQueue(t, 1, 100) 29 | defer wp.Stop() 30 | 31 | project := "sleep" 32 
| params := types.Params{"test": "pool-concurrency"} 33 | params2 := types.Params{"test": "pool-concurrency2"} 34 | 35 | sendWorkNoErr(wp, project, params, cfg, t) 36 | // give the chance for the worker to start work 37 | time.Sleep(1 * time.Second) 38 | 39 | j, _ := sendWorkNoErr(wp, project, params2, cfg, t) 40 | 41 | // the queue should contain only 1 item, the work item for the 2nd job 42 | assertEq(len(wp.queue), 1, t) 43 | select { 44 | case i, ok := <-wp.queue: 45 | if !ok { 46 | t.Fatalf("Unexpectedly closed worker pool queue") 47 | } 48 | assertEq(i.job, j, t) 49 | default: 50 | t.Fatalf("Expected to find a work item in the queue") 51 | } 52 | } 53 | 54 | func setupQueue(t *testing.T, workers, backlog int) (*WorkerPool, *Config) { 55 | cfg := testcfg 56 | cfg.Concurrency = workers 57 | cfg.Backlog = backlog 58 | 59 | s, err := NewServer(cfg, nil, false) 60 | failIfError(err, t) 61 | return s.workerPool, cfg 62 | } 63 | 64 | func sendWork(wp *WorkerPool, project string, params types.Params, cfg *Config, t *testing.T) (*Job, FutureWorkResult, error) { 65 | j, err := NewJob(project, params, "", cfg) 66 | failIfError(err, t) 67 | 68 | r, err := wp.SendWork(j) 69 | return j, r, err 70 | } 71 | 72 | func sendWorkNoErr(wp *WorkerPool, project string, params types.Params, cfg *Config, t *testing.T) (*Job, FutureWorkResult) { 73 | j, r, err := sendWork(wp, project, params, cfg, t) 74 | failIfError(err, t) 75 | return j, r 76 | } 77 | -------------------------------------------------------------------------------- /cmd/mistryd/worker_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "github.com/skroutz/mistry/pkg/types" 8 | ) 9 | 10 | func TestBuildCache(t *testing.T) { 11 | params := types.Params{"foo": "bar"} 12 | group := "baz" 13 | 14 | result1, err := postJob( 15 | types.JobRequest{Project: "build-cache", Params: params, Group: group}) 16 | if err != 
nil { 17 | t.Fatal(err) 18 | } 19 | 20 | out1, err := readOut(result1, ArtifactsDir) 21 | if err != nil { 22 | t.Fatal(err) 23 | } 24 | 25 | cachedOut1, err := readOut(result1, CacheDir) 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | 30 | assertEq(out1, cachedOut1, t) 31 | 32 | params["foo"] = "bar2" 33 | result2, err := postJob( 34 | types.JobRequest{Project: "build-cache", Params: params, Group: group}) 35 | if err != nil { 36 | t.Fatal(err) 37 | } 38 | 39 | out2, err := readOut(result2, ArtifactsDir) 40 | if err != nil { 41 | t.Fatal(err) 42 | } 43 | 44 | cachedOut2, err := readOut(result2, CacheDir) 45 | if err != nil { 46 | t.Fatal(err) 47 | } 48 | 49 | assertEq(cachedOut1, cachedOut2, t) 50 | assertNotEq(out1, out2, t) 51 | assertNotEq(result1.Path, result2.Path, t) 52 | assert(result1.ExitCode, 0, t) 53 | assert(result2.ExitCode, 0, t) 54 | assert(result1.Incremental, false, t) 55 | assert(result2.Incremental, true, t) 56 | } 57 | 58 | func TestFailedPendingBuildCleanup(t *testing.T) { 59 | var err error 60 | project := "failed-build-cleanup" 61 | expected := "unknown instruction: INVALIDCOMMAND" 62 | 63 | for i := 0; i < 3; i++ { 64 | _, err = postJob( 65 | types.JobRequest{Project: project, Params: params, Group: ""}) 66 | if !strings.Contains(err.Error(), expected) { 67 | t.Fatalf("Expected '%s' to contain '%s'", err.Error(), expected) 68 | } 69 | } 70 | } 71 | 72 | // regression test for incremental building bug 73 | func TestBuildCacheWhenFailed(t *testing.T) { 74 | group := "ppp" 75 | 76 | // a successful build - it'll be symlinked 77 | _, err := postJob( 78 | types.JobRequest{Project: "failed-build-link", 79 | Params: types.Params{"_exitcode": "0"}, 80 | Group: group}) 81 | if err != nil { 82 | t.Fatal(err) 83 | } 84 | 85 | // a failed build - it should NOT be symlinked 86 | _, err = postJob( 87 | types.JobRequest{Project: "failed-build-link", 88 | Params: types.Params{"_exitcode": "1", "foo": "bar"}, 89 | Group: group}) 90 | if err != nil { 91 | 
t.Fatal(err) 92 | } 93 | 94 | // repeat the previous failed build - it 95 | // SHOULD be incremental 96 | buildInfo, err := postJob( 97 | types.JobRequest{Project: "failed-build-link", 98 | Params: types.Params{"_exitcode": "1", "foo": "bar"}, 99 | Group: group}) 100 | if err != nil { 101 | t.Fatal(err) 102 | } 103 | 104 | if !buildInfo.Incremental { 105 | t.Fatal("build should be incremental, but it isn't") 106 | } 107 | 108 | } 109 | -------------------------------------------------------------------------------- /contrib/fabfile.py.sample: -------------------------------------------------------------------------------- 1 | """ 2 | Deploy mistry 3 | 4 | Usage: 5 | 6 | $ fab -H deploy 7 | 8 | """ 9 | from json import loads as json 10 | 11 | from fabric.api import env, settings, parallel 12 | from fabric.operations import put, sudo, local, run 13 | from fabric.decorators import runs_once 14 | from fabric.context_managers import hide 15 | 16 | def tail(): 17 | env.remote_interrupt = True 18 | with settings(warn_only=True): 19 | sudo('journalctl --unit=mistry --follow --lines=0', pty=True) 20 | 21 | @runs_once 22 | def build(): 23 | local('GOOS=linux GOARCH=amd64 make build') 24 | 25 | def copy(): 26 | put('mistryd', '/usr/bin/', use_sudo=True, mode=0755) 27 | 28 | def restart(): 29 | sudo('systemctl restart mistry-server.service') 30 | 31 | def status(): 32 | run('systemctl status mistry-server.service') 33 | 34 | def deploy(): 35 | build() 36 | copy() 37 | restart() 38 | -------------------------------------------------------------------------------- /contrib/mistry-purge-builds: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require 'json' 3 | require 'time' 4 | require 'optparse' 5 | 6 | options = {} 7 | OptionParser.new do |opts| 8 | opts.banner = "Purge old mistry builds from the file system.\nUsage: #{$0} [options]" 9 | opts.on('--older-than DAYS' 'remove builds older than DAYS days') { |v| 
options[:stale_point] = Time.now - Integer(v)*24*60*60 } 10 | opts.on('--path PATH', 'Build path') { |v| options[:path] = v } 11 | opts.on('--dry-run', 'Dry run') { |v| options[:dry_run] = v } 12 | end.parse! 13 | 14 | abort("#{options[:path]} is not a directory") unless File.directory?(options[:path]) 15 | 16 | jobs = [] 17 | stale_jobs = [] 18 | groups_and_latest = [] 19 | projects = Dir["#{options[:path]}/projects/*"].map { |p| p.gsub("#{options[:path]}/projects/","") } 20 | 21 | projects.each do |p| 22 | data_path = File.join(options[:path], "data", p) 23 | groups_path = File.join(data_path, "groups") 24 | latest_path = File.join(data_path, "latest") 25 | group_jobs = Dir["#{groups_path}/*"] 26 | groups_and_latest << Dir["latest_path"].first if !Dir["latest_path"].empty? 27 | group_jobs.each do |j| 28 | groups_and_latest << j 29 | end 30 | 31 | ready_path = File.join(data_path, "ready") 32 | ready_jobs = Dir["#{ready_path}/*"] 33 | ready_jobs.each do |rj| 34 | if t = JSON.parse(File.read("#{rj}/build_info.json"))["StartedAt"] 35 | start_time = Time.parse(t) 36 | end 37 | stale_jobs << rj if start_time.nil? || start_time < options[:stale_point] 38 | end 39 | end 40 | 41 | if options[:dry_run] 42 | puts "would delete jobs: #{stale_jobs}" 43 | puts "would unlink: #{groups_and_latest}" 44 | elsif !stale_jobs.empty? 
45 | File.unlink(*(groups_and_latest.select{ |j| stale_jobs.include?(File.readlink(j)) })) 46 | `btrfs subvolume delete #{stale_jobs.join(' ')}` 47 | end 48 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/skroutz/mistry 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/containerd/containerd v1.5.9 // indirect 7 | github.com/docker/distribution v2.7.1+incompatible 8 | github.com/docker/docker v20.10.12+incompatible 9 | github.com/docker/go-connections v0.4.0 // indirect 10 | github.com/docker/go-units v0.4.0 11 | github.com/gorilla/mux v1.8.0 // indirect 12 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect 13 | github.com/morikuni/aec v1.0.0 // indirect 14 | github.com/prometheus/client_golang v1.11.0 15 | github.com/rakyll/statik v0.1.7 16 | github.com/urfave/cli v1.22.5 17 | golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect 18 | google.golang.org/grpc v1.43.0 // indirect 19 | ) 20 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skroutz/mistry/f7b47b936e29985b63a693c9713e6c821e9a01f3/logo.png -------------------------------------------------------------------------------- /pkg/broker/broker.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "bufio" 5 | "log" 6 | "sync" 7 | 8 | "github.com/skroutz/mistry/pkg/tailer" 9 | ) 10 | 11 | // A Broker holds a registry with open client connections, listens for events on the 12 | // Notifier channel and broadcasts event messages to the corresponding clients. 13 | type Broker struct { 14 | Log *log.Logger 15 | 16 | // Messages are pushed to this channel. 
	Notifier chan *Event

	// Channel for adding new client connections.
	NewClients chan *Client

	// Channel for signaling a closed client connection.
	ClosingClients chan *Client

	// Channel for signaling the closing of all connections for an id.
	// NOTE(review): a plain map — looks like it is only accessed from the
	// broker's single event loop goroutine; confirm before adding callers.
	CloseClientC map[string]chan struct{}

	// clients is the connections registry of the Broker. clients sent to the
	// NewClients channel are being added to the registry.
	// A reference to the Client is being used so that the connections can be
	// uniquely identified for the messages broadcasting.
	clients map[*Client]bool

	// Queue used to track all open clients count grouped by their id.
	// The stored map type is [string]int.
	clientsCount *sync.Map
}

// Client represents a client-connection.
type Client struct {
	// The connection channel to communicate with the events gathering
	// channel.
	Data chan []byte

	// Each connection has an id that corresponds to the Event ID it is
	// interested in receiving messages about.
	ID string

	// Extra contains any extra misc information about the connection.
	// e.g a secondary unique identifier for the Client
	Extra string
}

// Event consists of an id ID and a message Msg. All clients with the same id
// receive the event message.
type Event struct {
	// The message to be consumed by any connected client e.g., browser.
	Msg []byte

	// Each message has an id which corresponds to the concerning client id.
	ID string
}

// NewBroker returns a new Broker.
65 | func NewBroker(logger *log.Logger) *Broker { 66 | br := &Broker{} 67 | br.Log = logger 68 | br.Notifier = make(chan *Event) 69 | br.NewClients = make(chan *Client) 70 | br.ClosingClients = make(chan *Client) 71 | br.clients = make(map[*Client]bool) 72 | br.clientsCount = new(sync.Map) 73 | br.CloseClientC = make(map[string]chan struct{}) 74 | return br 75 | } 76 | 77 | // ListenForClients is responsible for taking the appropriate course of 78 | // action based on the different channel messages. It listens for new clients 79 | // on the NewClients channel, for closing clients on the ClosingClients channel 80 | // and for events Event on the Notifier channel. 81 | func (br *Broker) ListenForClients() { 82 | for { 83 | select { 84 | case client := <-br.NewClients: 85 | br.clients[client] = true 86 | val, exists := br.clientsCount.Load(client.ID) 87 | cc, ok := val.(int) 88 | if exists && !ok { 89 | br.Log.Printf("got data of type %T but wanted int", val) 90 | } 91 | if exists && cc > 0 { 92 | br.clientsCount.Store(client.ID, cc+1) 93 | } else { 94 | br.clientsCount.Store(client.ID, 1) 95 | br.CloseClientC[client.ID] = make(chan struct{}) 96 | tl, err := tailer.New(client.Extra) 97 | if err != nil { 98 | br.Log.Printf("[broker] Could not start the tailer for file %s", client.Extra) 99 | } 100 | go func() { 101 | s := bufio.NewScanner(tl) 102 | for s.Scan() { 103 | br.Notifier <- &Event{Msg: []byte(s.Text()), ID: client.ID} 104 | } 105 | }() 106 | go func() { 107 | <-br.CloseClientC[client.ID] 108 | err = tl.Close() 109 | if err != nil { 110 | br.Log.Print(err) 111 | } 112 | }() 113 | } 114 | case client := <-br.ClosingClients: 115 | close(client.Data) 116 | delete(br.clients, client) 117 | val, _ := br.clientsCount.Load(client.ID) 118 | cc, ok := val.(int) 119 | if !ok { 120 | br.Log.Printf("got data of type %T but wanted int", val) 121 | } 122 | newVal := cc - 1 123 | br.clientsCount.Store(client.ID, newVal) 124 | if newVal == 0 { 125 | br.CloseClientC[client.ID] 
<- struct{}{} 126 | } 127 | case event := <-br.Notifier: 128 | for client := range br.clients { 129 | if client.ID == event.ID { 130 | client.Data <- event.Msg 131 | } 132 | } 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /pkg/filesystem/btrfs/btrfs.go: -------------------------------------------------------------------------------- 1 | package btrfs 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/skroutz/mistry/pkg/filesystem" 8 | "github.com/skroutz/mistry/pkg/utils" 9 | ) 10 | 11 | // Btrfs implements the FileSystem interface. It is an efficient implementation 12 | // since it uses Copy-on-Write snapshots to do the cloning. It is the 13 | // recommended solution for production systems. 14 | type Btrfs struct{} 15 | 16 | func init() { 17 | filesystem.Registry["btrfs"] = Btrfs{} 18 | } 19 | 20 | // Create creates a new subvolume named path. 21 | func (fs Btrfs) Create(path string) error { 22 | return runCmd([]string{"btrfs", "subvolume", "create", path}) 23 | } 24 | 25 | // Clone creates a Btrfs snapshot of subvolume src to a new subvolume, dst. 26 | func (fs Btrfs) Clone(src, dst string) error { 27 | return runCmd([]string{"btrfs", "subvolume", "snapshot", src, dst}) 28 | } 29 | 30 | // Remove deletes the subvolume with name path. 
31 | func (fs Btrfs) Remove(path string) error { 32 | _, err := os.Stat(path) 33 | if err == nil { 34 | return runCmd([]string{"btrfs", "subvolume", "delete", path}) 35 | } 36 | return nil 37 | } 38 | 39 | func runCmd(args []string) error { 40 | out, err := utils.RunCmd(args) 41 | if err != nil { 42 | return fmt.Errorf("%s (%s)", err, out) 43 | } 44 | return nil 45 | } 46 | -------------------------------------------------------------------------------- /pkg/filesystem/filesystem.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // Registry maps the filesystem name to its implementation 8 | var Registry = make(map[string]FileSystem) 9 | 10 | // FileSystem defines a few basic filesystem operations 11 | type FileSystem interface { 12 | // Create creates a new directory in the given path. 13 | Create(path string) error 14 | 15 | // Clone copies the src path and its contents to dst. 16 | Clone(src, dst string) error 17 | 18 | // Remove removes path and its children. 19 | // Implementors should not return an error when the path does not 20 | // exist. 21 | Remove(path string) error 22 | } 23 | 24 | // Get returns the registered filesystem denoted by s. If it doesn't exist, 25 | // an error is returned. 26 | func Get(s string) (FileSystem, error) { 27 | fs, ok := Registry[s] 28 | if !ok { 29 | return nil, fmt.Errorf("unknown filesystem '%s' (%v)", s, Registry) 30 | } 31 | return fs, nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/filesystem/plainfs/plainfs.go: -------------------------------------------------------------------------------- 1 | package plainfs 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/skroutz/mistry/pkg/filesystem" 8 | "github.com/skroutz/mistry/pkg/utils" 9 | ) 10 | 11 | // PlainFS implements the FileSystem interface. It uses plain `cp` and `mkdir` 12 | // commands. 
13 | type PlainFS struct{} 14 | 15 | func init() { 16 | filesystem.Registry["plain"] = PlainFS{} 17 | } 18 | 19 | // Create creates a new directory at path 20 | func (fs PlainFS) Create(path string) error { 21 | return os.Mkdir(path, 0755) 22 | } 23 | 24 | // Clone recursively copies the contents of src to dst 25 | func (fs PlainFS) Clone(src, dst string) error { 26 | out, err := utils.RunCmd([]string{"cp", "-r", src, dst}) 27 | if err != nil { 28 | return fmt.Errorf("%s (%s)", err, out) 29 | } 30 | return nil 31 | } 32 | 33 | // Remove deletes the path and all its contents 34 | func (fs PlainFS) Remove(path string) error { 35 | return os.RemoveAll(path) 36 | } 37 | -------------------------------------------------------------------------------- /pkg/tailer/tailer.go: -------------------------------------------------------------------------------- 1 | // Package tailer emulates the features of the tail program (reading from 2 | // continuously updated files). 3 | package tailer 4 | 5 | import ( 6 | "io" 7 | "os" 8 | "time" 9 | ) 10 | 11 | // A Tailer holds an io.ReadCloser interface. It implements a Read() function 12 | // which emulates the tailf UNIX program. 13 | type Tailer struct { 14 | io.ReadCloser 15 | } 16 | 17 | // New returns a new Tailer for the given path. 18 | func New(path string) (*Tailer, error) { 19 | f, err := os.Open(path) 20 | if err != nil { 21 | return &Tailer{}, err 22 | } 23 | 24 | if _, err := f.Seek(0, 2); err != nil { 25 | return &Tailer{}, err 26 | } 27 | return &Tailer{f}, nil 28 | } 29 | 30 | // Read provides a tailf like generator by handling the io.EOF error. 31 | // It returns the number of bytes read and any error encountered. 32 | // At end of file, when no more input is available, Read handles the io.EOF 33 | // error by continuing the reading loop. 
34 | func (t *Tailer) Read(b []byte) (int, error) { 35 | for { 36 | n, err := t.ReadCloser.Read(b) 37 | if n > 0 { 38 | return n, nil 39 | } else if err != io.EOF { 40 | return n, err 41 | } 42 | time.Sleep(500 * time.Millisecond) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /pkg/types/build_info.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | const ( 8 | // ContainerPendingExitCode is the zero value of BuildInfo.ExitCode 9 | // and is updated after the container finishes running. 10 | ContainerPendingExitCode = -999 11 | 12 | // ContainerSuccessExitCode indicates that the build was successful. 13 | ContainerSuccessExitCode = 0 14 | ) 15 | 16 | // BuildInfo contains information regarding the outcome of an executed job. 17 | type BuildInfo struct { 18 | // Params are the job build parameters 19 | Params Params 20 | 21 | // Group is the job group 22 | Group string 23 | 24 | // Path is the absolute path where the build artifacts are located. 25 | Path string 26 | 27 | // Cached is true if the build artifacts were retrieved from the cache. 28 | Cached bool 29 | 30 | // Coalesced is true if the build was returned from another pending 31 | // build. 32 | Coalesced bool 33 | 34 | // Incremental is true if the results of a previous build were 35 | // used as the base for this build (ie. build cache). 36 | Incremental bool 37 | 38 | // ExitCode is the exit code of the container command. 39 | // 40 | // It is initialized to ContainerFailureExitCode and is updated upon 41 | // build completion. If ExitCode is still set to ContainerFailureExitCode 42 | // after the build is finished, it indicates an error somewhere along 43 | // the path. 44 | // 45 | // It is irrelevant and should be ignored if Coalesced is true. 46 | ExitCode int 47 | 48 | // ErrBuild contains any errors that occurred during the build. 
49 | // 50 | // TODO: It might contain errors internal to the server, that the 51 | // user can do nothing about. This should be fixed 52 | ErrBuild string 53 | 54 | // ContainerStdouterr contains the stdout/stderr of the container. 55 | ContainerStdouterr string `json:",omitempty"` 56 | 57 | // ContainerStderr contains the stderr of the container. 58 | ContainerStderr string `json:",omitempty"` 59 | 60 | // TransportMethod is the method with which the build artifacts can be 61 | // fetched. 62 | TransportMethod TransportMethod 63 | 64 | // StartedAt is the date and time when the build started. 65 | StartedAt time.Time 66 | 67 | // Duration is how much the build took to complete. If it cannot be 68 | // calculated yet, the value will be -1 seconds. 69 | // 70 | // NOTE: if Cached is true, this refers to the original build. 71 | Duration time.Duration 72 | 73 | // URL is the relative URL at which the build log is available. 74 | URL string 75 | } 76 | 77 | // NewBuildInfo initializes a new BuildInfo with its StartedAt set to the 78 | // current time. 79 | func NewBuildInfo() *BuildInfo { 80 | bi := new(BuildInfo) 81 | bi.StartedAt = time.Now() 82 | bi.ExitCode = ContainerPendingExitCode 83 | bi.Duration = -1 * time.Second 84 | 85 | return bi 86 | } 87 | -------------------------------------------------------------------------------- /pkg/types/doc.go: -------------------------------------------------------------------------------- 1 | // Package types contains the types that are used both by mistry server and 2 | // client. 3 | package types 4 | -------------------------------------------------------------------------------- /pkg/types/errors.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "fmt" 4 | 5 | // ErrImageBuild indicates an error occurred while building a Docker image. 
6 | type ErrImageBuild struct { 7 | Image string 8 | Err error 9 | } 10 | 11 | func (e ErrImageBuild) Error() string { 12 | return fmt.Sprintf("could not build docker image '%s': %s", e.Image, e.Err) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/types/job_request.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // JobRequest contains the data the job was requested with 4 | type JobRequest struct { 5 | Project string 6 | Params Params 7 | Group string 8 | Rebuild bool 9 | } 10 | -------------------------------------------------------------------------------- /pkg/types/params.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Params are the user-provided parameters of a particular build. 4 | // They're submitted as part of the job, typically using the mistry CLI. 5 | type Params map[string]string 6 | -------------------------------------------------------------------------------- /pkg/types/transport_method.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // TransportMethod indicates the tool (binary) that the client will use to 4 | // download the build artifacts from the server. The binary should be installed 5 | // in the system. 6 | type TransportMethod string 7 | 8 | const ( 9 | // Rsync instructs the client to use rsync(1) to download the assets, 10 | // either over the SSH or rsync protocol. It is the recommended choice 11 | // for production environments. 12 | Rsync TransportMethod = "rsync" 13 | 14 | // Scp instructs the client to use scp(1) to download the assets. 
15 | Scp = "scp" 16 | ) 17 | -------------------------------------------------------------------------------- /pkg/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "errors" 7 | "io" 8 | "os" 9 | "os/exec" 10 | "path/filepath" 11 | ) 12 | 13 | // PathIsDir returns an error if p does not exist or is not a directory. 14 | func PathIsDir(p string) error { 15 | fi, err := os.Stat(p) 16 | if err != nil { 17 | return err 18 | } 19 | 20 | if !fi.IsDir() { 21 | return errors.New("Path " + p + " is not a directory") 22 | } 23 | 24 | return nil 25 | } 26 | 27 | // EnsureDirExists verifies path is a directory and creates it if it doesn't 28 | // exist. 29 | func EnsureDirExists(path string) error { 30 | fi, err := os.Stat(path) 31 | if err == nil { 32 | if !fi.IsDir() { 33 | return errors.New(path + " is not a directory") 34 | } 35 | } else { 36 | if os.IsNotExist(err) { 37 | err = os.Mkdir(path, 0755) 38 | if err != nil { 39 | return err 40 | } 41 | } else { 42 | return err 43 | } 44 | } 45 | 46 | return nil 47 | } 48 | 49 | // RunCmd runs the shell command denoted by args, using the first 50 | // element as the command and the remained as its arguments. 51 | // It returns the combined stderr/stdout output of the command. 52 | func RunCmd(args []string) (string, error) { 53 | cmd := exec.Command(args[0], args[1:]...) 54 | out, err := cmd.CombinedOutput() 55 | return string(out), err 56 | } 57 | 58 | // Tar walks the file tree rooted at root, adding each file or directory in the 59 | // tree (including root) in a tar archive. The files are walked 60 | // in lexical order, which makes the output deterministic. 
61 | func Tar(root string) ([]byte, error) { 62 | var buf bytes.Buffer 63 | tw := tar.NewWriter(&buf) 64 | walkFn := func(path string, info os.FileInfo, err error) error { 65 | if err != nil { 66 | return err 67 | } 68 | if !info.Mode().IsRegular() { 69 | return nil 70 | } 71 | 72 | hdr, err := tar.FileInfoHeader(info, info.Name()) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | // Preserve directory structure when docker "untars" the build context 78 | hdr.Name, err = filepath.Rel(root, path) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | err = tw.WriteHeader(hdr) 84 | if err != nil { 85 | return err 86 | } 87 | 88 | f, err := os.Open(path) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | _, err = io.Copy(tw, f) 94 | if err != nil { 95 | return err 96 | } 97 | 98 | err = f.Close() 99 | if err != nil { 100 | return err 101 | } 102 | 103 | return nil 104 | } 105 | 106 | err := filepath.Walk(root, walkFn) 107 | if err != nil { 108 | return nil, err 109 | } 110 | 111 | err = tw.Close() 112 | if err != nil { 113 | return nil, err 114 | } 115 | 116 | return buf.Bytes(), nil 117 | } 118 | --------------------------------------------------------------------------------