19 |
20 |
21 |
Job: {{.ID}}
22 |
23 |
24 |
25 |
26 |
27 |
Details
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
Logs
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
102 |
103 |
104 |
--------------------------------------------------------------------------------
/cmd/mistryd/server.go:
--------------------------------------------------------------------------------
1 | //go:generate statik -src=./public -f
2 | package main
3 |
4 | import (
5 | "bufio"
6 | "bytes"
7 | "context"
8 | "encoding/json"
9 | "errors"
10 | "fmt"
11 | "html/template"
12 | "io"
13 | "io/ioutil"
14 | "log"
15 | "net/http"
16 | "os"
17 | "path/filepath"
18 | "sort"
19 | "strings"
20 | "time"
21 |
22 | "github.com/docker/docker/api/types/filters"
23 | docker "github.com/docker/docker/client"
24 | units "github.com/docker/go-units"
25 | "github.com/rakyll/statik/fs"
26 | "github.com/skroutz/mistry/cmd/mistryd/metrics"
27 | _ "github.com/skroutz/mistry/cmd/mistryd/statik"
28 | "github.com/skroutz/mistry/pkg/broker"
29 | "github.com/skroutz/mistry/pkg/types"
30 |
31 | "github.com/prometheus/client_golang/prometheus/promhttp"
32 | )
33 |
// Server is the component that performs the actual work (builds images, runs
// commands etc.). It also exposes the JSON API by which users interact with
// mistry.
type Server struct {
	// Log receives all server log output. It is never nil; NewServer
	// substitutes a discard logger when none is provided.
	Log *log.Logger

	// fs serves the statically-embedded web assets (statik).
	fs http.FileSystem
	// srv is the underlying HTTP server bound to cfg.Addr.
	srv *http.Server
	// jq tracks in-flight jobs.
	jq *JobQueue
	// cfg is the server configuration; assumed immutable after NewServer.
	cfg *Config
	// workerPool executes build jobs with bounded concurrency.
	workerPool *WorkerPool

	// synchronizes access to the filesystem on a per-project basis
	pq *ProjectQueue

	// web-view related
	br *broker.Broker

	// related to prometheus
	metrics *metrics.Recorder
}
55 |
56 | // NewServer accepts a non-nil configuration and an optional logger, and
57 | // returns a new Server.
58 | // If logger is nil, server logs are disabled.
59 | func NewServer(cfg *Config, logger *log.Logger, enableMetrics bool) (*Server, error) {
60 | var err error
61 |
62 | if cfg == nil {
63 | return nil, errors.New("config cannot be nil")
64 | }
65 |
66 | if logger == nil {
67 | logger = log.New(ioutil.Discard, "", 0)
68 | }
69 |
70 | s := new(Server)
71 | mux := http.NewServeMux()
72 |
73 | s.fs, err = fs.New()
74 | if err != nil {
75 | logger.Fatal(err)
76 | }
77 |
78 | mux.Handle("/", http.StripPrefix("/", http.FileServer(s.fs)))
79 | mux.HandleFunc("/jobs", s.HandleNewJob)
80 | mux.HandleFunc("/index/", s.HandleIndex)
81 | mux.HandleFunc("/job/", s.HandleShowJob)
82 | mux.HandleFunc("/log/", s.HandleServerPush)
83 | mux.Handle("/metrics", promhttp.Handler())
84 |
85 | s.srv = &http.Server{Handler: mux, Addr: cfg.Addr}
86 | s.cfg = cfg
87 | s.Log = logger
88 | s.jq = NewJobQueue()
89 | s.pq = NewProjectQueue()
90 | s.br = broker.NewBroker(s.Log)
91 | s.workerPool = NewWorkerPool(s, cfg.Concurrency, cfg.Backlog, logger)
92 |
93 | if enableMetrics {
94 | s.metrics = metrics.NewRecorder(logger)
95 | }
96 |
97 | return s, nil
98 | }
99 |
100 | // HandleNewJob receives requests for new jobs and builds them.
101 | func (s *Server) HandleNewJob(w http.ResponseWriter, r *http.Request) {
102 | if r.Method != "POST" {
103 | http.Error(w, "Expected POST, got "+r.Method, http.StatusMethodNotAllowed)
104 | return
105 | }
106 |
107 | body, err := ioutil.ReadAll(r.Body)
108 | if err != nil {
109 | http.Error(w, "Error reading request body: "+err.Error(), http.StatusBadRequest)
110 | return
111 | }
112 | r.Body.Close()
113 |
114 | jr := types.JobRequest{}
115 | err = json.Unmarshal(body, &jr)
116 | if err != nil {
117 | http.Error(w, fmt.Sprintf("Error unmarshalling body '%s' to Job: %s", body, err),
118 | http.StatusBadRequest)
119 | return
120 | }
121 | j, err := NewJob(jr.Project, jr.Params, jr.Group, s.cfg)
122 | if err != nil {
123 | http.Error(w, fmt.Sprintf("Error creating new job %v: %s", jr, err),
124 | http.StatusInternalServerError)
125 | return
126 | }
127 | j.Rebuild = jr.Rebuild
128 |
129 | // send the work item to the worker pool
130 | future, err := s.workerPool.SendWork(j)
131 | if err != nil {
132 | // the in-memory queue is overloaded, we have to wait for the workers to pick
133 | // up new items.
134 | // return a 503 to signal that the server is overloaded and for clients to try
135 | // again later
136 | // 503 is an appropriate status code to signal that the server is overloaded
137 | // for all users, while 429 would have been used if we implemented user-specific
138 | // throttling
139 | s.Log.Print("Failed to send message to work queue")
140 | w.WriteHeader(http.StatusServiceUnavailable)
141 | return
142 | }
143 |
144 | // if async, we're done, otherwise wait for the result in the result channel
145 | _, async := r.URL.Query()["async"]
146 | if async {
147 | s.Log.Printf("Scheduled %s", j)
148 | w.WriteHeader(http.StatusCreated)
149 | } else {
150 | s.Log.Printf("Scheduled %s and waiting for result...", j)
151 | s.writeWorkResult(j, future.Wait(), w)
152 | }
153 | }
154 |
155 | func (s *Server) writeWorkResult(j *Job, r WorkResult, w http.ResponseWriter) {
156 | if r.Err != nil {
157 | http.Error(w, fmt.Sprintf("Error building %s: %s", j, r.Err),
158 | http.StatusInternalServerError)
159 | return
160 | }
161 |
162 | w.WriteHeader(http.StatusCreated)
163 | w.Header().Set("Content-Type", "application/json")
164 |
165 | resp, err := json.Marshal(r.BuildInfo)
166 | if err != nil {
167 | s.Log.Print(err)
168 | }
169 | _, err = w.Write([]byte(resp))
170 | if err != nil {
171 | s.Log.Printf("Error writing response for %s: %s", j, err)
172 | }
173 | }
174 |
175 | // HandleIndex returns all available jobs.
176 | func (s *Server) HandleIndex(w http.ResponseWriter, r *http.Request) {
177 | if r.Method != "GET" {
178 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed)
179 | return
180 | }
181 |
182 | jobs, err := s.getJobs()
183 | if err != nil {
184 | s.Log.Printf("cannot get jobs for path %s; %s", s.cfg.BuildPath, err)
185 | w.WriteHeader(http.StatusInternalServerError)
186 | return
187 | }
188 | sort.Slice(jobs, func(i, j int) bool {
189 | return jobs[j].StartedAt.Before(jobs[i].StartedAt)
190 | })
191 |
192 | resp, err := json.Marshal(jobs)
193 | if err != nil {
194 | s.Log.Printf("cannot marshal jobs '%#v'; %s", jobs, err)
195 | w.WriteHeader(http.StatusInternalServerError)
196 | return
197 | }
198 |
199 | w.WriteHeader(http.StatusOK)
200 | w.Header().Set("Content-Type", "application/json")
201 |
202 | _, err = w.Write(resp)
203 | if err != nil {
204 | s.Log.Printf("cannot write response %s", err)
205 | w.WriteHeader(http.StatusInternalServerError)
206 | return
207 | }
208 | }
209 |
210 | // HandleShowJob receives requests for a job and produces the appropriate output
211 | // based on the content type of the request.
212 | func (s *Server) HandleShowJob(w http.ResponseWriter, r *http.Request) {
213 | if r.Method != "GET" {
214 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed)
215 | return
216 | }
217 |
218 | parts := strings.Split(r.URL.Path, "/")
219 | if len(parts) != 4 {
220 | w.WriteHeader(http.StatusBadRequest)
221 | return
222 | }
223 | project := parts[2]
224 | id := parts[3]
225 |
226 | state, err := GetState(s.cfg.BuildPath, project, id)
227 | if err != nil {
228 | s.Log.Print(err)
229 | w.WriteHeader(http.StatusInternalServerError)
230 | return
231 | }
232 | jPath := filepath.Join(s.cfg.BuildPath, project, state, id)
233 |
234 | buildInfo, err := ReadJobBuildInfo(jPath, true)
235 | if err != nil {
236 | s.Log.Print(err)
237 | w.WriteHeader(http.StatusInternalServerError)
238 | return
239 | }
240 |
241 | j := Job{
242 | BuildInfo: buildInfo,
243 | ID: id,
244 | Project: project,
245 | State: state,
246 | }
247 |
248 | if r.Header.Get("Content-type") == "application/json" {
249 | jData, err := json.Marshal(j)
250 | if err != nil {
251 | s.Log.Print(err)
252 | w.WriteHeader(http.StatusInternalServerError)
253 | return
254 | }
255 |
256 | w.Header().Set("Content-Type", "application/json")
257 | _, err = w.Write(jData)
258 | if err != nil {
259 | s.Log.Printf("HandleShowJob: error writing Content-Type header: %s", err)
260 | }
261 | return
262 | }
263 |
264 | f, err := s.fs.Open("/templates/show.html")
265 | if err != nil {
266 | s.Log.Print(err)
267 | w.WriteHeader(http.StatusInternalServerError)
268 | return
269 | }
270 |
271 | tmplBody, err := ioutil.ReadAll(f)
272 | if err != nil {
273 | s.Log.Print(err)
274 | w.WriteHeader(http.StatusInternalServerError)
275 | return
276 | }
277 |
278 | tmpl := template.New("jobshow")
279 | tmpl, err = tmpl.Parse(string(tmplBody))
280 | if err != nil {
281 | s.Log.Print(err)
282 | w.WriteHeader(http.StatusInternalServerError)
283 | return
284 | }
285 |
286 | buf := new(bytes.Buffer)
287 | err = tmpl.Execute(buf, j)
288 | if err != nil {
289 | s.Log.Print(err)
290 | w.WriteHeader(http.StatusInternalServerError)
291 | return
292 | }
293 | _, err = buf.WriteTo(w)
294 | if err != nil {
295 | s.Log.Print(err)
296 | w.WriteHeader(http.StatusInternalServerError)
297 | return
298 | }
299 | }
300 |
301 | func getJobURL(j *Job) string {
302 | return strings.Join([]string{"job", j.Project, j.ID}, "/")
303 | }
304 |
305 | // HandleServerPush emits build logs as Server-SentEvents (SSE).
306 | func (s *Server) HandleServerPush(w http.ResponseWriter, r *http.Request) {
307 | if r.Method != "GET" {
308 | http.Error(w, "Expected GET, got "+r.Method, http.StatusMethodNotAllowed)
309 | return
310 | }
311 |
312 | parts := strings.Split(r.URL.Path, "/")
313 | if len(parts) != 4 {
314 | w.WriteHeader(http.StatusBadRequest)
315 | return
316 | }
317 | project := parts[2]
318 | id := parts[3]
319 |
320 | state, err := GetState(s.cfg.BuildPath, project, id)
321 | if err != nil {
322 | s.Log.Print(err)
323 | w.WriteHeader(http.StatusInternalServerError)
324 | return
325 | }
326 |
327 | // Decide whether to tail the log file and keep the connection alive for
328 | // sending server side events.
329 | if state != "pending" {
330 | w.WriteHeader(http.StatusNoContent)
331 | return
332 | }
333 |
334 | flusher, ok := w.(http.Flusher)
335 | if !ok {
336 | http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
337 | return
338 | }
339 |
340 | w.Header().Set("Content-Type", "text/event-stream")
341 | w.Header().Set("Cache-Control", "no-cache")
342 | w.Header().Set("Connection", "keep-alive")
343 | w.Header().Set("Access-Control-Allow-Origin", "*")
344 |
345 | jPath := filepath.Join(s.cfg.BuildPath, project, state, id)
346 | buildLogPath := filepath.Join(jPath, BuildLogFname)
347 | client := &broker.Client{ID: id, Data: make(chan []byte), Extra: buildLogPath}
348 | s.br.NewClients <- client
349 |
350 | go func() {
351 | <-w.(http.CloseNotifier).CloseNotify()
352 | s.br.ClosingClients <- client
353 | }()
354 |
355 | for {
356 | msg, ok := <-client.Data
357 | if !ok {
358 | break
359 | }
360 | _, err := fmt.Fprintf(w, "data: %s\n\n", msg)
361 | if err != nil {
362 | s.Log.Printf("HandleServerPush: error writing log data to client: %s", err)
363 | }
364 |
365 | flusher.Flush()
366 | }
367 | }
368 |
369 | // ListenAndServe listens on the TCP network address s.srv.Addr and handle
370 | // requests on incoming connections. ListenAndServe always returns a
371 | // non-nil error.
372 | func (s *Server) ListenAndServe() error {
373 | s.Log.Printf("Configuration: %#v", s.cfg)
374 | go s.br.ListenForClients()
375 |
376 | go func() {
377 | for {
378 | s.metrics.RecordHostedBuilds(s.cfg.BuildPath, s.cfg.ProjectsPath)
379 | time.Sleep(5 * time.Minute)
380 | }
381 | }()
382 |
383 | return s.srv.ListenAndServe()
384 | }
385 |
// pruneResult holds the outcome of a docker prune pass.
type pruneResult struct {
	// number of images removed by ImagesPrune
	prunedImages int
	// number of containers removed by ContainersPrune
	prunedContainers int
	// total bytes reclaimed by both prune operations
	reclaimedSpace uint64
}
391 |
// RebuildResult contains result data on the rebuild operation
type RebuildResult struct {
	// number of projects rebuilt successfully
	successful int
	// names of the projects that failed to rebuild
	failed []string
	pruneResult
}
398 |
399 | func (r RebuildResult) String() string {
400 | var failedNames string
401 | if len(r.failed) > 0 {
402 | failedNames = ", Failed names: " + strings.Join(r.failed, ", ")
403 | }
404 |
405 | return fmt.Sprintf(
406 | "Rebuilt: %d, Pruned images: %d, Pruned containers: %d, Reclaimed: %s, Failed: %d%s",
407 | r.successful, r.prunedImages, r.prunedContainers, units.HumanSize(float64(r.reclaimedSpace)),
408 | len(r.failed), failedNames)
409 | }
410 |
411 | // RebuildImages rebuilds images for all projects, and prunes any dangling images
412 | func RebuildImages(cfg *Config, log *log.Logger, projects []string, stopErr, verbose bool) (RebuildResult, error) {
413 | var err error
414 | r := RebuildResult{}
415 | if len(projects) == 0 {
416 | projects, err = getProjects(cfg)
417 | if err != nil {
418 | return r, err
419 | }
420 | }
421 |
422 | client, err := docker.NewEnvClient()
423 | if err != nil {
424 | return r, err
425 | }
426 |
427 | ctx := context.Background()
428 | for _, project := range projects {
429 | start := time.Now()
430 | log.Printf("Rebuilding %s...\n", project)
431 | j, err := NewJob(project, types.Params{}, "", cfg)
432 | if err != nil {
433 | r.failed = append(r.failed, project)
434 | if stopErr {
435 | return r, err
436 | }
437 | log.Printf("Failed to instantiate %s job with error: %s\n", project, err)
438 | } else {
439 | var buildErr error
440 | if verbose {
441 | // pipe image build logs to the logger
442 | pr, pw := io.Pipe()
443 | buildResult := make(chan error)
444 |
445 | go func() {
446 | err := j.BuildImage(ctx, cfg.UID, client, pw, true, true)
447 | pErr := pw.Close()
448 | if pErr != nil {
449 | // as of Go 1.10 this is never non-nil
450 | log.Printf("Unexpected PipeWriter.Close() error: %s\n", pErr)
451 | }
452 | buildResult <- err
453 | }()
454 |
455 | scanner := bufio.NewScanner(pr)
456 | for scanner.Scan() {
457 | log.Print(scanner.Text())
458 | }
459 | buildErr = <-buildResult
460 | } else {
461 | // discard image build logs
462 | buildErr = j.BuildImage(ctx, cfg.UID, client, ioutil.Discard, true, true)
463 | }
464 |
465 | if buildErr != nil {
466 | r.failed = append(r.failed, project)
467 | if stopErr {
468 | return r, buildErr
469 | }
470 | log.Printf("Failed to build %s job %s with error: %s\n", project, j.ID, buildErr)
471 | } else {
472 | log.Printf("Rebuilt %s in %s\n", project, time.Now().Sub(start).Truncate(time.Millisecond))
473 | r.successful++
474 | }
475 | }
476 | }
477 | r.pruneResult, err = dockerPruneUnused(ctx, client)
478 | if err != nil {
479 | return r, err
480 | }
481 | return r, nil
482 | }
483 |
484 | // dockerPruneUnused prunes stopped containers and unused images
485 | func dockerPruneUnused(ctx context.Context, c *docker.Client) (pruneResult, error) {
486 | // prune containers before images, this will allow more images to be eligible for clean up
487 | noFilters := filters.NewArgs()
488 | cr, err := c.ContainersPrune(ctx, noFilters)
489 | if err != nil {
490 | return pruneResult{}, err
491 | }
492 | ir, err := c.ImagesPrune(ctx, noFilters)
493 | if err != nil {
494 | return pruneResult{}, err
495 | }
496 | return pruneResult{
497 | prunedImages: len(ir.ImagesDeleted),
498 | prunedContainers: len(cr.ContainersDeleted),
499 | reclaimedSpace: ir.SpaceReclaimed + cr.SpaceReclaimed}, nil
500 | }
501 |
502 | // PruneZombieBuilds removes any pending builds from the filesystem.
503 | func PruneZombieBuilds(cfg *Config) error {
504 | projects, err := getProjects(cfg)
505 | if err != nil {
506 | return err
507 | }
508 | l := log.New(os.Stderr, "[cleanup] ", log.LstdFlags)
509 |
510 | for _, p := range projects {
511 | pendingPath := filepath.Join(cfg.BuildPath, p, "pending")
512 | pendingBuilds, err := ioutil.ReadDir(pendingPath)
513 | if err != nil {
514 | l.Printf("error reading pending builds; skipping project (%s): %s", p, err)
515 | continue
516 | }
517 |
518 | for _, pending := range pendingBuilds {
519 | pendingBuildPath := filepath.Join(pendingPath, pending.Name())
520 | err = cfg.FileSystem.Remove(pendingBuildPath)
521 | if err != nil {
522 | return fmt.Errorf("Error pruning zombie build '%s' of project '%s'", pending.Name(), p)
523 | }
524 | l.Printf("Pruned zombie build '%s' of project '%s'", pending.Name(), p)
525 | }
526 | }
527 | return nil
528 | }
529 |
530 | func getProjects(cfg *Config) ([]string, error) {
531 | root := cfg.ProjectsPath
532 | folders, err := ioutil.ReadDir(root)
533 | if err != nil {
534 | return nil, err
535 | }
536 |
537 | projects := []string{}
538 |
539 | for _, f := range folders {
540 | if !f.IsDir() {
541 | continue
542 | }
543 |
544 | _, err := os.Stat(filepath.Join(root, f.Name(), "Dockerfile"))
545 | if err != nil {
546 | if os.IsNotExist(err) {
547 | fmt.Println(filepath.Join(root, f.Name(), "Dockerfile"), "doesn't exist")
548 | continue
549 | }
550 | return nil, err
551 | }
552 |
553 | projects = append(projects, f.Name())
554 | }
555 |
556 | return projects, nil
557 | }
558 |
559 | // getJobs returns all pending and ready jobs.
560 | func (s *Server) getJobs() ([]Job, error) {
561 | var pendingJobs, readyJobs []os.FileInfo
562 | jobs := []Job{}
563 | projects := []string{}
564 |
565 | // find projects
566 | folders, err := ioutil.ReadDir(s.cfg.BuildPath)
567 | if err != nil {
568 | return nil, fmt.Errorf("cannot scan projects; %s", err)
569 | }
570 | for _, f := range folders {
571 | if f.IsDir() {
572 | projects = append(projects, f.Name())
573 | }
574 | }
575 |
576 | for _, p := range projects {
577 | pendingPath := filepath.Join(s.cfg.BuildPath, p, "pending")
578 | _, err := os.Stat(pendingPath)
579 | pendingExists := !os.IsNotExist(err)
580 | if err != nil && !os.IsNotExist(err) {
581 | return nil, fmt.Errorf("cannot check if pending path exists; %s", err)
582 | }
583 | readyPath := filepath.Join(s.cfg.BuildPath, p, "ready")
584 | _, err = os.Stat(readyPath)
585 | readyExists := !os.IsNotExist(err)
586 | if err != nil && !os.IsNotExist(err) {
587 | return nil, fmt.Errorf("cannot check if ready path exists; %s", err)
588 | }
589 |
590 | if pendingExists {
591 | pendingJobs, err = ioutil.ReadDir(pendingPath)
592 | if err != nil {
593 | return nil, fmt.Errorf("cannot scan pending jobs of project %s; %s", p, err)
594 | }
595 | }
596 | if readyExists {
597 | readyJobs, err = ioutil.ReadDir(readyPath)
598 | if err != nil {
599 | return nil, fmt.Errorf("cannot scan ready jobs of project %s; %s", p, err)
600 | }
601 | }
602 |
603 | getJob := func(path, jobID, project, state string) (Job, error) {
604 | bi, err := ReadJobBuildInfo(filepath.Join(path, jobID), false)
605 | if err != nil {
606 | return Job{}, err
607 | }
608 |
609 | return Job{
610 | ID: jobID,
611 | Project: project,
612 | StartedAt: bi.StartedAt,
613 | State: state,
614 | BuildInfo: bi}, nil
615 | }
616 |
617 | for _, j := range pendingJobs {
618 | job, err := getJob(pendingPath, j.Name(), p, "pending")
619 | if err != nil {
620 | return nil, fmt.Errorf("cannot find job %s; %s", j.Name(), err)
621 | }
622 | jobs = append(jobs, job)
623 | }
624 |
625 | for _, j := range readyJobs {
626 | job, err := getJob(readyPath, j.Name(), p, "ready")
627 | if err != nil {
628 | return nil, fmt.Errorf("cannot find job %s; %s", j.Name(), err)
629 | }
630 | jobs = append(jobs, job)
631 | }
632 | }
633 |
634 | return jobs, nil
635 | }
636 |
--------------------------------------------------------------------------------
/cmd/mistryd/server_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io/ioutil"
7 | "math/rand"
8 | "net/http"
9 | "net/http/httptest"
10 | "path"
11 | "strings"
12 | "sync"
13 | "testing"
14 | "time"
15 |
16 | "github.com/skroutz/mistry/pkg/types"
17 | )
18 |
19 | func TestBootstrapProjectRace(t *testing.T) {
20 | n := 10
21 | project := "bootstrap-concurrent"
22 | jobs := []*Job{}
23 | var wg sync.WaitGroup
24 |
25 | for i := 0; i < n; i++ {
26 | j, err := NewJob(project, params, "", testcfg)
27 | if err != nil {
28 | t.Fatal(err)
29 | }
30 | jobs = append(jobs, j)
31 | }
32 |
33 | for _, j := range jobs {
34 | wg.Add(1)
35 | go func(j *Job) {
36 | defer wg.Done()
37 | err := server.BootstrapProject(j)
38 | if err != nil {
39 | panic(err)
40 | }
41 | }(j)
42 | }
43 | wg.Wait()
44 | }
45 |
46 | func TestLoad(t *testing.T) {
47 | n := 100
48 | results := make(chan *types.BuildInfo, n)
49 | rand.Seed(time.Now().UnixNano())
50 |
51 | projects := []string{"concurrent", "concurrent2", "concurrent3", "concurrent4"}
52 | params := []types.Params{{}, {"foo": "bar"}, {"abc": "efd", "zzz": "xxx"}}
53 | groups := []string{"", "foo", "abc"}
54 |
55 | for i := 0; i < n; i++ {
56 | go func() {
57 | project := projects[rand.Intn(len(projects))]
58 | params := params[rand.Intn(len(params))]
59 | group := groups[rand.Intn(len(groups))]
60 |
61 | jr := types.JobRequest{Project: project, Params: params, Group: group}
62 | time.Sleep(time.Duration(rand.Intn(200)) * time.Millisecond)
63 | br, err := postJob(jr)
64 | if err != nil {
65 | panic(err)
66 | }
67 | results <- br
68 | }()
69 | }
70 |
71 | for i := 0; i < n; i++ {
72 | <-results
73 | }
74 | }
75 |
76 | func TestHandleIndex(t *testing.T) {
77 | cmdout, cmderr, err := cliBuildJob("--project", "simple")
78 | if err != nil {
79 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err)
80 | }
81 |
82 | req, err := http.NewRequest("GET", "/index", nil)
83 | if err != nil {
84 | t.Fatal(err)
85 | }
86 |
87 | rr := httptest.NewRecorder()
88 | handler := http.HandlerFunc(server.HandleIndex)
89 | handler.ServeHTTP(rr, req)
90 | result := rr.Result()
91 |
92 | if result.StatusCode != http.StatusOK {
93 | t.Errorf("Expected status code %d, got %d", http.StatusOK, result.StatusCode)
94 | }
95 |
96 | expected := `"state":"ready"`
97 | body, err := ioutil.ReadAll(result.Body)
98 | if err != nil {
99 | t.Fatal(err)
100 | }
101 | if !strings.Contains(string(body), expected) {
102 | t.Errorf("Expected body to contain %v, got %v", expected, string(body))
103 | }
104 | }
105 |
106 | func TestHandleShowJob(t *testing.T) {
107 | cmdout, cmderr, err := cliBuildJob("--project", "simple")
108 | if err != nil {
109 | t.Fatalf("mistry-cli stdout: %s, stderr: %s, err: %#v", cmdout, cmderr, err)
110 | }
111 |
112 | // Get a job id and project from the index page.
113 | req, err := http.NewRequest("GET", "/index", nil)
114 | if err != nil {
115 | t.Fatal(err)
116 | }
117 | rr := httptest.NewRecorder()
118 | handler := http.HandlerFunc(server.HandleIndex)
119 | handler.ServeHTTP(rr, req)
120 | result := rr.Result()
121 | body, err := ioutil.ReadAll(result.Body)
122 | if err != nil {
123 | t.Fatal(err)
124 | }
125 | job := make([]Job, 0)
126 | err = json.Unmarshal([]byte(body), &job)
127 | if err != nil {
128 | t.Fatal(err)
129 | }
130 | jobID := job[0].ID
131 | project := job[0].Project
132 |
133 | // Request the show page of the job selected from the index page.
134 | showPath := path.Join("/job", project, jobID)
135 | req, err = http.NewRequest("GET", showPath, nil)
136 | if err != nil {
137 | t.Fatal(err)
138 | }
139 | req.Header.Set("Content-type", "application/json")
140 | rr = httptest.NewRecorder()
141 | handler = http.HandlerFunc(server.HandleShowJob)
142 | handler.ServeHTTP(rr, req)
143 | result = rr.Result()
144 |
145 | if result.StatusCode != http.StatusOK {
146 | t.Errorf("Expected status code %d, got %d", http.StatusOK, result.StatusCode)
147 | }
148 |
149 | expected := fmt.Sprintf(`"ID":"%s"`, jobID)
150 | body, err = ioutil.ReadAll(result.Body)
151 | if err != nil {
152 | t.Fatal(err)
153 | }
154 | if !strings.Contains(string(body), expected) {
155 | t.Errorf("Expected body to contain %v, got %v", expected, string(body))
156 | }
157 | }
158 |
159 | func TestNewJobAsync(t *testing.T) {
160 | rec := httptest.NewRecorder()
161 | req := httptest.NewRequest("POST", "/jobs?async", strings.NewReader("{\"project\": \"simple\"}"))
162 | server.srv.Handler.ServeHTTP(rec, req)
163 | resp := rec.Result()
164 | body, err := ioutil.ReadAll(resp.Body)
165 | if err != nil {
166 | t.Errorf("Error in reading response body: %s", err)
167 | }
168 | assertEq(resp.StatusCode, 201, t)
169 | assertEq(string(body), "", t)
170 | }
171 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bad_entrypoint/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bad_entrypoint/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | >&2 echo "this is stderr"
5 | echo "this is stdout"
6 | missing_command
7 |
8 | exit 0
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bootstrap-concurrent/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bootstrap-concurrent/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | exit 0
3 |
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bootstrap-twice/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/bootstrap-twice/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | touch artifacts/out.txt
5 |
6 | exit 0
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-cache/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-cache/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [ -f cache/out.txt ]; then
5 | date +%S%N > artifacts/out.txt
6 | else
7 | date +%S%N | tee cache/out.txt > artifacts/out.txt
8 | fi
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-coalescing-exitcode/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-coalescing-exitcode/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sleep 2
5 |
6 | echo "coalescing!" > artifacts/out.txt
7 |
8 | exit 35
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-coalescing/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/build-coalescing/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sleep 2
5 |
6 | echo "coalescing!" > artifacts/out.txt
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo 0
3 | exit 0
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent2/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo 2
3 | exit 0
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent3/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent3/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo 3
3 | exit 0
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent4/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/concurrent4/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo 4
3 | exit 0
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/copy-folder/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | COPY koko/ /koko/
7 | WORKDIR /data
8 |
9 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
10 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/copy-folder/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #HACK: mistry will attempt to copy all artifacts out of the container
3 | # Therefore we need to leave something behind. This might or might not be
 4 | # the intended behaviour. Revisit in the future.
5 | touch /data/artifacts/foo
6 | stat /koko/lala.txt
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/copy-folder/koko/lala.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skroutz/mistry/f7b47b936e29985b63a693c9713e6c821e9a01f3/cmd/mistryd/testdata/projects/copy-folder/koko/lala.txt
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/exit-code/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/exit-code/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | exit 77
3 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/failed-build-cleanup/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | INVALIDCOMMAND
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/failed-build-link/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/failed-build-link/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | exit `cat params/_exitcode`
3 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/hanging-pending/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/hanging-pending/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | touch artifacts/out.txt
5 |
6 | exit 0
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/image-build-failure/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | RUN apt-get install -y fofkoeakodksao
4 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/job-id-seeding/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/job-id-seeding/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sleep 2
5 |
6 | echo "coalescing!" > artifacts/out.txt
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/params/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/params/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | cat params/foo > artifacts/out.txt
5 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/result-cache/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/result-cache/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | date +%S%N > artifacts/out.txt
5 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/simple/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/simple/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | touch artifacts/out.txt
5 |
6 | exit 0
7 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/sleep/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stretch
2 |
3 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
4 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh
5 |
6 | WORKDIR /data
7 |
8 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
9 |
--------------------------------------------------------------------------------
/cmd/mistryd/testdata/projects/sleep/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sleep 10
5 |
--------------------------------------------------------------------------------
/cmd/mistryd/worker.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "io/ioutil"
9 | "log"
10 | "os"
11 | "path/filepath"
12 | "strings"
13 | "time"
14 |
15 | _ "github.com/docker/distribution"
16 | docker "github.com/docker/docker/client"
17 | "github.com/skroutz/mistry/pkg/types"
18 | "github.com/skroutz/mistry/pkg/utils"
19 | )
20 |
// Work performs the work denoted by j and returns a BuildInfo upon
// successful completion, or an error.
//
// The overall flow is: coalesce with an identical in-flight build if one
// exists; otherwise serve a cached result from a previous successful run;
// otherwise bootstrap the project and build directories, build the Docker
// image, run the container and collect its logs, persisting build info
// along the way. Errors are funnelled through the named return value so
// the deferred cleanup blocks can observe and augment them.
func (s *Server) Work(ctx context.Context, j *Job) (buildInfo *types.BuildInfo, err error) {
	log := log.New(os.Stderr, fmt.Sprintf("[worker] [%s] ", j), log.LstdFlags)
	start := time.Now()

	buildInfo = types.NewBuildInfo()
	j.BuildInfo = buildInfo
	j.BuildInfo.Path = filepath.Join(j.ReadyBuildPath, DataDir, ArtifactsDir)
	j.BuildInfo.TransportMethod = types.Rsync
	j.BuildInfo.Params = j.Params
	j.BuildInfo.StartedAt = j.StartedAt
	j.BuildInfo.URL = getJobURL(j)
	j.BuildInfo.Group = j.Group

	if s.metrics != nil {
		s.metrics.RecordBuildStarted(j.Project)
	}

	// build coalescing: if an identical job is already in flight, don't
	// build twice — poll every 2s until its result shows up in the ready
	// path and reuse its exit code.
	added := s.jq.Add(j)
	if added {
		defer s.jq.Delete(j)
	} else {
		t := time.NewTicker(2 * time.Second)
		defer t.Stop()
		log.Printf("Coalescing with %s...", j.PendingBuildPath)
		for {
			select {
			case <-ctx.Done():
				err = workErr("context cancelled while coalescing", nil)
				return
			case <-t.C:
				_, err = os.Stat(j.ReadyBuildPath)
				if err == nil {
					i, err := ExitCode(j)
					if err != nil {
						return j.BuildInfo, err
					}
					j.BuildInfo.ExitCode = i
					j.BuildInfo.Coalesced = true

					if s.metrics != nil {
						s.metrics.RecordBuildCoalesced(j.Project)
					}

					return j.BuildInfo, err
				}

				if os.IsNotExist(err) {
					continue
				} else {
					err = workErr("could not coalesce", err)
					return
				}
			}
		}
	}

	// build result cache: a ready path from a previous run is reused if
	// it was successful; a failed one is removed so the build restarts.
	_, err = os.Stat(j.ReadyBuildPath)
	if err == nil {
		buildInfo, err := ReadJobBuildInfo(j.ReadyBuildPath, true)
		if err != nil {
			return nil, err
		} else if buildInfo.ExitCode != 0 {
			// Previous build failed, remove its build dir to
			// restart it. We know it's not pointed to by a
			// latest link since we only symlink successful builds
			err = s.cfg.FileSystem.Remove(j.ReadyBuildPath)
			if err != nil {
				return buildInfo, workErr("could not remove existing failed build", err)
			}
		} else { // if a successful result already exists, use that
			buildInfo.Cached = true

			if s.metrics != nil {
				s.metrics.RecordCacheUtilization(j.Project)
			}

			return buildInfo, err
		}
	} else if !os.IsNotExist(err) {
		err = workErr("could not check for ready path", err)
		return
	}

	// reject jobs for projects not present under the server's projects path
	_, err = os.Stat(filepath.Join(s.cfg.ProjectsPath, j.Project))
	if err != nil {
		if os.IsNotExist(err) {
			err = workErr("Unknown project", nil)
			return
		}
		err = workErr("could not check for project", err)
		return
	}

	err = s.BootstrapProject(j)
	if err != nil {
		err = workErr("could not bootstrap project", err)
		return
	}

	err = j.BootstrapBuildDir(s.cfg.FileSystem)
	if err != nil {
		err = workErr("could not bootstrap build dir", err)
		return
	}

	err = persistBuildInfo(j)
	if err != nil {
		err = workErr("could not persist build info", err)
		return
	}

	// move from pending to ready when finished; registered first, so due
	// to LIFO defer ordering this runs AFTER the build-info persist defer
	// below, i.e. the pending dir is finalized before being renamed
	defer func() {
		rerr := os.Rename(j.PendingBuildPath, j.ReadyBuildPath)
		if rerr != nil {
			errstr := "could not move pending path"
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, rerr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, rerr, err)
			}
		}

		// if build was successful, point 'latest' link to it
		if err == nil && j.BuildInfo.ExitCode == types.ContainerSuccessExitCode {
			// eliminate concurrent filesystem operations since
			// they could result in a corrupted state (eg. if
			// jobs of the same project simultaneously finish
			// successfully)
			s.pq.Lock(j.Project)
			defer s.pq.Unlock(j.Project)

			_, err = os.Lstat(j.LatestBuildPath)
			if err == nil {
				err = os.Remove(j.LatestBuildPath)
				if err != nil {
					err = workErr("could not remove latest build link", err)
					return
				}
			} else if !os.IsNotExist(err) {
				err = workErr("could not stat the latest build link", err)
				return
			}

			err = os.Symlink(j.ReadyBuildPath, j.LatestBuildPath)
			if err != nil {
				err = workErr("could not create latest build link", err)
			}
		}
	}()

	// populate j.BuildInfo.Err and persist build_info file one last
	// time
	defer func() {
		if err != nil {
			j.BuildInfo.ErrBuild = err.Error()
		}

		biErr := persistBuildInfo(j)
		if biErr != nil {
			err = workErr("could not persist build info", biErr)
			return
		}
	}()

	// expose each parameter to the container as a file under the params dir
	for k, v := range j.Params {
		err = ioutil.WriteFile(filepath.Join(j.PendingBuildPath, DataDir, ParamsDir, k), []byte(v), 0644)
		if err != nil {
			err = workErr("could not write param file", err)
			return
		}
	}

	out, err := os.Create(j.BuildLogPath)
	if err != nil {
		err = workErr("could not create build log file", err)
		return
	}
	defer func() {
		ferr := out.Close()
		errstr := "could not close build log file"
		if ferr != nil {
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, ferr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, ferr, err)
			}
		}
	}()

	client, err := docker.NewEnvClient()
	if err != nil {
		err = workErr("could not create docker client", err)
		return
	}
	defer func() {
		derr := client.Close()
		errstr := "could not close docker client"
		if derr != nil {
			if err == nil {
				err = fmt.Errorf("%s; %s", errstr, derr)
			} else {
				err = fmt.Errorf("%s; %s | %s", errstr, derr, err)
			}
		}
	}()

	// NOTE(review): j.Rebuild is passed twice — presumably two distinct
	// boolean options of BuildImage; confirm against its signature.
	err = j.BuildImage(ctx, s.cfg.UID, client, out, j.Rebuild, j.Rebuild)
	if err != nil {
		err = workErr("could not build docker image", err)
		return
	}

	var outErr strings.Builder
	j.BuildInfo.ExitCode, err = j.StartContainer(ctx, s.cfg, client, out, &outErr)
	if err != nil {
		err = workErr("could not start docker container", err)
		return
	}

	err = out.Sync()
	if err != nil {
		err = workErr("could not flush the output log", err)
		return
	}

	stdouterr, err := ReadJobLogs(j.PendingBuildPath)
	if err != nil {
		err = workErr("could not read the job logs", err)
		return
	}

	j.BuildInfo.ContainerStdouterr = string(stdouterr)
	j.BuildInfo.ContainerStderr = outErr.String()
	j.BuildInfo.Duration = time.Now().Sub(start).Truncate(time.Millisecond)

	if s.metrics != nil {
		s.metrics.RecordBuildFinished(
			j.Project,
			j.BuildInfo.ExitCode == types.ContainerSuccessExitCode,
			j.BuildInfo.Incremental,
			j.BuildInfo.Duration,
		)
	}

	log.Println("Finished after", j.BuildInfo.Duration)
	return
}
273 |
274 | // BootstrapProject bootstraps j's project if needed. BootstrapProject is
275 | // idempotent.
276 | func (s *Server) BootstrapProject(j *Job) error {
277 | s.pq.Lock(j.Project)
278 | defer s.pq.Unlock(j.Project)
279 |
280 | err := utils.EnsureDirExists(j.RootBuildPath)
281 | if err != nil {
282 | return err
283 | }
284 |
285 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "pending"))
286 | if err != nil {
287 | return err
288 | }
289 |
290 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "ready"))
291 | if err != nil {
292 | return err
293 | }
294 |
295 | if j.Group != "" {
296 | err = utils.EnsureDirExists(filepath.Join(j.RootBuildPath, "groups"))
297 | if err != nil {
298 | return err
299 | }
300 | }
301 |
302 | return nil
303 | }
304 |
305 | // ExitCode returns the exit code of the job's container build.
306 | // If an error is returned, the exit code is irrelevant.
307 | func ExitCode(j *Job) (int, error) {
308 | buildInfo, err := ReadJobBuildInfo(j.ReadyBuildPath, false)
309 | if err != nil {
310 | return types.ContainerPendingExitCode, err
311 | }
312 | return buildInfo.ExitCode, nil
313 | }
314 |
315 | func workErr(s string, e error) error {
316 | s = "work: " + s
317 | if e != nil {
318 | s += "; " + e.Error()
319 | }
320 | return errors.New(s)
321 | }
322 |
323 | // persistBuildInfo persists the JSON-serialized version of j.BuildInfo
324 | // to disk.
325 | func persistBuildInfo(j *Job) error {
326 | // we don't want to persist the whole build logs in the build_info file
327 | bi := *j.BuildInfo
328 | bi.ContainerStdouterr = ""
329 | bi.ContainerStderr = ""
330 |
331 | out, err := json.Marshal(bi)
332 | if err != nil {
333 | return err
334 | }
335 |
336 | return ioutil.WriteFile(j.BuildInfoFilePath, out, 0666)
337 | }
338 |
--------------------------------------------------------------------------------
/cmd/mistryd/worker_pool.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "sync"
9 |
10 | "github.com/skroutz/mistry/pkg/types"
11 | )
12 |
13 | // WorkResult contains the result of a build, either a BuildInfo or an error
14 | type WorkResult struct {
15 | BuildInfo *types.BuildInfo
16 | Err error
17 | }
18 |
19 | // FutureWorkResult is a WorkResult that may not yet have become available and
20 | // can be Wait()'ed on
21 | type FutureWorkResult struct {
22 | result <-chan WorkResult
23 | }
24 |
25 | // Wait waits for WorkResult to become available and returns it
26 | func (f FutureWorkResult) Wait() WorkResult {
27 | r, ok := <-f.result
28 | if !ok {
29 | // this should never happen, reading from the result channel
30 | // is exclusive to this future
31 | panic("Failed to read from result channel")
32 | }
33 | return r
34 | }
35 |
36 | // workItem contains a job and a channel to place the job result. struct
37 | // used in the internal work queue
38 | type workItem struct {
39 | job *Job
40 | result chan<- WorkResult
41 | }
42 |
43 | // WorkerPool implements a fixed-size pool of workers that build jobs
44 | // build jobs and communicate their result
45 | type WorkerPool struct {
46 | // the fixed amount of goroutines that will be handling running jobs
47 | concurrency int
48 |
49 | // the maximum backlog of pending requests. if exceeded, sending new work
50 | // to the pool will return an error
51 | backlogSize int
52 |
53 | queue chan workItem
54 | wg sync.WaitGroup
55 | }
56 |
57 | // NewWorkerPool initializes and starts a new worker pool, waiting for incoming
58 | // jobs.
59 | func NewWorkerPool(s *Server, concurrency, backlog int, logger *log.Logger) *WorkerPool {
60 | p := new(WorkerPool)
61 | p.concurrency = concurrency
62 | p.backlogSize = backlog
63 | p.queue = make(chan workItem, backlog)
64 |
65 | for i := 0; i < concurrency; i++ {
66 | go work(s, i, p.queue, &p.wg)
67 | p.wg.Add(1)
68 | }
69 | logger.Printf("Set up %d workers", concurrency)
70 | return p
71 | }
72 |
// Stop signals the workers to close and blocks until they are closed.
// Closing the queue terminates each worker's range loop once its in-flight
// job (if any) finishes; no new work can be sent after Stop.
func (p *WorkerPool) Stop() {
	close(p.queue)
	p.wg.Wait()
}
78 |
79 | // SendWork schedules the work j on p and returns a FutureWorkResult.
80 | // The actual result can be obtained by calling FutureWorkResult.Wait().
81 | //
82 | // An error is returned if the work backlog is full.
83 | func (p *WorkerPool) SendWork(j *Job) (FutureWorkResult, error) {
84 | resultQueue := make(chan WorkResult, 1)
85 | wi := workItem{j, resultQueue}
86 | result := FutureWorkResult{resultQueue}
87 |
88 | select {
89 | case p.queue <- wi:
90 | return result, nil
91 | default:
92 | return result, errors.New("queue is full")
93 | }
94 | }
95 |
96 | // work listens to the workQueue, runs Work() on any incoming work items, and
97 | // sends the result through the result queue
98 | func work(s *Server, id int, queue <-chan workItem, wg *sync.WaitGroup) {
99 | defer wg.Done()
100 | logPrefix := fmt.Sprintf("[worker %d]", id)
101 | for item := range queue {
102 | buildInfo, err := s.Work(context.Background(), item.job)
103 |
104 | select {
105 | case item.result <- WorkResult{buildInfo, err}:
106 | default:
107 | // this should never happen, the result chan should be unique for this worker
108 | s.Log.Panicf("%s failed to write result to the result channel", logPrefix)
109 | }
110 | close(item.result)
111 | }
112 | s.Log.Printf("%s exiting...", logPrefix)
113 | }
114 |
--------------------------------------------------------------------------------
/cmd/mistryd/worker_pool_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/skroutz/mistry/pkg/types"
8 | )
9 |
// TestBacklogLimit verifies that SendWork fails once the backlog is full:
// with 0 workers and a backlog of 1, the first job occupies the queue and
// the second must be rejected.
func TestBacklogLimit(t *testing.T) {
	wp, cfg := setupQueue(t, 0, 1)
	defer wp.Stop()

	params := types.Params{"test": "pool-backlog-limit"}
	params2 := types.Params{"test": "pool-backlog-limit2"}
	project := "simple"

	sendWorkNoErr(wp, project, params, cfg, t)
	_, _, err := sendWork(wp, project, params2, cfg, t)

	if err == nil {
		t.Fatal("Expected error")
	}
}
25 |
// TestConcurrency verifies that with a single worker, a second job stays
// queued while the first (a sleeping build) is being processed.
func TestConcurrency(t *testing.T) {
	// instantiate server with 1 worker
	wp, cfg := setupQueue(t, 1, 100)
	defer wp.Stop()

	project := "sleep"
	params := types.Params{"test": "pool-concurrency"}
	params2 := types.Params{"test": "pool-concurrency2"}

	sendWorkNoErr(wp, project, params, cfg, t)
	// give the chance for the worker to start work
	time.Sleep(1 * time.Second)

	j, _ := sendWorkNoErr(wp, project, params2, cfg, t)

	// the queue should contain only 1 item, the work item for the 2nd job
	assertEq(len(wp.queue), 1, t)
	select {
	case i, ok := <-wp.queue:
		if !ok {
			t.Fatalf("Unexpectedly closed worker pool queue")
		}
		assertEq(i.job, j, t)
	default:
		t.Fatalf("Expected to find a work item in the queue")
	}
}
53 |
// setupQueue builds a server from the shared test config with the given
// worker count and backlog, returning its pool and config.
// NOTE(review): cfg := testcfg copies the variable; if testcfg is a *Config
// the Concurrency/Backlog writes below mutate shared state — confirm.
func setupQueue(t *testing.T, workers, backlog int) (*WorkerPool, *Config) {
	cfg := testcfg
	cfg.Concurrency = workers
	cfg.Backlog = backlog

	s, err := NewServer(cfg, nil, false)
	failIfError(err, t)
	return s.workerPool, cfg
}
63 |
// sendWork creates a job for project with params and schedules it on wp,
// returning the job, its future result and any scheduling error.
func sendWork(wp *WorkerPool, project string, params types.Params, cfg *Config, t *testing.T) (*Job, FutureWorkResult, error) {
	j, err := NewJob(project, params, "", cfg)
	failIfError(err, t)

	r, err := wp.SendWork(j)
	return j, r, err
}
71 |
// sendWorkNoErr is like sendWork but fails the test on a scheduling error.
func sendWorkNoErr(wp *WorkerPool, project string, params types.Params, cfg *Config, t *testing.T) (*Job, FutureWorkResult) {
	j, r, err := sendWork(wp, project, params, cfg, t)
	failIfError(err, t)
	return j, r
}
77 |
--------------------------------------------------------------------------------
/cmd/mistryd/worker_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "strings"
5 | "testing"
6 |
7 | "github.com/skroutz/mistry/pkg/types"
8 | )
9 |
// TestBuildCache verifies build caching within a group: two builds with
// different params must produce different artifacts and paths, share the
// same cache contents, and the second must be incremental.
func TestBuildCache(t *testing.T) {
	params := types.Params{"foo": "bar"}
	group := "baz"

	result1, err := postJob(
		types.JobRequest{Project: "build-cache", Params: params, Group: group})
	if err != nil {
		t.Fatal(err)
	}

	out1, err := readOut(result1, ArtifactsDir)
	if err != nil {
		t.Fatal(err)
	}

	cachedOut1, err := readOut(result1, CacheDir)
	if err != nil {
		t.Fatal(err)
	}

	// the project writes the same content to artifacts and cache
	assertEq(out1, cachedOut1, t)

	// changing a param forces a new (but incremental) build in the group
	params["foo"] = "bar2"
	result2, err := postJob(
		types.JobRequest{Project: "build-cache", Params: params, Group: group})
	if err != nil {
		t.Fatal(err)
	}

	out2, err := readOut(result2, ArtifactsDir)
	if err != nil {
		t.Fatal(err)
	}

	cachedOut2, err := readOut(result2, CacheDir)
	if err != nil {
		t.Fatal(err)
	}

	assertEq(cachedOut1, cachedOut2, t)
	assertNotEq(out1, out2, t)
	assertNotEq(result1.Path, result2.Path, t)
	assert(result1.ExitCode, 0, t)
	assert(result2.ExitCode, 0, t)
	assert(result1.Incremental, false, t)
	assert(result2.Incremental, true, t)
}
57 |
58 | func TestFailedPendingBuildCleanup(t *testing.T) {
59 | var err error
60 | project := "failed-build-cleanup"
61 | expected := "unknown instruction: INVALIDCOMMAND"
62 |
63 | for i := 0; i < 3; i++ {
64 | _, err = postJob(
65 | types.JobRequest{Project: project, Params: params, Group: ""})
66 | if !strings.Contains(err.Error(), expected) {
67 | t.Fatalf("Expected '%s' to contain '%s'", err.Error(), expected)
68 | }
69 | }
70 | }
71 |
// regression test for incremental building bug: a failed build must not be
// symlinked as 'latest', yet a repeat of that failed build should still be
// incremental (reusing the group's cache from the last successful build).
func TestBuildCacheWhenFailed(t *testing.T) {
	group := "ppp"

	// a successful build - it'll be symlinked
	_, err := postJob(
		types.JobRequest{Project: "failed-build-link",
			Params: types.Params{"_exitcode": "0"},
			Group:  group})
	if err != nil {
		t.Fatal(err)
	}

	// a failed build - it should NOT be symlinked
	_, err = postJob(
		types.JobRequest{Project: "failed-build-link",
			Params: types.Params{"_exitcode": "1", "foo": "bar"},
			Group:  group})
	if err != nil {
		t.Fatal(err)
	}

	// repeat the previous failed build - it
	// SHOULD be incremental
	buildInfo, err := postJob(
		types.JobRequest{Project: "failed-build-link",
			Params: types.Params{"_exitcode": "1", "foo": "bar"},
			Group:  group})
	if err != nil {
		t.Fatal(err)
	}

	if !buildInfo.Incremental {
		t.Fatal("build should be incremental, but it isn't")
	}

}
109 |
--------------------------------------------------------------------------------
/contrib/fabfile.py.sample:
--------------------------------------------------------------------------------
1 | """
2 | Deploy mistry
3 |
4 | Usage:
5 |
 6 |     $ fab -H <host> deploy
7 |
8 | """
9 | from json import loads as json
10 |
11 | from fabric.api import env, settings, parallel
12 | from fabric.operations import put, sudo, local, run
13 | from fabric.decorators import runs_once
14 | from fabric.context_managers import hide
15 |
def tail():
    """Follow the mistry unit's journal on the remote host.

    warn_only keeps fabric from aborting when the command is interrupted.
    """
    env.remote_interrupt = True
    with settings(warn_only=True):
        sudo('journalctl --unit=mistry --follow --lines=0', pty=True)
20 |
@runs_once
def build():
    """Cross-compile the mistry binaries for linux/amd64.

    Runs once per fab invocation even when deploying to multiple hosts.
    """
    local('GOOS=linux GOARCH=amd64 make build')
24 |
def copy():
    """Upload the mistryd binary to /usr/bin/ on the remote host."""
    # 0o755 instead of 0755: the old-style octal literal is a syntax error
    # under Python 3, while 0o755 is valid in both Python 2.6+ and 3.
    put('mistryd', '/usr/bin/', use_sudo=True, mode=0o755)
27 |
def restart():
    """Restart the mistry-server systemd service on the remote host."""
    sudo('systemctl restart mistry-server.service')
30 |
def status():
    """Print the mistry-server systemd service status."""
    run('systemctl status mistry-server.service')
33 |
def deploy():
    """Build, upload and restart mistry on the selected hosts."""
    build()
    copy()
    restart()
38 |
--------------------------------------------------------------------------------
/contrib/mistry-purge-builds:
--------------------------------------------------------------------------------
#!/usr/bin/env ruby
# Purge old mistry builds (btrfs subvolumes) and the symlinks pointing to them.
require 'json'
require 'time'
require 'optparse'

options = {}
OptionParser.new do |opts|
  opts.banner = "Purge old mistry builds from the file system.\nUsage: #{$0} [options]"
  # BUG FIX: a comma was missing between the two strings below; Ruby
  # concatenated the adjacent literals into one broken switch definition.
  opts.on('--older-than DAYS', 'remove builds older than DAYS days') { |v| options[:stale_point] = Time.now - Integer(v)*24*60*60 }
  opts.on('--path PATH', 'Build path') { |v| options[:path] = v }
  opts.on('--dry-run', 'Dry run') { |v| options[:dry_run] = v }
end.parse!

abort("#{options[:path]} is not a directory") unless File.directory?(options[:path])
abort("--older-than is required") if options[:stale_point].nil?

stale_jobs = []
groups_and_latest = []
projects = Dir["#{options[:path]}/projects/*"].map { |p| p.gsub("#{options[:path]}/projects/","") }

projects.each do |p|
  data_path = File.join(options[:path], "data", p)
  groups_path = File.join(data_path, "groups")
  latest_path = File.join(data_path, "latest")

  # collect the group symlinks and the 'latest' symlink so that links
  # pointing to deleted builds can be unlinked below.
  # BUG FIX: Dir["latest_path"] globbed the literal string "latest_path"
  # instead of the variable, so the latest link was never collected.
  groups_and_latest << Dir[latest_path].first unless Dir[latest_path].empty?
  Dir["#{groups_path}/*"].each do |j|
    groups_and_latest << j
  end

  ready_path = File.join(data_path, "ready")
  Dir["#{ready_path}/*"].each do |rj|
    start_time = nil
    if t = JSON.parse(File.read("#{rj}/build_info.json"))["StartedAt"]
      start_time = Time.parse(t)
    end
    # builds with no recorded start time are treated as stale
    stale_jobs << rj if start_time.nil? || start_time < options[:stale_point]
  end
end

if options[:dry_run]
  puts "would delete jobs: #{stale_jobs}"
  puts "would unlink: #{groups_and_latest}"
elsif !stale_jobs.empty?
  # remove only the symlinks that point at a build we are about to delete
  File.unlink(*(groups_and_latest.select{ |j| stale_jobs.include?(File.readlink(j)) }))
  `btrfs subvolume delete #{stale_jobs.join(' ')}`
end
48 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/skroutz/mistry
2 |
3 | go 1.15
4 |
5 | require (
6 | github.com/containerd/containerd v1.5.9 // indirect
7 | github.com/docker/distribution v2.7.1+incompatible
8 | github.com/docker/docker v20.10.12+incompatible
9 | github.com/docker/go-connections v0.4.0 // indirect
10 | github.com/docker/go-units v0.4.0
11 | github.com/gorilla/mux v1.8.0 // indirect
12 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
13 | github.com/morikuni/aec v1.0.0 // indirect
14 | github.com/prometheus/client_golang v1.11.0
15 | github.com/rakyll/statik v0.1.7
16 | github.com/urfave/cli v1.22.5
17 | golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
18 | google.golang.org/grpc v1.43.0 // indirect
19 | )
20 |
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skroutz/mistry/f7b47b936e29985b63a693c9713e6c821e9a01f3/logo.png
--------------------------------------------------------------------------------
/pkg/broker/broker.go:
--------------------------------------------------------------------------------
1 | package broker
2 |
3 | import (
4 | "bufio"
5 | "log"
6 | "sync"
7 |
8 | "github.com/skroutz/mistry/pkg/tailer"
9 | )
10 |
11 | // A Broker holds a registry with open client connections, listens for events on the
12 | // Notifier channel and broadcasts event messages to the corresponding clients.
13 | type Broker struct {
14 | Log *log.Logger
15 |
16 | // Messages are pushed to this channel.
17 | Notifier chan *Event
18 |
19 | // Channel for adding new client connections.
20 | NewClients chan *Client
21 |
22 | // Channel for signaling a closed client connection.
23 | ClosingClients chan *Client
24 |
25 | // Channel for signaling the closing of all connections for an id.
26 | CloseClientC map[string]chan struct{}
27 |
28 | // clients is the connections registry of the Broker. clients sent to the
29 | // NewClients channel are being added to the registry.
30 | // A reference to the Client is being used so that the connections can be
31 | // uniquely identified for the messages broadcasting.
32 | clients map[*Client]bool
33 |
34 | // Queue used to track all open clients count grouped by their id.
35 | // The stored map type is [string]int.
36 | clientsCount *sync.Map
37 | }
38 |
// Client represents a client-connection.
type Client struct {
	// The connection channel to communicate with the events gathering
	// channel. The Broker closes it when the connection is unregistered.
	Data chan []byte

	// Each connection has an id that corresponds to the Event ID it is
	// interested in receiving messages about.
	ID string

	// Extra contains any extra misc information about the connection.
	// e.g a secondary unique identifier for the Client.
	// NOTE(review): the Broker treats Extra as a file path to tail for
	// the first client of an id — confirm against callers.
	Extra string
}
53 |
// Event consists of an id ID and a message Msg. All clients with the same id
// receive the event message.
type Event struct {
	// The message to be consumed by any connected client e.g., browser.
	Msg []byte

	// Each message has an id which corresponds to the concerning client id.
	ID string
}
63 |
64 | // NewBroker returns a new Broker.
65 | func NewBroker(logger *log.Logger) *Broker {
66 | br := &Broker{}
67 | br.Log = logger
68 | br.Notifier = make(chan *Event)
69 | br.NewClients = make(chan *Client)
70 | br.ClosingClients = make(chan *Client)
71 | br.clients = make(map[*Client]bool)
72 | br.clientsCount = new(sync.Map)
73 | br.CloseClientC = make(map[string]chan struct{})
74 | return br
75 | }
76 |
77 | // ListenForClients is responsible for taking the appropriate course of
78 | // action based on the different channel messages. It listens for new clients
79 | // on the NewClients channel, for closing clients on the ClosingClients channel
80 | // and for events Event on the Notifier channel.
81 | func (br *Broker) ListenForClients() {
82 | for {
83 | select {
84 | case client := <-br.NewClients:
85 | br.clients[client] = true
86 | val, exists := br.clientsCount.Load(client.ID)
87 | cc, ok := val.(int)
88 | if exists && !ok {
89 | br.Log.Printf("got data of type %T but wanted int", val)
90 | }
91 | if exists && cc > 0 {
92 | br.clientsCount.Store(client.ID, cc+1)
93 | } else {
94 | br.clientsCount.Store(client.ID, 1)
95 | br.CloseClientC[client.ID] = make(chan struct{})
96 | tl, err := tailer.New(client.Extra)
97 | if err != nil {
98 | br.Log.Printf("[broker] Could not start the tailer for file %s", client.Extra)
99 | }
100 | go func() {
101 | s := bufio.NewScanner(tl)
102 | for s.Scan() {
103 | br.Notifier <- &Event{Msg: []byte(s.Text()), ID: client.ID}
104 | }
105 | }()
106 | go func() {
107 | <-br.CloseClientC[client.ID]
108 | err = tl.Close()
109 | if err != nil {
110 | br.Log.Print(err)
111 | }
112 | }()
113 | }
114 | case client := <-br.ClosingClients:
115 | close(client.Data)
116 | delete(br.clients, client)
117 | val, _ := br.clientsCount.Load(client.ID)
118 | cc, ok := val.(int)
119 | if !ok {
120 | br.Log.Printf("got data of type %T but wanted int", val)
121 | }
122 | newVal := cc - 1
123 | br.clientsCount.Store(client.ID, newVal)
124 | if newVal == 0 {
125 | br.CloseClientC[client.ID] <- struct{}{}
126 | }
127 | case event := <-br.Notifier:
128 | for client := range br.clients {
129 | if client.ID == event.ID {
130 | client.Data <- event.Msg
131 | }
132 | }
133 | }
134 | }
135 | }
136 |
--------------------------------------------------------------------------------
/pkg/filesystem/btrfs/btrfs.go:
--------------------------------------------------------------------------------
1 | package btrfs
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/skroutz/mistry/pkg/filesystem"
8 | "github.com/skroutz/mistry/pkg/utils"
9 | )
10 |
// Btrfs implements the FileSystem interface. It is an efficient implementation
// since it uses Copy-on-Write snapshots to do the cloning. It is the
// recommended solution for production systems.
type Btrfs struct{}

// init registers the implementation in the filesystem registry under
// the "btrfs" key.
func init() {
	filesystem.Registry["btrfs"] = Btrfs{}
}
19 |
20 | // Create creates a new subvolume named path.
21 | func (fs Btrfs) Create(path string) error {
22 | return runCmd([]string{"btrfs", "subvolume", "create", path})
23 | }
24 |
25 | // Clone creates a Btrfs snapshot of subvolume src to a new subvolume, dst.
26 | func (fs Btrfs) Clone(src, dst string) error {
27 | return runCmd([]string{"btrfs", "subvolume", "snapshot", src, dst})
28 | }
29 |
30 | // Remove deletes the subvolume with name path.
31 | func (fs Btrfs) Remove(path string) error {
32 | _, err := os.Stat(path)
33 | if err == nil {
34 | return runCmd([]string{"btrfs", "subvolume", "delete", path})
35 | }
36 | return nil
37 | }
38 |
39 | func runCmd(args []string) error {
40 | out, err := utils.RunCmd(args)
41 | if err != nil {
42 | return fmt.Errorf("%s (%s)", err, out)
43 | }
44 | return nil
45 | }
46 |
--------------------------------------------------------------------------------
/pkg/filesystem/filesystem.go:
--------------------------------------------------------------------------------
1 | package filesystem
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// Registry maps each filesystem name (e.g. "btrfs", "plain") to its
// implementation. Implementations register themselves via their package
// init functions.
var Registry = make(map[string]FileSystem)

// FileSystem defines a few basic filesystem operations
type FileSystem interface {
	// Create creates a new directory in the given path.
	Create(path string) error

	// Clone copies the src path and its contents to dst.
	Clone(src, dst string) error

	// Remove removes path and its children.
	// Implementors should not return an error when the path does not
	// exist.
	Remove(path string) error
}
23 |
24 | // Get returns the registered filesystem denoted by s. If it doesn't exist,
25 | // an error is returned.
26 | func Get(s string) (FileSystem, error) {
27 | fs, ok := Registry[s]
28 | if !ok {
29 | return nil, fmt.Errorf("unknown filesystem '%s' (%v)", s, Registry)
30 | }
31 | return fs, nil
32 | }
33 |
--------------------------------------------------------------------------------
/pkg/filesystem/plainfs/plainfs.go:
--------------------------------------------------------------------------------
1 | package plainfs
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/skroutz/mistry/pkg/filesystem"
8 | "github.com/skroutz/mistry/pkg/utils"
9 | )
10 |
// PlainFS implements the FileSystem interface. It uses plain `cp` and `mkdir`
// commands. Unlike Btrfs it performs full copies, so it works on any
// underlying filesystem.
type PlainFS struct{}

// init registers the implementation in the filesystem registry under
// the "plain" key.
func init() {
	filesystem.Registry["plain"] = PlainFS{}
}
18 |
19 | // Create creates a new directory at path
20 | func (fs PlainFS) Create(path string) error {
21 | return os.Mkdir(path, 0755)
22 | }
23 |
24 | // Clone recursively copies the contents of src to dst
25 | func (fs PlainFS) Clone(src, dst string) error {
26 | out, err := utils.RunCmd([]string{"cp", "-r", src, dst})
27 | if err != nil {
28 | return fmt.Errorf("%s (%s)", err, out)
29 | }
30 | return nil
31 | }
32 |
33 | // Remove deletes the path and all its contents
34 | func (fs PlainFS) Remove(path string) error {
35 | return os.RemoveAll(path)
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/tailer/tailer.go:
--------------------------------------------------------------------------------
1 | // Package tailer emulates the features of the tail program (reading from
2 | // continuously updated files).
3 | package tailer
4 |
5 | import (
6 | "io"
7 | "os"
8 | "time"
9 | )
10 |
11 | // A Tailer holds an io.ReadCloser interface. It implements a Read() function
12 | // which emulates the tailf UNIX program.
13 | type Tailer struct {
14 | io.ReadCloser
15 | }
16 |
17 | // New returns a new Tailer for the given path.
18 | func New(path string) (*Tailer, error) {
19 | f, err := os.Open(path)
20 | if err != nil {
21 | return &Tailer{}, err
22 | }
23 |
24 | if _, err := f.Seek(0, 2); err != nil {
25 | return &Tailer{}, err
26 | }
27 | return &Tailer{f}, nil
28 | }
29 |
30 | // Read provides a tailf like generator by handling the io.EOF error.
31 | // It returns the number of bytes read and any error encountered.
32 | // At end of file, when no more input is available, Read handles the io.EOF
33 | // error by continuing the reading loop.
34 | func (t *Tailer) Read(b []byte) (int, error) {
35 | for {
36 | n, err := t.ReadCloser.Read(b)
37 | if n > 0 {
38 | return n, nil
39 | } else if err != io.EOF {
40 | return n, err
41 | }
42 | time.Sleep(500 * time.Millisecond)
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/pkg/types/build_info.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "time"
5 | )
6 |
const (
	// ContainerPendingExitCode is the initial value of BuildInfo.ExitCode
	// (set by NewBuildInfo, not the Go zero value) and is updated after
	// the container finishes running.
	ContainerPendingExitCode = -999

	// ContainerSuccessExitCode indicates that the build was successful.
	ContainerSuccessExitCode = 0
)
15 |
// BuildInfo contains information regarding the outcome of an executed job.
type BuildInfo struct {
	// Params are the job build parameters
	Params Params

	// Group is the job group
	Group string

	// Path is the absolute path where the build artifacts are located.
	Path string

	// Cached is true if the build artifacts were retrieved from the cache.
	Cached bool

	// Coalesced is true if the build was returned from another pending
	// build.
	Coalesced bool

	// Incremental is true if the results of a previous build were
	// used as the base for this build (ie. build cache).
	Incremental bool

	// ExitCode is the exit code of the container command.
	//
	// It is initialized to ContainerPendingExitCode and is updated upon
	// build completion. If ExitCode is still set to
	// ContainerPendingExitCode after the build is finished, it indicates
	// an error somewhere along the path.
	//
	// It is irrelevant and should be ignored if Coalesced is true.
	ExitCode int

	// ErrBuild contains any errors that occurred during the build.
	//
	// TODO: It might contain errors internal to the server, that the
	// user can do nothing about. This should be fixed
	ErrBuild string

	// ContainerStdouterr contains the stdout/stderr of the container.
	ContainerStdouterr string `json:",omitempty"`

	// ContainerStderr contains the stderr of the container.
	ContainerStderr string `json:",omitempty"`

	// TransportMethod is the method with which the build artifacts can be
	// fetched.
	TransportMethod TransportMethod

	// StartedAt is the date and time when the build started.
	StartedAt time.Time

	// Duration is how much the build took to complete. If it cannot be
	// calculated yet, the value will be -1 seconds.
	//
	// NOTE: if Cached is true, this refers to the original build.
	Duration time.Duration

	// URL is the relative URL at which the build log is available.
	URL string
}
76 |
77 | // NewBuildInfo initializes a new BuildInfo with its StartedAt set to the
78 | // current time.
79 | func NewBuildInfo() *BuildInfo {
80 | bi := new(BuildInfo)
81 | bi.StartedAt = time.Now()
82 | bi.ExitCode = ContainerPendingExitCode
83 | bi.Duration = -1 * time.Second
84 |
85 | return bi
86 | }
87 |
--------------------------------------------------------------------------------
/pkg/types/doc.go:
--------------------------------------------------------------------------------
1 | // Package types contains the types that are used both by mistry server and
2 | // client.
3 | package types
4 |
--------------------------------------------------------------------------------
/pkg/types/errors.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import "fmt"
4 |
5 | // ErrImageBuild indicates an error occurred while building a Docker image.
6 | type ErrImageBuild struct {
7 | Image string
8 | Err error
9 | }
10 |
11 | func (e ErrImageBuild) Error() string {
12 | return fmt.Sprintf("could not build docker image '%s': %s", e.Image, e.Err)
13 | }
14 |
--------------------------------------------------------------------------------
/pkg/types/job_request.go:
--------------------------------------------------------------------------------
1 | package types
2 |
// JobRequest contains the data the job was requested with
type JobRequest struct {
	// Project is the name of the project to build.
	Project string
	// Params are the user-provided build parameters.
	Params Params
	// Group is the job group, if any.
	Group string
	// Rebuild requests a fresh build — presumably bypassing any cached
	// result; confirm against the server's job handling.
	Rebuild bool
}
10 |
--------------------------------------------------------------------------------
/pkg/types/params.go:
--------------------------------------------------------------------------------
1 | package types
2 |
// Params are the user-provided parameters of a particular build,
// modeled as free-form string key/value pairs.
// They're submitted as part of the job, typically using the mistry CLI.
type Params map[string]string
6 |
--------------------------------------------------------------------------------
/pkg/types/transport_method.go:
--------------------------------------------------------------------------------
1 | package types
2 |
// TransportMethod indicates the tool (binary) that the client will use to
// download the build artifacts from the server. The binary should be installed
// in the system.
type TransportMethod string

const (
	// Rsync instructs the client to use rsync(1) to download the assets,
	// either over the SSH or rsync protocol. It is the recommended choice
	// for production environments.
	Rsync TransportMethod = "rsync"

	// Scp instructs the client to use scp(1) to download the assets.
	//
	// The explicit type matters: without it Scp would be an untyped
	// string constant rather than a TransportMethod.
	Scp TransportMethod = "scp"
)
17 |
--------------------------------------------------------------------------------
/pkg/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "archive/tar"
5 | "bytes"
6 | "errors"
7 | "io"
8 | "os"
9 | "os/exec"
10 | "path/filepath"
11 | )
12 |
13 | // PathIsDir returns an error if p does not exist or is not a directory.
14 | func PathIsDir(p string) error {
15 | fi, err := os.Stat(p)
16 | if err != nil {
17 | return err
18 | }
19 |
20 | if !fi.IsDir() {
21 | return errors.New("Path " + p + " is not a directory")
22 | }
23 |
24 | return nil
25 | }
26 |
27 | // EnsureDirExists verifies path is a directory and creates it if it doesn't
28 | // exist.
29 | func EnsureDirExists(path string) error {
30 | fi, err := os.Stat(path)
31 | if err == nil {
32 | if !fi.IsDir() {
33 | return errors.New(path + " is not a directory")
34 | }
35 | } else {
36 | if os.IsNotExist(err) {
37 | err = os.Mkdir(path, 0755)
38 | if err != nil {
39 | return err
40 | }
41 | } else {
42 | return err
43 | }
44 | }
45 |
46 | return nil
47 | }
48 |
// RunCmd runs the shell command denoted by args, using the first
// element as the command and the remainder as its arguments.
// It returns the combined stderr/stdout output of the command.
// An empty args slice is an error (previously it caused an
// index-out-of-range panic).
func RunCmd(args []string) (string, error) {
	if len(args) == 0 {
		return "", errors.New("no command provided")
	}
	out, err := exec.Command(args[0], args[1:]...).CombinedOutput()
	return string(out), err
}
57 |
58 | // Tar walks the file tree rooted at root, adding each file or directory in the
59 | // tree (including root) in a tar archive. The files are walked
60 | // in lexical order, which makes the output deterministic.
61 | func Tar(root string) ([]byte, error) {
62 | var buf bytes.Buffer
63 | tw := tar.NewWriter(&buf)
64 | walkFn := func(path string, info os.FileInfo, err error) error {
65 | if err != nil {
66 | return err
67 | }
68 | if !info.Mode().IsRegular() {
69 | return nil
70 | }
71 |
72 | hdr, err := tar.FileInfoHeader(info, info.Name())
73 | if err != nil {
74 | return err
75 | }
76 |
77 | // Preserve directory structure when docker "untars" the build context
78 | hdr.Name, err = filepath.Rel(root, path)
79 | if err != nil {
80 | return err
81 | }
82 |
83 | err = tw.WriteHeader(hdr)
84 | if err != nil {
85 | return err
86 | }
87 |
88 | f, err := os.Open(path)
89 | if err != nil {
90 | return err
91 | }
92 |
93 | _, err = io.Copy(tw, f)
94 | if err != nil {
95 | return err
96 | }
97 |
98 | err = f.Close()
99 | if err != nil {
100 | return err
101 | }
102 |
103 | return nil
104 | }
105 |
106 | err := filepath.Walk(root, walkFn)
107 | if err != nil {
108 | return nil, err
109 | }
110 |
111 | err = tw.Close()
112 | if err != nil {
113 | return nil, err
114 | }
115 |
116 | return buf.Bytes(), nil
117 | }
118 |
--------------------------------------------------------------------------------