├── .gitignore ├── LICENSE ├── README.md ├── _release ├── create_release_artifacts.sh └── release.sh ├── configparser.go ├── consts └── consts.go ├── container └── container.go ├── example ├── Dockerfile ├── bluegreen │ └── capitan.cfg.sh └── std │ └── capitan.cfg.sh ├── glide.lock ├── glide.yaml ├── helpers ├── dockerhelper.go ├── slicehelp.go └── stringhelp.go ├── logger ├── containerlog.go └── logger.go ├── main.go ├── output.gif ├── projectconfig.go ├── shellsession └── shellsession.go └── version.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Go template 3 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 4 | *.o 5 | *.a 6 | *.so 7 | 8 | # Folders 9 | _obj 10 | _test 11 | 12 | # Architecture specific extensions/prefixes 13 | *.[568vq] 14 | [568vq].out 15 | 16 | *.cgo1.go 17 | *.cgo2.c 18 | _cgo_defun.c 19 | _cgo_gotypes.go 20 | _cgo_export.* 21 | 22 | _testmain.go 23 | 24 | *.exe 25 | *.test 26 | *.prof 27 | .idea 28 | capitan.iml 29 | capitan 30 | 31 | build/* 32 | vendor/ 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Donal Byrne 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Capitan 2 | 3 | [![GoDoc](https://godoc.org/github.com/byrnedo/capitan?status.svg)](https://godoc.org/github.com/byrnedo/capitan) 4 | 5 | Capitan is a tool for managing multiple Docker containers based largely on [crowdr](https://github.com/polonskiy/crowdr) 6 | 7 | Capitan is only a wrapper around the docker cli tool, no api usage whatsoever (well... an `inspect` command here and there). 8 | This means it will basically work with all versions of docker. 9 | 10 | $ capitan up 11 | 12 | Run arguments changed, doing blue-green redeploy: capitan_redis_green_1 13 | Running capitan_redis_blue_1 14 | cd939956f332391489d0383610d9da2c420595d495934c3221376dbf68854316 15 | Removing old container capitan_redis_green_1... 16 | Already running capitan_mongo_blue_1 17 | Already running capitan_nats_blue_1 18 | Already running capitan_app_blue_1 19 | 20 | ## Features 21 | 22 | - Provides commands which operate on collection of containers. 23 | - Uses predefined description of containers from readable configuration file. 24 | - Can use any docker run option that is provided by your docker version. 25 | - The order of starting containers is defined in configuration file. 26 | - The order of stopping containers is the reverse of the order of starting. 
27 | - Easy to install, compiled static go binaries available for linux, and mac. It doesn't require any execution environment or other libraries. 28 | - Allows the use of bash as hooks for many capitan commands. 29 | - [Blue/Green](https://docs.cloudfoundry.org/devguide/deploy-apps/blue-green.html) deployment - option to only remove original container when starting new version of same container if it starts and passes hook commands. 30 | 31 | 32 | ## Installation 33 | 34 | Head over to the [releases](https://github.com/byrnedo/capitan/releases) page to download a pre-built binary or deb file. 35 | 36 | Or using go: 37 | 38 | go get github.com/byrnedo/capitan 39 | 40 | ## Commands 41 | 42 | ### Invasive commands 43 | 44 | #### `up` 45 | Create then run or update containers 46 | Recreates if: 47 | 48 | 1. ~~If newer image is found it will remove the old container and run a new one~~ No longer does this as capitan can't know which node to check images for when talking to a swarm. 49 | 2. Container config has changed 50 | 51 | Starts stopped containers 52 | 53 | capitan up 54 | # Optionally can attach to output using `--attach|-a` flag. 55 | capitan up -a 56 | 57 | #### `create` 58 | Create but don't run containers 59 | 60 | capitan create 61 | 62 | #### `start` 63 | Start stopped containers 64 | 65 | capitan start 66 | # Optionally can attach to output using `--attach|-a` flag. 
67 | capitan start -a 68 | 69 | #### `scale` 70 | Start or stop instances of a container until required amount are running 71 | 72 | # run 5 instances of mysql 73 | capitan scale mysql 5 74 | 75 | NOTE: for containers started via this command to be accepted by further commands, the config output must be altered to state the required instances 76 | 77 | ##### `restart` 78 | Restart containers 79 | 80 | capitan restart 81 | # Further arguments passed through to docker, example `capitan start -t 5` 82 | capitan restart -t 10 83 | 84 | ##### `stop` 85 | Stop running containers 86 | 87 | capitan stop 88 | # Further arguments passed through to docker, example `capitan stop -t 5` 89 | capitan stop -t 10 90 | 91 | ##### `kill` 92 | Kill running containers using SIGKILL or a specified signal 93 | 94 | capitan kill 95 | # Further arguments passed through to docker, example `capitan kill --signal KILL` 96 | capitan kill --signal KILL 97 | 98 | ##### `rm` 99 | Remove stopped containers 100 | 101 | capitan rm 102 | # Further arguments passed through to docker, example `capitan rm -f` 103 | capitan rm -fv 104 | 105 | ### Non invasive commands 106 | 107 | ##### `ps` 108 | Show container status 109 | 110 | - Further arguments passed through to docker, example `capitan ps -a` 111 | 112 | ##### `ip` 113 | Show container ip addresses 114 | 115 | ##### `logs` 116 | Follow container logs 117 | 118 | ##### `pull` 119 | Pull images for all containers 120 | 121 | ##### `build` 122 | Build any containers with 'build' flag set (WIP) 123 | 124 | 125 | ## Configuration 126 | 127 | 128 | ### Global options 129 | 130 | --cmd, -c "./capitan.cfg.sh" Command used to obtain config 131 | --debug, -d Print extra log messages 132 | --dry-run, --dry Preview outcome, no changes will be made 133 | --filter, -f Filter to run action on a specific container only 134 | --help, -h Show help 135 | --version, -v Print the version 136 | 137 | ### Config file/output 138 | 139 | Service config is read from stdout 
of the command defined with `--cmd`. 140 | 141 | `capitan` by default runs the command `./capitan.cfg.sh` in the current directory to get the config. This can be customized with `--cmd|-c` flag. 142 | 143 | capitan --cmd ./someotherexecutable 144 | 145 | You could use any command which generates a valid config. It doesn't have to be a bash script like in the example or default. 146 | 147 | ### Filtering 148 | 149 | A single service type can be specified for an action by using the `--filter|-f` flag. So if your conf looked like this: 150 | 151 | ... 152 | fooapp hostname blah 153 | ... 154 | 155 | You could filter any command to run on just that service type by doing: 156 | 157 | capitan --filter fooapp 158 | 159 | #### Global options 160 | 161 | ##### `global project` 162 | The project name, defaults to current working directory 163 | 164 | ##### `global project_sep` 165 | String to use to create container name from `project` and name specified in config 166 | 167 | ##### `global blue_green [true/false]` 168 | String to deploy using blue/green handover. Defaults to false. This can be turned on/off per container with 169 | 170 | CONTAINER_NAME blue-green [true/false] 171 | 172 | #### `global hook [hook name] [hook command]` 173 | Allows for a custom shell command to be evaluated once at the following points: 174 | 175 | - Before/After up (`before.up`, `after.up`) 176 | - This occurs during the `up` command 177 | - Before/After Start (`before.start`, `after.start`) 178 | - This will occur in the `start` command 179 | - Before/After Stop (`before.stop`, `after.stop`) 180 | - This will occur in the `stop` command 181 | - Before/After Kill (`before.kill`, `after.kill`) 182 | - This will occur in the `kill` command 183 | - Before/After Rm (`before.rm`, `after.rm`) 184 | - This will occur in the `rm` command 185 | 186 | #### Container Options 187 | 188 | The output format must be: 189 | 190 | CONTAINER_NAME COMMAND [ARGS...] 
191 | 192 | All commands are passed through to docker cli as `--COMMAND` EXCEPT the following: 193 | 194 | #### `build` 195 | This allows a path to be given for a dockerfile. Note, it will attempt to build every time. Use `build-args` and pass `--no-cache` to force a full clean build each time. 196 | 197 | #### `build-args` 198 | Any further arguments that need to be passed when building. 199 | 200 | #### `hook [hook name] [hook command]` 201 | Allows for a custom shell command to be evaluated at the following points **for each container** 202 | 203 | - Before/After Run (`before.run`, `after.run`) 204 | - This occurs during the `up` command 205 | - Before/After Start (`before.start`, `after.start`) 206 | - This will occur in the `up`, `start` and `restart` command 207 | - Before/After Stop (`before.stop`, `after.stop`) 208 | - This will occur in the `stop` command only 209 | - Before/After Kill (`before.kill`, `after.kill`) 210 | - This will occur in the `kill` command only 211 | - Before/After Rm (`before.rm`, `after.rm`) 212 | - This will occur in the `up` and `rm` command 213 | 214 | *NOTE* hooks do not conform exactly to each command. Example: an `up` command may `rm` and then `run` a container OR just `start` a stopped container. 215 | 216 | #### `scale` 217 | Number of instances of the container to run. Default is 1. 218 | 219 | NOTE: this is untested with links ( I don't use links ) 220 | 221 | #### `link` 222 | An attempt to resolve a link to the first instance of a container is made. Otherwise the unresolved name is used. 223 | 224 | WARNING: When scaling, if the link resolves to a container defined in capitan's config, it will always resolve to the first instance. 225 | For example: `app link mycontainer:some-alias` will always resolve to `_mycontainer_1` 226 | 227 | #### `rm` 228 | 229 | By default capitan runs all commands with `-d`. This flag makes capitan run the command with `-rm` instead. 
230 | 231 | WARNING: This feature is experimental and may result in unexpected failures. A more predictable way is to leverage `docker wait` along with a dynamic label. 232 | For example: 233 | 234 | mycontainer label $(date +%s) 235 | mycontainer hook after.run docker wait \$CAPITAN_CONTAINER_NAME 236 | 237 | #### `volumes-from` 238 | 239 | An attempt to resolve a volume-from arg to the first instance of a container is made. Otherwise the unresolved name is used. 240 | 241 | WARNING: When scaling, if the container name resolves to a container defined in capitan's config, it will always resolve to the first instance. 242 | For example: `app volumes-from mycontainer` will always resolve to `_mycontainer_1` 243 | 244 | ### Environment Variables 245 | 246 | The following environment variables are available when creating the containers and when running **container hooks** 247 | 248 | # container name 249 | CAPITAN_CONTAINER_NAME 250 | # container type 251 | CAPITAN_CONTAINER_SERVICE_TYPE 252 | # instance of this type,eg if you have scale = 5 then each container will have their own instance number from 1 -> 5 253 | CAPITAN_CONTAINER_INSTANCE_NUMBER 254 | # the project name 255 | CAPITAN_PROJECT_NAME 256 | 257 | The following environment variables are available to all **hook** scripts 258 | 259 | CAPITAN_PROJECT_NAME 260 | CAPITAN_HOOK_NAME 261 | 262 | 263 | For example, following `capitan.cfg.sh` 264 | 265 | #!/bin/bash 266 | 267 | cat < build/releases/${pkg_name}/DEBIAN/postinst 31 | #!/bin/sh 32 | set -e 33 | echo 'Installed capitan' 34 | EOF 35 | chmod +x build/releases/${pkg_name}/DEBIAN/postinst 36 | 37 | cat << EOF > build/releases/${pkg_name}/DEBIAN/prerem 38 | #!/bin/sh 39 | set -e 40 | echo 'Removing capitan...' 
41 | EOF 42 | chmod +x build/releases/${pkg_name}/DEBIAN/prerem 43 | 44 | cat << EOF > build/releases/${pkg_name}/DEBIAN/control 45 | Package: capitan 46 | Version: $version 47 | Section: base 48 | Priority: optional 49 | Architecture: $deb_arch 50 | Maintainer: Donal Byrne 51 | Description: Scriptable docker container orchestration 52 | Scriptable docker container orchestration 53 | EOF 54 | (cd build/releases && dpkg-deb --build ${pkg_name} && mv ${pkg_name}.deb ../) 55 | 56 | done 57 | -------------------------------------------------------------------------------- /_release/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | SCRIPT=`realpath $0` 5 | SCRIPT_PATH=`dirname $SCRIPT` 6 | BUILD_PATH=$SCRIPT_PATH/../build 7 | GITHUB_TOKEN="${GITHUB_TOKEN:=}" 8 | [ -z "$GITHUB_TOKEN" ] && >&2 echo "Must set GITHUB_TOKEN env" && exit 1 9 | 10 | [ $# -lt 1 ] && >&2 echo must give release version as argument && exit 1 11 | 12 | RELEASE_VERSION=$1 13 | 14 | if GIT_DIR=$SCRIPT_PATH/../.git git rev-parse $RELEASE_VERSION >/dev/null 2>&1 15 | then 16 | >&2 echo tag $RELEASE_VERSION already exists 17 | exit 1 18 | fi 19 | 20 | set +e 21 | rm -rf ../build/* 22 | set -e 23 | 24 | cat < $BUILD_PATH/../version.go 25 | package main 26 | const VERSION = "$RELEASE_VERSION" 27 | EOF 28 | 29 | set +e 30 | git commit $BUILD_PATH/../version.go -m "Release version $RELEASE_VERSION" 31 | git push origin master 32 | set -e 33 | 34 | github-release release \ 35 | --user byrnedo \ 36 | --repo capitan \ 37 | --tag $RELEASE_VERSION \ 38 | --name "$RELEASE_VERSION" \ 39 | --description "$RELEASE_VERSION" \ 40 | --pre-release 41 | 42 | git pull origin 43 | 44 | 45 | $SCRIPT_PATH/create_release_artifacts.sh 46 | 47 | for artifact in $(ls -1 -d $BUILD_PATH/capitan_${RELEASE_VERSION}_*.{zip,deb}) 48 | do 49 | github-release upload \ 50 | --user byrnedo \ 51 | --repo capitan \ 52 | --tag $RELEASE_VERSION \ 53 | 
--file $artifact \ 54 | --name $(basename $artifact) 55 | done 56 | -------------------------------------------------------------------------------- /configparser.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "github.com/byrnedo/capitan/container" 8 | "github.com/byrnedo/capitan/helpers" 9 | "github.com/byrnedo/capitan/logger" 10 | "github.com/codegangsta/cli" 11 | "github.com/codeskyblue/go-sh" 12 | "github.com/mgutz/str" 13 | "os" 14 | "path" 15 | "strconv" 16 | "strings" 17 | "unicode" 18 | ) 19 | 20 | type ConfigParser struct { 21 | // command to obtain config from 22 | Command string 23 | // args given to cli 24 | Args cli.Args 25 | // the container filter 26 | Filter string 27 | } 28 | 29 | func NewSettingsParser(cmd string, args cli.Args, filter string) *ConfigParser { 30 | return &ConfigParser{ 31 | Command: cmd, 32 | Args: args, 33 | Filter: filter, 34 | } 35 | } 36 | 37 | func (f *ConfigParser) Run() (*ProjectConfig, error) { 38 | var ( 39 | output []byte 40 | err error 41 | cmdSlice []string 42 | cmdArgs []interface{} 43 | ) 44 | if len(f.Command) == 0 { 45 | return nil, errors.New("Command must not be empty") 46 | } 47 | 48 | if cmdSlice = str.ToArgv(f.Command); len(cmdSlice) > 1 { 49 | cmdArgs = helpers.ToInterfaceSlice(cmdSlice[1:]) 50 | } else { 51 | cmdArgs = []interface{}{} 52 | } 53 | 54 | ses := sh.NewSession() 55 | if output, err = ses.Command(cmdSlice[0], cmdArgs...).Output(); err != nil { 56 | return nil, err 57 | } 58 | settings, err := f.parseOutput(output) 59 | return settings, err 60 | 61 | } 62 | 63 | func (f *ConfigParser) parseOutput(out []byte) (*ProjectConfig, error) { 64 | lines := bytes.Split(out, []byte{'\n'}) 65 | settings, err := f.parseSettings(lines) 66 | return settings, err 67 | 68 | } 69 | 70 | // The main parse function. Creates the final list of containers. 
71 | func (f *ConfigParser) parseSettings(lines [][]byte) (projSettings *ProjectConfig, err error) { 72 | //minimum of len1 at this point in parts 73 | 74 | cmdsMap := make(map[string]container.Container, 0) 75 | 76 | projName, _ := os.Getwd() 77 | projName = toSnake(path.Base(projName)) 78 | projNameArr := strings.Split(projName, "_") 79 | projSettings = new(ProjectConfig) 80 | projSettings.ProjectName = projNameArr[len(projNameArr)-1] 81 | projSettings.ProjectSeparator = "_" 82 | projSettings.Hooks = make(Hooks) 83 | 84 | for lineNum, line := range lines { 85 | 86 | line = bytes.TrimLeft(line, " ") 87 | if len(line) == 0 || line[0] == '#' { 88 | //comment 89 | continue 90 | } 91 | lineParts := bytes.SplitN(line, []byte{' '}, 3) 92 | if len(lineParts) < 2 { 93 | //not enough args on line 94 | continue 95 | } 96 | 97 | if string(lineParts[0]) == "global" { 98 | if len(lineParts) > 2 { 99 | switch string(lineParts[1]) { 100 | case "project": 101 | projSettings.ProjectName = string(lineParts[2]) 102 | case "project_sep": 103 | projSettings.ProjectSeparator = stripChars(string(lineParts[2]), " \t") 104 | case "blue_green": 105 | projSettings.BlueGreenMode, _ = strconv.ParseBool(string(lineParts[2])) 106 | case "hook": 107 | hookAndCommand := bytes.SplitN(lineParts[2], []byte{' '}, 2) 108 | if len(hookAndCommand) == 2 { 109 | hookName := string(hookAndCommand[0]) 110 | hookScript := string(hookAndCommand[1]) 111 | hook := projSettings.Hooks[hookName] 112 | if hook == nil { 113 | hook = new(Hook) 114 | } 115 | hook.Scripts = append(hook.Scripts, hookScript) 116 | projSettings.Hooks[hookName] = hook 117 | } 118 | } 119 | } 120 | continue 121 | 122 | } 123 | 124 | contr := string(lineParts[0]) 125 | 126 | if _, found := cmdsMap[contr]; !found { 127 | cmdsMap[contr] = container.Container{ 128 | Placement: len(cmdsMap), 129 | Hooks: make(map[string]*container.Hook, 0), 130 | Scale: 1, 131 | BlueGreenMode: container.BGModeUnknown, 132 | Enabled: true, 133 | } 134 | } 135 | 
136 | action := string(lineParts[1]) 137 | setting := cmdsMap[contr] 138 | 139 | var args string 140 | if len(lineParts) > 2 { 141 | args = string(lineParts[2]) 142 | } 143 | args = strings.TrimRight(args, " ") 144 | switch action { 145 | case "command": 146 | if len(args) > 0 { 147 | parsedArgs := str.ToArgv(args) 148 | for _, arg := range parsedArgs { 149 | setting.Command = append(setting.Command, arg) 150 | } 151 | } 152 | case "scale": 153 | if len(args) > 0 { 154 | scale, err := strconv.Atoi(args) 155 | if err != nil { 156 | return projSettings, errors.New(fmt.Sprintf("Failed to parse `scale` on line %d, %s", lineNum+1, err)) 157 | } 158 | if scale < 1 { 159 | scale = 1 160 | } 161 | setting.Scale = scale 162 | } 163 | case "image": 164 | if len(args) > 0 { 165 | setting.Image = args 166 | } 167 | case "build": 168 | if len(args) > 0 { 169 | setting.Build = args 170 | } 171 | case "build-args": 172 | if len(args) > 0 { 173 | setting.BuildArgs = str.ToArgv(args) 174 | } 175 | case "link": 176 | 177 | argParts := strings.SplitN(args, ":", 2) 178 | 179 | var alias string 180 | if len(argParts) > 1 { 181 | alias = argParts[1] 182 | } 183 | 184 | newLink := container.Link{ 185 | Container: argParts[0], 186 | Alias: alias, 187 | } 188 | 189 | setting.Links = append(setting.Links, newLink) 190 | 191 | case "rm": 192 | setting.Remove = true 193 | case "hook": 194 | if len(args) > 0 { 195 | curHooks := setting.Hooks 196 | argParts := strings.SplitN(args, " ", 2) 197 | hookName := argParts[0] 198 | if len(argParts) > 1 { 199 | hookScript := argParts[1] 200 | 201 | hook := curHooks[hookName] 202 | if hook == nil { 203 | hook = new(container.Hook) 204 | } 205 | hook.Scripts = append(hook.Scripts, hookScript) 206 | curHooks[hookName] = hook 207 | } 208 | setting.Hooks = curHooks 209 | } 210 | case "blue-green": 211 | if len(args) > 0 { 212 | isBGMode, _ := strconv.ParseBool(args) 213 | if isBGMode { 214 | setting.BlueGreenMode = container.BGModeOn 215 | } else { 216 | 
setting.BlueGreenMode = container.BGModeOff 217 | } 218 | } 219 | case "enabled": 220 | if len(args) > 0 { 221 | setting.Enabled, _ = strconv.ParseBool(args) 222 | } 223 | case "volumes-from": 224 | argParts := strings.SplitN(args, " ", 2) 225 | setting.VolumesFrom = append(setting.VolumesFrom, argParts[0]) 226 | case "global": 227 | default: 228 | if action != "" { 229 | setting.ContainerArgs = append(setting.ContainerArgs, "--"+action) 230 | if args != "" { 231 | setting.ContainerArgs = append(setting.ContainerArgs, args) 232 | } 233 | } 234 | } 235 | 236 | cmdsMap[contr] = setting 237 | } 238 | 239 | var containersState map[string]*helpers.ServiceState 240 | if containersState, err = helpers.GetProjectState(projSettings.ProjectName, projSettings.ProjectSeparator); err != nil { 241 | return 242 | } 243 | // Post process 244 | err = f.postProcessConfig(cmdsMap, projSettings, containersState) 245 | return 246 | 247 | } 248 | 249 | // Now that we have all settings do some house keeping and processing 250 | func (f *ConfigParser) postProcessConfig(parsedConfig map[string]container.Container, projSettings *ProjectConfig, state map[string]*helpers.ServiceState) error { 251 | 252 | // TODO duplicate containers for scaling 253 | projSettings.ContainerList = make(SettingsList, 0) 254 | 255 | for name, item := range parsedConfig { 256 | if f.Filter != "" && f.Filter != name { 257 | continue 258 | } 259 | 260 | if ! 
item.Enabled { 261 | continue 262 | } 263 | 264 | item.Name = projSettings.ProjectName + projSettings.ProjectSeparator + name 265 | item.ServiceType = name 266 | item.ProjectName = projSettings.ProjectName 267 | item.ProjectNameSeparator = projSettings.ProjectSeparator 268 | 269 | // default image to name if 'build' is set 270 | if item.Build != "" { 271 | item.Image = item.Name 272 | } 273 | 274 | f.processBlueGreenMode(projSettings.BlueGreenMode, &item) 275 | 276 | f.processScaleArg(&item) 277 | 278 | f.processCleanupTasks(projSettings, &item) 279 | 280 | // resolve links 281 | f.processLinks(parsedConfig, &item) 282 | 283 | // resolve volumes from 284 | f.processVolumesFrom(parsedConfig, &item) 285 | 286 | ctrsToAdd := f.scaleContainers(&item, state) 287 | 288 | 289 | projSettings.ContainerList = append(projSettings.ContainerList, ctrsToAdd...) 290 | } 291 | 292 | 293 | return nil 294 | } 295 | 296 | func (f *ConfigParser) processBlueGreenMode(globalBGMode bool, item *container.Container) { 297 | if item.BlueGreenMode == container.BGModeUnknown { 298 | if globalBGMode { 299 | item.BlueGreenMode = container.BGModeOn 300 | } else { 301 | item.BlueGreenMode = container.BGModeOff 302 | } 303 | } 304 | 305 | 306 | } 307 | 308 | // Parse the volumes-from args and try and find first container with that type 309 | func (f *ConfigParser) processVolumesFrom(parsedConfig map[string]container.Container, item *container.Container) { 310 | for i, ctrName := range item.VolumesFrom { 311 | // TODO Not sure how to do this for scaling 312 | if _, found := parsedConfig[ctrName]; found { 313 | ctrName = item.ProjectName + item.ProjectNameSeparator + ctrName + item.ProjectNameSeparator + "1" 314 | item.VolumesFrom[i] = ctrName 315 | } 316 | } 317 | } 318 | 319 | // Parse the link args and try and find first container with that type 320 | func (f *ConfigParser) processLinks(parsedConfig map[string]container.Container, item *container.Container) { 321 | for i, link := range item.Links 
{ 322 | // TODO right now, for scaling links are bad so just putting it to first container 323 | container := link.Container 324 | if _, found := parsedConfig[link.Container]; found { 325 | container = item.ProjectName + item.ProjectNameSeparator + link.Container + item.ProjectNameSeparator + "1" 326 | } 327 | link.Container = container 328 | item.Links[i] = link 329 | } 330 | } 331 | 332 | // Parse the scale argument and set the container's scale property 333 | func (f *ConfigParser) processScaleArg(ctr *container.Container) { 334 | if f.Args.Get(0) == "scale" { 335 | if f.Args.Get(1) == ctr.ServiceType { 336 | if scaleArg, err := strconv.Atoi(f.Args.Get(2)); err == nil { 337 | if scaleArg > 0 { 338 | ctr.Scale = scaleArg 339 | } 340 | } 341 | } 342 | } 343 | } 344 | 345 | // Create list of containers to cleanup when scaling 346 | func (f *ConfigParser) processCleanupTasks(projSettings *ProjectConfig, ctr *container.Container) { 347 | var tasks SettingsList 348 | for _, existing := range projSettings.ContainersState { 349 | instNum, err := helpers.GetNumericSuffix(existing.Name, ctr.ProjectNameSeparator) 350 | if err != nil || instNum < 0 || instNum > ctr.Scale { 351 | tempCtr := new(container.Container) 352 | *tempCtr = *ctr 353 | tempCtr.Name = existing.Name 354 | tasks = append(tasks, tempCtr) 355 | } 356 | } 357 | projSettings.ContainerCleanupList = append(projSettings.ContainerCleanupList, tasks...) 
358 | return 359 | } 360 | 361 | // Create copies of containers which need to scale 362 | func (f *ConfigParser) scaleContainers(ctr *container.Container, state map[string]*helpers.ServiceState) []*container.Container { 363 | 364 | ctrCopies := make([]*container.Container, ctr.Scale) 365 | 366 | for i := 0; i < ctr.Scale; i++ { 367 | ctrCopies[i] = new(container.Container) 368 | *ctrCopies[i] = *ctr 369 | ctrCopies[i].InstanceNumber = i + 1 370 | 371 | var found bool 372 | var lookup = ctr.Name + ctr.ProjectNameSeparator + strconv.Itoa(ctrCopies[i].InstanceNumber) 373 | if ctrCopies[i].State, found = state[lookup]; !found { 374 | ctrCopies[i].State = &helpers.ServiceState{ 375 | Running: false, 376 | Color: "blue", 377 | } 378 | } 379 | 380 | ctrCopies[i].ServiceName = ctr.Name 381 | ctrCopies[i].NewName() 382 | 383 | // HACK for container logging prefix width alignment, eg 'some_container | blahbla' 384 | if len(ctrCopies[i].Name) > logger.LongestContainerName { 385 | logger.LongestContainerName = len(ctrCopies[i].Name) 386 | } 387 | } 388 | 389 | return ctrCopies 390 | } 391 | 392 | func stripChars(str, chr string) string { 393 | return strings.Map(func(r rune) rune { 394 | if strings.IndexRune(chr, r) < 0 { 395 | return r 396 | } 397 | return -1 398 | }, str) 399 | } 400 | 401 | // toSnake convert the given string to snake case following the Golang format: 402 | // acronyms are converted to lower-case and preceded by an underscore. 
403 | func toSnake(in string) string { 404 | runes := []rune(in) 405 | length := len(runes) 406 | 407 | var out []rune 408 | for i := 0; i < length; i++ { 409 | if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) { 410 | out = append(out, '_') 411 | } 412 | out = append(out, unicode.ToLower(runes[i])) 413 | } 414 | 415 | return string(out) 416 | } 417 | -------------------------------------------------------------------------------- /consts/consts.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | const ( 4 | UniqueLabelName = "capitanRunCmd" 5 | ServiceLabelName = "capitanServiceName" 6 | ServiceLabelType = "capitanServiceType" 7 | ProjectLabelName = "capitanProjectName" 8 | ContainerNumberLabelName = "capitanContainerNumber" 9 | ColorLabelName = "capitanDeployColor" 10 | ) 11 | -------------------------------------------------------------------------------- /container/container.go: -------------------------------------------------------------------------------- 1 | package container 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | . "github.com/byrnedo/capitan/consts" 7 | "github.com/byrnedo/capitan/helpers" 8 | "github.com/byrnedo/capitan/logger" 9 | . 
"github.com/byrnedo/capitan/logger" 10 | "github.com/codeskyblue/go-sh" 11 | "io/ioutil" 12 | "math/rand" 13 | "os" 14 | "strings" 15 | "sync" 16 | "time" 17 | "strconv" 18 | "github.com/byrnedo/capitan/shellsession" 19 | ) 20 | 21 | var ( 22 | colorList = []string{ 23 | "white", 24 | "red", 25 | "green", 26 | "yellow", 27 | "blue", 28 | "magenta", 29 | "cyan", 30 | } 31 | 32 | nextColorIndex = rand.Intn(len(colorList) - 1) 33 | ) 34 | 35 | // Get the next color to be used in log output 36 | func nextColor() string { 37 | defer func() { 38 | nextColorIndex++ 39 | if nextColorIndex >= len(colorList) { 40 | nextColorIndex = 0 41 | } 42 | }() 43 | return colorList[nextColorIndex] 44 | } 45 | 46 | type Link struct { 47 | Container string 48 | Alias string 49 | } 50 | 51 | type AppliedAction string 52 | 53 | const ( 54 | Run AppliedAction = "run" 55 | Start AppliedAction = "start" 56 | Stop AppliedAction = "stop" 57 | Kill AppliedAction = "kill" 58 | Restart AppliedAction = "restart" 59 | Remove AppliedAction = "remove" 60 | ) 61 | 62 | type Hook struct { 63 | Scripts []string 64 | Ses *shellsession.ShellSession 65 | } 66 | 67 | type Hooks map[string]*Hook 68 | 69 | 70 | func NewContainerShellSession(ctr *Container) *shellsession.ShellSession { 71 | return shellsession.NewShellSession(func(s *shellsession.ShellSession){ 72 | s.SetEnv("CAPITAN_CONTAINER_NAME", ctr.Name) 73 | s.SetEnv("CAPITAN_CONTAINER_SERVICE_TYPE", ctr.ServiceType) 74 | s.SetEnv("CAPITAN_CONTAINER_INSTANCE_NUMBER", strconv.Itoa(ctr.InstanceNumber)) 75 | s.SetEnv("CAPITAN_PROJECT_NAME", ctr.ProjectName) 76 | }) 77 | } 78 | 79 | // Runs a hook command if it exists for a specific container 80 | func (h Hooks) Run(hookName string, ctr *Container) error { 81 | var ( 82 | hook *Hook 83 | found bool 84 | err error 85 | ) 86 | 87 | if hook, found = h[hookName]; !found { 88 | return nil 89 | } 90 | 91 | for _, script := range hook.Scripts { 92 | hook.Ses = NewContainerShellSession(ctr) 93 | 
hook.Ses.SetEnv("CAPITAN_HOOK_NAME", hookName) 94 | 95 | hook.Ses.Command("bash", "-c", script) 96 | 97 | hook.Ses.Stdout = os.Stdout 98 | hook.Ses.Stderr = os.Stderr 99 | hook.Ses.Stdin = os.Stdin 100 | 101 | if err = hook.Ses.Run(); err != nil { 102 | return err 103 | } 104 | } 105 | return nil 106 | } 107 | 108 | type BlueGreenMode int 109 | 110 | const ( 111 | BGModeOff BlueGreenMode = iota 112 | BGModeOn 113 | BGModeUnknown 114 | ) 115 | 116 | type Container struct { 117 | // Container name 118 | Name string 119 | // name of service (not including number) 120 | ServiceName string 121 | // non unique service id, eg the first col in config, "mongo" or "php" 122 | ServiceType string 123 | // the order defined in the config output 124 | Placement int 125 | // arguments to container 126 | ContainerArgs []string 127 | // image to use 128 | Image string 129 | // if supplied will do docker build on this path 130 | Build string 131 | // The arguments for the build command 132 | BuildArgs []string 133 | // command for container 134 | Command []string 135 | // links 136 | Links []Link 137 | // volumes from list 138 | VolumesFrom []string 139 | // hooks map for this definition 140 | Hooks Hooks 141 | // used in commands 142 | Action AppliedAction 143 | // the total number of containers to scale to. 
144 | Scale int 145 | // the arguments for docker run / create 146 | RunArguments []interface{} 147 | // the project name 148 | ProjectName string 149 | // the project name separator, usually "_" 150 | ProjectNameSeparator string 151 | // the number of this container, relates to scale 152 | InstanceNumber int 153 | // Rm command given, therefore dont run as daemon 154 | Remove bool 155 | // Whether or not Blue/Green mode is enabled for this container 156 | BlueGreenMode BlueGreenMode 157 | // Is this container enabled or not 158 | Enabled bool 159 | // The current state of the container 160 | State *helpers.ServiceState 161 | } 162 | 163 | func (set *Container) NewName() { 164 | set.Name = fmt.Sprintf("%s%s%s%s%d", set.ServiceName, set.ProjectNameSeparator, set.State.Color, set.ProjectNameSeparator, set.InstanceNumber) 165 | } 166 | 167 | // Builds an image for a container 168 | func (set *Container) BuildImage() error { 169 | if err := set.Hooks.Run("before.build", set); err != nil { 170 | return err 171 | } 172 | 173 | args := append([]interface{}{ 174 | "build", 175 | }, helpers.ToInterfaceSlice(set.BuildArgs)...) 176 | 177 | args = append(args, "--tag", set.Image, set.Build) 178 | 179 | if _, err := helpers.RunCmd(args...); err != nil { 180 | return err 181 | } 182 | if err := set.Hooks.Run("after.build", set); err != nil { 183 | return err 184 | } 185 | return nil 186 | } 187 | 188 | func (set *Container) launchWithRmInForeground(cmd []interface{}) error { 189 | var ( 190 | ses *shellsession.ShellSession 191 | err error 192 | ) 193 | 194 | initialArgs := []interface{}{ 195 | "run", 196 | "-a", "stdout", 197 | "-a", "stderr", 198 | "-a", "stdin", 199 | "--sig-proxy=false", 200 | "--rm", 201 | } 202 | 203 | cmd = append(initialArgs, cmd...) 
204 | if ses, err = set.startLoggedCommand(cmd); err != nil { 205 | return err 206 | } 207 | 208 | err = ses.Wait() 209 | if err != nil { 210 | return errors.New(set.Name + " exited with error: " + err.Error()) 211 | } 212 | 213 | return nil 214 | } 215 | 216 | func (set *Container) launchInForeground(cmd []interface{}, wg *sync.WaitGroup) error { 217 | 218 | var ( 219 | ses *shellsession.ShellSession 220 | err error 221 | ) 222 | 223 | beforeStart := time.Now() 224 | 225 | initialArgs := []interface{}{ 226 | "run", 227 | "-a", "stdout", 228 | "-a", "stderr", 229 | "-a", "stdin", 230 | "--sig-proxy=false", 231 | } 232 | 233 | if set.Remove { 234 | initialArgs = append(initialArgs, "--rm") 235 | } 236 | 237 | cmd = append(initialArgs, cmd...) 238 | if ses, err = set.startLoggedCommand(cmd); err != nil { 239 | return err 240 | } 241 | wg.Add(1) 242 | go func(name string) { 243 | ses.Wait() 244 | wg.Done() 245 | }(set.Name) 246 | 247 | 248 | 249 | if !helpers.WasContainerStartedAfterOrRetry(set.Name, beforeStart, 10, 200 * time.Millisecond) { 250 | return errors.New(set.Name + " failed to start") 251 | } 252 | 253 | Debug.Println("Container deemed to have started after", beforeStart) 254 | if !helpers.ContainerIsRunning(set.Name) { 255 | exitCode := helpers.ContainerExitCode(set.Name) 256 | if exitCode != "0" { 257 | return errors.New(set.Name + " exited with non-zero exit code " + exitCode) 258 | } 259 | } 260 | 261 | return nil 262 | 263 | } 264 | 265 | func (set *Container) BlueGreenCopy() (newCon *Container) { 266 | // rename the current 267 | // if id then we have a color, 268 | var newColor = "blue" 269 | Debug.Println(set.State) 270 | if set.State.Color == "blue" { 271 | newColor = "green" 272 | } 273 | 274 | newCon = new(Container) 275 | *newCon = *set 276 | newCon.State.Color = newColor 277 | newCon.NewName() 278 | return 279 | 280 | 281 | } 282 | 283 | func (set *Container) BlueGreenDeploy(attach bool, dryRun bool, wg *sync.WaitGroup) error { 284 | 285 | 
newCon := set.BlueGreenCopy() 286 | 287 | if err := newCon.Run(attach, dryRun, wg); err != nil { 288 | // put back the old 289 | Warning.Println("Error running new container, killing...") 290 | newCon.Kill(nil) 291 | return err 292 | } 293 | 294 | // shutdown the old 295 | ContainerInfoLog(newCon.Name, "Removing old container "+set.Name+"...") 296 | if ! dryRun { 297 | if err := set.Rm([]string{"-f"}); err != nil { 298 | Error.Println("Error stopping old container") 299 | return err 300 | } 301 | } 302 | 303 | return nil 304 | } 305 | 306 | func (set *Container) RecreateAndRun(attach bool, dryRun bool, wg *sync.WaitGroup) error { 307 | if !dryRun { 308 | set.Rm([]string{"-f"}) 309 | } 310 | 311 | if err := set.Run(attach, dryRun, wg); err != nil { 312 | return err 313 | } 314 | return nil 315 | } 316 | 317 | func createCapitanContainerLabels(ctr *Container, args []interface{}) []interface{} { 318 | return []interface{}{ 319 | "--label", 320 | UniqueLabelName + "=" + helpers.HashInterfaceSlice(args), 321 | "--label", 322 | ServiceLabelName + "=" + ctr.ServiceName, 323 | "--label", 324 | ServiceLabelType + "=" + ctr.ServiceType, 325 | "--label", 326 | ProjectLabelName + "=" + ctr.ProjectName, 327 | "--label", 328 | ContainerNumberLabelName + "=" + strconv.Itoa(ctr.InstanceNumber), 329 | "--label", 330 | ColorLabelName + "=" + ctr.State.Color, 331 | } 332 | } 333 | 334 | // Run a container 335 | func (set *Container) Create(dryRun bool) error { 336 | set.Action = Run 337 | 338 | ContainerInfoLog(set.Name, "Creating...") 339 | if dryRun { 340 | return nil 341 | } 342 | if err := set.Hooks.Run("before.create", set); err != nil { 343 | return err 344 | } 345 | 346 | cmd := set.GetRunArguments() 347 | labels := createCapitanContainerLabels(set, cmd) 348 | cmd = append(labels, cmd...) 349 | 350 | cmd = append([]interface{}{"create"}, cmd...) 
351 | if err := set.launchDaemonCommand(cmd); err != nil { 352 | return err 353 | } 354 | 355 | return set.Hooks.Run("after.create", set) 356 | } 357 | 358 | // Run a container 359 | func (set *Container) Run(attach bool, dryRun bool, wg *sync.WaitGroup) error { 360 | set.Action = Run 361 | 362 | ContainerInfoLog(set.Name,"Running...") 363 | if dryRun { 364 | return nil 365 | } 366 | if err := set.Hooks.Run("before.run", set); err != nil { 367 | return err 368 | } 369 | 370 | cmd := set.GetRunArguments() 371 | labels := createCapitanContainerLabels(set, cmd) 372 | cmd = append(labels, cmd...) 373 | 374 | if set.Remove { 375 | if err := set.launchWithRmInForeground(cmd); err != nil { 376 | return err 377 | } 378 | } else if attach { 379 | if err := set.launchInForeground(cmd, wg); err != nil { 380 | return err 381 | } 382 | } else { 383 | cmd = append([]interface{}{"run", "-d"}, cmd...) 384 | if err := set.launchDaemonCommand(cmd); err != nil { 385 | return err 386 | } 387 | } 388 | 389 | return set.Hooks.Run("after.run", set) 390 | } 391 | 392 | func (set *Container) launchDaemonCommand(cmd []interface{}) error { 393 | var ( 394 | ses *shellsession.ShellSession 395 | err error 396 | ) 397 | ses = NewContainerShellSession(set) 398 | 399 | concStr := "docker " 400 | for _, arg := range cmd { 401 | concStr += fmt.Sprintf("%s", arg) + " " 402 | } 403 | concStr = strings.Trim(concStr, " ") 404 | 405 | err = ses.Command("bash", "-c", concStr).Run() 406 | return err 407 | } 408 | 409 | func (set *Container) startLoggedCommand(cmd []interface{}) (*shellsession.ShellSession, error) { 410 | ses := NewContainerShellSession(set) 411 | 412 | color := nextColor() 413 | ses.Stdout = NewContainerLogWriter(os.Stdout, set.Name, color) 414 | ses.Stderr = NewContainerLogWriter(os.Stderr, set.Name, color) 415 | 416 | concStr := "docker " 417 | for _, arg := range cmd { 418 | concStr += fmt.Sprintf("%s", arg) + " " 419 | } 420 | concStr = strings.Trim(concStr, " ") 421 | 422 | err := 
ses.Command("bash", "-c", concStr).Start() 423 | 424 | return ses, err 425 | } 426 | 427 | // Create docker arg slice from container options 428 | func (set *Container) GetRunArguments() []interface{} { 429 | imageName := set.Name 430 | if len(set.Image) > 0 { 431 | imageName = set.Image 432 | } 433 | 434 | var linkArgs = make([]interface{}, 0, len(set.Links)*2) 435 | for _, link := range set.Links { 436 | linkStr := link.Container 437 | if link.Alias != "" { 438 | linkStr += ":" + link.Alias 439 | } 440 | linkArgs = append(linkArgs, "--link", linkStr) 441 | } 442 | 443 | var volumesFromArgs = make([]interface{}, 0, len(set.VolumesFrom)*2) 444 | for _, vol := range set.VolumesFrom { 445 | volumesFromArgs = append(volumesFromArgs, "--volumes-from", vol) 446 | } 447 | 448 | cmd := append([]interface{}{"--name", set.Name}, helpers.ToInterfaceSlice(set.ContainerArgs)...) 449 | cmd = append(cmd, linkArgs...) 450 | cmd = append(cmd, volumesFromArgs...) 451 | cmd = append(cmd, imageName) 452 | cmd = append(cmd, helpers.ToInterfaceSlice(set.Command)...) 
453 | return cmd 454 | } 455 | 456 | func (set *Container) Attach(wg *sync.WaitGroup) error { 457 | var ( 458 | err error 459 | ses *shellsession.ShellSession 460 | ) 461 | if ses, err = set.startLoggedCommand(append([]interface{}{"attach", "--sig-proxy=false"}, set.Name)); err != nil { 462 | return err 463 | } 464 | wg.Add(1) 465 | 466 | go func(name string) { 467 | ses.Wait() 468 | wg.Done() 469 | }(set.Name) 470 | return nil 471 | } 472 | 473 | // Start a given container 474 | // TODO needs to respect scale 475 | func (set *Container) Start(attach bool, wg *sync.WaitGroup) error { 476 | var ( 477 | err error 478 | ) 479 | set.Action = Start 480 | if set.State.Running { 481 | ContainerInfoLog(set.Name, "Already running.") 482 | if attach { 483 | if err = set.Attach(wg); err != nil { 484 | return err 485 | } 486 | } 487 | return nil 488 | } 489 | 490 | if err = set.Hooks.Run("before.start", set); err != nil { 491 | return err 492 | } 493 | 494 | if err = set.launchDaemonCommand(append([]interface{}{"start"}, set.Name)); err != nil { 495 | return err 496 | } 497 | if attach { 498 | if err = set.Attach(wg); err != nil { 499 | return err 500 | } 501 | } 502 | 503 | if err := set.Hooks.Run("after.start", set); err != nil { 504 | return err 505 | } 506 | return nil 507 | } 508 | 509 | // Restart the container 510 | // TODO needs to respect scale 511 | func (set *Container) Restart(args []string) error { 512 | set.Action = Restart 513 | if err := set.Hooks.Run("before.start", set); err != nil { 514 | return err 515 | } 516 | args = append(args, set.Name) 517 | if _, err := helpers.RunCmd(append([]interface{}{"restart"}, helpers.ToInterfaceSlice(args)...)...); err != nil { 518 | return err 519 | } 520 | if err := set.Hooks.Run("after.start", set); err != nil { 521 | return err 522 | } 523 | return nil 524 | } 525 | 526 | // Returns a containers IP 527 | // TODO needs to respect scale 528 | func (set *Container) IPs() string { 529 | ses := sh.NewSession() 530 | ses.Stderr 
= ioutil.Discard
	out, err := ses.Command("docker", "inspect", "--type", "container", "--format", "{{range $i, $p := .NetworkSettings.Networks}}{{$p.IPAddress}}@{{$i}},{{end}}", set.Name).Output()
	if err != nil {
		return ""
	}
	// Output looks like "ip@network,ip@network,\n"; trim the trailing comma/newline.
	ip := strings.Trim(string(out), ",\n")
	return ip
}

// Start streaming a container's logs (tail 10, follow). The returned session
// has already been started; the caller waits on it.
// TODO needs to respect scale
func (set *Container) Logs() (*sh.Session, error) {
	color := nextColor()
	ses := sh.NewSession()

	if logger.GetLevel() == DebugLevel {
		ses.ShowCMD = true
	}
	ses.Command("docker", "logs", "--tail", "10", "-f", set.Name)

	ses.Stdout = NewContainerLogWriter(os.Stdout, set.Name, color)
	ses.Stderr = NewContainerLogWriter(os.Stderr, set.Name, color)

	err := ses.Start()
	return ses, err
}

// Kills the container, running the before.kill / after.kill hooks.
// FIX: no longer appends the container name onto the caller's args slice;
// appending into a slice shared across several containers (e.g. c.Args()
// reused in a loop) could mutate the caller's backing array. A fresh
// argument slice is built instead; the resulting command is unchanged.
// TODO needs to respect scale
func (set *Container) Kill(args []string) error {
	set.Action = Kill
	if err := set.Hooks.Run("before.kill", set); err != nil {
		return err
	}
	cmdArgs := append([]interface{}{"kill"}, helpers.ToInterfaceSlice(args)...)
	cmdArgs = append(cmdArgs, set.Name)
	if _, err := helpers.RunCmd(cmdArgs...); err != nil {
		return err
	}
	if err := set.Hooks.Run("after.kill", set); err != nil {
		return err
	}
	return nil

}

// Stops the container, running the before.stop / after.stop hooks.
// FIX: as with Kill, the caller's args slice is left unmodified.
// TODO needs to respect scale
func (set *Container) Stop(args []string) error {
	set.Action = Stop
	if err := set.Hooks.Run("before.stop", set); err != nil {
		return err
	}
	cmdArgs := append([]interface{}{"stop"}, helpers.ToInterfaceSlice(args)...)
	cmdArgs = append(cmdArgs, set.Name)
	if _, err := helpers.RunCmd(cmdArgs...); err != nil {
		return err
	}
	if err := set.Hooks.Run("after.stop", set); err != nil {
		return err
	}
	return nil
}

// Removes the container
// TODO needs
to respect scale 594 | func (set *Container) Rm(args []string) error { 595 | 596 | set.Action = Remove 597 | if err := set.Hooks.Run("before.rm", set); err != nil { 598 | return err 599 | } 600 | args = append(args, set.Name) 601 | if _, err := helpers.RunCmd(append([]interface{}{"rm"}, helpers.ToInterfaceSlice(args)...)...); err != nil { 602 | return err 603 | } 604 | if err := set.Hooks.Run("after.rm", set); err != nil { 605 | return err 606 | } 607 | return nil 608 | } 609 | -------------------------------------------------------------------------------- /example/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tutum/hello-world 2 | 3 | EXPOSE 2222 4 | -------------------------------------------------------------------------------- /example/bluegreen/capitan.cfg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PREFIX=dev 4 | cat <= maxAttempts { 60 | break 61 | } else { 62 | time.Sleep(interval) 63 | } 64 | } 65 | return false 66 | } 67 | 68 | //Get the id for a given image name 69 | func GetImageId(imageName string) string { 70 | ses := sh.NewSession() 71 | ses.Stderr = ioutil.Discard 72 | out, err := ses.Command("docker", "inspect", "--type", "image", "--format", "{{.Id}}", imageName).Output() 73 | if err != nil { 74 | return "" 75 | } 76 | imageId := strings.Trim(string(out), " \n") 77 | return imageId 78 | } 79 | 80 | //pull the image for a given image name 81 | func PullImage(imageName string) error { 82 | err := sh.Command("docker", "pull", imageName).Run() 83 | return err 84 | } 85 | 86 | // Get the image id for a given container 87 | func GetContainerImageId(name string) string { 88 | ses := sh.NewSession() 89 | ses.Stderr = ioutil.Discard 90 | out, err := ses.Command("docker", "inspect", "--type", "container", "--format", "{{.Image}}", name).Output() 91 | if err != nil { 92 | return "" 93 | } 94 | imageId := strings.Trim(string(out), " \n") 
95 | return imageId 96 | 97 | } 98 | 99 | // Checks if a container exists 100 | func ContainerExists(name string) bool { 101 | ses := sh.NewSession() 102 | ses.Stderr = ioutil.Discard 103 | out, err := ses.Command("docker", "inspect", "--format", "{{.State.Running}}", name).Output() 104 | if err != nil { 105 | return false 106 | } 107 | if strings.Trim(string(out), " \n") == "" { 108 | return false 109 | } 110 | return true 111 | 112 | } 113 | 114 | // Check if a container is running 115 | func ContainerIsRunning(name string) bool { 116 | ses := sh.NewSession() 117 | ses.Stderr = ioutil.Discard 118 | out, err := ses.Command("docker", "inspect", "--format", "{{.State.Running}}", name).Output() 119 | if err != nil { 120 | return false 121 | } 122 | if strings.Trim(string(out), " \n") == "true" { 123 | return true 124 | } 125 | return false 126 | } 127 | 128 | // Helper to run a docker command 129 | func RunCmd(args ...interface{}) (out []byte, err error) { 130 | ses := sh.NewSession() 131 | 132 | if logger.GetLevel() == DebugLevel { 133 | ses.ShowCMD = true 134 | } 135 | 136 | out, err = ses.Command("docker", args...).Output() 137 | Debug.Println(string(out)) 138 | if err != nil { 139 | return out, errors.New("Error running docker command:" + err.Error()) 140 | } 141 | return out, nil 142 | } 143 | 144 | // Get the value of the label used to record the run 145 | // arguments used when creating the container 146 | func GetContainerUniqueLabel(containerName string) string { 147 | return getLabel(UniqueLabelName, containerName) 148 | } 149 | 150 | // Get the value of the label used to record the run 151 | // service name (for scaling) 152 | func GetContainerServiceNameLabel(containerName string) string { 153 | return getLabel(ServiceLabelName, containerName) 154 | } 155 | 156 | func RenameContainer(currentName string, newName string) error { 157 | ses := sh.NewSession() 158 | ses.Stderr = ioutil.Discard 159 | _, err := ses.Command("docker", "rename", currentName, 
newName).Output() 160 | return err 161 | } 162 | 163 | func getLabel(label string, container string) string { 164 | ses := sh.NewSession() 165 | ses.Stderr = ioutil.Discard 166 | out, err := ses.Command("docker", "inspect", "--type", "container", "--format", "{{.Config.Labels."+label+"}}", container).Output() 167 | if err != nil { 168 | return "" 169 | } 170 | value := strings.Trim(string(out), " \n") 171 | return value 172 | } 173 | 174 | type ServiceState struct { 175 | ID string 176 | Name string 177 | ServiceName string 178 | InstanceNum int 179 | Color string 180 | Running bool 181 | ArgsHash string 182 | } 183 | 184 | func GetProjectState(projName string, projSep string) (svcs map[string]*ServiceState, err error) { 185 | ses := sh.NewSession() 186 | out, err := ses.Command("docker", 187 | "ps", 188 | "-af", 189 | fmt.Sprintf("label=%s=%s", ProjectLabelName, projName), 190 | "--format", 191 | fmt.Sprintf(`{{.ID}}\t{{.Names}}\t{{.Label "%s"}}\t{{.Label "%s"}}\t{{.Label "%s"}}\t{{.Status}}\t{{.Label "%s"}}`, ColorLabelName, ServiceLabelName, ContainerNumberLabelName, UniqueLabelName)).Output() 192 | if err != nil { 193 | return 194 | } 195 | if len(out) == 0 { 196 | return 197 | } 198 | 199 | out = bytes.Trim(out, "\n") 200 | 201 | Debug.Println(string(out)) 202 | 203 | svcs = make(map[string]*ServiceState, 0) 204 | for _, line := range bytes.Split(out, []byte{'\n'}) { 205 | lineParts := bytes.Split(line, []byte{'\t'}) 206 | 207 | if len(lineParts) < 2 { 208 | continue 209 | } 210 | 211 | id := string(lineParts[0]) 212 | names := string(lineParts[1]) 213 | 214 | var color string 215 | if len(lineParts) > 2 { 216 | color = string(lineParts[2]) 217 | } 218 | if color == "" { 219 | color = "blue" 220 | } 221 | 222 | var serviceName string 223 | if len(lineParts) > 3 { 224 | serviceName = string(lineParts[3]) 225 | } 226 | 227 | var instanceNum int 228 | if len(lineParts) > 4 { 229 | if instanceNum, err = strconv.Atoi(string(lineParts[4])); err != nil { 230 | 
Warning.Println("Instance number label missing, parsing from name") 231 | if instanceNum, err = GetNumericSuffix(names, projSep); err != nil { 232 | return nil, errors.New("Failed to parse instance number for container: " + names) 233 | } 234 | } 235 | } 236 | 237 | var running = false 238 | if len(lineParts) > 5 { 239 | if bytes.HasPrefix(bytes.TrimSpace(lineParts[5]), []byte("Up")){ 240 | running = true 241 | } 242 | } 243 | 244 | var argsHash string 245 | if len(lineParts) > 6 { 246 | argsHash = string(lineParts[6]) 247 | } 248 | 249 | name := filepath.Base(names) 250 | svcs[serviceName + projSep + strconv.Itoa(instanceNum)] = &ServiceState{ 251 | ID: id, 252 | Name: name, 253 | ServiceName: serviceName, 254 | InstanceNum: instanceNum, 255 | Color: color, 256 | Running: running, 257 | ArgsHash : argsHash, 258 | } 259 | } 260 | return 261 | } 262 | 263 | -------------------------------------------------------------------------------- /helpers/slicehelp.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import "fmt" 4 | 5 | func ToStringSlice(data []interface{}) (out []string) { 6 | out = make([]string, len(data)) 7 | for i, item := range data { 8 | out[i] = fmt.Sprintf("%s", item) 9 | } 10 | return 11 | } 12 | 13 | func ToInterfaceSlice(data []string) (out []interface{}) { 14 | out = make([]interface{}, len(data)) 15 | for i, item := range data { 16 | out[i] = item 17 | } 18 | return 19 | } 20 | -------------------------------------------------------------------------------- /helpers/stringhelp.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "math/rand" 5 | "strconv" 6 | "strings" 7 | "time" 8 | "fmt" 9 | "crypto/md5" 10 | ) 11 | 12 | var src = rand.NewSource(time.Now().UnixNano()) 13 | 14 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 15 | const ( 16 | letterIdxBits = 6 // 6 bits to 
represent a letter index 17 | letterIdxMask = 1<= 0; { 26 | if remain == 0 { 27 | cache, remain = src.Int63(), letterIdxMax 28 | } 29 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 30 | b[i] = letterBytes[idx] 31 | i-- 32 | } 33 | cache >>= letterIdxBits 34 | remain-- 35 | } 36 | 37 | return string(b) 38 | } 39 | 40 | func GetNumericSuffix(name string, sep string) (int, error) { 41 | namePts := strings.Split(name, sep) 42 | instNumStr := namePts[len(namePts)-1] 43 | return strconv.Atoi(instNumStr) 44 | } 45 | 46 | func HashInterfaceSlice(args []interface{}) string { 47 | return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("'%s'", args)))) 48 | } 49 | -------------------------------------------------------------------------------- /logger/containerlog.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/mgutz/ansi" 7 | "io" 8 | "log" 9 | "strconv" 10 | ) 11 | 12 | type ContainerLogWriter struct { 13 | *log.Logger 14 | colorCode []byte 15 | } 16 | 17 | var ( 18 | resetCode = []byte(ansi.ColorCode("reset")) 19 | ) 20 | 21 | func NewContainerLogWriter(out io.Writer, containerName string, color string) *ContainerLogWriter { 22 | 23 | conOut := log.New(out, 24 | ansi.Color(fmt.Sprintf("%-"+strconv.Itoa(LongestContainerName)+"s| ", containerName), color), 25 | 0) 26 | return &ContainerLogWriter{ 27 | Logger: conOut, 28 | colorCode: []byte(ansi.ColorCode(color)), 29 | } 30 | } 31 | 32 | func (w *ContainerLogWriter) Write(b []byte) (int, error) { 33 | toPrint := bytes.Split(bytes.Trim(b, "\n"), []byte{'\n'}) 34 | for _, line := range toPrint { 35 | w.Printf("%s%s%s", w.colorCode, line, resetCode) 36 | } 37 | return len(b), nil 38 | } 39 | -------------------------------------------------------------------------------- /logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 
4 | "io/ioutil" 5 | "log" 6 | "os" 7 | "fmt" 8 | "strconv" 9 | ) 10 | 11 | var ( 12 | Debug *log.Logger 13 | Info *log.Logger 14 | Warning *log.Logger 15 | Error *log.Logger 16 | 17 | //logFormat int = log.Ldate | log.Ltime | log.Lshortfile 18 | logFormat int = 0 19 | level = InfoLevel 20 | LongestContainerName int 21 | ) 22 | 23 | type LogLevel int 24 | 25 | const ( 26 | DebugLevel LogLevel = 0 27 | InfoLevel LogLevel = 1 28 | WarnLevel LogLevel = 2 29 | ErrorLevel LogLevel = 3 30 | ) 31 | 32 | func init() { 33 | 34 | Debug = log.New(ioutil.Discard, 35 | "DEBUG: ", 36 | logFormat) 37 | 38 | Info = log.New(os.Stdout, 39 | "", 40 | logFormat) 41 | 42 | Warning = log.New(os.Stdout, 43 | "WARN: ", 44 | logFormat) 45 | 46 | Error = log.New(os.Stderr, 47 | "ERR: ", 48 | logFormat) 49 | } 50 | 51 | func GetLevel() LogLevel { 52 | return level 53 | } 54 | 55 | func ContainerInfoLog(name string, msgs ...interface{}) { 56 | var lenStr = strconv.Itoa(LongestContainerName) 57 | var strs = []interface{}{fmt.Sprintf("%-"+lenStr+"s:", name)} 58 | Info.Println(append(strs, msgs...)...) 59 | } 60 | 61 | func SetDebug() { 62 | level = DebugLevel 63 | Debug = log.New(os.Stdout, 64 | "DEBUG: ", 65 | logFormat) 66 | } 67 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/byrnedo/capitan/container" 5 | . 
"github.com/byrnedo/capitan/logger" 6 | "github.com/codegangsta/cli" 7 | "os" 8 | ) 9 | 10 | var ( 11 | command string 12 | args []string 13 | verboseLog bool 14 | dryRun bool 15 | attach bool 16 | filter string 17 | ) 18 | 19 | func main() { 20 | app := cli.NewApp() 21 | app.Name = "capitan" 22 | app.Usage = "Deploy and orchestrate docker containers" 23 | app.Version = VERSION 24 | 25 | app.Flags = []cli.Flag{ 26 | cli.StringFlag{ 27 | Name: "cmd,c", 28 | Value: "./capitan.cfg.sh", 29 | Usage: "Command to obtain config from", 30 | Destination: &command, 31 | }, 32 | cli.BoolFlag{ 33 | Name: "debug,d", 34 | Usage: "Print extra log messages", 35 | Destination: &verboseLog, 36 | }, 37 | cli.BoolFlag{ 38 | Name: "dry-run,dry", 39 | Usage: "Preview outcome, no changes will be made", 40 | Destination: &dryRun, 41 | }, 42 | cli.StringFlag{ 43 | Name: "filter,f", 44 | Value: "", 45 | Usage: "Filter to run action on a specific container only", 46 | Destination: &filter, 47 | }, 48 | } 49 | 50 | app.Before = func(c *cli.Context) error { 51 | if verboseLog { 52 | SetDebug() 53 | } 54 | 55 | if dryRun { 56 | Info.Printf("Previewing changes...\n\n") 57 | } 58 | 59 | args = c.Args() 60 | return nil 61 | } 62 | 63 | app.Action = func(c *cli.Context) error { 64 | cli.ShowAppHelp(c) 65 | return nil 66 | } 67 | 68 | app.Commands = []cli.Command{ 69 | { 70 | Name: "up", 71 | Aliases: []string{}, 72 | Usage: "Create then run or update containers", 73 | Action: func(c *cli.Context) error { 74 | //first get settings 75 | settings := getSettings() 76 | settings.LaunchSignalWatcher() 77 | if !settings.RunHook("before.up") { 78 | os.Exit(1) 79 | } 80 | if err := settings.ContainerCleanupList.CapitanRm([]string{"-f"}, dryRun); err != nil { 81 | Warning.Println("Failed to scale down containers:", err) 82 | } 83 | if err := settings.ContainerList.CapitanUp(attach, dryRun); err != nil { 84 | Error.Println("Up failed:", err) 85 | os.Exit(1) 86 | } 87 | if !settings.RunHook("after.up") { 88 | 
os.Exit(1) 89 | } 90 | 91 | return nil 92 | 93 | }, 94 | Flags: []cli.Flag{ 95 | cli.BoolFlag{ 96 | Name: "attach,a", 97 | Usage: "attach to container output", 98 | Destination: &attach, 99 | }, 100 | }, 101 | }, 102 | { 103 | Name: "create", 104 | Aliases: []string{}, 105 | Usage: "Create containers, but don't run them", 106 | Action: func(c *cli.Context) error { 107 | settings := getSettings() 108 | if !settings.RunHook("before.create") { 109 | os.Exit(1) 110 | } 111 | if err := settings.ContainerCleanupList.CapitanRm([]string{"-f"}, dryRun); err != nil { 112 | Warning.Println("Failed to scale down containers:", err) 113 | } 114 | if err := settings.ContainerList.CapitanCreate(dryRun); err != nil { 115 | Error.Println("Create failed:", err) 116 | os.Exit(1) 117 | } 118 | if !settings.RunHook("after.create") { 119 | os.Exit(1) 120 | } 121 | return nil 122 | }, 123 | }, 124 | { 125 | Name: "start", 126 | Aliases: []string{}, 127 | Usage: "Start stopped containers", 128 | Action: func(c *cli.Context) error { 129 | settings := getSettings() 130 | settings.LaunchSignalWatcher() 131 | if !settings.RunHook("before.start") { 132 | os.Exit(1) 133 | } 134 | if err := settings.ContainerCleanupList.CapitanRm([]string{"-f"}, dryRun); err != nil { 135 | Warning.Println("Failed to scale down containers:", err) 136 | } 137 | if err := settings.ContainerList.CapitanStart(attach, dryRun); err != nil { 138 | Error.Println("Start failed:", err) 139 | os.Exit(1) 140 | } 141 | if !settings.RunHook("after.start") { 142 | os.Exit(1) 143 | } 144 | 145 | return nil 146 | }, 147 | Flags: []cli.Flag{ 148 | cli.BoolFlag{ 149 | Name: "attach,a", 150 | Usage: "attach to container output", 151 | Destination: &attach, 152 | }, 153 | }, 154 | }, 155 | { 156 | Name: "scale", 157 | Aliases: []string{}, 158 | Usage: "Number of instances to run of container", 159 | SkipFlagParsing: true, 160 | Action: func(c *cli.Context) error { 161 | settings := getSettings() 162 | if 
!settings.RunHook("before.scale") { 163 | os.Exit(1) 164 | } 165 | if err := settings.ContainerCleanupList.Filter(func(i *container.Container) bool { 166 | return i.ServiceType == c.Args().Get(0) 167 | }).CapitanRm([]string{"-f"}, dryRun); err != nil { 168 | Warning.Println("Failed to scale down containers:", err) 169 | } 170 | if err := settings.ContainerList.Filter(func(i *container.Container) bool { 171 | return i.ServiceType == c.Args().Get(0) 172 | }).CapitanUp(false, dryRun); err != nil { 173 | Error.Println("Scale failed:", err) 174 | os.Exit(1) 175 | } 176 | if !settings.RunHook("after.scale") { 177 | os.Exit(1) 178 | } 179 | return nil 180 | }, 181 | }, 182 | { 183 | Name: "restart", 184 | Aliases: []string{}, 185 | Usage: "Restart containers", 186 | SkipFlagParsing: true, 187 | Action: func(c *cli.Context) error { 188 | settings := getSettings() 189 | if !settings.RunHook("before.restart") { 190 | os.Exit(1) 191 | } 192 | if err := settings.ContainerCleanupList.CapitanRm([]string{"-f"}, dryRun); err != nil { 193 | Warning.Println("Failed to scale down containers:", err) 194 | } 195 | if err := settings.ContainerList.CapitanRestart(c.Args(), dryRun); err != nil { 196 | Error.Println("Restart failed:", err) 197 | os.Exit(1) 198 | } 199 | if !settings.RunHook("after.restart") { 200 | os.Exit(1) 201 | } 202 | return nil 203 | }, 204 | }, 205 | { 206 | Name: "stop", 207 | Aliases: []string{}, 208 | Usage: "Stop running containers", 209 | SkipFlagParsing: true, 210 | Action: func(c *cli.Context) error { 211 | settings := getSettings() 212 | if !settings.RunHook("before.stop") { 213 | os.Exit(1) 214 | } 215 | combined := append(settings.ContainerList, settings.ContainerCleanupList...) 
216 | if err := combined.CapitanStop(c.Args(), dryRun); err != nil { 217 | Error.Println("Stop failed:", err) 218 | os.Exit(1) 219 | } 220 | if !settings.RunHook("after.stop") { 221 | os.Exit(1) 222 | } 223 | 224 | return nil 225 | }, 226 | }, 227 | { 228 | Name: "kill", 229 | Aliases: []string{}, 230 | Usage: "Kill running containers using SIGKILL or a specified signal", 231 | SkipFlagParsing: true, 232 | Action: func(c *cli.Context) error { 233 | settings := getSettings() 234 | if !settings.RunHook("before.kill") { 235 | os.Exit(1) 236 | } 237 | combined := append(settings.ContainerList, settings.ContainerCleanupList...) 238 | if err := combined.CapitanKill(c.Args(), dryRun); err != nil { 239 | Error.Println("Kill failed:", err) 240 | os.Exit(1) 241 | } 242 | if !settings.RunHook("after.kill") { 243 | os.Exit(1) 244 | } 245 | return nil 246 | }, 247 | }, 248 | { 249 | Name: "rm", 250 | Aliases: []string{}, 251 | Usage: "Remove stopped containers", 252 | SkipFlagParsing: true, 253 | Action: func(c *cli.Context) error { 254 | settings := getSettings() 255 | if !settings.RunHook("before.rm") { 256 | os.Exit(1) 257 | } 258 | combined := append(settings.ContainerList, settings.ContainerCleanupList...) 
259 | if err := combined.CapitanRm(c.Args(), dryRun); err != nil { 260 | Error.Println("Rm failed:", err) 261 | os.Exit(1) 262 | } 263 | if !settings.RunHook("after.rm") { 264 | os.Exit(1) 265 | } 266 | return nil 267 | }, 268 | }, 269 | { 270 | Name: "ps", 271 | Aliases: []string{}, 272 | Usage: "Show container status", 273 | SkipFlagParsing: true, 274 | Action: func(c *cli.Context) error { 275 | settings := getSettings() 276 | if err := settings.CapitanPs(c.Args()); err != nil { 277 | Error.Println("Ps failed:", err) 278 | os.Exit(1) 279 | } 280 | 281 | return nil 282 | }, 283 | }, 284 | { 285 | Name: "ip", 286 | Aliases: []string{}, 287 | Usage: "Show container ip addresses", 288 | SkipFlagParsing: true, 289 | Action: func(c *cli.Context) error { 290 | settings := getSettings() 291 | if err := settings.ContainerList.CapitanIP(); err != nil { 292 | Error.Println("IP failed:", err) 293 | os.Exit(1) 294 | } 295 | return nil 296 | }, 297 | }, 298 | { 299 | Name: "build", 300 | Aliases: []string{}, 301 | Usage: "Build any containers with 'build' flag set", 302 | Action: func(c *cli.Context) error { 303 | settings := getSettings() 304 | if !settings.RunHook("before.build") { 305 | os.Exit(1) 306 | } 307 | if err := settings.ContainerList.CapitanBuild(dryRun); err != nil { 308 | Error.Println("Build failed:", err) 309 | os.Exit(1) 310 | } 311 | if !settings.RunHook("after.build") { 312 | os.Exit(1) 313 | } 314 | return nil 315 | }, 316 | }, 317 | { 318 | Name: "pull", 319 | Aliases: []string{}, 320 | Usage: "Pull all images defined in project", 321 | Action: func(c *cli.Context) error { 322 | settings := getSettings() 323 | if err := settings.ContainerList.CapitanPull(dryRun); err != nil { 324 | Error.Println("Pull failed:", err) 325 | os.Exit(1) 326 | } 327 | return nil 328 | }, 329 | }, 330 | { 331 | Name: "logs", 332 | Aliases: []string{}, 333 | Usage: "stream container logs", 334 | Action: func(c *cli.Context) error { 335 | settings := getSettings() 336 | combined 
:= append(settings.ContainerList, settings.ContainerCleanupList...) 337 | if err := combined.CapitanLogs(); err != nil { 338 | Error.Println("Logs failed:", err) 339 | os.Exit(1) 340 | } 341 | return nil 342 | }, 343 | }, 344 | { 345 | Name: "stats", 346 | Aliases: []string{}, 347 | Usage: "stream stats for all containers in project", 348 | Action: func(c *cli.Context) error { 349 | settings := getSettings() 350 | combined := append(settings.ContainerList, settings.ContainerCleanupList...) 351 | if err := combined.CapitanStats(); err != nil { 352 | Error.Println("Stats failed:", err) 353 | os.Exit(1) 354 | } 355 | return nil 356 | }, 357 | }, 358 | { 359 | Name: "show", 360 | Aliases: []string{}, 361 | Usage: "Prints config as interpreted by Capitan", 362 | Action: func(c *cli.Context) error { 363 | settings := getSettings() 364 | if err := settings.CapitanShow(); err != nil { 365 | Error.Println("Show failed:", err) 366 | os.Exit(1) 367 | } 368 | return nil 369 | }, 370 | }, 371 | } 372 | app.Run(os.Args) 373 | } 374 | 375 | func getSettings() (settings *ProjectConfig) { 376 | var ( 377 | err error 378 | ) 379 | runner := NewSettingsParser(command, args, filter) 380 | if settings, err = runner.Run(); err != nil { 381 | Error.Printf("Error running command: %s\n", err) 382 | os.Exit(1) 383 | } 384 | if attach { 385 | settings.IsInteractive = true 386 | } 387 | return settings 388 | } 389 | -------------------------------------------------------------------------------- /output.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/byrnedo/capitan/c7d11c10f63a6a99f6c86ecd55e2b0a4d6648ab0/output.gif -------------------------------------------------------------------------------- /projectconfig.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/byrnedo/capitan/consts" 6 | "github.com/byrnedo/capitan/container" 7 
// projectShowTemplate renders the project-wide section of the `show`
// command: project name, global blue/green mode and global hooks.
const projectShowTemplate = `-------------------------------------------------
Project Name: {{.ProjectName}}
Blue/Green Mode (Global): {{.BlueGreenMode}}
Hooks (Global): {{range $key, $val := .Hooks}}
{{$key}}
{{range $hook := $val.Scripts}}{{$hook}}
{{end}}{{end}}
-------------------------------------------------
`

// containerShowTemplate renders one container's interpreted settings
// for the `show` command (state, image, ordering, links, hooks, args).
const containerShowTemplate = `{{.Name}}:
Name: {{.ServiceName}}
State:
ID: {{.State.ID}}
Color: {{.State.Color}}
Running: {{.State.Running}}
Hash: {{.State.ArgsHash}}
Type: {{.ServiceType}}
Image: {{.Image}}{{if .Build}}
Build: {{.Build}}{{end}}
Order: {{.Placement}}
Blue/Green Mode: {{.BlueGreenMode}}
Links: {{range $ind, $link := .Links}}
{{$link.Container}}{{if $link.Alias}}:{{$link.Alias}}{{end}}{{end}}
Hooks: {{range $key, $val := .Hooks}}
{{$key}}
{{range $hook := $val.Scripts}}{{$hook}}
{{end}}{{end}}
Scale: {{.Scale}}
Volumes From: {{range $ind, $val := .VolumesFrom}}
{{$val}}{{end}}
Run Args: {{range $ind, $val := .RunArguments}}
{{$val}}{{end}}
-------------------------------------------------
`

var (
	// allDone is signalled by the signal-watcher goroutines when
	// shutdown handling has finished; attached commands (up/start)
	// block on it before returning.
	allDone = make(chan bool, 1)
)

// ProjectConfig is the fully parsed configuration for a capitan project.
type ProjectConfig struct {
	ProjectName      string // prefix used when naming containers
	ProjectSeparator string // separator between project name and service name
	BlueGreenMode    bool   // global blue/green deployment toggle
	IsInteractive    bool   // set when running attached (see getSettings)
	ContainersState  []*helpers.ServiceState
	ContainerList    SettingsList // containers operated on by the main commands
	// ContainerCleanupList — presumably containers only swept up by
	// cleanup-style commands; logs/stats combine it with ContainerList.
	// TODO confirm against the config parser.
	ContainerCleanupList SettingsList
	Hooks                Hooks // project-level hook scripts
}

// Hook is the set of scripts bound to one named hook point, plus the
// shell session of the script currently executing (if any).
type Hook struct {
	Scripts []string
	// Ses holds the running session so the signal watcher can kill a
	// long-running hook script on SIGINT/SIGTERM.
	Ses *shellsession.ShellSession
}

// Hooks maps a hook name to its definition.
type Hooks map[string]*Hook
specific container 78 | func (h Hooks) Run(hookName string, settings *ProjectConfig) error { 79 | var ( 80 | hook *Hook 81 | found bool 82 | err error 83 | ) 84 | 85 | if hook, found = h[hookName]; !found { 86 | return nil 87 | } 88 | 89 | for _, script := range hook.Scripts { 90 | hook.Ses = shellsession.NewShellSession(func(s *shellsession.ShellSession){ 91 | s.SetEnv("CAPITAN_PROJECT_NAME", settings.ProjectName) 92 | }) 93 | hook.Ses.SetEnv("CAPITAN_HOOK_NAME", hookName) 94 | 95 | hook.Ses.Command("bash", "-c", script) 96 | 97 | hook.Ses.Stdout = os.Stdout 98 | hook.Ses.Stderr = os.Stderr 99 | hook.Ses.Stdin = os.Stdin 100 | 101 | if err = hook.Ses.Run(); err != nil { 102 | return err 103 | } 104 | } 105 | return nil 106 | } 107 | 108 | type SettingsList []*container.Container 109 | 110 | func (s SettingsList) Len() int { 111 | return len(s) 112 | } 113 | func (s SettingsList) Swap(i, j int) { 114 | s[i], s[j] = s[j], s[i] 115 | } 116 | func (s SettingsList) Less(i, j int) bool { 117 | if s[i].Placement == s[j].Placement { 118 | iSuf, iErr := helpers.GetNumericSuffix(s[i].Name, s[i].ProjectNameSeparator) 119 | jSuf, jErr := helpers.GetNumericSuffix(s[j].Name, s[i].ProjectNameSeparator) 120 | if iErr == nil && jErr == nil { 121 | return iSuf < jSuf 122 | } else { 123 | return sort.StringsAreSorted([]string{s[i].Name, s[j].Name}) 124 | } 125 | } 126 | return s[i].Placement < s[j].Placement 127 | } 128 | 129 | func (s SettingsList) Filter(cb func(*container.Container) bool) (filtered SettingsList) { 130 | filtered = make(SettingsList, 0) 131 | for _, item := range s { 132 | if cb(item) { 133 | filtered = append(filtered, item) 134 | } 135 | } 136 | return 137 | } 138 | 139 | func (settings *ProjectConfig) LaunchSignalWatcher() { 140 | 141 | var ( 142 | killBegan = make(chan bool, 1) 143 | killDone = make(chan bool, 1) 144 | stopDone = make(chan bool, 1) 145 | signalChannel = make(chan os.Signal) 146 | ) 147 | signal.Notify(signalChannel, os.Interrupt, 
syscall.SIGTERM) 148 | 149 | go func() { 150 | 151 | var ( 152 | killing bool 153 | ) 154 | 155 | for { 156 | select { 157 | case <-killBegan: 158 | killing = true 159 | case <-stopDone: 160 | if !killing { 161 | allDone <- true 162 | } 163 | case <-killDone: 164 | allDone <- true 165 | } 166 | } 167 | }() 168 | 169 | go func() { 170 | // var calls int 171 | for { 172 | sig := <-signalChannel 173 | switch sig { 174 | case os.Interrupt, syscall.SIGTERM: 175 | 176 | for _, con := range append(settings.ContainerCleanupList, settings.ContainerList...) { 177 | for _, hooks := range con.Hooks { 178 | if hooks.Ses != nil { 179 | Debug.Println("killing hook...") 180 | hooks.Ses.Kill(syscall.SIGKILL) 181 | } 182 | } 183 | } 184 | // if settings.IsInteractive { 185 | // calls++ 186 | // if calls == 1 { 187 | // go func() { 188 | // settings.ContainerList.CapitanStop(nil, false) 189 | // stopDone <- true 190 | // }() 191 | // } else if calls == 2 { 192 | // killBegan <- true 193 | // settings.ContainerList.CapitanKill(nil, false) 194 | // killDone <- true 195 | // } 196 | // } else { 197 | os.Exit(1) 198 | // } 199 | default: 200 | Debug.Println("Unhandled signal", sig) 201 | } 202 | } 203 | Info.Println("Done cleaning up") 204 | }() 205 | } 206 | 207 | func (settings *ProjectConfig) RunHook(hookName string) bool { 208 | if err:= settings.Hooks.Run(hookName,settings); err != nil { 209 | Error.Println("Hook failed:", err) 210 | return false 211 | } 212 | return true 213 | } 214 | 215 | func (settings *ProjectConfig) CapitanPs(args []string) error { 216 | 217 | allArgs := append([]interface{}{"ps"}, helpers.ToInterfaceSlice(args)...) 
218 | allArgs = append(allArgs, "-f", fmt.Sprintf("label=%s=%s", consts.ProjectLabelName, settings.ProjectName)) 219 | 220 | var ( 221 | err error 222 | out []byte 223 | ) 224 | if out, err = helpers.RunCmd(allArgs...); err != nil { 225 | return err 226 | } 227 | Info.Print(string(out)) 228 | return nil 229 | 230 | } 231 | 232 | func (settings *ProjectConfig) CapitanShow() error { 233 | var ( 234 | tmpl *template.Template 235 | err error 236 | ) 237 | if tmpl, err = template.New("projectStringer").Parse(projectShowTemplate); err != nil { 238 | return err 239 | } 240 | if err = tmpl.Execute(os.Stdout, settings); err != nil { 241 | return err 242 | } 243 | return settings.ContainerList.CapitanShow() 244 | } 245 | 246 | func newerImage(container string, image string) bool { 247 | 248 | conImage := helpers.GetContainerImageId(container) 249 | localImage := helpers.GetImageId(image) 250 | if conImage != "" && localImage != "" && conImage != localImage { 251 | return true 252 | } 253 | return false 254 | } 255 | 256 | func haveArgsChanged(container string, runArgs []interface{}) bool { 257 | 258 | uniqueLabel := helpers.HashInterfaceSlice(runArgs) 259 | if helpers.GetContainerUniqueLabel(container) != uniqueLabel { 260 | return true 261 | } 262 | return false 263 | // remove and restart 264 | 265 | } 266 | 267 | 268 | func (settings SettingsList) CapitanCreate(dryRun bool) error { 269 | sort.Sort(settings) 270 | 271 | for _, set := range settings { 272 | 273 | if set.Build != "" { 274 | ContainerInfoLog(set.Name, "Building image...") 275 | if ! dryRun { 276 | if err := set.BuildImage(); err != nil { 277 | return err 278 | } 279 | } 280 | } 281 | 282 | if helpers.GetImageId(set.Image) == "" { 283 | Warning.Printf("Capitan was unable to find image %s locally\n", set.Image) 284 | 285 | ContainerInfoLog(set.Name, "Pulling image...") 286 | if ! 
// The 'up' command
//
// Creates a container if it doesn't exist
// Starts a container if stopped
// Recreates a container if the container's image has a newer id locally
// OR if the command used to create the container is now changed (i.e.
// config has changed.
func (settings SettingsList) CapitanUp(attach bool, dryRun bool) error {
	sort.Sort(settings)

	// wg tracks attached container sessions so we can wait on them below.
	wg := sync.WaitGroup{}

	for _, set := range settings {
		var (
			err error
		)

		// Build the image first when the service defines a build context.
		if set.Build != "" {
			ContainerInfoLog(set.Name, "Building image...")
			if ! dryRun {
				if err := set.BuildImage(); err != nil {
					return err
				}
			}
		}

		// Pull the image when it is not present locally.
		if helpers.GetImageId(set.Image) == "" {
			Warning.Printf("Capitan was unable to find image %s locally\n", set.Image)

			ContainerInfoLog(set.Name, "Pulling image...")

			if ! dryRun {
				if err := helpers.PullImage(set.Image); err != nil {
					return err
				}
			}
		}

		//create new
		if !helpers.ContainerExists(set.Name) {
			if err = set.Run(attach, dryRun, &wg); err != nil {
				return err
			}
			continue
		}

		// disabling as this doesn't work with swarm (how do I know which node to look at??)
		//	if newerImage(set.Name, set.Image) {
		//		// remove and restart
		//		Info.Println("Removing (different image available):", set.Name)
		//		if err = set.RecreateAndRun(attach, dryRun, &wg); err != nil {
		//			return err
		//		}
		//
		//		continue
		//	}

		// The run arguments recorded on the container differ from what the
		// current config would produce: recreate (blue/green when enabled).
		if haveArgsChanged(set.Name, set.GetRunArguments()) {
			// remove and restart
			if set.BlueGreenMode == container.BGModeOn {
				ContainerInfoLog(set.Name, "Run arguments changed, doing blue-green redeploy...")
				if err = set.BlueGreenDeploy(attach, dryRun, &wg); err != nil {
					return err
				}
			} else {
				ContainerInfoLog(set.Name, "Removing (run arguments changed)")
				if err = set.RecreateAndRun(attach, dryRun, &wg); err != nil {
					return err
				}
			}
			continue
		}

		//attach if running
		if set.State.Running {
			ContainerInfoLog(set.Name, "Already running.")
			if attach {
				ContainerInfoLog(set.Name, "Attaching")
				if err := set.Attach(&wg); err != nil {
					return err
				}
			}
			continue
		}

		ContainerInfoLog(set.Name, "Starting...")

		if dryRun {
			continue
		}

		//start if stopped
		if err = set.Start(attach, &wg); err != nil {
			return err
		}
		continue

	}
	wg.Wait()
	// When attached, block until the signal watcher signals completion.
	if !dryRun && attach {
		<-allDone
	}
	return nil
}
set.Start(attach, &wg); err != nil { 425 | return err 426 | } 427 | } 428 | } 429 | wg.Wait() 430 | if !dryRun && attach { 431 | <-allDone 432 | } 433 | return nil 434 | } 435 | 436 | // Command to restart all containers 437 | func (settings SettingsList) CapitanRestart(args []string, dryRun bool) error { 438 | sort.Sort(settings) 439 | for _, set := range settings { 440 | 441 | ContainerInfoLog(set.Name, "Restarting") 442 | if !dryRun { 443 | if err := set.Restart(args); err != nil { 444 | return err 445 | } 446 | } 447 | } 448 | return nil 449 | } 450 | 451 | // Print all container IPs 452 | func (settings SettingsList) CapitanIP() error { 453 | sort.Sort(settings) 454 | for _, set := range settings { 455 | ips := set.IPs() 456 | ContainerInfoLog(set.Name, ips) 457 | } 458 | return nil 459 | } 460 | 461 | // Stream all container logs 462 | func (settings SettingsList) CapitanLogs() error { 463 | sort.Sort(settings) 464 | var wg sync.WaitGroup 465 | for _, set := range settings { 466 | var ( 467 | ses *sh.Session 468 | err error 469 | ) 470 | if ses, err = set.Logs(); err != nil { 471 | Error.Println("Error getting log for " + set.Name + ": " + err.Error()) 472 | continue 473 | } 474 | 475 | wg.Add(1) 476 | 477 | go func() { 478 | ses.Wait() 479 | wg.Done() 480 | }() 481 | 482 | } 483 | wg.Wait() 484 | return nil 485 | } 486 | 487 | // Stream all container stats 488 | func (settings SettingsList) CapitanStats() error { 489 | var ( 490 | args []interface{} 491 | ) 492 | sort.Sort(settings) 493 | 494 | args = make([]interface{}, len(settings)) 495 | 496 | for i, set := range settings { 497 | args[i] = set.Name 498 | } 499 | 500 | ses := sh.NewSession() 501 | ses.Command("docker", append([]interface{}{"stats"}, args...)...) 
502 | ses.Start() 503 | ses.Wait() 504 | return nil 505 | } 506 | 507 | // Kill all running containers in project 508 | func (settings SettingsList) CapitanKill(args []string, dryRun bool) error { 509 | sort.Sort(sort.Reverse(settings)) 510 | for _, set := range settings { 511 | if !set.State.Running { 512 | ContainerInfoLog(set.Name, "Already stopped") 513 | continue 514 | } 515 | ContainerInfoLog(set.Name, "Killing...") 516 | if !dryRun { 517 | if err := set.Kill(args); err != nil { 518 | return err 519 | } 520 | } 521 | } 522 | return nil 523 | } 524 | 525 | // Stops the containers in the project 526 | func (settings SettingsList) CapitanStop(args []string, dryRun bool) error { 527 | sort.Sort(sort.Reverse(settings)) 528 | for _, set := range settings { 529 | if !set.State.Running { 530 | ContainerInfoLog(set.Name, "Already stopped.") 531 | continue 532 | } 533 | ContainerInfoLog(set.Name, "Stopping...") 534 | if !dryRun { 535 | if err := set.Stop(args); err != nil { 536 | return err 537 | } 538 | } 539 | } 540 | return nil 541 | } 542 | 543 | // Remove all containers in project 544 | func (settings SettingsList) CapitanRm(args []string, dryRun bool) error { 545 | sort.Sort(sort.Reverse(settings)) 546 | for _, set := range settings { 547 | 548 | if helpers.ContainerExists(set.Name) { 549 | ContainerInfoLog(set.Name, "Removing....") 550 | if dryRun { 551 | continue 552 | } 553 | if err := set.Rm(args); err != nil { 554 | return err 555 | } 556 | } else { 557 | ContainerInfoLog(set.Name, "Container doesn't exist") 558 | } 559 | } 560 | return nil 561 | } 562 | 563 | // The build command 564 | func (settings SettingsList) CapitanBuild(dryRun bool) error { 565 | sort.Sort(settings) 566 | for _, set := range settings { 567 | if len(set.Build) == 0 { 568 | continue 569 | } 570 | ContainerInfoLog(set.Name, "Building...") 571 | if !dryRun { 572 | if err := set.BuildImage(); err != nil { 573 | return err 574 | } 575 | } 576 | 577 | } 578 | return nil 579 | } 580 | 581 | 
// The build command 582 | func (settings SettingsList) CapitanPull(dryRun bool) error { 583 | sort.Sort(settings) 584 | for _, set := range settings { 585 | if len(set.Build) > 0 || set.Image == "" { 586 | continue 587 | } 588 | ContainerInfoLog(set.Name, "Pulling ", set.Image, "...") 589 | if !dryRun { 590 | if err := helpers.PullImage(set.Image); err != nil { 591 | return err 592 | } 593 | } 594 | 595 | } 596 | return nil 597 | } 598 | 599 | func (settings SettingsList) CapitanShow() error { 600 | sort.Sort(settings) 601 | for _, set := range settings { 602 | var ( 603 | tmpl *template.Template 604 | err error 605 | ) 606 | if tmpl, err = template.New("containerStringer").Parse(containerShowTemplate); err != nil { 607 | return err 608 | } 609 | set.RunArguments = set.GetRunArguments() 610 | if err = tmpl.Execute(os.Stdout, set); err != nil { 611 | return err 612 | } 613 | 614 | } 615 | return nil 616 | } 617 | -------------------------------------------------------------------------------- /shellsession/shellsession.go: -------------------------------------------------------------------------------- 1 | package shellsession 2 | 3 | import ( 4 | "github.com/byrnedo/capitan/logger" 5 | "github.com/codeskyblue/go-sh" 6 | ) 7 | 8 | type ShellSession struct { 9 | *sh.Session 10 | } 11 | 12 | func NewShellSession(init func(*ShellSession)) *ShellSession { 13 | ses := ShellSession{sh.NewSession()} 14 | if logger.GetLevel() == logger.DebugLevel { 15 | ses.ShowCMD = true 16 | } 17 | init(&ses) 18 | return &ses 19 | } 20 | 21 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package main 2 | const VERSION = "0.24" 3 | --------------------------------------------------------------------------------