├── .gitignore ├── Gruntfile.coffee ├── LICENSE ├── README.md ├── bin └── vortex ├── doc └── examples │ ├── docker-helloworld │ ├── README.md │ └── vortex.json │ ├── expose-files │ ├── README.md │ ├── document.txt │ └── vortex.json │ ├── mongodb-helloworld │ ├── README.md │ ├── package.json │ └── vortex.json │ ├── nodejs-helloworld │ ├── README.md │ ├── app.js │ └── vortex.json │ ├── plugin-builtin │ ├── README.md │ ├── defaults.js │ └── vortex.json │ ├── puppet-helloworld │ ├── README.md │ ├── puppet │ │ └── manifests │ │ │ └── helloworld.pp │ └── vortex.json │ └── vortex-commander │ ├── README.md │ └── vortex.json ├── lib ├── actions.js ├── download.js ├── engine.js ├── index.js ├── manifest.js ├── plugins.js ├── provider_amazon.js ├── provider_virtualbox.js ├── providers.js ├── shell.js └── vortex.js ├── package.json └── src ├── actions.coffee ├── download.coffee ├── engine.coffee ├── index.coffee ├── manifest.coffee ├── plugins.coffee ├── provider_amazon.coffee ├── provider_virtualbox.coffee ├── providers.coffee ├── shell.coffee └── vortex.coffee /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | -------------------------------------------------------------------------------- /Gruntfile.coffee: -------------------------------------------------------------------------------- 1 | module.exports = (grunt) -> 2 | grunt.initConfig 3 | coffee: 4 | compile: 5 | files: [ 6 | {expand: true, cwd: 'src/', src: '*.coffee', dest: 'lib/', ext: '.js', filter: 'isFile'} 7 | ] 8 | watch: 9 | lib: 10 | files: ['lib/*'] 11 | tasks: ['coffee:compile'] 12 | 13 | grunt.loadNpmTasks 'grunt-contrib-watch' 14 | grunt.loadNpmTasks 'grunt-contrib-coffee' 15 | 16 | grunt.registerTask 'build', ['coffee:compile'] 17 | grunt.registerTask 'default', ['build'] 18 | 19 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2013 Websecurify 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Follow on Twitter](https://img.shields.io/twitter/follow/websecurify.svg?logo=twitter)](https://twitter.com/websecurify) 2 | 3 | 4 | _ _ __ ____ ____ ____ _ _ 5 | / )( \ / \( _ \(_ _)( __)( \/ ) 6 | \ \/ /( O )) / )( ) _) ) ( 7 | \__/ \__/(__\_) (__) (____)(_/\_) 8 | 9 | by Websecurify 10 | 11 | 12 | # Introduction 13 | 14 | Vortex is a virtual machine management tool. It is similar to [Vagrant](http://vagrantup.com/). The rationale for writing this tool is to enable better management of development and production infrastructure at the same time. 
We, [Websecurify](http://www.websecurify.com), could not easily achieve this with Vagrant so this tool was written to fill the gap. 15 | 16 | You can do the following things with Vortex: 17 | 18 | 1. Develop your application in a replicable dev environment. 19 | 2. Easily manage your application in the same configuration into a prod environment. 20 | 21 | Vortex removes any barriers from the time you start developing your application to the time it is already live and you need to maintain it. 22 | 23 | # Tool Philosophy 24 | 25 | It is essential to understand the key principle behind Vortex, which is to **always produce a replicable environment**. This sounds nice and simple but it gets deeper than this. 26 | 27 | What this means in practice is that virtual machines/nodes are disposable. In other words, they only exist fully provisioned and not in any other way. They also don't maintain any state. Once you halt a node, it is gone for good with all the data it was keeping within it. If you boot the node again it will launch a brand new instance. This is why there is no state. 28 | 29 | State is essentially a 3rd-class citizen in Vortex. You provide it only by attaching external storages or by integrating with other services from your application. This sounds like a very extreme way of dealing with things but it does solve a few hard problems like scalability and the application effectiveness against hardware and other types of failure. 30 | 31 | This philosophy is a constraint, which works in our favour and it is fully embraced in the design of the tool. 32 | 33 | # Tool Installation 34 | 35 | The easiest way to install Vortex is via node's npm. You need to have nodejs installed for this. Simply type the following command: 36 | 37 | npm install -g vortex 38 | 39 | An alternative approach is to just copy the source files and execute them manually through nodejs. There are plans to create standalone binary distributions if there is need for this.
40 | 41 | # Tool Usage 42 | 43 | You can use Vortex as a library or via the command line, which is more convenient. At the moment there are no docs on the API but for now you can just check the source code for inspiration. 44 | 45 | Here are a few examples of how to use Vortex via your shell: 46 | 47 | vortex status # shows the status of all nodes 48 | vortex boot # boots all nodes 49 | vortex halt # halts all nodes 50 | vortex provision # provision all nodes 51 | 52 | The following additional helper actions are also available: 53 | 54 | vortex up # boots and provisions a node 55 | vortex down # halts a node 56 | 57 | You can also specify which node you want to manipulate: 58 | 59 | vortex shell my-node-name # starts interactive session on the selected node 60 | vortex halt my-node-name # halts the selected node 61 | 62 | To get the complete list of actions just use `actions` 63 | 64 | vortex actions # get complete list of actions 65 | 66 | By default Vortex reads the configuration from `vortex.json` located inside the current working directory. However, you can specify an alternative location with the `-f|--file` option. For example: 67 | 68 | vortex -f path/to/folder # loads path/to/folder/vortex.json manifest 69 | vortex -f path/to/config.json # loads path/to/config.json manifest 70 | 71 | Verbose messages can be obtained by using the `-v|--verbose` flag, which can also be combined with the `-c|--colorize` flag for better visual aid. For example: 72 | 73 | vortex -vv # enables debug level logging 74 | vortex -vvv -c # enables silly level logging with colorization 75 | 76 | Vortex supports different providers to manage your virtual machines/nodes. Out of the box you have support for VirtualBox and Amazon. VirtualBox is the default provider. Here is an example of how to select a different provider: 77 | 78 | vortex --provider=Amazon boot # boots nodes into amazon ec2 79 | 80 | The default provisioner, Roost, can also be configured with some command-line options.
If you specify the `-d|--dry` flag the provisioner will only output information on what it will do but not perform any actions. This is useful if you are uncertain about the changes you are making to the roost manifests and you just want to check it out before doing it for real. For example: 81 | 82 | vortex --provider=Amazon -d provision my-sensitive-node # dry-runs the provisioner 83 | 84 | Here is another fun bit you can do. The shell action also accepts parameters which will be directly executed as commands. For example: 85 | 86 | vortex shell -- -- ls -la # will list the home folder 87 | 88 | You can apply commands to all nodes or the one you have specifically selected. 89 | 90 | # Vortex Manifest 91 | 92 | The Vortex manifest file is a simple JSON document. By default you are only required to specify the nodes you want in your configuration: 93 | 94 | { 95 | ... 96 | 97 | "nodes": { 98 | "my-node": { 99 | } 100 | }, 101 | 102 | ... 103 | } 104 | 105 | This is the simplest possible configuration, which is not useful for anything just yet. To make this configuration useful for booting an image in Amazon you need to supply additional information. This is how it is done: 106 | 107 | { 108 | ... 109 | 110 | "amazon": { 111 | "accessKeyId": "YOUR ACCESS KEY GOES HERE", 112 | "secretAccessKey": "YOUR SECRET KEY GOES HERE", 113 | "region": "A REGION SUCH AS us-west-1, us-west-2, etc GOES HERE" 114 | }, 115 | 116 | "nodes": { 117 | "ubuntu": { 118 | "amazon": { 119 | "imageId": "ami-2fb3201f", 120 | "securityGroups": ["default"], 121 | "keyName": "my-key", 122 | "privateKey": "path/to/my-key.pem", 123 | "username": "ubuntu" 124 | } 125 | } 126 | }, 127 | 128 | ... 129 | } 130 | 131 | Providing credentials inside configuration file is not always optimal but it saves you from typing longer and more complex commands. 
The reality of the situation is that you can do the following: 132 | 133 | ACCESS_KEY_ID=bish SECRET_ACCESS_KEY=bosh AWS_REGION=us-west-1 vortex --provider=Amazon boot 134 | 135 | The config file for this will be: 136 | 137 | { 138 | ... 139 | 140 | "nodes": { 141 | "ubuntu": { 142 | "amazon": { 143 | "imageId": "ami-2fb3201f", 144 | "securityGroups": ["default"], 145 | "keyName": "my-key", 146 | "privateKey": "path/to/my-key.pem", 147 | "username": "ubuntu" 148 | } 149 | } 150 | }, 151 | 152 | ... 153 | } 154 | 155 | The same properties can also be provided per-node if this is what you want. Underneath all of this sits the [aws-sdk](http://aws.amazon.com/sdkfornodejs/) for nodejs so all parameters are exactly the same as you will find in the SDK. 156 | 157 | VirtualBox is configured in the same way. The only difference is that you need to specify VirtualBox specific configuration. For example: 158 | 159 | { 160 | ... 161 | 162 | "nodes": { 163 | "ubuntu": { 164 | "amazon": { 165 | "imageId": "ami-2fb3201f", 166 | "securityGroups": ["default"], 167 | "keyName": "my-key", 168 | "privateKey": "path/to/my-key.pem", 169 | "username": "ubuntu" 170 | }, 171 | 172 | "virtualbox": { 173 | "username": "ubuntu", 174 | "password": "ubuntu", 175 | "vmId": "baseimage", 176 | "vmUrl": "http://path/to/baseimage.ova" 177 | } 178 | } 179 | }, 180 | 181 | ... 182 | } 183 | 184 | If you have a lot of nodes that are similar with minor differences you can move the configuration out of the node structure and specify it globally like such: 185 | 186 | { 187 | ... 188 | 189 | "amazon": { 190 | "imageId": "ami-2fb3201f", 191 | "securityGroups": ["default"], 192 | "keyName": "my-key", 193 | "privateKey": "path/to/my-key.pem", 194 | "username": "ubuntu" 195 | }, 196 | 197 | "virtualbox": { 198 | "username": "ubuntu", 199 | "password": "ubuntu", 200 | "vmId": "baseimage", 201 | "vmUrl": "http://path/to/baseimage.ova" 202 | }, 203 | 204 | ... 
205 | 206 | "nodes": { 207 | "node1": { 208 | "amazon": { 209 | "username": "node1" 210 | } 211 | }, 212 | "node2": { 213 | "amazon": { 214 | "username": "node2" 215 | } 216 | } 217 | }, 218 | 219 | ... 220 | } 221 | 222 | Last but not least, nodes can be launched in their own namespaces. Namespaces are useful when there is a lot of stuff going on and you just want to logically separate nodes into different groups (or soft-groups if you prefer). Here is an example: 223 | 224 | { 225 | ... 226 | 227 | "namespace": "my-config", 228 | 229 | ... 230 | 231 | "amazon": { 232 | "imageId": "ami-2fb3201f", 233 | "securityGroups": ["default"], 234 | "keyName": "my-key", 235 | "privateKey": "path/to/my-key.pem", 236 | "username": "ubuntu" 237 | }, 238 | 239 | "virtualbox": { 240 | "username": "ubuntu", 241 | "password": "ubuntu", 242 | "vmId": "baseimage", 243 | "vmUrl": "http://path/to/baseimage.ova" 244 | }, 245 | 246 | ... 247 | 248 | "nodes": { 249 | "node1": { 250 | "amazon": { 251 | "username": "node1" 252 | } 253 | }, 254 | "node2": { 255 | "amazon": { 256 | "username": "node2" 257 | } 258 | } 259 | }, 260 | 261 | ... 262 | } 263 | 264 | Now `node1` and `node2` will run in the namespace `my-config` and this will not interfere with other nodes that have similar names. Namespaces can be used per node as well so you can get very creative. 265 | 266 | # VirtualBox Options 267 | 268 | The VirtualBox provider can be configured by supplying a "virtualbox" property at the top level of the manifest file or per-node.
The following options are accepted everywhere: 269 | 270 | * **vmId** - (string) the id or name of the virtual machine to be used as a base image 271 | * **vmUrl** - (string) if the vmId is not found the image will be downloaded from a url 272 | * **username** - (string) username for ssh (defaults to vortex) 273 | * **password** - (string) password for ssh 274 | * **privateKey** - (string) path to private ssh key 275 | * **passphrase** - (string) passphrase for key 276 | 277 | # Amazon Options 278 | 279 | The Amazon provider can be configured by supplying an "amazon" property at the top level of the manifest file or per-node. The following options are accepted everywhere: 280 | 281 | * **accessKeyId** - (string) your amazon access key id 282 | * **secretAccessKey** - (string) your amazon access key 283 | * **region** - (string) the region where you want to deploy 284 | * **imageId** - (string) the image id to use 285 | * **securityGroups** - (array of strings) security groups to apply 286 | * **keyName** - (string) keyname to use 287 | * **disableApiTermination** - (string) make the instance un-terminatable 288 | * **username** - (string) username for ssh (defaults to vortex) 289 | * **password** - (string) password for ssh 290 | * **privateKey** - (string) path to private ssh key 291 | * **passphrase** - (string) passphrase for key 292 | 293 | # Node Provisioning 294 | 295 | Vortex comes with a built-in provisioner called [roost](https://github.com/websecurify/node-roost/) - another project of ours. Roost manifest files can be either imported from an external file or embedded directly into your vortex manifest. Here is an example: 296 | 297 | { 298 | ... 299 | 300 | "nodes": { 301 | "ubuntu": { 302 | "roost": "roost.json" 303 | } 304 | }, 305 | 306 | ... 307 | } 308 | 309 | You can also do the following if this is too much of a trouble: 310 | 311 | { 312 | ...
313 | 314 | "nodes": { 315 | "ubuntu": { 316 | "roost": { 317 | "apt": { 318 | "update": true 319 | }, 320 | 321 | "packages": [ 322 | "nodejs" 323 | ], 324 | 325 | "commands": [ 326 | "uname -a" 327 | ] 328 | } 329 | } 330 | }, 331 | 332 | ... 333 | } 334 | 335 | As a matter of fact, you can even apply a global roost file for all nodes. Just register the roost configuration outside of the `nodes` property. 336 | 337 | Merging roost manifests is also possible when declared at multiple levels. For example, at top level you may want to apply some defaults and maybe even some updates. Per node you may want to apply generic configurations and have some additional provisioning options for each provider. Such complex setup is possible and here is an example: 338 | 339 | { 340 | ... 341 | 342 | "roost": { 343 | "apt": { 344 | "update": true 345 | } 346 | } 347 | 348 | ... 349 | 350 | "nodes": { 351 | "ubuntu": { 352 | "roost": { 353 | "merge": true, 354 | 355 | "packages": [ 356 | "nodejs" 357 | ] 358 | }, 359 | 360 | "virtualbox": { 361 | "roost": { 362 | "merge": true, 363 | 364 | "commands": [ 365 | "cd /media/cdrom; ./VBoxLinuxAdditions-x86.run" 366 | ] 367 | } 368 | } 369 | } 370 | }, 371 | 372 | ... 373 | } 374 | 375 | The manifest is built from the inner most configuration and merged upwards if the `merge` flag is set to `true`. This is a non-standard roost option. 376 | 377 | For more information how the provisioner works just check the [project page](https://github.com/websecurify/node-roost/). 378 | 379 | # Vortex Plugins 380 | 381 | Vortex can be extended with plugins. Plugins are essentially nodejs modules and are installed the same way you typically install nodejs modules, i.e. npm and `package.json`. A good starting doc how npm modules work can be found [here](https://npmjs.org/doc/install.html). 382 | 383 | In order to load a plugin you need to declare it in your Vortex manifest file. Here is an example: 384 | 385 | { 386 | ... 
387 | 388 | "plugins": [ 389 | "my-plugin" 390 | ], 391 | 392 | ... 393 | } 394 | 395 | Plugins are executed first and can affect everything from the actual manifest that was loaded to what providers and actions are exposed and much more. 396 | 397 | The following workflow takes place when working with plugins. 398 | 399 | 1. Each plugin is loaded via node's `require`. 400 | 2. The module is inspected for two functions `getVortex` (takes priority) and `vortex`. 401 | * `getVortex` is used to retrieve an object that exposes a `vortex` function. 402 | * `vortex` is looked for to check if the plugin is compatible at this stage. 403 | 3. Before execution the plugin is invoked via a call to `vortex` function. The following parameters are passed: 404 | * opt - command line options 405 | * manifest - the manifest file 406 | * provider - default provider 407 | * action - the action to be executed 408 | 409 | Use `getVortex` to augment the Vortex environment such as install new actions, providers, etc. Use `vortex` to do something, mostly with the manifest file, before the actual action takes place. 410 | 411 | Vortex plugins can do pretty much everything so here are some suggestions of what you could do if you spend some time writing a plugin: 412 | 413 | * A plugin, which fetches access credentials such as keys, usernames and password from a centralized storage. 414 | * A plugin, which adds another provisioner such as chef and puppet. 415 | * A plugin, which allows you extensive use of environment variables to configure all aspects of the manifest file. 416 | * A plugin, which double-checks all options before launching an action in order to prevent unexpected behaviour. 417 | 418 | The list goes on and on. Get creative! 419 | 420 | # Node States 421 | 422 | Each node can have the following states when querying via the Provider.prototype.status function: 423 | 424 | * **paused** - the node is currently paused and it is not available for interaction. 
425 | * **booting** - the node is currently booting and it is not available for interaction. 426 | * **running** - the node is running and it is available for interaction. 427 | * **halting** - the node is halting and will soon become unavailable for interaction. 428 | * **stopped** - the node is stopped. 429 | 430 | These states are also exposed when quering a node via the status action, i.e. 431 | 432 | vortex status # shows a state such as booting, running, halting, stopped 433 | -------------------------------------------------------------------------------- /bin/vortex: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // --- 4 | 5 | if (process.env.NODE_ENV == 'development') { 6 | require('coffee-script'); 7 | 8 | // --- 9 | 10 | require('../src/vortex').main(); 11 | } else { 12 | require('../lib/vortex').main(); 13 | } 14 | -------------------------------------------------------------------------------- /doc/examples/docker-helloworld/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to configure and run docker. 2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/docker-helloworld 6 | vortex boot 7 | vortex provision 8 | vortex shell 9 | 10 | There is no need to provision the machine since we are using a custom image designed to run docker natively. 
11 | -------------------------------------------------------------------------------- /doc/examples/docker-helloworld/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "docker-helloworld", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64-docker", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64-docker.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "nodes": { 12 | "docker": { 13 | "roost": { 14 | "apt": { 15 | "update": true 16 | }, 17 | 18 | "packages": [ 19 | "lxc-docker" 20 | ] 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /doc/examples/expose-files/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to expose files inside the virtualized containers. 2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/expose-fles 6 | vortex up 7 | 8 | This will expose all locals files and print document.txt. 9 | 10 | The same setup can be deployed in other providers besides VirtualBox. To use Amazon you will have to provide additional configuration options. 11 | -------------------------------------------------------------------------------- /doc/examples/expose-files/document.txt: -------------------------------------------------------------------------------- 1 | This is a simple document. 
2 | -------------------------------------------------------------------------------- /doc/examples/expose-files/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "expose-files", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "nodes": { 12 | "app": { 13 | "expose": { 14 | ".": "/files" 15 | }, 16 | 17 | "roost": { 18 | "commands": [ 19 | "ls -la /files", 20 | "cat /files/document.txt" 21 | ] 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /doc/examples/mongodb-helloworld/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to configure and run a mongodb instance. 2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/mongodb-helloworld 6 | npm install 7 | vortex boot 8 | vortex provision 9 | 10 | Nothice that we use `npm install`. This ensures that all vortex/roost plugins are correctly installed. Mongodb itself is installed via the "roost-mongodb" plugin. 
11 | -------------------------------------------------------------------------------- /doc/examples/mongodb-helloworld/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "roost-mongodb": "latest" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /doc/examples/mongodb-helloworld/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "mongodb-helloworld", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "roost": { 12 | "plugins": [ 13 | "roost-mongodb" 14 | ] 15 | }, 16 | 17 | "nodes": { 18 | "db": { 19 | "roost": { 20 | "merge": true, 21 | 22 | "mongodb": { 23 | "install": true 24 | } 25 | } 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /doc/examples/nodejs-helloworld/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to configure and run a nodejs application with the express framework. The application runs on its own and does not require database and other forms of data storage. 2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/nodejs-helloworld 6 | vortex boot 7 | vortex provision 8 | 9 | The same setup can be deployed in other providers besides VirtualBox. To use Amazon you will have to provide additional configuration options. 
10 | -------------------------------------------------------------------------------- /doc/examples/nodejs-helloworld/app.js: -------------------------------------------------------------------------------- 1 | var express = require('express'); 2 | var app = express(); 3 | 4 | app.get('/', function(req, res){ 5 | var body = 'Hello World! The time is ' + (new Date()).getTime() + '.'; 6 | 7 | res.setHeader('Content-Type', 'text/plain'); 8 | res.setHeader('Content-Length', body.length); 9 | 10 | res.end(body); 11 | }); 12 | 13 | app.listen(3000); 14 | -------------------------------------------------------------------------------- /doc/examples/nodejs-helloworld/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "nodejs-helloworld", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "nodes": { 12 | "app": { 13 | "expose": { 14 | ".": "/app" 15 | }, 16 | 17 | "roost": { 18 | "apt": { 19 | "repositories": [ 20 | "ppa:chris-lea/node.js" 21 | ], 22 | 23 | "update": "true" 24 | }, 25 | 26 | "packages": [ 27 | "nodejs" 28 | ], 29 | 30 | "commands": [ 31 | "sudo npm install express -g", 32 | "sudo npm install forever -g", 33 | "sudo forever stopall; true", 34 | "sudo NODE_PATH=/usr/lib/nodejs:/usr/lib/node_modules:/usr/share/javascript forever start /app/app.js" 35 | ] 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /doc/examples/plugin-builtin/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how you can add your own plugin in Vortex. The plugin simply provides some defaults in order to simplify the configuration file. 
2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/plugin-builtin 6 | vortex boot 7 | vortex provision 8 | 9 | You can extend this example into something much more complex which can do the following: 10 | 11 | * Extract information from environment variables. 12 | * Extract information from a centralised server. 13 | * Automatically generate all nodes to be managed. 14 | -------------------------------------------------------------------------------- /doc/examples/plugin-builtin/defaults.js: -------------------------------------------------------------------------------- 1 | exports.vortex = function (opt, manifest, provider, action) { 2 | if (!manifest.hasOwnProperty('virtualbox')) { 3 | manifest.virtualbox = {}; 4 | } 5 | 6 | var virtualbox = manifest.virtualbox; 7 | 8 | if (!virtualbox.hasOwnProperty('vmId')) { 9 | virtualbox.vmId = 'precise64'; 10 | } 11 | 12 | if (!virtualbox.hasOwnProperty('vmUrl')) { 13 | virtualbox.vmUrl = 'https://s3.amazonaws.com/node-vortex/precise64.ova'; 14 | } 15 | 16 | if (!virtualbox.hasOwnProperty('username')) { 17 | virtualbox.username = 'vortex'; 18 | } 19 | 20 | if (!virtualbox.hasOwnProperty('password')) { 21 | virtualbox.password = 'vortex'; 22 | } 23 | }; 24 | -------------------------------------------------------------------------------- /doc/examples/plugin-builtin/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "plugin-builtin", 3 | 4 | "plugins": [ 5 | "./defaults.js" 6 | ], 7 | 8 | "nodes": { 9 | "app": { 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /doc/examples/puppet-helloworld/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to use puppet as a provisioner. 
2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/puppet-helloworld 6 | vortex boot 7 | vortex provision 8 | 9 | See the source for more information. 10 | -------------------------------------------------------------------------------- /doc/examples/puppet-helloworld/puppet/manifests/helloworld.pp: -------------------------------------------------------------------------------- 1 | exec { 'echo-hello-world': 2 | command => '/bin/sh -c "echo Hello World!"' 3 | } 4 | -------------------------------------------------------------------------------- /doc/examples/puppet-helloworld/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "puppet-helloworld", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "nodes": { 12 | "puppet": { 13 | "expose": { 14 | "./puppet": "/puppet" 15 | }, 16 | 17 | "roost": { 18 | "apt": { 19 | "update": true 20 | }, 21 | 22 | "packages": [ 23 | "puppet" 24 | ], 25 | 26 | "commands": [ 27 | "puppet apply --verbose --modulepath /puppet/modules/ /puppet/manifests/helloworld.pp" 28 | ] 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /doc/examples/vortex-commander/README.md: -------------------------------------------------------------------------------- 1 | This example demonstrates how to execute various types of commands in a virtualized container. 
2 | 3 | To run the example simply execute the following from your shell: 4 | 5 | cd path/to/vortex/git/doc/examples/vortex-commander 6 | vortex up 7 | 8 | Once the environment is up you can use the following syntax to launch various commands inside the virtualized container: 9 | 10 | vortex shell -- -- command 11 | 12 | For example 13 | 14 | vortex shell -- -- ls -la # lists the home directory 15 | vortex shell -- -- apt-get install nginx # install nginx 16 | 17 | This syntax also works for multiple nodes in your own setups. For example if you have a vortex project with nodes `app`, `db`, `backup` and `logs` then you can update all of them by using the following command 18 | 19 | vortex shell -- -- apt-gate update 20 | 21 | If you want to update just `app` and `db` but not `backup` and `logs` you do the following: 22 | 23 | vortex shell app db -- -- apt-get update 24 | -------------------------------------------------------------------------------- /doc/examples/vortex-commander/vortex.json: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "vortext-commander", 3 | 4 | "virtualbox": { 5 | "vmId": "precise64", 6 | "vmUrl": "https://s3.amazonaws.com/node-vortex/precise64.ova", 7 | "username": "vortex", 8 | "password": "vortex" 9 | }, 10 | 11 | "nodes": { 12 | "commander": { 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lib/actions.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var async, child_process, fs, logsmith, path, roost, shell, shell_quote, 3 | __slice = [].slice; 4 | 5 | fs = require('fs'); 6 | 7 | path = require('path'); 8 | 9 | async = require('async'); 10 | 11 | roost = require('roost'); 12 | 13 | logsmith = require('logsmith'); 14 | 15 | shell_quote = require('shell-quote'); 16 | 17 | child_process = require('child_process'); 18 | 19 | shell = require('./shell'); 20 | 21 | 
exports.actions = function(opt, manifest, provider, node_name, callback) { 22 | /* 23 | Prints out the available actions. 24 | */ 25 | 26 | var action_fn, action_name, desc, _ref, _results; 27 | _results = []; 28 | for (action_name in exports) { 29 | action_fn = exports[action_name]; 30 | desc = (_ref = action_fn.toString().split('\n').slice(2, 3)[0]) != null ? _ref.trim() : void 0; 31 | _results.push(logsmith.info(action_name, '-', desc)); 32 | } 33 | return _results; 34 | }; 35 | 36 | exports.status = function(opt, manifest, provider, node_names, callback) { 37 | /* 38 | Obtains state and network address if the selected node is running. 39 | */ 40 | 41 | var process_node; 42 | process_node = function(node_name, callback) { 43 | logsmith.verbose("query status for node " + node_name); 44 | return provider.status(node_name, function(err, state, address) { 45 | var args; 46 | if (err) { 47 | return callback(err); 48 | } 49 | args = ['node', node_name, 'is', state]; 50 | if (address) { 51 | args.push('at'); 52 | args.push(address); 53 | } 54 | logsmith.info.apply(logsmith, args); 55 | return callback(null); 56 | }); 57 | }; 58 | return async.eachSeries(node_names, process_node, callback); 59 | }; 60 | 61 | exports.shellspec = function(opt, manifest, provider, node_names, callback) { 62 | /* 63 | Obtains the shell specification (typically ssh url) for the selected node. 64 | */ 65 | 66 | var process_node; 67 | process_node = function(node_name, callback) { 68 | logsmith.verbose("query shell spec for node " + node_name); 69 | return provider.shell_spec(node_name, function(err, spec) { 70 | if (err) { 71 | return callback(err); 72 | } 73 | logsmith.info(node_name, '->', spec); 74 | return callback(null, spec); 75 | }); 76 | }; 77 | return async.eachSeries(node_names, process_node, callback); 78 | }; 79 | 80 | exports.boot = function(opt, manifest, provider, node_names, callback) { 81 | /* 82 | Ensures that the node is running. 
83 | */ 84 | 85 | var process_node; 86 | process_node = function(node_name, callback) { 87 | logsmith.verbose("boot node " + node_name); 88 | return provider.boot(node_name, function(err, state, address) { 89 | var args; 90 | if (err) { 91 | logsmith.error(err.message); 92 | } 93 | if (err) { 94 | return callback(null); 95 | } 96 | args = ['node', node_name, 'is', state]; 97 | if (address) { 98 | args.push('at'); 99 | args.push(address); 100 | } 101 | logsmith.info.apply(logsmith, args); 102 | return callback(null); 103 | }); 104 | }; 105 | return async.eachSeries(node_names, process_node, callback); 106 | }; 107 | 108 | exports.halt = function(opt, manifest, provider, node_names, callback) { 109 | /* 110 | Ensures that the node is stopped. 111 | */ 112 | 113 | var process_node; 114 | process_node = function(node_name, callback) { 115 | logsmith.verbose("halt node " + node_name); 116 | return provider.halt(node_name, function(err, state, address) { 117 | var args; 118 | if (err) { 119 | logsmith.error(err.message); 120 | } 121 | if (err) { 122 | return callback(null); 123 | } 124 | args = ['node', node_name, 'is', state]; 125 | if (address) { 126 | args.push('at'); 127 | args.push(address); 128 | } 129 | logsmith.info.apply(logsmith, args); 130 | return callback(null); 131 | }); 132 | }; 133 | return async.eachSeries(node_names, process_node, callback); 134 | }; 135 | 136 | exports.pause = function(opt, manifest, provider, node_names, callback) { 137 | /* 138 | Ensures that the node is paused. 
139 | */ 140 | 141 | var process_node; 142 | process_node = function(node_name, callback) { 143 | logsmith.verbose("pause node " + node_name); 144 | return provider.pause(node_name, function(err, state, address) { 145 | var args; 146 | if (err) { 147 | logsmith.error(err.message); 148 | } 149 | if (err) { 150 | return callback(null); 151 | } 152 | args = ['node', node_name, 'is', state]; 153 | if (address) { 154 | args.push('at'); 155 | args.push(address); 156 | } 157 | logsmith.info.apply(logsmith, args); 158 | return callback(null); 159 | }); 160 | }; 161 | return async.eachSeries(node_names, process_node, callback); 162 | }; 163 | 164 | exports.resume = function(opt, manifest, provider, node_names, callback) { 165 | /* 166 | Ensures that the node is resumed. 167 | */ 168 | 169 | var process_node; 170 | process_node = function(node_name, callback) { 171 | logsmith.verbose("resume node " + node_name); 172 | return provider.resume(node_name, function(err, state, address) { 173 | var args; 174 | if (err) { 175 | logsmith.error(err.message); 176 | } 177 | if (err) { 178 | return callback(null); 179 | } 180 | args = ['node', node_name, 'is', state]; 181 | if (address) { 182 | args.push('at'); 183 | args.push(address); 184 | } 185 | logsmith.info.apply(logsmith, args); 186 | return callback(null); 187 | }); 188 | }; 189 | return async.eachSeries(node_names, process_node, callback); 190 | }; 191 | 192 | exports.restart = function(opt, manifest, provider, node_names, callback) { 193 | /* 194 | Chains actions halt and then boot for every node. 
195 | */ 196 | 197 | var actions, process_node; 198 | actions = []; 199 | actions.push(function(node_name, callback) { 200 | return exports.halt(opt, manifest, provider, [node_name], function(err) { 201 | if (err) { 202 | return callback(err); 203 | } 204 | return callback(null, node_name); 205 | }); 206 | }); 207 | actions.push(function(node_name, callback) { 208 | return exports.boot(opt, manifest, provider, [node_name], function(err) { 209 | if (err) { 210 | return callback(err); 211 | } 212 | return callback(null, node_name); 213 | }); 214 | }); 215 | process_node = function(node_name, callback) { 216 | var current_actions; 217 | logsmith.verbose("restart node " + node_name); 218 | current_actions = [(function(callback) { 219 | return callback(null, node_name); 220 | })].concat(__slice.call(actions)); 221 | return async.waterfall(current_actions, callback); 222 | }; 223 | return async.eachSeries(node_names, process_node, callback); 224 | }; 225 | 226 | exports.provision = function(opt, manifest, provider, node_names, callback) { 227 | /* 228 | Starts the provisioner on the selected node. 
229 | */ 230 | 231 | var actions, merge_objects, merge_roost, process_node; 232 | actions = []; 233 | merge_objects = function(a, b) { 234 | var key, value; 235 | for (key in b) { 236 | value = b[key]; 237 | if (a[key] != null) { 238 | a[key] = (function() { 239 | switch (false) { 240 | case !Array.isArray(a[key]): 241 | return a[key].concat(b[key]); 242 | case !(typeof a[key] === 'number' || a[key] instanceof Number): 243 | return b[key]; 244 | case !(typeof a[key] === 'string' || a[key] instanceof String): 245 | return b[key]; 246 | case !(typeof a[key] === 'boolean' || a[key] instanceof Boolean): 247 | return b[key]; 248 | default: 249 | return arguments.callee(a[key], b[key]); 250 | } 251 | }).apply(this, arguments); 252 | } else { 253 | a[key] = b[key]; 254 | } 255 | } 256 | return a; 257 | }; 258 | merge_roost = function(manifest, configs) { 259 | if (configs.length === 0) { 260 | return null; 261 | } 262 | return configs.map((function(config) { 263 | if (typeof config === 'string' || config instanceof String) { 264 | return roost.manifest.load(path.resolve(path.dirname(manifest.meta.location), config)); 265 | } else { 266 | return config; 267 | } 268 | })).reduce((function(previous_value, current_value) { 269 | if (!previous_value) { 270 | return JSON.parse(JSON.stringify(current_value)); 271 | } 272 | if ((current_value.merge != null) && current_value.merge) { 273 | return merge_objects(previous_value, current_value); 274 | } else { 275 | return current_value; 276 | } 277 | }), null); 278 | }; 279 | actions.push(function(node_name, callback) { 280 | return provider.bootstrap(node_name, function(err) { 281 | if (err) { 282 | return callback(err); 283 | } 284 | return callback(null, node_name); 285 | }); 286 | }); 287 | actions.push(function(node_name, callback) { 288 | var e, merge_configs, node_manifest, roost_manifest, roost_plugins, _ref; 289 | node_manifest = manifest.nodes[node_name]; 290 | merge_configs = []; 291 | if (typeof manifestroost !== 
"undefined" && manifestroost !== null) { 292 | merge_configs.push(manifest.roost); 293 | } 294 | if (node_manifest.roost != null) { 295 | merge_configs.push(node_manifest.roost); 296 | } 297 | if (((_ref = node_manifest[provider.name]) != null ? _ref.roost : void 0) != null) { 298 | merge_configs.push(node_manifest[provider.name].roost); 299 | } 300 | roost_manifest = merge_roost(manifest, merge_configs); 301 | if (!roost_manifest) { 302 | return callback(new Error("no roost configuration defined for node " + node_name)); 303 | } 304 | if (merge_configs.length > 0 && (roost_manifest.meta == null)) { 305 | roost_manifest.meta = { 306 | location: manifest.meta.location 307 | }; 308 | } 309 | try { 310 | roost_plugins = roost.plugins.obtain(roost_manifest); 311 | } catch (_error) { 312 | e = _error; 313 | return callback(e); 314 | } 315 | node_manifest.roost = roost_manifest; 316 | return callback(null, node_name, roost_manifest, roost_plugins); 317 | }); 318 | actions.push(function(node_name, roost_manifest, roost_plugins, callback) { 319 | return provider.shell_spec(node_name, function(err, spec) { 320 | if (err) { 321 | return callback(err); 322 | } 323 | return callback(null, node_name, roost_manifest, roost_plugins, spec); 324 | }); 325 | }); 326 | actions.push(function(node_name, roost_manifest, roost_plugins, spec, callback) { 327 | var obtain_status; 328 | if (roost_manifest.bootstrap == null) { 329 | roost_manifest.bootstrap = []; 330 | } 331 | roost_manifest.bootstrap.push('sudo mkdir -p /etc/vortex/nodes/'); 332 | obtain_status = function(node_name, callback) { 333 | return provider.status(node_name, function(err, state, address) { 334 | if (err) { 335 | return callback(err); 336 | } 337 | return callback(null, { 338 | node_name: node_name, 339 | address: address 340 | }); 341 | }); 342 | }; 343 | return async.map(Object.keys(manifest.nodes), obtain_status, function(err, results) { 344 | var address, file, result, _i, _len; 345 | if (err) { 346 | return 
callback(err); 347 | } 348 | for (_i = 0, _len = results.length; _i < _len; _i++) { 349 | result = results[_i]; 350 | if (result.node_name === node_name) { 351 | continue; 352 | } 353 | if (!result.address) { 354 | logsmith.error("node " + node_name + " does not expose address"); 355 | continue; 356 | } 357 | address = shell_quote.quote([result.address]); 358 | file = shell_quote.quote(["/etc/vortex/nodes/" + result.node_name]); 359 | roost_manifest.bootstrap.unshift("echo " + address + " | sudo tee " + file); 360 | } 361 | return callback(null, node_name, roost_manifest, roost_plugins, spec); 362 | }); 363 | }); 364 | actions.push(function(node_name, roost_manifest, roost_plugins, spec, callback) { 365 | var e, roost_opt, roost_target; 366 | try { 367 | roost_target = roost.targets.create(spec, roost_manifest); 368 | } catch (_error) { 369 | e = _error; 370 | return callback(e); 371 | } 372 | roost_opt = { 373 | options: {}, 374 | argv: [] 375 | }; 376 | if (opt.options.dry != null) { 377 | roost_opt.options.dry = opt.options.dry; 378 | } 379 | return roost.engine.launch(roost_opt, roost_manifest, roost_plugins, roost_target, callback); 380 | }); 381 | process_node = function(node_name, callback) { 382 | var current_actions; 383 | logsmith.info("provision node " + node_name); 384 | current_actions = [(function(callback) { 385 | return callback(null, node_name); 386 | })].concat(__slice.call(actions)); 387 | return async.waterfall(current_actions, callback); 388 | }; 389 | return async.eachSeries(node_names, process_node, callback); 390 | }; 391 | 392 | exports.up = function(opt, manifest, provider, node_names, callback) { 393 | /* 394 | Will bring up a node by first booting/resuming it and than starting the provisioning process. 
395 | */ 396 | 397 | var process_node; 398 | process_node = function(node_name, callback) { 399 | return provider.status(node_name, function(err, state, address) { 400 | var perform_provision; 401 | if (err) { 402 | return callback(err); 403 | } 404 | perform_provision = function(state, address) { 405 | var callee, timeout_handler; 406 | if (state === 'running' && address) { 407 | return exports.provision(opt, manifest, provider, [node_name], callback); 408 | } else { 409 | callee = arguments.callee; 410 | timeout_handler = function() { 411 | return provider.status(node_name, function(err, state, address) { 412 | if (err) { 413 | return callback(err); 414 | } 415 | return callee(state, address); 416 | }); 417 | }; 418 | return setTimeout(timeout_handler, 1000); 419 | } 420 | }; 421 | switch (state) { 422 | case 'stopped': 423 | return provider.boot(node_name, function(err, state, address) { 424 | if (err) { 425 | return callback(err); 426 | } 427 | return perform_provision(state, address); 428 | }); 429 | case 'paused': 430 | return provider.resume(node_name, function(err, state, address) { 431 | if (err) { 432 | return callback(err); 433 | } 434 | return perform_provision(state, address); 435 | }); 436 | default: 437 | return callback(null); 438 | } 439 | }); 440 | }; 441 | return async.eachSeries(node_names, process_node, callback); 442 | }; 443 | 444 | exports.down = function(opt, manifest, provider, node_names, callback) { 445 | /* 446 | Will bring down a node. At the moment this action is a alias for action halt. 
447 | */ 448 | 449 | var process_node; 450 | process_node = function(node_name, callback) { 451 | return provider.status(node_name, function(err, state, address) { 452 | if (err) { 453 | return callback(err); 454 | } 455 | if (state === 'stopped') { 456 | return callback(null); 457 | } 458 | return provider.halt(node_name, callback); 459 | }); 460 | }; 461 | return async.eachSeries(node_names, process_node, callback); 462 | }; 463 | 464 | exports.reload = function(opt, manifest, provider, node_names, callback) { 465 | /* 466 | Chains actions down and then up for every node. 467 | */ 468 | 469 | var actions, process_node; 470 | actions = []; 471 | actions.push(function(node_name, callback) { 472 | return exports.down(opt, manifest, provider, [node_name], function(err) { 473 | if (err) { 474 | return callback(err); 475 | } 476 | return callback(null, node_name); 477 | }); 478 | }); 479 | actions.push(function(node_name, callback) { 480 | return exports.up(opt, manifest, provider, [node_name], function(err) { 481 | if (err) { 482 | return callback(err); 483 | } 484 | return callback(null, node_name); 485 | }); 486 | }); 487 | process_node = function(node_name, callback) { 488 | var current_actions; 489 | logsmith.verbose("reload node " + node_name); 490 | current_actions = [(function(callback) { 491 | return callback(null, node_name); 492 | })].concat(__slice.call(actions)); 493 | return async.waterfall(current_actions, callback); 494 | }; 495 | return async.eachSeries(node_names, process_node, callback); 496 | }; 497 | 498 | exports.shell = function(opt, manifest, provider, node_names, callback) { 499 | /* 500 | Starts a shell or executes a command on the selected node. 
501 | */ 502 | 503 | var actions, process_node; 504 | actions = []; 505 | actions.push(function(node_name, callback) { 506 | return provider.shell_spec(node_name, function(err, spec) { 507 | if (err) { 508 | return callback(err); 509 | } 510 | if (!spec.match(/^ssh:/i)) { 511 | return callback(new Error("unsupported shell spec " + spec)); 512 | } 513 | return callback(null, spec); 514 | }); 515 | }); 516 | actions.push(function(spec, callback) { 517 | var command, ssh; 518 | ssh = new shell.Ssh(spec, manifest); 519 | command = opt.argv.slice(opt.argv.indexOf('--') + 1); 520 | if (command.length === opt.argv.length) { 521 | command = null; 522 | } else { 523 | command = command.join(' '); 524 | } 525 | if (command) { 526 | ssh.exec(command); 527 | } else { 528 | ssh.shell(); 529 | } 530 | return ssh.ignite(false, function(err) { 531 | if (err) { 532 | return callback(err); 533 | } 534 | return callback(null); 535 | }); 536 | }); 537 | process_node = function(node_name, callback) { 538 | var current_actions; 539 | logsmith.info("shell into node " + node_name); 540 | current_actions = [(function(callback) { 541 | return callback(null, node_name); 542 | })].concat(__slice.call(actions)); 543 | return async.waterfall(current_actions, callback); 544 | }; 545 | return async.eachSeries(node_names, process_node, callback); 546 | }; 547 | 548 | exports.openurl = function(opt, manifest, provider, node_names, callback) { 549 | /* 550 | Open node url in browser. 
551 | */ 552 | 553 | var command, process_node; 554 | command = (function() { 555 | switch (false) { 556 | case !process.platform.match(/^win/): 557 | return 'start'; 558 | case !process.platform.match(/^dar/): 559 | return 'open'; 560 | default: 561 | return 'firefox'; 562 | } 563 | })(); 564 | process_node = function(node_name, callback) { 565 | var node_def, port, scheme, web_def; 566 | node_def = manifest.nodes[node_name]; 567 | web_def = node_def.web || {}; 568 | path = (function() { 569 | switch (false) { 570 | case !web_def.path: 571 | return web_def.path; 572 | default: 573 | return '/'; 574 | } 575 | })(); 576 | port = (function() { 577 | switch (false) { 578 | case !web_def.port: 579 | return web_def.port; 580 | default: 581 | return 80; 582 | } 583 | })(); 584 | scheme = (function() { 585 | switch (false) { 586 | case !web_def.scheme: 587 | return web_def.scheme; 588 | case port !== 443: 589 | return 'https'; 590 | default: 591 | return 'http'; 592 | } 593 | })(); 594 | return provider.status(node_name, function(err, state, address) { 595 | var url; 596 | if (err) { 597 | return callback(err); 598 | } 599 | if (!address) { 600 | return callback(new Error("cannot identify address for node " + node_name)); 601 | } 602 | url = "" + scheme + "://" + address + ":" + port + path; 603 | return child_process.exec(shell_quote.quote([command, url]), function(err) { 604 | if (err) { 605 | return callback(err); 606 | } 607 | return callback(null); 608 | }); 609 | }); 610 | }; 611 | return async.eachSeries(node_names, process_node, callback); 612 | }; 613 | 614 | }).call(this); 615 | -------------------------------------------------------------------------------- /lib/download.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var Progress2, fs, http, https, logsmith; 3 | 4 | fs = require('fs'); 5 | 6 | http = require('http'); 7 | 8 | https = require('https'); 9 | 10 | logsmith = require('logsmith'); 11 | 12 | 
Progress2 = require('progress2'); 13 | 14 | exports.get = function(url, file, callback) { 15 | /* 16 | Downloads a url into a file. A progresbar will appear If the right logging level is set. 17 | */ 18 | 19 | var e, getter, socket; 20 | logsmith.verbose("download " + url + " to " + file); 21 | switch (false) { 22 | case !url.match(/^http:\/\//i): 23 | getter = http; 24 | break; 25 | case !url.match(/^https:\/\//i): 26 | getter = https; 27 | break; 28 | default: 29 | if (callback) { 30 | return callback(new Error("unrecognized scheme for url " + url)); 31 | } 32 | } 33 | try { 34 | socket = getter.get(url, function(response) { 35 | var content_length, progress, stream, _ref, _ref1; 36 | switch (false) { 37 | case response.statusCode !== 401: 38 | if (callback) { 39 | return callback(new Error("not authorized to download " + url)); 40 | } 41 | break; 42 | case response.statusCode !== 403: 43 | if (callback) { 44 | return callback(new Error("not allowed to download " + url)); 45 | } 46 | break; 47 | case response.statusCode !== 404: 48 | if (callback) { 49 | return callback(new Error("download " + url + " not found")); 50 | } 51 | break; 52 | case !((200 < (_ref = response.statusCode) && _ref > 299)): 53 | if (callback) { 54 | return callback(new Error("cannot download " + url)); 55 | } 56 | } 57 | if ((_ref1 = logsmith.level) === 'verbose' || _ref1 === 'debug' || _ref1 === 'silly') { 58 | content_length = parseInt(response.headers['content-length'], 10); 59 | if (!isNaN(content_length)) { 60 | progress = new Progress2('downloading [:bar] :percent :etas', { 61 | complete: '=', 62 | incomplete: ' ', 63 | total: content_length, 64 | width: 40 65 | }); 66 | response.on('data', function(chunk) { 67 | return progress.tick(chunk.length); 68 | }); 69 | response.on('end', function() { 70 | return process.stdout.write('\n'); 71 | }); 72 | } 73 | } 74 | stream = fs.createWriteStream(file); 75 | stream.on('error', function(error) { 76 | return callback(new Error("cannot write 
to file " + file + " for download " + url)); 77 | }); 78 | response.on('error', function(error) { 79 | return callback(new Error("cannot download from url " + url + " to file " + file)); 80 | }); 81 | response.on('end', function() { 82 | return callback(null); 83 | }); 84 | return response.pipe(stream); 85 | }); 86 | return socket.on('error', function(error) { 87 | if (callback) { 88 | return callback(error); 89 | } 90 | }); 91 | } catch (_error) { 92 | e = _error; 93 | if (callback) { 94 | return callback(e); 95 | } 96 | } 97 | }; 98 | 99 | }).call(this); 100 | -------------------------------------------------------------------------------- /lib/engine.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var actions; 3 | 4 | actions = require('./actions'); 5 | 6 | exports.launch = function(opt, manifest, plugins, provider, action, callback) { 7 | /* 8 | Main method for putting toghether the entire logic of Vortex. 9 | */ 10 | 11 | var node_name, plugin, selected_nodes, traversed_nodes, _i, _j, _len, _len1, _ref; 12 | if (actions[action] == null) { 13 | return callback(new Error("action " + action + " is not recognized")); 14 | } 15 | if (plugins) { 16 | [ 17 | (function() { 18 | var _i, _len, _results; 19 | _results = []; 20 | for (_i = 0, _len = plugins.length; _i < _len; _i++) { 21 | plugin = plugins[_i]; 22 | _results.push(plugin.vortex(opt, manifest, provider, action)); 23 | } 24 | return _results; 25 | })() 26 | ]; 27 | } 28 | if (manifest.nodes == null) { 29 | return callback(new Error("no nodes defined in the vortex manifest")); 30 | } 31 | selected_nodes = []; 32 | traversed_nodes = selected_nodes; 33 | _ref = opt.argv.slice(1); 34 | for (_i = 0, _len = _ref.length; _i < _len; _i++) { 35 | node_name = _ref[_i]; 36 | if (node_name === '--') { 37 | traversed_nodes = []; 38 | } else { 39 | traversed_nodes.push(node_name); 40 | } 41 | } 42 | if (selected_nodes.length === 0) { 43 | selected_nodes = 
Object.keys(manifest.nodes); 44 | } 45 | if (selected_nodes.length === 0) { 46 | return callback(new Error("no nodes selected for action " + action)); 47 | } 48 | for (_j = 0, _len1 = selected_nodes.length; _j < _len1; _j++) { 49 | node_name = selected_nodes[_j]; 50 | if (manifest.nodes[node_name] == null) { 51 | return callback(new Error("node " + node_name + " does not exist")); 52 | } 53 | } 54 | return actions[action](opt, manifest, provider, selected_nodes, callback); 55 | }; 56 | 57 | }).call(this); 58 | -------------------------------------------------------------------------------- /lib/index.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var base, ext, file, fs, path, _i, _len, _ref; 3 | 4 | fs = require('fs'); 5 | 6 | path = require('path'); 7 | 8 | _ref = fs.readdirSync(__dirname); 9 | for (_i = 0, _len = _ref.length; _i < _len; _i++) { 10 | file = _ref[_i]; 11 | ext = path.extname(file); 12 | base = path.basename(file, ext); 13 | exports[base] = require(path.join(__dirname, file)); 14 | } 15 | 16 | }).call(this); 17 | -------------------------------------------------------------------------------- /lib/manifest.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var fs, path; 3 | 4 | fs = require('fs'); 5 | 6 | path = require('path'); 7 | 8 | exports.locate = function(location) { 9 | /* 10 | Locates a manifest. There are different strategies where to find the manifest file. 11 | */ 12 | 13 | var file, stat; 14 | file = location != null ? 
location : path.join(process.cwd(), 'vortex.json'); 15 | if (!fs.existsSync(file)) { 16 | throw new Error('vortex manifest not found'); 17 | } 18 | stat = fs.statSync(file); 19 | if (stat.isDirectory()) { 20 | file = path.resolve(file, 'vortex.json'); 21 | stat = fs.statSync(file); 22 | } 23 | if (!stat.isFile()) { 24 | throw new Error('vortex manifest does not exist'); 25 | } 26 | return file; 27 | }; 28 | 29 | exports.load = function(location) { 30 | /* 31 | Loads a manifest. The manifest is initialized with a meta object containing its location. 32 | */ 33 | 34 | var manifest; 35 | manifest = require(location); 36 | manifest.meta = { 37 | location: location 38 | }; 39 | return manifest; 40 | }; 41 | 42 | }).call(this); 43 | -------------------------------------------------------------------------------- /lib/plugins.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var path, 3 | __indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }; 4 | 5 | path = require('path'); 6 | 7 | exports.obtain = function(manifest) { 8 | /* 9 | Obtains a list of plugins specified in the manifest. 
10 | */ 11 | 12 | var failure, name, plugins, root, true_values, value; 13 | if (manifest.plugins == null) { 14 | return; 15 | } 16 | true_values = [1, '1', true, 'true']; 17 | if (Array.isArray(manifest.plugins)) { 18 | plugins = manifest.plugins; 19 | } else { 20 | plugins = [ 21 | (function() { 22 | var _ref, _results; 23 | if (__indexOf.call(true_values, value) >= 0) { 24 | _ref = manifest.plugins; 25 | _results = []; 26 | for (name in _ref) { 27 | value = _ref[name]; 28 | _results.push(name); 29 | } 30 | return _results; 31 | } 32 | })() 33 | ]; 34 | } 35 | root = path.dirname(manifest.meta.location); 36 | failure = function(err) { 37 | if (err.code !== 'MODULE_NOT_FOUND') { 38 | throw new Error("cannot load plugin " + name); 39 | } 40 | }; 41 | return plugins.map(function(name) { 42 | var e, plugin; 43 | try { 44 | plugin = require(path.resolve(root, name)); 45 | } catch (_error) { 46 | e = _error; 47 | failure(e); 48 | try { 49 | plugin = require(path.resolve(path.join(root, 'node_modules'), name)); 50 | } catch (_error) { 51 | e = _error; 52 | failure(e); 53 | try { 54 | plugin = require(name); 55 | } catch (_error) { 56 | e = _error; 57 | failure(e); 58 | throw e; 59 | } 60 | } 61 | } 62 | if (plugin.getVortex != null) { 63 | plugin = plugin.getVortex(manifest); 64 | } 65 | if (plugin.vortex == null) { 66 | throw new Error("plugins " + name + " is not comptabile"); 67 | } 68 | return plugin; 69 | }); 70 | }; 71 | 72 | }).call(this); 73 | -------------------------------------------------------------------------------- /lib/provider_amazon.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var async, aws_sdk, fs, logsmith, path_extra, portchecker; 3 | 4 | fs = require('fs'); 5 | 6 | async = require('async'); 7 | 8 | aws_sdk = require('aws-sdk'); 9 | 10 | logsmith = require('logsmith'); 11 | 12 | path_extra = require('path-extra'); 13 | 14 | portchecker = require('portchecker'); 15 | 16 | 
exports.Provider = (function() { 17 | /* 18 | This class exposes Amazon as a provider to Vortex. 19 | */ 20 | 21 | function _Class(manifest) { 22 | this.manifest = manifest; 23 | /* 24 | The provider accepts a manifest as a parameter by specification. 25 | */ 26 | 27 | aws_sdk.config.update(this.extract_client_options()); 28 | } 29 | 30 | _Class.prototype.get_node = function(node_name) { 31 | /* 32 | This method returns a node by looking up its name. It throws an error if the node is not found. 33 | */ 34 | 35 | if ((this.manifest.nodes != null) && (this.manifest.nodes[node_name] != null)) { 36 | return this.manifest.nodes[node_name]; 37 | } 38 | throw new Error("node " + node_name + " does not exist"); 39 | }; 40 | 41 | _Class.prototype.extract_property = function(property_name, node_name) { 42 | /* 43 | Extracts a property by looking into a node and upper layers of the manifest. 44 | */ 45 | 46 | var e, node, _ref, _ref1; 47 | try { 48 | node = this.get_node(node_name); 49 | } catch (_error) { 50 | e = _error; 51 | node = null; 52 | } 53 | if ((node != null ? (_ref = node.amazon) != null ? _ref[property_name] : void 0 : void 0) != null) { 54 | return node.amazon[property_name]; 55 | } 56 | if (((_ref1 = this.manifest.amazon) != null ? 
_ref1[property_name] : void 0) != null) { 57 | return this.manifest.amazon[property_name]; 58 | } 59 | return null; 60 | }; 61 | 62 | _Class.prototype.extract_access_key_id = function(node_name) { 63 | return this.extract_property('accessKeyId', node_name); 64 | }; 65 | 66 | _Class.prototype.extract_secret_access_key = function(node_name) { 67 | return this.extract_property('secretAccessKey', node_name); 68 | }; 69 | 70 | _Class.prototype.extract_region = function(node_name) { 71 | return this.extract_property('region', node_name); 72 | }; 73 | 74 | _Class.prototype.extract_max_retries = function(node_name) { 75 | return this.extract_property('maxRetries', node_name); 76 | }; 77 | 78 | _Class.prototype.extract_image_id = function(node_name) { 79 | return this.extract_property('imageId', node_name); 80 | }; 81 | 82 | _Class.prototype.extract_instance_type = function(node_name) { 83 | return this.extract_property('instanceType', node_name); 84 | }; 85 | 86 | _Class.prototype.extract_key_name = function(node_name) { 87 | return this.extract_property('keyName', node_name); 88 | }; 89 | 90 | _Class.prototype.extract_security_groups = function(node_name) { 91 | return this.extract_property('securityGroups', node_name); 92 | }; 93 | 94 | _Class.prototype.extract_user_data = function(node_name) { 95 | return this.extract_property('userData', node_name); 96 | }; 97 | 98 | _Class.prototype.extract_disable_api_termination = function(node_name) { 99 | return this.extract_property('disableApiTermination', node_name); 100 | }; 101 | 102 | _Class.prototype.extract_username = function(node_name) { 103 | return this.extract_property('username', node_name); 104 | }; 105 | 106 | _Class.prototype.extract_password = function(node_name) { 107 | return this.extract_property('password', node_name); 108 | }; 109 | 110 | _Class.prototype.extract_private_key = function(node_name) { 111 | return this.extract_property('privateKey', node_name); 112 | }; 113 | 114 | 
_Class.prototype.extract_passphrase = function(node_name) { 115 | return this.extract_property('passphrase', node_name); 116 | }; 117 | 118 | _Class.prototype.extract_ssh_port = function(node_name) { 119 | return this.extract_property('sshPort', node_name); 120 | }; 121 | 122 | _Class.prototype.extract_namespace = function(node_name) { 123 | /* 124 | Extracts a namespace by looking it up in the node itself and upper layers of the manifest 125 | */ 126 | 127 | var node; 128 | try { 129 | node = this.get_node(node_name); 130 | } catch (_error) { 131 | node = null; 132 | } 133 | if ((node != null ? node.namespace : void 0) != null) { 134 | return node.namespace; 135 | } 136 | if (this.manifest.namespace != null) { 137 | return this.manifest.namespace; 138 | } 139 | }; 140 | 141 | _Class.prototype.extract_client_options = function(node_name) { 142 | /* 143 | Extracts options related to the AWS client. 144 | */ 145 | 146 | var access_key_id, max_retries, options, region, secret_access_key; 147 | access_key_id = this.extract_access_key_id(node_name); 148 | secret_access_key = this.extract_secret_access_key(node_name); 149 | region = this.extract_region(node_name); 150 | max_retries = this.extract_max_retries(node_name); 151 | options = {}; 152 | if (access_key_id) { 153 | options.accessKeyId = access_key_id; 154 | } 155 | if (secret_access_key) { 156 | options.secretAccessKey = secret_access_key; 157 | } 158 | if (region) { 159 | options.region = region; 160 | } 161 | if (max_retries) { 162 | options.maxRetries = max_retries; 163 | } 164 | return options; 165 | }; 166 | 167 | _Class.prototype.extract_instance_options = function(node_name) { 168 | /* 169 | Extracts options related to AWS instances. 
170 | */ 171 | 172 | var disable_api_termination, image_id, instance_type, key_name, options, security_groups, user_data; 173 | image_id = this.extract_image_id(node_name); 174 | instance_type = this.extract_instance_type(node_name); 175 | key_name = this.extract_key_name(node_name); 176 | security_groups = this.extract_security_groups(node_name); 177 | user_data = this.extract_user_data(node_name); 178 | disable_api_termination = this.extract_disable_api_termination(node_name); 179 | options = {}; 180 | if (image_id) { 181 | options.ImageId = image_id; 182 | } 183 | if (instance_type) { 184 | options.InstanceType = instance_type; 185 | } 186 | if (key_name) { 187 | options.KeyName = key_name; 188 | } 189 | if (security_groups) { 190 | options.SecurityGroups = security_groups; 191 | } 192 | if (user_data) { 193 | options.UserData = user_data; 194 | } 195 | if (disable_api_termination) { 196 | options.DisableApiTermination = disable_api_termination; 197 | } 198 | return options; 199 | }; 200 | 201 | _Class.prototype.get_client = function(node_name) { 202 | /* 203 | Obtain a client for EC2. 204 | */ 205 | 206 | return new aws_sdk.EC2(this.extract_client_options(node_name)); 207 | }; 208 | 209 | _Class.prototype.create_error = function(error, node_name) { 210 | /* 211 | Creates a friendlier error message. 
212 | */ 213 | 214 | var message, parts, tokens, type; 215 | if (error.code === 'NetworkingError') { 216 | return error; 217 | } else { 218 | tokens = error.toString().split(':'); 219 | type = tokens[0]; 220 | message = tokens[1].trim(); 221 | parts = message.split('.'); 222 | message = parts.shift().toLowerCase().trim(); 223 | if (node_name) { 224 | message = "" + message + " for node " + node_name; 225 | } 226 | if (parts.length > 0) { 227 | message = "" + message + " (" + (parts.join('.').trim()) + ")"; 228 | } 229 | message = message.replace(/\s'(\w+)'\s/, function(match, group) { 230 | var param; 231 | param = group.toLowerCase(); 232 | switch (param) { 233 | case 'accesskeyid': 234 | param = 'accessKeyId'; 235 | break; 236 | case 'secretaccesskey': 237 | param = 'secretAccessKey'; 238 | break; 239 | case 'region': 240 | param = 'region'; 241 | break; 242 | case 'maxretries': 243 | param = 'maxRetries'; 244 | break; 245 | case 'imageid': 246 | param = 'imageId'; 247 | break; 248 | case 'instancetype': 249 | param = 'instanceType'; 250 | break; 251 | case 'keyname': 252 | param = 'keyName'; 253 | break; 254 | case 'securitygroups': 255 | param = 'securityGroups'; 256 | break; 257 | case 'userdata': 258 | param = 'userData'; 259 | break; 260 | case 'disableapitermination': 261 | param = 'disableApiTermination'; 262 | } 263 | return ' "' + param + '" '; 264 | }); 265 | message = message[0] + message.substring(1, message.length); 266 | return new Error(message); 267 | } 268 | }; 269 | 270 | _Class.prototype.bootstrap = function(node_name, callback) { 271 | /* 272 | Provider-specific method for bootstrapping a node. 
273 | */ 274 | 275 | var obtain_shell_spec, prepare_exposed, verify_status, 276 | _this = this; 277 | verify_status = function(callback) { 278 | return _this.status(node_name, function(err, state, address) { 279 | if (err) { 280 | return callback(err); 281 | } 282 | if (state !== 'running') { 283 | return callback(new Error("node " + node_name + " is not ready")); 284 | } 285 | return callback(null); 286 | }); 287 | }; 288 | obtain_shell_spec = function(callback) { 289 | return _this.shell_spec(node_name, function(err, spec) { 290 | if (err) { 291 | return callback(err); 292 | } 293 | return callback(null, spec); 294 | }); 295 | }; 296 | prepare_exposed = function(spec, callback) { 297 | var dst, e, handle_exposure, node, src; 298 | try { 299 | node = _this.get_node(node_name); 300 | } catch (_error) { 301 | e = _error; 302 | node = null; 303 | } 304 | if ((node != null ? node.expose : void 0) == null) { 305 | return callback(null); 306 | } 307 | handle_exposure = function(exposure, callback) { 308 | var source_path; 309 | source_path = path_extra.resolve(path_extra.dirname(_this.manifest.meta.location), exposure.src); 310 | return fs.stat(source_path, function(err, stats) { 311 | if (err) { 312 | return callback(new Error("cannot expose " + exposure.src + " because it does not exist")); 313 | } 314 | return callback(null); 315 | }); 316 | }; 317 | return async.eachSeries((function() { 318 | var _ref, _results; 319 | _ref = node.expose; 320 | _results = []; 321 | for (src in _ref) { 322 | dst = _ref[src]; 323 | _results.push({ 324 | src: src, 325 | dst: dst 326 | }); 327 | } 328 | return _results; 329 | })(), handle_exposure, callback); 330 | }; 331 | return async.waterfall([verify_status, obtain_shell_spec, prepare_exposed], function(err, state, address) { 332 | if (err) { 333 | return callback(err); 334 | } 335 | return callback(null); 336 | }); 337 | }; 338 | 339 | _Class.prototype.status = function(node_name, callback) { 340 | /* 341 | Provider-specific method 
for checking the status of a node. 342 | */ 343 | 344 | var client, e, options, 345 | _this = this; 346 | try { 347 | client = this.get_client(node_name); 348 | } catch (_error) { 349 | e = _error; 350 | return callback(this.create_error(e, node_name)); 351 | } 352 | options = { 353 | Filters: [ 354 | { 355 | Name: 'tag:vortex-node-name', 356 | Values: [node_name] 357 | }, { 358 | Name: 'tag:vortex-node-namespace', 359 | Values: [this.extract_namespace(node_name)] 360 | } 361 | ] 362 | }; 363 | logsmith.debug('describe instances with options', options); 364 | return client.describeInstances(options, function(err, result) { 365 | var address, instance, instances, reservation, selected_instance, state, _i, _j, _k, _len, _len1, _len2, _ref, _ref1, _ref2; 366 | if (err) { 367 | return callback(_this.create_error(err, node_name)); 368 | } 369 | instances = []; 370 | _ref = result.Reservations; 371 | for (_i = 0, _len = _ref.length; _i < _len; _i++) { 372 | reservation = _ref[_i]; 373 | _ref1 = reservation.Instances; 374 | for (_j = 0, _len1 = _ref1.length; _j < _len1; _j++) { 375 | instance = _ref1[_j]; 376 | instances.push({ 377 | id: instance.InstanceId, 378 | state: instance.State.Name, 379 | address: instance.PublicDnsName 380 | }); 381 | } 382 | } 383 | if (instances.length === 0) { 384 | return callback(null, 'stopped'); 385 | } 386 | logsmith.debug('discovered instances', instances); 387 | selected_instance = instances[instances.length - 1]; 388 | if (!selected_instance) { 389 | return callback(new Error("could not obtain instance for node " + node_name)); 390 | } 391 | logsmith.debug('selected instance', selected_instance); 392 | for (_k = 0, _len2 = instances.length; _k < _len2; _k++) { 393 | instance = instances[_k]; 394 | if (((_ref2 = instance.state) !== 'shutting-down' && _ref2 !== 'terminated' && _ref2 !== 'stopping' && _ref2 !== 'stopped') && selected_instance !== instance) { 395 | logsmith.warn("duplicate node " + node_name + " with instance id " + 
instance.id + " detected"); 396 | } 397 | } 398 | state = (function() { 399 | switch (selected_instance.state) { 400 | case 'pending': 401 | return 'booting'; 402 | case 'running': 403 | return 'running'; 404 | case 'stopped': 405 | return 'stopped'; 406 | case 'stopping': 407 | return 'halting'; 408 | case 'terminated': 409 | return 'stopped'; 410 | case 'shutting-down': 411 | return 'halting'; 412 | default: 413 | return null; 414 | } 415 | })(); 416 | if (!state) { 417 | return callback(new Error("undefined state for node " + node_name)); 418 | } 419 | logsmith.debug("node " + node_name + " with instance id " + selected_instance.id + " has state " + state); 420 | address = selected_instance.address; 421 | if (!address) { 422 | state = 'booting'; 423 | } 424 | if (state !== 'running') { 425 | address = null; 426 | } 427 | return callback(null, state, address, selected_instance.id); 428 | }); 429 | }; 430 | 431 | _Class.prototype.boot = function(node_name, callback) { 432 | /* 433 | Provider-specific method for booting a node. 
434 | */ 435 | 436 | var client, e, map_tags, run_instance, verify_status, 437 | _this = this; 438 | try { 439 | client = this.get_client(node_name); 440 | } catch (_error) { 441 | e = _error; 442 | return callback(this.create_error(e, node_name)); 443 | } 444 | verify_status = function(callback) { 445 | return _this.status(node_name, function(err, state, address) { 446 | if (err) { 447 | return callback(err); 448 | } 449 | if (state === 'booting') { 450 | return callback(new Error("node " + node_name + " is already booting")); 451 | } 452 | if (state === 'running') { 453 | return callback(new Error("node " + node_name + " is already running")); 454 | } 455 | if (state === 'halting') { 456 | return callback(new Error("node " + node_name + " is halting")); 457 | } 458 | return callback(null); 459 | }); 460 | }; 461 | run_instance = function(callback) { 462 | var options; 463 | options = _this.extract_instance_options(node_name); 464 | options.MinCount = 1; 465 | options.MaxCount = 1; 466 | logsmith.debug('run instances with options', options); 467 | return client.runInstances(options, function(err, result) { 468 | var instance, instances, selected_instance, _i, _j, _len, _len1, _ref; 469 | if (err) { 470 | return callback(_this.create_error(err, node_name)); 471 | } 472 | instances = []; 473 | _ref = result.Instances; 474 | for (_i = 0, _len = _ref.length; _i < _len; _i++) { 475 | instance = _ref[_i]; 476 | instances.push({ 477 | id: instance.InstanceId 478 | }); 479 | } 480 | if (instances.length === 0) { 481 | return callback(new Error("no instances run for node " + node_name)); 482 | } 483 | logsmith.debug('ran instances', instances); 484 | selected_instance = instances[instances.length - 1]; 485 | if (!selected_instance) { 486 | return callback(new Error("could not create instance for node " + node_name)); 487 | } 488 | logsmith.debug('selected instance', selected_instance); 489 | for (_j = 0, _len1 = instances.length; _j < _len1; _j++) { 490 | instance = 
instances[_j]; 491 | if (selected_instance !== instance) { 492 | logsmith.warn("duplicate node " + node_name + " with instance id " + instance_id + " detected"); 493 | } 494 | } 495 | return callback(null, selected_instance.id); 496 | }); 497 | }; 498 | map_tags = function(instance_id, callback) { 499 | var options; 500 | options = { 501 | Resources: [instance_id], 502 | Tags: [ 503 | { 504 | Key: 'vortex-node-name', 505 | Value: node_name 506 | }, { 507 | Key: 'vortex-node-namespace', 508 | Value: _this.extract_namespace(node_name) 509 | } 510 | ] 511 | }; 512 | logsmith.debug('create tags with options', options); 513 | return client.createTags(options, function(err, result) { 514 | if (err) { 515 | return callback(_this.create_error(err, node_name)); 516 | } 517 | return callback(null, instance_id); 518 | }); 519 | }; 520 | return async.waterfall([verify_status, run_instance, map_tags], function(err) { 521 | if (err) { 522 | return callback(err); 523 | } 524 | return _this.status(node_name, callback); 525 | }); 526 | }; 527 | 528 | _Class.prototype.halt = function(node_name, callback) { 529 | /* 530 | Provider-specific method for halting a node. 
531 | */ 532 | 533 | var client, e, terminate_instance, unmap_tags, verify_status, 534 | _this = this; 535 | try { 536 | client = this.get_client(node_name); 537 | } catch (_error) { 538 | e = _error; 539 | return callback(this.create_error(e, node_name)); 540 | } 541 | verify_status = function(callback) { 542 | return _this.status(node_name, function(err, state, address, instance_id) { 543 | if (err) { 544 | return callback(err); 545 | } 546 | if (state === 'halting') { 547 | return callback(new Error("" + node_name + " is already halting")); 548 | } 549 | if (state === 'stopped') { 550 | return callback(new Error("" + node_name + " is already stopped")); 551 | } 552 | return callback(null, instance_id); 553 | }); 554 | }; 555 | terminate_instance = function(instance_id, callback) { 556 | var options; 557 | options = { 558 | InstanceIds: [instance_id] 559 | }; 560 | logsmith.debug('terminate instances with options', options); 561 | return client.terminateInstances(options, function(err, result) { 562 | if (err) { 563 | return callback(_this.create_error(err, node_name)); 564 | } 565 | return callback(null, instance_id); 566 | }); 567 | }; 568 | unmap_tags = function(instance_id, callback) { 569 | var options; 570 | options = { 571 | Resources: [instance_id], 572 | Tags: [ 573 | { 574 | Key: 'vortex-node-name', 575 | Value: node_name 576 | }, { 577 | Key: 'vortex-node-namespace', 578 | Value: _this.extract_namespace(node_name) 579 | } 580 | ] 581 | }; 582 | logsmith.debug('delete tags with options', options); 583 | return client.deleteTags(options, function(err, result) { 584 | if (err) { 585 | return callback(_this.create_error(err, node_name)); 586 | } 587 | return callback(null, instance_id); 588 | }); 589 | }; 590 | return async.waterfall([verify_status, terminate_instance, unmap_tags], function(err) { 591 | if (err) { 592 | return callback(err); 593 | } 594 | return _this.status(node_name, callback); 595 | }); 596 | }; 597 | 598 | _Class.prototype.pause = 
function(node_name, callback) { 599 | /* 600 | Provider-specific method for pausing a machine. 601 | */ 602 | 603 | return callback(new Error("cannot pause node " + node_name + " due to pause not implemented")); 604 | }; 605 | 606 | _Class.prototype.resume = function(node_name, callback) { 607 | /* 608 | Provider-specific method for resuming a machine. 609 | */ 610 | 611 | return callback(new Error("cannot resume node " + node_name + " due to resume not implemented")); 612 | }; 613 | 614 | _Class.prototype.shell_spec = function(node_name, callback) { 615 | /* 616 | Provider-specific method for obtaining a shell spec from a node. 617 | */ 618 | 619 | var build_spec, ensure_port, obtain_status, passphrase, password, private_key, ssh_port, username, 620 | _this = this; 621 | password = this.extract_password(node_name); 622 | private_key = this.extract_private_key(node_name); 623 | if (!password && !private_key) { 624 | return callback(new Error("no password or privateKey provided for node " + node_name)); 625 | } 626 | ssh_port = this.extract_ssh_port(node_name); 627 | if (ssh_port) { 628 | ssh_port = parseInt(ssh_port, 10); 629 | if (isNaN(ssh_port || ssh_port < 1)) { 630 | return callback(new Error("ssh port for node " + node_name + " is incorrect")); 631 | } 632 | } else { 633 | ssh_port = 22; 634 | } 635 | username = this.extract_username(node_name); 636 | if (!username) { 637 | username = 'vortex'; 638 | } 639 | passphrase = this.extract_passphrase(node_name); 640 | obtain_status = function(callback) { 641 | return _this.status(node_name, function(err, state, address) { 642 | if (err) { 643 | return callback(err); 644 | } 645 | if (state === 'halting') { 646 | return callback(new Error("node " + node_name + " is halting")); 647 | } 648 | if (state === 'stopped') { 649 | return callback(new Error("node " + node_name + " is stopped")); 650 | } 651 | if (!address) { 652 | return callback(new Error("cannot find network address for node " + node_name)); 653 | } 654 | 
return callback(null, address); 655 | }); 656 | }; 657 | ensure_port = function(address, callback) { 658 | return portchecker.isOpen(ssh_port, address, function(is_open) { 659 | var callee, milliseconds, timeout; 660 | if (is_open) { 661 | return callback(null, address); 662 | } 663 | callee = arguments.callee; 664 | milliseconds = 10000; 665 | timeout = function() { 666 | return portchecker.isOpen(ssh_port, address, callee); 667 | }; 668 | logsmith.debug("repeat check for ssh port open for node " + node_name + " in " + milliseconds + " milliseconds"); 669 | return setTimeout(timeout, milliseconds); 670 | }); 671 | }; 672 | build_spec = function(address, callback) { 673 | var parts, spec, spec_options; 674 | parts = []; 675 | parts.push('ssh://'); 676 | parts.push(encodeURIComponent(username)); 677 | if (password) { 678 | parts.push(':' + encodeURIComponent(password)); 679 | } 680 | parts.push('@'); 681 | parts.push(address); 682 | parts.push(':' + ssh_port); 683 | if (private_key) { 684 | parts.push(';privateKey=' + encodeURIComponent(private_key)); 685 | } 686 | if (passphrase) { 687 | parts.push(';passphrase=' + encodeURIComponent(passphrase)); 688 | } 689 | spec = parts.join(''); 690 | spec_options = { 691 | username: username, 692 | password: password, 693 | host: address, 694 | port: ssh_port, 695 | privateKey: private_key, 696 | passphrase: passphrase 697 | }; 698 | return callback(null, spec, spec_options); 699 | }; 700 | return async.waterfall([obtain_status, ensure_port, build_spec], callback); 701 | }; 702 | 703 | return _Class; 704 | 705 | })(); 706 | 707 | }).call(this); 708 | -------------------------------------------------------------------------------- /lib/provider_virtualbox.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var async, download, fs, logsmith, path_extra, portchecker, shell_quote, url, vboxmanage; 3 | 4 | fs = require('fs'); 5 | 6 | url = require('url'); 7 | 8 | async = 
require('async'); 9 | 10 | logsmith = require('logsmith'); 11 | 12 | path_extra = require('path-extra'); 13 | 14 | vboxmanage = require('vboxmanage'); 15 | 16 | portchecker = require('portchecker'); 17 | 18 | shell_quote = require('shell-quote'); 19 | 20 | download = require('./download'); 21 | 22 | exports.Provider = (function() { 23 | /* 24 | This class exposes VirtualBox as a provider to Vortex. 25 | */ 26 | 27 | function _Class(manifest) { 28 | this.manifest = manifest; 29 | /* 30 | The provider accepts a manifest as a parameter by specification. 31 | */ 32 | 33 | } 34 | 35 | _Class.prototype.get_node = function(node_name) { 36 | /* 37 | This method returns a node by looking up its name. It throws an error if the node is not found. 38 | */ 39 | 40 | if ((this.manifest.nodes != null) && (this.manifest.nodes[node_name] != null)) { 41 | return this.manifest.nodes[node_name]; 42 | } 43 | throw new Error("node " + node_name + " does not exist"); 44 | }; 45 | 46 | _Class.prototype.extract_property = function(property_name, node_name) { 47 | /* 48 | Extracts a property by looking into a node and upper layers of the manifest. 49 | */ 50 | 51 | var e, node, _ref, _ref1; 52 | try { 53 | node = this.get_node(node_name); 54 | } catch (_error) { 55 | e = _error; 56 | node = null; 57 | } 58 | if ((node != null ? (_ref = node.virtualbox) != null ? _ref[property_name] : void 0 : void 0) != null) { 59 | return node.virtualbox[property_name]; 60 | } 61 | if (((_ref1 = this.manifest.virtualbox) != null ? 
_ref1[property_name] : void 0) != null) { 62 | return this.manifest.virtualbox[property_name]; 63 | } 64 | return null; 65 | }; 66 | 67 | _Class.prototype.extract_vm_id = function(node_name) { 68 | return this.extract_property('vmId', node_name); 69 | }; 70 | 71 | _Class.prototype.extract_vm_url = function(node_name) { 72 | return this.extract_property('vmUrl', node_name); 73 | }; 74 | 75 | _Class.prototype.extract_username = function(node_name) { 76 | return this.extract_property('username', node_name); 77 | }; 78 | 79 | _Class.prototype.extract_password = function(node_name) { 80 | return this.extract_property('password', node_name); 81 | }; 82 | 83 | _Class.prototype.extract_private_key = function(node_name) { 84 | return this.extract_property('privateKey', node_name); 85 | }; 86 | 87 | _Class.prototype.extract_passphrase = function(node_name) { 88 | return this.extract_property('passphrase', node_name); 89 | }; 90 | 91 | _Class.prototype.extract_ssh_port = function(node_name) { 92 | return this.extract_property('sshPort', node_name); 93 | }; 94 | 95 | _Class.prototype.extract_namespace = function(node_name) { 96 | /* 97 | Extracts a namespace by looking it up in the node itself and upper layers of the manifest 98 | */ 99 | 100 | var node; 101 | try { 102 | node = this.get_node(node_name); 103 | } catch (_error) { 104 | node = null; 105 | } 106 | if ((node != null ? node.namespace : void 0) != null) { 107 | return node.namespace; 108 | } 109 | if (this.manifest.namespace != null) { 110 | return this.manifest.namespace; 111 | } 112 | }; 113 | 114 | _Class.prototype.get_node_handle = function(node_name) { 115 | /* 116 | Creates a VirtualBox friendlier name out of a node name. The method take into account the namespace. 117 | */ 118 | 119 | var namespace; 120 | namespace = this.extract_namespace(node_name); 121 | return (namespace ? 
namespace + ':' : '') + node_name; 122 | }; 123 | 124 | _Class.prototype.get_share_handle = function(share_name) { 125 | /* 126 | Creates a VirtualBox friendlier name out of a share name. 127 | */ 128 | 129 | return share_name.replace(/[^\w]+/, '_').replace(/_+/, '_'); 130 | }; 131 | 132 | _Class.prototype.schedule_import = function(vm_url, vm_id, callback) { 133 | /* 134 | Schedules import operation. The function will check if the vm_id exists before execution. 135 | */ 136 | 137 | var task, 138 | _this = this; 139 | if (this.import_queue == null) { 140 | this.import_queue = async.queue(function(task, callback) { 141 | return vboxmanage.machine.info(task.vm_id, function(err, info) { 142 | if (!err) { 143 | return callback(null); 144 | } 145 | return _this.perform_import(task.vm_url, task.vm_id, callback); 146 | }); 147 | }); 148 | } 149 | task = { 150 | vm_url: vm_url, 151 | vm_id: vm_id 152 | }; 153 | return this.import_queue.push(task, callback); 154 | }; 155 | 156 | _Class.prototype.perform_import = function(vm_url, vm_id, callback) { 157 | /* 158 | Performs import operation. 
159 | */ 160 | 161 | var local_name, local_path, spec, _ref; 162 | logsmith.debug("import " + vm_url + " into " + vm_id); 163 | try { 164 | spec = url.parse(vm_url); 165 | } catch (_error) { 166 | return callback(new Error("cannot parse url " + vm_url)); 167 | } 168 | if ((_ref = spec.protocol) !== 'file:' && _ref !== 'http:' && _ref !== 'https:') { 169 | return callback(new Error("unsupported scheme for url " + vm_url)); 170 | } 171 | if (spec.protocol === 'file') { 172 | if (!spec.host) { 173 | local_path = spec.pathname; 174 | } else { 175 | local_path = path_extra.resolve(path_extra.dirname(this.manifest.meta.location), path_extra.join(spec.host, spec.pathname)); 176 | } 177 | return vboxmanage.machine["import"](local_path, vm_id, callback); 178 | } else { 179 | local_name = (new Date()).getTime() + '-' + path_extra.basename(spec.pathname); 180 | local_path = path_extra.join(path_extra.tempdir(), local_name); 181 | return download.get(vm_url, local_path, function(err) { 182 | if (err) { 183 | fs.unlink(local_path, function(err) { 184 | if (err) { 185 | return logsmith.exception(err); 186 | } 187 | }); 188 | return callback(err); 189 | } 190 | return vboxmanage.machine["import"](local_path, vm_id, function(err) { 191 | fs.unlink(local_path, function(err) { 192 | if (err) { 193 | return logmisth.exception(err); 194 | } 195 | }); 196 | if (err) { 197 | return callback(err); 198 | } 199 | return callback(null); 200 | }); 201 | }); 202 | } 203 | }; 204 | 205 | _Class.prototype.bootstrap = function(node_name, callback) { 206 | /* 207 | Provider-specific method for bootstrapping a node. 208 | */ 209 | 210 | var commands, node_handle, prepare_exposed, run_commands, verify_status, 211 | _this = this; 212 | commands = ['sudo mkdir -p /etc/vortex/flags/', 'sudo chmod a+rx /etc/vortex/flags/', '[ ! -f /etc/vortex/flags/network_ready ] && sudo ifconfig eth1 0.0.0.0 0.0.0.0', '[ ! -f /etc/vortex/flags/network_ready ] && sudo ifconfig eth2 0.0.0.0 0.0.0.0', '[ ! 
-f /etc/vortex/flags/network_ready ] && sudo dhclient -r eth1 eth2', '[ ! -f /etc/vortex/flags/network_ready ] && sudo dhclient eth1 eth2', '[ ! -f /etc/vortex/flags/network_ready ] && sudo touch /etc/vortex/flags/network_ready']; 213 | node_handle = this.get_node_handle(node_name); 214 | verify_status = function(callback) { 215 | return _this.status(node_name, function(err, state, address) { 216 | if (err) { 217 | return callback(err); 218 | } 219 | if (state !== 'running') { 220 | return callback(new Error("node " + node_name + " is not ready")); 221 | } 222 | return callback(null); 223 | }); 224 | }; 225 | prepare_exposed = function(callback) { 226 | var dst, e, handle_exposure, node, src; 227 | try { 228 | node = _this.get_node(node_name); 229 | } catch (_error) { 230 | e = _error; 231 | node = null; 232 | } 233 | if ((node != null ? node.expose : void 0) == null) { 234 | return callback(null); 235 | } 236 | handle_exposure = function(exposure, callback) { 237 | var source_path; 238 | source_path = path_extra.resolve(path_extra.dirname(_this.manifest.meta.location), exposure.src); 239 | return fs.stat(source_path, function(err, stats) { 240 | var share_handle; 241 | if (err) { 242 | return callback(new Error("cannot expose " + exposure.src + " because it does not exist")); 243 | } 244 | if (stats.isDirectory()) { 245 | share_handle = _this.get_share_handle(exposure.dst); 246 | commands.push(shell_quote.quote(['sudo', 'mkdir', '-p', exposure.dst])); 247 | commands.push(shell_quote.quote(['sudo', 'mount.vboxsf', share_handle, exposure.dst, '-o', 'rw'])); 248 | return callback(null); 249 | } else { 250 | return vboxmanage.instance.copy_from(source_path, exposure.dst, callback); 251 | } 252 | }); 253 | }; 254 | return async.eachSeries((function() { 255 | var _ref, _results; 256 | _ref = node.expose; 257 | _results = []; 258 | for (src in _ref) { 259 | dst = _ref[src]; 260 | _results.push({ 261 | src: src, 262 | dst: dst 263 | }); 264 | } 265 | return _results; 266 
| })(), handle_exposure, callback); 267 | }; 268 | run_commands = function(callback) { 269 | var run_command; 270 | run_command = function(command, callback) { 271 | return vboxmanage.instance.exec(node_handle, 'vortex', 'vortex', '/bin/sh', '-c', command, function(err, output) { 272 | var _ref; 273 | if (err) { 274 | return callback(err); 275 | } 276 | if ((_ref = logsmith.level) === 'verbose' || _ref === 'debug' || _ref === 'silly') { 277 | process.stdout.write(output); 278 | } 279 | return callback(null); 280 | }); 281 | }; 282 | return async.eachSeries(commands, run_command, callback); 283 | }; 284 | return async.waterfall([verify_status, prepare_exposed, run_commands], function(err, state, address) { 285 | if (err) { 286 | return callback(err); 287 | } 288 | return callback(null); 289 | }); 290 | }; 291 | 292 | _Class.prototype.status = function(node_name, callback) { 293 | /* 294 | Provider-specific method for checking the status of a node. 295 | */ 296 | 297 | var node_handle, obtain_machine_address, obtain_machine_state; 298 | node_handle = this.get_node_handle(node_name); 299 | obtain_machine_state = function(callback) { 300 | return vboxmanage.machine.info(node_handle, function(err, info) { 301 | var state; 302 | if (err) { 303 | return callback(null, 'stopped'); 304 | } 305 | state = info.VMState.toLowerCase(); 306 | switch (state) { 307 | case 'saved': 308 | state = 'paused'; 309 | break; 310 | case 'paused': 311 | state = 'paused'; 312 | break; 313 | case 'running': 314 | state = 'running'; 315 | break; 316 | case 'starting': 317 | state = 'booting'; 318 | break; 319 | case 'powered off': 320 | state = 'stopped'; 321 | break; 322 | case 'guru meditation': 323 | state = 'paused'; 324 | } 325 | return callback(null, state); 326 | }); 327 | }; 328 | obtain_machine_address = function(state, callback) { 329 | return vboxmanage.adaptors.list(node_handle, function(err, adaptors) { 330 | var address, e; 331 | if (err) { 332 | return callback(null, 'stopped', 
address); 333 | } 334 | try { 335 | address = adaptors['Adaptor 1'].V4.IP; 336 | } catch (_error) { 337 | e = _error; 338 | address = null; 339 | state = 'booting'; 340 | } 341 | return callback(null, state, address); 342 | }); 343 | }; 344 | return async.waterfall([obtain_machine_state, obtain_machine_address], function(err, state, address) { 345 | if (err) { 346 | return callback(err); 347 | } 348 | return callback(null, state, address); 349 | }); 350 | }; 351 | 352 | _Class.prototype.boot = function(node_name, callback) { 353 | /* 354 | Provider-specific method for booting a node. 355 | */ 356 | 357 | var attemp_to_remove_vm, clone_vm, ensure_networking, ensure_vm_id, node_handle, setup_vm, start_vm, verify_status, vm_id, 358 | _this = this; 359 | vm_id = this.extract_vm_id(node_name); 360 | if (!vm_id) { 361 | return callback(new Error('no virtualbox "vmId" paramter specified for node')); 362 | } 363 | node_handle = this.get_node_handle(node_name); 364 | verify_status = function(callback) { 365 | return _this.status(node_name, function(err, state, address) { 366 | if (err) { 367 | return callback(err); 368 | } 369 | if (state === 'booting') { 370 | return callback(new Error("node " + node_name + " is already booting")); 371 | } 372 | if (state === 'running') { 373 | return callback(new Error("node " + node_name + " is already running")); 374 | } 375 | if (state === 'halting') { 376 | return callback(new Error("node " + node_name + " is halting")); 377 | } 378 | if (state === 'paused') { 379 | return callback(new Error("node " + node_name + " is paused")); 380 | } 381 | return callback(null); 382 | }); 383 | }; 384 | attemp_to_remove_vm = function(callback) { 385 | return vboxmanage.machine.remove(node_handle, function(err) { 386 | if (err) { 387 | logsmith.exception(err); 388 | } 389 | return callback(null); 390 | }); 391 | }; 392 | ensure_vm_id = function(callback) { 393 | return vboxmanage.machine.info(vm_id, function(err, info) { 394 | var vm_url; 395 | if 
(!err) { 396 | return callback(null); 397 | } 398 | vm_url = _this.extract_vm_url(node_name); 399 | if (vm_url == null) { 400 | return callback(new Error('no virtualbox "vmUrl" paramter specified for node')); 401 | } 402 | return _this.schedule_import(vm_url, vm_id, callback); 403 | }); 404 | }; 405 | clone_vm = function(callback) { 406 | return vboxmanage.machine.clone(vm_id, node_handle, callback); 407 | }; 408 | ensure_networking = function(callback) { 409 | var config; 410 | config = { 411 | network: { 412 | hostonly: { 413 | vboxnet5: { 414 | ip: '10.100.100.1', 415 | netmask: '255.255.255.0', 416 | dhcp: { 417 | lower_ip: '10.100.100.101', 418 | upper_ip: '10.100.100.254' 419 | } 420 | } 421 | }, 422 | internal: { 423 | vortex: { 424 | ip: '10.200.200.1', 425 | netmask: '255.255.255.0', 426 | dhcp: { 427 | lower_ip: '10.200.200.101', 428 | upper_ip: '10.200.200.254' 429 | } 430 | } 431 | } 432 | } 433 | }; 434 | return vboxmanage.setup.system(config, callback); 435 | }; 436 | setup_vm = function(callback) { 437 | var config, dst, e, node, share_handle, src, _ref; 438 | config = { 439 | network: { 440 | adaptors: [ 441 | { 442 | type: 'hostonly', 443 | network: 'vboxnet5' 444 | }, { 445 | type: 'internal', 446 | network: 'vortex' 447 | }, { 448 | type: 'nat' 449 | } 450 | ] 451 | }, 452 | shares: {} 453 | }; 454 | try { 455 | node = _this.get_node(node_name); 456 | } catch (_error) { 457 | e = _error; 458 | return callback(e); 459 | } 460 | if (node.expose != null) { 461 | _ref = node.expose; 462 | for (src in _ref) { 463 | dst = _ref[src]; 464 | src = path_extra.resolve(path_extra.dirname(_this.manifest.meta.location), src); 465 | share_handle = _this.get_share_handle(dst); 466 | config.shares[share_handle] = src; 467 | } 468 | } 469 | return vboxmanage.setup.machine(node_handle, config, callback); 470 | }; 471 | start_vm = function(callback) { 472 | return vboxmanage.instance.start(node_handle, callback); 473 | }; 474 | return async.waterfall([verify_status, 
attemp_to_remove_vm, ensure_vm_id, clone_vm, ensure_networking, setup_vm, start_vm], function(err) { 475 | if (err) { 476 | return callback(err); 477 | } 478 | return _this.status(node_name, callback); 479 | }); 480 | }; 481 | 482 | _Class.prototype.halt = function(node_name, callback) { 483 | /* 484 | Provider-specific method for halting a node. 485 | */ 486 | 487 | var attempt_to_remove_vm, attempt_to_stop_vm, node_handle, verify_status, 488 | _this = this; 489 | node_handle = this.get_node_handle(node_name); 490 | verify_status = function(callback) { 491 | return _this.status(node_name, function(err, state, address) { 492 | if (err) { 493 | return callback(err); 494 | } 495 | if (state === 'halting') { 496 | return callback(new Error("" + node_name + " is already halting")); 497 | } 498 | if (state === 'stopped') { 499 | return callback(new Error("" + node_name + " is already stopped")); 500 | } 501 | return callback(null); 502 | }); 503 | }; 504 | attempt_to_stop_vm = function(callback) { 505 | return vboxmanage.instance.stop(node_handle, function(err) { 506 | if (err) { 507 | logsmith.exception(err); 508 | } 509 | return callback(null); 510 | }); 511 | }; 512 | attempt_to_remove_vm = function(callback) { 513 | return vboxmanage.machine.remove(node_handle, function(err) { 514 | if (err) { 515 | logsmith.exception(err); 516 | } 517 | return callback(null); 518 | }); 519 | }; 520 | return async.waterfall([verify_status, attempt_to_stop_vm, attempt_to_remove_vm], function(err) { 521 | if (err) { 522 | return callback(err); 523 | } 524 | return _this.status(node_name, callback); 525 | }); 526 | }; 527 | 528 | _Class.prototype.pause = function(node_name, callback) { 529 | /* 530 | Provider-specific method for pausing a machine. 
531 | */ 532 | 533 | var node_handle, pause_vm, verify_status, 534 | _this = this; 535 | node_handle = this.get_node_handle(node_name); 536 | verify_status = function(callback) { 537 | return _this.status(node_name, function(err, state, address) { 538 | if (err) { 539 | return callback(err); 540 | } 541 | if (state === 'paused') { 542 | return callback(new Error("" + node_name + " is already paused")); 543 | } 544 | if (state === 'halting') { 545 | return callback(new Error("" + node_name + " is halting")); 546 | } 547 | if (state === 'stopped') { 548 | return callback(new Error("" + node_name + " is stopped")); 549 | } 550 | return callback(null); 551 | }); 552 | }; 553 | pause_vm = function(callback) { 554 | return vboxmanage.instance.save(node_handle, callback); 555 | }; 556 | return async.waterfall([verify_status, pause_vm], function(err) { 557 | if (err) { 558 | return callback(err); 559 | } 560 | return _this.status(node_name, callback); 561 | }); 562 | }; 563 | 564 | _Class.prototype.resume = function(node_name, callback) { 565 | /* 566 | Provider-specific method for resuming a machine. 
567 | */ 568 | 569 | var attempt_resume_vm, attempt_start_vm, node_handle, verify_status, 570 | _this = this; 571 | node_handle = this.get_node_handle(node_name); 572 | verify_status = function(callback) { 573 | return _this.status(node_name, function(err, state, address) { 574 | if (err) { 575 | return callback(err); 576 | } 577 | if (state === 'booting') { 578 | return callback(new Error("" + node_name + " is already booting")); 579 | } 580 | if (state === 'running') { 581 | return callback(new Error("" + node_name + " is already running")); 582 | } 583 | if (state === 'halting') { 584 | return callback(new Error("" + node_name + " is halting")); 585 | } 586 | if (state === 'stopped') { 587 | return callback(new Error("" + node_name + " is stopped")); 588 | } 589 | return callback(null); 590 | }); 591 | }; 592 | attempt_start_vm = function(callback) { 593 | return vboxmanage.instance.start(node_handle, function(err) { 594 | if (err) { 595 | logsmith.exception(err); 596 | } 597 | return callback(null); 598 | }); 599 | }; 600 | attempt_resume_vm = function(callback) { 601 | return vboxmanage.instance.resume(node_handle, function(err) { 602 | if (err) { 603 | logsmith.exception(err); 604 | } 605 | return callback(null); 606 | }); 607 | }; 608 | return async.waterfall([verify_status, attempt_start_vm, attempt_resume_vm], function(err) { 609 | if (err) { 610 | return callback(err); 611 | } 612 | return _this.status(node_name, callback); 613 | }); 614 | }; 615 | 616 | _Class.prototype.shell_spec = function(node_name, callback) { 617 | /* 618 | Provider-specific method for obtaining a shell spec from a node. 
619 | */ 620 | 621 | var build_spec, ensure_port, obtain_status, passphrase, password, private_key, ssh_port, username, 622 | _this = this; 623 | password = this.extract_password(node_name); 624 | private_key = this.extract_private_key(node_name); 625 | if (!password && !private_key) { 626 | return callback(new Error("no password or privateKey provided for node " + node_name)); 627 | } 628 | ssh_port = this.extract_ssh_port(node_name); 629 | if (ssh_port) { 630 | ssh_port = parseInt(ssh_port, 10); 631 | if (isNaN(ssh_port || ssh_port < 1)) { 632 | return callback(new Error("ssh port for node " + node_name + " is incorrect")); 633 | } 634 | } else { 635 | ssh_port = 22; 636 | } 637 | username = this.extract_username(node_name); 638 | if (!username) { 639 | username = 'vortex'; 640 | } 641 | passphrase = this.extract_passphrase(node_name); 642 | obtain_status = function(callback) { 643 | return _this.status(node_name, function(err, state, address) { 644 | if (err) { 645 | return callback(err); 646 | } 647 | if (state === 'halting') { 648 | return callback(new Error("node " + node_name + " is halting")); 649 | } 650 | if (state === 'stopped') { 651 | return callback(new Error("node " + node_name + " is stopped")); 652 | } 653 | if (!address) { 654 | return callback(new Error("cannot find network address for node " + node_name)); 655 | } 656 | return callback(null, address); 657 | }); 658 | }; 659 | ensure_port = function(address, callback) { 660 | return portchecker.isOpen(ssh_port, address, function(is_open) { 661 | var callee, milliseconds, timeout; 662 | if (is_open) { 663 | return callback(null, address); 664 | } 665 | callee = arguments.callee; 666 | milliseconds = 10000; 667 | timeout = function() { 668 | return portchecker.isOpen(ssh_port, address, callee); 669 | }; 670 | logsmith.debug("repeat check for ssh port open for node " + node_name + " in " + milliseconds + " milliseconds"); 671 | return setTimeout(timeout, milliseconds); 672 | }); 673 | }; 674 | 
build_spec = function(address, callback) { 675 | var parts, spec, spec_options; 676 | parts = []; 677 | parts.push('ssh://'); 678 | parts.push(encodeURIComponent(username)); 679 | if (password) { 680 | parts.push(':' + encodeURIComponent(password)); 681 | } 682 | parts.push('@'); 683 | parts.push(address); 684 | parts.push(':' + ssh_port); 685 | if (private_key) { 686 | parts.push(';privateKey=' + encodeURIComponent(private_key)); 687 | } 688 | if (passphrase) { 689 | parts.push(';passphrase=' + encodeURIComponent(passphrase)); 690 | } 691 | spec = parts.join(''); 692 | spec_options = { 693 | username: username, 694 | password: password, 695 | host: address, 696 | port: ssh_port, 697 | privateKey: private_key, 698 | passphrase: passphrase 699 | }; 700 | return callback(null, spec, spec_options); 701 | }; 702 | return async.waterfall([obtain_status, ensure_port, build_spec], callback); 703 | }; 704 | 705 | return _Class; 706 | 707 | })(); 708 | 709 | }).call(this); 710 | -------------------------------------------------------------------------------- /lib/providers.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var instances; 3 | 4 | exports.amazon = require('./provider_amazon').Provider; 5 | 6 | exports.virtualbox = require('./provider_virtualbox').Provider; 7 | 8 | instances = {}; 9 | 10 | exports.instance = function(name, manifest) { 11 | /* 12 | Gets a single instance of a Provider. The method essentially provides a way of getting singleton instances. 
13 | */ 14 | 15 | var nice_name; 16 | nice_name = name.toLowerCase(); 17 | if (instances[nice_name] == null) { 18 | if ((exports[nice_name] != null) && nice_name !== 'instance') { 19 | instances[nice_name] = new exports[nice_name](manifest); 20 | instances[nice_name].name = nice_name; 21 | } else { 22 | throw new Error("provider " + name + " is not found"); 23 | } 24 | } 25 | return instances[nice_name]; 26 | }; 27 | 28 | }).call(this); 29 | -------------------------------------------------------------------------------- /lib/shell.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | var roost, _ref, 3 | __hasProp = {}.hasOwnProperty, 4 | __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }; 5 | 6 | roost = require('roost'); 7 | 8 | exports.Ssh = (function(_super) { 9 | __extends(_Class, _super); 10 | 11 | function _Class() { 12 | _ref = _Class.__super__.constructor.apply(this, arguments); 13 | return _ref; 14 | } 15 | 16 | /* 17 | This is a helper class for launching shells. 18 | */ 19 | 20 | 21 | _Class.prototype.shell = function() { 22 | /* 23 | Setup a shell. 
24 | */ 25 | 26 | var _this = this; 27 | return this.step(function(callback) { 28 | return _this.ssh2.shell({ 29 | term: process.env['TERM'], 30 | rows: process.stdout.rows, 31 | cols: process.stdout.columns 32 | }, function(err, stream) { 33 | var deinit, init, on_resize; 34 | if (err) { 35 | return callback(err); 36 | } 37 | on_resize = function() { 38 | return stream.setWindow(process.stdout.rows, process.stdout.columns); 39 | }; 40 | init = function() { 41 | process.stdin.setRawMode(true); 42 | process.stdout.on('resize', on_resize); 43 | process.stdin.pipe(stream); 44 | return stream.pipe(process.stdout, { 45 | end: false 46 | }); 47 | }; 48 | deinit = function() { 49 | process.stdin.unpipe(stream); 50 | process.stdout.removeListener('resize', on_resize); 51 | return process.stdin.setRawMode(false); 52 | }; 53 | process.stdin.on('error', function(error) { 54 | deinit(); 55 | return callback(error); 56 | }); 57 | process.stdin.on('end', function() { 58 | deinit(); 59 | return callback(null); 60 | }); 61 | stream.on('error', function(error) { 62 | deinit(); 63 | return callback(error); 64 | }); 65 | stream.on('end', function() { 66 | deinit(); 67 | return callback(null); 68 | }); 69 | return init(); 70 | }); 71 | }); 72 | }; 73 | 74 | return _Class; 75 | 76 | })(roost.target_ssh.Target); 77 | 78 | }).call(this); 79 | -------------------------------------------------------------------------------- /lib/vortex.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | exports.main = function(argv) { 3 | var e, engine, exit_code, failure, logsmith, manifest, node_getopt, opt, plugins, provider_action, providers, vortex_location, vortex_manifest, vortex_plugins, vortex_provider, _ref; 4 | if (argv == null) { 5 | argv = process.argv.slice(2); 6 | } 7 | /* 8 | Launches Vortex command line tool. It can be used for embedding. 
9 | */ 10 | 11 | logsmith = require('logsmith'); 12 | node_getopt = require('node-getopt'); 13 | engine = require('./engine'); 14 | plugins = require('./plugins'); 15 | manifest = require('./manifest'); 16 | providers = require('./providers'); 17 | opt = node_getopt.create([['f', 'file=ARG', 'Specify the root of a vortex project or a vortex manifest.'], ['p', 'provider=ARG', 'Specify a default provider.'], ['d', 'dry', 'Dry run the roost manifest.'], ['v', 'verbose+', 'Make it verbose.'], ['c', 'colorize', 'Make it pretty.'], ['V', 'version', 'Shows version.'], ['h', 'help', 'Display this help.']]); 18 | opt = opt.bindHelp(); 19 | opt = opt.parse(argv); 20 | if (opt.options.verbose != null) { 21 | logsmith.setGlobalLevel(3 - (opt.options.verbose.length < 3 ? opt.options.verbose.length : 3)); 22 | } 23 | if (opt.options.colorize != null) { 24 | logsmith.setGlobalColorization(opt.options.colorize); 25 | } 26 | if (opt.options.version) { 27 | logsmith.info(require('../package.json').version); 28 | return; 29 | } 30 | exit_code = 0; 31 | failure = function(err) { 32 | logsmith.exception(err); 33 | logsmith.error(err.message); 34 | return process.exit(++exit_code); 35 | }; 36 | try { 37 | vortex_location = manifest.locate(opt.options.file); 38 | } catch (_error) { 39 | e = _error; 40 | failure(e); 41 | } 42 | try { 43 | vortex_manifest = manifest.load(vortex_location); 44 | } catch (_error) { 45 | e = _error; 46 | failure(e); 47 | } 48 | try { 49 | vortex_plugins = plugins.obtain(vortex_manifest); 50 | } catch (_error) { 51 | e = _error; 52 | failure(e); 53 | } 54 | try { 55 | if (opt.options.provider) { 56 | vortex_provider = providers.instance(opt.options.provider, vortex_manifest); 57 | } else { 58 | vortex_provider = providers.instance('VirtualBox', vortex_manifest); 59 | } 60 | } catch (_error) { 61 | e = _error; 62 | failure(e); 63 | } 64 | provider_action = (_ref = opt.argv[0]) != null ? 
_ref : 'status'; 65 | return engine.launch(opt, vortex_manifest, vortex_plugins, vortex_provider, provider_action, function(err) { 66 | if (err) { 67 | return failure(err); 68 | } 69 | }); 70 | }; 71 | 72 | if (require.main === module) { 73 | exports.main(); 74 | } 75 | 76 | }).call(this); 77 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "vortex", 3 | "version": "1.0.12", 4 | "author": "Websecurify ", 5 | "description": "Virtual machine management toolkit.", 6 | "license": "MIT", 7 | 8 | "repository": { 9 | "type": "git", 10 | "url": "git://github.com/websecurify/node-vortex.git" 11 | }, 12 | 13 | "keywords": [ 14 | "virtual", 15 | "virtualization", 16 | "amazon", 17 | "virtualbox" 18 | ], 19 | 20 | "main": "./lib/index.js", 21 | 22 | "bin": { 23 | "vortex": "./bin/vortex" 24 | }, 25 | 26 | "directories": { 27 | "lib": "./lib/" 28 | }, 29 | 30 | "dependencies": { 31 | "portchecker": "0.1.2", 32 | "node-getopt": "0.2.3", 33 | "shell-quote": "1.3.3", 34 | "path-extra": "0.1.1", 35 | "progress2": "0.1.1", 36 | "aws-sdk": "1.4.0", 37 | "async": "0.2.9", 38 | "vboxmanage": "0.0.15", 39 | "logsmith": "0.0.2", 40 | "roost": "1.0.12" 41 | }, 42 | 43 | "devDependencies": { 44 | "grunt": "0.4.1", 45 | "grunt-contrib-watch": "0.5.3", 46 | "grunt-contrib-coffee": "0.7.0" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/actions.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | path = require 'path' 3 | async = require 'async' 4 | roost = require 'roost' 5 | logsmith = require 'logsmith' 6 | shell_quote = require 'shell-quote' 7 | child_process = require 'child_process' 8 | 9 | # --- 10 | 11 | shell = require './shell' 12 | 13 | # --- 14 | 15 | exports.actions = (opt, manifest, provider, node_name, callback) -> 
16 | ### 17 | Prints out the available actions. 18 | ### 19 | 20 | for action_name, action_fn of exports 21 | desc = action_fn.toString().split('\n').slice(2, 3)[0]?.trim() 22 | 23 | logsmith.info action_name, '-', desc 24 | 25 | # --- 26 | 27 | exports.status = (opt, manifest, provider, node_names, callback) -> 28 | ### 29 | Obtains state and network address if the selected node is running. 30 | ### 31 | 32 | # 33 | # Call provider's status for each node. 34 | # 35 | process_node = (node_name, callback) -> 36 | logsmith.verbose "query status for node #{node_name}" 37 | 38 | provider.status node_name, (err, state, address) -> 39 | return callback err if err 40 | 41 | args = ['node', node_name, 'is', state] 42 | 43 | if address 44 | args.push 'at' 45 | args.push address 46 | 47 | logsmith.info args... 48 | 49 | return callback null 50 | 51 | async.eachSeries node_names, process_node, callback 52 | 53 | # --- 54 | 55 | exports.shellspec = (opt, manifest, provider, node_names, callback) -> 56 | ### 57 | Obtains the shell specification (typically ssh url) for the selected node. 58 | ### 59 | 60 | # 61 | # Call provier's shell_spec for each node. 62 | # 63 | process_node = (node_name, callback) -> 64 | logsmith.verbose "query shell spec for node #{node_name}" 65 | 66 | provider.shell_spec node_name, (err, spec) -> 67 | return callback err if err 68 | 69 | logsmith.info node_name, '->', spec 70 | 71 | return callback null, spec 72 | 73 | async.eachSeries node_names, process_node, callback 74 | 75 | # --- 76 | 77 | exports.boot = (opt, manifest, provider, node_names, callback) -> 78 | ### 79 | Ensures that the node is running. 80 | ### 81 | 82 | # 83 | # Call provider's boot for each node. 
84 | # 85 | process_node = (node_name, callback) -> 86 | logsmith.verbose "boot node #{node_name}" 87 | 88 | provider.boot node_name, (err, state, address) -> 89 | logsmith.error err.message if err 90 | 91 | return callback null if err 92 | 93 | args = ['node', node_name, 'is', state] 94 | 95 | if address 96 | args.push 'at' 97 | args.push address 98 | 99 | logsmith.info args... 100 | 101 | return callback null 102 | 103 | async.eachSeries node_names, process_node, callback 104 | 105 | # --- 106 | 107 | exports.halt = (opt, manifest, provider, node_names, callback) -> 108 | ### 109 | Ensures that the node is stopped. 110 | ### 111 | 112 | # 113 | # Call provider's halt for each node. 114 | # 115 | process_node = (node_name, callback) -> 116 | logsmith.verbose "halt node #{node_name}" 117 | 118 | provider.halt node_name, (err, state, address) -> 119 | logsmith.error err.message if err 120 | 121 | return callback null if err 122 | 123 | args = ['node', node_name, 'is', state] 124 | 125 | if address 126 | args.push 'at' 127 | args.push address 128 | 129 | logsmith.info args... 130 | 131 | return callback null 132 | 133 | async.eachSeries node_names, process_node, callback 134 | 135 | # --- 136 | 137 | exports.pause = (opt, manifest, provider, node_names, callback) -> 138 | ### 139 | Ensures that the node is paused. 140 | ### 141 | 142 | # 143 | # Call provider's pause for each node. 144 | # 145 | process_node = (node_name, callback) -> 146 | logsmith.verbose "pause node #{node_name}" 147 | 148 | provider.pause node_name, (err, state, address) -> 149 | logsmith.error err.message if err 150 | 151 | return callback null if err 152 | 153 | args = ['node', node_name, 'is', state] 154 | 155 | if address 156 | args.push 'at' 157 | args.push address 158 | 159 | logsmith.info args... 
160 | 161 | return callback null 162 | 163 | async.eachSeries node_names, process_node, callback 164 | 165 | # --- 166 | 167 | exports.resume = (opt, manifest, provider, node_names, callback) -> 168 | ### 169 | Ensures that the node is resumed. 170 | ### 171 | 172 | # 173 | # Call provider's resume for each node. 174 | # 175 | process_node = (node_name, callback) -> 176 | logsmith.verbose "resume node #{node_name}" 177 | 178 | provider.resume node_name, (err, state, address) -> 179 | logsmith.error err.message if err 180 | 181 | return callback null if err 182 | 183 | args = ['node', node_name, 'is', state] 184 | 185 | if address 186 | args.push 'at' 187 | args.push address 188 | 189 | logsmith.info args... 190 | 191 | return callback null 192 | 193 | async.eachSeries node_names, process_node, callback 194 | 195 | # --- 196 | 197 | exports.restart = (opt, manifest, provider, node_names, callback) -> 198 | ### 199 | Chains actions halt and then boot for every node. 200 | ### 201 | 202 | actions = [] 203 | 204 | # 205 | # Invoke the halt method. 206 | # 207 | actions.push (node_name, callback) -> 208 | exports.halt opt, manifest, provider, [node_name], (err) -> 209 | return callback err if err 210 | return callback null, node_name 211 | 212 | # 213 | # Invoke the boot method. 214 | # 215 | actions.push (node_name, callback) -> 216 | exports.boot opt, manifest, provider, [node_name], (err) -> 217 | return callback err if err 218 | return callback null, node_name 219 | 220 | # 221 | # Invoke actions. 222 | # 223 | process_node = (node_name, callback) -> 224 | logsmith.verbose "restart node #{node_name}" 225 | 226 | current_actions = [((callback) -> callback null, node_name), actions...] 227 | 228 | async.waterfall current_actions, callback 229 | 230 | async.eachSeries node_names, process_node, callback 231 | 232 | # --- 233 | 234 | exports.provision = (opt, manifest, provider, node_names, callback) -> 235 | ### 236 | Starts the provisioner on the selected node. 
237 | ### 238 | 239 | actions = [] 240 | 241 | # 242 | # Merges two objects. 243 | # 244 | merge_objects = (a, b) -> 245 | for key, value of b 246 | if a[key]? 247 | a[key] = switch 248 | when Array.isArray a[key] then a[key].concat b[key] 249 | when typeof a[key] == 'number' or a[key] instanceof Number then b[key] 250 | when typeof a[key] == 'string' or a[key] instanceof String then b[key] 251 | when typeof a[key] == 'boolean' or a[key] instanceof Boolean then b[key] 252 | else arguments.callee a[key], b[key] 253 | else 254 | a[key] = b[key] 255 | 256 | return a 257 | 258 | # 259 | # Merges roost configs. 260 | # 261 | merge_roost = (manifest, configs) -> 262 | return null if configs.length == 0 263 | 264 | return configs 265 | .map(((config) -> 266 | if typeof(config) == 'string' || config instanceof String 267 | return roost.manifest.load path.resolve(path.dirname(manifest.meta.location), config) 268 | else 269 | return config 270 | )) 271 | .reduce(((previous_value, current_value) -> 272 | return JSON.parse JSON.stringify(current_value) if not previous_value 273 | 274 | if current_value.merge? and current_value.merge 275 | return merge_objects previous_value, current_value 276 | else 277 | return current_value 278 | ), null) 279 | 280 | # 281 | # Call provider's bootstrap method first. 282 | # 283 | actions.push (node_name, callback) -> 284 | provider.bootstrap node_name, (err) -> 285 | return callback err if err 286 | return callback null, node_name 287 | 288 | # 289 | # Start configuring roost. 290 | # 291 | actions.push (node_name, callback) -> 292 | node_manifest = manifest.nodes[node_name] 293 | merge_configs = [] 294 | 295 | merge_configs.push manifest.roost if manifest.roost? 296 | merge_configs.push node_manifest.roost if node_manifest.roost? 297 | merge_configs.push node_manifest[provider.name].roost if node_manifest[provider.name]?.roost? 
298 | 299 | roost_manifest = merge_roost manifest, merge_configs 300 | 301 | return callback new Error "no roost configuration defined for node #{node_name}" if not roost_manifest 302 | 303 | if merge_configs.length > 0 and not roost_manifest.meta? 304 | roost_manifest.meta = 305 | location: manifest.meta.location 306 | 307 | try 308 | roost_plugins = roost.plugins.obtain roost_manifest 309 | catch e 310 | return callback e 311 | 312 | node_manifest.roost = roost_manifest 313 | 314 | return callback null, node_name, roost_manifest, roost_plugins 315 | 316 | # 317 | # Obtain shell spec. 318 | # 319 | actions.push (node_name, roost_manifest, roost_plugins, callback) -> 320 | provider.shell_spec node_name, (err, spec) -> 321 | return callback err if err 322 | return callback null, node_name, roost_manifest, roost_plugins, spec 323 | 324 | # 325 | # Expose nodes to each other by using roost as a provisioner. 326 | # 327 | actions.push (node_name, roost_manifest, roost_plugins, spec, callback) -> 328 | roost_manifest.bootstrap ?= [] 329 | roost_manifest.bootstrap.push 'sudo mkdir -p /etc/vortex/nodes/' 330 | 331 | obtain_status = (node_name, callback) -> 332 | provider.status node_name, (err, state, address) -> 333 | return callback err if err 334 | return callback null, {node_name: node_name, address: address} 335 | 336 | async.map Object.keys(manifest.nodes), obtain_status, (err, results) -> 337 | return callback err if err 338 | 339 | for result in results 340 | continue if result.node_name == node_name 341 | 342 | if not result.address 343 | logsmith.error "node #{node_name} does not expose address" 344 | 345 | continue 346 | 347 | address = shell_quote.quote([result.address]) 348 | file = shell_quote.quote(["/etc/vortex/nodes/#{result.node_name}"]) 349 | 350 | roost_manifest.bootstrap.unshift "echo #{address} | sudo tee #{file}" 351 | 352 | return callback null, node_name, roost_manifest, roost_plugins, spec 353 | 354 | # 355 | # Setup the roost target and invoke 
roost. 356 | # 357 | actions.push (node_name, roost_manifest, roost_plugins, spec, callback) -> 358 | try 359 | roost_target = roost.targets.create spec, roost_manifest 360 | catch e 361 | return callback e 362 | 363 | roost_opt = options: {}, argv: [] 364 | roost_opt.options.dry = opt.options.dry if opt.options.dry? 365 | 366 | roost.engine.launch roost_opt, roost_manifest, roost_plugins, roost_target, callback 367 | 368 | # 369 | # Invoke actions for each node. 370 | # 371 | process_node = (node_name, callback) -> 372 | logsmith.info "provision node #{node_name}" 373 | 374 | current_actions = [((callback) -> callback null, node_name), actions...] 375 | 376 | async.waterfall current_actions, callback 377 | 378 | async.eachSeries node_names, process_node, callback 379 | 380 | # --- 381 | 382 | exports.up = (opt, manifest, provider, node_names, callback) -> 383 | ### 384 | Will bring up a node by first booting/resuming it and than starting the provisioning process. 385 | ### 386 | 387 | # 388 | # Boot/resume and provision nodes. 
389 | # 390 | process_node = (node_name, callback) -> 391 | provider.status node_name, (err, state, address) -> 392 | return callback err if err 393 | 394 | perform_provision = (state, address) -> 395 | if state == 'running' and address 396 | exports.provision opt, manifest, provider, [node_name], callback 397 | else 398 | callee = arguments.callee 399 | 400 | timeout_handler = () -> 401 | provider.status node_name, (err, state, address) -> 402 | return callback err if err 403 | return callee state, address 404 | 405 | setTimeout timeout_handler, 1000 406 | 407 | switch state 408 | when 'stopped' 409 | provider.boot node_name, (err, state, address) -> 410 | return callback err if err 411 | 412 | perform_provision state, address 413 | when 'paused' 414 | provider.resume node_name, (err, state, address) -> 415 | return callback err if err 416 | 417 | perform_provision state, address 418 | else 419 | return callback null 420 | 421 | async.eachSeries node_names, process_node, callback 422 | 423 | # --- 424 | 425 | exports.down = (opt, manifest, provider, node_names, callback) -> 426 | ### 427 | Will bring down a node. At the moment this action is a alias for action halt. 428 | ### 429 | 430 | # 431 | # Halt nodes. 432 | # 433 | process_node = (node_name, callback) -> 434 | provider.status node_name, (err, state, address) -> 435 | return callback err if err 436 | return callback null if state == 'stopped' 437 | 438 | provider.halt node_name, callback 439 | 440 | async.eachSeries node_names, process_node, callback 441 | 442 | # --- 443 | 444 | exports.reload = (opt, manifest, provider, node_names, callback) -> 445 | ### 446 | Chains actions down and then up for every node. 447 | ### 448 | 449 | actions = [] 450 | 451 | # 452 | # Invoke action down. 
453 | # 454 | actions.push (node_name, callback) -> 455 | exports.down opt, manifest, provider, [node_name], (err) -> 456 | return callback err if err 457 | return callback null, node_name 458 | 459 | # 460 | # Invoke action up. 461 | # 462 | actions.push (node_name, callback) -> 463 | exports.up opt, manifest, provider, [node_name], (err) -> 464 | return callback err if err 465 | return callback null, node_name 466 | 467 | # 468 | # Invoke actions for each node. 469 | # 470 | process_node = (node_name, callback) -> 471 | logsmith.verbose "reload node #{node_name}" 472 | 473 | current_actions = [((callback) -> callback null, node_name), actions...] 474 | 475 | async.waterfall current_actions, callback 476 | 477 | async.eachSeries node_names, process_node, callback 478 | 479 | # --- 480 | 481 | exports.shell = (opt, manifest, provider, node_names, callback) -> 482 | ### 483 | Starts a shell or executes a command on the selected node. 484 | ### 485 | 486 | actions = [] 487 | 488 | # 489 | # Obtain shell spec. 490 | # 491 | actions.push (node_name, callback) -> 492 | provider.shell_spec node_name, (err, spec) -> 493 | return callback err if err 494 | return callback new Error "unsupported shell spec #{spec}" if not spec.match /^ssh:/i 495 | return callback null, spec 496 | 497 | # 498 | # Start shell or execute a command. 499 | # 500 | actions.push (spec, callback) -> 501 | ssh = new shell.Ssh spec, manifest 502 | command = opt.argv.slice opt.argv.indexOf('--') + 1 503 | 504 | if command.length == opt.argv.length 505 | command = null 506 | else 507 | command = command.join(' ') 508 | 509 | if command 510 | ssh.exec command 511 | else 512 | do ssh.shell 513 | 514 | ssh.ignite false, (err) -> 515 | return callback err if err 516 | return callback null 517 | 518 | # 519 | # Invoke actions for each node. 
520 | # 521 | process_node = (node_name, callback) -> 522 | logsmith.info "shell into node #{node_name}" 523 | 524 | current_actions = [((callback) -> callback null, node_name), actions...] 525 | 526 | async.waterfall current_actions, callback 527 | 528 | async.eachSeries node_names, process_node, callback 529 | 530 | # --- 531 | 532 | exports.openurl = (opt, manifest, provider, node_names, callback) -> 533 | ### 534 | Open node url in browser. 535 | ### 536 | 537 | command = switch 538 | when process.platform.match /^win/ then 'start' 539 | when process.platform.match /^dar/ then 'open' 540 | else 'firefox' 541 | 542 | # 543 | # Invoke for each node. 544 | # 545 | process_node = (node_name, callback) -> 546 | node_def = manifest.nodes[node_name] 547 | web_def = node_def.web or {} 548 | 549 | path = switch 550 | when web_def.path then web_def.path 551 | else '/' 552 | 553 | port = switch 554 | when web_def.port then web_def.port 555 | else 80 556 | 557 | scheme = switch 558 | when web_def.scheme then web_def.scheme 559 | when port == 443 then 'https' 560 | else 'http' 561 | 562 | provider.status node_name, (err, state, address) -> 563 | return callback err if err 564 | return callback new Error "cannot identify address for node #{node_name}" if not address 565 | 566 | url = "#{scheme}://#{address}:#{port}#{path}" 567 | 568 | child_process.exec shell_quote.quote([command, url]), (err) -> 569 | return callback err if err 570 | return callback null 571 | 572 | async.eachSeries node_names, process_node, callback 573 | 574 | -------------------------------------------------------------------------------- /src/download.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | http = require 'http' 3 | https = require 'https' 4 | logsmith = require 'logsmith' 5 | Progress2 = require 'progress2' 6 | 7 | # --- 8 | 9 | exports.get = (url, file, callback) -> 10 | ### 11 | Downloads a url into a file. 
A progress bar will appear if the right logging level is set. 12 | ### 13 | 14 | logsmith.verbose "download #{url} to #{file}" 15 | 16 | switch 17 | when url.match /^http:\/\//i then getter = http 18 | when url.match /^https:\/\//i then getter = https 19 | else return callback new Error "unrecognized scheme for url #{url}" if callback 20 | 21 | try 22 | socket = getter.get url, (response) -> 23 | switch 24 | when response.statusCode == 401 then return callback new Error "not authorized to download #{url}" if callback 25 | when response.statusCode == 403 then return callback new Error "not allowed to download #{url}" if callback 26 | when response.statusCode == 404 then return callback new Error "download #{url} not found" if callback 27 | when 200 < response.statusCode > 299 then return callback new Error "cannot download #{url}" if callback 28 | 29 | if logsmith.level in ['verbose', 'debug', 'silly'] 30 | content_length = parseInt response.headers['content-length'], 10 31 | 32 | if not isNaN content_length 33 | progress = new Progress2 'downloading [:bar] :percent :etas', { 34 | complete: '=' 35 | incomplete: ' ' 36 | total: content_length 37 | width: 40 38 | } 39 | 40 | response.on 'data', (chunk) -> progress.tick chunk.length 41 | response.on 'end', () -> process.stdout.write '\n' 42 | 43 | stream = fs.createWriteStream file 44 | 45 | stream.on 'error', (error) -> callback new Error "cannot write to file #{file} for download #{url}" 46 | response.on 'error', (error) -> callback new Error "cannot download from url #{url} to file #{file}" 47 | response.on 'end', () -> callback null 48 | response.pipe stream 49 | 50 | socket.on 'error', (error) -> callback error if callback 51 | catch e 52 | return callback e if callback 53 | 54 | -------------------------------------------------------------------------------- /src/engine.coffee: -------------------------------------------------------------------------------- 1 | actions = require './actions' 2 | 3 | # --- 4 | 5 | 
exports.launch = (opt, manifest, plugins, provider, action, callback) -> 6 | ### 7 | Main method for putting together the entire logic of Vortex. 8 | ### 9 | 10 | return callback new Error "action #{action} is not recognized" if not actions[action]? 11 | 12 | [plugin.vortex opt, manifest, provider, action for plugin in plugins] if plugins 13 | 14 | return callback new Error "no nodes defined in the vortex manifest" if not manifest.nodes? 15 | 16 | selected_nodes = [] 17 | traversed_nodes = selected_nodes 18 | 19 | for node_name in opt.argv.slice 1 20 | if node_name == '--' 21 | traversed_nodes = [] 22 | else 23 | traversed_nodes.push node_name 24 | 25 | if selected_nodes.length == 0 26 | selected_nodes = Object.keys manifest.nodes 27 | 28 | return callback new Error "no nodes selected for action #{action}" if selected_nodes.length == 0 29 | 30 | for node_name in selected_nodes 31 | return callback new Error "node #{node_name} does not exist" if not manifest.nodes[node_name]? 32 | 33 | actions[action] opt, manifest, provider, selected_nodes, callback 34 | 35 | 36 | -------------------------------------------------------------------------------- /src/index.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | path = require 'path' 3 | 4 | # --- 5 | 6 | for file in fs.readdirSync __dirname 7 | ext = path.extname file 8 | base = path.basename file, ext 9 | exports[base] = require path.join __dirname, file 10 | 11 | -------------------------------------------------------------------------------- /src/manifest.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | path = require 'path' 3 | 4 | # --- 5 | 6 | exports.locate = (location) -> 7 | ### 8 | Locates a manifest. There are different strategies where to find the manifest file. 9 | ### 10 | 11 | file = location ? 
path.join process.cwd(), 'vortex.json' 12 | 13 | throw new Error 'vortex manifest not found' if not fs.existsSync file 14 | 15 | stat = fs.statSync file 16 | 17 | if stat.isDirectory() 18 | file = path.resolve file, 'vortex.json' 19 | stat = fs.statSync file 20 | 21 | throw new Error 'vortex manifest does not exist' if not stat.isFile() 22 | 23 | return file 24 | 25 | # --- 26 | 27 | exports.load = (location) -> 28 | ### 29 | Loads a manifest. The manifest is initialized with a meta object containing its location. 30 | ### 31 | 32 | manifest = require location 33 | manifest.meta = location: location 34 | 35 | return manifest 36 | 37 | -------------------------------------------------------------------------------- /src/plugins.coffee: -------------------------------------------------------------------------------- 1 | path = require 'path' 2 | 3 | # --- 4 | 5 | exports.obtain = (manifest) -> 6 | ### 7 | Obtains a list of plugins specified in the manifest. 8 | ### 9 | 10 | return if not manifest.plugins? 11 | 12 | true_values = [1, '1', true, 'true'] 13 | 14 | if Array.isArray manifest.plugins 15 | plugins = manifest.plugins 16 | else 17 | plugins = [name for name, value of manifest.plugins if value in true_values] 18 | 19 | root = path.dirname manifest.meta.location 20 | failure = (err) -> throw new Error "cannot load plugin #{name}" if err.code != 'MODULE_NOT_FOUND' 21 | 22 | return plugins.map (name) -> 23 | try 24 | plugin = require path.resolve root, name 25 | catch e 26 | failure e 27 | 28 | try 29 | plugin = require path.resolve path.join(root, 'node_modules'), name 30 | catch e 31 | failure e 32 | 33 | try 34 | plugin = require name 35 | catch e 36 | failure e 37 | throw e 38 | 39 | if plugin.getVortex? 40 | plugin = plugin.getVortex manifest 41 | 42 | throw new Error "plugin #{name} is not compatible" if not plugin.vortex? 
43 | 44 | return plugin 45 | 46 | -------------------------------------------------------------------------------- /src/provider_amazon.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | async = require 'async' 3 | aws_sdk = require 'aws-sdk' 4 | logsmith = require 'logsmith' 5 | path_extra = require 'path-extra' 6 | portchecker = require 'portchecker' 7 | 8 | # --- 9 | 10 | exports.Provider = class 11 | ### 12 | This class exposes Amazon as a provider to Vortex. 13 | ### 14 | 15 | constructor: (@manifest) -> 16 | ### 17 | The provider accepts a manifest as a parameter by specification. 18 | ### 19 | 20 | aws_sdk.config.update @extract_client_options() 21 | 22 | get_node: (node_name) -> 23 | ### 24 | This method returns a node by looking up its name. It throws an error if the node is not found. 25 | ### 26 | 27 | return @manifest.nodes[node_name] if @manifest.nodes? and @manifest.nodes[node_name]? 28 | throw new Error "node #{node_name} does not exist" 29 | 30 | extract_property: (property_name, node_name) -> 31 | ### 32 | Extracts a property by looking into a node and upper layers of the manifest. 33 | ### 34 | 35 | try 36 | node = @get_node node_name 37 | catch e 38 | node = null 39 | 40 | return node.amazon[property_name] if node?.amazon?[property_name]? 41 | return @manifest.amazon[property_name] if @manifest.amazon?[property_name]? 42 | return null 43 | 44 | # 45 | # Helper functions for extracting various properties. 
46 | # 47 | extract_access_key_id: (node_name) -> @extract_property 'accessKeyId', node_name 48 | extract_secret_access_key: (node_name) -> @extract_property 'secretAccessKey', node_name 49 | extract_region: (node_name) -> @extract_property 'region', node_name 50 | extract_max_retries: (node_name) -> @extract_property 'maxRetries', node_name 51 | extract_image_id: (node_name) -> @extract_property 'imageId', node_name 52 | extract_instance_type: (node_name) -> @extract_property 'instanceType', node_name 53 | extract_key_name: (node_name) -> @extract_property 'keyName', node_name 54 | extract_security_groups: (node_name) -> @extract_property 'securityGroups', node_name 55 | extract_user_data: (node_name) -> @extract_property 'userData', node_name 56 | extract_disable_api_termination: (node_name) -> @extract_property 'disableApiTermination', node_name 57 | extract_username: (node_name) -> @extract_property 'username', node_name 58 | extract_password: (node_name) -> @extract_property 'password', node_name 59 | extract_private_key: (node_name) -> @extract_property 'privateKey', node_name 60 | extract_passphrase: (node_name) -> @extract_property 'passphrase', node_name 61 | extract_ssh_port: (node_name) -> @extract_property 'sshPort', node_name 62 | # 63 | # 64 | # 65 | 66 | extract_namespace: (node_name) -> 67 | ### 68 | Extracts a namespace by looking it up in the node itself and upper layers of the manifest 69 | ### 70 | 71 | try 72 | node = @get_node node_name 73 | catch 74 | node = null 75 | 76 | return node.namespace if node?.namespace? 77 | return @manifest.namespace if @manifest.namespace? 78 | 79 | extract_client_options: (node_name) -> 80 | ### 81 | Extracts options related to the AWS client. 
82 | ### 83 | 84 | access_key_id = @extract_access_key_id node_name 85 | secret_access_key = @extract_secret_access_key node_name 86 | region = @extract_region node_name 87 | max_retries = @extract_max_retries node_name 88 | options = {} 89 | 90 | options.accessKeyId = access_key_id if access_key_id 91 | options.secretAccessKey = secret_access_key if secret_access_key 92 | options.region = region if region 93 | options.maxRetries = max_retries if max_retries 94 | 95 | return options 96 | 97 | extract_instance_options: (node_name) -> 98 | ### 99 | Extracts options related to AWS instances. 100 | ### 101 | 102 | image_id = this.extract_image_id node_name 103 | instance_type = this.extract_instance_type node_name 104 | key_name = this.extract_key_name node_name 105 | security_groups = this.extract_security_groups node_name 106 | user_data = this.extract_user_data node_name 107 | disable_api_termination = this.extract_disable_api_termination node_name 108 | options = {} 109 | 110 | options.ImageId = image_id if image_id 111 | options.InstanceType = instance_type if instance_type 112 | options.KeyName = key_name if key_name 113 | options.SecurityGroups = security_groups if security_groups 114 | options.UserData = user_data if user_data 115 | options.DisableApiTermination = disable_api_termination if disable_api_termination 116 | 117 | return options 118 | 119 | get_client: (node_name) -> 120 | ### 121 | Obtain a client for EC2. 122 | ### 123 | 124 | return new aws_sdk.EC2 @extract_client_options node_name 125 | 126 | create_error: (error, node_name) -> 127 | ### 128 | Creates a friendlier error message. 
129 | ### 130 | 131 | if error.code == 'NetworkingError' 132 | return error 133 | else 134 | tokens = error.toString().split(':') 135 | type = tokens[0] 136 | message = tokens[1].trim() 137 | parts = message.split('.') 138 | message = parts.shift().toLowerCase().trim() 139 | 140 | if node_name 141 | message = "#{message} for node #{node_name}" 142 | 143 | if parts.length > 0 144 | message = "#{message} (#{parts.join('.').trim()})" 145 | 146 | message = message.replace /\s'(\w+)'\s/, (match, group) -> 147 | param = group.toLowerCase() 148 | 149 | switch param 150 | when 'accesskeyid' then param = 'accessKeyId' 151 | when 'secretaccesskey' then param = 'secretAccessKey' 152 | when 'region' then param = 'region' 153 | when 'maxretries' then param = 'maxRetries' 154 | when 'imageid' then param = 'imageId' 155 | when 'instancetype' then param = 'instanceType' 156 | when 'keyname' then param = 'keyName' 157 | when 'securitygroups' then param = 'securityGroups' 158 | when 'userdata' then param = 'userData' 159 | when 'disableapitermination' then param = 'disableApiTermination' 160 | 161 | return ' "' + param + '" ' 162 | 163 | message = message[0] + message.substring 1, message.length 164 | 165 | return new Error message 166 | 167 | bootstrap: (node_name, callback) -> 168 | ### 169 | Provider-specific method for bootstrapping a node. 170 | ### 171 | 172 | # 173 | # First we verify the status of the node to check if the state is correct. 174 | # 175 | verify_status = (callback) => 176 | @status node_name, (err, state, address) -> 177 | return callback err if err 178 | return callback new Error "node #{node_name} is not ready" if state != 'running' 179 | return callback null 180 | 181 | # 182 | # Next we obtain shell spec. 183 | # 184 | obtain_shell_spec = (callback) => 185 | @shell_spec node_name, (err, spec) -> 186 | return callback err if err 187 | return callback null, spec 188 | 189 | # 190 | # Next we check the exposed files and folders. 
191 | # 192 | prepare_exposed = (spec, callback) => 193 | try 194 | node = @get_node node_name 195 | catch e 196 | node = null 197 | 198 | return callback null if not node?.expose? 199 | 200 | handle_exposure = (exposure, callback) => 201 | source_path = path_extra.resolve path_extra.dirname(@manifest.meta.location), exposure.src 202 | 203 | fs.stat source_path, (err, stats) => 204 | return callback new Error "cannot expose #{exposure.src} because it does not exist" if err 205 | return callback null 206 | 207 | async.eachSeries ({src: src, dst: dst} for src, dst of node.expose), handle_exposure, callback 208 | 209 | # 210 | # Action on the tasks. 211 | # 212 | async.waterfall [verify_status, obtain_shell_spec, prepare_exposed], (err, state, address) -> 213 | return callback err if err 214 | return callback null 215 | 216 | status: (node_name, callback) -> 217 | ### 218 | Provider-specific method for checking the status of a node. 219 | ### 220 | 221 | try 222 | client = @get_client node_name 223 | catch e 224 | return callback @create_error e, node_name 225 | 226 | options = 227 | Filters: [ 228 | {Name: 'tag:vortex-node-name', Values: [node_name]} 229 | {Name: 'tag:vortex-node-namespace', Values: [this.extract_namespace(node_name)]} 230 | ] 231 | 232 | logsmith.debug 'describe instances with options', options 233 | 234 | client.describeInstances options, (err, result) => 235 | return callback @create_error err, node_name if err 236 | 237 | instances = [] 238 | 239 | for reservation in result.Reservations 240 | for instance in reservation.Instances 241 | instances.push { 242 | id: instance.InstanceId 243 | state: instance.State.Name 244 | address: instance.PublicDnsName 245 | } 246 | 247 | return callback null, 'stopped' if instances.length == 0 248 | 249 | logsmith.debug 'discovered instances', instances 250 | 251 | selected_instance = instances[instances.length - 1] 252 | 253 | return callback new Error "could not obtain instance for node #{node_name}" if not 
selected_instance 254 | 255 | logsmith.debug 'selected instance', selected_instance 256 | 257 | for instance in instances 258 | if instance.state not in ['shutting-down', 'terminated', 'stopping', 'stopped'] and selected_instance != instance 259 | logsmith.warn "duplicate node #{node_name} with instance id #{instance.id} detected" 260 | 261 | state = switch selected_instance.state 262 | when 'pending' then 'booting' 263 | when 'running' then 'running' 264 | when 'stopped' then 'stopped' 265 | when 'stopping' then 'halting' 266 | when 'terminated' then 'stopped' 267 | when 'shutting-down' then 'halting' 268 | else null 269 | 270 | return callback new Error "undefined state for node #{node_name}" if not state 271 | 272 | logsmith.debug "node #{node_name} with instance id #{selected_instance.id} has state #{state}" 273 | 274 | address = selected_instance.address 275 | 276 | if not address 277 | state = 'booting' 278 | 279 | if state != 'running' 280 | address = null 281 | 282 | return callback null, state, address, selected_instance.id 283 | 284 | boot: (node_name, callback) -> 285 | ### 286 | Provider-specific method for booting a node. 287 | ### 288 | 289 | try 290 | client = @get_client node_name 291 | catch e 292 | return callback @create_error e, node_name 293 | 294 | # 295 | # First we verify the status of the node to check if the state is correct. 296 | # 297 | verify_status = (callback) => 298 | @status node_name, (err, state, address) -> 299 | return callback err if err 300 | return callback new Error "node #{node_name} is already booting" if state == 'booting' 301 | return callback new Error "node #{node_name} is already running" if state == 'running' 302 | return callback new Error "node #{node_name} is halting" if state == 'halting' 303 | return callback null 304 | 305 | # 306 | # Next we run the instance. 
# 307 | 308 | run_instance = (callback) => 309 | options = @extract_instance_options node_name 310 | 311 | options.MinCount = 1 312 | options.MaxCount = 1 313 | 314 | logsmith.debug 'run instances with options', options 315 | 316 | client.runInstances options, (err, result) => 317 | return callback @create_error err, node_name if err 318 | 319 | instances = [] 320 | 321 | for instance in result.Instances 322 | instances.push { 323 | id: instance.InstanceId 324 | } 325 | 326 | return callback new Error "no instances run for node #{node_name}" if instances.length == 0 327 | 328 | logsmith.debug 'ran instances', instances 329 | 330 | selected_instance = instances[instances.length - 1] 331 | 332 | return callback new Error "could not create instance for node #{node_name}" if not selected_instance 333 | 334 | logsmith.debug 'selected instance', selected_instance 335 | 336 | for instance in instances 337 | if selected_instance != instance 338 | logsmith.warn "duplicate node #{node_name} with instance id #{instance.id} detected" 339 | 340 | return callback null, selected_instance.id 341 | 342 | # 343 | # Finally we map tags onto the instance. 344 | # 345 | map_tags = (instance_id, callback) => 346 | options = 347 | Resources: [instance_id] 348 | Tags: [ 349 | {Key: 'vortex-node-name', Value: node_name} 350 | {Key: 'vortex-node-namespace', Value: @extract_namespace node_name} 351 | ] 352 | 353 | logsmith.debug 'create tags with options', options 354 | 355 | client.createTags options, (err, result) => 356 | return callback @create_error err, node_name if err 357 | return callback null, instance_id 358 | 359 | # 360 | # Action on the tasks. 361 | # 362 | async.waterfall [verify_status, run_instance, map_tags], (err) => 363 | return callback err if err 364 | return @status node_name, callback 365 | 366 | halt: (node_name, callback) -> 367 | ### 368 | Provider-specific method for halting a node. 
369 | ### 370 | 371 | try 372 | client = @get_client node_name 373 | catch e 374 | return callback @create_error e, node_name 375 | 376 | # 377 | # First we verify the status of the node to check if the state is correct. 378 | # 379 | verify_status = (callback) => 380 | @status node_name, (err, state, address, instance_id) -> 381 | return callback err if err 382 | return callback new Error "#{node_name} is already halting" if state == 'halting' 383 | return callback new Error "#{node_name} is already stopped" if state == 'stopped' 384 | return callback null, instance_id 385 | 386 | # 387 | # Next we terminate the instance. 388 | # 389 | terminate_instance = (instance_id, callback) => 390 | options = 391 | InstanceIds: [instance_id] 392 | 393 | logsmith.debug 'terminate instances with options', options 394 | 395 | client.terminateInstances options, (err, result) => 396 | return callback @create_error err, node_name if err 397 | return callback null, instance_id 398 | 399 | # 400 | # Finally we unmap any tags on the instance. 401 | # 402 | unmap_tags = (instance_id, callback) => 403 | options = 404 | Resources: [instance_id] 405 | Tags: [ 406 | {Key: 'vortex-node-name', Value: node_name} 407 | {Key: 'vortex-node-namespace', Value: @extract_namespace node_name} 408 | ] 409 | 410 | logsmith.debug 'delete tags with options', options 411 | 412 | client.deleteTags options, (err, result) => 413 | return callback @create_error err, node_name if err 414 | return callback null, instance_id 415 | 416 | # 417 | # Action on the tasks. 418 | # 419 | async.waterfall [verify_status, terminate_instance, unmap_tags], (err) => 420 | return callback err if err 421 | return @status node_name, callback 422 | 423 | pause: (node_name, callback) -> 424 | ### 425 | Provider-specific method for pausing a machine. 
426 | ### 427 | return callback new Error "cannot pause node #{node_name} due to pause not implemented" 428 | 429 | resume: (node_name, callback) -> 430 | ### 431 | Provider-specific method for resuming a machine. 432 | ### 433 | return callback new Error "cannot resume node #{node_name} due to resume not implemented" 434 | 435 | shell_spec: (node_name, callback) -> 436 | ### 437 | Provider-specific method for obtaining a shell spec from a node. 438 | ### 439 | 440 | password = @extract_password node_name 441 | private_key = @extract_private_key node_name 442 | 443 | return callback new Error "no password or privateKey provided for node #{node_name}" if not password and not private_key 444 | 445 | ssh_port = @extract_ssh_port node_name 446 | 447 | if ssh_port 448 | ssh_port = parseInt ssh_port, 10 449 | 450 | return callback new Error "ssh port for node #{node_name} is incorrect" if isNaN ssh_port or ssh_port < 1 451 | else 452 | ssh_port = 22 453 | 454 | username = @extract_username node_name 455 | 456 | if not username 457 | username = 'vortex' 458 | 459 | passphrase = @extract_passphrase node_name 460 | 461 | # 462 | # First we obtain the node status by looking for the address and to check if the state is correct. 463 | # 464 | obtain_status = (callback) => 465 | @status node_name, (err, state, address) -> 466 | return callback err if err 467 | return callback new Error "node #{node_name} is halting" if state == 'halting' 468 | return callback new Error "node #{node_name} is stopped" if state == 'stopped' 469 | return callback new Error "cannot find network address for node #{node_name}" if not address 470 | return callback null, address 471 | 472 | # 473 | # Next we continiusly check if the ssh port is open. 
474 | # 475 | ensure_port = (address, callback) -> 476 | portchecker.isOpen ssh_port, address, (is_open) -> 477 | return callback null, address if is_open 478 | 479 | callee = arguments.callee 480 | milliseconds = 10000 481 | timeout = () -> portchecker.isOpen ssh_port, address, callee 482 | 483 | logsmith.debug "repeat check for ssh port open for node #{node_name} in #{milliseconds} milliseconds" 484 | 485 | setTimeout timeout, milliseconds 486 | 487 | # 488 | # Finally we build the spec and send it off. 489 | # 490 | build_spec = (address, callback) -> 491 | parts = [] 492 | parts.push 'ssh://' 493 | parts.push encodeURIComponent username 494 | parts.push ':' + encodeURIComponent password if password 495 | parts.push '@' 496 | parts.push address 497 | parts.push ':' + ssh_port 498 | parts.push ';privateKey=' + encodeURIComponent private_key if private_key 499 | parts.push ';passphrase=' + encodeURIComponent passphrase if passphrase 500 | 501 | spec = parts.join '' 502 | spec_options = 503 | username: username 504 | password: password 505 | host: address 506 | port: ssh_port 507 | privateKey: private_key 508 | passphrase: passphrase 509 | 510 | return callback null, spec, spec_options 511 | 512 | # 513 | # Action on the tasks. 514 | # 515 | async.waterfall [obtain_status, ensure_port, build_spec], callback 516 | 517 | -------------------------------------------------------------------------------- /src/provider_virtualbox.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | url = require 'url' 3 | async = require 'async' 4 | logsmith = require 'logsmith' 5 | path_extra = require 'path-extra' 6 | vboxmanage = require 'vboxmanage' 7 | portchecker = require 'portchecker' 8 | shell_quote = require 'shell-quote' 9 | 10 | # --- 11 | 12 | download = require './download' 13 | 14 | # --- 15 | 16 | exports.Provider = class 17 | ### 18 | This class exposes VirtualBox as a provider to Vortex. 
19 | ### 20 | 21 | constructor: (@manifest) -> 22 | ### 23 | The provider accepts a manifest as a parameter by specification. 24 | ### 25 | 26 | get_node: (node_name) -> 27 | ### 28 | This method returns a node by looking up its name. It throws an error if the node is not found. 29 | ### 30 | 31 | return @manifest.nodes[node_name] if @manifest.nodes? and @manifest.nodes[node_name]? 32 | throw new Error "node #{node_name} does not exist" 33 | 34 | extract_property: (property_name, node_name) -> 35 | ### 36 | Extracts a property by looking into a node and upper layers of the manifest. 37 | ### 38 | 39 | try 40 | node = @get_node node_name 41 | catch e 42 | node = null 43 | 44 | return node.virtualbox[property_name] if node?.virtualbox?[property_name]? 45 | return @manifest.virtualbox[property_name] if @manifest.virtualbox?[property_name]? 46 | return null 47 | 48 | # 49 | # Helper functions for extracting various properties. 50 | # 51 | extract_vm_id: (node_name) -> @extract_property 'vmId', node_name 52 | extract_vm_url: (node_name) -> @extract_property 'vmUrl', node_name 53 | extract_username: (node_name) -> @extract_property 'username', node_name 54 | extract_password: (node_name) -> @extract_property 'password', node_name 55 | extract_private_key: (node_name) -> @extract_property 'privateKey', node_name 56 | extract_passphrase: (node_name) -> @extract_property 'passphrase', node_name 57 | extract_ssh_port: (node_name) -> @extract_property 'sshPort', node_name 58 | # 59 | # 60 | # 61 | 62 | extract_namespace: (node_name) -> 63 | ### 64 | Extracts a namespace by looking it up in the node itself and upper layers of the manifest 65 | ### 66 | 67 | try 68 | node = @get_node node_name 69 | catch 70 | node = null 71 | 72 | return node.namespace if node?.namespace? 73 | return @manifest.namespace if @manifest.namespace? 74 | 75 | get_node_handle: (node_name) -> 76 | ### 77 | Creates a VirtualBox friendlier name out of a node name. 
The method take into account the namespace. 78 | ### 79 | 80 | namespace = @extract_namespace node_name 81 | 82 | return (if namespace then namespace + ':' else '') + node_name 83 | 84 | get_share_handle: (share_name) -> 85 | ### 86 | Creates a VirtualBox friendlier name out of a share name. 87 | ### 88 | 89 | return share_name.replace(/[^\w]+/, '_').replace(/_+/, '_') 90 | 91 | schedule_import: (vm_url, vm_id, callback) -> 92 | ### 93 | Schedules import operation. The function will check if the vm_id exists before execution. 94 | ### 95 | 96 | if not @import_queue? 97 | @import_queue = async.queue (task, callback) => 98 | vboxmanage.machine.info task.vm_id, (err, info) => 99 | return callback null if not err 100 | return @perform_import task.vm_url, task.vm_id, callback 101 | 102 | task = 103 | vm_url: vm_url 104 | vm_id: vm_id 105 | 106 | @import_queue.push task, callback 107 | 108 | perform_import: (vm_url, vm_id, callback) -> 109 | ### 110 | Performs import operation. 111 | ### 112 | 113 | logsmith.debug "import #{vm_url} into #{vm_id}" 114 | 115 | try 116 | spec = url.parse vm_url 117 | catch 118 | return callback new Error "cannot parse url #{vm_url}" 119 | 120 | return callback new Error "unsupported scheme for url #{vm_url}" if spec.protocol not in ['file:', 'http:', 'https:'] 121 | 122 | if spec.protocol == 'file' 123 | if not spec.host 124 | local_path = spec.pathname 125 | else 126 | local_path = path_extra.resolve path_extra.dirname(@manifest.meta.location), path_extra.join(spec.host, spec.pathname) 127 | 128 | vboxmanage.machine.import local_path, vm_id, callback 129 | else 130 | local_name = (new Date()).getTime() + '-' + path_extra.basename(spec.pathname) 131 | local_path = path_extra.join path_extra.tempdir(), local_name 132 | 133 | download.get vm_url, local_path, (err) -> 134 | if err 135 | fs.unlink local_path, (err) -> 136 | logsmith.exception err if err 137 | 138 | return callback err 139 | 140 | vboxmanage.machine.import local_path, vm_id, 
(err) -> 141 | fs.unlink local_path, (err) -> 142 | logsmith.exception err if err 143 | 144 | return callback err if err 145 | return callback null 146 | 147 | bootstrap: (node_name, callback) -> 148 | ### 149 | Provider-specific method for bootstrapping a node. 150 | ### 151 | 152 | commands = [ 153 | 'sudo mkdir -p /etc/vortex/flags/' 154 | 'sudo chmod a+rx /etc/vortex/flags/' 155 | 156 | '[ ! -f /etc/vortex/flags/network_ready ] && sudo ifconfig eth1 0.0.0.0 0.0.0.0' 157 | '[ ! -f /etc/vortex/flags/network_ready ] && sudo ifconfig eth2 0.0.0.0 0.0.0.0' 158 | '[ ! -f /etc/vortex/flags/network_ready ] && sudo dhclient -r eth1 eth2' 159 | '[ ! -f /etc/vortex/flags/network_ready ] && sudo dhclient eth1 eth2' 160 | '[ ! -f /etc/vortex/flags/network_ready ] && sudo touch /etc/vortex/flags/network_ready' 161 | ] 162 | 163 | node_handle = @get_node_handle node_name 164 | 165 | # 166 | # First we verify the status of the node to check if the state is correct. 167 | # 168 | verify_status = (callback) => 169 | @status node_name, (err, state, address) -> 170 | return callback err if err 171 | return callback new Error "node #{node_name} is not ready" if state != 'running' 172 | return callback null 173 | 174 | # 175 | # Next we check the exposed files and folders. 176 | # 177 | prepare_exposed = (callback) => 178 | try 179 | node = @get_node node_name 180 | catch e 181 | node = null 182 | 183 | return callback null if not node?.expose? 
184 | 185 | handle_exposure = (exposure, callback) => 186 | source_path = path_extra.resolve path_extra.dirname(@manifest.meta.location), exposure.src 187 | 188 | fs.stat source_path, (err, stats) => 189 | return callback new Error "cannot expose #{exposure.src} because it does not exist" if err 190 | 191 | if stats.isDirectory() 192 | share_handle = @get_share_handle exposure.dst 193 | 194 | commands.push shell_quote.quote ['sudo', 'mkdir', '-p', exposure.dst] 195 | commands.push shell_quote.quote ['sudo', 'mount.vboxsf', share_handle, exposure.dst, '-o', 'rw'] 196 | 197 | return callback null 198 | else 199 | vboxmanage.instance.copy_from source_path, exposure.dst, callback 200 | 201 | async.eachSeries ({src: src, dst: dst} for src, dst of node.expose), handle_exposure, callback 202 | 203 | # 204 | # Finally we execute all commands that were scheduled. 205 | # 206 | run_commands = (callback) -> 207 | run_command = (command, callback) -> 208 | vboxmanage.instance.exec node_handle, 'vortex', 'vortex', '/bin/sh', '-c', command, (err, output) -> 209 | return callback err if err 210 | 211 | process.stdout.write output if logsmith.level in ['verbose', 'debug', 'silly'] 212 | 213 | return callback null 214 | 215 | async.eachSeries commands, run_command, callback 216 | 217 | # 218 | # Action on the tasks. 219 | # 220 | async.waterfall [verify_status, prepare_exposed, run_commands], (err, state, address) -> 221 | return callback err if err 222 | return callback null 223 | 224 | status: (node_name, callback) -> 225 | ### 226 | Provider-specific method for checking the status of a node. 227 | ### 228 | 229 | node_handle = @get_node_handle node_name 230 | 231 | # 232 | # First we obtain basic info about the node. 
233 | # 234 | obtain_machine_state = (callback) -> 235 | vboxmanage.machine.info node_handle, (err, info) -> 236 | return callback null, 'stopped' if err 237 | 238 | state = info.VMState.toLowerCase() 239 | 240 | switch state 241 | when 'saved' then state = 'paused' 242 | when 'paused' then state = 'paused' 243 | when 'running' then state = 'running' 244 | when 'starting' then state = 'booting' 245 | when 'powered off' then state = 'stopped' 246 | when 'guru meditation'then state = 'paused' 247 | 248 | return callback null, state 249 | 250 | # 251 | # Next we obtain the machine network address. 252 | # 253 | obtain_machine_address = (state, callback) -> 254 | vboxmanage.adaptors.list node_handle, (err, adaptors) -> 255 | return callback null, 'stopped', address if err 256 | 257 | try 258 | address = adaptors['Adaptor 1'].V4.IP 259 | catch e 260 | address = null 261 | state = 'booting' 262 | 263 | return callback null, state, address 264 | 265 | # 266 | # Action on the tasks. 267 | # 268 | async.waterfall [obtain_machine_state, obtain_machine_address], (err, state, address) -> 269 | return callback err if err 270 | return callback null, state, address 271 | 272 | boot: (node_name, callback) -> 273 | ### 274 | Provider-specific method for booting a node. 275 | ### 276 | 277 | vm_id = @extract_vm_id node_name 278 | 279 | return callback new Error 'no virtualbox "vmId" paramter specified for node' if not vm_id 280 | 281 | node_handle = @get_node_handle node_name 282 | 283 | # 284 | # First we verify the status of the node to check if the state is correct. 
285 | # 286 | verify_status = (callback) => 287 | @status node_name, (err, state, address) -> 288 | return callback err if err 289 | return callback new Error "node #{node_name} is already booting" if state == 'booting' 290 | return callback new Error "node #{node_name} is already running" if state == 'running' 291 | return callback new Error "node #{node_name} is halting" if state == 'halting' 292 | return callback new Error "node #{node_name} is paused" if state == 'paused' 293 | return callback null 294 | 295 | # 296 | # Next we attemp to remove the vm. Proceed if there is a failure. 297 | # 298 | attemp_to_remove_vm = (callback) -> 299 | vboxmanage.machine.remove node_handle, (err) -> 300 | logsmith.exception err if err 301 | 302 | return callback null 303 | 304 | # 305 | # Next we ensure that the vm exists by checking its id. If it doesn't exist download it from the net or fail misserably. 306 | # 307 | ensure_vm_id = (callback) => 308 | vboxmanage.machine.info vm_id, (err, info) => 309 | return callback null if not err 310 | 311 | vm_url = @extract_vm_url node_name 312 | 313 | return callback new Error 'no virtualbox "vmUrl" paramter specified for node' if not vm_url? 314 | 315 | @schedule_import vm_url, vm_id, callback 316 | 317 | # 318 | # Next we clone the vm into a new one that will be used for the purpose. 319 | # 320 | clone_vm = (callback) -> 321 | vboxmanage.machine.clone vm_id, node_handle, callback 322 | 323 | # 324 | # Next we ensure that there is basic networking going on inside VirtualBox. 
325 | # 326 | ensure_networking = (callback) => 327 | config = 328 | network: 329 | hostonly: 330 | vboxnet5: 331 | ip: '10.100.100.1' 332 | netmask: '255.255.255.0' 333 | 334 | dhcp: 335 | lower_ip: '10.100.100.101' 336 | upper_ip: '10.100.100.254' 337 | 338 | internal: 339 | vortex: 340 | ip: '10.200.200.1' 341 | netmask: '255.255.255.0' 342 | 343 | dhcp: 344 | lower_ip: '10.200.200.101' 345 | upper_ip: '10.200.200.254' 346 | 347 | vboxmanage.setup.system config, callback 348 | 349 | # 350 | # Next we setup the vm using the provided configuration. 351 | # 352 | setup_vm = (callback) => 353 | config = 354 | network: 355 | adaptors: [ 356 | {type: 'hostonly', network: 'vboxnet5'} 357 | {type: 'internal', network: 'vortex'} 358 | {type: 'nat'} 359 | ] 360 | shares: {} 361 | 362 | try 363 | node = @get_node node_name 364 | catch e 365 | return callback e 366 | 367 | if node.expose? 368 | for src, dst of node.expose 369 | src = path_extra.resolve path_extra.dirname(@manifest.meta.location), src 370 | share_handle = @get_share_handle dst 371 | 372 | config.shares[share_handle] = src 373 | 374 | vboxmanage.setup.machine node_handle, config, callback 375 | 376 | # 377 | # Finally we start the vm. 378 | # 379 | start_vm = (callback) -> 380 | vboxmanage.instance.start node_handle, callback 381 | 382 | # 383 | # Action on the tasks. 384 | # 385 | async.waterfall [verify_status, attemp_to_remove_vm, ensure_vm_id, clone_vm, ensure_networking, setup_vm, start_vm], (err) => 386 | return callback err if err 387 | return @status node_name, callback 388 | 389 | halt: (node_name, callback) -> 390 | ### 391 | Provider-specific method for halting a node. 392 | ### 393 | 394 | node_handle = @get_node_handle node_name 395 | 396 | # 397 | # First we verify the status of the node to check if the state is correct. 
398 | # 399 | verify_status = (callback) => 400 | @status node_name, (err, state, address) -> 401 | return callback err if err 402 | return callback new Error "#{node_name} is already halting" if state == 'halting' 403 | return callback new Error "#{node_name} is already stopped" if state == 'stopped' 404 | return callback null 405 | 406 | # 407 | # Next we attempt to shutdown the node. Proceed if there is a failure. 408 | # 409 | attempt_to_stop_vm = (callback) -> 410 | vboxmanage.instance.stop node_handle, (err) -> 411 | logsmith.exception err if err 412 | 413 | return callback null 414 | 415 | # 416 | # Finally we attempt to remove the node. Proceed if there is a failure. 417 | # 418 | attempt_to_remove_vm = (callback) -> 419 | vboxmanage.machine.remove node_handle, (err) -> 420 | logsmith.exception err if err 421 | 422 | return callback null 423 | 424 | # 425 | # Action on the tasks. 426 | # 427 | async.waterfall [verify_status, attempt_to_stop_vm, attempt_to_remove_vm], (err) => 428 | return callback err if err 429 | return @status node_name, callback 430 | 431 | pause: (node_name, callback) -> 432 | ### 433 | Provider-specific method for pausing a machine. 434 | ### 435 | 436 | node_handle = @get_node_handle node_name 437 | 438 | # 439 | # First we verify the status of the node to check if the state is correct. 440 | # 441 | verify_status = (callback) => 442 | @status node_name, (err, state, address) -> 443 | return callback err if err 444 | return callback new Error "#{node_name} is already paused" if state == 'paused' 445 | return callback new Error "#{node_name} is halting" if state == 'halting' 446 | return callback new Error "#{node_name} is stopped" if state == 'stopped' 447 | return callback null 448 | 449 | # 450 | # Finally we pause the vm. We use the save method. 451 | # 452 | pause_vm = (callback) -> 453 | vboxmanage.instance.save node_handle, callback 454 | 455 | # 456 | # Action on the tasks. 
457 | # 458 | async.waterfall [verify_status, pause_vm], (err) => 459 | return callback err if err 460 | return @status node_name, callback 461 | 462 | resume: (node_name, callback) -> 463 | ### 464 | Provider-specific method for resuming a machine. 465 | ### 466 | 467 | node_handle = @get_node_handle node_name 468 | 469 | # 470 | # First we verify the status of the node to check if the state is correct. 471 | # 472 | verify_status = (callback) => 473 | @status node_name, (err, state, address) -> 474 | return callback err if err 475 | return callback new Error "#{node_name} is already booting" if state == 'booting' 476 | return callback new Error "#{node_name} is already running" if state == 'running' 477 | return callback new Error "#{node_name} is halting" if state == 'halting' 478 | return callback new Error "#{node_name} is stopped" if state == 'stopped' 479 | return callback null 480 | 481 | # 482 | # Then we attempt to start the vm if the state has been saved. Don't handle errors. 483 | # 484 | attempt_start_vm = (callback) -> 485 | vboxmanage.instance.start node_handle, (err) -> 486 | logsmith.exception err if err 487 | 488 | return callback null 489 | 490 | # 491 | # Finally we attempt to resume the vm. Don't handle errors. 492 | # 493 | attempt_resume_vm = (callback) -> 494 | vboxmanage.instance.resume node_handle, (err) -> 495 | logsmith.exception err if err 496 | 497 | return callback null 498 | 499 | # 500 | # Action on the tasks. 501 | # 502 | async.waterfall [verify_status, attempt_start_vm, attempt_resume_vm], (err) => 503 | return callback err if err 504 | return @status node_name, callback 505 | 506 | shell_spec: (node_name, callback) -> 507 | ### 508 | Provider-specific method for obtaining a shell spec from a node. 
509 | ### 510 | 511 | password = @extract_password node_name 512 | private_key = @extract_private_key node_name 513 | 514 | return callback new Error "no password or privateKey provided for node #{node_name}" if not password and not private_key 515 | 516 | ssh_port = @extract_ssh_port node_name 517 | 518 | if ssh_port 519 | ssh_port = parseInt ssh_port, 10 520 | 521 | return callback new Error "ssh port for node #{node_name} is incorrect" if isNaN ssh_port or ssh_port < 1 522 | else 523 | ssh_port = 22 524 | 525 | username = @extract_username node_name 526 | 527 | if not username 528 | username = 'vortex' 529 | 530 | passphrase = @extract_passphrase node_name 531 | 532 | # 533 | # First we obtain the node status by looking for the address and to check if the state is correct. 534 | # 535 | obtain_status = (callback) => 536 | @status node_name, (err, state, address) -> 537 | return callback err if err 538 | return callback new Error "node #{node_name} is halting" if state == 'halting' 539 | return callback new Error "node #{node_name} is stopped" if state == 'stopped' 540 | return callback new Error "cannot find network address for node #{node_name}" if not address 541 | return callback null, address 542 | 543 | # 544 | # Next we continiusly check if the ssh port is open. 545 | # 546 | ensure_port = (address, callback) -> 547 | portchecker.isOpen ssh_port, address, (is_open) -> 548 | return callback null, address if is_open 549 | 550 | callee = arguments.callee 551 | milliseconds = 10000 552 | timeout = () -> portchecker.isOpen ssh_port, address, callee 553 | 554 | logsmith.debug "repeat check for ssh port open for node #{node_name} in #{milliseconds} milliseconds" 555 | 556 | setTimeout timeout, milliseconds 557 | 558 | # 559 | # Finally we build the spec and send it off. 
560 | # 561 | build_spec = (address, callback) -> 562 | parts = [] 563 | parts.push 'ssh://' 564 | parts.push encodeURIComponent username 565 | parts.push ':' + encodeURIComponent password if password 566 | parts.push '@' 567 | parts.push address 568 | parts.push ':' + ssh_port 569 | parts.push ';privateKey=' + encodeURIComponent private_key if private_key 570 | parts.push ';passphrase=' + encodeURIComponent passphrase if passphrase 571 | 572 | spec = parts.join '' 573 | spec_options = 574 | username: username 575 | password: password 576 | host: address 577 | port: ssh_port 578 | privateKey: private_key 579 | passphrase: passphrase 580 | 581 | return callback null, spec, spec_options 582 | 583 | # 584 | # Action on the tasks. 585 | # 586 | async.waterfall [obtain_status, ensure_port, build_spec], callback 587 | 588 | -------------------------------------------------------------------------------- /src/providers.coffee: -------------------------------------------------------------------------------- 1 | exports.amazon = require('./provider_amazon').Provider 2 | exports.virtualbox = require('./provider_virtualbox').Provider 3 | 4 | # --- 5 | 6 | instances = {} 7 | 8 | # --- 9 | 10 | exports.instance = (name, manifest) -> 11 | ### 12 | Gets a single instance of a Provider. The methods esentially provides a way of getting singleton instances. 13 | ### 14 | 15 | nice_name = name.toLowerCase() 16 | 17 | if not instances[nice_name]? 18 | if exports[nice_name]? 
and nice_name != 'instance' 19 | instances[nice_name] = new exports[nice_name] manifest 20 | instances[nice_name].name = nice_name 21 | else 22 | throw new Error "provider #{name} is not found" 23 | 24 | return instances[nice_name] 25 | 26 | -------------------------------------------------------------------------------- /src/shell.coffee: -------------------------------------------------------------------------------- 1 | roost = require 'roost' 2 | 3 | # --- 4 | 5 | exports.Ssh = class extends roost.target_ssh.Target 6 | ### 7 | This is a helper class for launching shells. 8 | ### 9 | 10 | shell: () -> 11 | ### 12 | Setup a shell. 13 | ### 14 | 15 | @step (callback) => 16 | @ssh2.shell {term: process.env['TERM'], rows: process.stdout.rows, cols: process.stdout.columns}, (err, stream) => 17 | return callback err if err 18 | 19 | on_resize = () -> 20 | stream.setWindow process.stdout.rows, process.stdout.columns 21 | 22 | init = () -> 23 | process.stdin.setRawMode true 24 | process.stdout.on 'resize', on_resize 25 | process.stdin.pipe stream 26 | stream.pipe process.stdout, {end: false} 27 | 28 | deinit = () -> 29 | process.stdin.unpipe stream 30 | process.stdout.removeListener 'resize', on_resize 31 | process.stdin.setRawMode false 32 | 33 | process.stdin.on 'error', (error) -> 34 | do deinit 35 | return callback error 36 | 37 | process.stdin.on 'end', () -> 38 | do deinit 39 | return callback null 40 | 41 | stream.on 'error', (error) -> 42 | do deinit 43 | return callback error 44 | 45 | stream.on 'end', () -> 46 | do deinit 47 | return callback null 48 | 49 | do init 50 | 51 | -------------------------------------------------------------------------------- /src/vortex.coffee: -------------------------------------------------------------------------------- 1 | exports.main = (argv=process.argv.slice(2)) -> 2 | ### 3 | Launches Vortex command line tool. It can be used for embedding. 
4 | ### 5 | 6 | logsmith = require 'logsmith' 7 | node_getopt = require 'node-getopt' 8 | 9 | engine = require './engine' 10 | plugins = require './plugins' 11 | manifest = require './manifest' 12 | providers = require './providers' 13 | 14 | opt = node_getopt.create [ 15 | ['f', 'file=ARG', 'Specify the root of a vortex project or a vortex manifest.'] 16 | ['p', 'provider=ARG', 'Specify a default provider.'] 17 | ['d', 'dry', 'Dry run the roost manifest.'] 18 | ['v', 'verbose+', 'Make it verbose.'] 19 | ['c', 'colorize', 'Make it pretty.'] 20 | ['V', 'version', 'Shows version.'] 21 | ['h', 'help', 'Display this help.'] 22 | ] 23 | 24 | opt = opt.bindHelp() 25 | opt = opt.parse(argv) 26 | 27 | logsmith.setGlobalLevel(3 - (if opt.options.verbose.length < 3 then opt.options.verbose.length else 3)) if opt.options.verbose? 28 | logsmith.setGlobalColorization(opt.options.colorize) if opt.options.colorize? 29 | 30 | if opt.options.version 31 | logsmith.info require('../package.json').version 32 | 33 | return 34 | 35 | exit_code = 0 36 | 37 | failure = (err) -> 38 | logsmith.exception err 39 | logsmith.error err.message 40 | 41 | process.exit ++exit_code 42 | 43 | try vortex_location = manifest.locate opt.options.file 44 | catch e then failure e 45 | 46 | try vortex_manifest = manifest.load vortex_location 47 | catch e then failure e 48 | 49 | try vortex_plugins = plugins.obtain vortex_manifest 50 | catch e then failure e 51 | 52 | try 53 | if opt.options.provider 54 | vortex_provider = providers.instance opt.options.provider, vortex_manifest 55 | else 56 | vortex_provider = providers.instance 'VirtualBox', vortex_manifest 57 | catch e then failure e 58 | 59 | provider_action = opt.argv[0] ? 
'status' 60 | 61 | engine.launch opt, vortex_manifest, vortex_plugins, vortex_provider, provider_action, (err) -> 62 | return failure err if err 63 | 64 | if require.main == module 65 | do exports.main 66 | 67 | --------------------------------------------------------------------------------