├── .gitignore ├── scripts ├── u9fs ├── uuid2mac ├── syslinux │ ├── ldlinux.c32 │ ├── isolinux.bin │ └── isolinux.cfg ├── keys │ ├── baker.pub │ └── baker_rsa └── kvm.xml.mustache ├── images ├── alpine3.8-simple │ ├── info.yml │ ├── Dockerfile │ └── files │ │ └── init └── alpine3.9-simple │ ├── info.yml │ ├── Dockerfile │ └── files │ └── init ├── doc └── img │ ├── build.png │ ├── images.png │ ├── nanobox.png │ └── run-vbox.png ├── lib ├── logger.js ├── commands │ ├── clean.js │ ├── images.js │ ├── delete.js │ ├── vms.js │ ├── run.js │ └── build.js ├── args.js ├── dependencies.js ├── providers.js ├── images.js ├── providers │ ├── virtualbox.js │ ├── kvm.js │ └── hyperkit.js ├── env.js └── build.js ├── .travis.yml ├── index.js ├── package.json ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ -------------------------------------------------------------------------------- /scripts/u9fs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/scripts/u9fs -------------------------------------------------------------------------------- /images/alpine3.8-simple/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic alpine server with ssh. 2 | -------------------------------------------------------------------------------- /images/alpine3.9-simple/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic alpine server with ssh. 2 | -------------------------------------------------------------------------------- /doc/img/build.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/doc/img/build.png -------------------------------------------------------------------------------- /doc/img/images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/doc/img/images.png -------------------------------------------------------------------------------- /scripts/uuid2mac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/scripts/uuid2mac -------------------------------------------------------------------------------- /doc/img/nanobox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/doc/img/nanobox.png -------------------------------------------------------------------------------- /doc/img/run-vbox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/doc/img/run-vbox.png -------------------------------------------------------------------------------- /scripts/syslinux/ldlinux.c32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/scripts/syslinux/ldlinux.c32 -------------------------------------------------------------------------------- /scripts/syslinux/isolinux.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PDDStudio/slim/master/scripts/syslinux/isolinux.bin -------------------------------------------------------------------------------- /lib/logger.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | 3 | const error = e => console.error(chalk.red(e)); 4 | const info = m => console.log(chalk.yellow(m)); 5 | const ok = m => console.log(chalk.green(m)); 6 | 7 | module.exports = {error, info, ok}; 8 | -------------------------------------------------------------------------------- /scripts/syslinux/isolinux.cfg: -------------------------------------------------------------------------------- 1 | serial 0 115200 2 | default slim 3 | prompt 0 4 | 5 | label slim 6 | kernel /boot/vmlinuz 7 | initrd /boot/initrd 8 | # remove quiet from line below to see boot details 9 | append modules=loop,squashfs,sd-mod,usb-storage quiet console=tty0 console=ttyS0,115200 10 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | 2 | language: node_js 3 | # docker cannot be run on osx in travis 4 | os: linux 5 | addons: 6 | apt: 7 | packages: 8 | - vagrant 9 | - virtualbox 10 | - genisoimage 11 | node_js: 9 12 | install: npm install && npm link 13 | script: slim build images/alpine3.8-simple && slim images 14 | -------------------------------------------------------------------------------- /lib/commands/clean.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | 3 | const env = require('../env'); 4 | 5 | const { registry } = env.vars(); 6 | const { ok } = require('../logger'); 7 | 8 | exports.command = 'clean'; 9 | exports.desc = 'Clear all images from the registry'; 10 | 11 | exports.builder = () => {}; 12 | 13 | exports.handler = async () => { 14 | await fs.emptyDir(registry); 15 | 16 | ok('Registry cleared!'); 17 | }; 18 | -------------------------------------------------------------------------------- /lib/args.js: -------------------------------------------------------------------------------- 1 | const providers = Object.keys(require('./providers').providers); 2 | 3 | const providerArg = { 4 | alias: 'p', 5 | choices: ['hyperkit', 'kvm', 'virtualbox'], 6 | default: defaultProvider(), 7 | description: 'the vm provider to use', 8 | type: 'string' 9 | } 10 | 11 | module.exports = { providerArg }; 12 | 13 | function defaultProvider() { 14 | if (providers.length === 0) return ''; 15 | else if (providers.indexOf('virtualbox') !== -1) return 'virtualbox'; 16 | else return providers[0]; 17 | } 18 | -------------------------------------------------------------------------------- /scripts/keys/baker.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | const yargs = require('yargs'); 3 | const { version } = require('./package.json'); 4 | 5 | const { check } = require('./lib/dependencies'); 6 | const env = require('./lib/env'); 7 | 8 | // Environment reset/sanity check 9 | // - prereqs 10 | // - permissions 11 | // - required files 12 | (async () => { 13 | await env.setup(); 14 | 15 | yargs 16 | .middleware(check) 17 | .commandDir('./lib/commands') 18 | .version() 19 | .epilog(version ? `Version: ${version}`: '') 20 | .demandCommand(1, 'Did you forget to specify a command?') 21 | .recommendCommands() 22 | .showHelpOnFail(false, 'Specify --help for available options') 23 | .strict(true) 24 | .help() 25 | .wrap(yargs.terminalWidth()) 26 | .argv 27 | })(); 28 | -------------------------------------------------------------------------------- /lib/commands/images.js: -------------------------------------------------------------------------------- 1 | const { info } = require('../logger'); 2 | 3 | const images = require('../images'); 4 | 5 | exports.command = 'images'; 6 | exports.desc = 'List available images'; 7 | 8 | exports.builder = () => {}; 9 | 10 | exports.handler = async () => { 11 | let table = await images.list(); 12 | 13 | if (table.length === 0) { 14 | info('No images'); 15 | return; 16 | } 17 | 18 | let transformed = table 19 | .map(i => ({ 20 | image: i.image, 21 | size: sizeToHumanSize(i.size), 22 | description: i.description, 23 | providers: i.providers.join(', ') 24 | })) 25 | .reduce((table, {image, ...x}) => { 26 | table[image] = x; 27 | return table; 28 | }, {} 29 | ); 30 | 31 | console.table(transformed); 32 | }; 33 | 34 | function sizeToHumanSize(size) { 35 | if( size == 0 ) return 0; 36 | var i = Math.floor( Math.log(size) / Math.log(1024) ); 37 | return ( size / Math.pow(1024, i) ).toFixed(2) * 1 + ' ' + ['B', 'kB', 'MB', 'GB', 'TB'][i]; 38 | } 39 | -------------------------------------------------------------------------------- /lib/commands/delete.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const path = require('path'); 3 | 4 | const { error, ok } = require('../logger'); 5 | 6 | const env = require('../env'); 7 | const { providers } = require('../providers'); 8 | const { providerArg } = require('../args'); 9 | 10 | const { registry } = env.vars(); 11 | 12 | exports.command = 'delete '; 13 | exports.desc = 'Delete a microkernel image or vm'; 14 | 15 | exports.builder = yargs => { 16 | yargs.options({ 17 | provider: providerArg 18 | }); 19 | }; 20 | 21 | exports.handler = async argv => { 22 | // both vm and image have the same value 23 | const { vm: command, name, provider } = argv; 24 | 25 | try { 26 | switch (command) { 27 | case 'vm': 28 | await providers[provider].delete(name); 29 | break; 30 | case 'image': 31 | await fs.remove(path.resolve(registry, name)); 32 | break; 33 | } 34 | ok(`${name} deleted`); 35 | } catch (e) { 36 | error(e); 37 | } 38 | }; 39 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "slim", 3 | "version": "1.0.0", 4 | "description": "small and sleek computing environments", 5 | "main": "index.js", 6 | "bin": "index.js", 7 | "scripts": { 8 | "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git+https://github.com/ottomatica/slim.git" 13 | }, 14 | "keywords": [ 15 | "virtual", 16 | "box", 17 | "vm", 18 | "microkernel" 19 | ], 20 | "author": "ottomatica", 21 | "license": "Apache-2.0", 22 | "bugs": { 23 | "url": "https://github.com/ottomatica/slim/issues" 24 | }, 25 | "homepage": "https://github.com/ottomatica/slim#readme", 26 | "dependencies": { 27 | "chalk": "^2.4.2", 28 | "dockerode": "^2.5.8", 29 | "download": "^7.1.0", 30 | "fs-extra": "^8.0.1", 31 | "hasbin": "^1.2.3", 32 | "js-yaml": "^3.13.1", 33 | "mustache": "^3.0.1", 34 | "node-virtualbox": "^0.2.3", 35 | "progress": "^2.0.3", 36 | "simple-git": "^1.116.0", 37 | "sudo-prompt": "^9.0.0", 38 | "tar": "^4.4.10", 39 | "uuid": "^3.3.2" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /lib/commands/vms.js: -------------------------------------------------------------------------------- 1 | const { info } = require('../logger'); 2 | 3 | const { providers } = require('../providers'); 4 | const { providerArg } = require('../args'); 5 | 6 | exports.command = 'vms'; 7 | exports.desc = 'List virtual machines'; 8 | 9 | exports.builder = yargs => { 10 | yargs.options({ 11 | provider: { 12 | ...providerArg, 13 | default: undefined // we want to show all vms by default 14 | } 15 | }); 16 | }; 17 | 18 | exports.handler = async argv => { 19 | let { provider } = argv; 20 | 21 | let vms = []; 22 | if (provider) { 23 | vms.push(...await providers[provider].list()); 24 | } else { 25 | for (const provider of Object.keys(providers)) { 26 | vms.push(...(await providers[provider].list()).map(v => ({...v, provider: provider}))); 27 | } 28 | } 29 | 30 | if (vms.length === 0) { 31 | info('No virtual machines'); 32 | return; 33 | } 34 | 35 | let transformed = vms.reduce((table, {id, ...x}) => { 36 | table[id] = x; 37 | return table; 38 | }, {}); 39 | 40 | console.table(transformed); 41 | } 42 | -------------------------------------------------------------------------------- /lib/dependencies.js: -------------------------------------------------------------------------------- 1 | const hasbin = require('hasbin'); 2 | 3 | const providers = Object.keys(require('./providers').providers); 4 | 5 | const { error } = require('./logger'); 6 | 7 | const depMap = { 8 | 'build': [ 9 | () => mustBin('cpio'), 10 | () => mustBin('docker'), 11 | () => mustBin('gzip'), 12 | () => mustBin('mkisofs'), 13 | ], 14 | } 15 | 16 | const mustBin = bin => { 17 | if (!hasbin.sync(bin)) throw `You must have ${bin} installed to build a microkernel`; 18 | } 19 | 20 | exports.check = argv => { 21 | let cmd = argv._[0]; 22 | 23 | try { 24 | if (providers.length === 0) { 25 | throw 'You don\'t have any providers installed! Please see the docs for a list of supported providers'; 26 | } 27 | 28 | let { provider } = argv; 29 | if (provider && providers.indexOf(provider) === -1) { 30 | throw `Provider ${provider} is not installed! Please see the docs for a list of supported providers`; 31 | } 32 | 33 | (depMap[cmd] || []).forEach(d => d(argv)); 34 | } catch (e) { 35 | error(e); 36 | process.exit(1); 37 | } 38 | }; 39 | -------------------------------------------------------------------------------- /lib/providers.js: -------------------------------------------------------------------------------- 1 | const hasbin = require('hasbin'); 2 | 3 | // specifies the compatibility between build targets (keys) 4 | // and providers (values). 5 | // for instance, using kvm as a build target produces images 6 | // that are usable for either kvm or hyperkit. 7 | const compatMap = { 8 | 'hyperkit': ['kvm', 'hyperkit'], 9 | 'kvm': ['kvm', 'hyperkit'], 10 | 'virtualbox': ['virtualbox'], 11 | }; 12 | 13 | // these are formats that each provider can build, with the 14 | // first entry being the default format for that provider. 15 | // the 'raw' format signifies an unbundled initrd and vmlinuz 16 | const formatMap = { 17 | 'hyperkit': ['raw', 'iso'], 18 | 'kvm': ['raw', 'iso', 'qcow2'], 19 | 'virtualbox': ['iso', 'raw'], 20 | }; 21 | 22 | // map of provider name to class 23 | const providers = {}; 24 | 25 | // only add available providers to the map 26 | if (hasbin.sync('virsh')) providers['kvm'] = require('./providers/kvm'); 27 | if (hasbin.sync('hyperkit')) providers['hyperkit'] = require('./providers/hyperkit'); 28 | if (hasbin.sync('vboxmanage')) providers['virtualbox'] = require('./providers/virtualbox'); 29 | 30 | module.exports = {compatMap, formatMap, providers}; 31 | -------------------------------------------------------------------------------- /lib/images.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const path = require('path'); 3 | const yaml = require('js-yaml'); 4 | 5 | const env = require('./env'); 6 | const { providers } = require('./providers'); 7 | 8 | const { registry } = env.vars(); 9 | 10 | class Images { 11 | constructor() {} 12 | 13 | async list() 14 | { 15 | let images = []; 16 | 17 | for (let name of fs.readdirSync(registry)) { 18 | try { 19 | let info = await this.info(name, registry); 20 | 21 | let total = 0; 22 | for (let p of info.providers) { 23 | if (!providers[p]) { continue; } 24 | total += await providers[p].size(name); 25 | } 26 | 27 | images.push({ 28 | image: name, 29 | size: total, 30 | description: info.description, 31 | providers: info.providers, 32 | }); 33 | } catch (e) { undefined } 34 | } 35 | 36 | return images; 37 | } 38 | 39 | async exists(name) { 40 | return await fs.exists(path.join(registry, name, 'info.yml')); 41 | } 42 | 43 | async info(name) { 44 | return await yaml.safeLoad(fs.readFileSync(path.join(registry, name, 'info.yml'))); 45 | } 46 | } 47 | 48 | module.exports = new Images(); 49 | -------------------------------------------------------------------------------- /scripts/kvm.xml.mustache: -------------------------------------------------------------------------------- 1 | 2 | {{ name }} 3 | slim 4 | 5 | hvm 6 | {{ kernel }} 7 | {{ initrd }} 8 | append modules=loop,squashfs,sd-mod,usb-storage quiet console=tty0 console=ttyS0,115200 9 | 10 | 11 | 12 | 13 | 14 | {{ cpus }} 15 | {{ mem }} 16 | destroy 17 | restart 18 | destroy 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | {{ #syncs }} 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | {{ /syncs }} 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /images/alpine3.8-simple/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8.4 AS openrc 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --no-cache --initdb openrc 4 | 5 | FROM alpine:3.8.4 AS kernel 6 | RUN mkdir -p /lib/apk/db /run 7 | RUN apk add --no-cache --initdb linux-virt virtualbox-guest-modules-virt 8 | 9 | FROM alpine:3.8.4 AS install 10 | USER root 11 | # the public key that is authorized to connect to this instance. 12 | ARG SSHPUBKEY 13 | # optional packages 14 | ARG PKGS 15 | 16 | # don't want all the /etc stuff from openrc -- only tools 17 | # https://pkgs.alpinelinux.org/contents?repo=main&page=2&arch=x86_64&branch=v3.8&name=openrc 18 | COPY --from=openrc /lib/ /lib/ 19 | COPY --from=openrc /bin /bin 20 | COPY --from=openrc /sbin /sbin 21 | COPY --from=openrc /etc/ /etc/ 22 | 23 | # Need virtio modules for networking 24 | COPY --from=kernel /lib/modules /lib/modules 25 | 26 | # Copy kernel for later use 27 | COPY --from=kernel /boot/vmlinuz-virt /vmlinuz 28 | 29 | RUN mkdir -p /lib/apk/db /run 30 | RUN apk add --update --no-cache --initdb alpine-baselayout apk-tools busybox ca-certificates util-linux \ 31 | openssh openssh-client rng-tools dhcpcd virtualbox-guest-additions 32 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 33 | 34 | # Deleted cached packages 35 | RUN rm -rf /var/cache/apk/* 36 | 37 | # Our local files 38 | COPY files/init /init 39 | 40 | RUN echo "RNGD_OPTS=\"-f -r /dev/urandom\"" > /etc/conf.d/rngd 41 | RUN echo "Welcome to slim!" > /etc/motd 42 | RUN echo "tty0::respawn:/sbin/agetty -a root -L tty0 38400 vt100" > /etc/inittab 43 | RUN echo "ttyS0::respawn:/sbin/agetty -a root -L ttyS0 115200 vt100" >> /etc/inittab 44 | 45 | # Set an ssh key 46 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 47 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 48 | 49 | # Fix ssh 50 | RUN sed -i 's/root:!/root:*/' /etc/shadow 51 | -------------------------------------------------------------------------------- /images/alpine3.9-simple/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.9.4 AS openrc 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --no-cache --initdb openrc 4 | 5 | FROM alpine:3.9.4 AS kernel 6 | RUN mkdir -p /lib/apk/db /run 7 | RUN apk add --no-cache --initdb linux-virt virtualbox-guest-modules-virt 8 | 9 | FROM alpine:3.9.4 AS install 10 | USER root 11 | # the public key that is authorized to connect to this instance. 12 | ARG SSHPUBKEY 13 | # optional packages 14 | ARG PKGS 15 | 16 | # don't want all the /etc stuff from openrc -- only tools 17 | # https://pkgs.alpinelinux.org/contents?repo=main&page=2&arch=x86_64&branch=v3.9&name=openrc 18 | COPY --from=openrc /lib/ /lib/ 19 | COPY --from=openrc /bin /bin 20 | COPY --from=openrc /sbin /sbin 21 | COPY --from=openrc /etc/ /etc/ 22 | 23 | # Need virtio modules for networking 24 | COPY --from=kernel /lib/modules /lib/modules 25 | 26 | # Copy kernel for later use 27 | COPY --from=kernel /boot/vmlinuz-virt /vmlinuz 28 | 29 | RUN echo "http://dl-cdn.alpinelinux.org/alpine/v3.9/community" >> /etc/apk/repositories 30 | RUN mkdir -p /lib/apk/db /run 31 | RUN apk add --update --no-cache --initdb alpine-baselayout apk-tools busybox ca-certificates util-linux \ 32 | openssh openssh-client rng-tools dhcpcd virtualbox-guest-additions 33 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 34 | 35 | # Deleted cached packages 36 | RUN rm -rf /var/cache/apk/* 37 | 38 | # Our local files 39 | COPY files/init /init 40 | 41 | RUN echo "Welcome to slim!" > /etc/motd 42 | RUN echo "tty0::respawn:/sbin/agetty -a root -L tty0 38400 vt100" > /etc/inittab 43 | RUN echo "ttyS0::respawn:/sbin/agetty -a root -L ttyS0 115200 vt100" >> /etc/inittab 44 | 45 | # Set an ssh key 46 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 47 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 48 | 49 | # Fix ssh 50 | RUN sed -i 's/root:!/root:*/' /etc/shadow 51 | -------------------------------------------------------------------------------- /lib/commands/run.js: -------------------------------------------------------------------------------- 1 | const os = require('os'); 2 | const path = require('path'); 3 | 4 | const { error } = require('../logger'); 5 | 6 | const images = require('../images'); 7 | const { providers } = require('../providers'); 8 | const { providerArg } = require('../args'); 9 | 10 | exports.command = 'run '; 11 | exports.desc = 'Provision a new micro kernel'; 12 | 13 | exports.builder = yargs => { 14 | yargs.options({ 15 | cpus: { 16 | alias: 'c', 17 | describe: 'number of cpus (default 1)', 18 | type: 'number' 19 | }, 20 | memory: { 21 | alias: 'm', 22 | describe: 'choose memory size in MB (default 1024)', 23 | type: 'number' 24 | }, 25 | bridged: { 26 | alias: 'b', 27 | describe: 'enable bridged networking (DHCP IP)', 28 | type: 'boolean' 29 | }, 30 | provider: providerArg, 31 | sync: { 32 | default: true, 33 | description: 'whether to mount share the cwd and root with the vm', 34 | type: 'boolean' 35 | } 36 | }); 37 | }; 38 | 39 | exports.handler = async argv => { 40 | const { image, cpus, memory, name, bridged, provider, sync } = argv; 41 | 42 | if (!await images.exists(image)) { 43 | error(`${image} image not found`); 44 | return; 45 | } 46 | 47 | let info = await images.info(image).catch(e => error(e)); 48 | if (info.providers.indexOf(provider) == -1) { 49 | error(`Please rebuild ${image} for ${provider}`); 50 | return; 51 | } 52 | 53 | let root = (os.platform() == "win32") ? `${process.cwd().split(path.sep)[0]}/` : "/"; 54 | let syncs = sync ? [`${process.cwd()};/slim`, `${root};/host`] : []; 55 | 56 | await providers[provider].create(name, { 57 | image: image, 58 | cpus: cpus || info.cpus, 59 | mem: memory || info.memory, 60 | bridged, 61 | syncs, 62 | }).catch(e => error(e)); 63 | }; 64 | -------------------------------------------------------------------------------- /lib/commands/build.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const p = require('path'); 3 | const yaml = require('js-yaml'); 4 | 5 | const { error } = require('../logger'); 6 | 7 | const build = require('../build'); 8 | const env = require('../env'); 9 | const { providerArg } = require('../args'); 10 | const { compatMap, formatMap } = require('../providers'); 11 | 12 | const { pubkey } = env.vars(); 13 | 14 | exports.command = 'build [path]'; 15 | exports.desc = 'Build a new microkernel'; 16 | 17 | exports.builder = yargs => { 18 | yargs.options({ 19 | cache: { 20 | default: true, 21 | description: 'whether to cache images during docker build', 22 | type: 'boolean' 23 | }, 24 | format: { 25 | alias: 'f', 26 | default: [], 27 | description: 'additional image formats to build', 28 | type: 'string' 29 | }, 30 | provider: providerArg 31 | }); 32 | }; 33 | 34 | exports.handler = async argv => { 35 | let { path, cache, format, provider } = argv; 36 | // ensure format is an array 37 | if (typeof(format) === 'string') format = [format] 38 | let availFormats = formatMap[provider]; 39 | for (let f of format) { 40 | if (!availFormats.includes(f)) { 41 | error(`Format ${f} is not supported for ${provider}`); 42 | return; 43 | } 44 | } 45 | 46 | let { buildPath, infoPath, outputDir } = await env.makeContext(path); 47 | 48 | let info = await yaml.safeLoad(fs.readFileSync(infoPath)); 49 | let pkgs = ''; 50 | 51 | if (info.base_repository) buildPath = await env.cloneOrPull(info.base_repository); 52 | if (info.base_directory) buildPath = p.join(buildPath, info.base_directory); 53 | if (info.base_args) pkgs = info.base_args.PKGS; 54 | 55 | info.providers = compatMap[provider]; 56 | await fs.writeFile(p.join(outputDir, 'info.yml'), await yaml.safeDump(info)); 57 | 58 | let context = { 59 | provider, 60 | format, 61 | buildPath, 62 | outputDir, 63 | dockerOpts: { 64 | nocache: !cache, 65 | buildargs: { 66 | 'SSHPUBKEY': pubkey, 67 | 'PKGS': pkgs 68 | } 69 | } 70 | } 71 | 72 | try { 73 | await build(context); 74 | } catch (e) { 75 | error(e); 76 | } 77 | }; 78 | -------------------------------------------------------------------------------- /scripts/keys/baker_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKAIBAAKCAgEAqPmEYSNho56PuTQ/jf6Qb78Owb44ujhLWYFCryX/D/az3FAk 3 | fMOw6Qz06S0jOLKQaiMKBDNQkEZx/Mf1H74a+dbVP++AOCdjIARIwpSPDOWfIqmI 4 | Q1n6EcsKP9rgda6zxyAiDO9ei0RRiA6Kp7fpXh1OMHHx5hzWvPyqgubTcXff1QcX 5 | r3odAiLu/wWt/1FbZ+gk2NrBjKNJiZNCI8yLCuNk4ES+2Tk0H26NU3dw6TgnbdNs 6 | X7ecVwpPSrkcMK43bcmWz/J/MXO/2HEZNKbMBVYjXxGlORG+Nvg069DoQoGnRg/b 7 | n8SW0oeLctlZkcnEAX49NFakDtszzS9ru+hH5DBCP07KVWp7DAoXkbbdwuUCUAr9 8 | IF57Q0rp84CDr24EoXOuOHiK5lf5IdKC2JFvhYvNM6sf3SHSjHOzqVyv7piW9fGq 9 | NiAbQaE4ja+XJWtV3qw8GVyRKerDyZUo2vcTW9MsNDwH8TIirEf348pmeRO3T2mI 10 | 0t2/iyMsY8S/hkq/GUS/t2h+5WLHQGkMHpHrMWjGVEnyg8EVpOVLxPNYKQllpIHE 11 | 3KsQhrogQnb4gVPPMl3w7haHYnYGEyd/STPb94Crr5SQLUWgW10iemE42p36M5Zq 12 | WjWCJh1D3qKGNtL5aJZl87NpZWSPoDOvBmaZ5pd3p/NCyWJ4p9upIoFjPVkCAwEA 13 | AQKCAgBRyAn5Fa3BChIXmiEUcVuoqfjTbmR4RJy7YiNLMAGl0Uo13Bf8xp3N/bZf 14 | ULhWTZ41sGW9qLRaT64FoSWTSmg1+XNWsW0GQJHqQgiRHGOr40rE9PZ9WoP8rp90 15 | TlQKwRZDztqMFiJVFyi6yAb1q75oDZj1O4DPVa/c4hEIr/0wUstjiD4/cMOvcAbq 16 | KO6QvuiVfrauuhmpHrKNwlbliq7VAz+kh8Ey00vV1qTR++ILDmGO9x/hp7UkL1o3 17 | GSZ6rSconMPAO2ayIYp9kCeZ4wylnI4cCidEWsEMS88ZPw/aeHPkJfKu/e/dTzr/ 18 | yBBgzh1ud5HZzgEzK4aDzWrAFGkOT6Oa3OuqCPmYGZZ/Z8H/lSvRxcnVG9UyjFpe 19 | yjXhnBfsJzhjFLqyIQfoSO31sbNJkmxQ1XmMEOdYwqmK8nhCSJM5WiUDZQ/X6hpc 20 | ophsS4iGExqqB5/LpodZunDdrxJbljD86iJUp34xCs1LPHqcaB3QmUEppIfYrMUk 21 | crUm6OhUy/u5IcKcpeGGATd26XIi9ljs1GwgSjNd1ejO42HLQBXq569bfhhwDfEf 22 | rVoCM9h6jMeiZQXNsOgf4f7d7T8aPHaOSipJVzoEVyM21K/CDIUtdX8SENeDZaEc 23 | Ob7cKlVDuuZkfk1v/KVDhEGVYBpOYPAiHnedSiYoL6J/9RKTvQKCAQEA1YvF0aho 24 | //O55rk5uGOwdcZ9FM+QNoJNCILGDHWlo/AQdNwd1eWLH0Yap4uYn/WJT50VwcL0 25 | efDyToLDX7GLAZtc/6eUJ7glhQ0u0dUBcIlH6Z8KBZ8BVJIR7i3V2c9FBh0jmLF1 26 | BvO5ulcGizv3r8+vcVqVU5YF33vaWlcV4iP1GeH1QzAru5wLyKtLr8SrZZ9JL5xR 27 | 5xxmNvvxyUmL7tVCoclyMR9X0qluwGriSOziBCL4mUuhvK5UElgmvFFa2kSP/gEx 28 | +D+DyY8FZ3p8NtdZWYAEs+6HXekxTfD6EhbkfB3AWYeu/N3ETRTX64tS+twYSGQT 29 | EPQZNEt34qOKIwKCAQEAypFUD1cWMaHGzRgaXODqmGZj6uwb8kQ2MUDKlJloUt6H 30 | p/9779RYmcR1Yo5W7H0lmcVH12kE0co7Sco7YtSPR1wLgicyjR+Sq+zxyetwqVEu 31 | LDsmraL3BVaTMqRnAaWpHQlbp+NIeeZcnuW3b8EBAEKPy3xujGsHuZ9miumf45qq 32 | 4DbsqMB296yhiXQB1yhWDSEHdcnBfHN8aawDEkp6cssdOB85lmBZpwqAuXxH0l/W 33 | Qrhohq7MH1X7CDefwh23Pz4b9V7XxzirNaq7zFs6ceSNuHv11bHV5QRR5YsPh+KG 34 | CdEDzMuao/1/df0TsiztYdC3K2aYaOEejyGv4sH8UwKCAQBJ8jqwHScu6pEHSkCo 35 | jyy9u9v4Zt/DYF+YgOBf1CVlnW21abuTJAeG7tmwBvD1AytnPDgafo314++kLDfH 36 | XU2LYudTSA5Pqr6jUitSUfZLp94VEhOAWs01Ide/qHOTFukJ8vEuoNSrcZ5w3k3P 37 | zRY59SsFj56B8UNbXiIAgoN7aYQoUEyD1ZxvPNv/wwFUfj/z0rKfH/xkkTr780aI 38 | s0UXkRWfvIgkZnwc4LsPOnPdWNnzIMEBJGV/VsaaC5huQaW6S1+pT3SkSCo0k6gF 39 | ay60NuIj0ebO/9w0Mtn16WpO9UptiEfhONDpk0m0f3E9iWNUpv5pou3PQxevOirr 40 | ekINAoIBACfC3v0j2vdjCeK4GHSisWm4r2QtdE7ZlMmWLi187z1U8MvJGkq5I6sL 41 | JP9zcRx6dCb60l81/fwv9fNF/uInVvhq2NdzWjjZObEFkXBRBow1oxqLgcwTcOlb 42 | VQlbu9xW6BsK+zK5KkDDNur5rEgDWm7yoccPZaOqXpnQ8A/US84hTek03r9BCBkV 43 | iZ+xZasV/84T7aLxN0l2YbVcTj4I4IAn3lRlzKf3waFILnw6KN7icOwnxlypcuez 44 | uNKkGHfB3XZMerBvLWutc+3U1YgHYDF661aK/nYzsgiCEJE9+o5xqF3E6ToJvRDz 45 | cVF3m6Ydq3rHvSyHtuLfTWBK/HtGGIECggEBAMXM4KSngNKpTCApjFpvEI3TvJtp 46 | 8A2Mvhj/M0pXkBIDxcBFqF/0cYXnGpPc/AgPekh/vEyOOGOSh3dplswXie231Bod 47 | 6h8d2I8Pn7dTl/Po1jSMpwXDQu2PlrTr9+zcRZfGgJc4f6nU4PeD5CdUyBCtvJsG 48 | DrnJ2NVUhgRXkCwn8Whe+le9GUKo3athetUZ7lKrbxz0bs2KHniUURpWltfmG+EM 49 | g08w319Gyq9XEdfcdyoxUDCoiyhyV1AYx+tHiMYidkadnKJVXHUC2YapTKtEfK1o 50 | cJmno7sstGUNY0o6+sPve4CIECE4NSMCQdE/K1JPAvF+yZwHlVUZacX+a5M= 51 | -----END RSA PRIVATE KEY----- -------------------------------------------------------------------------------- /images/alpine3.8-simple/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add sshd default 10 | rc_add dhcpcd default 11 | rc_add urandom default 12 | rc_add rngd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "nanobox" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 22 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 23 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 24 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 25 | 26 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 27 | mkdir -m 01777 /var/tmp 28 | ln -s /run /var/run 29 | 30 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 31 | mknod -m 0600 /dev/console c 5 1 32 | mknod -m 0620 /dev/tty1 c 4 1 33 | mknod -m 0666 /dev/tty 5 0 34 | mknod -m 0666 /dev/null 1 3 35 | mknod -m 0660 /dev/kmsg 1 11 36 | 37 | ln -s /proc/self/fd /dev/fd 38 | ln -s /proc/self/fd/0 /dev/stdin 39 | ln -s /proc/self/fd/1 /dev/stdout 40 | ln -s /proc/self/fd/2 /dev/stderr 41 | ln -s /proc/kcore /dev/kcore 42 | 43 | mkdir -m 01777 /dev/mqueue 44 | mkdir -m 01777 /dev/shm 45 | mkdir -m 0755 /dev/pts 46 | 47 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 48 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 49 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 50 | 51 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 52 | 53 | # ignore errors 54 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 55 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 56 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 57 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 58 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 59 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 60 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 61 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 62 | 63 | mount -t tmpfs cgroup_root /sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 64 | 65 | while read c; do 66 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 67 | cg=$(echo "$c" | cut -f1) 68 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 69 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 70 | fi 71 | done < /proc/cgroups 72 | 73 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 74 | 75 | mkdir -m 0555 /sys/fs/cgroup/systemd 76 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o none,name=systemd 77 | 78 | mount --make-rshared / 79 | 80 | # hotplug 81 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 82 | for x in $(ls /sys/bus/*/devices/*/modalias); do 83 | /sbin/modprobe -abq $(cat "$x") || true; 84 | done 85 | 86 | # clock 87 | /sbin/hwclock --hctosys --utc 88 | 89 | # loopback 90 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 91 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 92 | /sbin/ip link set lo up 93 | 94 | # limits 95 | ulimit -n 1048576 96 | ulimit -p unlimited 97 | 98 | # hostname 99 | hostname $(cat /etc/hostname) 100 | 101 | # resolvconf 102 | touch /etc/resolv.conf 103 | 104 | # mount shared folders 105 | mkdir -p /slim /host 106 | modprobe 9pnet_virtio vboxsf 107 | mount -t 9p -o trans=virtio share0 /slim || mount.vboxsf vbox-share-0 /slim || true 108 | mount -t 9p -o trans=virtio share1 /host || mount.vboxsf vbox-share-1 /host || true 109 | 110 | # start default openrc level 111 | /sbin/openrc default 112 | 113 | exec /sbin/init 114 | -------------------------------------------------------------------------------- /images/alpine3.9-simple/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add sshd default 10 | rc_add dhcpcd default 11 | rc_add urandom default 12 | rc_add rngd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "nanobox" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 22 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 23 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 24 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 25 | 26 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 27 | mkdir -m 01777 /var/tmp 28 | ln -s /run /var/run 29 | 30 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 31 | mknod -m 0600 /dev/console c 5 1 32 | mknod -m 0620 /dev/tty1 c 4 1 33 | mknod -m 0666 /dev/tty 5 0 34 | mknod -m 0666 /dev/null 1 3 35 | mknod -m 0660 /dev/kmsg 1 11 36 | 37 | ln -s /proc/self/fd /dev/fd 38 | ln -s /proc/self/fd/0 /dev/stdin 39 | ln -s /proc/self/fd/1 /dev/stdout 40 | ln -s /proc/self/fd/2 /dev/stderr 41 | ln -s /proc/kcore /dev/kcore 42 | 43 | mkdir -m 01777 /dev/mqueue 44 | mkdir -m 01777 /dev/shm 45 | mkdir -m 0755 /dev/pts 46 | 47 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 48 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 49 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 50 | 51 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 52 | 53 | # ignore errors 54 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 55 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 56 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 57 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 58 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 59 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 60 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 61 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 62 | 63 | mount -t tmpfs cgroup_root /sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 64 | 65 | while read c; do 66 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 67 | cg=$(echo "$c" | cut -f1) 68 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 69 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 70 | fi 71 | done < /proc/cgroups 72 | 73 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 74 | 75 | mkdir -m 0555 /sys/fs/cgroup/systemd 76 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o none,name=systemd 77 | 78 | mount --make-rshared / 79 | 80 | # hotplug 81 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 82 | for x in $(ls /sys/bus/*/devices/*/modalias); do 83 | /sbin/modprobe -abq $(cat "$x") || true; 84 | done 85 | 86 | # clock 87 | /sbin/hwclock --hctosys --utc 88 | 89 | # loopback 90 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 91 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 92 | /sbin/ip link set lo up 93 | 94 | # limits 95 | ulimit -n 1048576 96 | ulimit -p unlimited 97 | 98 | # hostname 99 | hostname $(cat /etc/hostname) 100 | 101 | # resolvconf 102 | touch /etc/resolv.conf 103 | 104 | # mount shared folders 105 | mkdir -p /slim /host 106 | modprobe 9pnet_virtio vboxsf 107 | mount -t 9p -o trans=virtio share0 /slim || mount.vboxsf vbox-share-0 /slim || true 108 | mount -t 9p -o trans=virtio share1 /host || mount.vboxsf vbox-share-1 /host || true 109 | 110 | # start default openrc level 111 | /sbin/openrc default 112 | 113 | exec /sbin/init 114 | -------------------------------------------------------------------------------- /lib/providers/virtualbox.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const path = require('path'); 3 | const vbox = require('node-virtualbox'); 4 | const VBoxProvider = require('node-virtualbox/lib/VBoxProvider'); 5 | 6 | const env = require('../env'); 7 | 8 | const { slimdir, registry } = env.vars(); 9 | 10 | const privateKey = path.join(slimdir, 'baker_rsa'); 11 | 12 | class VirtualBox { 13 | constructor() 14 | { 15 | this.defaultOptions = 16 | { 17 | cpus: 1, 18 | mem: 1024, 19 | syncs: [], 20 | disk: false, 21 | verbose: true, 22 | ssh_port: undefined // auto-find a ssh available port 23 | } 24 | 25 | this.driver = new VBoxProvider(); 26 | this.privateKey = privateKey 27 | } 28 | 29 | /** 30 | * Returns State of a VM 31 | * @param {String} VMName 32 | */ 33 | async getState(VMName) { 34 | let vmInfo = await this.driver.info(VMName); 35 | return vmInfo.VMState.replace(/"/g,''); 36 | } 37 | 38 | async _getUsedPorts(name) 39 | { 40 | let ports = []; 41 | let properties = await this.driver.info(name); 42 | for( let prop in properties ) 43 | { 44 | if( prop.indexOf('Forwarding(') >= 0 ) 45 | { 46 | try { 47 | ports.push( parseInt( properties[prop].split(',')[3]) ); 48 | } 49 | catch(e) { console.error(e); } 50 | } 51 | } 52 | return ports; 53 | } 54 | 55 | /** 56 | * Get ssh configurations 57 | * @param {Obj} machine 58 | * @param {Obj} nodeName Optionally give name of machine when multiple machines declared. 59 | */ 60 | async getSSHConfig(machine, nodeName) { 61 | 62 | // Use VirtualBox driver 63 | let vmInfo = await this.driver.info(machine); 64 | let port = null; 65 | Object.keys(vmInfo).forEach(key => { 66 | if(vmInfo[key].includes('guestssh')){ 67 | port = parseInt( vmInfo[key].split(',')[3]); 68 | } 69 | }); 70 | return {user: 'root', port: port, host: machine, hostname: '127.0.0.1', private_key: this.privateKey}; 71 | } 72 | 73 | async create(name, options) 74 | { 75 | let iso = path.join(registry, options.image, 'slim.iso'); 76 | console.log(iso); 77 | 78 | let args = { 79 | vmname: name, 80 | micro: true, 81 | attach_iso: iso, 82 | quickBoot: true, 83 | cpus: options.cpus || this.defaultOptions.cpus, 84 | mem: options.mem || this.defaultOptions.mem, 85 | syncs: options.syncs || this.defaultOptions.syncs, 86 | disk: options.disk || this.defaultOptions.disk, 87 | verbose: options.verbose || this.defaultOptions.verbose, 88 | ssh_port: options.ssh_port || this.defaultOptions.ssh_port, 89 | }; 90 | 91 | if ((await this.driver.list()).filter(e => e.name === name).length == 0) { 92 | await vbox(args); 93 | } else if((await this.getState(name)) != 'running') { 94 | await vbox({start: true, vmname: name, syncs: [], verbose: true}); 95 | } 96 | 97 | let sshInfo = await this.getSSHConfig(name); 98 | console.log(`ssh -i ${sshInfo.private_key} ${sshInfo.user}@${sshInfo.hostname} -p ${sshInfo.port} -o StrictHostKeyChecking=no`) 99 | } 100 | 101 | 102 | async stop(name, force = false) { 103 | await vbox({ stopCmd: true, vmname: name, syncs: [], verbose: false }).catch(e => e); 104 | } 105 | 106 | async delete(name) { 107 | let state = await this.getState(name); 108 | if (state == 'running') { 109 | await this.stop(name); 110 | } else if (state === 'not_found') { 111 | throw new Error(`vm ${name} does not exist`); 112 | } 113 | await vbox({ deleteCmd: true, vmname: name, syncs: [], verbose: false }).catch(e => e); 114 | } 115 | 116 | async exists(name) { 117 | return await fs.exists(path.join(registry, name, 'slim.iso')); 118 | } 119 | 120 | async size(name) { 121 | return fs.statSync(path.join(registry, name, 'slim.iso')).size; 122 | } 123 | 124 | async list() { 125 | return await this.driver.list(); 126 | } 127 | } 128 | 129 | 130 | 131 | 132 | module.exports = new VirtualBox(); 133 | -------------------------------------------------------------------------------- /lib/env.js: -------------------------------------------------------------------------------- 1 | 2 | const fs = require('fs-extra'); 3 | const path = require('path'); 4 | const os = require('os'); 5 | const git = require('simple-git'); 6 | 7 | const download = require('download'); 8 | const ProgressBar = require('progress'); 9 | 10 | 11 | // All things slim 12 | const slimdir = path.join( os.homedir(), '.slim'); 13 | // For storing images built by slim 14 | const registry = path.join(slimdir, 'registry'); 15 | // For storing base images (Dockerfiles, etc.) retrieved via git. 16 | const baseImages = path.join(slimdir, 'baseImages'); 17 | // Script directory 18 | const scriptdir = path.dirname(require.main.filename); 19 | 20 | class Env { 21 | constructor() {} 22 | 23 | async setup() 24 | { 25 | this._preparePaths(); 26 | 27 | // Ensure baker keys are installed. 28 | fs.copyFileSync(path.resolve(scriptdir, 'scripts', 'keys', 'baker_rsa'), path.join(slimdir, 'baker_rsa')); 29 | await fs.chmod(path.join(slimdir, 'baker_rsa'), '600', () => {}); 30 | 31 | this.pubkey = fs.readFileSync(path.join(scriptdir, 'scripts', 'keys', 'baker.pub')).toString(); 32 | 33 | 34 | return this; 35 | } 36 | 37 | check() 38 | { 39 | 40 | return this; 41 | } 42 | 43 | vars() 44 | { 45 | return { 46 | slimdir: slimdir, 47 | registry: registry, 48 | scriptdir: scriptdir, 49 | pubkey: this.pubkey, 50 | env: this, 51 | } 52 | } 53 | 54 | async cloneOrPull(repoURL, dest) { 55 | let name = path.basename(repoURL); 56 | name = name.slice(-4) === '.git' ? name.slice(0, -4) : name; // Removing .git from the end 57 | let dir = path.join(baseImages); 58 | let repo_dir = path.join(dir, name); 59 | 60 | return new Promise((resolve, reject) => { 61 | 62 | // Run git pull if repo already exists locally 63 | if( fs.existsSync(repo_dir) ) 64 | { 65 | git(repo_dir).pull( (err, data) => 66 | { 67 | if (err) 68 | reject(err); 69 | else 70 | resolve(repo_dir); 71 | }) 72 | } 73 | else // clone 74 | { 75 | git(dir).silent(true).clone(repoURL, (err, data) => { 76 | if (err) 77 | reject(err); 78 | else 79 | resolve(repo_dir); 80 | }); 81 | } 82 | }); 83 | } 84 | 85 | async fetch(isoUrl, outputDir, name) 86 | { 87 | if (! fs.existsSync(path.join(outputDir, name)) /*|| (await md5File(isoPath)) != '851e2b2b34e31b67aa0758d25666e8e5'*/) { 88 | 89 | console.log(`Downloading base image ${isoUrl}`); 90 | const bar = new ProgressBar('[:bar] :percent :etas', { 91 | complete: '=', 92 | incomplete: ' ', 93 | width: 20, 94 | total: 0 95 | }); 96 | 97 | await download(isoUrl, outputDir, {filename: name}) 98 | .on('response', res => { 99 | // console.log(`Size: ${res.headers['content-length']}`); 100 | bar.total = res.headers['content-length']; 101 | res.on('data', data => bar.tick(data.length)); 102 | }) 103 | //.then(() => console.log('downloaded!')); 104 | 105 | 106 | } 107 | } 108 | 109 | async makeContext(p) { 110 | let buildPath = path.resolve(p || path.join(scriptdir, 'images', 'alpine3.8-runc-ansible')); 111 | let infoPath = path.join(buildPath, 'info.yml'); 112 | let name = path.basename(buildPath); 113 | let outputDir = path.join(registry, name); 114 | 115 | await Promise.all([ 116 | expectPath(infoPath, `Expected config does not exist in ${infoPath}`), 117 | expectPath(buildPath, `Build path ${buildPath} does not exist`), 118 | fs.ensureDir(outputDir), 119 | ]); 120 | 121 | return { 122 | buildPath, 123 | infoPath, 124 | outputDir 125 | }; 126 | } 127 | 128 | _preparePaths() 129 | { 130 | if( !fs.existsSync(slimdir) ) { fs.mkdirSync(slimdir); } 131 | if( !fs.existsSync(registry) ) { fs.mkdirSync(registry); } 132 | if( !fs.existsSync(baseImages) ) { fs.mkdirSync(baseImages); } 133 | 134 | } 135 | } 136 | 137 | async function expectPath(p, msg) { 138 | let exists = await fs.exists(p); 139 | if (!exists) { 140 | throw new Error(msg); 141 | } 142 | } 143 | 144 | 145 | 146 | module.exports = new Env(); 147 | -------------------------------------------------------------------------------- /lib/providers/kvm.js: -------------------------------------------------------------------------------- 1 | const child = require('child_process'); 2 | const fs = require('fs-extra'); 3 | const mustache = require('mustache'); 4 | mustache.escape = text => text; 5 | const path = require('path'); 6 | const portAvailable = require('is-port-available'); 7 | 8 | const env = require('../env'); 9 | 10 | const { registry, slimdir, scriptdir } = env.vars(); 11 | 12 | const privateKey = path.join(slimdir, 'baker_rsa'); 13 | 14 | class KVM { 15 | constructor() { 16 | this.defaultOptions = { 17 | cpus: 1, 18 | mem: 1024, 19 | syncs: [], 20 | disk: false, 21 | verbose: true, 22 | ssh_port: undefined // auto-find an available port 23 | }; 24 | } 25 | 26 | async exec(cmd, verbose=false) { 27 | let opts = verbose ? { stdio: 'inherit' } : {}; 28 | 29 | return child.execSync(`virsh -c qemu:///session ${cmd}`, opts); 30 | } 31 | 32 | async create(name, options) { 33 | let { image } = options; 34 | // since we are mounting by label rather than directory, 35 | // we need to create a label for each sync 36 | let syncs = (options.syncs || this.defaultOptions.syncs).map((syncs, index) => { 37 | let [ host, guest ] = syncs.split(';'); 38 | 39 | return { 40 | host, 41 | guest, 42 | label: `share${index}`, 43 | }; 44 | }); 45 | 46 | let sshPort = this.defaultOptions.ssh_port || await this.findAvailablePort(); 47 | 48 | let args = { 49 | name, 50 | cpus: options.cpus || this.defaultOptions.cpus, 51 | mem: options.mem || this.defaultOptions.mem, 52 | syncs, 53 | kernel: path.join(registry, image, 'vmlinuz'), 54 | initrd: path.join(registry, image, 'initrd'), 55 | ssh_port: options.sshPort || sshPort 56 | }; 57 | 58 | let xml = (await fs.readFile(path.join(scriptdir, 'scripts', 'kvm.xml.mustache'))).toString(); 59 | let render = mustache.render(xml, args); 60 | let output = path.join(slimdir, `${name}.xml`); 61 | 62 | await fs.writeFile(output, render); 63 | 64 | await this.exec(`create ${output}`, true); 65 | 66 | let sshInfo = await this.getSSHConfig(name); 67 | console.log(`ssh -i ${sshInfo.private_key} ${sshInfo.user}@${sshInfo.hostname} -p ${sshInfo.port} -o StrictHostKeyChecking=no`); 68 | } 69 | 70 | async delete(name) { 71 | await this.exec(`destroy ${name}`); 72 | } 73 | 74 | async exists(name) { 75 | let dir = path.join(registry, name); 76 | 77 | return await fs.exists(path.join(dir, 'vmlinuz')) && await fs.exists(path.join(dir, 'initrd')); 78 | } 79 | 80 | async size(name) { 81 | let dir = path.join(registry, name); 82 | 83 | return fs.statSync(path.join(dir, 'vmlinuz')).size + fs.statSync(path.join(dir, 'initrd')).size; 84 | } 85 | 86 | async list() { 87 | let output = (await this.exec(`list --all --title`)).toString(); 88 | // format is: 89 | // Id Name State 90 | // ------------------ 91 | // [] [] [] 92 | return output 93 | .trim() 94 | .split('\n') 95 | .splice(2,) 96 | .map(e => { 97 | let [ id, name, state, title ] = e.trim().split(/\s+/); 98 | 99 | return { 100 | id, 101 | name, 102 | state, 103 | title 104 | }; 105 | }) 106 | .filter(e => e.title === 'slim') 107 | .map(e => { delete e.title; return e }); 108 | } 109 | 110 | async getSSHConfig(name) { 111 | let port = await this.getSSHPort(name); 112 | 113 | return {user: 'root', port: port, host: name, hostname: '127.0.0.1', private_key: privateKey}; 114 | } 115 | 116 | async findAvailablePort(startPort=2002, endPort=2999) { 117 | let port = startPort; 118 | let blacklistPorts = await this.portsUsedByVMs(); 119 | 120 | while (port <= endPort) { 121 | if (!blacklistPorts[port]) { 122 | let status = await portAvailable(port); 123 | if (status) { 124 | console.log(`Port ${port} is available for SSH on localhost!`); 125 | return port; 126 | } 127 | } 128 | port++; 129 | } 130 | throw new Error(`Couldn't find open port between ${startPort} and ${endPort}`); 131 | } 132 | 133 | async portsUsedByVMs() { 134 | let vms = await this.list(); 135 | 136 | let ports = []; 137 | for (const vm of vms) { 138 | let port = await this.getSSHPort(vm.name); 139 | ports.push(port); 140 | } 141 | 142 | return ports; 143 | } 144 | 145 | async getSSHPort(name) { 146 | let re = /^.+hostfwd=tcp::(\d+)-:22.+$/gm; 147 | let xml = await this.exec(`dumpxml ${name}`); 148 | let [, port, ] = re.exec(xml); 149 | 150 | return port; 151 | } 152 | } 153 | 154 | module.exports = new KVM(); 155 | -------------------------------------------------------------------------------- /lib/build.js: -------------------------------------------------------------------------------- 1 | const child = require('child_process'); 2 | const Docker = require('dockerode'); 3 | const fs = require('fs-extra'); 4 | const path = require('path'); 5 | const tar = require('tar'); 6 | 7 | const env = require('./env'); 8 | 9 | const { info, ok } = require('./logger'); 10 | const { slimdir, scriptdir } = env.vars(); 11 | const { formatMap } = require('./providers'); 12 | 13 | const docker = new Docker(); 14 | 15 | const formatSteps = { 16 | 'raw': [dockerBuild, dockerExport, rawBuild], 17 | 'iso': [dockerBuild, dockerExport, rawBuild, isoBuild, cleanup], 18 | 'qcow2': [dockerBuild, dockerExport, rawBuild, isoBuild, qcowBuild, cleanup] 19 | }; 20 | 21 | async function build(context) { 22 | let { provider, format } = context; 23 | 24 | // first element in the map is the provider's base format 25 | let [ base, ] = formatMap[provider]; 26 | // use a set in case there are overlaps between steps 27 | let steps = new Set(formatSteps[base]); 28 | // add all additional steps for the requested formats 29 | format.forEach(f => formatSteps[f].forEach(s => steps.add(s))); 30 | 31 | // run each step in order 32 | for (let s of steps) { 33 | await s(context); 34 | } 35 | 36 | ok('success!'); 37 | } 38 | 39 | async function dockerBuild(context) { 40 | info('building docker image'); 41 | 42 | let { buildPath, dockerOpts } = context; 43 | 44 | if (!fs.existsSync(path.join(buildPath, 'Dockerfile'))) 45 | throw new Error(`Expected Dockerfile in ${buildPath}`); 46 | 47 | const image = await docker.buildImage({ context: buildPath }, { 48 | t: 'slim-vm', 49 | ...dockerOpts 50 | }); 51 | await new Promise((resolve, reject) => { 52 | docker.modem.followProgress( 53 | image, 54 | (err, res) => err ? reject(err) : resolve(res), 55 | ev => process.stdout.write(ev.stream) 56 | ); 57 | }); 58 | } 59 | 60 | async function dockerExport() { 61 | info('exporting docker filesystem'); 62 | 63 | let image = 'slim-vm'; 64 | let exportDir = path.join(slimdir, image); 65 | await fs.emptyDir(exportDir); 66 | 67 | const container = await docker.createContainer({ Image: image, Cmd: ['sh'] }); 68 | 69 | const contents = await container.export(); 70 | try { 71 | await new Promise((resolve, reject) => { 72 | contents.pipe( 73 | tar.x({ C: exportDir }) 74 | .on('close', resolve) 75 | .on('error', err => reject(err)) 76 | ); 77 | }); 78 | } catch (e) { 79 | throw e; 80 | } finally { 81 | container.remove().catch(() => undefined); 82 | } 83 | } 84 | 85 | async function rawBuild(context) { 86 | info('creating initrd'); 87 | 88 | let { outputDir } = context; 89 | let vmDir = path.join(slimdir, 'slim-vm'); 90 | 91 | await fs.move(path.join(vmDir, 'vmlinuz'), path.join(slimdir, 'vmlinuz'), { overwrite: true }); 92 | child.execSync(`find . | cpio -o -H newc 2>/dev/null | gzip > ${path.join(slimdir, 'initrd')}`, 93 | {cwd: vmDir, stdio: 'inherit'}); 94 | 95 | await fs.copy(path.join(slimdir, 'initrd'), path.join(outputDir, 'initrd')); 96 | await fs.copy(path.join(slimdir, 'vmlinuz'), path.join(outputDir, 'vmlinuz')); 97 | } 98 | 99 | async function isoBuild(context) { 100 | info('building iso'); 101 | 102 | let { outputDir } = context; 103 | let outputPath = path.join(outputDir, 'slim.iso'); 104 | 105 | let isoDir = path.join(slimdir, 'slim-iso') 106 | let bootDir = path.join(isoDir, 'boot'); 107 | let isolinuxDir = path.join(isoDir, 'isolinux'); 108 | 109 | await Promise.all([ 110 | fs.emptyDir(isoDir), 111 | fs.emptyDir(bootDir), 112 | fs.emptyDir(isolinuxDir) 113 | ]); 114 | 115 | await fs.copy(path.join(scriptdir, 'scripts', 'syslinux'), isolinuxDir); 116 | await fs.copy(path.join(slimdir, 'vmlinuz'), path.join(bootDir, 'vmlinuz')); 117 | await fs.copy(path.join(slimdir, 'initrd'), path.join(bootDir, 'initrd')); 118 | 119 | child.execSync(` 120 | mkisofs -o ${outputPath} \ 121 | -b isolinux/isolinux.bin \ 122 | -c isolinux/boot.cat \ 123 | -no-emul-boot -boot-load-size 4 -boot-info-table \ 124 | -V slim -J -R ${isoDir}`, {stdio: 'inherit'}); 125 | } 126 | 127 | async function qcowBuild(context) { 128 | info('building qcow2 image'); 129 | 130 | let { outputDir } = context; 131 | 132 | child.execSync(`qemu-img convert -O qcow2 slim.iso slim.qcow2`, 133 | {cwd: outputDir, stdio: 'inherit'}); 134 | } 135 | 136 | async function cleanup(context) { 137 | info('cleaning up...'); 138 | 139 | let { provider, format, outputDir } = context; 140 | 141 | // we need the iso for qcow, but if we should remove it 142 | // if we aren't also building an iso image 143 | if (provider !== 'virtualbox' && 144 | format.indexOf('iso') === -1) { 145 | await fs.remove(path.join(outputDir, 'slim.iso')); 146 | return; 147 | } 148 | 149 | // we need the raw images to build the iso, but we can 150 | // delete them afterwards 151 | if (provider === 'virtualbox' && 152 | format.indexOf('raw') === -1) { 153 | await fs.remove(path.join(outputDir, 'initrd')); 154 | await fs.remove(path.join(outputDir, 'vmlinuz')); 155 | return; 156 | } 157 | } 158 | 159 | module.exports = build; 160 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # slim | [![Build Status](https://travis-ci.org/ottomatica/slim.svg?branch=master)](https://travis-ci.org/ottomatica/slim) [![dependencies Status](https://david-dm.org/ottomatica/slim/status.svg)](https://david-dm.org/ottomatica/slim) 2 | 3 | `slim` will build a micro-vm from a Dockerfile. Slim works by building and extracting a rootfs from a Dockerfile, and then merging that filesystem with a small minimal kernel that runs in RAM. 4 | 5 | This results in a real VM that can boot instantly, while using very limited resources. If done properly, slim can allow you to design and build immutable unikernels for running services, or build tiny and embedded development environments. 6 | 7 | ## Using slim 8 | 9 | ### Build a micro-vm 10 | 11 | Create a micro-vm from a Dockerfile. Use `build` command with a directory containing a Dockerfile. 12 | 13 | ``` 14 | $ slim build images/alpine3.8-simple 15 | ``` 16 | 17 | ![build](doc/img/build.png) 18 | 19 | This will add a bootable iso in the slim registry. [See example Dockerfile](https://github.com/ottomatica/slim/tree/master/images/alpine3.8-simple). 20 | 21 | `slim build` will use your [default provider](#running-a-micro-vm) unless the `-p` flag is specified (ie `-p hyperkit`). 22 | 23 | ### Listing micro-vm images 24 | 25 | See a list of micro-vm images on your machine. 26 | 27 | ``` 28 | $ slim images 29 | ``` 30 | 31 | ![images command](doc/img/images.png) 32 | 33 | ### Running a micro-vm 34 | 35 | Provision a new instance of the given micro-vm image as a virtual machine. 36 | 37 | Slim currently supports Virtualbox, KVM, and hyperkit (MacOS only) as providers for running VMs. Slim will discover all available providers, defaulting to virtualbox, if more than one provider is available. The `-p` flag can be used to force Slim to use a specific provider. 38 | 39 | Using hyperkit (requires sudo): 40 | 41 | ``` 42 | $ slim run micro1 alpine3.8-simple -p hyperkit 43 | ``` 44 | 45 | ![nanobox](doc/img/nanobox.png) 46 | 47 | Using virtualbox: 48 | 49 | ``` 50 | $ slim run micro2 alpine3.8-simple 51 | ``` 52 | 53 | ![nanobox](doc/img/run-vbox.png) 54 | 55 | VirtualBox will run the micro-vm instance as an attached iso loaded into a cdrom, and boot up the iso in seconds. 56 | 57 | For convenience, a ssh connection command is provided at the end of the command, allowing easy access into the machine: 58 | 59 | Example: `ssh -i /Users/cjparnin/.slim/baker_rsa root@127.0.0.1 -p 2008 -o StrictHostKeyChecking=no` 60 | 61 | ## Advanced Features 62 | 63 | #### Build formats 64 | 65 | Slim supports building multiple image formats, but by default will only build the image required for the given provider. The `-f` flag can be used to specify any additional image formats that should be built, which will be stored in the registry directory for that image. The currently supported formats and their corresponding providers are: 66 | 67 | ​ | raw | iso | qcow2 68 | --- | --- | --- | --- 69 | kvm | ✓ | ✓ | ✓ 70 | hyperkit | ✓ | ✓ | 71 | virtualbox | ✓ | ✓ | 72 | 73 | * The `raw` format signifies an unbundled ramfs archive and kernel. 74 | 75 | Example: running `slim build images/alpine3.8-simple -p kvm -f qcow2` will build a `raw` image (KVM's default image format), as well as a `qcow2` image. 76 | 77 | #### Shared Folders 78 | 79 | Shared folders (mounting) with host system are possible. Some examples are documented here: https://github.com/ottomatica/slim/issues/39 80 | 81 | #### Build and run parameters 82 | 83 | * Using `slim build --no-cache` will skip the docker build cache, allowing you to repair stale apt-get caches, for example. 84 | * Using `slim run v0 ubuntu16.04-jenkins --memory 4096` will allow you allocate more memory to your instance. 85 | 86 | #### Managing vms and images 87 | 88 | * You can get a list of vms with `slim vms`. 89 | * You can delete a specific vm called "v0" with `slim delete vm v0` 90 | * You can delete a specific image in registry called "ubuntu" with `slim delete image ubuntu` 91 | * You can clean out the entire image registry with `slim clean`. 92 | 93 | #### Extending base images (experimental) 94 | 95 | Instead of having to copy and extend an existing Dockerfile, we're experimenting with a reuse pattern that lets you reference an existing base image hosted in a git repository, and extend it by passing in build arguments. For example, by defining this in an `info.yml`, you could extend the base alpine3.9 image with ansible and runc. 96 | 97 | ``` 98 | description: A simple configuration server with ansible and runc (for running containers). 99 | base_repository: https://github.com/ottomatica/slim 100 | base_directory: images/alpine3.9-simple 101 | base_args: 102 | PKGS: runc ansible 103 | ``` 104 | 105 | ## Example micro-vms 106 | 107 | A collection of micro-vms can be found here, including ubuntu base images, jenkins, kubenetes, and more: https://github.com/ottomatica/slim-images 108 | 109 | ## Installing slim 110 | 111 | Simply clone this repo and run: 112 | 113 | ``` 114 | npm install 115 | npm link 116 | ``` 117 | 118 | Unfortunately, due to the experimental nature, there are a few system dependencies you must also install: 119 | 120 | * [docker](https://docs.docker.com/install/), for building and extracting the kernel and filesystem. 121 | * cdrtools: `brew install cdrtools`, for building the micro-vm iso. 122 | 123 | To boot and run the image, you also need a hypervisor: 124 | 125 | * [VirtualBox](https://www.virtualbox.org/wiki/Downloads), `kvm` on Linux, or `hyperkit` on macOS. 126 | 127 | For kvm, you can install the following dependencies for ubuntu: 128 | 129 | ```bash 130 | sudo apt-get install qemu-kvm libvirt-bin virtinst bridge-utils cpu-checker mkisofs 131 | ``` 132 | -------------------------------------------------------------------------------- /lib/providers/hyperkit.js: -------------------------------------------------------------------------------- 1 | const child = require('child_process'); 2 | const fs = require('fs-extra'); 3 | const path = require('path'); 4 | const sudo = async (cmd, opts) => { 5 | return new Promise((resolve, reject) => { 6 | require('sudo-prompt').exec(cmd, opts, (err, stdout) => { 7 | if (err) reject(err); 8 | else resolve(stdout); 9 | }); 10 | }); 11 | }; 12 | const uuid4 = require('uuid/v4'); 13 | 14 | const { info } = require('../logger'); 15 | const env = require('../env'); 16 | 17 | const { registry, scriptdir, slimdir } = env.vars(); 18 | 19 | const privateKey = path.join(slimdir, 'baker_rsa'); 20 | 21 | const sudoOpts = { name: 'slim' }; 22 | 23 | const u9fsPath = path.join(scriptdir, 'scripts', 'u9fs'); 24 | const uuidPath = path.join(scriptdir, 'scripts', 'uuid2mac'); 25 | const hyperDir = path.join(slimdir, 'hyperkit'); 26 | 27 | const _baseDir = (name) => path.join(hyperDir, name); 28 | 29 | class Hyperkit { 30 | constructor() { 31 | this.defaultOptions = { 32 | cpus: 1, 33 | mem: 1024, 34 | syncs: [], 35 | disk: false, 36 | verbose: true, 37 | }; 38 | } 39 | 40 | async create(name, options) { 41 | let { cpus, image, mem, syncs } = options; 42 | cpus = cpus || this.defaultOptions.cpus; 43 | mem = mem || this.defaultOptions.mem; 44 | 45 | const baseDir = _baseDir(name); 46 | const initrdPath = path.join(registry, image, 'initrd'); 47 | const kernelPath = path.join(registry, image, 'vmlinuz'); 48 | 49 | await Promise.all([ 50 | await fs.ensureDir(hyperDir), 51 | this.delete(name), 52 | await fs.emptyDir(baseDir), 53 | ]); 54 | 55 | syncs = (syncs || this.defaultOptions.syncs).map((sync, index) => this.start9p(name, sync, index)); 56 | 57 | // get the mac address that we will use to find the ip 58 | const uuid = uuid4(); 59 | const mac = await this.uuidToMac(uuid); 60 | fs.writeFileSync(path.join(baseDir, 'hyperkit.mac'), mac); 61 | 62 | // make sure the cmdline options are in quotes 63 | const cmdline = `"modules=virtio_net console=ttyS0"`; 64 | // we need to run hyperkit with sudo, since we can't create the virtio-net 65 | // interface as a normal user 66 | const cmd = `hyperkit \ 67 | -m ${mem} -c ${cpus} \ 68 | -s 0:0,hostbridge -s 31,lpc \ 69 | -s 2:0,virtio-net -l com1,stdio \ 70 | ${syncs.join(' ')} \ 71 | -F ${path.join(baseDir, 'hyperkit.pid')} \ 72 | -U ${uuid} \ 73 | -f kexec,${kernelPath},${initrdPath},${cmdline} \ 74 | 2>&1 >> ${path.join(baseDir, 'hyperkit.log')} & 75 | `; 76 | info('Running hyperkit'); 77 | try { 78 | await sudo(cmd, sudoOpts); 79 | } catch (e) { undefined } 80 | 81 | info('Waiting for IP address'); 82 | let sshInfo = await this.getSSHConfig(name); 83 | console.log(`ssh -i ${sshInfo.private_key} ${sshInfo.user}@${sshInfo.hostname} -p ${sshInfo.port} -o StrictHostKeyChecking=no`); 84 | } 85 | 86 | async delete(name) { 87 | let baseDir = _baseDir(name); 88 | 89 | // kill hyperkit first, since it requires sudo and will die if 90 | // the 9p servers are killed before it 91 | await this._killProcess(name, 'hyperkit', true); 92 | 93 | let files = fs.existsSync(baseDir) ? fs.readdirSync(baseDir) : []; 94 | for (let file of files) { 95 | if (!path.extname(file) === '.pid') continue; 96 | 97 | // anything else must be a pid file 98 | let filename = path.basename(file, '.pid'); 99 | if (filename === 'hyperkit') continue; 100 | 101 | await this._killProcess(name, filename); 102 | } 103 | 104 | await fs.remove(baseDir); 105 | } 106 | 107 | async _killProcess(name, process, useSudo = false) { 108 | const pidPath = path.join(_baseDir(name), `${process}.pid`); 109 | 110 | let pid = fs.existsSync(pidPath) ? fs.readFileSync(pidPath) : undefined; 111 | 112 | try { 113 | if (pid) { 114 | if (useSudo) await sudo(`kill -9 ${pid}`, sudoOpts); 115 | else child.execSync(`kill -9 ${pid}`); 116 | } 117 | } catch (e) { undefined; } 118 | 119 | await fs.remove(pidPath); 120 | } 121 | 122 | async exists(name) { 123 | let dir = path.join(registry, name); 124 | 125 | return await fs.exists(path.join(dir, 'initrd')); 126 | } 127 | 128 | async size(name) { 129 | let dir = path.join(registry, name); 130 | 131 | return fs.statSync(path.join(dir, 'initrd')).size + fs.statSync(path.join(dir, 'vmlinuz')).size; 132 | } 133 | 134 | async list() { 135 | await fs.ensureDir(hyperDir); 136 | 137 | return fs.readdirSync(hyperDir) 138 | .filter(name => !(/(^|\/)\.[^/.]/g).test(name)) 139 | .map((name, index) => ({ id: index, name })); 140 | } 141 | 142 | async getSSHConfig(name) { 143 | let mac = fs.readFileSync(path.join(_baseDir(name), 'hyperkit.mac')).toString().trim(); 144 | let ip = await this.macToIP(mac); 145 | 146 | return {user: 'root', port: '22', host: name, hostname: ip, private_key: privateKey}; 147 | } 148 | 149 | async uuidToMac(uuid) { 150 | return await sudo(`${uuidPath} ${uuid}`, sudoOpts); 151 | } 152 | 153 | async macToIP(mac) { 154 | // for some reason the leading zeros in numbers are stripped 155 | mac = mac.split(':').map(v => v.replace(/^0/, '')).join(':'); 156 | 157 | let ip; 158 | do { 159 | ip = child.execSync(` 160 | cat /var/db/dhcpd_leases | \ 161 | grep ${mac} -B 1 | \ 162 | cut -d"=" -f2 | \ 163 | head -1 164 | `).toString().trim(); 165 | await new Promise(resolve => setTimeout(resolve, 1000)); 166 | } while (!ip); 167 | 168 | return ip; 169 | } 170 | 171 | start9p(name, sync, num) { 172 | let baseDir = _baseDir(name); 173 | 174 | let sockName = `share${num}` 175 | let [ host, ] = sync.split(';'); 176 | let socket = path.join(baseDir, `${sockName}.socket`); 177 | 178 | let proc = child.spawn(u9fsPath, 179 | ['-addr', `unix:${socket}`, '-root', host], 180 | {detached: true, stdio: 'ignore'}); 181 | fs.writeFileSync(path.join(baseDir, `${sockName}.pid`), proc.pid); 182 | proc.unref(); 183 | 184 | // start numbering at 3 185 | return `-s ${3+num},virtio-9p,path=${socket},tag=${sockName}`; 186 | } 187 | } 188 | 189 | module.exports = new Hyperkit(); 190 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------