├── .github ├── dependabot.yml └── workflows │ └── actions.yml ├── .gitignore ├── LICENSE ├── README.md ├── doc └── img │ ├── build.png │ ├── images.png │ ├── macos.png │ ├── nanobox.png │ ├── run-vbox.png │ └── slim-hyperv.png ├── images ├── alpine-docker │ ├── Dockerfile │ ├── files │ │ ├── daemon.json │ │ ├── init │ │ └── real_init │ └── info.yml ├── alpine3.12-raw │ ├── Dockerfile │ ├── files │ │ └── init │ └── info.yml ├── alpine3.12-virtualbox │ ├── Dockerfile │ ├── files │ │ └── init │ └── info.yml ├── alpine3.15 │ ├── Dockerfile │ ├── files │ │ └── init │ ├── info.yml │ └── notes.md ├── ubuntu-20.04-ci-hyperv │ ├── Dockerfile │ ├── info.yml │ ├── meta-data │ └── user-data └── ubuntu-20.04-cloud-init │ ├── Dockerfile │ ├── info.yml │ ├── meta-data │ └── user-data ├── index.js ├── lib ├── build.js ├── commands │ ├── build.js │ ├── clean.js │ ├── cloudinit.js │ ├── init.js │ └── push.js ├── dependencies.js ├── env.js ├── logger.js └── tools │ ├── docker.js │ ├── images │ ├── mkisofs │ │ └── Dockerfile │ └── vbox-img │ │ └── Dockerfile │ ├── makeiso.js │ ├── rootfs.js │ └── vhd.js ├── package-lock.json ├── package.json └── scripts ├── keys ├── baker.pub └── baker_rsa ├── make-efi.sh ├── make-ext4.sh └── syslinux ├── isolinux.bin ├── isolinux.cfg └── ldlinux.c32 /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: npm 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "11:00" 8 | timezone: EST 9 | open-pull-requests-limit: 10 10 | reviewers: 11 | - ssmirr 12 | ignore: 13 | - dependency-name: y18n 14 | versions: 15 | - 4.0.1 16 | - 4.0.2 17 | - dependency-name: "@octokit/rest" 18 | versions: 19 | - 18.0.15 20 | - 18.1.0 21 | - 18.1.1 22 | - 18.2.0 23 | - 18.2.1 24 | - 18.3.0 25 | - 18.3.1 26 | - 18.3.2 27 | - 18.3.3 28 | - 18.3.4 29 | - 18.3.5 30 | - 18.4.0 31 | - 18.5.0 32 | - 18.5.2 33 | - dependency-name: simple-git 34 | versions: 35 | - 2.31.0 36 | - 2.32.0 37 | - 2.34.2 38 | - 2.35.0 39 | - 2.35.1 40 | - 2.35.2 41 | - 2.36.0 42 | - 2.36.1 43 | - 2.36.2 44 | - 2.37.0 45 | - dependency-name: js-yaml 46 | versions: 47 | - 4.0.0 48 | - dependency-name: dockerode 49 | versions: 50 | - 3.2.1 51 | -------------------------------------------------------------------------------- /.github/workflows/actions.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches-ignore: 6 | - dependabot/** 7 | pull_request: 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | 13 | strategy: 14 | matrix: 15 | node-version: [14.x, 16.x] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Use Node.js ${{ matrix.node-version }} 20 | uses: actions/setup-node@v1 21 | with: 22 | node-version: ${{ matrix.node-version }} 23 | 24 | # - name: Install bakerx 25 | # run: sudo npm install -g ottomatica/bakerx 26 | - name: Install slim dependencies 27 | run: npm i && npm link 28 | - name: Run init and build 29 | run: | 30 | slim init 31 | slim build images/alpine3.12-raw -f initrd -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | 
http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # slim | ![CI](https://github.com/ottomatica/slim/workflows/CI/badge.svg) 2 | 3 | `slim` builds a VM from a Dockerfile. It works by building and extracting a rootfs from the Dockerfile, then packaging a corresponding kernel and initrd into the desired image format. 4 | 5 | This results in a real VM that can boot instantly while using very limited resources, all from a couple of lines in a Dockerfile. 6 | 7 | ## Using slim 8 | 9 | The following are a few ways you can use slim to build VM images. 10 | 11 | ### Build an Ubuntu Focal Raw VM Image with cloud-init 12 | 13 | 1. Provide a Dockerfile, `images/ubuntu-20.04-cloud-init`: 14 | 15 | ```Dockerfile 16 | FROM ubuntu:20.04 AS kernel 17 | RUN apt-get update && \ 18 | apt-get install -y linux-virtual && \ 19 | apt-get clean 20 | 21 | FROM ubuntu:20.04 22 | 23 | # Extract the kernel, modules, and initrd 24 | COPY --from=kernel /lib/modules /lib/modules 25 | COPY --from=kernel /boot/vmlinuz-* /vmlinuz 26 | COPY --from=kernel /boot/initrd.img-* /initrd 27 | 28 | RUN apt-get update 29 | # Needed for configuring server and setting up devices. 30 | RUN apt install cloud-init udev kmod -y 31 | # If you'd like to be able to ssh in: 32 | RUN apt install openssh-server sudo -y 33 | ``` 34 | 35 | 2. Extract an initrd, rootfs, and uncompressed kernel. 36 | 37 | ``` 38 | $ slim build images/ubuntu-20.04-cloud-init 39 | ... 40 | $ ls -lh ~/.slim/registry/ubuntu-20.04-cloud-init 41 | -rw-r--r-- 1 cjparnin staff 16M Jan 2 17:37 initrd 42 | -rw-r--r-- 1 cjparnin staff 512M Jan 2 19:24 rootfs 43 | -rw-------@ 1 cjparnin staff 29M Nov 5 12:04 vmlinuz 44 | ``` 45 | 46 | 3. Provide a user-data and meta-data file to customize the VM. 47 | 48 | ``` 49 | $ slim cloudinit images/ubuntu-20.04-cloud-init 50 | ... 51 | $ ls -lh ~/.slim/registry/ubuntu-20.04-cloud-init 52 | -rw-r--r-- 1 cjparnin staff 366K Jan 2 21:19 cidata.iso 53 | ``` 54 | 55 | VM running on a Mac M1 (arm64): 56 | ![slim in macos](doc/img/macos.png) 57 | 58 | ### Create a custom Alpine RAM-only VM Image 59 | 60 | 1. Provide a Dockerfile and custom init script. 61 | 62 | See [images/alpine3.12-raw](images/alpine3.12-raw). 63 | 64 | 2. Build the initrd and kernel.
65 | 66 | ```bash 67 | $ slim build images/alpine3.12-raw -f initrd 68 | ... 69 | $ ls -lh ~/.slim/registry/alpine3.12-raw 70 | -rw-r--r-- 1 cjparnin staff 22M Jan 2 20:50 initrd 71 | -rw-r--r-- 1 cjparnin staff 4.6M Dec 28 11:22 vmlinuz 72 | ``` 73 | 74 | ### Create a Hyper-V VHD Image (Windows) 75 | 76 | The following creates an Ubuntu Focal image with cloud-init, the necessary Hyper-V kernel modules, 77 | and a bootable disk image for Microsoft's Hyper-V. 78 | 79 | 1. Provide a Dockerfile. 80 | 81 | See [images/ubuntu-20.04-ci-hyperv](images/ubuntu-20.04-ci-hyperv). 82 | 83 | 2. Create a VHD disk image (1 GB) with an EFI-bootable partition. 84 | 85 | ``` 86 | PS slim build images/ubuntu-20.04-ci-hyperv -f vhd -s 1024 87 | ... 88 | PS ls ~/.slim/registry/ubuntu-20.04-ci-hyperv 89 | -a---- 1/2/2022 3:19 PM 738381824 rootfs.vhd 90 | ``` 91 | 92 | 3. Provide a user-data and meta-data file to customize the VM. 93 | 94 | ``` 95 | $ slim cloudinit images/ubuntu-20.04-ci-hyperv 96 | ... 97 | $ ls -lh ~/.slim/registry/ubuntu-20.04-ci-hyperv 98 | -a---- 1/1/2022 3:32 PM 374784 cidata.iso 99 | ``` 100 | 101 | ![slim in hyperv](doc/img/slim-hyperv.png) 102 | 103 | ## Installing slim 104 | 105 | Clone this repo, `cd slim`, and run: 106 | 107 | ``` 108 | npm install 109 | npm link 110 | 111 | # Pull docker images used for system dependencies. 112 | slim init 113 | ``` 114 | 115 | You must have [docker](https://docs.docker.com/install/) installed on your system. -------------------------------------------------------------------------------- /doc/img/build.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/build.png -------------------------------------------------------------------------------- /doc/img/images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/images.png -------------------------------------------------------------------------------- /doc/img/macos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/macos.png -------------------------------------------------------------------------------- /doc/img/nanobox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/nanobox.png -------------------------------------------------------------------------------- /doc/img/run-vbox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/run-vbox.png -------------------------------------------------------------------------------- /doc/img/slim-hyperv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/doc/img/slim-hyperv.png -------------------------------------------------------------------------------- /images/alpine-docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.15 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --initdb openrc 4 | RUN apk add linux-virt kmod kmod-openrc blkid
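# Note: the RUN steps above bootstrap the apk database, install the OpenRC init
# system, and pull in the linux-virt kernel package; its /boot/vmlinuz-virt is
# copied out to /vmlinuz later in this Dockerfile so slim can extract a bootable kernel.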
5 | 6 | # busybox-initscripts busybox-suid 7 | RUN apk add --update alpine-baselayout alpine-conf alpine-keys apk-tools busybox busybox-initscripts \ 8 | ca-certificates dbus-libs kbd-bkeymaps \ 9 | coreutils bash-completion findutils procps sed readline e2fsprogs \ 10 | docker docker-bash-completion rsync \ 11 | gnutls openssh openssh-client rng-tools dhcpcd network-extras wget util-linux 12 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 13 | 14 | USER root 15 | # the public key that is authorized to connect to this instance. 16 | ARG SSHPUBKEY 17 | # optional packages 18 | ARG PKGS 19 | 20 | # Copy kernel for later use 21 | RUN cp /boot/vmlinuz-virt /vmlinuz 22 | # Nuke boot 23 | RUN rm -rf /boot 24 | 25 | # Deleted cached packages 26 | #RUN rm -rf /var/cache/apk/* 27 | 28 | # Our init 29 | COPY files/init /init 30 | COPY files/real_init /real_init 31 | COPY files/daemon.json /etc/docker/ 32 | 33 | RUN echo "Welcome to slim!" > /etc/motd 34 | 35 | # Set an ssh key 36 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 37 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 38 | 39 | # Fix ssh 40 | RUN sed -i 's/root:!/root:*/' /etc/shadow -------------------------------------------------------------------------------- /images/alpine-docker/files/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "data-root": "/docker" 3 | } -------------------------------------------------------------------------------- /images/alpine-docker/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Fix rootfs 4 | 5 | # Copy initramfs to /mnt and chroot 6 | mkfs.ext4 /dev/vda 7 | 8 | mount -t tmpfs -o size=90% tmpfs /mnt 9 | mount /dev/vda /docker 10 | rsync -av --exclude "/mnt" / /mnt 11 | 12 | # Create mountpoints 13 | # mkdir -p /media/root-ro /media/root-rw $sysroot/media/root-ro \ 14 | # $sysroot/media/root-rw 15 | # # Mount read-only underlying rootfs 16 | # rootflags="ro" 17 | # mount -t rootfs -o $rootflags /mnt /media/root-ro 18 | 19 | # # Mount writable overlay tmpfs 20 | # mount -t tmpfs -o $overlaytmpfsflags root-tmpfs /media/root-rw 21 | # # Create additional mountpoints and do the overlay mount 22 | # mkdir -p /media/root-rw/work /media/root-rw/root 23 | # mount -t overlay -o \ 24 | # lowerdir=/media/root-ro,upperdir=/media/root-rw/root,workdir=/media/root-rw/work \ 25 | # overlayfs $sysroot 26 | 27 | # tar -C / --exclude=mnt -cf - . | tar -C /mnt/ -xf - 28 | 29 | #cd /mnt 30 | #mount --move . / 31 | #cd / 32 | #mount --rbind /mnt /mnt 33 | #exec chroot . 
/real_init 34 | 35 | # proc 36 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 37 | 38 | # dev 39 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 40 | mknod -m 0600 /dev/console c 5 1 41 | mknod -m 0620 /dev/tty1 c 4 1 42 | mknod -m 0666 /dev/tty 5 0 43 | mknod -m 0666 /dev/null 1 3 44 | mknod -m 0660 /dev/kmsg 1 11 45 | 46 | ln -s /proc/self/fd /dev/fd 47 | ln -s /proc/self/fd/0 /dev/stdin 48 | ln -s /proc/self/fd/1 /dev/stdout 49 | ln -s /proc/self/fd/2 /dev/stderr 50 | ln -s /proc/kcore /dev/kcore 51 | 52 | mkdir -m 01777 /dev/mqueue 53 | mkdir -m 01777 /dev/shm 54 | mkdir -m 0755 /dev/pts 55 | 56 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 57 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 58 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 59 | 60 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 61 | 62 | mount --move /sys /mnt/sys 63 | mount --move /proc /mnt/proc 64 | mount --move /dev /mnt/dev 65 | 66 | #exec /bin/busybox switch_root /mnt /real_init 67 | #echo "initramfs emergency recovery shell launched" 68 | exec chroot /mnt /real_init -------------------------------------------------------------------------------- /images/alpine-docker/files/real_init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add urandom boot 10 | rc_add rngd boot 11 | rc_add sshd default 12 | rc_add dhcpcd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "slim" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 22 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 23 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 24 | 25 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 26 | mkdir -m 01777 /var/tmp 27 | mkdir -m 0755 /var/cache/apk 28 | ln -s /run /var/run 29 | 30 | #mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 31 | #mknod -m 0600 /dev/console c 5 1 32 | 33 | 34 | # ignore errors 35 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 36 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 37 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 38 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 39 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 40 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 41 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 42 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 43 | 44 | mount -t tmpfs cgroup_root /sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 45 | 46 | while read c; do 47 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 48 | cg=$(echo "$c" | cut -f1) 49 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 50 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 51 | fi 52 | done < /proc/cgroups 53 | 54 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 55 | 56 | mkdir -m 0555 /sys/fs/cgroup/systemd 57 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o 
none,name=systemd 58 | 59 | mount --make-rshared / 60 | 61 | # hotplug 62 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 63 | for x in $(ls /sys/bus/*/devices/*/modalias); do 64 | /sbin/modprobe -abq $(cat "$x") || true; 65 | done 66 | 67 | # clock 68 | /sbin/hwclock --hctosys --utc 69 | 70 | # loopback 71 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 72 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 73 | /sbin/ip link set lo up 74 | 75 | # limits 76 | ulimit -n 1048576 77 | ulimit -p unlimited 78 | 79 | 80 | 81 | # hostname 82 | hostname $(cat /etc/hostname) 83 | 84 | # resolvconf 85 | touch /etc/resolv.conf 86 | 87 | # mount shared folders 88 | mkdir -p /slim /host 89 | modprobe 9pnet_virtio 90 | mount -t 9p -o trans=virtio share0 /slim 91 | mount -t 9p -o trans=virtio share1 /host 92 | 93 | # start default openrc level 94 | /sbin/openrc default 95 | 96 | # Not in container anymore! 97 | rm /.dockerenv 98 | 99 | # mount /dev/vda /mnt 100 | # exec /bin/busybox switch_root -c /dev/console /mnt /sbin/init 101 | exec /sbin/init 102 | #exec /bin/sh -------------------------------------------------------------------------------- /images/alpine-docker/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic alpine server with ssh, docker 2 | base_args: 3 | PKGS: tmux 4 | SSHPUBKEY: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /images/alpine3.12-raw/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.12 AS openrc 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --no-cache --initdb openrc 4 | 5 | FROM alpine:3.12 AS kernel 6 | RUN mkdir -p /lib/apk/db /run 7 | RUN apk add --no-cache --initdb linux-virt 8 | 9 | FROM alpine:3.12 AS install 10 | USER root 11 | # the public key that is authorized to connect to this instance. 12 | ARG SSHPUBKEY 13 | # optional packages 14 | ARG PKGS 15 | 16 | # don't want all the /etc stuff from openrc -- only tools 17 | # https://pkgs.alpinelinux.org/contents?repo=main&page=2&arch=x86_64&branch=v3.9&name=openrc 18 | COPY --from=openrc /lib/ /lib/ 19 | COPY --from=openrc /bin /bin 20 | COPY --from=openrc /sbin /sbin 21 | COPY --from=openrc /etc/ /etc/ 22 | 23 | # Need virtio modules for networking 24 | COPY --from=kernel /lib/modules /lib/modules 25 | 26 | # Copy kernel for later use 27 | COPY --from=kernel /boot/vmlinuz-virt /vmlinuz 28 | 29 | RUN mkdir -p /lib/apk/db /run 30 | RUN apk add --update --no-cache --initdb alpine-baselayout apk-tools busybox ca-certificates util-linux \ 31 | openssh openssh-client rng-tools dhcpcd 32 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 
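# Note: the guard above installs the optional PKGS build argument only when it is
# non-empty; the `|| echo` branch keeps the RUN step from failing when no extra
# packages are requested.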
33 | 34 | # Deleted cached packages 35 | RUN rm -rf /var/cache/apk/* 36 | 37 | # Our local files 38 | COPY files/init /init 39 | 40 | RUN echo "Welcome to slim!" > /etc/motd 41 | RUN echo "tty0::respawn:/sbin/agetty -a root -L tty0 38400 vt100" > /etc/inittab 42 | RUN echo "ttyS0::respawn:/sbin/agetty -a root -L ttyS0 115200 vt100" >> /etc/inittab 43 | 44 | # Set an ssh key 45 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 46 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 47 | 48 | # Fix ssh 49 | RUN sed -i 's/root:!/root:*/' /etc/shadow 50 | -------------------------------------------------------------------------------- /images/alpine3.12-raw/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add sshd default 10 | rc_add dhcpcd default 11 | rc_add urandom default 12 | rc_add rngd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "nanobox" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 22 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 23 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 24 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 25 | 26 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 27 | mkdir -m 01777 /var/tmp 28 | mkdir -m 0755 /var/cache/apk 29 | ln -s /run /var/run 30 | 31 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 32 | mknod -m 0600 /dev/console c 5 1 33 | mknod -m 0620 /dev/tty1 c 4 1 34 | mknod -m 0666 /dev/tty 5 0 35 | mknod -m 0666 /dev/null 1 3 36 | mknod -m 0660 /dev/kmsg 1 11 37 | 38 | ln -s /proc/self/fd /dev/fd 39 | ln -s /proc/self/fd/0 /dev/stdin 40 | ln -s /proc/self/fd/1 /dev/stdout 41 | ln -s /proc/self/fd/2 /dev/stderr 42 | ln -s /proc/kcore /dev/kcore 43 | 44 | mkdir -m 01777 /dev/mqueue 45 | mkdir -m 01777 /dev/shm 46 | mkdir -m 0755 /dev/pts 47 | 48 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 49 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 50 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 51 | 52 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 53 | 54 | # ignore errors 55 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 56 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 57 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 58 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 59 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 60 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 61 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 62 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 63 | 64 | mount -t tmpfs cgroup_root /sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 65 | 66 | while read c; do 67 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 68 | cg=$(echo "$c" | cut -f1) 69 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 70 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 71 | fi 72 | done < /proc/cgroups 
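# The loop above walks /proc/cgroups and mounts each controller whose "enabled"
# column is set under /sys/fs/cgroup; the commands below then turn on the memory
# controller's use_hierarchy flag and mount a named systemd hierarchy.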
73 | 74 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 75 | 76 | mkdir -m 0555 /sys/fs/cgroup/systemd 77 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o none,name=systemd 78 | 79 | mount --make-rshared / 80 | 81 | # hotplug 82 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 83 | for x in $(ls /sys/bus/*/devices/*/modalias); do 84 | /sbin/modprobe -abq $(cat "$x") || true; 85 | done 86 | 87 | # clock 88 | /sbin/hwclock --hctosys --utc 89 | 90 | # loopback 91 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 92 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 93 | /sbin/ip link set lo up 94 | 95 | # limits 96 | ulimit -n 1048576 97 | ulimit -p unlimited 98 | 99 | # hostname 100 | hostname $(cat /etc/hostname) 101 | 102 | # resolvconf 103 | touch /etc/resolv.conf 104 | 105 | # mount shared folders 106 | mkdir -p /slim /host 107 | modprobe 9pnet_virtio 108 | mount -t 9p -o trans=virtio share0 /slim 109 | mount -t 9p -o trans=virtio share1 /host 110 | 111 | # start default openrc level 112 | /sbin/openrc default 113 | 114 | exec /sbin/init 115 | -------------------------------------------------------------------------------- /images/alpine3.12-raw/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic alpine server with ssh. 2 | base_args: 3 | PKGS: tmux 4 | SSHPUBKEY: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /images/alpine3.12-virtualbox/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.12 AS openrc 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --no-cache --initdb openrc 4 | 5 | FROM alpine:3.12 AS kernel 6 | RUN mkdir -p /lib/apk/db /run 7 | RUN apk add --no-cache --initdb linux-virt virtualbox-guest-modules-virt 8 | 9 | FROM alpine:3.12 AS install 10 | USER root 11 | # the public key that is authorized to connect to this instance. 12 | ARG SSHPUBKEY 13 | # optional packages 14 | ARG PKGS 15 | 16 | # don't want all the /etc stuff from openrc -- only tools 17 | # https://pkgs.alpinelinux.org/contents?repo=main&page=2&arch=x86_64&branch=v3.9&name=openrc 18 | COPY --from=openrc /lib/ /lib/ 19 | COPY --from=openrc /bin /bin 20 | COPY --from=openrc /sbin /sbin 21 | COPY --from=openrc /etc/ /etc/ 22 | 23 | # Need virtio modules for networking 24 | COPY --from=kernel /lib/modules /lib/modules 25 | 26 | # Copy kernel for later use 27 | COPY --from=kernel /boot/vmlinuz-virt /vmlinuz 28 | 29 | RUN mkdir -p /lib/apk/db /run 30 | RUN apk add --update --no-cache --initdb alpine-baselayout apk-tools busybox ca-certificates util-linux \ 31 | openssh openssh-client rng-tools dhcpcd virtualbox-guest-additions 32 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 
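# Note: compared to alpine3.12-raw, this image also carries virtualbox-guest-modules-virt
# and virtualbox-guest-additions, which the init script uses as a vboxsf fallback
# when mounting the /slim and /host shared folders.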
33 | 34 | # Deleted cached packages 35 | RUN rm -rf /var/cache/apk/* 36 | 37 | # Our local files 38 | COPY files/init /init 39 | 40 | RUN echo "Welcome to slim!" > /etc/motd 41 | RUN echo "tty0::respawn:/sbin/agetty -a root -L tty0 38400 vt100" > /etc/inittab 42 | RUN echo "ttyS0::respawn:/sbin/agetty -a root -L ttyS0 115200 vt100" >> /etc/inittab 43 | 44 | # Set an ssh key 45 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 46 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 47 | 48 | # Fix ssh 49 | RUN sed -i 's/root:!/root:*/' /etc/shadow 50 | -------------------------------------------------------------------------------- /images/alpine3.12-virtualbox/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add sshd default 10 | rc_add dhcpcd default 11 | rc_add urandom default 12 | rc_add rngd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "nanobox" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 22 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 23 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 24 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 25 | 26 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 27 | mkdir -m 01777 /var/tmp 28 | mkdir -m 0755 /var/cache/apk 29 | ln -s /run /var/run 30 | 31 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 32 | mknod -m 0600 /dev/console c 5 1 33 | mknod -m 0620 /dev/tty1 c 4 1 34 | mknod -m 0666 /dev/tty 5 0 35 | mknod -m 0666 /dev/null 1 3 36 | mknod -m 0660 /dev/kmsg 1 11 37 | 38 | ln -s /proc/self/fd /dev/fd 39 | ln -s /proc/self/fd/0 /dev/stdin 40 | ln -s /proc/self/fd/1 /dev/stdout 41 | ln -s /proc/self/fd/2 /dev/stderr 42 | ln -s /proc/kcore /dev/kcore 43 | 44 | mkdir -m 01777 /dev/mqueue 45 | mkdir -m 01777 /dev/shm 46 | mkdir -m 0755 /dev/pts 47 | 48 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 49 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 50 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 51 | 52 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 53 | 54 | # ignore errors 55 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 56 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 57 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 58 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 59 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 60 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 61 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 62 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 63 | 64 | mount -t tmpfs cgroup_root /sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 65 | 66 | while read c; do 67 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 68 | cg=$(echo "$c" | cut -f1) 69 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 70 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 71 | fi 72 | done < 
/proc/cgroups 73 | 74 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 75 | 76 | mkdir -m 0555 /sys/fs/cgroup/systemd 77 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o none,name=systemd 78 | 79 | mount --make-rshared / 80 | 81 | # hotplug 82 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 83 | for x in $(ls /sys/bus/*/devices/*/modalias); do 84 | /sbin/modprobe -abq $(cat "$x") || true; 85 | done 86 | 87 | # clock 88 | /sbin/hwclock --hctosys --utc 89 | 90 | # loopback 91 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 92 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 93 | /sbin/ip link set lo up 94 | 95 | # limits 96 | ulimit -n 1048576 97 | ulimit -p unlimited 98 | 99 | # hostname 100 | hostname $(cat /etc/hostname) 101 | 102 | # resolvconf 103 | touch /etc/resolv.conf 104 | 105 | # mount shared folders 106 | mkdir -p /slim /host 107 | modprobe 9pnet_virtio vboxsf 108 | mount -t 9p -o trans=virtio share0 /slim || mount.vboxsf vbox-share-0 /slim || true 109 | mount -t 9p -o trans=virtio share1 /host || mount.vboxsf vbox-share-1 /host || true 110 | 111 | # start default openrc level 112 | /sbin/openrc default 113 | 114 | exec /sbin/init 115 | -------------------------------------------------------------------------------- /images/alpine3.12-virtualbox/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic alpine server with ssh, with virtualbox drivers. 2 | base_args: 3 | PKGS: tmux 4 | SSHPUBKEY: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /images/alpine3.15/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.15 2 | RUN mkdir -p /lib/apk/db /run 3 | RUN apk add --initdb openrc 4 | RUN apk add linux-virt kmod kmod-openrc blkid 5 | #RUN apk add mkinitfs blkid squashfs-tools 6 | 7 | # busybox-initscripts busybox-suid 8 | RUN apk add --update alpine-baselayout alpine-conf alpine-keys apk-tools busybox busybox-initscripts \ 9 | ca-certificates dbus-libs kbd-bkeymaps \ 10 | gnutls openssh openssh-client rng-tools dhcpcd network-extras wget util-linux 11 | RUN [ ! -z "$PKGS" ] && apk add --no-cache $PKGS || echo "No optional pkgs provided." 12 | 13 | # FYI 14 | # https://hütter.ch/posts/pitaya-alpine/ 15 | 16 | # Create modloop 17 | #RUN update-kernel -f virt /boot 18 | 19 | # Rebuild initrd 20 | #RUN echo 'features="ata base cdrom dhcp ext4 keymap kms mmc nvme raid scsi usb network virtio squashfs"' > /etc/mkinitfs/mkinitfs.conf 21 | #RUN mkinitfs -b / 5.15.16-0-virt 22 | 23 | USER root 24 | # the public key that is authorized to connect to this instance. 
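# (SSHPUBKEY and PKGS appear to be supplied by slim as docker build arguments,
# taken from the base_args section of this image's info.yml.)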
25 | ARG SSHPUBKEY 26 | # optional packages 27 | ARG PKGS 28 | 29 | # Copy kernel for later use 30 | RUN cp /boot/vmlinuz-virt /vmlinuz 31 | # Nuke boot 32 | RUN rm -rf /boot 33 | 34 | # Deleted cached packages 35 | #RUN rm -rf /var/cache/apk/* 36 | 37 | # Our init 38 | COPY files/init /init 39 | 40 | RUN echo "Welcome to slim!" > /etc/motd 41 | # RUN echo "tty0::respawn:/sbin/agetty -a root -L tty0 38400 vt100" >> /etc/inittab 42 | # RUN echo "# Allow hypervisor login" >> /etc/inittab 43 | #RUN echo "hvc0:12345:respawn:/sbin/agetty -L 9600 hvc0 screen" >> /etc/inittab 44 | # RUN echo "ttyS0::respawn:/sbin/agetty -a root -L ttyS0 115200 vt100" >> /etc/inittab 45 | 46 | # Set an ssh key 47 | RUN mkdir -p /etc/ssh /root/.ssh && chmod 0700 /root/.ssh 48 | RUN echo $SSHPUBKEY > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys 49 | 50 | # Fix ssh 51 | RUN sed -i 's/root:!/root:*/' /etc/shadow -------------------------------------------------------------------------------- /images/alpine3.15/files/init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # add a boot service to $sysroot 4 | rc_add() { 5 | mkdir -p $sysroot/etc/runlevels/$2 6 | ln -sf /etc/init.d/$1 $sysroot/etc/runlevels/$2/$1 7 | } 8 | 9 | rc_add sshd default 10 | rc_add dhcpcd default 11 | rc_add urandom default 12 | rc_add rngd default 13 | 14 | chown -R root:root /root 15 | 16 | echo "slim" > /etc/hostname 17 | 18 | modprobe virtio_net 19 | 20 | # mounts 21 | mount -t proc proc /proc -o nodev,nosuid,noexec,relatime 22 | mount -t tmpfs tmpfs /run -o nodev,nosuid,noexec,relatime,size=10%,mode=755 23 | mount -t tmpfs tmpfs /tmp -o nodev,nosuid,noexec,relatime,size=10%,mode=1777 24 | mount -t tmpfs tmpfs /var -o nodev,nosuid,noexec,relatime,size=50%,mode=755 25 | 26 | mkdir -m 0755 /var/cache /var/empty /var/lib /var/local /var/lock /var/log /var/opt /var/spool 27 | mkdir -m 01777 /var/tmp 28 | mkdir -m 0755 /var/cache/apk 29 | ln -s /run /var/run 30 | 31 | mount -t devtmpfs dev /dev -o nosuid,noexec,relatime,size=10m,nr_inodes=248418,mode=755 32 | mknod -m 0600 /dev/console c 5 1 33 | mknod -m 0620 /dev/tty1 c 4 1 34 | mknod -m 0666 /dev/tty 5 0 35 | mknod -m 0666 /dev/null 1 3 36 | mknod -m 0660 /dev/kmsg 1 11 37 | 38 | ln -s /proc/self/fd /dev/fd 39 | ln -s /proc/self/fd/0 /dev/stdin 40 | ln -s /proc/self/fd/1 /dev/stdout 41 | ln -s /proc/self/fd/2 /dev/stderr 42 | ln -s /proc/kcore /dev/kcore 43 | 44 | mkdir -m 01777 /dev/mqueue 45 | mkdir -m 01777 /dev/shm 46 | mkdir -m 0755 /dev/pts 47 | 48 | mount -t mqueue mqueue /dev/mqueue -o noexec,nosuid,nodev 49 | mount -t tmpfs shm /dev/shm -o noexec,nosuid,nodev,mode=1777 50 | mount -t devpts devpts /dev/pts -o noexec,nosuid,gid=5,mode=0620 51 | 52 | mount -t sysfs sysfs /sys -o noexec,nosuid,nodev 53 | 54 | # ignore errors 55 | mount -t securityfs /sys/kernel/security -o noexec,nosuid,nodev || true 56 | mount -t debugfs debugfs /sys/kernel/debug -o noexec,nosuid,nodev || true 57 | mount -t configfs configfs /sys/kernel/config -o noexec,nosuid,nodev || true 58 | mount -t fusectl fusectl /sys/fs/fuse/connections -o noexec,nosuid,nodev || true 59 | mount -t selinuxfs selinuxfs /sys/fs/selinux -o noexec,nosuid || true 60 | mount -t pstore pstore /sys/fs/pstore -o noexec,nosuid,nodev || true 61 | mount -t efivarfs efivarfs /sys/firmware/efi/efivars -o noexec,nosuid,nodev || true 62 | mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc -o noexec,nosuid,nodev || true 63 | 64 | mount -t tmpfs cgroup_root 
/sys/fs/cgroup -o nodev,noexec,nosuid,mode=755,size=10m 65 | 66 | while read c; do 67 | if [[ "1" == $(echo "$c" | cut -f4) ]]; then 68 | cg=$(echo "$c" | cut -f1) 69 | mkdir -m 0555 "/sys/fs/cgroup/$cg" 70 | mount -t cgroup "$cg" "/sys/fs/cgroup/$cg" -o "noexec,nosuid,nodev,$cg" 71 | fi 72 | done < /proc/cgroups 73 | 74 | echo "1" > /sys/fs/cgroup/memory/memory.use_hierarchy 75 | 76 | mkdir -m 0555 /sys/fs/cgroup/systemd 77 | mount -t cgroup cgroup /sys/fs/cgroup/systemd -o none,name=systemd 78 | 79 | mount --make-rshared / 80 | 81 | # hotplug 82 | echo "/sbin/mdev" > /proc/sys/kernel/hotplug 83 | for x in $(ls /sys/bus/*/devices/*/modalias); do 84 | /sbin/modprobe -abq $(cat "$x") || true; 85 | done 86 | 87 | # clock 88 | /sbin/hwclock --hctosys --utc 89 | 90 | # loopback 91 | /sbin/ip addr add 127.0.0.1/8 dev lo brd + scope host 92 | /sbin/ip route add 127.0.0.1/8 dev lo scope host 93 | /sbin/ip link set lo up 94 | 95 | # limits 96 | ulimit -n 1048576 97 | ulimit -p unlimited 98 | 99 | # hostname 100 | hostname $(cat /etc/hostname) 101 | 102 | # resolvconf 103 | touch /etc/resolv.conf 104 | 105 | # mount shared folders 106 | mkdir -p /slim /host 107 | modprobe 9pnet_virtio 108 | mount -t 9p -o trans=virtio share0 /slim 109 | mount -t 9p -o trans=virtio share1 /host 110 | 111 | # start default openrc level 112 | /sbin/openrc default 113 | 114 | exec /sbin/init 115 | -------------------------------------------------------------------------------- /images/alpine3.15/info.yml: -------------------------------------------------------------------------------- 1 | description: A basic 3.15 alpine server with ssh 2 | base_args: 3 | PKGS: tmux 4 | SSHPUBKEY: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /images/alpine3.15/notes.md: -------------------------------------------------------------------------------- 1 | features="ata base cdrom ext4 keymap kms mmc nvme raid scsi usb network virtio squashfs" 2 | 3 | ata 4 | base 5 | bootchart 6 | btrfs 7 | cdrom 8 | cramfs 9 | cryptkey 10 | cryptsetup 11 | dasd_mod 12 | dhcp 13 | ena 14 | ext2 15 | ext3 16 | ext4 17 | f2fs 18 | floppy 19 | gfs2 20 | https 21 | jfs 22 | keymap 23 | kms 24 | lvm 25 | mmc 26 | nbd 27 | network 28 | nvme 29 | ocfs2 30 | qeth 31 | raid 32 | reiserfs 33 | scsi 34 | squashfs 35 | ubifs 36 | usb 37 | virtio 38 | xenpci 39 | xfs 40 | zfcp 41 | zfs -------------------------------------------------------------------------------- /images/ubuntu-20.04-ci-hyperv/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | RUN apt-get update && \ 3 | # kernel, hyper-v support, and other virtual tooling. 
4 | apt-get install -y linux-virtual linux-cloud-tools-virtual linux-tools-virtual 5 | 6 | RUN echo $' \n\ 7 | hv_blkvsc\n\ 8 | hv_utils\n\ 9 | hv_vmbus\n\ 10 | hv_sock\n\ 11 | hv_storvsc\n\ 12 | hv_netvsc\n' >> /etc/initramfs-tools/modules 13 | 14 | # hv_balloon\n\ 15 | RUN update-initramfs -u 16 | 17 | # Move for easier extraction. 18 | RUN mv /boot/vmlinuz-* /vmlinuz 19 | RUN mv /boot/initrd.img-* /initrd 20 | 21 | # Needed for configuring server and setting up devices. 22 | RUN apt install cloud-init udev kmod -y 23 | # Quality of life: 24 | RUN apt install openssh-server sudo -y 25 | RUN apt clean -------------------------------------------------------------------------------- /images/ubuntu-20.04-ci-hyperv/info.yml: -------------------------------------------------------------------------------- 1 | description: A simple ubuntu 20.04 server with cloud-init. 2 | base_args: 3 | PKGS: tmux -------------------------------------------------------------------------------- /images/ubuntu-20.04-ci-hyperv/meta-data: -------------------------------------------------------------------------------- 1 | instance-id: basicvm 2 | local-hostname: basicvm -------------------------------------------------------------------------------- /images/ubuntu-20.04-ci-hyperv/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | debug: true 3 | disable_root: false 4 | users: 5 | - name: ubuntu 6 | sudo: ALL=(ALL) NOPASSWD:ALL 7 | groups: users, admin 8 | home: /home/ubuntu 9 | shell: /bin/bash 10 | lock_passwd: false 11 | ssh-authorized-keys: 12 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGKxzz9dfF1vxLkHL++/BkLM/3CpxQeQgQM10T2ZmhlA 13 | ssh_pwauth: true 14 | disable_root: false 15 | chpasswd: 16 | list: | 17 | ubuntu:ubuntu 18 | expire: False 19 | 20 | write_files: 21 | - path: /etc/netplan/basicvm-net.yaml 22 | permission: '0644' 23 | content: | 24 | network: 25 | ethernets: 26 | enp0s1: 27 | dhcp4: true 28 | version: 2 29 | - path: /etc/fstab 30 | content: | 31 | basicvm_shared_dir /home/ubuntu/shared virtiofs rw,noatime,_netdev 0 0 32 | append: true 33 | 34 | runcmd: 35 | - rm /etc/netplan/50-cloud-init.yaml 36 | - netplan generate 37 | - netplan apply 38 | - mkdir -p /home/ubuntu/shared 39 | 40 | final_message: "The system started in $UPTIME seconds" -------------------------------------------------------------------------------- /images/ubuntu-20.04-cloud-init/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 AS kernel 2 | RUN apt-get update && \ 3 | apt-get install -y linux-virtual && \ 4 | apt-get clean 5 | 6 | FROM ubuntu:20.04 7 | 8 | # Extract the kernel, modules, and initrd 9 | COPY --from=kernel /lib/modules /lib/modules 10 | COPY --from=kernel /boot/vmlinuz-* /vmlinuz 11 | COPY --from=kernel /boot/initrd.img-* /initrd 12 | 13 | RUN apt-get update 14 | # Needed for configuring server and setting up devices. 15 | RUN apt install cloud-init udev kmod -y 16 | # If you'd like to be able to ssh in: 17 | RUN apt install openssh-server sudo -y -------------------------------------------------------------------------------- /images/ubuntu-20.04-cloud-init/info.yml: -------------------------------------------------------------------------------- 1 | description: A simple ubuntu 20.04 server with cloud-init. 
2 | base_args: 3 | PKGS: tmux -------------------------------------------------------------------------------- /images/ubuntu-20.04-cloud-init/meta-data: -------------------------------------------------------------------------------- 1 | instance-id: basicvm 2 | local-hostname: basicvm -------------------------------------------------------------------------------- /images/ubuntu-20.04-cloud-init/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | debug: true 3 | disable_root: false 4 | users: 5 | - name: ubuntu 6 | sudo: ALL=(ALL) NOPASSWD:ALL 7 | groups: users, admin 8 | home: /home/ubuntu 9 | shell: /bin/bash 10 | lock_passwd: false 11 | ssh-authorized-keys: 12 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGKxzz9dfF1vxLkHL++/BkLM/3CpxQeQgQM10T2ZmhlA 13 | ssh_pwauth: true 14 | disable_root: false 15 | chpasswd: 16 | list: | 17 | ubuntu:ubuntu 18 | expire: False 19 | 20 | write_files: 21 | - path: /etc/netplan/basicvm-net.yaml 22 | permission: '0644' 23 | content: | 24 | network: 25 | ethernets: 26 | enp0s1: 27 | dhcp4: true 28 | version: 2 29 | - path: /etc/fstab 30 | content: | 31 | basicvm_shared_dir /home/ubuntu/shared virtiofs rw,noatime,_netdev 0 0 32 | append: true 33 | 34 | runcmd: 35 | - rm /etc/netplan/50-cloud-init.yaml 36 | - netplan generate 37 | - netplan apply 38 | - mkdir -p /home/ubuntu/shared 39 | 40 | final_message: "The system started in $UPTIME seconds" -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | const yargs = require('yargs'); 3 | const { version } = require('./package.json'); 4 | 5 | const { check } = require('./lib/dependencies'); 6 | const env = require('./lib/env'); 7 | 8 | // Environment reset/sanity check 9 | // - prereqs 10 | // - permissions 11 | // - required files 12 | (async () => { 13 | await env.setup(); 14 | const {} = env.vars(); 15 | 16 | yargs 17 | .middleware(check) 18 | .commandDir('./lib/commands') 19 | .version() 20 | .epilog(version ? 
`Version: ${version}`: '') 21 | .demandCommand(1, 'Did you forget to specify a command?') 22 | .recommendCommands() 23 | .showHelpOnFail(false, 'Specify --help for available options') 24 | .strict(true) 25 | .help() 26 | .wrap(yargs.terminalWidth()) 27 | .argv 28 | })(); 29 | -------------------------------------------------------------------------------- /lib/build.js: -------------------------------------------------------------------------------- 1 | const child = require('child_process'); 2 | const Docker = require('dockerode'); 3 | const fs = require('fs-extra'); 4 | const path = require('path'); 5 | const tar = require('tar'); 6 | 7 | const env = require('./env'); 8 | 9 | const { info, ok, error } = require('./logger'); 10 | const { slimdir, scriptdir } = env.vars(); 11 | 12 | const docker = new Docker(); 13 | 14 | const formatSteps = { 15 | 'vhd': [dockerBuild, dockerExport, buildEfiImage, makeVhd], 16 | 'raw': [dockerBuild, dockerExport, buildRootfsImage, cleanup], 17 | 'initrd-m1': [dockerBuild, dockerExtract, uncompressKernel, cpioPack], 18 | 'initrd': [dockerBuild, dockerExtract, cpioPack], 19 | 'special': [dockerBuild, dockerExport, buildRootfsImage, dockerExtract, uncompressKernel, cpioPack], 20 | 'iso': [dockerBuild, dockerExtract, uncompressKernel, rawExtract, buildRootfsImage, isoBuild, cleanup], 21 | 'qcow2': [dockerBuild, dockerExtract, uncompressKernel, rawExtract, buildRootfsImage, isoBuild, qcowBuild, cleanup] 22 | }; 23 | 24 | async function build(context) { 25 | let { format, dockerOpts, outputDir } = context; 26 | 27 | ok( `Starting build for ${format} format(s) using ${JSON.stringify(dockerOpts)}` ); 28 | 29 | await fs.emptyDir( outputDir ); 30 | 31 | // use a set in case there are overlaps between steps 32 | let steps = new Set(formatSteps[format]); 33 | // add all additional steps for the requested formats 34 | format.forEach(f => formatSteps[f].forEach(s => steps.add(s))); 35 | 36 | // run each step in order 37 | for (let s of steps) { 38 | try { 39 | await s(context); 40 | } catch (err) { 41 | error( err ); 42 | return; 43 | } 44 | } 45 | 46 | ok('success!'); 47 | } 48 | 49 | async function uncompressKernel(context) { 50 | info('Uncompressing compressed kernel'); 51 | 52 | let vmDir = path.join(slimdir, 'slim-vm'); 53 | 54 | let { outputDir } = context; 55 | 56 | return new Promise( async (resolve, reject) => { 57 | 58 | // Rename vmlinuz => vmlinuz.gz 59 | await fs.move(path.join(vmDir, 'vmlinuz'), path.join(vmDir, 'vmlinuz.gz'), { overwrite: true }); 60 | 61 | const zlib = require('zlib'); 62 | const unzip = zlib.createGunzip(); 63 | 64 | // Unzip 65 | const inp = fs.createReadStream(path.join(vmDir, 'vmlinuz.gz')); 66 | const out = fs.createWriteStream(path.join(vmDir, 'vmlinuz')); 67 | out.on('finish', () => 68 | { 69 | fs.removeSync( path.join(vmDir, 'vmlinuz.gz') ); 70 | console.log("Uncompressed kernel"); 71 | resolve(); 72 | }); 73 | out.on('error', (err) => { 74 | reject( err ); 75 | }) 76 | 77 | inp.pipe(unzip).pipe(out); 78 | }) 79 | 80 | 81 | } 82 | 83 | async function dockerBuild(context) { 84 | info('building docker image'); 85 | 86 | let { buildPath, dockerOpts } = context; 87 | 88 | if (!fs.existsSync(path.join(buildPath, 'Dockerfile'))) 89 | throw new Error(`Expected Dockerfile in ${buildPath}`); 90 | 91 | let stream = await docker.buildImage({ context: buildPath }, { 92 | t: 'slim-vm', 93 | ...dockerOpts, 94 | }); 95 | return new Promise((resolve, reject) => { 96 | docker.modem.followProgress(stream, (err, res) => err ? 
reject(err) : resolve(res), 97 | (ev) => { 98 | if( ev.error ) { 99 | return reject( ev.error ); 100 | } 101 | if( ev.stream ) 102 | { 103 | process.stdout.write(ev.stream) 104 | } 105 | }); 106 | }); 107 | } 108 | 109 | 110 | async function dockerExtract(context) { 111 | info('exporting docker filesystem'); 112 | 113 | let image = 'slim-vm'; 114 | let vmDir = path.join(slimdir, image); 115 | await fs.emptyDir(vmDir); 116 | 117 | 118 | const container = await docker.createContainer({ Image: image, Cmd: ['sh'] }); 119 | 120 | const contents = await container.export(); 121 | try { 122 | await new Promise((resolve, reject) => { 123 | contents.pipe( 124 | tar.x({ C: vmDir }) 125 | .on('close', resolve) 126 | .on('error', err => reject(err)) 127 | ); 128 | }); 129 | } catch (e) { 130 | throw e; 131 | } finally { 132 | container.remove().catch(() => undefined); 133 | } 134 | } 135 | 136 | async function dockerExport(context) { 137 | info('exporting docker filesystem'); 138 | 139 | let image = 'slim-vm'; 140 | let exportDir = path.join(slimdir, image); 141 | await fs.emptyDir(exportDir); 142 | 143 | let { outputDir } = context; 144 | 145 | 146 | const container = await docker.createContainer({ Image: image, Cmd: ['sh'] }); 147 | 148 | const contents = await container.export(); 149 | try { 150 | await new Promise((resolve, reject) => { 151 | contents.pipe( 152 | fs.createWriteStream( path.join(outputDir, 'rootfs.tar')) 153 | .on('finish', () => 154 | { 155 | resolve(); 156 | }) 157 | .on('error', err => reject(err) ) 158 | ); 159 | }); 160 | } catch (e) { 161 | throw e; 162 | } finally { 163 | container.remove().catch(() => undefined); 164 | } 165 | } 166 | 167 | async function buildEfiImage(context) { 168 | info('saving rootfs as uefi bootable raw image'); 169 | 170 | let { outputDir, formatOptions } = context; 171 | let vmDir = path.join(slimdir, 'slim-vm'); 172 | 173 | const rootFs = require("./tools/rootfs"); 174 | await rootFs.asEfi( 175 | outputDir, 176 | outputDir, 177 | formatOptions.size 178 | ); 179 | 180 | } 181 | 182 | async function buildRootfsImage(context) { 183 | info('saving rootfs as ext4 image'); 184 | 185 | let { outputDir, formatOptions } = context; 186 | let vmDir = path.join(slimdir, 'slim-vm'); 187 | 188 | const rootFs = require("./tools/rootfs"); 189 | await rootFs.asExt4( 190 | outputDir, 191 | outputDir, 192 | formatOptions.size 193 | ); 194 | 195 | } 196 | 197 | async function moveBoot(context) { 198 | info('moving vmlinuz and initrd to /boot'); 199 | 200 | let vmDir = path.join(slimdir, 'slim-vm'); 201 | 202 | await fs.move(path.join(vmDir, 'vmlinuz'), path.join(vmDir, 'boot', 'vmlinuz'), { overwrite: true }); 203 | await fs.move(path.join(vmDir, 'initrd'), path.join(vmDir, 'boot', 'initrd'), { overwrite: true }); 204 | 205 | } 206 | 207 | async function makeVhd(context) { 208 | info('making vhd'); 209 | 210 | let { outputDir } = context; 211 | 212 | let input = path.join(outputDir, 'rootfs') 213 | let output = path.join(outputDir, "rootfs.vhd"); 214 | 215 | const vhd = require("./tools/vhd"); 216 | await vhd.makeVhd( input, output ); 217 | } 218 | 219 | 220 | async function rawExtract(context) { 221 | info('extracting rootfs'); 222 | 223 | let { outputDir, formatOptions } = context; 224 | let vmDir = path.join(slimdir, 'slim-vm'); 225 | 226 | // Move out kernel and initrd to be seperate. Copy into outputDir. 227 | // Note, a copy is left in case another stage needs to repackage together (iso, etc.) 
228 | if( fs.existsSync(path.join(vmDir, 'vmlinuz')) ) { 229 | await fs.move(path.join(vmDir, 'vmlinuz'), path.join(slimdir, 'vmlinuz'), { overwrite: true }); 230 | await fs.copy(path.join(slimdir, 'vmlinuz'), path.join(outputDir, 'vmlinuz')); 231 | } 232 | if( fs.existsSync(path.join(vmDir, 'initrd')) ) { 233 | await fs.move(path.join(vmDir, 'initrd'), path.join(slimdir, 'initrd'), { overwrite: true }); 234 | await fs.copy(path.join(slimdir, 'initrd'), path.join(outputDir, 'initrd')); 235 | } 236 | 237 | // This should only be done for manual initrd builds... pending...new pipeline. 238 | // await cpioPack(); 239 | // await fs.copy(path.join(slimdir, 'rootfs'), path.join(outputDir, 'rootfs')); 240 | } 241 | 242 | 243 | async function cpioPack(context) { 244 | info( "Packing initrd as compressed cpio archive" ) 245 | // child.execSync(`find . | cpio -o -H newc 2>/dev/null > ${path.join(slimdir, 'rootfs')}`, 246 | // {cwd: vmDir, stdio: 'inherit'}); 247 | // return; 248 | let { formatOptions, outputDir } = context; 249 | 250 | let vmDir = path.join(slimdir, 'slim-vm'); 251 | let output = path.join(outputDir, 'initrd') 252 | let zip = true; 253 | 254 | // Move kernel out 255 | await fs.move(path.join(vmDir, 'vmlinuz'), path.join(outputDir, 'vmlinuz'), { overwrite: true }); 256 | console.log("Moved kernel into ", outputDir); 257 | 258 | const cpio = require('cpio-fs'); 259 | const pack = cpio.pack(vmDir, {format: 'newc'}); 260 | 261 | const zlib = require('zlib'); 262 | const pass = new require('stream').PassThrough(); 263 | const zipOrPass = zip ? zlib.createGzip() : pass; 264 | 265 | const outputStream = fs.createWriteStream(output); 266 | 267 | return new Promise( (resolve, reject) => { 268 | 269 | let onError = (err) => { 270 | error( err.message ); 271 | reject(err); 272 | }; 273 | 274 | pack.on('error', onError ); 275 | zipOrPass.on('error', onError ); 276 | outputStream.on('error', onError ) 277 | outputStream.on('finish', () => 278 | { 279 | console.log("Finished cpio pack"); 280 | resolve(); 281 | }); 282 | 283 | pack.pipe( zipOrPass ) 284 | .pipe( outputStream ); 285 | 286 | }); 287 | } 288 | 289 | async function isoBuild(context) { 290 | info('building iso'); 291 | 292 | 293 | const makeiso = require('./tools/makeiso'); 294 | 295 | let { outputDir } = context; 296 | let outputPath = path.join(outputDir, 'slim.iso'); 297 | 298 | let isoDir = path.join(slimdir, 'slim-iso') 299 | let bootDir = path.join(isoDir, 'boot'); 300 | let isolinuxDir = path.join(isoDir, 'isolinux'); 301 | 302 | await Promise.all([ 303 | fs.emptyDir(isoDir), 304 | fs.emptyDir(bootDir), 305 | fs.emptyDir(isolinuxDir) 306 | ]); 307 | 308 | await fs.copy(path.join(scriptdir, 'scripts', 'syslinux'), isolinuxDir); 309 | await fs.copy(path.join(slimdir, 'vmlinuz'), path.join(bootDir, 'vmlinuz')); 310 | await fs.copy(path.join(slimdir, 'rootfs'), path.join(bootDir, 'rootfs')); 311 | 312 | await makeiso.createBootableIso(outputPath, 313 | 'slim', 314 | isoDir 315 | ); 316 | } 317 | 318 | async function qcowBuild(context) { 319 | info('building qcow2 image'); 320 | 321 | let { outputDir } = context; 322 | 323 | child.execSync(`qemu-img convert -O qcow2 slim.iso slim.qcow2`, 324 | {cwd: outputDir, stdio: 'inherit'}); 325 | } 326 | 327 | async function cleanup(context) { 328 | info('cleaning up...'); 329 | 330 | let { format, outputDir } = context; 331 | 332 | // If raw build, clean up intermediate step in slim directory. 
333 | if( format.indexOf("raw") >= 0 ) { 334 | await fs.remove(path.join(slimdir, 'rootfs')); 335 | await fs.remove(path.join(slimdir, 'vmlinuz')); 336 | await fs.remove(path.join(slimdir, 'initrd')); 337 | } 338 | 339 | } 340 | 341 | module.exports = build; 342 | -------------------------------------------------------------------------------- /lib/commands/build.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | const p = require('path'); 3 | const yaml = require('js-yaml'); 4 | 5 | const { error } = require('../logger'); 6 | 7 | const build = require('../build'); 8 | const env = require('../env'); 9 | 10 | const availFormats = ['raw', 'initrd', 'initrd-m1', 'iso', 'qcow2', 'vhd', 'special'] 11 | 12 | const { pubkey } = env.vars(); 13 | 14 | function getDefaultPlatform() { 15 | 16 | let commonMappings = { 17 | 'x64' : 'amd64' 18 | }; 19 | let arch = process.arch; 20 | arch = commonMappings[arch] || arch; 21 | return arch; 22 | } 23 | 24 | 25 | exports.command = 'build [path]'; 26 | exports.desc = 'Build a new vm'; 27 | 28 | exports.builder = yargs => { 29 | yargs.options({ 30 | cache: { 31 | default: true, 32 | description: 'whether to cache images during docker build', 33 | type: 'boolean' 34 | }, 35 | platform: { 36 | default: getDefaultPlatform(), 37 | description: 'platform option for docker build', 38 | type: 'string' 39 | }, 40 | format: { 41 | alias: 'f', 42 | default: "raw", 43 | description: 'image formats to build', 44 | type: 'string' 45 | }, 46 | zip: { 47 | alias: 'z', 48 | default: false, 49 | description: 'Compress raw image', 50 | type: 'boolean' 51 | }, 52 | size: { 53 | alias: 's', 54 | default: 512, 55 | description: 'Size for rootfs (in MB)', 56 | type: 'string' 57 | } 58 | 59 | }); 60 | }; 61 | 62 | exports.handler = async argv => { 63 | let { path, cache, platform, format, zip, size } = argv; 64 | 65 | if( !path ) { 66 | error("Path with Dockerfile is required."); 67 | process.exit(1); 68 | } 69 | 70 | // ensure format is an array 71 | if (typeof(format) === 'string') format = [format] 72 | 73 | // Check if we support format. 
74 | for (let f of format) { 75 | if (!availFormats.includes(f)) { 76 | error(`Format ${f} is not supported`); 77 | return; 78 | } 79 | } 80 | 81 | let { buildPath, infoPath, outputDir } = await env.makeContext(path); 82 | 83 | let info = await yaml.safeLoad(fs.readFileSync(infoPath)); 84 | let base_args = ''; 85 | 86 | if (info.base_repository) buildPath = await env.cloneOrPull(info.base_repository); 87 | if (info.base_directory) buildPath = p.join(buildPath, info.base_directory); 88 | if (info.base_args) base_args = info.base_args; 89 | // base_args["SSHPUBKEY"] = pubkey; 90 | 91 | await fs.writeFile(p.join(outputDir, 'info.yml'), await yaml.safeDump(info)); 92 | 93 | let context = { 94 | format, 95 | formatOptions: { 96 | zip: zip, 97 | size: size 98 | }, 99 | buildPath, 100 | outputDir, 101 | dockerOpts: { 102 | nocache: !cache, 103 | platform: platform, 104 | buildargs: base_args 105 | } 106 | } 107 | 108 | try { 109 | await build(context); 110 | } catch (e) { 111 | error(e); 112 | } 113 | }; 114 | -------------------------------------------------------------------------------- /lib/commands/clean.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs-extra'); 2 | 3 | const env = require('../env'); 4 | 5 | const { registry } = env.vars(); 6 | const { ok } = require('../logger'); 7 | 8 | exports.command = 'clean'; 9 | exports.desc = 'Clear all images from the registry'; 10 | 11 | exports.builder = () => {}; 12 | 13 | exports.handler = async () => { 14 | await fs.emptyDir(registry); 15 | 16 | ok('Registry cleared!'); 17 | }; 18 | -------------------------------------------------------------------------------- /lib/commands/cloudinit.js: -------------------------------------------------------------------------------- 1 | const pathUtil = require('path'); 2 | const env = require('../env'); 3 | const makeiso = require('../tools/makeiso'); 4 | 5 | exports.command = 'cloudinit [path]'; 6 | exports.desc = 'Create cloud init iso'; 7 | 8 | exports.builder = yargs => { 9 | }; 10 | 11 | exports.handler = async argv => { 12 | 13 | let { path } = argv; 14 | let { buildPath, outputDir } = await env.makeContext(path); 15 | 16 | await makeiso.createCloudInitIso(buildPath, pathUtil.join( outputDir, 'cidata.iso')); 17 | }; -------------------------------------------------------------------------------- /lib/commands/init.js: -------------------------------------------------------------------------------- 1 | exports.command = 'init'; 2 | exports.desc = 'Initialize slim'; 3 | 4 | exports.builder = yargs => { 5 | }; 6 | 7 | exports.handler = async argv => { 8 | 9 | const docker = require("../tools/docker"); 10 | try { 11 | await docker.pull( "ottomatica/vbox-img", {}, undefined, false ); 12 | await docker.pull( "ottomatica/mkisofs", {}, undefined, false ); 13 | } catch (err ) { 14 | console.error( "Could not pull image(s)", err.message ); 15 | } 16 | 17 | }; -------------------------------------------------------------------------------- /lib/commands/push.js: -------------------------------------------------------------------------------- 1 | const env = require('../env'); 2 | const fs = require('fs-extra'); 3 | const path = require('path'); 4 | const { Octokit } = require("@octokit/rest"); 5 | const { ok } = require('../logger'); 6 | 7 | const { registry } = env.vars(); 8 | const registryPath = registry; 9 | 10 | exports.command = 'push <image> <registry>'; 11 | exports.desc = 'Push an image to a GitHub release registry'; 12 | 13 | exports.builder = yargs => { 14 | yargs.options({ 15
| force: { 16 | default: true, 17 | description: 'delete the old image from registry and re-upload', 18 | type: 'boolean' 19 | } 20 | }); 21 | }; 22 | 23 | exports.handler = async argv => { 24 | let { image, registry, force } = argv; 25 | 26 | const [owner, repo, release] = registry.split(/[\/#]/g); 27 | const ghToken = process.env.GH_TOKEN; 28 | 29 | let imageAssets = [ 30 | path.join(registryPath, image, 'vmlinuz'), 31 | path.join(registryPath, image, 'rootfs'), 32 | path.join(registryPath, image, 'initrd'), 33 | path.join(registryPath, image, 'slim.iso')]; 34 | 35 | for (let asset of imageAssets) { 36 | if (fs.existsSync(asset)) { 37 | await uploadAsset(owner, repo, release, asset, image, ghToken, force); 38 | ok(`Pushed asset: ${asset}`); 39 | } 40 | } 41 | }; 42 | 43 | async function uploadAsset(owner, repo, release, file, imageName, token, force = true) { 44 | const octokit = new Octokit({ auth: 'token ' + token }); 45 | 46 | let fileName = path.basename(file); 47 | if (fileName === 'slim.iso') fileName = 'vbox.iso'; 48 | 49 | const releases = await octokit.repos.listReleases({ 50 | owner, 51 | repo 52 | }); 53 | 54 | let existingRelease = releases.data.filter(r => r.tag_name == release)[0]; 55 | 56 | // creating release if doesn't exist 57 | if (!existingRelease) { 58 | existingRelease = (await octokit.repos.createRelease({ 59 | owner, 60 | repo, 61 | tag_name: release 62 | })).data; 63 | } 64 | 65 | let upload_url = existingRelease.upload_url; 66 | 67 | // delete old asset before uploading 68 | if (force) { 69 | try { 70 | await octokit.repos.deleteReleaseAsset({ 71 | owner, 72 | repo, 73 | asset_id: existingRelease.assets.filter(a => a.name == `${imageName}-${fileName}`)[0].id, 74 | }); 75 | } catch (err) { } 76 | } 77 | 78 | // upload 79 | try { 80 | await octokit.repos.uploadReleaseAsset({ 81 | url: upload_url, 82 | data: fs.createReadStream(file), 83 | headers: { 84 | 'content-type': 'application/octet-stream', 85 | 'content-length': (await fs.stat(file)).size 86 | }, 87 | name: `${imageName}-${fileName}`, 88 | owner, 89 | repo 90 | }); 91 | } catch (err) { 92 | console.error(err); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /lib/dependencies.js: -------------------------------------------------------------------------------- 1 | const hasbin = require('hasbin'); 2 | 3 | const { info, error } = require('./logger'); 4 | 5 | const docker = require('./tools/docker'); 6 | 7 | const mustBin = bin => { 8 | if (!hasbin.sync(bin)) throw `You must have ${bin} installed to build a vm`; 9 | } 10 | 11 | const mustImage = async (image, cmd) => { 12 | if( ! 
await docker.imageExists( image ) ) { 13 | throw `You must have ${image} docker image to run ${cmd}.\nRun \`slim init\` to pull images.`; 14 | } 15 | } 16 | 17 | exports.check = async argv => { 18 | let cmd = argv._[0]; 19 | 20 | try { 21 | if( cmd === "build" || cmd === "cloudinit") { 22 | mustBin('docker'); 23 | 24 | await mustImage( 'ottomatica/vbox-img', cmd ); 25 | await mustImage( 'ottomatica/mkisofs', cmd ); 26 | 27 | } 28 | } catch (e) { 29 | error(e); 30 | process.exit(1); 31 | } 32 | }; 33 | -------------------------------------------------------------------------------- /lib/env.js: -------------------------------------------------------------------------------- 1 | 2 | const fs = require('fs-extra'); 3 | const path = require('path'); 4 | const os = require('os'); 5 | const git = require('simple-git'); 6 | 7 | const download = require('download'); 8 | const ProgressBar = require('progress'); 9 | 10 | 11 | // All things slim 12 | const slimdir = path.join( os.homedir(), '.slim'); 13 | // For storing images built by slim 14 | const registry = path.join(slimdir, 'registry'); 15 | // For storing base images (Dockerfiles, etc.) retrieved via git. 16 | const baseImages = path.join(slimdir, 'baseImages'); 17 | // Script directory 18 | const scriptdir = path.dirname(require.main.filename); 19 | 20 | class Env { 21 | constructor() {} 22 | 23 | async setup() 24 | { 25 | this._preparePaths(); 26 | 27 | // Ensure baker keys are installed. 28 | fs.copyFileSync(path.resolve(scriptdir, 'scripts', 'keys', 'baker_rsa'), path.join(slimdir, 'baker_rsa')); 29 | await fs.chmod(path.join(slimdir, 'baker_rsa'), '600', () => {}); 30 | 31 | this.pubkey = fs.readFileSync(path.join(scriptdir, 'scripts', 'keys', 'baker.pub')).toString(); 32 | 33 | 34 | return this; 35 | } 36 | 37 | check() 38 | { 39 | 40 | return this; 41 | } 42 | 43 | vars() 44 | { 45 | return { 46 | slimdir: slimdir, 47 | registry: registry, 48 | scriptdir: scriptdir, 49 | pubkey: this.pubkey, 50 | env: this, 51 | } 52 | } 53 | 54 | async cloneOrPull(repoURL, dest) { 55 | let name = path.basename(repoURL); 56 | name = name.slice(-4) === '.git' ? name.slice(0, -4) : name; // Removing .git from the end 57 | let dir = path.join(baseImages); 58 | let repo_dir = path.join(dir, name); 59 | 60 | return new Promise((resolve, reject) => { 61 | 62 | // Run git pull if repo already exists locally 63 | if( fs.existsSync(repo_dir) ) 64 | { 65 | git(repo_dir).pull( (err, data) => 66 | { 67 | if (err) 68 | reject(err); 69 | else 70 | resolve(repo_dir); 71 | }) 72 | } 73 | else // clone 74 | { 75 | git(dir).silent(true).clone(repoURL, (err, data) => { 76 | if (err) 77 | reject(err); 78 | else 79 | resolve(repo_dir); 80 | }); 81 | } 82 | }); 83 | } 84 | 85 | async fetch(isoUrl, outputDir, name) 86 | { 87 | if (! 
fs.existsSync(path.join(outputDir, name)) /*|| (await md5File(isoPath)) != '851e2b2b34e31b67aa0758d25666e8e5'*/) { 88 | 89 | console.log(`Downloading base image ${isoUrl}`); 90 | const bar = new ProgressBar('[:bar] :percent :etas', { 91 | complete: '=', 92 | incomplete: ' ', 93 | width: 20, 94 | total: 0 95 | }); 96 | 97 | await download(isoUrl, outputDir, {filename: name}) 98 | .on('response', res => { 99 | // console.log(`Size: ${res.headers['content-length']}`); 100 | bar.total = res.headers['content-length']; 101 | res.on('data', data => bar.tick(data.length)); 102 | }) 103 | //.then(() => console.log('downloaded!')); 104 | 105 | 106 | } 107 | } 108 | 109 | async makeContext(p) { 110 | let buildPath = path.resolve(p); 111 | let infoPath = path.join(buildPath, 'info.yml'); 112 | let name = path.basename(buildPath); 113 | let outputDir = path.join(registry, name); 114 | 115 | await Promise.all([ 116 | expectPath(infoPath, `Expected config does not exist in ${infoPath}`), 117 | expectPath(buildPath, `Build path ${buildPath} does not exist`), 118 | fs.ensureDir(outputDir), 119 | ]); 120 | 121 | return { 122 | buildPath, 123 | infoPath, 124 | outputDir 125 | }; 126 | } 127 | 128 | _preparePaths() 129 | { 130 | if( !fs.existsSync(slimdir) ) { fs.mkdirSync(slimdir); } 131 | if( !fs.existsSync(registry) ) { fs.mkdirSync(registry); } 132 | if( !fs.existsSync(baseImages) ) { fs.mkdirSync(baseImages); } 133 | } 134 | } 135 | 136 | async function expectPath(p, msg) { 137 | let exists = await fs.exists(p); 138 | if (!exists) { 139 | throw new Error(msg); 140 | } 141 | } 142 | 143 | 144 | 145 | module.exports = new Env(); 146 | -------------------------------------------------------------------------------- /lib/logger.js: -------------------------------------------------------------------------------- 1 | const chalk = require('chalk'); 2 | 3 | const error = e => console.error(chalk.red(e)); 4 | const info = m => console.log(chalk.yellow(m)); 5 | const ok = m => console.log(chalk.green(m)); 6 | 7 | module.exports = {error, info, ok}; 8 | -------------------------------------------------------------------------------- /lib/tools/docker.js: -------------------------------------------------------------------------------- 1 | const Docker = require('dockerode'); 2 | const chalk = require('chalk'); 3 | class DockerConnector { 4 | 5 | constructor() { 6 | this.docker = new Docker(); 7 | } 8 | 9 | /* 10 | * imageNames: string | string[] 11 | * image format <image>[:<tag>] 12 | */ 13 | async imageExists( imageNames ) { 14 | return new Promise((resolve, reject) => { 15 | let imageNamesArray = Array.isArray(imageNames) ?
imageNames : [imageNames]; 16 | 17 | this.docker.listImages({ filters: { reference: imageNamesArray } }) 18 | .then( (images) => { 19 | resolve( images.length > 0 ) 20 | }) 21 | .catch( (err) => reject(err.message) ) 22 | ; 23 | }); 24 | } 25 | 26 | async run(image, cmd, options, startOptions) { 27 | 28 | options = options || {}; 29 | startOptions = startOptions || {}; 30 | 31 | return new Promise( (resolve, reject) => { 32 | console.log( image, cmd ); 33 | this.docker.run(image, cmd, process.stdout, options, startOptions, function (err, data, container) { 34 | 35 | if( err ) return reject( err ); 36 | // console.log(err, data, container.id); 37 | resolve(data.StatusCode); 38 | }); 39 | }); 40 | 41 | } 42 | 43 | async ready() { 44 | let isReady = false; 45 | const containerExists = await this.containerExists(); 46 | 47 | if(containerExists) { 48 | const container = this.docker.getContainer(this.containerId); 49 | isReady = (await container.inspect()).State.Running; 50 | } 51 | 52 | return isReady; 53 | } 54 | 55 | async containerExists() { 56 | let containerExists = false; 57 | try { 58 | let runningContainers = await this.docker.listContainers({ all: true }); 59 | containerExists = runningContainers.filter(container => container.Id.includes(this.containerId) || container.Names.includes(`/${this.containerId}`)).length > 0; 60 | } catch (err) { 61 | console.error(chalk.red(' => Docker is not running so can\'t check for any matching containers.')); 62 | } 63 | return containerExists; 64 | } 65 | 66 | async pull(imageName, options, onProgress, verbose = true) { 67 | 68 | let self = this; 69 | process.stdout.write(`pulling ${imageName} `); 70 | return new Promise((resolve, reject) => { 71 | self.docker.pull(imageName, options, async (error, stream) => { 72 | 73 | if (error) { return reject(error); } 74 | if (!stream) { return reject("Failed to pull."); } 75 | 76 | let onFinished = (error, output) => { 77 | if (error) { 78 | return reject(error); 79 | } 80 | process.stdout.write('...
pulled\n'); 81 | resolve(output); 82 | } 83 | 84 | if( onProgress == undefined ) 85 | { 86 | onProgress = (data) => { if(verbose){ console.log(data) }}; 87 | } 88 | 89 | self.docker.modem.followProgress(stream, onFinished, onProgress); 90 | }); 91 | }); 92 | } 93 | } 94 | 95 | module.exports = new DockerConnector(); -------------------------------------------------------------------------------- /lib/tools/images/mkisofs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | RUN apt update && \ 3 | apt install mkisofs e2fsprogs mtools dosfstools wget -y && \ 4 | apt clean 5 | 6 | RUN wget https://www.kernel.org/pub/linux/utils/boot/syslinux/syslinux-6.03.tar.gz 7 | RUN tar -xvf syslinux-6.03.tar.gz 8 | 9 | RUN apt install grub-efi gdisk rsync -y 10 | 11 | # syslinux-6.03/efi64/efi/syslinux.efi 12 | # syslinux-6.03/efi64/com32/elflink/ldlinux/ldlinux.e64 -------------------------------------------------------------------------------- /lib/tools/images/vbox-img/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 as install 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | RUN apt update && \ 5 | apt install curl -y 6 | 7 | ENV VIRTUALBOX=virtualbox-6.1_6.1.30-148432~Ubuntu~eoan_amd64.deb 8 | RUN curl -s -O https://download.virtualbox.org/virtualbox/6.1.30/$VIRTUALBOX 9 | RUN apt install ./$VIRTUALBOX -y 10 | 11 | FROM ubuntu:20.04 12 | COPY --from=install /usr/bin/vbox-img /usr/bin/vbox-img 13 | # Library dependencies 14 | RUN apt update && \ 15 | apt install libxml2 -y -------------------------------------------------------------------------------- /lib/tools/makeiso.js: -------------------------------------------------------------------------------- 1 | const hasbin = require('hasbin'); 2 | const docker = require("./docker"); 3 | const child = require('child_process'); 4 | const fs = require('fs-extra'); 5 | const pathUtil = require('path'); 6 | 7 | class MakeIsoWithBin { 8 | 9 | async createBootableIso(outputPath, volume, isoFilesPath) { 10 | 11 | child.execSync(` 12 | mkisofs -o ${outputPath} \ 13 | -b isolinux/isolinux.bin \ 14 | -c isolinux/boot.cat \ 15 | -no-emul-boot -boot-load-size 4 -boot-info-table \ 16 | -V ${volume} -J -R ${isoFilesPath}`, {stdio: 'inherit'}); 17 | 18 | } 19 | 20 | async createCloudInitIso( baseDir, outputPath ) { 21 | 22 | child.execSync(`mkisofs -o ${outputPath} \ 23 | -V cidata -J -R user-data meta-data`, {stdio: 'inherit', cwd: baseDir}); 24 | 25 | } 26 | } 27 | 28 | class MakeIsoWithDocker { 29 | 30 | async createBootableIso(outputPath, volume, isoFilesPath) { 31 | 32 | const output = pathUtil.basename( outputPath ); 33 | const outputDir = pathUtil.dirname( outputPath ); 34 | 35 | const args = ['-o', `/iso/${output}`, '-no-emul-boot', '-boot-info-table', '-R', '-J', 36 | '-boot-load-size', '4', '-b', 'isolinux/isolinux.bin', '-c', 'isolinux/boot.cat', 37 | '-V', volume, '/slim-iso' ] 38 | 39 | console.log( ['mkisofs', ...args].join(' ')); 40 | 41 | await docker.run("ottomatica/mkisofs", ['mkisofs', ...args], { 42 | AutoRemove: true, 43 | Tty: true, 44 | Volumes: { 45 | "/slim-iso": {}, 46 | "/iso": {} 47 | }, 48 | Hostconfig: { 49 | Binds: [ 50 | `${isoFilesPath}:/slim-iso`, 51 | `${outputDir}:/iso` 52 | ] 53 | }, 54 | WorkingDir: "/slim-iso" 55 | }); 56 | 57 | // console.log( outputDir ); 58 | } 59 | 60 | async createCloudInitIso( baseDir, outputPath ) { 61 | 62 | const output = pathUtil.basename( outputPath ); 63 | const args = 
['-o', output, '-R', '-J', '-V', 'cidata', 'user-data', 'meta-data' ]; 64 | 65 | await docker.run("ottomatica/mkisofs", ['mkisofs', ...args], { 66 | AutoRemove: true, 67 | Tty: true, 68 | Volumes: { 69 | "/v": { } 70 | }, 71 | Hostconfig: { 72 | Binds: [ 73 | `${baseDir}:/v` 74 | ] 75 | }, 76 | WorkingDir: "/v" 77 | }); 78 | 79 | await fs.move( pathUtil.join( baseDir, output), outputPath, {overwrite: true} ); 80 | } 81 | 82 | 83 | } 84 | 85 | if( hasbin("mkisofs") ) { 86 | module.exports = new MakeIsoWithBin(); 87 | } else { 88 | module.exports = new MakeIsoWithDocker(); 89 | } 90 | -------------------------------------------------------------------------------- /lib/tools/rootfs.js: -------------------------------------------------------------------------------- 1 | const docker = require("./docker"); 2 | const pathUtil = require('path'); 3 | 4 | const env = require('../env'); 5 | 6 | class RootFs { 7 | 8 | // Size in MB 9 | async asExt4(vmDir, outputDir, size=512) { 10 | 11 | const {scriptdir} = env.vars(); 12 | 13 | await docker.run('ottomatica/mkisofs', ['bash', '-c', `/script/make-ext4.sh ${size}`], { 14 | AutoRemove: true, 15 | Tty: true, 16 | Volumes: { 17 | "/script": {}, 18 | "/slim-vm": {}, 19 | "/out": {} 20 | }, 21 | Hostconfig: { 22 | Privileged: true, 23 | Binds: [ 24 | `${pathUtil.resolve(scriptdir, 'scripts')}:/script`, 25 | `${vmDir}:/slim-vm`, 26 | `${outputDir}:/out` 27 | ] 28 | } 29 | }); 30 | 31 | } 32 | 33 | // Size in MB 34 | async asEfi(vmDir, outputDir, size=512) { 35 | 36 | const {scriptdir} = env.vars(); 37 | 38 | await docker.run('ottomatica/mkisofs', ['bash', '-c', `/script/make-efi.sh ${size}`], { 39 | AutoRemove: true, 40 | Tty: true, 41 | Volumes: { 42 | "/script": {}, 43 | "/slim-vm": {}, 44 | "/out": {} 45 | }, 46 | Hostconfig: { 47 | Privileged: true, 48 | Binds: [ 49 | `${pathUtil.resolve(scriptdir, 'scripts')}:/script`, 50 | `${vmDir}:/slim-vm`, 51 | `${outputDir}:/out` 52 | ] 53 | } 54 | }); 55 | 56 | } 57 | 58 | } 59 | 60 | module.exports = new RootFs(); -------------------------------------------------------------------------------- /lib/tools/vhd.js: -------------------------------------------------------------------------------- 1 | const docker = require("./docker"); 2 | const pathUtil = require('path'); 3 | const fs = require('fs-extra'); 4 | 5 | class Vhd { 6 | 7 | async makeVhd( inputPath, outputPath ) { 8 | 9 | const input = pathUtil.basename( inputPath ); 10 | const inputDir = pathUtil.dirname( inputPath ); 11 | const output = pathUtil.basename( outputPath ); 12 | const args = ['--srcfilename', input, '--srcformat', 'RAW', '--dstfilename', output, '--dstformat', 'VHD' ]; 13 | 14 | await docker.run("ottomatica/vbox-img", ['vbox-img', 'convert', ...args], { 15 | AutoRemove: true, 16 | Tty: true, 17 | Volumes: { 18 | "/v": { } 19 | }, 20 | Hostconfig: { 21 | Binds: [ 22 | `${inputDir}:/v` 23 | ] 24 | }, 25 | WorkingDir: "/v" 26 | }); 27 | 28 | // Move output if it needs to be in another directory 29 | if( pathUtil.join(inputDir, output) != outputPath ) { 30 | await fs.move( pathUtil.join( inputDir, output), outputPath, {overwrite: true} ); 31 | } 32 | } 33 | } 34 | 35 | module.exports = new Vhd(); -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "slim", 3 | "version": "2.0.1", 4 | "description": "small and sleek computing environments", 5 | "main": "index.js", 6 | "bin": "index.js", 7 | "scripts": { 8 
| "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git+https://github.com/ottomatica/slim.git" 13 | }, 14 | "keywords": [ 15 | "virtual", 16 | "box", 17 | "vm", 18 | "microvm" 19 | ], 20 | "author": "ottomatica", 21 | "license": "Apache-2.0", 22 | "bugs": { 23 | "url": "https://github.com/ottomatica/slim/issues" 24 | }, 25 | "homepage": "https://github.com/ottomatica/slim#readme", 26 | "dependencies": { 27 | "@octokit/rest": "^18.12.0", 28 | "chalk": "^4.1.0", 29 | "cpio-fs": "^1.1.0", 30 | "dockerode": "^3.3.1", 31 | "download": "^8.0.0", 32 | "fs-extra": "^10.0.0", 33 | "hasbin": "^1.2.3", 34 | "js-yaml": "^3.13.1", 35 | "progress": "^2.0.3", 36 | "simple-git": "^2.48.0", 37 | "tar": "^6.1.11", 38 | "yargs": "^17.3.1" 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /scripts/keys/baker.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+YRhI2Gjno+5ND+N/pBvvw7Bvji6OEtZgUKvJf8P9rPcUCR8w7DpDPTpLSM4spBqIwoEM1CQRnH8x/Ufvhr51tU/74A4J2MgBEjClI8M5Z8iqYhDWfoRywo/2uB1rrPHICIM716LRFGIDoqnt+leHU4wcfHmHNa8/KqC5tNxd9/VBxeveh0CIu7/Ba3/UVtn6CTY2sGMo0mJk0IjzIsK42TgRL7ZOTQfbo1Td3DpOCdt02xft5xXCk9KuRwwrjdtyZbP8n8xc7/YcRk0pswFViNfEaU5Eb42+DTr0OhCgadGD9ufxJbSh4ty2VmRycQBfj00VqQO2zPNL2u76EfkMEI/TspVansMCheRtt3C5QJQCv0gXntDSunzgIOvbgShc644eIrmV/kh0oLYkW+Fi80zqx/dIdKMc7OpXK/umJb18ao2IBtBoTiNr5cla1XerDwZXJEp6sPJlSja9xNb0yw0PAfxMiKsR/fjymZ5E7dPaYjS3b+LIyxjxL+GSr8ZRL+3aH7lYsdAaQwekesxaMZUSfKDwRWk5UvE81gpCWWkgcTcqxCGuiBCdviBU88yXfDuFodidgYTJ39JM9v3gKuvlJAtRaBbXSJ6YTjanfozlmpaNYImHUPeooY20vlolmXzs2llZI+gM68GZpnml3en80LJYnin26kigWM9WQ== BakerForMac -------------------------------------------------------------------------------- /scripts/keys/baker_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKAIBAAKCAgEAqPmEYSNho56PuTQ/jf6Qb78Owb44ujhLWYFCryX/D/az3FAk 3 | fMOw6Qz06S0jOLKQaiMKBDNQkEZx/Mf1H74a+dbVP++AOCdjIARIwpSPDOWfIqmI 4 | Q1n6EcsKP9rgda6zxyAiDO9ei0RRiA6Kp7fpXh1OMHHx5hzWvPyqgubTcXff1QcX 5 | r3odAiLu/wWt/1FbZ+gk2NrBjKNJiZNCI8yLCuNk4ES+2Tk0H26NU3dw6TgnbdNs 6 | X7ecVwpPSrkcMK43bcmWz/J/MXO/2HEZNKbMBVYjXxGlORG+Nvg069DoQoGnRg/b 7 | n8SW0oeLctlZkcnEAX49NFakDtszzS9ru+hH5DBCP07KVWp7DAoXkbbdwuUCUAr9 8 | IF57Q0rp84CDr24EoXOuOHiK5lf5IdKC2JFvhYvNM6sf3SHSjHOzqVyv7piW9fGq 9 | NiAbQaE4ja+XJWtV3qw8GVyRKerDyZUo2vcTW9MsNDwH8TIirEf348pmeRO3T2mI 10 | 0t2/iyMsY8S/hkq/GUS/t2h+5WLHQGkMHpHrMWjGVEnyg8EVpOVLxPNYKQllpIHE 11 | 3KsQhrogQnb4gVPPMl3w7haHYnYGEyd/STPb94Crr5SQLUWgW10iemE42p36M5Zq 12 | WjWCJh1D3qKGNtL5aJZl87NpZWSPoDOvBmaZ5pd3p/NCyWJ4p9upIoFjPVkCAwEA 13 | AQKCAgBRyAn5Fa3BChIXmiEUcVuoqfjTbmR4RJy7YiNLMAGl0Uo13Bf8xp3N/bZf 14 | ULhWTZ41sGW9qLRaT64FoSWTSmg1+XNWsW0GQJHqQgiRHGOr40rE9PZ9WoP8rp90 15 | TlQKwRZDztqMFiJVFyi6yAb1q75oDZj1O4DPVa/c4hEIr/0wUstjiD4/cMOvcAbq 16 | KO6QvuiVfrauuhmpHrKNwlbliq7VAz+kh8Ey00vV1qTR++ILDmGO9x/hp7UkL1o3 17 | GSZ6rSconMPAO2ayIYp9kCeZ4wylnI4cCidEWsEMS88ZPw/aeHPkJfKu/e/dTzr/ 18 | yBBgzh1ud5HZzgEzK4aDzWrAFGkOT6Oa3OuqCPmYGZZ/Z8H/lSvRxcnVG9UyjFpe 19 | yjXhnBfsJzhjFLqyIQfoSO31sbNJkmxQ1XmMEOdYwqmK8nhCSJM5WiUDZQ/X6hpc 20 | ophsS4iGExqqB5/LpodZunDdrxJbljD86iJUp34xCs1LPHqcaB3QmUEppIfYrMUk 21 | crUm6OhUy/u5IcKcpeGGATd26XIi9ljs1GwgSjNd1ejO42HLQBXq569bfhhwDfEf 22 | rVoCM9h6jMeiZQXNsOgf4f7d7T8aPHaOSipJVzoEVyM21K/CDIUtdX8SENeDZaEc 23 | Ob7cKlVDuuZkfk1v/KVDhEGVYBpOYPAiHnedSiYoL6J/9RKTvQKCAQEA1YvF0aho 24 | 
//O55rk5uGOwdcZ9FM+QNoJNCILGDHWlo/AQdNwd1eWLH0Yap4uYn/WJT50VwcL0 25 | efDyToLDX7GLAZtc/6eUJ7glhQ0u0dUBcIlH6Z8KBZ8BVJIR7i3V2c9FBh0jmLF1 26 | BvO5ulcGizv3r8+vcVqVU5YF33vaWlcV4iP1GeH1QzAru5wLyKtLr8SrZZ9JL5xR 27 | 5xxmNvvxyUmL7tVCoclyMR9X0qluwGriSOziBCL4mUuhvK5UElgmvFFa2kSP/gEx 28 | +D+DyY8FZ3p8NtdZWYAEs+6HXekxTfD6EhbkfB3AWYeu/N3ETRTX64tS+twYSGQT 29 | EPQZNEt34qOKIwKCAQEAypFUD1cWMaHGzRgaXODqmGZj6uwb8kQ2MUDKlJloUt6H 30 | p/9779RYmcR1Yo5W7H0lmcVH12kE0co7Sco7YtSPR1wLgicyjR+Sq+zxyetwqVEu 31 | LDsmraL3BVaTMqRnAaWpHQlbp+NIeeZcnuW3b8EBAEKPy3xujGsHuZ9miumf45qq 32 | 4DbsqMB296yhiXQB1yhWDSEHdcnBfHN8aawDEkp6cssdOB85lmBZpwqAuXxH0l/W 33 | Qrhohq7MH1X7CDefwh23Pz4b9V7XxzirNaq7zFs6ceSNuHv11bHV5QRR5YsPh+KG 34 | CdEDzMuao/1/df0TsiztYdC3K2aYaOEejyGv4sH8UwKCAQBJ8jqwHScu6pEHSkCo 35 | jyy9u9v4Zt/DYF+YgOBf1CVlnW21abuTJAeG7tmwBvD1AytnPDgafo314++kLDfH 36 | XU2LYudTSA5Pqr6jUitSUfZLp94VEhOAWs01Ide/qHOTFukJ8vEuoNSrcZ5w3k3P 37 | zRY59SsFj56B8UNbXiIAgoN7aYQoUEyD1ZxvPNv/wwFUfj/z0rKfH/xkkTr780aI 38 | s0UXkRWfvIgkZnwc4LsPOnPdWNnzIMEBJGV/VsaaC5huQaW6S1+pT3SkSCo0k6gF 39 | ay60NuIj0ebO/9w0Mtn16WpO9UptiEfhONDpk0m0f3E9iWNUpv5pou3PQxevOirr 40 | ekINAoIBACfC3v0j2vdjCeK4GHSisWm4r2QtdE7ZlMmWLi187z1U8MvJGkq5I6sL 41 | JP9zcRx6dCb60l81/fwv9fNF/uInVvhq2NdzWjjZObEFkXBRBow1oxqLgcwTcOlb 42 | VQlbu9xW6BsK+zK5KkDDNur5rEgDWm7yoccPZaOqXpnQ8A/US84hTek03r9BCBkV 43 | iZ+xZasV/84T7aLxN0l2YbVcTj4I4IAn3lRlzKf3waFILnw6KN7icOwnxlypcuez 44 | uNKkGHfB3XZMerBvLWutc+3U1YgHYDF661aK/nYzsgiCEJE9+o5xqF3E6ToJvRDz 45 | cVF3m6Ydq3rHvSyHtuLfTWBK/HtGGIECggEBAMXM4KSngNKpTCApjFpvEI3TvJtp 46 | 8A2Mvhj/M0pXkBIDxcBFqF/0cYXnGpPc/AgPekh/vEyOOGOSh3dplswXie231Bod 47 | 6h8d2I8Pn7dTl/Po1jSMpwXDQu2PlrTr9+zcRZfGgJc4f6nU4PeD5CdUyBCtvJsG 48 | DrnJ2NVUhgRXkCwn8Whe+le9GUKo3athetUZ7lKrbxz0bs2KHniUURpWltfmG+EM 49 | g08w319Gyq9XEdfcdyoxUDCoiyhyV1AYx+tHiMYidkadnKJVXHUC2YapTKtEfK1o 50 | cJmno7sstGUNY0o6+sPve4CIECE4NSMCQdE/K1JPAvF+yZwHlVUZacX+a5M= 51 | -----END RSA PRIVATE KEY----- -------------------------------------------------------------------------------- /scripts/make-efi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Creating disk sized $1M" 3 | dd if=/dev/zero of=rootfs.ext4 bs=1M count=$1 4 | 5 | # Create two partitions (ESP for boot and second for rootfs). 6 | sgdisk --clear \ 7 | --new 1::+100M --typecode=1:ef00 --change-name=1:'EFI System' \ 8 | --new 2::-0 --typecode=2:8300 --change-name=2:'slim-rootfs' \ 9 | --attributes 1:set:2 \ 10 | rootfs.ext4 11 | # --new 1::+1M --typecode=1:ef02 --change-name=1:'BIOS boot partition' \ 12 | 13 | # Print partitions 14 | gdisk -l rootfs.ext4 15 | 16 | # Automatically mount partitions on loop devices. 17 | # losetup --partscan --find --show rootfs.ext4 18 | 19 | # Bug: Loop device partitions do not show inside container 20 | # https://github.com/moby/moby/issues/27886 21 | 22 | # Workaround (tonyfahrion) 23 | LOOPDEV=$(losetup --find --show --partscan rootfs.ext4) 24 | 25 | # drop the first line, as this is our LOOPDEV itself, but we only want the child partitions 26 | PARTITIONS=$(lsblk --raw --output "MAJ:MIN" --noheadings ${LOOPDEV} | tail -n +2) 27 | 28 | echo "FS ", $LOOPDEV, $PARTITIONS 29 | 30 | COUNTER=1 31 | for i in $PARTITIONS; do 32 | MAJ=$(echo $i | cut -d: -f1) 33 | MIN=$(echo $i | cut -d: -f2) 34 | if [ ! 
-e "${LOOPDEV}p${COUNTER}" ]; then 35 | echo "Creating loop partition", ${LOOPDEV}p${COUNTER} 36 | mknod ${LOOPDEV}p${COUNTER} b $MAJ $MIN; 37 | fi 38 | COUNTER=$((COUNTER + 1)) 39 | done 40 | 41 | lsblk 42 | 43 | # Format ESI partition (ESP) 44 | mkfs.fat ${LOOPDEV}p1 45 | ESP=/tmp/esp 46 | mkdir $ESP && mount ${LOOPDEV}p1 $ESP 47 | mkdir -p $ESP/EFI/BOOT 48 | 49 | # Copy syslinux efi files 50 | #cp syslinux-6.03/efi64/efi/syslinux.efi $ESP/EFI/BOOT/bootx64.efi 51 | #cp syslinux-6.03/efi64/com32/elflink/ldlinux/ldlinux.e64 $ESP/EFI/BOOT/ldlinux.e64 52 | 53 | # Prepare bootloader configuration 54 | #echo "DEFAULT linux" > $ESP/EFI/BOOT/syslinux.cfg 55 | #echo "LABEL linux" >> $ESP/EFI/BOOT/syslinux.cfg 56 | #echo "KERNEL vmlinuz" >> $ESP/EFI/BOOT/syslinux.cfg 57 | #echo "INITRD initrd" >> $ESP/EFI/BOOT/syslinux.cfg 58 | #echo "APPEND root=/dev/sda2 console=tty0 console=ttyS0,115200n8" >> $ESP/EFI/BOOT/syslinux.cfg 59 | 60 | # GRUB 61 | cat >> $ESP/EFI/BOOT/grub.cfg <> /tmp/rootfs/etc/fstab 90 | 91 | # Syslinux/Grub needs kernel and initrd on same partition. 92 | cp /tmp/rootfs/vmlinuz $ESP/EFI/BOOT/vmlinuz 93 | cp /tmp/rootfs/initrd $ESP/EFI/BOOT/initrd 94 | 95 | # Cleanup 96 | umount ${LOOPDEV}p1 97 | umount ${LOOPDEV}p2 98 | losetup -d ${LOOPDEV} 99 | 100 | # Sanity check disk 101 | fsck -f rootfs.ext4 102 | 103 | # Store back on host 104 | mv rootfs.ext4 /out/rootfs 105 | echo "Done" -------------------------------------------------------------------------------- /scripts/make-ext4.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Creating disk sized $1M" 3 | dd if=/dev/zero of=rootfs.ext4 bs=1M count=$1 4 | mkfs.ext4 rootfs.ext4 5 | mkdir -p /tmp/rootfs 6 | mount -t ext4 rootfs.ext4 /tmp/rootfs 7 | 8 | # Copy extracted rootfs into mounted image 9 | echo "Copying rootfs" 10 | #cp -a /slim-vm/. /tmp/rootfs 11 | tar -xf /slim-vm/rootfs.tar -C /tmp/rootfs 12 | 13 | # Mount rootfs on boot. 14 | echo "LABEL=slim-rootfs / ext4 discard,errors=remount-ro 0 1" >> /tmp/rootfs/etc/fstab 15 | 16 | # Docker overrides /etc/hosts and /etc/hostname and will export blank versions. 17 | # Patch /etc/hosts and /etc/hostname 18 | cat << 'EOF' > /tmp/rootfs/etc/hosts 19 | 127.0.0.1 localhost 20 | 127.0.1.1 slim 21 | ::1 localhost ip6-localhost ip6-loopback 22 | fe00::0 ip6-localnet 23 | ff00::0 ip6-mcastprefix 24 | ff02::1 ip6-allnodes 25 | ff02::2 ip6-allrouters 26 | EOF 27 | 28 | echo "slim" > /tmp/rootfs/etc/hostname 29 | 30 | echo "Extracting uncompressed kernel" 31 | mv /tmp/rootfs/vmlinuz /out/tmp.gz 32 | gzip -d /out/tmp.gz 33 | mv /out/tmp /out/vmlinuz 34 | 35 | echo "Extracting initrd" 36 | mv /tmp/rootfs/initrd /out/initrd 37 | 38 | # CLeanup 39 | umount /tmp/rootfs 40 | fsck -f rootfs.ext4 41 | 42 | # Finalize 43 | tune2fs -O ^read-only -L "slim-rootfs" rootfs.ext4 44 | 45 | # Store back on host 46 | mv rootfs.ext4 /out/rootfs 47 | 48 | echo "Saved rootfs raw image." 
-------------------------------------------------------------------------------- /scripts/syslinux/isolinux.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/scripts/syslinux/isolinux.bin -------------------------------------------------------------------------------- /scripts/syslinux/isolinux.cfg: -------------------------------------------------------------------------------- 1 | serial 0 115200 2 | default slim 3 | prompt 0 4 | 5 | label slim 6 | kernel /boot/vmlinuz 7 | initrd /boot/initrd 8 | # add/remove quiet from line below to see boot details 9 | append modules=loop,squashfs,sd-mod,usb-storage console=tty0 console=ttyS0,115200 10 | -------------------------------------------------------------------------------- /scripts/syslinux/ldlinux.c32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ottomatica/slim/c792197335d8f7e9dc88e86e33f90964c06fba2e/scripts/syslinux/ldlinux.c32 --------------------------------------------------------------------------------