├── .gitattributes ├── .gitignore ├── .gitmodules ├── BACLIN.md ├── Dockerfile ├── LICENSE ├── NOTES.md ├── README.md ├── RELEASENOTES.md ├── baclin ├── docs ├── Abstract.md ├── Bio.md └── Reference.md ├── lib ├── README.md ├── api │ ├── cli.tcl │ ├── pkgIndex.tcl │ └── wapi.tcl └── cluster │ ├── atexit.tcl │ ├── cluster.tcl │ ├── environment.tcl │ ├── extend.tcl │ ├── json.tcl │ ├── mount.tcl │ ├── pkgIndex.tcl │ ├── proctrace.tcl │ ├── swarm.tcl │ ├── swarmmode.tcl │ ├── tooling.tcl │ ├── unix.tcl │ ├── utils.tcl │ ├── vcompare.tcl │ ├── virtualbox.tcl │ └── zipper.tcl ├── machinery ├── make ├── README.md ├── bin │ ├── linux-ix86 │ │ └── tclkit │ ├── linux-x86_64 │ │ └── tclkit │ ├── macosx-ix86 │ │ └── tclkit │ ├── macosx-x86_64 │ │ └── tclkit │ └── win32-ix86 │ │ └── tclkit ├── distro │ └── README.md ├── kits │ └── sdx.kit └── make.tcl └── test ├── .gitignore ├── bin └── witness.sh ├── depends ├── skeleton.yml └── test.env ├── docker-compose.yml ├── myvars.env ├── test.twk ├── test.yml └── test.zip /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Explicitly declare text files you want to always be normalized and converted 5 | # to native line endings on checkout. 6 | *.c text 7 | *.h text 8 | *.tcl text 9 | *.md text 10 | 11 | # Declare files that will always have CRLF line endings on checkout. 12 | *.sln text eol=crlf 13 | 14 | # Denote all files that are truly binary and should not be modified. 
15 | *.png binary 16 | *.jpg binary -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \#*\# 3 | *.bak 4 | make/distro/* -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lib/pseudofs"] 2 | path = lib/pseudofs 3 | url = https://github.com/efrecon/pseudofs.git 4 | -------------------------------------------------------------------------------- /BACLIN.md: -------------------------------------------------------------------------------- 1 | # BACLIN - BAsic Compose LINeariser 2 | 3 | This utility will linearise a file in the Docker [Compose][1] format so that all 4 | occurrences of [extends][2] directives are recursively replaced by the service 5 | definitions that they point at. It has been written to circumvent issue 6 | [#31101][3], i.e. to palliate the removal of `extends` directive between version 7 | 2 and version 3 of the compose file format. 8 | 9 | Using this tool, you should be able to write `extends` directives in your 10 | compose 3 files, and linearise them before sending to the swarm using `docker 11 | stack deploy`. Obviously, these files would *not* comply with version 3+ of the 12 | file format specification as it lacks support for `extend`. 13 | 14 | The tool takes two arguments on the command line: the path to the input file and 15 | the path to the output file. Each argument can be replaced by a `-`, meaning 16 | reading from standard in or writing to standard out. Empty (or missing) 17 | arguments will have the same meaning as the use of the more explicit dash. When 18 | reading from the standard input, `baclin` will default to the current directory 19 | as the root for relative file specifications that could happen in `extends` 20 | directives. 
21 | 22 | [1]: https://docs.docker.com/compose/compose-file/ 23 | [2]: https://docs.docker.com/compose/compose-file/compose-file-v2/#extends 24 | [3]: https://github.com/moby/moby/issues/31101 25 | 26 | ## Example 27 | 28 | Provided a main compose file with the following content: 29 | 30 | ```` 31 | version: 3 32 | 33 | services: 34 | web: 35 | extends: 36 | file: ../common.yml 37 | service: webapp 38 | environment: 39 | - DEBUG=1 40 | cpu_shares: 5 41 | 42 | important_web: 43 | extends: web 44 | cpu_shares: 10 45 | ```` 46 | 47 | And the file at `../common.yml` containing: 48 | 49 | ```` 50 | version: 2 51 | 52 | services: 53 | common: 54 | labels: 55 | se.sics.copyright: "Emmanuel Frecon" 56 | se.sics.organisation: "RISE SICS" 57 | webapp: 58 | extends: common 59 | labels: 60 | se.sics.application: "Web" 61 | image: nginx 62 | ports: 63 | - "8000:8000" 64 | volumes: 65 | - "/data" 66 | environment: 67 | - TEST=34 68 | ```` 69 | 70 | Running `baclin` on the main file would lead to the following content: 71 | 72 | ```` 73 | version: "3" 74 | services: 75 | web: 76 | labels: 77 | se.sics.copyright: Emmanuel Frecon 78 | se.sics.organisation: RISE SICS 79 | se.sics.application: Web 80 | image: nginx 81 | ports: 82 | - 8000:8000 83 | volumes: 84 | - /data 85 | environment: 86 | - TEST=34 87 | - DEBUG=1 88 | cpu_shares: 5 89 | important_web: 90 | labels: 91 | se.sics.copyright: Emmanuel Frecon 92 | se.sics.organisation: RISE SICS 93 | se.sics.application: Web 94 | image: nginx 95 | ports: 96 | - 8000:8000 97 | volumes: 98 | - /data 99 | environment: 100 | - TEST=34 101 | - DEBUG=1 102 | cpu_shares: 10 103 | ```` 104 | 105 | ## Binaries 106 | 107 | Binaries, automatically generated using [make.tcl][4] ar available [here][5]. 
108 | 109 | [4]: https://github.com/efrecon/machinery/blob/master/make/make.tcl 110 | [5]: https://bintray.com/efrecon/baclin/baclin/0.2#files -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ##### 2 | # To run this, you will have to mount the local docker socket and your working directory onto /cluster, e.g. 3 | # docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:/cluster efrecon/machinery help 4 | FROM docker:stable 5 | 6 | # Lock in versions of compose and machine, will change at the pace of stable 7 | # releases. 8 | ARG DOCKER_COMPOSE_VERSION=1.27.2 9 | ARG DOCKER_MACHINE_VERSION=0.16.2 10 | ARG TCLLIB_VERSION=1-20 11 | 12 | # Install glibc so compose can run. Also make sure wget can properly handle 13 | # https and arrange for an ssh client to be present for use from docker-machine 14 | ARG GLIBC=2.32-r0 15 | ARG GLIBC_SHA256=2a3cd1111d2b42563e90a1ace54c3e000adf3a5a422880e7baf628c671b430c5 16 | RUN apk update && apk add --no-cache openssh-client ca-certificates && \ 17 | wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \ 18 | wget -q https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC}/glibc-${GLIBC}.apk && \ 19 | echo "${GLIBC_SHA256} glibc-${GLIBC}.apk" | sha256sum -c - && \ 20 | apk add --no-cache glibc-${GLIBC}.apk && rm glibc-${GLIBC}.apk && \ 21 | ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \ 22 | ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib 23 | 24 | # Install compose and machine 25 | RUN wget -q -O /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-Linux-x86_64 && \ 26 | chmod +x /usr/local/bin/docker-compose && \ 27 | wget -q -O /usr/local/bin/docker-machine 
https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-Linux-x86_64 && \ 28 | chmod +x /usr/local/bin/docker-machine 29 | 30 | # Install TCL, TLS, tcllib and other tcl/machinery dependencies 31 | RUN apk add --no-cache tcl tcl-tls tclx archivemount && \ 32 | wget -q -O /tmp/tcllib-${TCLLIB_VERSION}.tar.gz https://github.com/tcltk/tcllib/archive/tcllib-${TCLLIB_VERSION}.tar.gz && \ 33 | tar -zx -C /tmp -f /tmp/tcllib-${TCLLIB_VERSION}.tar.gz && \ 34 | tclsh /tmp/tcllib-tcllib-${TCLLIB_VERSION}/installer.tcl -no-html -no-nroff -no-examples -no-gui -no-apps -no-wait -pkg-path /usr/lib/tcllib$(echo ${TCLLIB_VERSION}|sed s/-/./g) && \ 35 | rm -rf /tmp/tcllib* 36 | 37 | # Install our main script and implementation 38 | RUN mkdir -p /opt/machinery/lib 39 | COPY machinery /opt/machinery/ 40 | COPY lib /opt/machinery/lib 41 | RUN ln -s /opt/machinery/machinery /usr/local/bin/machinery 42 | 43 | # Install til library, the only remaining dependency we have 44 | RUN wget -q -O /tmp/til.zip https://github.com/efrecon/til/archive/master.zip && \ 45 | unzip -q /tmp/til.zip -d /opt/machinery/lib && \ 46 | mv /opt/machinery/lib/til-master /opt/machinery/lib/til && \ 47 | rm -rf /tmp/til.zip 48 | 49 | # Expose for running as a service 50 | EXPOSE 8070 51 | 52 | # Mount your main working directory onto /cluster 53 | RUN mkdir -p /cluster 54 | WORKDIR /cluster 55 | VOLUME /cluster 56 | 57 | ENTRYPOINT [ "/usr/local/bin/machinery" ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Emmanuel Frecon <> 2 | 3 | Permission to use, copy, modify, and/or distribute this software for any 4 | purpose with or without fee is hereby granted, provided that the above 5 | copyright notice and this permission notice appear in all copies. 
6 | 7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | -------------------------------------------------------------------------------- /NOTES.md: -------------------------------------------------------------------------------- 1 | # Internal Notes 2 | 3 | ## Making a Release 4 | 5 | To make a release, you should do the following: 6 | 7 | 1. Make sure all changes are documented in the [Release Notes](RELEASENOTES.md). 8 | 2. Bump up/adapt the version number that is declared in `cli.tcl`. 9 | 3. Make binaries using the script in the `make` sub-directory. 10 | 4. Tag the release in git using: `git tag -a -m "Version vX.Y" vX.Y`. 11 | 5. Push the tag to the main repository: `git push --tags`. 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # machinery 2 | 3 | `machinery` is a command-line tool to operate on a whole cluster of [Docker 4 | Machine] virtual or bare-metal machines. `machinery` uses a YAML definition of 5 | the whole cluster to create machines, bring them up or down, remove them at will 6 | and create (overlay) networks to be used across deployed containers. In short, 7 | `machinery` is to `docker-machine` what `docker-compose` is to `docker`. In 8 | addition, `machinery` provides [Docker Swarm] and [Swarm Mode], and [Docker 9 | Compose] integration. 
It will automatically arrange for the created virtual 10 | machines to join the swarm cluster, generate the token(s) as needed or even 11 | manage the life-cycle of several compose projects to be run on the cluster. 12 | `machinery` can automatically bring up specific project files onto machines that 13 | it controls. `machinery` is able to substitute the value of local environment 14 | variables in the compose project files before bringing the services up. Together 15 | with conventions for the dynamic construction of network-related environment 16 | variables, this provides for a simple mechanism for service discovery. 17 | 18 | In short `machinery` provides you with an at-a-glance view of your whole 19 | cluster, from all the (virtual) machines that build it up, to all the services 20 | that should be run, and on which machine(s). `machinery` provides both a 21 | command-line and a REST API for operating on your cluster from the central 22 | controlling point that it constructs. This document provides a quick 23 | introduction to the main features of `machinery`, read the 24 | [documentation](docs/Reference.md) for a thorough description of all its 25 | functionality. 26 | 27 | [Docker Machine]: https://docs.docker.com/machine/ 28 | [Docker Swarm]: https://docs.docker.com/swarm/ 29 | [Swarm Mode]: https://docs.docker.com/engine/swarm 30 | [Docker Compose]: https://docs.docker.com/compose/ 31 | 32 | ## Quick Tour 33 | 34 | `machinery` reads its default configuration from the file `cluster.yml` in the 35 | local directory. [YAML](http://yaml.org/) definition files have a 36 | straightforward syntax. For example, the following content would define 3 37 | machines using the `virtualbox` driver, one with more memory and ready to be 38 | duplicated through using the YAML anchoring facilities, another one with more 39 | disk than the defaults provided by `docker-machine` and the last one as the 40 | master of the cluster. 
The description also defines some labels that can be 41 | used by `swarm` to schedule services on specific nodes and arrange for the 42 | machine called `core` to have access to your home directory. Finally, it 43 | arranges for the services pinpointed by a relative `compose` project file to 44 | automatically be started up when `db` is brought up and created. 45 | 46 | ```yaml 47 | version: '2' 48 | 49 | machines: 50 | wk01: &worker 51 | driver: virtualbox 52 | memory: 2GiB 53 | labels: 54 | role: worker 55 | db: 56 | driver: virtualbox 57 | size: 40G 58 | labels: 59 | role: db 60 | compose: 61 | - 62 | file: ../compose/backend/db.yml 63 | core: 64 | driver: virtualbox 65 | master: on 66 | labels: 67 | role: core 68 | shares: 69 | - $HOME 70 | ``` 71 | 72 | Given access to a cluster definition file such as the one described above, the 73 | following command would create all the configured machines and arrange for a 74 | swarm token to be created when first executed. 75 | 76 | ```shell 77 | machinery up 78 | ``` 79 | 80 | And the following command would gently bring the machine called `db` down and 81 | then destroy it. 82 | 83 | ```shell 84 | machinery destroy db 85 | ``` 86 | 87 | If you had a YAML compose project description file called `myapp.yml` describing 88 | the containers to run on your cluster, you could schedule it for execution by 89 | calling: 90 | 91 | ```shell 92 | machinery swarm myapp.yml 93 | ``` 94 | 95 | Do you want to try for yourself at once? Read the next section ant try the 96 | example. You might want to download a "compiled" 97 | [binary](https://github.com/efrecon/machinery/releases) to avoid having to solve 98 | the few dependencies `machinery` has yourself. For a complete description, read 99 | the [documentation](docs/Reference.md). 100 | 101 | ## Giving it a Quick Test 102 | 103 | The directory `test` contains a test cluster with a single machine. 
Try for 104 | yourself by running the following command from the main directory of the 105 | repository. 106 | 107 | ```shell 108 | ./machinery -cluster test/test.yml up 109 | ``` 110 | 111 | You should see an output similar to the following one on the terminal. Actually, 112 | what *you* will see is a colourised output without timestamps. `machinery` 113 | automatically segregates terminals from regular file descriptor and the 114 | following was captured using a file redirection. 115 | 116 | ``` 117 | [20150414 204739] [NOTICE] Generating new token 118 | [20150414 204739] [INFO] Detaching from vm... 119 | [20150414 204739] [INFO] Creating swarm token... 120 | [20150414 204740] [NOTICE] Created cluster token 87c9e52eb6be5d0c794afa7053462667 121 | [20150414 204740] [INFO] Token for cluster definition at test/test.yml is 87c9e52eb6be5d0c794afa7053462667 122 | [20150414 204740] [NOTICE] Creating machine test-test 123 | [20150414 204741] [INFO] Creating SSH key... 124 | [20150414 204741] [INFO] Creating VirtualBox VM... 125 | [20150414 204743] [INFO] Starting VirtualBox VM... 126 | [20150414 204743] [INFO] Waiting for VM to start... 127 | [20150414 204829] [INFO] Configuring Swarm... 128 | [20150414 204849] [INFO] "test-test" has been created and is now the active machine. 129 | [20150414 204849] [INFO] To point your Docker client at it, run this in your shell: $(docker-machine env test-test) 130 | [20150414 204849] [INFO] SSH to test-test working properly 131 | [20150414 204849] [NOTICE] Tagging test-test with role=testing target=dev 132 | [20150414 204849] [NOTICE] Copying local /tmp/profile-11494-395 to test-test:/tmp/profile-11494-395 133 | [20150414 204856] [INFO] Waiting for VM to start... 
134 | [20150414 204928] [NOTICE] Port forwarding for test-test as follows: 8080->80/tcp 20514->514/udp 9090->9090/tcp 135 | [20150414 204929] [NOTICE] Mounting shares as follows for test-test: /home/emmanuel->/home/emmanuel 136 | [20150414 204929] [INFO] Getting info for guest test-test 137 | [20150414 204929] [NOTICE] Waiting for test-test to shutdown... 138 | [20150414 204934] [NOTICE] Bringing up machine test-test... 139 | [20150414 204935] [INFO] Waiting for VM to start... 140 | [20150414 205007] [INFO] Attaching to test-test 141 | [20150414 205012] [INFO] Docker setup properly on test-test 142 | [20150414 205012] [NOTICE] Pulling images in test-test: gliderlabs/alpine 143 | [20150414 205012] [INFO] Attaching to test-test 144 | [20150414 205013] [INFO] Pulling repository gliderlabs/alpine 145 | [20150414 205015] [INFO] a5b60fe97da5: Pulling image (latest) from gliderlabs/alpine 146 | [20150414 205015] [INFO] a5b60fe97da5: Pulling image (latest) from gliderlabs/alpine, endpoint: https://registry-1.docker.io/v1/ 147 | [20150414 205016] [INFO] a5b60fe97da5: Pulling dependent layers 148 | [20150414 205016] [INFO] 511136ea3c5a: Download complete 149 | [20150414 205016] [INFO] a5b60fe97da5: Pulling metadata 150 | [20150414 205017] [INFO] a5b60fe97da5: Pulling fs layer 151 | [20150414 205019] [INFO] a5b60fe97da5: Download complete 152 | [20150414 205019] [INFO] a5b60fe97da5: Download complete 153 | [20150414 205019] [INFO] Status: Downloaded newer image for gliderlabs/alpine:latest 154 | ``` 155 | 156 | To check around, you could run the following command to check that the machine 157 | `test-test` has really been created: 158 | 159 | ```shell 160 | docker-machine ls 161 | ``` 162 | 163 | You could also jump into the created machine using the following command: 164 | 165 | ```shell 166 | docker-machine ssh test-test 167 | ``` 168 | 169 | At the prompt, you can perhaps get a list of the docker containers that have 170 | been started in the machine using the following 
command and verify that there 171 | are two running containers: one swarm master container and one swarm agent. 172 | 173 | ```shell 174 | docker ps 175 | ``` 176 | 177 | You can also check which images have been downloaded using the following 178 | command. That should list at least 3 images: one for `swarm`, one for `busybox` 179 | (which is used to verify that `docker` runs properly at the end of the machine 180 | creation process) and finally one for Alpine Linux, which is downloaded as part 181 | of the test cluster definition file. 182 | 183 | ```shell 184 | docker images 185 | ``` 186 | 187 | Finally, you can check that you can access your home directory at its usual 188 | place, as it is automatically mounted as part of the test cluster definition. A 189 | final note: jumping into the machine was not a necessary process, you would have 190 | been able to execute those commands directly from the host command prompt after 191 | having run `$(docker-machine env test-test)`. 192 | 193 | Once done, return to the host prompt and run the following to clean everything 194 | up: 195 | 196 | ```shell 197 | ./machinery -cluster test/test.yml destroy 198 | ``` 199 | 200 | ## Notes 201 | 202 | Support for [Swarm Mode] is work in progress and not yet released, so is 203 | support for the creation of cluster-wide overlay networks that can be used for 204 | communication between [Docker Stack]s across the cluster. In order to handle the 205 | creation of both machines and networks the YAML format has been modified in the 206 | development version. The default is to keep a list of machines under the root of 207 | the YAML file. However, whenever a key called `version` is present, `machinery` 208 | will expect a list of machines under the key `machines` and a possible list of 209 | networks under the key `networks`. 
210 | 211 | [Docker Stack]: https://docs.docker.com/engine/reference/commandline/stack/ 212 | 213 | ## Comparison to Other Tools 214 | 215 | `machinery` is closely related to [Vagrant](https://www.vagrantup.com/), and it 216 | evens provides a similar set of commands. However, being built on top of 217 | `docker-machine` provides access to many more providers through all the existing 218 | Docker Machine [drivers](https://docs.docker.com/machine/#drivers). 219 | 220 | ## Implementation 221 | 222 | `machinery` is written in [Tcl](http://www.tcl.tk/). It requires a recent 223 | version of Tcl (8.6 at least) and the `yaml` library to be able to parse YAML 224 | description files. As the `yaml` library is part of the standard `tcllib`, the 225 | easiest is usually to install the whole library using your package manager. For 226 | example, on ubuntu, running the following will suffice as Tcl is part of the 227 | core server and desktop installation. 228 | 229 | ```shell 230 | apt-get install tcllib 231 | ``` 232 | -------------------------------------------------------------------------------- /RELEASENOTES.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | 4 | ## v 0.7 5 | 6 | * Suppress most unexpected characters from temporary paths so as to minimise 7 | problems (scp uses, for example, the colon sign as a separator between the 8 | host and the path). 9 | 10 | * Default is now to create and use a machine storage cache in the same directory 11 | as the main YAML cluster file. This is a *breaking change*, but eases project 12 | migration between machines. To revert to prior behaviour and keep using 13 | `machinery` on an existing cluster, use the global option `-storage` and set 14 | it to something like `~/.docker/machine`, which is the default location for 15 | machine. 16 | 17 | * Machine specification now takes a list of `files` specifications. 
These are 18 | formatted as the path to the source, possibly followed by a colon `:` 19 | character and the path to the destination; modelled after the volume mounting 20 | command of `docker`. These files (or directories) will be copied early in the 21 | initialisation process. They are meant to provide a quick way to initialise 22 | machines and ensure that they, for example, host all necessary secrets, ready 23 | to be mounted onto the containers running on those machines. 24 | 25 | * Preliminary support for running from windows, using the 26 | [DockerToolbox](https://www.docker.com/products/docker-toolbox). 27 | 28 | * Ability to use another machine as a cache for the storage of images before 29 | they are transmitted to the remote machines in the cluster. This is a 30 | *breaking change* as it modifies the semantics of the `-cache` global option 31 | (contains the name of the machine used for cache). This new behaviour is 32 | necessary for the windows port as the daemon runs within a machine created in 33 | [VirtualBox](https://www.virtualbox.org/). 34 | 35 | 36 | ## v 0.6 37 | 38 | * Fixed better exit from interactive ssh sessions. 39 | 40 | * Refactoring all command-line parsing and operations under global 41 | `cli` command in order to make some of its services available to 42 | other modules. 43 | 44 | * Implementation of a Web API, using a vocabulary similar to the one 45 | from the command line. 46 | 47 | * Adding commands to search for components (by name patterns) and to 48 | execute docker commands on groups of components (by the same name 49 | patterns). 50 | 51 | * Adapting to new options in latest machine, which leads to quicker 52 | machine creations as setting labels does not need a restart anymore. 53 | 54 | * Dynamically adapt to the OS on the created virtual machine when 55 | installing necessary tools such as rsync. 
56 | 57 | * Adding `-restrict` option to `forall` and `search` to restrict to a 58 | subset of matching machine name patterns within the cluster. 59 | 60 | * Now accepting wildcards instead of (shortened) machine names in most 61 | CLI commands. Most usefull is for `ps` sub-command: when called 62 | without arguments it will return the components in the swarm, when 63 | called with `*` (matching all machine names), it will list *all* 64 | components in the cluster. 65 | 66 | * Added a `ls` CLI sub-command, this will print out the list of 67 | cluster machines and their state. 68 | 69 | * Use a dash-led marker at the beginning of YAML files to 70 | automatically pick up the cluster description from the current 71 | directory as a default failover. 72 | 73 | * Adding an `addendum` section to VMs, this contains a list of scripts 74 | (or similar) that will automatically be run once the machine has 75 | been created. They can be used to control access to ports, 76 | etc. using provider-specific command-line tools. 77 | 78 | 79 | ## v 0.5.1 80 | 81 | * Exported shares are now resolved relatively to the directory of the 82 | main cluster description file, which helps migrating project trees 83 | if necessary. 84 | 85 | * Mounted shares (still VirtualBox only) will persist reboots of the 86 | virtual machines through adding a specific section to 87 | /var/lib/boot2docker/bootlocal.sh or creating the file if necessary. 88 | 89 | * Shares can now have a type which defaults properly to vboxsf on the 90 | virtualbox driver and to (new type) rsync on all other drivers. 91 | This will use rsync to copy the content of a host directory at VM 92 | creation and to copy back the content of the VM directory using the 93 | new sub-command called `sync`. Running `machinery sync` as a `cron` 94 | job is then probably a good solution. 95 | 96 | * Fixed bug that would prevent `machinery` to mount some of the shares 97 | using `vboxsf`. 
98 | 99 | * Adding support for human-readable sizes, e.g. `10G` for 10 gigabytes 100 | (powers of 1000), or `1GiB` for 1 gigibytes (powers of 1024 101 | instead!). 102 | 103 | * Adding support for user-specified `ssh` command, which will be used 104 | in place of the one that is guessed from introspection of 105 | `docker-machine ssh` whenever possible. 106 | 107 | * Using the new `docker-machine scp` sub-command for file copy 108 | operations when it exists. 109 | 110 | * Added possibility to recreate the shares and mount them from within 111 | a `reinit` call. This isn't entirely tested yet and will fail 112 | reusing shares if they had already been created in the past. 113 | 114 | * Added possibility to read program options from configuration file 115 | through `-config` global option (handy when specifying complex ssh 116 | commands for example). 117 | 118 | * Added `ps` command to return the status of the whole cluster (via 119 | the swarm master). 120 | 121 | * `ssh` sub-command now can be called without arguments to get a 122 | prompt into virtual machine (thus being a (handy?) relay to 123 | `docker-machine ssh`). 124 | 125 | 126 | ## v 0.5 127 | 128 | * The default is now to use the locally cached images when 129 | initialising virtual machines. This will speed up initialisation in 130 | a number of cases. Furthermore, this allows to push private images 131 | on the machines withouth having to transfer repository credentials 132 | or similar. 133 | 134 | * Factored away UNIX/linux specific commands into a separate package 135 | to keep down the size of the main code-base. 136 | 137 | 138 | ## v 0.4.2 (Never Released) 139 | 140 | * Fixed a bug when swarming compose project files: these would be started up on 141 | the host and not within the cluster! 142 | 143 | * Added possibility to pass project name or substitution instructions to YAML 144 | compose files being swarmed into the cluster. 
145 | 146 | * Fixed bug so that `.env` files pointed by project files that should be 147 | substituted will also be substituted and will be found by the project file 148 | when running compose. 149 | 150 | * Fixed bug where compose projects would not be properly resolved when started 151 | using `swarm`. 152 | 153 | * (Finally?) fixing login/pull bug so `machinery` will now be able to 154 | properly download from private repositories. 155 | 156 | * Adding support for aliases, a new key called `aliases` under the VM 157 | dictionary in the YAML file. This should be a list of names that 158 | can also be used to point at the machine from the command line, but 159 | will also appear in the discovery mechanisms. 160 | 161 | * Squeezed away a number of synchronisation bugs that would occur on 162 | slow hosts, we typically wait for good markers and retry a number of 163 | times sensitive operations. 164 | 165 | 166 | ## v 0.4.1 167 | 168 | * Added support for passing docker-compose commands when swarming in 169 | project files: -kill, -start, -rm, etc. 170 | 171 | * Fixed output bug so we get (again!) a list of running components 172 | once compose operations have ended. 173 | 174 | 175 | ## v 0.4 (never released) 176 | 177 | * Added support for substitution of (local) environment variables as a 178 | workaround for this 179 | [highly-requested](https://github.com/docker/compose/issues/495) missing 180 | feature of compose. However, this means that, when crafting compose project 181 | files to use the feature, you will not be able to use them directly outside of 182 | `machinery`, unless you run them through 183 | [envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) 184 | 185 | * Adding support for "poor man's discovery". `machinery` automatically declares 186 | and caches environment variables with network information for the created 187 | machines. 
These variables can be, for example, used when starting components 188 | through compose (see above). 189 | 190 | * Adding support for logging in at one or several registries at the `docker` 191 | daemon running within a specific virtual machine. Note that you will have to 192 | start the components under the same user than the one that was automatically 193 | logged in. 194 | 195 | * Adapting to new versions of `docker`, `docker-machine` and `docker-compose`. 196 | 197 | * Automatically trying to update a machine to the latest `boot2docker.iso` 198 | when the installed version of `docker` is lower than the one of the local 199 | installation. 200 | 201 | * Arrange for the temporary files created for performing environment variable 202 | substitution to point back correctly to the source files when using extending 203 | services with compose. 204 | 205 | * Adding an `env` command to export all machinery network details to shell. 206 | 207 | * Extending the `swarm` command so it takes a list of compose project files, or 208 | indirections to compose project file in a format similar to the `compose` key 209 | of the YAML syntax. These projects will be sent to the swarm master for 210 | creation. 211 | 212 | * Adding a `reinit` command to trigger some of the machine initialisation steps, 213 | such as image downloading or component creation. 214 | 215 | 216 | ## v 0.3 217 | 218 | * Added support for automatically run `docker-compose up` against a given 219 | machine. This allows to bypass swarm (for development purposes), or to 220 | schedule components on globally. 221 | 222 | 223 | ## v 0.2.2 224 | 225 | * Refactoring of the code to move out all swarm-related code to a separate 226 | (internal) package. 227 | 228 | * Introduces two new (internal) commands to call `docker` and `docker-machine` 229 | so as to ensure standardised behaviour. These also place both commands in 230 | debug mode whenever the current verbosity level is greater than DEBUG. 
231 | 232 | * Fixed mounting of shares so docker user inside the virtual box guest can also 233 | write to files. 234 | 235 | 236 | ## v 0.2.1 237 | 238 | * Adding YAML syntax for automatically pulling a number of repositories once a 239 | machine has been created and initiated. 240 | 241 | 242 | ## v 0.2 243 | 244 | * Intelligent logging, will behave differently when logging to a 245 | terminal or to a regular file descriptor. 246 | 247 | * Convert between logrus (and thus docker-machine) log levels and 248 | internal log levels for an improved output. 249 | 250 | * Now creates cluster token using a local docker component, which 251 | reduces generation time as we do not require the creation of a 252 | temporary virtual machine. 253 | 254 | * Support port forwarding and share mounting on top of the virtualbox 255 | driver. 256 | 257 | * Adding support for application versioning (to make files like this 258 | one more meaningful!). 259 | 260 | * Renamed `machinery info` to `machinery swarm` to make it more explicit. 261 | 262 | 263 | ## v 0.1 264 | 265 | First official version pushed out to github. This provides a 266 | reduced but working set of features. 267 | -------------------------------------------------------------------------------- /baclin: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env tclsh 2 | 3 | ################## 4 | ## Module Name -- baclin.tcl 5 | ## Original Author -- Emmanuel Frecon - emmanuel@sics.se 6 | ## Description: 7 | ## 8 | ## Basic Docker Compose Lineariser 9 | ## 10 | ################## 11 | 12 | package require Tcl 8.6; # The cluster module requires chan pipe 13 | 14 | 15 | # Arrange to access all libraries under lib sub-directory. There is 16 | # only one, but we want to be sure to be able to expand if necessary.
17 | set resolvedArgv0 [file dirname [file normalize $argv0/___]]; # Trick to resolve last symlink 18 | set dirname [file dirname [file normalize $resolvedArgv0]] 19 | set appname [file rootname [file tail $resolvedArgv0]] 20 | lappend auto_path [file join $dirname lib] 21 | package require cluster::extend; 22 | 23 | #package require proctrace 24 | #proctrace init -allowed ::cluster* 25 | 26 | 27 | set in stdin 28 | set out stdout 29 | set dir [pwd] 30 | if { [llength $argv] } { 31 | lassign $argv in_fname out_fname 32 | if { $in_fname ne "" && $in_fname ne "-" } { 33 | set in [open $in_fname] 34 | set dir [file dirname $in_fname] 35 | } 36 | if { $out_fname ne "" && $out_fname ne "-" } { 37 | set out [open $out_fname w] 38 | } 39 | } 40 | 41 | puts $out [extend linearise [read $in] $dir] 42 | if { ![string match std* $in] } { 43 | close $in 44 | } 45 | if { ![string match std* $out] } { 46 | close $out 47 | } -------------------------------------------------------------------------------- /docs/Abstract.md: -------------------------------------------------------------------------------- 1 | # Docker Machinery - Complete Cluster Control at the Command-Line 2 | 3 | machinery tries to be the missing piece at the top of the Docker 4 | pyramid. machinery is (mostly) a command-line tool that integrates 5 | Machine, Swarm, Compose and Docker itself to manage the lifecycle of 6 | entire clusters. machinery combines a specifically crafted YAML file 7 | format with compose-compatible files to provide an at-a-glance view of 8 | whole clusters and all of their components. In addition to its 9 | command-line interface, machinery also provides a REST-like interface 10 | to ease integration and automation with external projects and tools.
11 | 12 | Through the provision of an integrated view of entire clusters, 13 | machinery eases tasks such as creating or removing virtual machines 14 | hosted at any of the providers supported by Machine, but also managing 15 | the creation or removal of components onto those machines. Components 16 | can either be pinpointed to specific machines, or be placed onto 17 | the cluster using any of the controlling facilities provided by Swarm. 18 | To quicken component starting in dynamic scenarios, machinery is able 19 | to initialise virtual machines with a number of docker images ready to 20 | be instantiated whenever needed. 21 | 22 | Machinery can be used in the development, prototyping and testing 23 | phases by providing quick access to production-like environments, but 24 | also in real production scenarios when ramping up projects. -------------------------------------------------------------------------------- /docs/Bio.md: -------------------------------------------------------------------------------- 1 | Dr. Emmanuel Frécon holds his Ph.D. from the IT University of Göteborg. 2 | He is French, but has been living in Sweden almost all his adult life. 3 | He combines working as a researcher at SICS Swedish ICT with leading 4 | the technical work at an e-health company called JoiceCare. He likes 5 | working on projects spanning from low-level system architectures, 6 | through applications, to user experience. His career has moved from 7 | Collaborative Virtual Environments to the Internet of Things. -------------------------------------------------------------------------------- /lib/README.md: -------------------------------------------------------------------------------- 1 | # Resolving dependencies 2 | 3 | `machinery` has few dependencies and tries to be self-contained as much as 4 | possible. Required libraries, whenever possible, will appear as submodules in 5 | this directory.
6 | 7 | ## Web Server 8 | 9 | To benefit from the web API, you will need to arrange for the `til` to be 10 | accessible from this directory. Issuing the following command in this 11 | directory is enough: 12 | 13 | git clone https://github.com/efrecon/til 14 | 15 | The web server in the `til` relies on the logger module from `tcllib`, 16 | meaning that you will also need `tcllib` installed on your system. 17 | But this is anyhow already needed for YAML parsing. 18 | 19 | ## Mounting 20 | 21 | `machinery` is able to mount in-process and out-process. When mounting out of 22 | the process, the preferred behaviour, `machinery` relies on a number of FUSE 23 | helpers (see below). In addition, `machinery` will only clean up when 24 | interrupted with `CTRL+C` if `Tclx` is present (e.g. `tclx` package on Ubuntu). 25 | When mounting inside the process, `machinery` requires the TclVFS package (e.g. 26 | `tcl-vfs` on Ubuntu). All mounting happens on a best-effort basis, based on the 27 | presence of these binaries or packages in the respective paths. Mounting 28 | in-process implies copying to temporary files and directories for each operation 29 | that `machinery` will delegate to external tools such as `docker-machine` or 30 | `docker-compose`. 31 | 32 | By default, the following FUSE helpers are used: 33 | 34 | * `fuse-zip` or `archivemount` are used for ZIP files 35 | * `archivemount` is used for all types of compressed or uncompressed TAR files. -------------------------------------------------------------------------------- /lib/api/pkgIndex.tcl: -------------------------------------------------------------------------------- 1 | # Tcl package index file, version 1.1 2 | # This file is generated by the "pkg_mkIndex" command 3 | # and sourced either when an application starts up or 4 | # by a "package unknown" script.
It invokes the 5 | # "package ifneeded" command to set up package-related 6 | # information so that packages will be loaded automatically 7 | # in response to "package require" commands. When this 8 | # script is sourced, the variable $dir must contain the 9 | # full path name of this file's directory. 10 | 11 | package ifneeded api::wapi 0.1 [list source [file join $dir wapi.tcl]] 12 | package ifneeded api::cli 0.2 [list source [file join $dir cli.tcl]] 13 | -------------------------------------------------------------------------------- /lib/api/wapi.tcl: -------------------------------------------------------------------------------- 1 | ################## 2 | ## Module Name -- wapi.tcl 3 | ## Original Author -- Emmanuel Frecon - emmanuel@sics.se 4 | ## Description: 5 | ## 6 | ## This module implements a web-server and API for operating on a 7 | ## cluster using a vocabulary that mimics most of the 8 | ## command-line arguments. The current implementation binds a 9 | ## given cluster YAML description file to a port on which the 10 | ## server will listen. This is likely to change in the future. 11 | ## The module implements a single command called wapi, a command 12 | ## pushed into the main namespace. 13 | ## 14 | ################## 15 | 16 | package require cluster::swarm 17 | package require cluster::utils 18 | package require minihttpd 19 | 20 | namespace eval ::api::wapi { 21 | namespace eval vars { 22 | variable version "0.1"; # Version of this module and API! 23 | variable clusters {}; # List of dictionaries, keys are port numbers 24 | 25 | variable -port 8090; # Default port to listen on 26 | variable -endpoint "/api/v$version"; # Entry point of the API 27 | } 28 | 29 | namespace export {[a-z]*} 30 | namespace path ::cluster; # Arrange to access log easily, should we rm? 
31 | namespace ensemble create -command ::wapi 32 | namespace import ::cluster::utils::log 33 | } 34 | 35 | 36 | # ::api::wapi::server -- Start serving commands 37 | # 38 | # This procedure will bind a YAML description file to a port 39 | # number and will listen to incoming HTTP connections on that 40 | # port. This implements a REST-like vocabulary that mimics the 41 | # command line options. 42 | # 43 | # Arguments: 44 | # yaml Path to YAML file description 45 | # pfx Prefix when creating the machines in cluster 46 | # args List of dash-led keys and their values: -port should be 47 | # the port to listen on, -root a directory to serve file 48 | # from, all other arguments are passed to the HTTPd lib. 49 | # 50 | # Results: 51 | # The port number on success, a negative number otherwise. 52 | # 53 | # Side Effects: 54 | # Start serving on the port, with all the risks involved. Note 55 | # that the HTTP server used is capable of doing HTTPS and 56 | # implements basic authorisation, but this hasn't been lift up 57 | # to the interface (yet?) 58 | proc ::api::wapi::server { yaml pfx args } { 59 | # Get "our" arguments so we can translate between our API (only 60 | # based on dash-led options) to the one of the web server (which 61 | # requires a root directory and a port number). 62 | utils getopt args -port port ${vars::-port} 63 | utils getopt args -root www "" 64 | 65 | # Create a webserver on the (default?) port, pass all other 66 | # arguments to the web server. Of special interest should be 67 | # -authorization to control access using basic authentication and 68 | # -pki for HTTPS serving. 69 | set srv [::minihttpd::new $www $port {*}$args] 70 | if { $srv < 0 } { 71 | log ERROR "Cannot start web server on port: $port" 72 | return $srv 73 | } 74 | 75 | # Bind API entry points to procedures. API entry points that end 76 | # with .json will return a JSON construct, ending with .txt a text 77 | # (more tcl-like) and nothing will default to the JSON behaviour. 
78 | set api [string trimright ${vars::-endpoint} "/"] 79 | foreach entries {token names info version up destroy halt restart sync \ 80 | ps reinit search} { 81 | # Extract entrypoint for API and name of procedure it should 82 | # map to. When no procedure is specified, it will be the same 83 | # as the API entry point, with an uppercase first letter. 84 | foreach {entry procname} $entries break 85 | if { $procname eq "" } { 86 | set procname [string toupper [string index $entry 0]] 87 | append procname [string range $entry 1 end] 88 | } 89 | 90 | # Empty procname leads a Not-Yet-Implemented result, we might 91 | # want to do better than this... 92 | if { $procname eq "" } { 93 | set procname NYI 94 | } 95 | 96 | # Create REST entry points. Everything which ends with .json 97 | # or nothing will expect JSON returning results, .txt will 98 | # lead to results that are more easily munge by Tcl. The 99 | # desired output type is automatically passed to the 100 | # procedures as a first argument, it is up to the 101 | # implementation to respect the output format. 102 | ::minihttpd::handler $srv $api/$entry \ 103 | [list [namespace current]::$procname json] "application/json" 104 | ::minihttpd::handler $srv $api/${entry}.json \ 105 | [list [namespace current]::$procname json] "application/json" 106 | ::minihttpd::handler $srv $api/${entry}.txt \ 107 | [list [namespace current]::$procname txt] "text/plain" 108 | } 109 | 110 | # Bind the cluster to the server port. vars::clusters contains a 111 | # dictionary where the keys are the ports that we are serving as a 112 | # module (i.e. EACH call to this procedure). The value of these 113 | # dictionaries will itself be a dictionary with the following 114 | # keys: yaml -- Path to YAML description file; prefix -- Prefix 115 | # for machine name creation; cluster -- List of VM dictionaries 116 | # describing the machines of the cluster. 
117 | dict set vars::clusters $srv yaml $yaml 118 | dict set vars::clusters $srv prefix $pfx 119 | 120 | log NOTICE "Listening for web connections on $port,\ 121 | endpoint: ${vars::-endpoint}" 122 | return $srv 123 | } 124 | 125 | #################################################################### 126 | # 127 | # Procedures below are internal to the implementation, they shouldn't 128 | # be changed unless you wish to help... 129 | # 130 | #################################################################### 131 | 132 | # ::api::wapi::Init -- Conditional cluster reading 133 | # 134 | # Read the cluster description associated to the web server port 135 | # number if necessary and return its list of dictionary 136 | # descriptions. 137 | # 138 | # Arguments: 139 | # prt Port number we are listening on. 140 | # 141 | # Results: 142 | # Return the list of VM description dictionaries, this is 143 | # cached. 144 | # 145 | # Side Effects: 146 | # None. 147 | proc ::api::wapi::Init { prt } { 148 | if { [dict exists $vars::clusters $prt] } { 149 | if { [dict exists $vars::clusters $prt cluster] } { 150 | return [dict get $vars::clusters $prt cluster] 151 | } else { 152 | set yaml [dict get $vars::clusters $prt yaml] 153 | set prefix [dict get $vars::clusters $prt prefix] 154 | set driver [dict get [cli defaults] -driver] 155 | if { [catch {cluster parse $yaml \ 156 | -prefix $prefix -driver $driver} vms] } { 157 | log ERROR "Cannot parse $yaml: $vms" 158 | } else { 159 | dict set vars::clusters $prt cluster $vms 160 | return $vms 161 | } 162 | } 163 | } else { 164 | log WARN "No initialisation ever performed for $prt" 165 | } 166 | return {} 167 | } 168 | 169 | 170 | # ::api::wapi::Bind -- Bind cluster to running state 171 | # 172 | # Bind the cluster associated to the port number passed as an 173 | # argument to the current running state reported by 174 | # docker-machine. 175 | # 176 | # Arguments: 177 | # prt Port number we are listening on. 
178 | # 179 | # Results: 180 | # Return the list of VM description dictionaries, this is not 181 | # cached. 182 | # 183 | # Side Effects: 184 | # None. 185 | proc ::api::wapi::Bind { prt } { 186 | set cluster {} 187 | 188 | # Initialise if necessary. 189 | if { ![dict exists $vars::clusters $prt cluster] } { 190 | Init $prt 191 | } 192 | 193 | # Get state from docker machine and merge into VM descriptions so 194 | # as complete the information for each machine. 195 | if { [dict exists $vars::clusters $prt cluster] } { 196 | set state [cluster ls [cluster storage [lindex [dict get $vars::clusters $prt cluster] 0]]] 197 | foreach vm [dict get $vars::clusters $prt cluster] { 198 | lappend cluster [cluster bind $vm $state] 199 | } 200 | } 201 | 202 | return $cluster 203 | } 204 | 205 | 206 | # ::api::wapi::GetToken -- Get swarm token 207 | # 208 | # Get the token associated to the cluster associated to the port 209 | # number passed as an argument. 210 | # 211 | # Arguments: 212 | # prt Port number we are listening on 213 | # force Force re-generation of token 214 | # 215 | # Results: 216 | # Return the token 217 | # 218 | # Side Effects: 219 | # None. 220 | proc ::api::wapi::GetToken { prt {force 0}} { 221 | set token "" 222 | if { [Init $prt] ne {} } { 223 | set yaml [dict get $vars::clusters $prt yaml] 224 | set token [::cluster::swarm::token $yaml $force ""] 225 | } 226 | return $token 227 | } 228 | 229 | 230 | # ::api::wapi::Token -- API implementation for token 231 | # 232 | # Get or (re)generate swarm token 233 | # 234 | # Arguments: 235 | # output Output format (json or txt) 236 | # prt Port number we are listening on 237 | # sock Socket to client 238 | # url Path which was requested 239 | # qry key/values from query 240 | # 241 | # Results: 242 | # Token information in JSON or TXT format. 243 | # 244 | # Side Effects: 245 | # None. 
246 | proc ::api::wapi::Token {output prt sock url qry} { 247 | set force 0 248 | if { [dict exists $qry force] } { 249 | set force [string is true [dict get $qry force]] 250 | } 251 | set token [GetToken $prt $force] 252 | 253 | if { $output eq "txt" } { 254 | return $token 255 | } else { 256 | return [::json::stringify [dict create token $token] 0] 257 | } 258 | } 259 | 260 | 261 | # ::api::wapi::Names -- API implementation for names 262 | # 263 | # Get the names of the machines that are declared as part of the 264 | # cluster. 265 | # 266 | # Arguments: 267 | # output Output format (json or txt) 268 | # prt Port number we are listening on 269 | # sock Socket to client 270 | # url Path which was requested 271 | # qry key/values from query 272 | # 273 | # Results: 274 | # Names in JSON or TXT format. 275 | # 276 | # Side Effects: 277 | # None. 278 | proc ::api::wapi::Names {output prt sock url qry} { 279 | set names [cluster names [Bind $prt]] 280 | if { $output eq "txt" } { 281 | return $names 282 | } else { 283 | # Force proper schema 284 | return [::json::stringify [dict create names $names] 0 \ 285 | [dict create names array]] 286 | } 287 | } 288 | 289 | 290 | # ::api::wapi::Info -- API implementation for info 291 | # 292 | # Get information of (some of) the machines that are declared as 293 | # part of the cluster. Recognise 'machines' as an argument, a 294 | # comma-separated list of machine names (short names accepted). 295 | # 296 | # Arguments: 297 | # output Output format (json or txt) 298 | # prt Port number we are listening on 299 | # sock Socket to client 300 | # url Path which was requested 301 | # qry key/values from query 302 | # 303 | # Results: 304 | # List of machine information, an array of objects in JSON, a 305 | # tcl-list for TXT. 306 | # 307 | # Side Effects: 308 | # None. 309 | proc ::api::wapi::Info {output prt sock url qry} { 310 | # Get list of machines from argument, all machines in cluster if 311 | # nothing specified. 
312 | if { [dict exists $qry machines] } { 313 | set machines [split [dict get $qry machines] ,] 314 | } else { 315 | set machines {} 316 | } 317 | 318 | set vms [cli machines [Bind $prt] $machines] 319 | if { $output eq "txt" } { 320 | return $vms 321 | } else { 322 | set json "\[" 323 | foreach vm $vms { 324 | dict unset vm origin; # Remove internal state 325 | append json [::json::stringify $vm 0 \ 326 | [dict create -ports array \ 327 | -shares array \ 328 | -images array \ 329 | -compose array \ 330 | swarm string \ 331 | state string \ 332 | -registries array]] 333 | append json "," 334 | } 335 | set json [string trimright $json ","] 336 | append json "\]" 337 | return $json 338 | } 339 | } 340 | 341 | 342 | # ::api::wapi::Version -- API implementation for version 343 | # 344 | # Return machinery version 345 | # 346 | # Arguments: 347 | # output Output format (json or txt) 348 | # prt Port number we are listening on 349 | # sock Socket to client 350 | # url Path which was requested 351 | # qry key/values from query 352 | # 353 | # Results: 354 | # Version number in JSON or TXT format. 355 | # 356 | # Side Effects: 357 | # None. 358 | proc ::api::wapi::Version {output prt sock url qry} { 359 | set cluster [Bind $prt] 360 | if { $output eq "txt" } { 361 | return [cli version] 362 | } else { 363 | return [::json::stringify [dict create version [cli version]] 0] 364 | } 365 | } 366 | 367 | 368 | # ::api::wapi::Up -- API implementation for up 369 | # 370 | # Create or (re)start machines. Recognise 'machines' as an 371 | # argument, a comma-separated list of machine names (short names 372 | # accepted). No argument means all machines in cluster. 
373 | # 374 | # Arguments: 375 | # output Output format (json or txt) 376 | # prt Port number we are listening on 377 | # sock Socket to client 378 | # url Path which was requested 379 | # qry key/values from query 380 | # 381 | # Results: 382 | # List of machine information, an array of objects in JSON, a 383 | # tcl-list for TXT. 384 | # 385 | # Side Effects: 386 | # None. 387 | proc ::api::wapi::Up {output prt sock url qry} { 388 | # Get list of machines from argument, all machines in cluster if 389 | # nothing specified. 390 | set token [GetToken $prt] 391 | if { [dict exists $qry machines] } { 392 | set machines [split [dict get $qry machines] ,] 393 | } else { 394 | set machines {} 395 | } 396 | foreach vm [cli machines [Bind $prt] $machines] { 397 | cli up $vm $token 398 | } 399 | 400 | # Return information for the machines that we requested to be 401 | # started up. 402 | return [Info $output $prt $sock $url $qry] 403 | } 404 | 405 | 406 | # Implement destroy, see OnEach 407 | proc ::api::wapi::Destroy {output prt sock url qry} { 408 | return [OnEach $output $prt $sock $url $qry [list destroy]] 409 | } 410 | 411 | # Implement halt, see OnEach 412 | proc ::api::wapi::Halt {output prt sock url qry} { 413 | return [OnEach $output $prt $sock $url $qry [list halt]] 414 | } 415 | 416 | # Implement restart, see OnEach 417 | proc ::api::wapi::Restart {output prt sock url qry} { 418 | return [OnEach $output $prt $sock $url $qry [list halt start]] 419 | } 420 | 421 | # Implement sync, see OnEach 422 | proc ::api::wapi::Sync {output prt sock url qry} { 423 | return [OnEach $output $prt $sock $url $qry [list sync]] 424 | } 425 | 426 | # ::api::wapi::Ps -- API implementation for ps 427 | # 428 | # List the components running on selected machines of the 429 | # cluster, or as reported by the swarm master. This is 430 | # basically an interface to docker ps. 
Recognise 'machines' as 431 | # an argument, a comma-separated list of machine names (short 432 | # names accepted). No argument means requesting the cluster 433 | # master about the list of components running. 434 | # 435 | # Arguments: 436 | # output Output format (json or txt) 437 | # prt Port number we are listening on 438 | # sock Socket to client 439 | # url Path which was requested 440 | # qry key/values from query 441 | # 442 | # Results: 443 | # List of component information, an array of objects in JSON, a 444 | # tcl-list for TXT. When machines have been pin-pointed, an 445 | # argument called machine will contain the name of the machine 446 | # that the component runs on. 447 | # 448 | # Side Effects: 449 | # None. 450 | proc ::api::wapi::Ps {output prt sock url qry} { 451 | # Get the list of machines out of the machines query parameter 452 | if { [dict exists $qry machines] } { 453 | set machines [split [dict get $qry machines] ,] 454 | } else { 455 | set machines {} 456 | } 457 | 458 | # Get list of components out of the list of machines or from the 459 | # swarm master. Add the name of the machine when listing 460 | # machines to make sure callers can make the difference. 461 | set out {} 462 | if { [llength $machines] > 0 } { 463 | foreach vm [cli machines [Bind $prt] $machines] { 464 | foreach c [cluster ps $vm 0 0] { 465 | if { [dict exists $vm -name] } { 466 | dict set c machine [dict get $vm -name] 467 | } 468 | lappend out $c 469 | } 470 | } 471 | } else { 472 | set master [::cluster::swarm::master [Bind $prt]] 473 | set out [cluster ps $master 1 0] 474 | } 475 | 476 | # Output the list of components which we've got from the swarm 477 | # master or from the machines. 
478 | if { $output eq "txt" } { 479 | return $out 480 | } else { 481 | set json "\[" 482 | foreach c $out { 483 | # Trim leading quote away from command 484 | if { [dict exists $c command] } { 485 | dict set c command [string trim [dict get $c command] \"] 486 | } 487 | # Split the list of ports 488 | if { [dict exists $c ports] } { 489 | dict set c ports [split [dict get $c ports] ","] 490 | } 491 | 492 | append json [::json::stringify $c 0 \ 493 | [dict create \ 494 | ports array \ 495 | command string \ 496 | created string \ 497 | status string]] 498 | append json "," 499 | } 500 | set json [string trimright $json ","] 501 | append json "\]" 502 | return $json 503 | } 504 | } 505 | 506 | 507 | proc ::api::wapi::Search {output prt sock url qry} { 508 | # Get the list of components out of the components query parameter 509 | if { [dict exists $qry components] } { 510 | set components [split [dict get $qry components] ,] 511 | } else { 512 | set components * 513 | } 514 | 515 | set cluster [Bind $prt] 516 | set json "\[" 517 | foreach ptn $components { 518 | foreach {mc nm id} [cluster search $cluster $ptn] { 519 | set d [dict create machine $mc name $nm id $id] 520 | append json [::json::stringify $d 0] "," 521 | } 522 | } 523 | set json [string trimright $json ","] 524 | append json "\]" 525 | return $json 526 | } 527 | 528 | # ::api::wapi::Reinit -- API implementation for reinit 529 | # 530 | # Reinitialise machines. Recognise 'machines' as an argument, a 531 | # comma-separated list of machine names (short names accepted). 532 | # No argument means all machines in cluster. Also recognise 533 | # 'steps' as a comma separated list of steps to perform during 534 | # reinitialisation: registries, images or compose. 
535 | # 536 | # Arguments: 537 | # output Output format (json or txt) 538 | # prt Port number we are listening on 539 | # sock Socket to client 540 | # url Path which was requested 541 | # qry key/values from query 542 | # 543 | # Results: 544 | # List of machine information, an array of objects in JSON, a 545 | # tcl-list for TXT. 546 | # 547 | # Side Effects: 548 | # None. 549 | proc ::api::wapi::Reinit {output prt sock url qry} { 550 | if { [dict exists $qry steps] } { 551 | set steps [split [dict get $qry steps] ,] 552 | } else { 553 | set steps [list registries images compose] 554 | } 555 | 556 | return [OnEach $output $prt $sock $url $qry [list [list init $steps]]] 557 | } 558 | 559 | 560 | proc ::api::wapi::Swarm {output prt sock url qry} { 561 | # Get list of operations to perform out of query arguments ops 562 | # (but also accepts operations) 563 | set ops {} 564 | if { [dict exists $qry ops] } { 565 | set ops [split [dict get $qry ops] ,] 566 | dict unset qry ops 567 | } elseif { [dict exists $qry operations] } { 568 | set ops [split [dict get $qry operations] ,] 569 | dict unset qry operations 570 | } 571 | 572 | # Order the operations to be performed into list called operations 573 | if { [llength $ops] == 0 } { 574 | set operations [list UP] 575 | } else { 576 | foreach order [list STOP KILL RM UP START] { 577 | if { [lsearch -glob -nocase $ops *${order}*] >= 0 } { 578 | lappend operations $order 579 | } 580 | } 581 | } 582 | 583 | # Pass all other query parameters as options to swarm 584 | set compose [string trim [::minihttpd::data $prt $sock]] 585 | if { $compose ne "" } { 586 | set master [::cluster::swarm::master [Bind $prt]] 587 | 588 | # Create temporary file with content of POSTed data. 
589 | set tmpfile [utils tmpfile compose .yml] 590 | set fd [open $tmpfile w] 591 | puts $fd $compose 592 | close $fd 593 | 594 | foreach op $operations { 595 | cluster swarm $master $op $tmpfile $qry 596 | } 597 | 598 | # Remove temp file 599 | file delete -force $tmpfile 600 | } 601 | } 602 | 603 | 604 | # ::api::wapi::OnEach -- Execute sequence of ops on machines 605 | # 606 | # This will execute a sequence of operations on a number of 607 | # machines in the cluster. Recognise 'machines' as an argument, 608 | # a comma-separated list of machine names (short names 609 | # accepted). No argument means all machines in cluster. 610 | # 611 | # Arguments: 612 | # output Output format (json or txt) 613 | # prt Port number we are listening on 614 | # sock Socket to client 615 | # url Path which was requested 616 | # qry key/values from query 617 | # 618 | # Results: 619 | # List of machine information, an array of objects in JSON, a 620 | # tcl-list for TXT. 621 | # 622 | # Side Effects: 623 | # None. 624 | proc ::api::wapi::OnEach {output prt sock url qry ops} { 625 | if { [dict exists $qry machines] } { 626 | set machines [split [dict get $qry machines] ,] 627 | } else { 628 | set machines {} 629 | } 630 | foreach vm [cli machines [Bind $prt] $machines] { 631 | foreach op $ops { 632 | if { [llength $op] > 1 } { 633 | cluster [lindex $op 0] $vm {*}[lrange $op 1 end] 634 | } else { 635 | cluster $op $vm 636 | } 637 | } 638 | } 639 | 640 | # Return information for the machines that we requested to be 641 | # started up. 642 | return [Info $output $prt $sock $url $qry] 643 | } 644 | 645 | 646 | # ::api::wapi::NYI -- Not Yet Implemented 647 | # 648 | # Not yet implemented, return empty! 649 | # 650 | # Arguments: 651 | # output Output format (json or txt) 652 | # prt Port number we are listening on 653 | # sock Socket to client 654 | # url Path which was requested 655 | # qry key/values from query 656 | # 657 | # Results: 658 | # Empty string/object. 
659 | # 660 | # Side Effects: 661 | # None. 662 | proc ::api::wapi::NYI {output prt sock url qry} { 663 | # "Implementation" of NYI 664 | if {$output eq "txt" } { 665 | return "" 666 | } else { 667 | return "\{\}" 668 | } 669 | } 670 | 671 | package provide api::wapi $::api::wapi::vars::version 672 | 673 | 674 | -------------------------------------------------------------------------------- /lib/cluster/atexit.tcl: -------------------------------------------------------------------------------- 1 | namespace eval AtExit { 2 | variable atExitScripts [list] 3 | variable trapped 0 4 | 5 | proc atExit script { 6 | variable atExitScripts 7 | variable trapped 8 | 9 | # Install CTRL+C handler if possible 10 | if { ! $trapped && [catch {package require Tclx} ver] == 0 } { 11 | signal trap SIGINT exit 12 | set trapped 1 13 | } 14 | 15 | lappend atExitScripts \ 16 | [uplevel 1 [list namespace code $script]] 17 | } 18 | 19 | namespace export atExit 20 | } 21 | 22 | rename exit AtExit::ExitOrig 23 | proc exit {{code 0}} { 24 | variable AtExit::atExitScripts 25 | set n [llength $atExitScripts] 26 | while {$n} { 27 | catch [lindex $atExitScripts [incr n -1]] 28 | } 29 | rename exit {} 30 | rename AtExit::ExitOrig exit 31 | namespace delete AtExit 32 | exit $code 33 | } 34 | 35 | namespace import AtExit::atExit 36 | 37 | package provide atExit 1.0 -------------------------------------------------------------------------------- /lib/cluster/environment.tcl: -------------------------------------------------------------------------------- 1 | package require cluster::utils 2 | 3 | namespace eval ::cluster::environment { 4 | # Encapsulates variables global to this namespace under their own 5 | # namespace, an idea originating from http://wiki.tcl.tk/1489. 6 | # Variables which name start with a dash are options and which 7 | # values can be changed to influence the behaviour of this 8 | # implementation. 
9 | namespace eval vars { 10 | # Extension for env storage cache files 11 | variable -ext .env 12 | # Character for shell-compatible quoting 13 | variable -quote "\"" 14 | variable -backslashed {"\\\$" "\$" "\\\"" \" "\\'" ' "\\\\" "\\" "\\`" "`"} 15 | variable stack {} 16 | } 17 | # Export all lower case procedure, arrange to be able to access 18 | # commands from the parent (cluster) namespace from here and 19 | # create an ensemble command called swarmmode (note the leading :: to make 20 | # this a top-level command!) to ease API calls. 21 | namespace export {[a-z]*} 22 | namespace path [namespace parent] 23 | namespace ensemble create -command ::environment 24 | namespace import [namespace parent]::CacheFile \ 25 | [namespace parent]::utils::log 26 | } 27 | 28 | 29 | # ::cluster::environment::set -- Set environement 30 | # 31 | # Set the (discovery) environment based on the origin of a 32 | # virtual machine. 33 | # 34 | # Arguments: 35 | # vm Virtual machine description 36 | # 37 | # Results: 38 | # Return the full dictionary of what was set, empty dict on errors. 39 | # 40 | # Side Effects: 41 | # Changes the ::env global array, which will be passed to sub-processes. 
# NB: this namespace defines its own set/read/line commands, hence the
# explicit ::set / ::unset calls throughout to reach the Tcl builtins.
proc ::cluster::environment::set { vm } {
    if { [dict exists $vm origin] } {
        ::set environment [read [cache $vm]]
        export $environment
    } else {
        # No origin recorded for this machine: nothing to read, nothing set.
        ::set environment {}
    }

    return $environment
}


# ::cluster::environment::export -- Export a dictionary to the environment
#
#       Copy every key/value pair of the dictionary into the ::env global
#       array, thereby making them visible to this process and to every
#       sub-process started afterwards.
proc ::cluster::environment::export { environment } {
    dict for {k v} $environment {
        log DEBUG "Exporting value of $k to environment and process tree"
        ::set ::env($k) $v
    }
}


# ::cluster::environment::push -- Export variables, remembering prior state
#
#       Export the dictionary into ::env, first recording on vars::stack
#       which variables already existed (and their values) and which did
#       not, so that a later pop can restore the exact previous state.
proc ::cluster::environment::push { environment } {
    # This variable will hold a dictionary with the value of the environment
    # variables before they are pushed into the environment, if relevant
    ::set setback [dict create]
    # This variable will hold a list of the names of the environment variables
    # that didn't exist but are being set.
    ::set remove [list]
    # Check current environment against the one being pushed. Collect in the
    # variables described above, depending on if the variables already existed
    # in the process environment or not.
    dict for {k v} $environment {
        if { [info exists ::env($k)] } {
            dict set setback $k $::env($k)
        } else {
            lappend remove $k
        }
    }
    # Remember the old state of the environment in the global vars::stack and
    # finally set the variables that are pushed onto the process environment.
    lappend vars::stack $setback $remove
    export $environment
}

# ::cluster::environment::pop -- Undo one or several pushes
#
#       Restore the process environment as it was before the last
#       <howmany> calls to push: variables that did not exist are unset,
#       pre-existing variables get their saved values back.
proc ::cluster::environment::pop { { howmany 1 } } {
    for { ::set i 0 } { $i < $howmany } { incr i } {
        # Get back both variables with information from the push from the stack and
        # pop them away.
        ::set setback [lindex $vars::stack end-1]
        ::set remove [lindex $vars::stack end]
        ::set vars::stack [lrange $vars::stack 0 end-2]
        # Unset the variables that should be unset, and re-export back the previous
        # state.
        foreach k $remove {
            log DEBUG "Removing $k from environment"
            ::unset ::env($k)
        }
        export $setback
    }
}

# ::cluster::environment::clean -- Remove matching variables from ::env
#
#       Unset every environment variable whose name matches the glob
#       pattern passed as an argument.
proc ::cluster::environment::clean { ptn } {
    log DEBUG "Cleaning environment from variables matching $ptn"
    foreach k [array names ::env $ptn] {
        log TRACE "Removing $k from environment"
        unset ::env($k)
    }

}

# ::cluster::environment::cache -- Path to env cache file for a machine
#
#       Return the path of the environment cache file associated to the
#       origin of the virtual machine description.
proc ::cluster::environment::cache { vm } {
    return [CacheFile [dict get $vm origin] ${vars::-ext}]
}


# ::cluster::environment::read -- Read an environment file
#
#       Read the content of an environment file, such as the ones used
#       for declaring defaults in /etc (or for our discovery cache).
#       This isn't a perfect parser, but is able to skip comments and
#       blank lines.
#
# Arguments:
#	fpath	Full path to file to read
#
# Results:
#       Content of file as a dictionary
#
# Side Effects:
#       None.
proc ::cluster::environment::read { fpath } {
    ::set d [dict create]
    if { [file exists $fpath] } {
        log DEBUG "Reading environment description file at $fpath"
        ::set fd [open $fpath]
        # eof/gets loop: the final gets on a trailing newline yields an
        # empty line, which the line parser ignores harmlessly.
        while {![eof $fd]} {
            line d [gets $fd]
        }
        close $fd
    }
    log DEBUG "Read [join [dict keys $d] {, }] from $fpath"

    return $d
}


# ::cluster::environment::line -- Parse lines of environment files
#
#       Parses the line passed as an argument and set the
#       corresponding keys in the dictionary from the arguments.
#
# Arguments:
#	d_	Name of dictionary variable to modify
#	line	Line to parse
#
# Results:
#       Return the key that was extracted from the line, or an empty
#       string on errors, empty lines, comments, etc.
#
# Side Effects:
#       None.
proc ::cluster::environment::line { d_ line } {
    upvar $d_ d;   # Get to the dictionary variable.
    ::set line [string trim $line]
    # Only parse lines that are non-empty AND not comments.  The previous
    # test used ||, which is true for every line and therefore also parsed
    # comment lines such as "#KEY=value" (storing a key named "#KEY");
    # both conditions must hold, as documented above.
    if { $line ne "" && [string index $line 0] ne "\#" } {
        # Skip leading "export" bash instruction
        if { [string first "export " $line] == 0 } {
            ::set line [string trim \
                    [string range $line [string length "export "] end]]
        }
        ::set eql [string first "=" $line]
        if { $eql >= 0 } {
            # Key is everything before the first =, value everything after.
            ::set k [string trim [string range $line 0 [expr {$eql-1}]]]
            ::set v [string trim [string range $line [expr {$eql+1}] end]]
            ::set v [string trim $v "\"'"];   # Remove UNIX outer-quoting
            # Replace backslashed characters
            ::set v [string map ${vars::-backslashed} $v]

            dict set d $k $v
            return $k
        }
    }
    return ""
}


# ::cluster::environment::write -- Write an environment file
#
#       Write the content of a dictionary to an environment file.
#
# Arguments:
#	fpath	Full path to file to write to (or file descriptor)
#	enviro	Environment to write
#	lead	String to insert at beginning of each line
#
# Results:
#       None.
#
# Side Effects:
#       None.
proc ::cluster::environment::write { fpath enviro { lead "" } } {
    log DEBUG "Writing [join [dict keys $enviro] {, }] to\
               description file at $fpath"
    # If fconfigure succeeds, fpath already is an open channel; otherwise
    # treat it as a path and open it for writing.
    if { [catch {fconfigure $fpath} res] == 0 } {
        ::set fd $fpath
    } else {
        ::set fd [open $fpath "w"]
    }
    dict for {k v} $enviro {
        # Quote values containing spaces so the file stays shell-sourceable.
        if { [string first " " $v] < 0 } {
            puts $fd "${lead}${k}=${v}"
        } else {
            puts $fd "${lead}${k}=${vars::-quote}${v}${vars::-quote}"
        }
    }
    # Only close the channel if we opened it ourselves.
    if { $fd ne $fpath } {
        close $fd
    }
}


# ::cluster::environment::resolve -- Environement variable resolution
#
#       This procedure will resolve every occurence of a construct
#       $name where name is the name of an environment variable to the
#       value of that variable, as long as it exists.  It also
#       recognises ${name} and ${name:default} (i.e. replace by the
#       content of the variable if it exists, or by the default value
#       if the variable does not exist).
#
# Arguments:
#	str	Incoming string
#
# Results:
#       String where environment variables have been resolved to their
#       values.
#
# Side Effects:
#       None.
proc ::cluster::environment::resolve { str { patterns {*}}} {
    # Do a quick string mapping for $VARNAME and ${VARNAME} and store
    # result in variable called quick.
    ::set mapper {}
    foreach e [array names ::env] {
        if { [MultiMatch $e $patterns] } {
            lappend mapper \$${e} [::set ::env($e)]
            lappend mapper \$\{${e}\} [::set ::env($e)]
        }
    }
    ::set quick [string map $mapper $str]

    # Iteratively modify quick for replacing occurences of
    # ${name:default} constructs. We do this until there are no
    # match.
    ::set done 0
    # The regexp below using varnames as bash seems to be considering
    # them: an identifier, a ':' separator, an optional '-' marker and a
    # default value running up to the closing brace.
    ::set exp "\\$\{(\[a-zA-Z_\]+\[a-zA-Z0-9_\]*):(-?)(\[^\}\]*?)\}"
    while { !$done } {
        # Look for the expression and if we have a match, extract the
        # name of the variable.  -indices is used so that the match can be
        # replaced in place with [string replace] below.
        ::set rpl [regexp -inline -indices -- $exp $quick]
        if { [llength $rpl] >= 3 } {
            lassign $rpl range var marker dft
            lassign $range range_start range_stop
            lassign $var var_start var_stop
            lassign $marker marker_start marker_stop
            lassign $dft dft_start dft_stop
            ::set var [string range $quick $var_start $var_stop]
            ::set marker [string range $quick $marker_start $marker_stop]
            ::set dft [string range $quick $dft_start $dft_stop]
            # If that variable is declared and exist, replace by its
            # value, otherwise replace with the default value.  The '-'
            # marker (bash ${name:-default}) additionally falls back to the
            # default when the variable exists but is empty.
            if { [MultiMatch $var $patterns] } {
                if { [info exists ::env($var)] } {
                    if { $marker eq "-" && [::set ::env($var)] eq "" } {
                        ::set quick \
                            [string replace $quick $range_start $range_stop $dft]
                    } else {
                        ::set quick \
                            [string replace $quick $range_start $range_stop \
                                    [::set ::env($var)]]
                    }
                } else {
                    ::set quick \
                        [string replace $quick $range_start $range_stop $dft]
                }
            } else {
                ::set quick \
                    [string replace $quick $range_start $range_stop $dft]
            }
        } else {
            ::set done 1
        }
    }

    return $quick
}


# ::cluster::environment::quote -- Backslash-escape shell metacharacters
#
#       Escape parentheses, quotes and dollar signs with a backslash,
#       unless the character is already preceded by a backslash.
proc ::cluster::environment::quote { str } {
    ::set prev ""
    ::set ret ""
    foreach c [split $str ""] {
        if { $c in [list "(" ")" "'" "\"" "\$"] && $prev ne "\\" } {
            append ret "\\" $c
        } else {
            append ret $c
        }
        ::set prev $c
    }
    return $ret
}

# ::cluster::environment::MultiMatch -- Match against several glob patterns
#
#       Return 1 as soon as v matches one of the glob patterns, 0 when
#       none matches.
proc ::cluster::environment::MultiMatch { v patterns } {
    foreach ptn $patterns {
        if { [string match $ptn $v] } {
            return 1
        }
    }
    return 0
}

package provide cluster::environment 0.2


--------------------------------------------------------------------------------
/lib/cluster/extend.tcl:
--------------------------------------------------------------------------------
package require yaml
package require huddle

package require cluster::vcompare

namespace eval ::cluster::extend {
    # Encapsulates variables global to this namespace under their own
    # namespace, an idea originating from http://wiki.tcl.tk/1489.
    # Variables which name start with a dash are options and which
    # values can be changed to influence the behaviour of this
    # implementation.
    namespace eval vars {
        # List of endings to trim away, in order from reconstructed YAML
        variable -trim {- " \f\v\r\t\n"}
        # Indenting and wordwrapping options
        variable -indent 2
        variable -wordwrap 1024
    }
    # Export all lower case procedure, arrange to be able to access
    # commands from the parent (cluster) namespace from here and
    # create an ensemble command called extend (note the leading :: to make
    # this a top-level command!) to ease API calls.
    namespace export {[a-z]*}
    namespace path [namespace parent]
    namespace ensemble create -command ::extend
}


# ::cluster::extend::linearise -- Linearise away 'extends'
#
#      Linearise YAML so it does not contain 'extends' instructions. Each
#      occurence of 'extends' will be resolved recursively and inserted into
#      the service description.
#
# Arguments:
#      yaml     Textual YAML content, as read from a file, for example
#      dir      Original directory context where the content is coming from (empty==pwd)
#
# Results:
#      Return the same YAML content, where all references to other services
#      (pointed at by 'extends') have been recursively replaced by their
#      content.
#
# Side Effects:
#      None.
proc ::cluster::extend::linearise { yaml { dir "." } } {
    return [huddle2yaml [linearise2huddle $yaml $dir]]
}


# ::cluster::extend::linearise2huddle -- Linearise away 'extends' to huddle
#
#      Linearise YAML so it does not contain 'extends' instructions. Each
#      occurence of 'extends' will be resolved recursively and inserted into
#      the service description. This procedure returns the internal huddle
#      representation of the linearised result so that it can be process
#      further.
#
# Arguments:
#      yaml     Textual YAML content, as read from a file, for example
#      dir      Original directory context where the content is coming from (empty==pwd)
#
# Results:
#      Return a huddle tree representation, where all references to other
#      services (pointed at by 'extends') have been recursively replaced by
#      their content.
#
# Side Effects:
#      None.
proc ::cluster::extend::linearise2huddle { yaml { dir "." } } {
    if { $dir eq "" } {
        set dir [pwd]
    }

    # Make sure we have "huddle get_stripped". This is to cope with earlier
    # versions of huddle in tcllib (presumably the presence of the internal
    # ::huddle::_gets command identifies those versions -- TODO confirm
    # against tcllib history).
    if { [llength [info commands ::huddle::_gets]] } {
        proc ::huddle::get_stripped { src args } {
            return [uplevel 1 [linsert $args 0 huddle gets $src]]
        }
    }

    set hdl [::yaml::yaml2huddle $yaml]
    if { "services" in [huddle keys $hdl] } {
        # Compose v2/v3 layout: copy every top-level key except 'services'
        # verbatim, then substitute the linearised services.
        foreach k [huddle keys $hdl] {
            if { $k ne "services" } {
                if { [info exists output] } {
                    huddle set output $k [huddle get $hdl $k]
                } else {
                    set output [huddle create $k [huddle get $hdl $k]]
                }
            }
        }
        huddle set output services [Services $dir [huddle get $hdl services]]
    } else {
        # Compose v1 layout: the whole document is the map of services.
        set output [Services $dir $hdl]
    }

    return $output
}


# ::cluster::extend::huddle2yaml -- Convert back to YAML
#
#      Convert back the huddle representation of a YAML file to YAML content.
#      This performs a number of cleanup and transformations to make docker
#      stack deploy happy with the input.
#
# Arguments:
#      hdl      Huddle representation of YAML content
#
# Results:
#      Return YAML content ready to be given away to docker stack deploy or
#      docker-compose.
#
# Side Effects:
#      None.
proc ::cluster::extend::huddle2yaml { hdl } {
    # Serialise the huddle tree using the configured indentation and
    # word-wrapping, then trim the configured leading/trailing residue for
    # improved readability of the result.
    set out [::yaml::huddle2yaml $hdl ${vars::-indent} ${vars::-wordwrap}]
    foreach chars ${vars::-trim} {
        set out [string trim $out $chars]
    }

    # Textual post-processing pass.  The version number is forced to be
    # represented as a string (docker stack deploy is peaky about that very
    # type), and values that end with a ':' (which could fool YAML parsing)
    # are enclosed in quotes, as are cpus/memory numbers.
    set translations [list \
            "s/version:\\s*(\[0-9.\]+)/version: \"\\1\"/g" \
            "s/^(\\s*)(\\w*):\\s*(.*:)$/\\1\\2: \"\\3\"/g" \
            "s/^(\\s*)-\\s*(.*:)$/\\1- \"\\2\"/g" \
            "s/^(\\s*)(cpus|memory):\\s*(\[0-9.\]+)$/\\1\\2: \"\\3\"/g"]
    foreach rule $translations {
        set out [Sed $rule $out]
    }

    return $out
}


# ::cluster::extend::Service -- Look for a service
#
#      Look for the description of a given services within a list of service and
#      return it.
#
# Arguments:
#      services List of services
#      srv      Name of service to look for
#
# Results:
#      Return the service description, or an empty dict
#
# Side Effects:
#      None.
proc ::cluster::extend::Service { services srv } {
    # Linear scan over the huddle keys; returns the huddle value for the
    # first (only) matching service name.
    foreach s [huddle keys $services] {
        if { $s eq $srv } {
            return [huddle get $services $s]
        }
    }
    return [dict create]
}


# This is an ugly (and rather unprecise when it comes to version numbers) fix.
# Starting with later updates, the huddle2yaml implementation is broken as it
# does not properly supports its own types. The following fixes it in a rather
# ugly way, until this has made its way into the official implementation in
# tcllib.
if { [vcompare gt [package provide yaml] 0.3.7] } {
    proc ::yaml::_imp_huddle2yaml {data {offset ""}} {
        set nextoff "$offset[string repeat { } $yaml::_dumpIndent]"
        switch -glob -- [huddle type $data] {
            "int*" -
            "str*" {
                set data [huddle get_stripped $data]
                return [_dumpScalar $data $offset]
            }
            "sequence" -
            "list" {
                set inner {}
                set len [huddle llength $data]
                for {set i 0} {$i < $len} {incr i} {
                    set sub [huddle get $data $i]
                    set tsub [huddle type $sub]
                    set sep [expr {[string match "str*" $tsub] || [string match "int*" $tsub] ? " " : "\n"}]
                    lappend inner [join [list $offset - $sep [_imp_huddle2yaml $sub $nextoff]] ""]
                }
                return [join $inner "\n"]
            }
            "mapping" -
            "dict" {
                set inner {}
                foreach {key} [huddle keys $data] {
                    set sub [huddle get $data $key]
                    set tsub [huddle type $sub]
                    set sep [expr {[string match "str*" $tsub] || [string match "int*" $tsub] ? " " : "\n"}]
                    lappend inner [join [list $offset $key: $sep [_imp_huddle2yaml $sub $nextoff]] ""]
                }
                return [join $inner "\n"]
            }
            default {
                return $data
            }
        }
    }
}


# ::cluster::extend::Combine -- combine together huddle YAML content
#
#      Given a huddle map representation, this procedure will merge onto of its
#      keys the content of another similar map.
#
# Arguments:
#      tgt_     Pointer to huddle map to modify
#      src      Huddle map to copy into map
#      allow    List of key matching patterns to consider when copying
#      deny     List of key matching patterns not to consider when copying
#
# Results:
#      List of keys that were combined
#
# Side Effects:
#      Actively modify the target
proc ::cluster::extend::Combine { tgt_ src {allow {*}} {deny {}} } {
    upvar $tgt_ tgt

    set combined [list]
    foreach k [huddle keys $src] {

        # Decide if the content of this key should be merged or not: it must
        # match at least one allow pattern and no deny pattern.  NOTE: the
        # decision flag is kept in 'merge', distinct from the 'allow'
        # parameter; the previous code did 'set allow 0', clobbering the
        # pattern list before iterating over it, so keys were matched
        # against the literal list {0} and (almost) nothing was ever
        # combined.
        set merge 0
        foreach ptn $allow {
            if { [string match $ptn $k] } {
                set merge 1; break
            }
        }
        if { $merge } {
            foreach ptn $deny {
                if { [string match $ptn $k] } {
                    set merge 0; break
                }
            }
        }

        if { $merge } {
            set v [huddle get $src $k]
            if { $k in [huddle keys $tgt] } {
                # Key exists on both sides: composed objects (maps, lists)
                # are combined so no value from the extended object is lost;
                # scalars are simply overridden by the source.
                switch [huddle type $v] {
                    "mapping" -
                    "dict" {
                        huddle set tgt \
                                $k [huddle combine [huddle get $tgt $k] $v]
                    }
                    "sequence" -
                    "list" {
                        huddle set tgt \
                                $k [huddle combine [huddle get $tgt $k] $v]
                    }
                    default {
                        huddle set tgt $k $v
                    }
                }
            } else {
                huddle set tgt $k $v
            }
            lappend combined $k
        }
    }

    return $combined
}


# ::cluster::extend::Services -- Linearise services
#
#      Given a huddle representation of services
(originating from a given
#      directory context), this procedure will replace all occurences of
#      services that contain an 'extends' directive with the description that
#      originates from the service pointed at by extend. This implementation is
#      aware both of extending within files, but also when referencing to
#      external files and will recurse as necessary
#
# Arguments:
#      dir      Context directory where the description is coming from
#      hdl      Huddle representation of the list of services.
#
# Results:
#      A linearised list of services.
#
# Side Effects:
#      None.
proc ::cluster::extend::Services { dir hdl } {
    # Access services (do this is in version dependent manner so we can support
    # the old v 1. file format and the new ones).
    if { "services" in [huddle keys $hdl] } {
        set services [huddle get $hdl services]
    } else {
        set services $hdl
    }

    # The YAML implementation relies on specific internal types in addition to
    # the types that are directly supported by huddle, the following copies the
    # incoming map of services and empties it in order to start from something
    # that has the same internal type (as opposed to create).
    set all_services $services
    foreach service [huddle keys $all_services] {
        set all_services [huddle remove $all_services $service]
    }

    foreach service [huddle keys $services] {
        set descr [huddle get $services $service]
        if { "extends" in [huddle keys $descr] } {
            # When a key called extends exist in the service description,
            # recursively go a look for the entire description of that service
            # and store this in the variable n_descr.
            set src [huddle get $descr extends]
            if { [string match "str*" [huddle type $src]] } {
                # When the value of what we extend is a string, this is the name
                # of a service that is already in this file, recurse using the
                # services that we already know of.
                set n_descr [Service $all_services [huddle get_stripped $descr extends]]
            } else {
                # Otherwise, we need to specify at least the name of a service
                # (in which case this is exactly the same as above), or a
                # service in another (relative) file.
                if { "service" in [huddle keys $src] } {
                    if { "file" in [huddle keys $src] } {
                        # When extending from a service in another file,
                        # recursively find the services in that file, look for
                        # that service and use that description.
                        set s_file [file join $dir [huddle get_stripped $src file]]
                        set s_dir [file dirname $s_file]
                        set in [open $s_file];   # Let it fail on purpose
                        set n_descr [Service \
                                [Services $s_dir [::yaml::yaml2huddle [read $in]]] \
                                [huddle get_stripped $src service]]
                        close $in
                    } else {
                        set n_descr [Service $all_services [huddle get_stripped $src service]]
                    }
                } else {
                    set n_descr {}
                }
            }


            # Now add the value of all local keys to n_descr, skip extends
            # since we have linearised above. We cannot simply copy into,
            # since that would loose values from the extended object, instead
            # we arrange to combine for composed objects such as lists or
            # dictionaries.
            Combine n_descr $descr [list *] [list "extends"]


            # Add this service to the list of linearised services.
            huddle set all_services $service $n_descr
        } else {
            # Since no extends exists in that service, add it to the list of
            # linearised services verbatim.
            huddle set all_services $service $descr
        }
    }
    return $all_services
}


# ::cluster::extend::SedLine -- Mini-sed implementation
#
#      This is a minimal sed implementation that has been lifted up from toclbox
#      and also implements use of \1, \2, etc. in subgroups replacements.
#
# Arguments:
#      script   sed-like script. Not all syntax is supported!
#      input    Text to perform sed operations on.
#
# Results:
#      Return the result of the sed-like mini-language operation on the input.
#
# Side Effects:
#      None.
proc ::cluster::extend::SedLine {script input} {
    set sep [string index $script 1]
    foreach {cmd from to flag} [::split $script $sep] break
    switch -- $cmd {
        "s" {
            set cmd regsub
            if {[string first "g" $flag]>=0} {
                lappend cmd -all
            }
            if {[string first "i" [string tolower $flag]]>=0} {
                lappend cmd -nocase
            }
            # A numeric flag selects the n-th match only; perform an indexed
            # match so subgroup references (\1, \2, ...) can be substituted
            # into the replacement by hand.
            set idx [regsub -all -- {[a-zA-Z]} $flag ""]
            if { [string is integer -strict $idx] } {
                set cmd [lreplace $cmd 0 0 regexp]
                lappend cmd -inline -indices -all -- $from $input
                set res [eval $cmd]
                if { [llength $res] } {
                    set which [lindex $res $idx]
                    # Create map for replacement of all subgroups, if necessary.
                    for {set i 1} {$i<[llength $res]} { incr i} {
                        foreach {b e} [lindex $res $i] break
                        lappend map "\\$i" [string range $input $b $e]
                    }
                    return [string replace $input [lindex $which 0] [lindex $which 1] [string map $map $to]]
                } else {
                    return $input
                }
            }
            # Most generic case
            lappend cmd -- $from $input $to
            return [eval $cmd]
        }
        "e" {
            set cmd regexp
            if { $to eq "" } { set to 0 }
            if {![string is integer -strict $to]} {
                # Fix: the options to [return] were swapped ("-error code"),
                # which raised a bad-option error instead of this message.
                return -code error "No proper group identifier specified for extraction"
            }
            lappend cmd -inline -- $from $input
            return [lindex [eval $cmd] $to]
        }
        "y" {
            return [string map [list $from $to] $input]
        }
    }
    return -code error "not yet implemented"
}

# ::cluster::extend::Sed -- Apply a sed-like script line by line
#
#      Normalise line endings to \n, apply SedLine to every line of the
#      input and join the results back with newlines.
proc ::cluster::extend::Sed {script input} {
    set input [string map [list "\r\n" "\n" "\r" "\n"] $input]
    set output ""
    foreach line [split $input \n] {
        set res [SedLine $script $line]
        append output ${res}\n
    }
    return [string trimright $output \n]
}


package provide cluster::extend 0.1


--------------------------------------------------------------------------------
/lib/cluster/json.tcl:
--------------------------------------------------------------------------------
# JSON parser / encoder.
# Copyright (C) 2014, 2015 Danyil Bohdan.
# License: MIT

### The public API: will remain backwards compatible for a major release
### version of this module.

namespace eval ::json {
    variable version 1.0.0
}

# Parse the string $str containing JSON into nested Tcl dictionaries.
# numberDictArrays: decode arrays as dictionaries with sequential integers
# starting with zero as keys; otherwise decode them as lists.
proc ::json::parse {str {numberDictArrays 0}} {
    # decode-value returns {parsedValue remainder}; any non-empty remainder
    # means the document had trailing content after the first JSON value.
    set result [::json::decode-value $str $numberDictArrays]
    if {[lindex $result 1] eq ""} {
        return [lindex $result 0]
    } else {
        error "trailing garbage after JSON data: $str"
    }
}

# Serialize nested Tcl dictionaries as JSON.
#
# numberDictArrays: encode dictionaries with keys {0 1 2 3 ...} as arrays, e.g.,
# {0 a 1 b} to ["a", "b"]. If numberDictArrays is not true stringify will try to
# produce objects from all Tcl lists and dictionaries unless explicitly told
# otherwise in the schema.
#
# schema: data types for values in $dictionaryOrValue. $schema consists of
# nested dictionaries where the keys are either those in $dictionaryOrValue or
# their superset and the values specify data types. Those values can each be
# one of "array", "boolean", "null", "number", "object" or "string" as well as
# "array:(element type)" and "object:(element type)".
#
# strictSchema: generate an error if there is no schema for a value in
# $dictionaryOrValue.
proc ::json::stringify {dictionaryOrValue {numberDictArrays 1} {schema ""}
        {strictSchema 0}} {
    set result {}

    lassign [::json::array-schema $schema] schemaArray _
    lassign [::json::object-schema $schema] schemaObject _

    if {$schema eq "string"} {
        return "\"$dictionaryOrValue\""
    }

    # A Tcl value of list-length <= 1 with no aggregate schema is treated as
    # a scalar; everything else is encoded as an array or an object.
    if {([llength $dictionaryOrValue] <= 1) &&
            !$schemaArray && !$schemaObject} {
        # Value.
        set isNumber [expr {
            ($schema in {"" "number"}) &&
            ([string is integer $dictionaryOrValue] ||
                    [string is double $dictionaryOrValue])
        }]
        set isBoolean [expr {
            ($schema in {"" "boolean"}) &&
            ($dictionaryOrValue in {"true" "false" 0 1})
        }]
        set isNull [expr {
            ($schema in {"" "null"}) &&
            ($dictionaryOrValue eq "null")
        }]

        if {$isNumber || $isBoolean || $isNull} {
            # Map 0/1 values explicitly marked as boolean to false/true.
            if {($schema eq "boolean") && ($dictionaryOrValue in {0 1})} {
                set dictionaryOrValue \
                        [string map {0 false 1 true} $dictionaryOrValue]
            }
            set result $dictionaryOrValue
        } elseif {$schema eq ""} {
            # Anything else without a schema is emitted as a JSON string.
            set result "\"$dictionaryOrValue\""
        } else {
            error "invalid schema \"$schema\" for value \"$dictionaryOrValue\""
        }
    } else {
        # Dictionary or list.
        set validDict [expr { [llength $dictionaryOrValue] % 2 == 0 }]
        set isArray [expr {
            ($numberDictArrays &&
                    !$schemaObject &&
                    $validDict &&
                    [number-dict? $dictionaryOrValue]) ||

            (!$numberDictArrays && $schemaArray)
        }]

        if {$isArray} {
            set result [::json::stringify-array $dictionaryOrValue \
                    $numberDictArrays $schema $strictSchema]
        } elseif {$validDict} {
            set result [::json::stringify-object $dictionaryOrValue \
                    $numberDictArrays $schema $strictSchema]
        } else {
            error "invalid schema \"$schema\" for value \"$dictionaryOrValue\""
        }
    }
    return $result
}

### The private API: can change at any time.

## Procedures used by ::json::stringify.

# Returns a list of two values: whether the $schema is a schema for an array and
# the "subschema" after "array:", if any.
proc ::json::array-schema {schema {numberDictArrays 1}} {
    # First element: is this an array schema ("array" or "array:<subtype>")?
    # Second element: the subschema after "array:" (empty when absent).
    set isArray [expr {
        ($schema eq "array") || [string match "array:*" $schema]
    }]
    return [list $isArray [string range $schema 6 end]]
}

# Returns a list of two values: whether the $schema is a schema for an object
# and the "subschema" after "object:", if any.
proc ::json::object-schema {schema {numberDictArrays 1}} {
    set isObject [expr {
        ($schema eq "object") || [string match "object:*" $schema]
    }]
    return [list $isObject [string range $schema 7 end]]
}

# Return 1 if the keys in dictionary are numbers 0, 1, 2... and 0 otherwise.
proc ::json::number-dict? {dictionary} {
    # Walk the key/value pairs, bailing out as soon as a key deviates from
    # the expected 0, 1, 2, ... sequence.
    set expected 0
    foreach {key value} $dictionary {
        if {$key != $expected} {
            return 0
        }
        incr expected
    }
    return 1
}

# Return the value for key $key from $schema if the key is present. Otherwise
# either return the default value "" or, if $strictSchema is true, generate an
# error.
proc ::json::get-schema-by-key {schema key {strictSchema 0}} {
    if {[dict exists $schema $key]} {
        set valueSchema [dict get $schema $key]
    } else {
        if {$strictSchema} {
            error "missing schema for key \"$key\""
        } else {
            set valueSchema ""
        }
    }
    # Explicit return: the previous code relied on [set] being the last
    # command executed, which is fragile under maintenance.
    return $valueSchema
}

# Encode $array (a number-keyed dict or a plain list, depending on
# $numberDictArrays) as a JSON array, recursing through ::json::stringify
# for each element with the relevant subschema.
proc ::json::stringify-array {array {numberDictArrays 1} {schema ""}
        {strictSchema 0}} {
    set arrayElements {}
    lassign [array-schema $schema] schemaArray subschema
    if {$numberDictArrays} {
        foreach {key value} $array {
            if {($schema eq "") || $schemaArray} {
                set valueSchema $subschema
            } else {
                set valueSchema [::json::get-schema-by-key \
                        $schema $key $strictSchema]
            }
            lappend arrayElements [::json::stringify $value 1 \
                    $valueSchema $strictSchema]
        }
    } else { ;# list arrays
        foreach value $array valueSchema $schema {
            if {($schema eq "") || $schemaArray} {
                set valueSchema $subschema
            }
            lappend arrayElements [::json::stringify $value 0 \
                    $valueSchema $strictSchema]
        }
    }
    # Explicit return (was an implicit return of the last [set]).
    return "\[[join $arrayElements {, }]\]"
}

# Encode the Tcl dictionary $dictionary as a JSON object, recursing through
# ::json::stringify for each value with the relevant subschema.
proc ::json::stringify-object {dictionary {numberDictArrays 1} {schema ""}
        {strictSchema 0}} {
    set objectDict {}
    lassign [object-schema $schema] schemaObject subschema
    foreach {key value} $dictionary {
        if {($schema eq "") || $schemaObject} {
            set valueSchema $subschema
        } else {
            set valueSchema [::json::get-schema-by-key \
                    $schema $key $strictSchema]
        }
        lappend objectDict "\"$key\": [::json::stringify $value \
                $numberDictArrays $valueSchema $strictSchema]"
    }
    # Explicit return (was an implicit return of the last [set]).
    return "{[join $objectDict {, }]}"
}

## Procedures used by ::json::parse.

# Choose how to decode a JSON value.
Return a list consisting of the result of 200 | # parsing the initial part of $str and the remainder of $str that was not 201 | # parsed. E.g., ::json::decode-value {"string", 55} returns {{string} {, 55}}. 202 | proc ::json::decode-value {str {numberDictArrays 0}} { 203 | set str [string trimleft $str] 204 | switch -regexp -- $str { 205 | {^\"} { 206 | return [::json::decode-string $str] 207 | } 208 | {^[0-9-]} { 209 | return [::json::decode-number $str] 210 | } 211 | {^\{} { 212 | return [::json::decode-object $str $numberDictArrays] 213 | } 214 | {^\[} { 215 | return [::json::decode-array $str $numberDictArrays] 216 | } 217 | {^(true|false|null)} { 218 | return [::json::decode-boolean-or-null $str] 219 | } 220 | default { 221 | error "cannot decode value as JSON: \"$str\"" 222 | } 223 | } 224 | } 225 | 226 | # Return a list of two elements: the initial part of $str parsed as "true", 227 | # "false" or "null" and the remainder of $str that wasn't parsed. 228 | proc ::json::decode-boolean-or-null {str} { 229 | regexp {^(true|false|null)} $str value 230 | return [list $value [string range $str [string length $value] end]] 231 | } 232 | 233 | # Return a list of two elements: the initial part of $str parsed as a JSON 234 | # string and the remainder of $str that wasn't parsed. 235 | proc ::json::decode-string {str} { 236 | if {[regexp {^"((?:[^"\\]|\\.)*)"} $str _ result]} { 237 | return [list \ 238 | [subst -nocommands -novariables $result] \ 239 | [string range $str [expr {2 + [string length $result]}] end]] 240 | # Add two to result length to account for the double quotes 241 | # around the string. 242 | } else { 243 | error "can't parse JSON string: $str" 244 | } 245 | } 246 | 247 | # Return a list of two elements: the initial part of $str parsed as a JSON 248 | # number and the remainder of $str that wasn't parsed. 
# ::json::decode-number -- Decode a JSON number
#
#       Parse a JSON number at the head of $str. Returns a two-element
#       list: the number and the remainder of $str that wasn't parsed.
#
#       Bug fix: the exponent group was written "(:?...)", a typo for the
#       non-capturing group "(?:...)", which made the parser accept a stray
#       colon (e.g. "1:e5"); the fractional part and the exponent also
#       accepted an empty digit run ("1.", "1e"), which is not a valid JSON
#       number per RFC 8259.
proc ::json::decode-number {str} {
    #                  [ sign ][ integer part ][  fraction ][   exponent   ]
    if {[regexp -- {^-?(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?} \
            $str result]} {
        return [list $result [string range $str [string length $result] end]]
    } else {
        error "can't parse JSON number: $str"
    }
}

# ::json::decode-array -- Decode a JSON array
#
#       Parse a JSON array at the head of $str; returns the parsed array
#       and the unparsed remainder. Arrays become dicts with keys
#       {0 1 2 ...} when $numberDictArrays is true, plain lists otherwise.
#       E.g., if $numberDictArrays == 1 then ["Hello, World!" 2048] becomes
#       {0 {Hello, World!} 1 2048}; otherwise {{Hello, World!} 2048}.
proc ::json::decode-array {str {numberDictArrays 0}} {
    set strInitial $str
    set result {}
    set value {}
    set i 0
    if {[string index $str 0] ne "\["} {
        error "can't parse JSON array: $strInitial"
    } else {
        set str [string range $str 1 end]
    }
    while 1 {
        # Empty array => break out of the loop.
        if {[string index [string trimleft $str] 0] eq "\]"} {
            set str [string range [string trimleft $str] 1 end]
            break
        }

        # Value.
        lassign [::json::decode-value $str $numberDictArrays] value str
        set str [string trimleft $str]
        if {$numberDictArrays} {
            lappend result $i
        }
        lappend result $value

        # Separator: "]" terminates the array, "," continues it.
        set sep [string index $str 0]
        set str [string range $str 1 end]
        if {$sep eq "\]"} {
            break
        } elseif {$sep ne ","} {
            error "can't parse JSON array: $strInitial"
        }
        incr i
    }
    return [list $result $str]
}
# ::json::decode-object -- Decode a JSON object
#
#       Parse a JSON object at the head of $str. Returns a two-element
#       list: the object as a Tcl dictionary and the unparsed remainder of
#       $str.
proc ::json::decode-object {str {numberDictArrays 0}} {
    set strInitial $str
    set result {}
    set item {}
    if {[string index $str 0] ne "\{"} {
        error "can't parse JSON object: $strInitial"
    }
    set str [string range $str 1 end]
    while 1 {
        set str [string trimleft $str]
        # Empty object (or closing brace) => done.
        if {[string index $str 0] eq "\}"} {
            set str [string range $str 1 end]
            break
        }

        # Key string.
        lassign [::json::decode-string $str] item str
        set str [string trimleft $str]
        lappend result $item

        # Mandatory ":" between key and value.
        set sep [string index $str 0]
        set str [string range $str 1 end]
        if {$sep ne ":"} {
            error "can't parse JSON object: $strInitial"
        }

        # Value.
        lassign [::json::decode-value $str $numberDictArrays] item str
        set str [string trimleft $str]
        lappend result $item

        # Separator: "}" terminates the object, "," continues it.
        set sep [string index $str 0]
        set str [string range $str 1 end]
        if {$sep eq "\}"} {
            break
        } elseif {$sep ne ","} {
            error "can't parse JSON object: $str"
        }
    }
    return [list $result $str]
}
--------------------------------------------------------------------------------
/lib/cluster/mount.tcl:
--------------------------------------------------------------------------------
##################
## Module Name     --  cluster::mount
## Original Author --  Emmanuel Frecon - emmanuel@sics.se
## Description:
##
##      This module provides mounting helpers.
##
##################

package require Tcl 8.6

package require cluster::environment
package require cluster::tooling
package require cluster::utils
package require cluster::unix
package require atExit

namespace eval ::cluster::mount {
    # Dash-led options and module state, kept under their own namespace.
    namespace eval vars {
        # FUSE helpers to try, keyed by archive type; each entry is a list
        # of commands (mounter binary, possibly followed by options).
        variable -mount {zip {{fuse-zip -r} archivemount} tar archivemount}
        # Binary used to unmount FUSE mounts.
        variable -umount "fusermount"
        # Cached absolute location of the unmount binary.
        variable umount ""
    }
    namespace export {[a-z]*}
    namespace path [namespace parent]
    namespace ensemble create -command ::mount
    namespace import [namespace parent]::utils::log
}
# ::cluster::mount::add -- Mount external source
#
#       Arrange for a source (remote URL or local archive file) to be
#       mounted onto a destination. The -order option lists the techniques
#       to try, in order: "internal" (Tcl VFS, in-process only) and/or
#       "external" (FUSE, visible to spawned processes). All remaining
#       options are handed to the technique's implementation.
#
# Arguments:
#       src   Source of information, a remote URL or a local (archive) file
#       dst   Where to mount, this can be an internal to process path
#       args  Dash-led options and their values, only -order understood here
#
# Results:
#       The (lowercased) technique keyword that succeeded, or an empty
#       string when mounting was not possible.
#
# Side Effects:
#       External mounting makes the content of the remote resource or file
#       available to other processes owned by the user for the life-time of
#       the operation.
proc ::cluster::mount::add { src dst args } {
    # External (FUSE) mounting cannot work on Windows, so only attempt
    # internal mounting there.
    if { [lsearch [split [::platform::generic] -] "win32"] >= 0 } {
        utils getopt args -order order internal
    } else {
        utils getopt args -order order {external internal}
    }

    # Try each requested technique in turn, returning as soon as one works.
    foreach technique $order {
        set technique [string tolower $technique]
        switch -glob -- $technique {
            "i*" {
                if { [AddInternal $src $dst {*}$args] } {
                    return $technique
                }
            }
            "e*" {
                if { [AddExternal $src $dst {*}$args] } {
                    return $technique
                }
            }
        }
    }
    return ""
}


# ::cluster::mount::origin -- Where does a file/dir come from
#
#       Detect whether $fname lives under an in-process (Tcl VFS) mount or
#       under a FUSE-based external mount.
#
# Arguments:
#       fname  Path to file to detect origin of
#       type_  Name of a variable receiving a description of the mount type
#
# Results:
#       One of the strings "internal", "external" or the empty string.
proc ::cluster::mount::origin { fname {type_ ""} } {
    # The type of the origin FS, when found, is reported through this.
    if { $type_ ne "" } {
        upvar $type_ type
    }

    # Look for the file under an internally mounted VFS, when TclVFS is
    # available.
    if { [catch {package require vfs} ver] == 0 } {
        set src [::vfs::filesystem fullynormalize $fname]
        foreach fs [::vfs::filesystem info] {
            if { [string first $fs $src] == 0 } {
                # Force leading :: namespace marker on the handler of the
                # matching filesystem; the namespace right after ::vfs:: in
                # the handler name gives the type of the VFS used.
                set handler ::[string trimleft [lindex [::vfs::filesystem info $fs] 0] :]
                set type [lindex [split [string map [list "::" ":"] $handler] :] 2]
                return "internal"
            }
        }
    }

    # Look for the file under an externally (FUSE) mounted FS, never
    # possible on Windows.
    if { [lsearch [split [::platform::generic] -] "win32"] < 0 } {
        set src [file normalize $fname]
        foreach {dev fs t opts } [unix mounts] {
            if { [string first $fs $src] == 0 && [string match -nocase *fuse* $t] } {
                set type $t
                return "external"
            }
        }
    }

    return ""
}
# ::cluster::mount::access -- Cache in file/dir
#
#       Files mounted internally (TclVFS) are only visible to this process.
#       This procedure copies such files/dirs into a locally accessible
#       temporary location so that spawned external processes can use them.
#
# Arguments:
#       fname   Name of file/dir to make accessible
#       tmpdir  Temporary directory to store at, good default when empty
#       force   Force the copy even for already-accessible files
#       gc      Clean the copy away at exit
#
# Results:
#       A path accessible to external processes; this is $fname itself when
#       no caching was necessary.
proc ::cluster::mount::access { fname { tmpdir "" } { force 0 } { gc 1 } } {
    # Files under an internal VFS must be cached out to become visible to
    # other processes.
    if { [origin $fname type] eq "internal" } {
        log INFO "Temporarily caching $fname since mounted as $type VFS"
        set force 1
    }

    if { !$force } {
        return $fname
    }

    # Recursively copy the file/dir into a good candidate temporary
    # directory, arranging for cleanup at exit when requested.
    set dst [utils tmpfile automount [string tolower $type] $tmpdir]
    file mkdir $dst
    log DEBUG "(Recursively) copying from $fname to $dst"
    file copy -force -- $fname $dst
    if { $gc } {
        atExit [list file delete -force -- $dst]
    }
    return [file join $dst [file tail $fname]]
}


####################################################################
#
# Procedures below are internal to the implementation, they shouldn't
# be changed unless you wish to help...
#
####################################################################
# ::cluster::mount::AddInternal -- In-process mount
#
#       Mount a remote location or (archive) file onto a destination using
#       TclVFS services; the mounted content is visible to this process
#       only, not to spawned external processes.
#
# Arguments:
#       src   Source of information, a remote URL or a local (archive) file
#       dst   Where to mount, this can be an internal to process path
#       args  Dash-led options and their values, none supported yet
#
# Results:
#       1 on mount success, 0 otherwise
proc ::cluster::mount::AddInternal { src dst args } {
    if { [catch {package require vfs} ver] != 0 } {
        log ERROR "No VFS support, will not be able to mount in process!"
        return 0
    }

    set sep [string first "://" $src]
    if { $sep >= 0 } {
        # URL: pick the vfs implementation from the scheme.
        set scheme [string range $src 0 [expr {$sep - 1}]]
        switch -- $scheme {
            "http" -
            "https" {
                if { [catch {package require vfs::http} ver] == 0 } {
                    log NOTICE "Internally mounting $src onto $dst"
                    ::vfs::http::Mount $src $dst
                } else {
                    log WARN "Cannot mount from $src internally, don't know about http!"
                    return 0
                }
            }
            "file" {
                # Strip the scheme (same offset as elsewhere in this
                # module) and recurse with the bare path.
                return [AddInternal [string range $src [expr {$sep+2}] end] $dst {*}$args]
            }
            default {
                if { [catch {package require vfs::$scheme} ver] == 0 } {
                    log NOTICE "Internally mounting $src onto $dst"
                    ::vfs::${scheme}::Mount $src $dst
                } else {
                    log WARN "Cannot mount from $src internally, don't know about $scheme!"
                    return 0
                }
            }
        }
    } else {
        # Local file: pick the vfs implementation from the file extension.
        set ext [string trimleft [file extension $src] .]
        if { [catch {package require vfs::$ext} ver] == 0 } {
            log NOTICE "Internally mounting $src onto $dst"
            ::vfs::${ext}::Mount $src $dst
        } else {
            log WARN "Cannot mount from $src internally, don't know about $ext!"
            return 0
        }
    }
    return 1
}
#
# Arguments:
#       src   Source of information, a remote URL or a local (archive) file
#       dst   Where to mount, this can be an internal to process path
#       args  Dash-led options and their values, passed further to mounter
#
# Results:
#       1 on mount success, 0 otherwise
#
# Side Effects:
#       FUSE mounting makes the content of the mounted resources available
#       to all processes run by the user during the machinery session.
proc ::cluster::mount::AddExternal { src dst args } {
    # No FUSE on windows, don't even try
    if { [lsearch [split [::platform::generic] -] "win32"] >= 0 } {
        log NOTICE "No FUSE support on Windows!"
        return 0
    }

    # Isolate by scheme so the source can be a remote location (no remote
    # support yet). Otherwise, typically for archives, use the FUSE helpers
    # pointed at by the -mount option to mount the archive onto a directory.
    set i [string first "://" $src]
    if { $i >= 0 } {
        incr i -1
        set proto [string range $src 0 $i]
        switch -- $proto {
            "file" {
                return [AddExternal [string range $src [expr {$i+3}] end] $dst {*}$args]
            }
        }
        return 0
    }

    # Guess the type of the file
    set type [FileType $src]

    # Look for a working mounter for the type of the file and try using it.
    # Mounters can carry pre-defined options in addition to the binary;
    # those options are appended to the mounting command together with the
    # caller-provided arguments.
    foreach {t mounters} ${vars::-mount} {
        if { $t ne $type } {
            continue
        }
        # Found matching type: try all candidate commands, return ASAP.
        foreach cmd $mounters {
            # Extract mounter binary and look for it in the path.
            set bin [lindex $cmd 0]
            set mounter [auto_execok $bin]
            set opts [lrange $cmd 1 end]
            if { $mounter ne "" } {
                # Mounter found in path: create mountpoint, mount and make
                # sure we clean up at exit.
                if { ![file isdirectory $dst] } {
                    file mkdir $dst
                }
                log NOTICE "Externally mounting $src as $t file onto $dst with $cmd"
                tooling run -- $mounter {*}$opts {*}$args $src $dst
                atExit [list [namespace current]::RemoveExternal $dst]
                return 1; # ASAP
            } else {
                log WARN "$bin not found to mount $src onto $dst"
            }
        }
    }

    log WARN "Cannot mount from $src externally, don't know how to mount!"
    return 0
    # NOTE: a trailing unreachable "return 1" (dead code: both branches
    # above always return) has been removed.
}


# ::cluster::mount::RemoveExternal -- Remove external mount
#
#       Unmount an existing FUSE mount, and clean away the mountpoint
#       directory once it is empty.
#
# Arguments:
#       dst    Mountpoint to unmount from
#       rmdir  Should we clean away directory mountpoint (default to yes)
#
# Results:
#       None.
#
# Side Effects:
#       Will call FUSE unmount
proc ::cluster::mount::RemoveExternal { dst { rmdir 1 } } {
    # Cache the absolute path of the FUSE unmounter.
    if { $vars::umount eq "" } {
        set vars::umount [auto_execok ${vars::-umount}]
    }

    # Unmount if we can, synchronously to make sure we finish.
    if { $vars::umount ne "" } {
        # XX: Should we check this is an external mount?
        log NOTICE "Unmounting $dst, this might take time..."
        tooling run -- $vars::umount -qu $dst; # quiet and unmount
    }

    # Test emptiness of directory and remove it if we are asked to; warn in
    # all other cases so failed unmounts remain visible.
    if { [llength [glob -nocomplain -directory $dst -tails -- *]] } {
        log WARN "Directory at $dst not empty, cannot cleanup properly"
    } elseif { $rmdir } {
        log INFO "Removing dangling directory $dst"
        if { [catch {file delete -force -- $dst} err] } {
            log WARN "Cannot remove dangling directory: $err"
        }
    }
}


# ::cluster::mount::FileType -- Guess file type
#
#       Crudely guess the type of a file from its extension. tcllib has
#       implementations for this, but we keep dependencies to a minimum and
#       this is enough for our purpose.
#
# Arguments:
#       fpath  Path to file
#
# Results:
#       Type of file; only archives are recognised, i.e. "tar" and "zip",
#       anything else yields an empty string.
proc ::cluster::mount::FileType { fpath } {
    switch -glob -nocase -- $fpath {
        "*.zip" {
            return "zip"
        }
        "*.tar" -
        "*.tgz" -
        "*.tar.gz" -
        "*.tar.Z" -
        "*.tar.bz2" {
            return "tar"
        }
    }

    return ""; # Catch all
}


package provide cluster::mount 0.1
--------------------------------------------------------------------------------
/lib/cluster/pkgIndex.tcl:
--------------------------------------------------------------------------------
# Tcl package index file, version 1.1
# This file is generated by the "pkg_mkIndex" command
# and sourced either when an application starts up or
# by a "package unknown" script.
# It invokes the "package ifneeded" command to set up package-related
# information so that packages will be loaded automatically in response to
# "package require" commands. When this script is sourced, the variable
# $dir must contain the full path name of this file's directory.

package ifneeded cluster 0.4 [list source [file join $dir cluster.tcl]]
package ifneeded cluster::swarm 0.3 [list source [file join $dir swarm.tcl]]
package ifneeded cluster::swarmmode 0.3 [list source [file join $dir swarmmode.tcl]]
package ifneeded cluster::vcompare 0.1 [list source [file join $dir vcompare.tcl]]
package ifneeded cluster::virtualbox 0.1 [list source [file join $dir virtualbox.tcl]]
package ifneeded cluster::unix 0.3 [list source [file join $dir unix.tcl]]
package ifneeded cluster::environment 0.2 [list source [file join $dir environment.tcl]]
package ifneeded cluster::tooling 0.2 [list source [file join $dir tooling.tcl]]
package ifneeded cluster::extend 0.1 [list source [file join $dir extend.tcl]]
package ifneeded cluster::utils 0.1 [list source [file join $dir utils.tcl]]
package ifneeded cluster::mount 0.1 [list source [file join $dir mount.tcl]]
package ifneeded proctrace 0.2 [list source [file join $dir proctrace.tcl]]
package ifneeded atExit 1.0 [list source [file join $dir atexit.tcl]]
package ifneeded zipper 0.12 [list source [file join $dir zipper.tcl]]
--------------------------------------------------------------------------------
/lib/cluster/proctrace.tcl:
--------------------------------------------------------------------------------
##################
## Module Name     --  proctrace.tcl
## Original Author --  Emmanuel Frecon
## Description:
##
##      This module is meant to be a last resort debugging facility.
## It will arrange for being able to trace execution either at the entry
## of procedures, or of all commands within procedures. The defaults are
## to trace all procedures, except the ones from a few packages known to
## slow execution down. See the beginning of the library for an
## explanation of the options.
##
##################

package require Tcl 8.6

namespace eval ::proctrace {
    namespace eval vars {
        # File to trace execution to (standard error when empty).
        variable -file ""
        # Patterns matched against the names of current and future
        # procedures; only matching procedures are considered for tracing.
        variable -allowed {*}
        # Patterns for procedures that should NOT be traced; a subset of
        # the allowed ones.
        variable -denied {::tcl::* ::aes::* ::logger::*}
        # Boolean: trace each command block within procedures, not only
        # procedure entry.
        variable -detailed off

        variable fd stderr;     # File descriptor where to trace
        variable version 0.2;   # Current package version.
        variable enabled 1;     # Is tracing enabled
    }

    # Automatically export all procedures starting with lower case and
    # create an ensemble for an easier API.
    namespace export {[a-z]*}
    namespace ensemble create

}
#
# Side Effects:
#       Replaces the ::proc command and starts tracing: a LOT of output!
proc ::proctrace::init { args } {
    # Collect the options supported by this procedure from the dash-led
    # variables of the vars namespace.
    set opts [lmap v [info vars vars::-*] {
        string trimleft [string range $v [string last "::-" $v] end] :
    }]

    # "Parse" the options: set the matching variables, reject unknowns.
    foreach {opt val} $args {
        if { $opt ni $opts } {
            return -code error "$opt unknown options, should be [join $opts ,\ ]"
        }
        set vars::$opt $val
    }

    # Open the trace file for output, if relevant.
    if { ${vars::-file} ne "" } {
        set vars::fd [open ${vars::-file} w]
    }

    # Reroute procedure declaration through our own command so we can
    # automagically install execution traces on future procedures.
    rename ::proc ::proctrace::RealProc
    interp alias {} ::proc {} ::proctrace::Proc

    # Catch up with the procedures that were created before init was
    # called, so their execution is captured as well.
    foreach existing [AllProcs] {
        if { [Tracable $existing] } {
            Follow $existing 2
        }
    }
}

# Disable trace output without removing the installed traces.
proc ::proctrace::terminate {} {set ::proctrace::vars::enabled 0}
# Re-enable trace output.
proc ::proctrace::resume {} {set ::proctrace::vars::enabled 1}
# ::proctrace::AllProcs -- List all declared procedures
#
#       Return all procedures declared in $base and, recursively, in all of
#       its descendant namespaces.
#
# Arguments:
#       base  Namespace at which to start.
#
# Results:
#       List of fully-qualified procedure names.
proc ::proctrace::AllProcs { { base "::" } } {
    # Procedures of the current namespace...
    set found [info procs [string trimright $base :]::*]
    # ... and of all children, recursively.
    foreach child [namespace children $base] {
        lappend found {*}[AllProcs $child]
    }
    return $found
}


# ::proctrace::Follow -- Install traces
#
#       Install an execution trace on a procedure so that Trace is invoked
#       when it is entered (or, with -detailed on, on each command step).
#
# Arguments:
#       name  Name (fully-qualified) of procedure.
#       lvl   Call stack level at which to execute trace installation.
#
# Results:
#       None.
proc ::proctrace::Follow { name {lvl 1}} {
    set op [expr {[string is true ${vars::-detailed}] ? "enterstep" : "enter"}]
    uplevel $lvl [list trace add execution $name $op [list ::proctrace::Trace $name]]
}


# ::proctrace::Proc -- Capturing procedure
#
#       Replacement for ::proc: create the procedure through the original
#       command, then install traces when appropriate.
#
# Arguments:
#       name     Name of procedure
#       arglist  List of arguments to procedure
#       body     Procedure body.
#
# Results:
#       None.
proc ::proctrace::Proc { name arglist body } {
    uplevel 1 [list ::proctrace::RealProc $name $arglist $body]
    if { [Tracable $name] } {
        Follow $name 2
    }
}
#
# Arguments:
#       name     Name of procedure
#       command  Command being executed
#       op       Operation (enter or enterstep, not used)
#
# Results:
#       None.
#
# Side Effects:
#       Write one trace line on the globally allocated file descriptor.
proc ::proctrace::Trace { name command op } {
    if {!$::proctrace::vars::enabled} {return}
    puts $vars::fd "$name >> $command"
    flush $vars::fd
}

# ::proctrace::Tracable -- Should procedure be traced
#
#       Decide whether a procedure should be traced according to the
#       -allowed and -denied patterns that are global to this library.
#
# Arguments:
#       name  Fully-qualified procedure name
#
# Results:
#       1 if the procedure should be traced, 0 otherwise.
proc ::proctrace::Tracable { name } {
    # Allowance: the name must match one of the -allowed patterns...
    set decision 0
    foreach ptn ${vars::-allowed} {
        if { [string match $ptn $name] } {
            set decision 1
            break
        }
    }
    # ... and match none of the -denied patterns.
    if { $decision } {
        foreach ptn ${vars::-denied} {
            if { [string match $ptn $name] } {
                set decision 0
                break
            }
        }
    }
    return $decision
}

package provide proctrace $::proctrace::vars::version
--------------------------------------------------------------------------------
/lib/cluster/swarm.tcl:
--------------------------------------------------------------------------------
package require cluster::utils

namespace eval ::cluster::swarm {
    # Encapsulates variables global to this namespace under their own
    # namespace, an idea originating from http://wiki.tcl.tk/1489.
6 | # Variables which name start with a dash are options and which 7 | # values can be changed to influence the behaviour of this 8 | # implementation. 9 | namespace eval vars { 10 | # Extension for token storage files 11 | variable -ext .tkn 12 | # Name of master agent and agents 13 | variable -agent "swarm-agent" 14 | variable -master "swarm-agent-master" 15 | # List of "on" state 16 | variable -running {running timeout} 17 | } 18 | # Export all lower case procedure, arrange to be able to access 19 | # commands from the parent (cluster) namespace from here and 20 | # create an ensemble command called swarm (note the leading :: to 21 | # make this a top-level command!) to ease API calls. 22 | namespace export {[a-z]*} 23 | namespace path [namespace parent] 24 | namespace ensemble create -command ::swarm 25 | namespace import [namespace parent]::Machines \ 26 | [namespace parent]::IsRunning \ 27 | [namespace parent]::Attach \ 28 | [namespace parent]::Detach \ 29 | [namespace parent]::Create \ 30 | [namespace parent]::CacheFile 31 | namespace import [namespace parent]::utils::log 32 | } 33 | 34 | 35 | # ::cluster::swarm::master -- Master description 36 | # 37 | # This procedure looks up the swarm master out of a cluster 38 | # description and returns its vm description. 39 | # 40 | # Arguments: 41 | # cluster List of machine description dictionaries. 42 | # 43 | # Results: 44 | # Virtual machine description of swarm master, empty if none. 45 | # 46 | # Side Effects: 47 | # None. 
# ::cluster::swarm::master -- Master description
#
#       Look up the swarm master among the machines of a cluster
#       description and return its VM description.
#
# Arguments:
#       cluster  List of machine description dictionaries.
#
# Results:
#       Virtual machine description of the swarm master, empty if none.
proc ::cluster::swarm::master { cluster } {
    foreach vm [Machines $cluster] {
        if { [dict exists $vm -master] && [string is true [dict get $vm -master]] } {
            return $vm
        }
    }
    return {}
}


# ::cluster::swarm::info -- Dump swarm master information
#
#       NOTE(review): this proc shadows the [info] builtin for unqualified
#       calls made from within the ::cluster::swarm namespace; use ::info
#       there if the builtin is ever needed.
proc ::cluster::swarm::info { cluster } {
    set master [master $cluster]
    if { $master eq "" } {
        log WARN "Cluster has no swarm master"
        return
    }
    if { ![IsRunning $master] } {
        log WARN "Cluster not bound or master not running"
        return
    }
    log NOTICE "Getting cluster info via [dict get $master -name]"
    Attach $master -swarm
    tooling docker info
}

# ::cluster::swarm::recapture -- Refresh the swarm's list of live machines
proc ::cluster::swarm::recapture { cluster } {
    set master [master $cluster]
    if { $master eq "" } {
        log WARN "Cluster has no swarm master"
        return
    }
    if { ![IsRunning $master] } {
        log WARN "Cluster not bound or master not running"
        return
    }
    log NOTICE "Capturing current list of live machines in swarm"
    Attach $master
    tooling docker restart ${vars::-master}
}


# ::cluster::swarm::token -- Generate a token
#
#       Generate a swarm token for the cluster if necessary and return it.
#       The token is cached in a hidden file next to the YAML description
#       file (extension ${vars::-ext}); generation happens through a
#       temporary virtual machine.
#
# Arguments:
#       yaml    Path to YAML description for cluster
#       force   Force token (re)generation
#       driver  Driver to use for token generation
#
# Results:
#       The token.
proc ::cluster::swarm::token { yaml { force 0 } { driver virtualbox } } {
    set token ""

    # Generate file name for token caching out of the yaml path.
    set tkn_path [CacheFile $yaml ${vars::-ext}]

    if { [file exists $tkn_path] && [string is false $force] } {
        # Read back from the cache.
        log NOTICE "Reading token from $tkn_path"
        set fd [open $tkn_path]
        set token [string trim [read $fd]]
        close $fd
    } else {
        # Generate a new token and cache it.
        log NOTICE "Generating new token"
        set token [Token $driver]
        if { $token ne "" } {
            log DEBUG "Storing new generated token in $tkn_path"
            set fd [open $tkn_path "w"]
            puts -nonewline $fd $token
            close $fd
        }
    }
    log INFO "Token for cluster definition at $yaml is $token"
    return $token
}


####################################################################
#
# Procedures below are internal to the implementation, they shouldn't
# be changed unless you wish to help...
#
####################################################################


# ::cluster::swarm::Token -- Generate token
#
#       Generate a new swarm token through "run --rm swarm create", either
#       locally (driver empty or "none", leaving an extra image on the
#       local machine) or inside a temporary virtual machine that is killed
#       and removed once the token has been generated.
#
# Arguments:
#       driver  Default driver to use for (temporary) VM creation.
#
# Results:
#       Generated token
#
# Side Effects:
#       Create a (temporary) virtual machine and component for swarm token
#       creation.
proc ::cluster::swarm::Token { {driver none} } {
    set token ""
    if { $driver eq "none" || $driver eq "" } {
        Detach;   # Ensure we are running locally...
        log INFO "Creating swarm token..."
        set token [tooling docker -return -- run --rm swarm create]
        log NOTICE "Created cluster token $token"
    } else {
        set nm [utils temporary "tokeniser"]
        log NOTICE "Creating machine $nm for token creation"
        set vm [dict create -name $nm -driver $driver]
        if { [Create $vm] ne "" } {
            Attach $vm
            log INFO "Creating swarm token..."
            set token [tooling docker -return -- run --rm swarm create]
            log NOTICE "Created cluster token $token"
            tooling machine kill $nm;   # We want to make this quick!
            tooling machine rm $nm
        }
    }
    return $token
}


package provide cluster::swarm 0.3
--------------------------------------------------------------------------------
/lib/cluster/tooling.tcl:
--------------------------------------------------------------------------------
package require cluster::vcompare
package require cluster::utils

namespace eval ::cluster::tooling {
    # Encapsulates variables global to this namespace under their own
    # namespace, an idea originating from http://wiki.tcl.tk/1489.
    # Variables which name start with a dash are options and which
    # values can be changed to influence the behaviour of this
    # implementation.
10 | namespace eval vars { 11 | # Path to common executables 12 | variable -machine docker-machine 13 | variable -docker docker 14 | variable -compose docker-compose 15 | variable -rsync rsync 16 | # Force attachment via command line options 17 | variable -sticky off 18 | # Object generation identifiers 19 | variable generator 0 20 | # CLI commands supported by tools (on demand) 21 | variable commands {docker "" compose "" machine ""} 22 | # version numbers for our tools (on demand) 23 | variable versions {docker "" compose "" machine ""} 24 | } 25 | # Export all lower case procedure, arrange to be able to access 26 | # commands from the parent (cluster) namespace from here and 27 | # create an ensemble command called swarmmode (note the leading :: to make 28 | # this a top-level command!) to ease API calls. 29 | namespace export {[a-z]*} 30 | namespace path [namespace parent] 31 | namespace ensemble create -command ::tooling 32 | namespace import [namespace parent]::utils::log 33 | } 34 | 35 | 36 | # ::cluster::runtime -- Runtime check 37 | # 38 | # Check for the presence of (and access to) the underlying necessary docker 39 | # tools and execute a command when they are not available. 40 | # 41 | # Arguments: 42 | # cmd Command to execute when one tool is not available 43 | # 44 | # Results: 45 | # A boolean telling if all runtime configuration is proper (or not) 46 | # 47 | # Side Effects: 48 | # None 49 | proc ::cluster::tooling::runtime { { cmd {} } } { 50 | foreach tool [list docker compose machine] { 51 | if { [auto_execok [set vars::-[string trimleft $tool -]]] eq "" } { 52 | log FATAL "Cannot access '$tool'!" 
53 | if { [llength $cmd] } { 54 | eval {*}$cmd 55 | } 56 | return 0 57 | } 58 | } 59 | return 1 60 | } 61 | 62 | 63 | proc ::cluster::tooling::commands { tool } { 64 | switch -nocase -- $tool { 65 | compose - 66 | machine - 67 | docker { 68 | if { [dict get $vars::commands $tool] eq "" } { 69 | dict set vars::commands $tool [CommandsQuery $tool] 70 | log DEBUG "Current set of commands for $tool is\ 71 | [join [dict get $vars::commands $tool] ,\ ]" 72 | } 73 | return [dict get $vars::commands $tool] 74 | } 75 | } 76 | return {} 77 | } 78 | 79 | 80 | # ::cluster::tooling::version -- (Cached) version of underlying tools. 81 | # 82 | # This will return the core version number of one of the 83 | # underlying tools that we support. The version number is 84 | # cached in a global variable so it will only be queried once. 85 | # 86 | # Arguments: 87 | # tool Tool to query, a string, one of: docker, machine or compose 88 | # 89 | # Results: 90 | # Return the version number or an empty string. 91 | # 92 | # Side Effects: 93 | # None. 94 | proc ::cluster::tooling::version { tool } { 95 | switch -nocase -- $tool { 96 | compose - 97 | machine - 98 | docker { 99 | if { [dict get $vars::versions $tool] eq "" } { 100 | dict set vars::versions $tool [VersionQuery $tool] 101 | log DEBUG "Current version for $tool is\ 102 | [dict get $vars::versions $tool]" 103 | } 104 | return [dict get $vars::versions $tool] 105 | } 106 | } 107 | return "" 108 | } 109 | 110 | # ::cluster::tooling::docker -- Run docker binary 111 | # 112 | # Run the docker binary registered as part as the global library 113 | # options under the control of this program. This wrapper will 114 | # turn on extra debbuggin in docker itself whenever the 115 | # verbosity level of the library is greated or equal than DEBUG. 116 | # 117 | # Arguments: 118 | # args Arguments to docker command (compatible with Run) 119 | # 120 | # Results: 121 | # Result of command. 122 | # 123 | # Side Effects: 124 | # None. 
proc ::cluster::tooling::docker { args } {
    # Isolate -- that will separate options to procedure from options that
    # would be for command.  Using -- is MANDATORY if you want to specify
    # options to the procedure.
    set sep [lsearch $args "--"]
    if { $sep >= 0 } {
        set opts [lrange $args 0 [expr {$sep-1}]]
        set args [lrange $args [expr {$sep+1}] end]
    } else {
        set opts [list]
    }

    # Put docker in debug mode when we are ourselves at debug level.
    if { [utils outlog] >= 7 } {
        set args [linsert $args 0 --debug]
    }
    # When -sticky is on, translate the DOCKER_* connection environment
    # variables into explicit command-line options.
    if { [string is true ${vars::-sticky}] } {
        if { [info exists ::env(DOCKER_TLS_VERIFY)] \
                && [string is true $::env(DOCKER_TLS_VERIFY)] } {
            set args [linsert $args 0 --tls --tlsverify=true]
        }
        if { [info exists ::env(DOCKER_CERT_PATH)] } {
            foreach {opt fname} [list cacert ca.pem cert cert.pem key key.pem] {
                set fpath [file join $::env(DOCKER_CERT_PATH) $fname]
                if { [file exists $fpath] } {
                    set args [linsert $args 0 --tls$opt [file nativename $fpath]]
                }
            }
        }
        if { [info exists ::env(DOCKER_HOST)] } {
            set args [linsert $args 0 -H $::env(DOCKER_HOST)]
        }
        log INFO "Automatically added command line arguments to docker: $args"
    }
    # {*}-expansion instead of eval: eval re-parses its concatenated
    # arguments, so arguments containing whitespace would be split and
    # bracketed text would be evaluated.  auto_execok may legitimately
    # return a multi-word list (e.g. on Windows), hence its own {*}.
    return [run {*}$opts -- {*}[auto_execok ${vars::-docker}] {*}$args]
}


# ::cluster::tooling::compose -- Run compose binary
#
#       Run the compose binary registered as part as the global library
#       options under the control of this program.  This wrapper will turn
#       on extra debugging in compose itself whenever the verbosity level
#       of the library is greater or equal than DEBUG.
#
# Arguments:
#       args    Arguments to compose command (compatible with Run)
#
# Results:
#       Result of command.
#
# Side Effects:
#       None.
proc ::cluster::tooling::compose { args } {
    # Isolate -- that will separate options to procedure from options that
    # would be for command.  Using -- is MANDATORY if you want to specify
    # options to the procedure.
    set sep [lsearch $args "--"]
    if { $sep >= 0 } {
        set opts [lrange $args 0 [expr {$sep-1}]]
        set args [lrange $args [expr {$sep+1}] end]
    } else {
        set opts [list]
    }

    # Put compose in verbose mode when we are ourselves at debug level.
    if { [utils outlog] >= 7 } {
        set args [linsert $args 0 --verbose]
    }
    # {*}-expansion instead of eval to avoid double substitution of the
    # arguments (see docker wrapper above for details).
    return [run {*}$opts -- {*}[auto_execok ${vars::-compose}] {*}$args]
}


# ::cluster::tooling::machine -- Run machine binary
#
#       Run the docker machine binary registered as part as the global
#       library options under the control of this program.  This wrapper
#       will turn on extra debugging in machine itself whenever the
#       verbosity level of the library is greater or equal than DEBUG.
#
# Arguments:
#       args    Arguments to machine command (compatible with Run)
#
# Results:
#       Result of command.
#
# Side Effects:
#       None.
proc ::cluster::tooling::machine { args } {
    # Isolate -- that will separate options to procedure from options that
    # would be for command.  Using -- is preferred if you want to specify
    # options to the procedure.
    utils options args opts

    # Put docker-machine in debug mode when we are ourselves at debug
    # level, and also capture its stderr (machine logs there).
    if { [utils outlog] >= 7 } {
        set args [linsert $args 0 --debug]
        set opts [linsert $opts 0 -stderr]
    }
    # Deliberately disabled branch ("0 &&"): forcing --native-ssh on
    # Windows is kept around for reference but never executed.
    if { 0 && [lsearch [split [::platform::generic] -] "win32"] >= 0 } {
        set args [linsert $args 0 --native-ssh]
    }

    # {*}-expansion instead of eval to avoid double substitution of the
    # arguments.
    return [run {*}$opts -- {*}[auto_execok ${vars::-machine}] {*}$args]
}


# ::cluster::tooling::relatively -- Run a command in directory context
#
#       Execute a command from within a given directory, converting every
#       argument that is an existing file path to a path relative to that
#       directory.  Dash-led options come first (-- preferred): -cd or
#       -chdir will NOT change back to the original directory afterwards.
#
# Arguments:
#       args    Options, then the directory, then the command to execute.
#
# Results:
#       Result of the command.
#
# Side Effects:
#       Temporarily (or permanently, with -cd/-chdir) changes the current
#       working directory.
proc ::cluster::tooling::relatively { args } {
    utils options args opts
    set chdir [expr {[utils getopt opts -cd] || [utils getopt opts -chdir]}]
    set dir [lindex $args 0]
    set args [lrange $args 1 end]

    # Make all arguments that point at existing files relative to dir.
    set modified 0
    set nargs [list]
    foreach a $args {
        if { [file exists $a] } {
            set modified 1
            lappend nargs [utils relative $a $dir]
        } else {
            lappend nargs $a
        }
    }

    if { $modified } {
        log DEBUG "Calling '$nargs' in directory context of $dir"
    }
    set cwd [pwd]
    cd $dir
    # Catch so that the original directory is restored even when the
    # command fails; the result (or error) is then re-raised via -options.
    catch {uplevel 1 $nargs} res ropts
    if { !$chdir } {
        cd $cwd
    }
    return -options $ropts $res
}


# ::cluster::tooling::run -- Run command
#
#       Run an external (local!) command and possibly capture its output.
#       The command is followed of all the arguments placed after --,
#       meaning that all dash-led options before the -- are options to this
#       procedure.  These options are as follows:
#       -return      Return result of command instead: list of (non-empty) lines
#       -keepblanks  Keep blank lines (default is to omit them)
#       -stderr      Also capture standard error.
#       -raw         Relay the command's lines to our own channels.
#       -interactive Connect the command to our stdin/stdout/stderr.
#
#       When output of the command should simply be shown, we do some extra
#       extraction and parsing work on the output of docker-machine and
#       output at the log level INFO (but maybe could we translate between
#       log levels?)
#
# Arguments:
#       args    (Optional dash-led options, followed by --) and command to
#               execute.
#
# Results:
#       Result of command
#
# Side Effects:
#       Run local command and (possibly) show its output.
proc ::cluster::tooling::run { args } {
    # Isolate -- that will separate options to procedure from options that
    # would be for command.  Using -- is MANDATORY if you want to specify
    # options to the procedure.
    utils options args opts

    if { [utils getopt opts -interactive] } {
        log DEBUG "Executing $args interactively"
        foreach fd {stdout stderr stdin} {
            fconfigure $fd -buffering none -translation binary
        }
        if { [catch {exec {*}$args >@ stdout 2>@ stderr <@ stdin} err] } {
            log WARN "Child returned: $err"
        }
    } else {
        # Create an array global to the namespace that we'll use for
        # synchronisation and context storage.
        set c [namespace current]::command[incr ${vars::generator}]
        upvar \#0 $c CMD
        set CMD(id) $c
        set CMD(command) $args
        log DEBUG "Executing $CMD(command) and capturing its output"

        # Extract some options and start building the pipe.  As we want to
        # capture output of the command, we will be using the Tcl command
        # "open" with a file path that starts with a "|" sign.
        set CMD(keep)   [utils getopt opts -keepblanks]
        set CMD(back)   [utils getopt opts -return]
        set CMD(outerr) [utils getopt opts -stderr]
        set CMD(relay)  [utils getopt opts -raw]
        set CMD(done)   0
        set CMD(result) {}

        # Kick-off the command and wait for its end.  On Windows, use a
        # command pipe via [open |...] (stderr optionally merged via 2>@1);
        # elsewhere use POpen4 to properly separate stdout from stderr.
        if { [lsearch [split [::platform::generic] -] win32] >= 0 } {
            set pipe |[concat $args]
            if { $CMD(outerr) } {
                append pipe " 2>@1"
            }
            set CMD(stdin)  ""
            set CMD(stderr) ""
            set CMD(stdout) [open $pipe]
            set CMD(pid)    [pid $CMD(stdout)]
            fileevent $CMD(stdout) readable [namespace code [list LineRead $c stdout]]
        } else {
            lassign [POpen4 {*}$args] CMD(pid) CMD(stdin) CMD(stdout) CMD(stderr)
            fileevent $CMD(stdout) readable [namespace code [list LineRead $c stdout]]
            fileevent $CMD(stderr) readable [namespace code [list LineRead $c stderr]]
        }
        vwait ${c}(done);   # Wait for command to end

        catch {close $CMD(stdin)}
        catch {close $CMD(stdout)}
        catch {close $CMD(stderr)}

        set res $CMD(result)
        unset $c
        return $res
    }
}


# ::cluster::tooling::machineOptions -- Creation options of a driver
#
#       Actively discover the set of creation options supported by a
#       docker-machine driver by parsing the help output of the "create"
#       sub-command.
#
# Arguments:
#       driver  Name of the docker-machine driver.
#
# Results:
#       Even list of option names (without leading --) and default values.
#
# Side Effects:
#       Calls docker-machine.
proc ::cluster::tooling::machineOptions { driver } {
    log INFO "Actively discovering creation options for driver $driver"
    return [options [machine -return -- create --driver $driver]]
}


# ::cluster::tooling::options -- Parse CLI help output for options
#
#       Parse help output lines (docker-machine style) and extract the long
#       (double-dash) options together with their default values.
#
# Arguments:
#       lines   Lines of a tool's help output.
#
# Results:
#       Even list, alternating option names (without the leading --) and
#       default values (empty when none).
#
# Side Effects:
#       None.
proc ::cluster::tooling::options { lines } {
    set cmdopts {};   # Empty list of discovered options

    foreach l $lines {
        # Only considers indented lines, they contain the option
        # descriptions (there might be a lot!).
        if { [string trim $l] ne "" && [string trimleft $l] ne $l } {
            set l [string trim $l]
            # Now only consider lines that start with a dash.
            if { [string index $l 0] eq "-" } {
                # Get rid of the option textual description behind the
                # first tab, or the first double space (a single space
                # would cut aliased specs like "-d, --driver" short --
                # TODO confirm against original source, the dump collapsed
                # runs of spaces).
                set tab [string first "\t" $l]
                if { $tab < 0 } {
                    set tab [string first "  " $l]
                }
                set lead [string trim [string range $l 0 $tab]]
                # Now isolate the real option, starting from the back of
                # the string.  Try capturing the default value if any.
                set def_val {};   # Default value is empty by default
                set back [expr {[string length $lead]-1}];   # End of lead
                # Default value is at end, between quotes.
                if { [string index $lead end] eq "\"" } {
                    set op_quote [string last "\"" $lead end-1]
                    set back [expr {$op_quote-1}];   # Skip default val
                    set def_val [string trim [string range $lead $op_quote end] "\""]
                }
                # Skip multioption specification, which is enclosed by
                # brackets.
                set idx [string last "\]" $lead]
                if { $idx >= 0 } {
                    set back [expr {[string last "\[" $lead $idx]-1}]
                }
                # When we are here, the variable back contains the location
                # within the lead where the options specifications are
                # contained.  Split on comma and pick up the first with a
                # double dash.
                foreach opt [split [string range $lead 0 $back] ","] {
                    set opt [string trim $opt]
                    if { [string range $opt 0 1] eq "--" } {
                        set space [string first " " $opt]
                        if { $space >= 0 } {
                            lappend cmdopts [string range $opt 2 [expr {$space - 1}]] $def_val
                        } else {
                            lappend cmdopts [string range $opt 2 end] $def_val
                        }
                        break;   # Done, we have found one!
                    }
                }
            }
        }
    }

    return $cmdopts
}


# ::cluster::tooling::parser -- Parse columnar tool output
#
#       Parse tabular output (e.g. docker-machine ls) where the first line
#       is a header defining the character position of each column.
#
# Arguments:
#       state   List of output lines, header line first.
#       hdrfix  Optional [string map] applied to the header line.
#
# Results:
#       List of dictionaries, one per data line, keyed by the lower-cased
#       column names.
#
# Side Effects:
#       None.
proc ::cluster::tooling::parser { state { hdrfix {}} } {
    set content {};   # The list of dictionaries we will return

    set cols [lindex $state 0]
    if { [llength $hdrfix] > 0 } {
        set cols [string map $hdrfix $cols]
    }

    # Arrange for indices to contain the character index at which each
    # column of the output starts (in same order as the list of keys
    # above).
    set indices {}
    foreach c $cols {
        lappend indices [string first $c $cols]
    }

    # Now loop through all lines of the output, i.e. the complete state of
    # the cluster.
    foreach m [lrange $state 1 end] {
        # Fresh dict per row so no value can leak over from the previous
        # row.
        set nfo [dict create]
        # Isolate the content of each key, respecting the column alignment
        # found in and the order of the columns.
        for {set c 0} {$c<[llength $cols]} {incr c} {
            set k [lindex $cols $c];   # Extract the key
            # Extract its value, i.e. the characters between where the key
            # started in the header up to the character before where the
            # next key started in the header.
            if { $c < [expr {[llength $cols]-1}] } {
                set end [lindex $indices [expr {$c+1}]]
                incr end -1
            } else {
                set end "end"
            }
            # The value is in between those ranges, trim to get rid of
            # trailing spaces that had been added for a nice output.
            set v [string range $m [lindex $indices $c] $end]
            dict set nfo [string trim [string tolower $k]] [string trim $v]
        }
        lappend content $nfo
    }

    return $content
}



####################################################################
#
# Procedures below are internal to the implementation, they shouldn't
# be changed unless you wish to help...
#
####################################################################


# ::cluster::tooling::POpen4 -- Pipe open
#
#       This procedure executes an external command and arranges to
#       redirect locally assigned channel descriptors to its stdin, stdout
#       and stderr.  This makes it possible to send input to the command,
#       but also to properly separate its two forms of outputs.
#
# Arguments:
#       args    Command to execute
#
# Results:
#       A list of four elements.  Respectively: the list of process
#       identifiers for the command(s) that were piped, channel for input
#       to command pipe, for regular output of command pipe and channel for
#       errors of command pipe.  An empty list on errors.
#
# Side Effects:
#       None.
proc ::cluster::tooling::POpen4 { args } {
    foreach chan {In Out Err} {
        set pipe [chan pipe]
        if { [llength $pipe] >= 2 } {
            lassign $pipe read$chan write$chan
        } else {
            log FATAL "Cannot create channel pipes!"
            return [list]
        }
    }

    if { [catch {exec {*}$args <@$readIn >@$writeOut 2>@$writeErr &} pid] } {
        foreach chan {In Out Err} {
            chan close write$chan
            chan close read$chan
        }
        log CRITICAL "Cannot execute $args: $pid"
        return [list]
    }
    chan close $writeOut
    chan close $writeErr

    foreach chan [list stdout stderr $readOut $readErr $writeIn] {
        chan configure $chan -buffering line -blocking false
    }

    return [list $pid $writeIn $readOut $readErr]
}


# ::cluster::tooling::LineRead -- Read line output from started commands
#
#       This reads the output from commands that we have started, line by
#       line and either prints it out or accumulate the result.  Properly
#       mark for end of output so the caller will stop waiting for output
#       to happen.  When outputting through the logging facility, the
#       procedure is able to recognise the output of docker-machine
#       commands (which uses the logrus package) and to convert between
#       loglevels.
#
# Arguments:
#       c       Identifier of command being run
#       fd      Which channel to read (refers to index in command)
#
# Results:
#       None.
#
# Side Effects:
#       Read lines, outputs
proc ::cluster::tooling::LineRead { c fd } {
    upvar \#0 $c CMD

    set line [gets $CMD($fd)]
    set outlvl [expr {$fd eq "stderr" ? "NOTICE" : "INFO"}]
    # Parse and analyse output of docker-machine.  Do some translation of
    # the loglevels between logrus and our internal levels.
    set bin [lindex $CMD(command) 0]
    if { [string first ${vars::-machine} $bin] >= 0 } {
        if { [string first "msg=" $line] >= 0 } {
            foreach {k v} [string map {"=" " "} $line] {
                if { $k eq "msg" } {
                    set line $v
                    break
                }
                # Translate between loglevels from logrus to internal
                # levels.
                if { $k eq "level" } {
                    foreach { gl lvl } [list info INFO \
                                            warn NOTICE \
                                            error WARN \
                                            fatal ERROR \
                                            panic FATAL] {
                        if { [string equal -nocase $v $gl] } {
                            set outlvl $lvl
                        }
                    }
                }
            }
        }
    }
    # Respect -keepblanks and output or accumulate in result
    if { ( !$CMD(keep) && [string trim $line] ne "") || $CMD(keep) } {
        if { $CMD(back) } {
            if { ( $CMD(outerr) && $fd eq "stderr" ) || $fd eq "stdout" } {
                log TRACE "Appending '$line' to result"
                lappend CMD(result) $line
            }
        } elseif { $CMD(relay) } {
            puts $fd $line
        } else {
            # Output even what was captured on stderr, which is probably
            # what we wanted in the first place.
            log $outlvl " $line"
        }
    }

    # On EOF, we stop this very procedure to be triggered.  If there are no
    # more outputs to listen to, then the process has ended and we are
    # done.
    if { [eof $CMD($fd)] } {
        fileevent $CMD($fd) readable {}
        if { ($CMD(stdout) eq "" || [fileevent $CMD(stdout) readable] eq "" ) \
                && ($CMD(stderr) eq "" || [fileevent $CMD(stderr) readable] eq "" ) } {
            set CMD(done) 1
        }
    }
}



# ::cluster::tooling::VersionQuery -- Version of underlying tools
#
#       Query and return the version number of one of the underlying tools
#       that we support.  This will call the appropriate tool with the
#       proper arguments to get the version number.
#
# Arguments:
#       tool    Tool to query, a string, one of: docker, machine or compose
#
# Results:
#       Return the version number or an empty string.
#
# Side Effects:
#       None.
proc ::cluster::tooling::VersionQuery { tool } {
    set vline ""
    switch -nocase -- $tool {
        docker {
            set vline [lindex [docker -return -- --version] 0]
        }
        machine {
            # NOTE(review): single-dash -version, unlike the others;
            # presumably docker-machine accepts it -- confirm before
            # "fixing".
            set vline [lindex [machine -return -- -version] 0]
        }
        compose {
            set vline [lindex [compose -return -- --version] 0]
        }
        default {
            log WARN "$tool isn't a tool that we can query the version for"
        }
    }
    return [vcompare extract $vline];   # Catch all for errors
}


# ::cluster::tooling::CommandsQuery -- Commands of underlying tools
#
#       Query and return the list of sub-commands supported by one of the
#       underlying tools, by parsing the output of its --help option.
#
# Arguments:
#       tool    Tool to query, a string, one of: docker, machine or compose
#
# Results:
#       List of sub-command names, empty list on errors.
#
# Side Effects:
#       None.
proc ::cluster::tooling::CommandsQuery { tool } {
    set hlp {}
    switch -nocase -- $tool {
        docker {
            set hlp [docker -return -keepblanks -- --help]
        }
        machine {
            set hlp [machine -return -keepblanks -- --help]
        }
        compose {
            set hlp [compose -return -keepblanks -- --help]
        }
        default {
            log WARN "$tool isn't a tool that we can query the commands for"
        }
    }

    # Analyse the output of the help for the tool, they are all formatted
    # more or less the same way.  We look for a line that starts with
    # commands and considers that it marks the beginning of the list of
    # commands.
    set commands {}
    set c_group 0
    foreach l $hlp {
        if { $c_group } {
            set l [string trim $l]
            # Empty line marks the end of the command description group,
            # return what we've found.
            if { $l eq "" } {
                return $commands
            }
            # Look for a separator between the command name(s) and
            # its/their description.  We prefer the tab, but accept also a
            # double space (restored here; the dump this was recovered from
            # collapsed runs of spaces).
            set sep [string first "\t" $l]
            if { $sep < 0 } {
                set sep [string first "  " $l]
            }
            # Separate the command name(s) from the description, split on
            # the comma sign in case there were aliases, then add each
            # command in turn.
            if { $sep < 0 } {
                log WARN "Cannot find command leading '$l'"
            } else {
                set spec [string trim [string range $l 0 $sep]]
                foreach c [split $spec ,] {
                    lappend commands [string trim $c]
                }
            }
        } else {
            # We don't do anything until we've found a line that marks the
            # start of the command description group.
            if { [string match -nocase "commands*" $l] } {
                set c_group 1
            }
        }
    }
    return $commands
}



package provide cluster::tooling 0.2

--------------------------------------------------------------------------------
/lib/cluster/utils.tcl:
--------------------------------------------------------------------------------
namespace eval ::cluster::utils {
    # Encapsulates variables global to this namespace under their own
    # namespace, an idea originating from http://wiki.tcl.tk/1489.
    # Variables which name start with a dash are options and which values
    # can be changed to influence the behaviour of this implementation.
    namespace eval vars {
        # Current verbosity level
        variable -verbose NOTICE
        # Mapping from integer to string representation of verbosity levels
        variable verboseTags {1 FATAL 2 ERROR 3 WARN 4 NOTICE 5 INFO 6 DEBUG 7 TRACE}
        # File descriptor to dump log messages to
        variable -log stderr
        # How to dump log messages, auto will detect when dumping to TTY and
        # will use colours.
        variable -color auto
        # Date log output
        variable -date "%Y%m%d %H%M%S"
        variable -hdate "%Y%m%d/%H:%M:%S"
        # Options marker
        variable -marker "-"
        # Temporary directory, empty for good platform guess
        variable -tmp ""
        # Character used as a mapping marker
        variable -mapper "%"
        # Characters to keep in temporary filepath
        variable fpathCharacters "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/-.,=_"
        # Size converters: regexp over the unit suffix -> multiplier
        # (decimal k/m/g/... vs binary ki/mi/gi/... units).
        variable converters [list \
                {^b$} 1 \
                {^k(?!i)(b|)$} 1000.0 \
                {^ki(b|)$} 1024.0 \
                {^m(?!i)(b|)$} [expr {pow(1000,2)}] \
                {^mi(b|)$} [expr {pow(1024,2)}] \
                {^g(?!i)(b|)$} [expr {pow(1000,3)}] \
                {^gi(b|)$} [expr {pow(1024,3)}] \
                {^t(?!i)(b|)$} [expr {pow(1000,4)}] \
                {^ti(b|)$} [expr {pow(1024,4)}] \
                {^p(?!i)(b|)$} [expr {pow(1000,5)}] \
                {^pi(b|)$} [expr {pow(1024,5)}] \
                {^e(?!i)(b|)$} [expr {pow(1000,6)}] \
                {^ei(b|)$} [expr {pow(1024,6)}] \
                {^z(?!i)(b|)$} [expr {pow(1000,7)}] \
                {^zi(b|)$} [expr {pow(1024,7)}] \
                {^y(?!i)(b|)$} [expr {pow(1000,8)}] \
                {^yi(b|)$} [expr {pow(1024,8)}]]
    }
    # Export all lower case procedures, arrange to be able to access
    # commands from the parent (cluster) namespace from here and create an
    # ensemble command called utils (note the leading :: to make this a
    # top-level command!) to ease API calls.
    namespace export {[a-z]*}
    namespace path [namespace parent]
    namespace ensemble create -command ::utils
}


# ::cluster::utils::defaults -- Set/get default parameters
#
#       This procedure takes a target namespace and an even list of keys
#       and values used to set the values of the options supported by that
#       namespace.  The list of options is composed of all variables
#       starting with a dash in its vars sub-namespace.  In the list, the
#       dash preceding the key is optional.  When a single key (and no
#       value) is given, the current value of that option is returned.
#
# Arguments:
#       ns      Namespace (under ::) whose options should be set/queried
#       args    List of key and values to set for module options.
#
# Results:
#       A dictionary with all current keys and their values
#
# Side Effects:
#       None.
proc ::cluster::utils::defaults { ns args } {
    set store ::[string trim $ns :]::vars
    if { [llength $args] == 1 } {
        set k ${vars::-marker}[string trimleft [lindex $args 0] ${vars::-marker}]
        if { [info exists ${store}::$k] } {
            return [set ${store}::$k]
        } else {
            return -code error "$k is not a known option"
        }
    }

    foreach {k v} $args {
        set k ${vars::-marker}[string trimleft $k ${vars::-marker}]
        if { [info exists ${store}::$k] } {
            set ${store}::$k $v
        }
    }

    set state {}
    foreach v [info vars ${store}::${vars::-marker}*] {
        lappend state [lindex [split $v ":"] end] [set $v]
    }
    return $state
}


# ::cluster::utils::getopt -- Quick and Dirty Options Parser
#
#       Parses options, code comes from wiki.  Once parsed, an option (and
#       its argument) are removed from the argument list.
#
# Arguments:
#       _argv   List of arguments to parse
#       name    Name of option to look for.
#       _var    Pointer to variable in which to store value
#       dft     Default value if not found
#
# Results:
#       1 if option was found, 0 otherwise.
#
# Side Effects:
#       Modifies the incoming arguments list.
116 | proc ::cluster::utils::getopt {_argv name {_var ""} {dft ""}} { 117 | upvar $_argv argv $_var var 118 | set pos [lsearch -regexp $argv ^$name] 119 | if {$pos>=0} { 120 | set to $pos 121 | if {$_var ne ""} { 122 | set var [lindex $argv [incr to]] 123 | } 124 | set argv [lreplace $argv $pos $to] 125 | return 1 126 | } else { 127 | # Did we provide a value to default? 128 | if {[llength [info level 0]] == 5} {set var $dft} 129 | return 0 130 | } 131 | } 132 | 133 | 134 | # ::cluster::utils::log -- Conditional logging 135 | # 136 | # Conditionally output the message passed as an argument 137 | # depending on the current module debug level. The level can 138 | # either be an integer or one of FATAL ERROR WARN NOTICE INFO 139 | # DEBUG, where FATAL corresponds to level 1 and DEBUG to 140 | # level 6. Level 0 will therefor turn off ALL debugging. 141 | # Logging happens on the standard error, but this can be changed 142 | # through the -log option to the module. Logging is pretty 143 | # printed using ANSI codes when the destination channel is a 144 | # terminal. 145 | # 146 | # Arguments: 147 | # lvl Logging level of the message 148 | # msg String content of the message. 149 | # 150 | # Results: 151 | # None. 152 | # 153 | # Side Effects: 154 | # Output message to logging channel whenever at the proper level. 155 | proc ::cluster::utils::log { lvl msg } { 156 | # If we should output (i.e. level of message is below the global 157 | # module level), pretty print and output. 158 | if { [outlog $lvl l] } { 159 | if { "${vars::-color}" eq "auto" } { 160 | set toTTY [dict exists [fconfigure ${vars::-log}] -mode] 161 | } else { 162 | set toTTY ${vars::-color} 163 | } 164 | # Output the whole line. 
165 | if { $toTTY } { 166 | puts ${vars::-log} [LogTerminal $l $msg] 167 | } else { 168 | puts ${vars::-log} [LogStandard $l $msg] 169 | } 170 | } 171 | } 172 | 173 | 174 | # ::cluster::utils::outlog -- current or decide log output 175 | # 176 | # When called with no arguments, this procedure will return the current 177 | # loglevel in numerical form. Otherwise, the procedure will decide if log 178 | # should be output according to the level passed as a parameter. In that 179 | # case, callers can collect back the loglevel passed as a parameter in 180 | # numerical form using a variable name. 181 | # 182 | # Arguments: 183 | # lvl Log level of the message that we wish to output (text or numeric) 184 | # intlvl_ Name of variable to store numerical level of lvl 185 | # 186 | # Results: 187 | # Either the numeric loglevel or a 1/0 boolean telling if we should output 188 | # to log. 189 | # 190 | # Side Effects: 191 | # None. 192 | proc ::cluster::utils::outlog { { lvl "" } { intlvl_ ""} } { 193 | # Convert current module level from string to integer. 194 | set current [LogLevel ${vars::-verbose}] 195 | 196 | # Either return current log level or if we should log. 197 | if { $lvl eq "" } { 198 | return $current 199 | } else { 200 | if { $intlvl_ ne "" } { 201 | upvar $intlvl_ intlvl 202 | } 203 | # Convert incoming level from string to integer. 204 | set intlvl [LogLevel $lvl] 205 | return [expr {$current >= $intlvl}] 206 | } 207 | return -1; # Never reached. 208 | } 209 | 210 | 211 | # ::cluster::utils::dget -- get or default from dictionary 212 | # 213 | # Get the value of a key from a dictionary, returning a default value if 214 | # the key does not exist in the dictionary. 215 | # 216 | # Arguments: 217 | # d Dictionary to get from 218 | # key Key in dictionary to query 219 | # default Default value to return when key does not exist 220 | # 221 | # Results: 222 | # Value of key in dictionary, or default value if it does not exist. 
223 | #
224 | # Side Effects:
225 | #       None.
226 | proc ::cluster::utils::dget { d key { default "" } } {
227 |     if { [dict exists $d $key] } {
228 |         return [dict get $d $key]
229 |     }
230 |     return $default
231 | }
232 | 
233 | 
234 | 
235 | # ::cluster::utils::options -- Separate options and args
236 | #
237 | #       Separate options from arguments. A double dash (double marker) is
238 | #       preferred to mark the end of the options and the beginning of the
239 | #       arguments. Otherwise, the beginning of the arguments is where there is an
240 | #       option that does not start with the dash marker.
241 | #
242 | # Arguments:
243 | #       _argv   "Pointer" to incoming list of arguments. Will be modified.
244 | #       _opts   "Pointer" to list of options.
245 | #
246 | # Results:
247 | #       None.
248 | #
249 | # Side Effects:
250 | #       None.
251 | proc ::cluster::utils::options {_argv _opts} {
252 |     upvar $_argv argv $_opts opts
253 | 
254 |     set opts {}
255 |     set ddash [lsearch $argv [string repeat ${vars::-marker} 2]]
256 |     if { $ddash >= 0 } {
257 |         # Double dash is always on the safe-side.
258 |         set opts [lrange $argv 0 [expr {$ddash-1}]]
259 |         set argv [lrange $argv [expr {$ddash+1}] end]
260 |     } else {
261 |         # Otherwise, we give it a good guess, i.e. first non-dash-led
262 |         # argument is the start of the arguments.
263 | set i 0 264 | while { $i < [llength $argv] } { 265 | set lead [string index [lindex $argv $i] 0] 266 | if { $lead eq ${vars::-marker} } { 267 | set next [string index [lindex $argv [expr {$i+1}]] 0] 268 | if { $next eq ${vars::-marker} } { 269 | incr i 270 | } elseif { $next eq "" } { 271 | set opts $argv 272 | set argv [list] 273 | return 274 | } else { 275 | incr i 2 276 | } 277 | } else { 278 | break 279 | } 280 | } 281 | set opts [lrange $argv 0 [expr {$i-1}]] 282 | set argv [lrange $argv $i end] 283 | } 284 | } 285 | 286 | 287 | 288 | # ::cluster::utils::temporary -- Temporary name 289 | # 290 | # Generate a rather unique temporary name (to be used, for 291 | # example, when creating temporary files). The procedure only keep 292 | # worthwhile characters, trying to ensure minimal problems when it comes 293 | # to file paths. 294 | # 295 | # Arguments: 296 | # pfx Prefix before unicity taggers 297 | # 298 | # Results: 299 | # A string that is made unique through the process identifier 300 | # and some randomness. 301 | # 302 | # Side Effects: 303 | # None. 304 | proc ::cluster::utils::temporary { pfx } { 305 | set dirname [file dirname $pfx] 306 | set fname [file tail $pfx] 307 | 308 | set nm "" 309 | set allowed [split $vars::fpathCharacters ""] 310 | foreach c [split $fname ""] { 311 | if { [lsearch $allowed $c] >= 0 } { 312 | append nm $c 313 | } else { 314 | append nm "-" 315 | } 316 | } 317 | if { $dirname eq "." || $dirname eq "" } { 318 | return ${nm}-[pid]-[expr {int(rand()*1000)}] 319 | } else { 320 | return [file join $dirname ${nm}-[pid]-[expr {int(rand()*1000)}]] 321 | } 322 | } 323 | 324 | 325 | # ::cluster::utils::tmpdir -- Good platform temporary directory 326 | # 327 | # Return the path to a good location for a temporary directory. Decision is 328 | # foremost the -tmp variable from the global module variable, otherwise 329 | # good platform-defaults making use of well-known environment variables. 330 | # 331 | # Arguments: 332 | # None. 
333 | #
334 | # Results:
335 | #       Path to a directory where to store temporary files/directories. This
336 | #       directory is guaranteed to exist.
337 | #
338 | # Side Effects:
339 | #       None.
340 | proc ::cluster::utils::tmpdir {} {
341 |     if { ${vars::-tmp} ne "" } {
342 |         return ${vars::-tmp}
343 |     }
344 | 
345 |     if { [lsearch [split [::platform::generic] -] win32] >= 0 } {
346 |         set resolutions [list USERPROFILE AppData/Local/Temp \
347 |                 windir TEMP \
348 |                 SystemRoot TEMP \
349 |                 TEMP "" TMP "" \
350 |                 "" "C:/TEMP" "" "C:/TMP" "" "C:/"]
351 |     } else {
352 |         set resolutions [list TMP "" "" /tmp]
353 |     }
354 | 
355 |     foreach { var subdir } $resolutions {
356 |         set dir ""
357 |         if { $var eq "" } {
358 |             set dir $subdir
359 |         } elseif { [info exists ::env($var)] && [set ::env($var)] ne "" } {
360 |             set dir [file join [set ::env($var)] $subdir]
361 |         }
362 |         if { $dir ne "" && [file isdirectory $dir] } {
363 |             log TRACE "Using $dir as a temporary directory"
364 |             return $dir
365 |         }
366 |     }
367 | 
368 |     return [pwd];   # Fix: was [cwd], which is not a Tcl command (built-in is pwd)
369 | }
370 | 
371 | 
372 | # ::cluster::utils::tmpfile -- Generate path to temporary file
373 | #
374 | #       Return a path that can be used for a temporary file (or directory). The
375 | #       procedure utilises a prefix and extension in order to better identify
376 | #       files within the system.
377 | #
378 | # Arguments:
379 | #       pfx     Prefix string to add at beginning of file name.
380 | #       ext     Extension for file
381 | #       tmpdir  Location of directory for file, empty for good platform guess
382 | #
383 | # Results:
384 | #       Return a full path to a file, in a good platform-dependent temporary
385 | #       directory.
386 | #
387 | # Side Effects:
388 | #       None.
389 | proc ::cluster::utils::tmpfile { pfx { ext "" } {tmpdir ""}} { 390 | if { $tmpdir eq "" } { set tmpdir [tmpdir] } 391 | if { $ext eq "" } { 392 | return [temporary [file join $tmpdir $pfx]] 393 | } else { 394 | return [temporary [file join $tmpdir $pfx].[string trimleft $ext .]] 395 | } 396 | } 397 | 398 | 399 | 400 | # ::cluster::utils::convert -- SI multiples converter 401 | # 402 | # This procedure will convert sizes (memory or disk) to a target 403 | # unit. The incoming size specification is either a floating 404 | # point (see below for value) or a floating point followed by a 405 | # unit specifier, e.g. 10k to express 10 kilobytes. The unit 406 | # specifier is case independent and the conversion will 407 | # understand both k, kB or KB. Recognised are multipliers up to 408 | # yottabyte, e.g. the leading letters (in order!): b (bytes), k, 409 | # m, g, t, p, e, z, y. When the incoming size, its default unit 410 | # can be specified, in which case this is one of the unit string 411 | # as described before. If a returning unit is specified, then it 412 | # is a unit string as described before and describes the unit of 413 | # the returned value. 414 | # 415 | # Arguments: 416 | # spec Size specification, e.g. 1345, 34k or 20MB. 417 | # dft Default Unit of size spec when unspecified, e.g. k, GB, etc. 418 | # unit Unit of converted returned value, e.g. k, GB or similar. 419 | # 420 | # Results: 421 | # The converted value in the requested SI unit, or an error. 422 | # 423 | # Side Effects: 424 | # None. 425 | proc ::cluster::utils::convert { spec {dft ""} { unit "" } { precision "%.01f"} } { 426 | # Extract value and first letter of unit specification from 427 | # string. 
428 | set len [scan $spec "%f %c" val ustart] 429 | 430 | # Convert incoming string to number of bytes in metric format, 431 | # see: http://en.wikipedia.org/wiki/Gigabyte 432 | if { $len == 2 } { 433 | set i [string first [format %c $ustart] $spec] 434 | set m [Multiplier [string range $spec $i end]] 435 | set val [expr {$val*$m}] 436 | } else { 437 | if { $dft ne "" } { 438 | set m [Multiplier $dft] 439 | set val [expr {$val*$m}] 440 | } 441 | } 442 | 443 | # Now convert back to the requested size 444 | if { $unit ne "" } { 445 | set m [Multiplier $unit] 446 | set val [expr {$val/$m}] 447 | } 448 | if { [string match "*.0" $val] } { 449 | return [expr {int($val)}] 450 | } 451 | return [format $precision $val] 452 | } 453 | 454 | # get relative path to target file from current file (end of http://wiki.tcl.tk/15925) 455 | proc ::cluster::utils::relative {targetFile {currentPath ""}} { 456 | if { $currentPath eq "" } { 457 | set currentPath [pwd] 458 | } 459 | 460 | if { [file isdirectory $currentPath] } { 461 | set cc [file split [file normalize $currentPath]] 462 | set tt [file split [file normalize $targetFile]] 463 | if {![string equal [lindex $cc 0] [lindex $tt 0]]} { 464 | # not on *n*x then 465 | return -code error "$targetFile not on same volume as $currentPath" 466 | } 467 | while {[string equal [lindex $cc 0] [lindex $tt 0]] && [llength $cc] > 0} { 468 | # discard matching components from the front 469 | set cc [lreplace $cc 0 0] 470 | set tt [lreplace $tt 0 0] 471 | } 472 | set prefix "" 473 | if {[llength $cc] == 0} { 474 | # just the file name, so targetFile is lower down (or in same place) 475 | set prefix "." 476 | } 477 | # step up the tree 478 | for {set i 0} {$i < [llength $cc]} {incr i} { 479 | append prefix " .." 
480 | } 481 | # stick it all together (the eval is to flatten the targetFile list) 482 | return [eval file join $prefix $tt] 483 | } else { 484 | return [relative $targetFile [file dirname $currentPath]] 485 | } 486 | } 487 | 488 | 489 | proc ::cluster::utils::resolve { str { mapper {}} } { 490 | set fullmap [list] 491 | foreach {k v} [concat [array get ::tcl_platform] \ 492 | [array get ::env] \ 493 | $mapper] { 494 | lappend fullmap ${vars::-mapper}[string trim $k ${vars::-mapper}]${vars::-mapper} $v 495 | } 496 | return [string map $fullmap $str] 497 | } 498 | 499 | #################################################################### 500 | # 501 | # Procedures below are internal to the implementation, they shouldn't 502 | # be changed unless you wish to help... 503 | # 504 | #################################################################### 505 | 506 | # ::cluster::utils::LogLevel -- Convert log levels 507 | # 508 | # For convenience, log levels can also be expressed using 509 | # human-readable strings. This procedure will convert from this 510 | # format to the internal integer format. 511 | # 512 | # Arguments: 513 | # lvl Log level (integer or string). 514 | # 515 | # Results: 516 | # Log level in integer format, -1 if it could not be converted. 517 | # 518 | # Side Effects: 519 | # None. 520 | proc ::cluster::utils::LogLevel { lvl } { 521 | if { ![string is integer $lvl] } { 522 | foreach {l str} $vars::verboseTags { 523 | if { [string match -nocase $str $lvl] } { 524 | return $l 525 | } 526 | } 527 | return -1 528 | } 529 | return $lvl 530 | } 531 | 532 | 533 | # ::cluster::utils::+ -- Implements ANSI colouring codes. 534 | # 535 | # Output ANSI colouring codes, inspired by wiki code at 536 | # http://wiki.tcl.tk/1143. 537 | # 538 | # Arguments: 539 | # args List of colouring and effects to apply 540 | # 541 | # Results: 542 | # Return coding escape. 543 | # 544 | # Side Effects: 545 | # None. 
546 | proc ::cluster::utils::+ { args } { 547 | set map { 548 | normal 0 bold 1 light 2 blink 5 invert 7 549 | black 30 red 31 green 32 yellow 33 blue 34 purple 35 cyan 36 white 37 550 | Black 40 Red 41 Green 42 Yellow 43 Blue 44 Purple 45 Cyan 46 White 47 551 | } 552 | set t 0 553 | foreach i $args { 554 | set ix [lsearch -exact $map $i] 555 | if {$ix>-1} {lappend t [lindex $map [incr ix]]} 556 | } 557 | return "\033\[[join $t {;}]m" 558 | } 559 | 560 | 561 | # ::cluster::utils::LogTerminal -- Create log line for terminal output 562 | # 563 | # Pretty print a log message for output on the terminal. This 564 | # will use ANSI colour codings to improve readability (and will 565 | # omit the timestamps). 566 | # 567 | # Arguments: 568 | # lvl Log level (an integer) 569 | # msg Log message 570 | # 571 | # Results: 572 | # Line to output on terminal 573 | # 574 | # Side Effects: 575 | # None. 576 | proc ::cluster::utils::LogTerminal { lvl msg } { 577 | # Format the tagger so that they all have the same size, 578 | # i.e. the size of the longest level (in words) 579 | array set TAGGER $vars::verboseTags 580 | if { [info exists TAGGER($lvl)] } { 581 | set lbl [format %.6s "$TAGGER($lvl) "] 582 | } else { 583 | set lbl [format %.6s "$lvl "] 584 | } 585 | # Start by appending a human-readable date 586 | if { ${vars::-hdate} eq "" } { 587 | set line "" 588 | } else { 589 | set dt [clock format [clock seconds] -format ${vars::-hdate}] 590 | set line "[+ light]\[$dt\][+ normal] " 591 | } 592 | # Continue by appending a human-readable level, using colors to 593 | # rank the levels. 
(see the + procedure below) 594 | append line "\[" 595 | array set LABELER { 3 yellow 2 red 1 purple 4 blue 6 light } 596 | if { [info exists LABELER($lvl)] } { 597 | append line [+ $LABELER($lvl)]$lbl[+ normal] 598 | } else { 599 | append line $lbl 600 | } 601 | append line "\] " 602 | # Append the message itself, colorised again 603 | array set COLORISER { 3 yellow 2 red 1 purple 4 bold 6 light } 604 | if { [info exists COLORISER($lvl)] } { 605 | append line [+ $COLORISER($lvl)]$msg[+ normal] 606 | } else { 607 | append line $msg 608 | } 609 | 610 | return $line 611 | } 612 | 613 | 614 | # ::cluster::utils::LogStandard -- Create log line for file output 615 | # 616 | # Pretty print a log message for output to a file descriptor. 617 | # This will add a timestamp to ease future introspection. 618 | # 619 | # Arguments: 620 | # lvl Log level (an integer) 621 | # msg Log message 622 | # 623 | # Results: 624 | # Line to output on file 625 | # 626 | # Side Effects: 627 | # None. 628 | proc ::cluster::utils::LogStandard { lvl msg } { 629 | if { ${vars::-date} eq "" } { 630 | set line "" 631 | } else { 632 | set dt [clock format [clock seconds] -format ${vars::-date}] 633 | set line "\[$dt\] " 634 | } 635 | array set TAGGER $vars::verboseTags 636 | if { [info exists TAGGER($lvl)] } { 637 | set lbl $TAGGER($lvl) 638 | } else { 639 | set lbl $lvl 640 | } 641 | append line "\[$lbl\] $msg" 642 | return $line 643 | } 644 | 645 | 646 | # ::cluster::utils::Multiplier -- Human-multiplier detection 647 | # 648 | # Return the decimal multiplier for a human-readable unit. 649 | # 650 | # Arguments: 651 | # unit Human-readabe unit, e.g. K, MiB, etc. 652 | # 653 | # Results: 654 | # A decimal value to multiply with to convert to (bytes). 655 | # 656 | # Side Effects: 657 | # Generate an error for unrecognised units. 
658 | proc ::cluster::utils::Multiplier { unit } { 659 | foreach {rx m} $vars::converters { 660 | if { [regexp -nocase -- $rx $unit] } { 661 | return $m 662 | } 663 | } 664 | 665 | return -code error "$unit is not a recognised multiple of bytes" 666 | } 667 | 668 | 669 | 670 | package provide cluster::utils 0.1 671 | 672 | 673 | -------------------------------------------------------------------------------- /lib/cluster/vcompare.tcl: -------------------------------------------------------------------------------- 1 | ################## 2 | ## Module Name -- cluster::vcompare 3 | ## Original Author -- Emmanuel Frecon - emmanuel@sics.se 4 | ## Description: 5 | ## 6 | ## Provides facilities to compare version number. This supports 7 | ## semantic versioning, but will not handle numbers where the 8 | ## dash sign is used to separate the main version number from a 9 | ## release (alpha, beta, etc.) specification. 10 | ## 11 | ################## 12 | 13 | package require cluster::utils 14 | 15 | namespace eval ::cluster::vcompare { 16 | namespace eval vars { 17 | # Maximum of version dividers (dots in the number!) 
18 |         variable -depth 8
19 |     }
20 |     namespace export {[a-z]*}
21 |     namespace path [namespace parent]
22 |     namespace ensemble create -command ::vcompare
23 |     namespace import [namespace parent]::utils::log
24 | }
25 | 
26 | 
27 | proc ::cluster::vcompare::depth { vernum } {
28 |     return [llength [split $vernum .]]
29 | }
30 | 
31 | 
32 | 
33 | proc ::cluster::vcompare::gt { current base } {
34 |     set len [expr {max([depth $current],[depth $base])}]
35 |     set l_current [Equalise $current $len]
36 |     set l_base [Equalise $base $len]
37 | 
38 |     for {set i 0} {$i < $len} {incr i} {
39 |         if { [lindex $l_current $i] > [lindex $l_base $i] } {
40 |             return 1
41 |         }
42 |         if { [lindex $l_current $i] < [lindex $l_base $i] } {
43 |             return 0
44 |         }
45 |     }
46 |     return 0
47 | }
48 | 
49 | proc ::cluster::vcompare::lt { current base } {
50 |     return [expr {![ge $current $base]}]
51 | }
52 | 
53 | 
54 | proc ::cluster::vcompare::eq { current base } {
55 |     set len [expr {max([depth $current],[depth $base])}]
56 |     set l_current [Equalise $current $len]
57 |     set l_base [Equalise $base $len]
58 | 
59 |     for {set i 0} {$i < $len} {incr i} {
60 |         if { [lindex $l_current $i] != [lindex $l_base $i] } {
61 |             return 0
62 |         }
63 |     }
64 |     return 1
65 | }
66 | 
67 | proc ::cluster::vcompare::ge { current base } {
68 |     set len [expr {max([depth $current],[depth $base])}]
69 |     set l_current [Equalise $current $len]
70 |     set l_base [Equalise $base $len]
71 | 
72 |     for {set i 0} {$i < $len} {incr i} {
73 |         if { [lindex $l_current $i] > [lindex $l_base $i] } {
74 |             return 1
75 |         }
76 |         if { [lindex $l_current $i] < [lindex $l_base $i] } {
77 |             return 0
78 |         }
79 |     }
80 |     return 1
81 | }
82 | 
83 | proc ::cluster::vcompare::le { current base } {
84 |     # Fix: was [expr !{gt $current $base}] — braces there make {gt ...} a
85 |     # literal word so gt was never invoked and expr raised a syntax error.
86 |     # le is the negation of gt, mirroring how lt negates ge above.
87 |     return [expr {![gt $current $base]}]
88 | }
89 | 
90 | 
91 | proc ::cluster::vcompare::extract { vline } {
92 |     if { $vline ne "" } {
93 |         if { [regexp {\d+(\.\d+)*} $vline version] } {
94 |             return $version
95 |         } else {
96 |             log WARN "Cannot extract a version number out of
'$vline'!" 94 | } 95 | } 96 | return "" 97 | } 98 | 99 | 100 | 101 | #################################################################### 102 | # 103 | # Procedures below are internal to the implementation, they shouldn't 104 | # be changed unless you wish to help... 105 | # 106 | #################################################################### 107 | 108 | proc ::cluster::vcompare::Equalise { vernum {depth -1}} { 109 | if { $depth < 0 } { 110 | set depth ${vars::-depth} 111 | } 112 | 113 | set l_vernum [split $vernum .] 114 | while { [llength $l_vernum] < $depth } { 115 | lappend l_vernum 0 116 | } 117 | return $l_vernum 118 | } 119 | 120 | package provide cluster::vcompare 0.1 121 | -------------------------------------------------------------------------------- /lib/cluster/virtualbox.tcl: -------------------------------------------------------------------------------- 1 | ################## 2 | ## Module Name -- cluster::virtualbox 3 | ## Original Author -- Emmanuel Frecon - emmanuel@sics.se 4 | ## Description: 5 | ## 6 | ## This module provides a (restricted) set of operations to 7 | ## modify, create and operate on virtual machines locally 8 | ## accessible. 9 | ## 10 | ################## 11 | 12 | package require cluster::tooling 13 | package require cluster::utils 14 | 15 | namespace eval ::cluster::virtualbox { 16 | # Encapsulates variables global to this namespace under their own 17 | # namespace, an idea originating from http://wiki.tcl.tk/1489. 18 | # Variables which name start with a dash are options and which 19 | # values can be changed to influence the behaviour of this 20 | # implementation. 21 | namespace eval vars { 22 | variable -manage VBoxManage 23 | } 24 | namespace export {[a-z]*} 25 | namespace path [namespace parent] 26 | namespace import [namespace parent]::utils::log 27 | } 28 | 29 | 30 | # ::cluster::virtualbox::info -- VM info 31 | # 32 | # Return a dictionary describing a complete description of a 33 | # given virtual machine. 
The output will be a dictionary, more 34 | # or less a straightforward translation of the output of 35 | # VBoxManage showvminfo. However, "arrays" in the output 36 | # (i.e. keys with indices in-between parenthesis) will be 37 | # translated to a proper list in order to ease parsing. 38 | # 39 | # Arguments: 40 | # vm Name or identifier of virtualbox guest machine. 41 | # 42 | # Results: 43 | # Return a dictionary describing the machine 44 | # 45 | # Side Effects: 46 | # None. 47 | proc ::cluster::virtualbox::info { vm } { 48 | log DEBUG "Getting info for guest $vm" 49 | foreach l [Manage -return -- showvminfo $vm --machinereadable --details] { 50 | set eq [string first "=" $l] 51 | if { $eq >= 0 } { 52 | set k [string trim [string range $l 0 [expr {$eq-1}]]] 53 | set v [string trim [string range $l [expr {$eq+1}] end]] 54 | set k [string trim [string trim $k \"]] 55 | set v [string trim [string trim $v \"]] 56 | # Convert arrays into list in the dictionary, otherwise 57 | # just create a key/value in the dictionary. 58 | if { [regexp {(.*)\([0-9]+\)} $k - mk] } { 59 | dict lappend nfo $mk $v 60 | } else { 61 | dict set nfo $k $v 62 | } 63 | } 64 | } 65 | return $nfo 66 | } 67 | 68 | 69 | # ::cluster::virtualbox::forward -- Establish port-forwarding 70 | # 71 | # Arrange for a number of NAT port forwardings to be applied 72 | # between the host and a guest machine. 73 | # 74 | # Arguments: 75 | # vm Name or identifier of virtualbox guest machine. 76 | # args Repeatedly host port, guest port, protocol in a list. 77 | # 78 | # Results: 79 | # None. 80 | # 81 | # Side Effects: 82 | # Perform port forwarding on the guest machine 83 | proc ::cluster::virtualbox::forward { vm args } { 84 | # TODO: Don't redo if forwarding already exists... 
85 | set running [expr {[Running $vm] ne ""}] 86 | foreach {host mchn proto} $args { 87 | set proto [string tolower $proto] 88 | if { $proto eq "tcp" || $proto eq "udp" } { 89 | log INFO "[string toupper $proto] port forwarding\ 90 | localhost:$host -> ${vm}:$mchn" 91 | if { $running } { 92 | Manage controlvm $vm natpf1 \ 93 | "${proto}-$host,$proto,,$host,,$mchn" 94 | } else { 95 | Manage modifyvm $vm --natpf1 \ 96 | "${proto}-${host},$proto,,$host,,$mchn" 97 | } 98 | } 99 | } 100 | } 101 | 102 | 103 | # ::cluster::virtualbox::addshare -- Add a mountable share 104 | # 105 | # Arrange for a local directory path to be mountable from within 106 | # a guest virtual machine. This will generate a unique 107 | # identifier for the share. 108 | # 109 | # Arguments: 110 | # vm Name or identifier of virtualbox guest machine. 111 | # path Path to EXISTING directory 112 | # 113 | # Results: 114 | # Return the identifier for the share, or an empty string on 115 | # errors. 116 | # 117 | # Side Effects: 118 | # Will turn off the machine if it is running as it is not 119 | # possible to add shares to live machines. 120 | proc ::cluster::virtualbox::addshare { vm path } { 121 | # Refuse to add directories that do not exist (and anything else 122 | # that would not be a directory). 123 | if { ![file isdirectory $path] } { 124 | log WARN "$path is not a host directory!" 125 | return "" 126 | } 127 | 128 | # Lookup the share so we'll only add once. 129 | set nm [share $vm $path] 130 | 131 | # If if it did not exist, add the shared folder definition to the 132 | # virtual machine. Generate a unique name that has some 133 | # connection to the path requested. 134 | if { $nm eq "" } { 135 | # Halt the machine if it is running, since we cannot add 136 | # shared folders to running machines. 
137 | if { [Running $vm] ne "" } { 138 | halt $vm 139 | } 140 | # Generate a unique name and add the share 141 | set nm [utils temporary [file tail $path]] 142 | log INFO "Adding share ${vm}:${nm} for localhost:$path" 143 | Manage sharedfolder add $vm \ 144 | --name $nm \ 145 | --hostpath $path 146 | Manage setextradata $vm \ 147 | VBoxInternal2/SharedFoldersEnableSymlinksCreate/$nm 1 148 | } 149 | return $nm 150 | } 151 | 152 | 153 | # ::cluster::virtualbox::halt -- Halt a machine 154 | # 155 | # Halt a virtual machine by simulating first a press on the 156 | # power button and then by powering it off completely if it had 157 | # not shutdown properly after a respit period. 158 | # 159 | # Arguments: 160 | # vm Name or identifier of virtualbox guest machine. 161 | # respit Respit period, in seconds. 162 | # 163 | # Results: 164 | # 1 if machine was halted, 0 otherwise 165 | # 166 | # Side Effects: 167 | # Will block while waiting for the machine to gently shutdown. 168 | proc ::cluster::virtualbox::halt { vm { respit 15 } } { 169 | # Do a nice shutdown and wait for end of machine 170 | Manage controlvm $vm acpipowerbutton 171 | 172 | # Wait for VM to shutdown 173 | log NOTICE "Waiting for $vm to shutdown..." 174 | if { ![Wait $vm $respit] } { 175 | log NOTICE "Forcing powering off of $vm" 176 | Manage controlvm $vm poweroff 177 | return [Wait $vm $respit] 178 | } 179 | return 1 180 | } 181 | 182 | 183 | # ::cluster::virtualbox::share -- Find a share 184 | # 185 | # Given a local host path, find if there is an existing share 186 | # declared within a guest and return its identifier. 187 | # 188 | # Arguments: 189 | # vm Name or identifier of virtualbox guest machine. 190 | # path Local host path 191 | # 192 | # Results: 193 | # Return the identifier of the share if it existed, an empty 194 | # string otherwise 195 | # 196 | # Side Effects: 197 | # None. 
198 | proc ::cluster::virtualbox::share { vm path } { 199 | set nfo [info $vm] 200 | foreach k [dict keys $nfo SharedFolderPathMachineMapping*] { 201 | if { [dict get $nfo $k] eq $path } { 202 | return [dict get $nfo [string map [list Path Name] $k]] 203 | } 204 | } 205 | return "" 206 | } 207 | 208 | #################################################################### 209 | # 210 | # Procedures below are internal to the implementation, they shouldn't 211 | # be changed unless you wish to help... 212 | # 213 | #################################################################### 214 | 215 | # ::cluster::virtualbox::Running -- Is a machine running? 216 | # 217 | # Check if a virtual machine is running and returns its identifier. 218 | # 219 | # Arguments: 220 | # vm Name or identifier of virtualbox guest machine. 221 | # 222 | # Results: 223 | # Return the identifier of the machine if it is running, 224 | # otherwise an empty string. 225 | # 226 | # Side Effects: 227 | # None. 228 | proc ::cluster::virtualbox::Running { vm } { 229 | # Detect if machine is currently running. 230 | log DEBUG "Detecting running state of $vm" 231 | foreach l [Manage -return -- list runningvms] { 232 | foreach {nm id} $l { 233 | set id [string trim $id "\{\}"] 234 | if { [string equal $nm $vm] || [string equal $id $vm] } { 235 | log DEBUG "$vm is running, id: $id" 236 | return $id 237 | } 238 | } 239 | } 240 | return "" 241 | } 242 | 243 | 244 | proc ::cluster::virtualbox::Wait { vm { respit 15 } } { 245 | while {$respit >= 0} { 246 | set nfo [info $vm] 247 | if { [dict exists $nfo VMState] \ 248 | && [string equal -nocase [dict get $nfo VMState] "poweroff"] } { 249 | return 1 250 | } else { 251 | log DEBUG "$vm still running, keep waiting" 252 | after 1000 253 | incr respit -1 254 | } 255 | } 256 | return 0 257 | } 258 | 259 | proc ::cluster::virtualbox::Manage { args } { 260 | # Isolate -- that will separate options to procedure from options 261 | # that would be for command. 
Using -- is MANDATORY if you want to 262 | # specify options to the procedure. 263 | set sep [lsearch $args "--"] 264 | if { $sep >= 0 } { 265 | set opts [lrange $args 0 [expr {$sep-1}]] 266 | set args [lrange $args [expr {$sep+1}] end] 267 | } else { 268 | set opts [list] 269 | } 270 | 271 | return [eval tooling run $opts -- \ 272 | [auto_execok ${vars::-manage}] $args] 273 | } 274 | 275 | 276 | 277 | package provide cluster::virtualbox 0.1 278 | -------------------------------------------------------------------------------- /lib/cluster/zipper.tcl: -------------------------------------------------------------------------------- 1 | # ZIP file constructor 2 | 3 | package require zlib 4 | 5 | namespace eval zipper { 6 | namespace export initialize 7 | namespace eval v {} 8 | catch {namespace ensemble create} 9 | } 10 | 11 | proc ::zipper::initialize {fd} { 12 | # Store file specific information in a separate namespace 13 | namespace eval v::$fd {} 14 | set v::${fd}::fd $fd 15 | set v::${fd}::base [tell $fd] 16 | set v::${fd}::toc [list] 17 | fconfigure $fd -translation binary -encoding binary 18 | # Arrange for access to callers, Tk-style 19 | interp alias {} [namespace current]::v::$fd {} [namespace current]::Dispatch $fd 20 | return [namespace current]::v::$fd 21 | } 22 | 23 | proc ::zipper::Dispatch { fd cmd args } { 24 | if { [string match {[a-z]*} $cmd] && [llength [info procs [namespace current]::$cmd]] } { 25 | if { [namespace exists [namespace current]::v::$fd] } { 26 | return [uplevel 1 [linsert $args 0 [namespace current]::$cmd $fd]] 27 | } else { 28 | return -code error "$fd doesn't refer to a zipper context" 29 | } 30 | } else { 31 | return -code error "$cmd is not a known zipper command" 32 | } 33 | } 34 | 35 | proc ::zipper::Emit { fd s} { 36 | puts -nonewline [set v::${fd}::fd] $s 37 | } 38 | 39 | proc ::zipper::DosTime {sec} { 40 | set f [clock format $sec -format {%Y %m %d %H %M %S} -gmt 1] 41 | regsub -all { 0(\d)} $f { \1} f 42 | foreach {Y M D 
h m s} $f break 43 | set date [expr {(($Y-1980)<<9) | ($M<<5) | $D}] 44 | set time [expr {($h<<11) | ($m<<5) | ($s>>1)}] 45 | return [list $date $time] 46 | } 47 | 48 | proc ::zipper::addentry {fd name contents {date ""} {force 0}} { 49 | if {$date == ""} { set date [clock seconds] } 50 | foreach {date time} [DosTime $date] break 51 | set flag 0 52 | set type 0 ;# stored 53 | set fsize [string length $contents] 54 | set csize $fsize 55 | set fnlen [string length $name] 56 | 57 | if {$force > 0 && $force != [string length $contents]} { 58 | set csize $fsize 59 | set fsize $force 60 | set type 8 ;# if we're passing in compressed data, it's deflated 61 | } 62 | 63 | if {[catch { zlib crc32 $contents } crc]} { 64 | set crc 0 65 | } elseif {$type == 0} { 66 | set cdata [zlib deflate $contents] 67 | if {[string length $cdata] < [string length $contents]} { 68 | set contents $cdata 69 | set csize [string length $cdata] 70 | set type 8 ;# deflate 71 | } 72 | } 73 | 74 | lappend v::${fd}::toc "[binary format a2c6ssssiiiss4ii PK {1 2 20 0 20 0} \ 75 | $flag $type $time $date $crc $csize $fsize $fnlen \ 76 | {0 0 0 0} 128 [tell [set v::${fd}::fd]]]$name" 77 | 78 | Emit $fd [binary format a2c4ssssiiiss PK {3 4 20 0} \ 79 | $flag $type $time $date $crc $csize $fsize $fnlen 0] 80 | Emit $fd $name 81 | Emit $fd $contents 82 | } 83 | 84 | proc ::zipper::adddirentry {fd name {date ""} {force 0}} { 85 | if {$date == ""} { set date [clock seconds] } 86 | # remove trailing slashes and add new one 87 | set name "[string trimright $name /]/" 88 | foreach {date time} [DosTime $date] break 89 | set flag 2 90 | set type 0 91 | set crc 0 92 | set csize 0 93 | set fsize 0 94 | set fnlen [string length $name] 95 | 96 | lappend v::${fd}::toc "[binary format a2c6ssssiiiss4ii PK {1 2 20 0 20 0} \ 97 | $flag $type $time $date $crc $csize $fsize $fnlen \ 98 | {0 0 0 0} 128 [tell [set v::${fd}::fd]]]$name" 99 | Emit $fd [binary format a2c4ssssiiiss PK {3 4 20 0} \ 100 | $flag $type $time $date $crc 
$csize $fsize $fnlen 0] 101 | Emit $fd $name 102 | } 103 | 104 | proc ::zipper::finalize { fd } { 105 | set pos [tell [set v::${fd}::fd]] 106 | 107 | set ntoc [llength [set v::${fd}::toc]] 108 | foreach x [set v::${fd}::toc] { 109 | Emit $fd $x 110 | } 111 | set v::${fd}::toc {} 112 | 113 | set len [expr {[tell [set v::${fd}::fd]] - $pos}] 114 | incr pos -[set v::${fd}::base] 115 | 116 | Emit $fd [binary format a2c2ssssiis PK {5 6} 0 0 $ntoc $ntoc $len $pos 0] 117 | namespace delete v::$fd 118 | 119 | return $fd 120 | } 121 | 122 | # test code below runs when this is launched as the main script 123 | if {[info exists argv0] && [string match zipper* [file tail $argv0]]} { 124 | 125 | set zip [zipper initialize [open try.zip w]] 126 | 127 | set dirs [list .] 128 | while {[llength $dirs] > 0} { 129 | set d [lindex $dirs 0] 130 | set dirs [lrange $dirs 1 end] 131 | foreach f [lsort [glob -nocomplain [file join $d *]]] { 132 | if {[file isfile $f]} { 133 | regsub {^\./} $f {} f 134 | set fd [open $f] 135 | fconfigure $fd -translation binary -encoding binary 136 | $zip addentry $f [read $fd] [file mtime $f] 137 | close $fd 138 | } elseif {[file isdir $f]} { 139 | lappend dirs $f 140 | } 141 | } 142 | } 143 | 144 | close [$zip finalize] 145 | 146 | puts "size = [file size try.zip]" 147 | puts [exec unzip -v try.zip] 148 | 149 | file delete try.zip 150 | } 151 | 152 | package provide zipper 0.12 153 | -------------------------------------------------------------------------------- /machinery: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env tclsh 2 | 3 | ################## 4 | ## Module Name -- machinery.tcl 5 | ## Original Author -- Emmanuel Frecon - emmanuel@sics.se 6 | ## Description: 7 | ## 8 | ## Cluster handler through docker-machine 9 | ## 10 | ################## 11 | 12 | package require Tcl 8.6; # The cluster module requires chan pipe 13 | 14 | 15 | # Arrange to access all libraries under lib sub-directory. 
There is 16 | # only one, but we want to be sure to be able to expand if necessary. 17 | set resolvedArgv0 [file dirname [file normalize $argv0/___]]; # Trick to resolve last symlink 18 | set dirname [file dirname [file normalize $resolvedArgv0]] 19 | set appname [file rootname [file tail $resolvedArgv0]] 20 | lappend auto_path [file join $dirname lib] [file join $dirname lib til] 21 | foreach module [list pseudofs] { 22 | ::tcl::tm::path add [file join $dirname lib $module] 23 | } 24 | package require cluster; # This implements the core of our routines! 25 | package require api::cli 26 | 27 | #package require proctrace 28 | #proctrace init -allowed [list ::cluster* ::api*] 29 | 30 | 31 | # Parse global options out of program arguments. 32 | cli globals $appname argv 33 | 34 | 35 | # Extract command (the first item of the remaining arguments) and pass 36 | # further the remaining arguments to the main command dispatcher. 37 | cli command [lindex $argv 0] {*}[lrange $argv 1 end] 38 | 39 | 40 | # TODO: 41 | # 42 | # Add external drivers that can be taken into account with the new 43 | # generic type in docker machine. 44 | # 45 | # Use the port opening mappings as a way to restrict further access to 46 | # the machines, app ports are closed, unless listed. Implement on top 47 | # of ufw, or directly on top of iptables. 48 | # 49 | # Implement an "inspect" command (returning JSON/YAML?), that can be 50 | # ingested by other tools. 51 | # 52 | # Adapt to new --file - option in docker compose, so we don't generate 53 | # temporary files anymore. 54 | -------------------------------------------------------------------------------- /make/README.md: -------------------------------------------------------------------------------- 1 | # Creating binaries 2 | 3 | ## Introduction 4 | 5 | This directory contains all the necessary files to generate self-contained 6 | binaries that will can be installed on any system, without any dependency on a 7 | local Tcl installation. 
8 | 9 | ## Usage 10 | 11 | Running the main script as follows will create binaries for each of the 12 | currently supported platform, with a snapshot of the current code and at the 13 | version reported by the main `machinery` script: 14 | 15 | make.tcl 16 | 17 | The script can also take arguments, which are the names of the platforms to 18 | build for, e.g.: 19 | 20 | make.tcl linux-x86_64 21 | 22 | In addition to these arguments, the script accepts a number of options. All 23 | these options can be shortened to their minimal subset, i.e. `-version` can be 24 | shortened to `-v`: 25 | 26 | * `-target name` can occur several times and `name` is the name of the two 27 | binaries that can be generated, namely `baclin` and `machinery`. 28 | 29 | * `-debug level` can be used to specify the debug level, which defaults to 30 | `INFO` and is enough for most purposes. 31 | 32 | * `-version vernum` can be used to specify a version number for the generation 33 | of tools that do not have official version numbers. When creating binaries for 34 | `machinery`, the tool itself is run with the `version` command to query the 35 | current version. `baclin` does not have this same support, so `-version` can 36 | be used to manually produce versions. 37 | 38 | * `--` can be used to specify the end of the options and the beginning of the 39 | arguments, but this is usually not necessary. 40 | 41 | As a summary, to generate version 0.1 of baclin for Windows 32bits, you could 42 | run the following command, even from a linux host: 43 | 44 | make.tcl -t baclin -v 0.1 win32-ix86 45 | 46 | 47 | ## Internals 48 | 49 | The whole binary creation process is made possible through these 50 | [basekits](http://kitcreator.rkeene.org/kitcreator) and the 51 | [starkit](http://www.tcl.tk/starkits/) techniques. This also means that it is 52 | possible to "cross-compile" (no compilation actually occurs!) 
for several 53 | platforms, as long as there is a working kit available under the directory 54 | `bin`. 55 | 56 | The main script `make.tcl` supposes that `machinery` is placed in its parent 57 | directory. It also requires the following directory structure: 58 | 59 | * `bin` is the directory where all basekits should be placed. There should be 60 | one for each platform that you want to support, and there should be at lease 61 | one for the platform that you run the script on. Basekits should be named 62 | `tclkit` and placed under a directory that contains an identifier for the 63 | platform. This identifier should match the result of the command 64 | `::platform::generic` on that platform. 65 | 66 | * `kits` should contain kits that are necessary for the building process. 67 | Currently, this only contains a copy of [sdx](http://wiki.tcl.tk/3411). 68 | 69 | * `distro` is the directory where the final binaries will be placed. Binaries 70 | are automatically tagged with the name of the platform and the version number. 71 | 72 | Note that `make.tcl` should be able to create a number of files and directories 73 | in the directory where it is started from. These files and directories are 74 | automatically cleaned up once the build process has finished. 
75 | -------------------------------------------------------------------------------- /make/bin/linux-ix86/tclkit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/make/bin/linux-ix86/tclkit -------------------------------------------------------------------------------- /make/bin/linux-x86_64/tclkit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/make/bin/linux-x86_64/tclkit -------------------------------------------------------------------------------- /make/bin/macosx-ix86/tclkit: -------------------------------------------------------------------------------- 1 | ../macosx-x86_64/tclkit -------------------------------------------------------------------------------- /make/bin/macosx-x86_64/tclkit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/make/bin/macosx-x86_64/tclkit -------------------------------------------------------------------------------- /make/bin/win32-ix86/tclkit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/make/bin/win32-ix86/tclkit -------------------------------------------------------------------------------- /make/distro/README.md: -------------------------------------------------------------------------------- 1 | # Target Location 2 | 3 | This is the target directory where distribution binaries will 4 | automatically be created. 
-------------------------------------------------------------------------------- /make/kits/sdx.kit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/make/kits/sdx.kit -------------------------------------------------------------------------------- /make/make.tcl: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env tclsh 2 | 3 | package require platform 4 | package require http 5 | package require tls 6 | 7 | set tcllib_ver 1.18 8 | 9 | set dirname [file dirname [file normalize [info script]]] 10 | set kitdir [file join $dirname kits] 11 | set bindir [file join $dirname bin] 12 | set dstdir [file join $dirname distro] 13 | set rootdir [file join $dirname ..] 14 | 15 | lappend auto_path [file join $rootdir lib] 16 | package require cluster; # So we can call Run... 17 | package require cluster::utils 18 | cluster defaults -verbose INFO 19 | 20 | # Quick options parsing, accepting several times -target 21 | set targets [list]; set version "" 22 | for { set i 0 } { $i < [llength $argv] } { incr i } { 23 | set opt [lindex $argv $i] 24 | switch -glob -- $opt { 25 | "-t*" { 26 | incr i 27 | lappend targets [lindex $argv $i] 28 | } 29 | "-d*" { 30 | incr i 31 | cluster defaults -verbose [lindex $argv $i] 32 | } 33 | "-v*" { 34 | incr i 35 | set version [lindex $argv $i] 36 | } 37 | "--" { 38 | incr i 39 | break 40 | } 41 | default { 42 | break 43 | } 44 | } 45 | } 46 | set argv [lrange $argv $i end] 47 | if { ![llength $targets] } { 48 | set targets [list "machinery" "baclin"] 49 | } 50 | cluster log NOTICE "Building targets: $targets" 51 | 52 | # Build for all platforms 53 | if { [llength $argv] == 0 } { 54 | set argv [glob -directory $bindir -nocomplain -tails -- *] 55 | } 56 | cluster log NOTICE "Building for platforms: $argv" 57 | 58 | # The missing procedure of the http package 59 | proc 
::http::geturl_followRedirects {url args} { 60 | while {1} { 61 | set token [eval [list http::geturl $url] $args] 62 | switch -glob -- [http::ncode $token] { 63 | 30[1237] { 64 | } 65 | default { return $token } 66 | } 67 | upvar #0 $token state 68 | array set meta [set ${token}(meta)] 69 | if {![info exist meta(Location)]} { 70 | return $token 71 | } 72 | set url $meta(Location) 73 | unset meta 74 | } 75 | } 76 | 77 | # Arrange for https to work properly 78 | ::http::register https 443 [list ::tls::socket -tls1 1] 79 | 80 | 81 | # Protect wrapping through temporary directory 82 | set origdir [pwd] 83 | set wrapdir [file normalize [file join $origdir wrapper-[pid]-[expr {int(rand()*1000)}]]] 84 | cluster log NOTICE "Wrapping inside $wrapdir" 85 | file mkdir $wrapdir 86 | cd $wrapdir 87 | 88 | proc cleanup { { target "" } } { 89 | cd $::origdir 90 | 91 | set toremove [list] 92 | if { [info exists ::xdir] } { lappend toremove $::xdir } 93 | if { [info exists ::tcllib_path] } { lappend toremove $::tcllib_path } 94 | if { $target ne "" } { 95 | lappend toremove ${target}.vfs ${target}.kit 96 | } 97 | lappend toremove $::wrapdir 98 | 99 | foreach fname $toremove { 100 | if { [file exists $fname] } { 101 | file delete -force -- $fname 102 | } 103 | } 104 | } 105 | 106 | # Get the tcllib, this is a complete overkill, but is generic and 107 | # might help us in the future. We get it from the github mirror as 108 | # the main fossil source is protected by a captcha. 109 | cluster log NOTICE "Getting tcllib v$tcllib_ver from github mirror" 110 | set gver [string map [list . _] $tcllib_ver] 111 | set url https://github.com/tcltk/tcllib/archive/tcllib_$gver.tar.gz 112 | set tok [::http::geturl_followRedirects $url -binary on] 113 | set tcllib_path [utils temporary tcllib].tar.gz 114 | if { [::http::ncode $tok] == 200 } { 115 | # Copy content of file to file, we can't use -channel as the 116 | # procedure to follow redirects cannot rewind on file descriptor 117 | # content. 
118 | set fd [open $tcllib_path "w"] 119 | fconfigure $fd -encoding binary -translation binary 120 | puts -nonewline $fd [::http::data $tok] 121 | close $fd 122 | } else { 123 | cluster log ERROR "Could not download from $url!" 124 | cleanup 125 | exit 126 | } 127 | ::http::cleanup $tok 128 | 129 | # Extract the content of tcllib to disk for a while 130 | cluster log NOTICE "Extracting tcllib" 131 | tooling run -- tar zxf $tcllib_path 132 | set xdir [lindex [glob -nocomplain -- *tcllib*$gver] 0] 133 | if { $xdir eq "" } { 134 | cluster log ERROR "Could not find where tcllib was extracted!" 135 | cleanup 136 | exit 137 | } 138 | 139 | foreach target $targets { 140 | # Handle versioning for some of the targets 141 | if { $version eq "" && $target eq "machinery" } { 142 | # Run machinery and ask it for its current version number. 143 | cluster log NOTICE "Getting version" 144 | set version [lindex [tooling run -return -- [info nameofexecutable] [file join $dirname .. $target] version] 0] 145 | } 146 | 147 | # Start creating an application directory structure using qwrap (from 148 | # sdx). 149 | cluster log NOTICE "Creating skeleton and filling VFS" 150 | set tclkit [file join $bindir [::platform::generic] tclkit] 151 | set sdx [file join $kitdir sdx.kit] 152 | tooling run $tclkit $sdx qwrap [file join $rootdir $target] 153 | tooling run $tclkit $sdx unwrap ${target}.kit 154 | 155 | # Install the modules of tcllib into the lib directory of the VFS 156 | # directory. 
157 | cluster log NOTICE "Installing tcllib into VFS" 158 | set installer [file join $xdir installer.tcl] 159 | tooling run -- [info nameofexecutable] $installer -no-html -no-nroff -no-examples \ 160 | -no-gui -no-apps -no-wait -pkg-path ${target}.vfs/lib 161 | foreach subdir [glob -directory ${target}.vfs/lib -types d -nocomplain -tails *] { 162 | set match 0 163 | foreach ptn [list *${target}* yaml json cmdline] { 164 | if { [string match $ptn $subdir] } { 165 | set match 1 166 | break 167 | } 168 | } 169 | if { ! $match } { 170 | cluster log DEBUG "Cleaning away directory $subdir" 171 | file delete -force -- [file join ${target}.vfs lib $subdir] 172 | } 173 | } 174 | 175 | # Install application libraries into VFS 176 | foreach fname [glob -directory [file join $rootdir lib] -nocomplain -- *] { 177 | set r_fname [file dirname [file normalize ${fname}/___]] 178 | cluster log DEBUG "Copying $r_fname -> ${target}.vfs/lib" 179 | file copy -force -- $r_fname ${target}.vfs/lib 180 | } 181 | 182 | # And now, for each of the platforms requested at the command line, 183 | # build a platform dependent binary out of the kit. 184 | foreach platform $argv { 185 | set binkit [file join $bindir $platform tclkit] 186 | if { [file exists $binkit] } { 187 | cluster log INFO "Final wrapping of binary for $platform" 188 | tooling run $tclkit $sdx wrap ${target}.kit 189 | # Copy runtime to temporary because won't work if same as the 190 | # one we are starting from. 
191 | set tmpkit [file join $wrapdir [file tail ${binkit}].temp] 192 | cluster log DEBUG "Creating temporary kit for final wrapping: $tmpkit" 193 | file copy $binkit $tmpkit 194 | tooling run $tclkit $sdx wrap ${target} -runtime $tmpkit 195 | file delete -force -- $tmpkit 196 | } else { 197 | cluster log ERROR "Cannot build for $platform, no main kit available" 198 | } 199 | 200 | # Move created binary to directory for official distributions 201 | if { $version eq "" } { 202 | set dstbin ${target}-$platform 203 | } else { 204 | set dstbin ${target}-$version-$platform 205 | } 206 | if { [string match -nocase "win*" $platform] } { 207 | file rename -force -- ${target} [file join $dstdir $dstbin].exe 208 | } else { 209 | file rename -force -- ${target} [file join $dstdir $dstbin] 210 | file attributes [file join $dstdir $dstbin] -permissions a+x 211 | } 212 | } 213 | 214 | # Big cleanup 215 | file delete -force -- ${target}.vfs 216 | file delete -force -- ${target}.kit 217 | file delete -force -- ${target}.bat 218 | } 219 | 220 | cleanup 221 | -------------------------------------------------------------------------------- /test/.gitignore: -------------------------------------------------------------------------------- 1 | .test.* 2 | .test.*/** -------------------------------------------------------------------------------- /test/bin/witness.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | echo "These are my arguments: $*" 4 | echo "The value of TEST is $TEST" -------------------------------------------------------------------------------- /test/depends/skeleton.yml: -------------------------------------------------------------------------------- 1 | #docker-machinery 2 | version: 2 3 | 4 | machines: 5 | .skeleton: 6 | swarm: off 7 | memory: "1GiB" 8 | size: "2GB" 9 | labels: 10 | environment: dev 11 | 12 | .labelled: 13 | labels: 14 | role: testing 15 | target: dev 16 | 
-------------------------------------------------------------------------------- /test/depends/test.env: -------------------------------------------------------------------------------- 1 | TEST=infile -------------------------------------------------------------------------------- /test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | 3 | services: 4 | web: 5 | image: nginx:${VERSION:-latest} 6 | ports: 7 | - "80:80" 8 | environment: 9 | - NGINX_PORT=80 10 | -------------------------------------------------------------------------------- /test/myvars.env: -------------------------------------------------------------------------------- 1 | # This is an env file that is read to set the environment of the compose 2 | # project. 3 | MYTEST=123456 4 | -------------------------------------------------------------------------------- /test/test.twk: -------------------------------------------------------------------------------- 1 | # This file should be in YAML format and is an example for how to force in some 2 | # low-level options together with a cluster file definition. 3 | 4 | # Turn off image caching, images specified by the cluster file will be 5 | # downloaded at the machine in all cases. 6 | .cache: "-" 7 | 8 | # This just sets back the -color option of the cluster::utils namespace to the 9 | # same value that it has by default. This is just meant as an example to access 10 | # sub-namespaces. 
11 | utils.color: "auto" 12 | -------------------------------------------------------------------------------- /test/test.yml: -------------------------------------------------------------------------------- 1 | #docker-machinery 2 | version: 2 3 | 4 | include: 5 | - depends/skeleton.yml 6 | # - depends/google.yml 7 | 8 | env_file: 9 | - depends/test.env 10 | 11 | environment: 12 | - TEST=inmain 13 | 14 | machines: 15 | test: 16 | aliases: 17 | - emmanuel 18 | extends: 19 | - .skeleton 20 | - .google 21 | - .labelled 22 | prelude: 23 | - 24 | exec: mkdir 25 | args: -p /etc/test 26 | sudo: on 27 | remote: on 28 | copy: off 29 | - 30 | exec: bin/witness.sh 31 | args: $TEST 32 | sudo: on 33 | remote: on 34 | substitution: on 35 | - 36 | exec: bin/witness.sh 37 | args: $TEST 38 | sudo: on 39 | remote: on 40 | substitution: off 41 | - 42 | exec: bin/witness.sh 43 | args: $TEST 44 | sudo: on 45 | remote: on 46 | substitution: 47 | scope: both 48 | - 49 | exec: bin/witness.sh 50 | args: $TEST 51 | sudo: on 52 | remote: on 53 | substitution: 54 | scope: args 55 | - 56 | exec: bin/witness.sh 57 | args: $TEST 58 | sudo: on 59 | remote: on 60 | substitution: 61 | scope: args 62 | patterns: 63 | - A* 64 | - 65 | exec: bin/witness.sh 66 | args: $TEST 67 | sudo: on 68 | remote: on 69 | substitution: 70 | scope: args 71 | patterns: 72 | - TES* 73 | files: 74 | - 75 | source: test/pwd/access.cfg 76 | destination: /etc/test/ 77 | sudo: on 78 | images: 79 | - alpine 80 | - nginx:${VERSION:-latest} 81 | ports: 82 | - 8080:80 83 | - 20514:514/udp 84 | - 9090 85 | environment: 86 | - TEST=overrideinvm 87 | - VERSION=1.19-alpine 88 | compose: 89 | - 90 | environment: 91 | - NOTUSED=ForDemo 92 | env_file: 93 | - myvars.env 94 | files: 95 | - docker-compose.yml 96 | project: test 97 | -------------------------------------------------------------------------------- /test/test.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/efrecon/machinery/9eadd4fd1c226ec14e5420d18f3b63ec571e7a3a/test/test.zip --------------------------------------------------------------------------------