├── .gitignore ├── Dockerfile ├── README.md ├── design.md ├── go.sh ├── requirements.txt ├── setup.py ├── test_integration.py ├── ts.py ├── unofficial_flocker_tools ├── __init__.py ├── config.py ├── destroy_nodes.py ├── diagnostics.py ├── flocker_volumes.py ├── get_nodes.py ├── hub_agents.py ├── install.py ├── plugin.py ├── sample_files.py ├── samples │ ├── cluster.yml.ebs.sample │ ├── cluster.yml.openstack.sample │ └── cluster.yml.zfs.sample ├── terraform_templates │ ├── aws.tf │ ├── cluster.tf │ ├── cluster.yml.template │ ├── terraform.tfvars.sample │ └── variables.tf ├── texttable.py ├── txflocker │ ├── __init__.py │ └── client.py └── utils.py └── web ├── Dockerfile ├── app ├── config.js ├── example.js └── page.css ├── build ├── ng-admin-only.min.css ├── ng-admin-only.min.js ├── ng-admin.min.css └── ng-admin.min.js ├── cluster.html ├── fixtures ├── configuration.json ├── nodes.json ├── state.json └── volume.json ├── flockerclient.py ├── images ├── clusterhq.png ├── clusterhq@2x.png ├── logo.png └── logo@2x.png ├── index.html ├── index.js ├── package.json ├── server.tac ├── setup.py ├── start.sh ├── test.html └── txflocker ├── __init__.py └── client.py /.gitignore: -------------------------------------------------------------------------------- 1 | # OS generated files # 2 | ###################### 3 | .DS_Store 4 | .DS_Store? 5 | ._* 6 | .Spotlight-V100 7 | .Trashes 8 | Icon? 
9 | ehthumbs.db 10 | Thumbs.db 11 | 12 | # Vagrant # 13 | ########### 14 | .vagrant 15 | *.box 16 | 17 | *.crt 18 | *.key 19 | cluster.yml 20 | agent.yml 21 | node_mapping.yml 22 | *.pid 23 | node_modules 24 | venv 25 | build 26 | dist 27 | *.egg-info -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04.3 2 | 3 | # Last build date - this can be updated whenever there are security updates so 4 | # that everything is rebuilt 5 | ENV security_updates_as_of 2015-08-14 6 | 7 | # Install security updates and required packages 8 | RUN apt-get -qy update && \ 9 | apt-get -y install apt-transport-https software-properties-common wget zip && \ 10 | wget -qO /tmp/terraform.zip https://dl.bintray.com/mitchellh/terraform/terraform_0.6.3_linux_amd64.zip && \ 11 | cd /tmp && unzip terraform.zip && rm terraform.zip && mv terraform terraform-provider-aws terraform-provider-template terraform-provisioner-local-exec terraform-provisioner-remote-exec /usr/local/bin/ && rm * && \ 12 | add-apt-repository -y "deb https://clusterhq-archive.s3.amazonaws.com/ubuntu/$(lsb_release --release --short)/\$(ARCH) /" && \ 13 | apt-get -qy update && \ 14 | apt-get -qy upgrade && \ 15 | apt-get -y --force-yes install clusterhq-flocker-cli && \ 16 | apt-get remove --purge -y $(apt-mark showauto) python3.4 && \ 17 | apt-get -y install apt-transport-https software-properties-common && \ 18 | apt-get -y --force-yes install clusterhq-flocker-cli && \ 19 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 20 | 21 | ADD . /app 22 | RUN cd /app && /opt/flocker/bin/pip install --no-cache-dir . 
&& \ 23 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /app 24 | 25 | ENV PATH /opt/flocker/bin:$PATH 26 | WORKDIR /pwd 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Unofficial Flocker Tools 2 | 3 | This repository contains the following ClusterHQ Labs projects. 4 | 5 | * [Flocker Volumes CLI (`flockerctl`)](https://docs.clusterhq.com/en/latest/flocker-features/flockerctl.html) 6 | 7 | ## Documentation 8 | 9 | Please refer to the individual projects above for instructions on how to use this repo. 10 | 11 | ## Running tests 12 | 13 | Run an integration test for the installer thus: 14 | 15 | ``` 16 | $ trial test_integration.py 17 | ``` 18 | 19 | Note the comment at the top of the `test_integration.py` file before running the test. 20 | 21 | 22 | ## Changelog 23 | 24 | * 2016-07-27 Deprecated the "Labs Installer". 25 | (also known as "hatch" / "uft-flocker-install") 26 | -------------------------------------------------------------------------------- /design.md: -------------------------------------------------------------------------------- 1 | Key idea: make this especially useful in conjunction with the flocker docker 2 | plugin. 3 | 4 | For now, assume the existence of a user certificate as indicated by a 5 | cluster.yml on this host which has a `users` key. Use the first user 6 | certificate in that list. 
7 | 8 | * http://foutaise.org/code/texttable/ 9 | 10 | Idea for CLI: 11 | 12 | ``` 13 | $ ./flocker-volumes.py 14 | Subcommands: 15 | version show version informatioon 16 | 17 | list-nodes show list of nodes in the configured cluster 18 | 19 | list list flocker datasets 20 | --deleted include deleted datasets 21 | 22 | create create a flocker dataset 23 | --host [-h] 0f72ae0c initial host for dataset to appear on 24 | --metadata [-m] name=vol2 set dataset metadata 25 | --size [-s] 20G set size in bytes (default), k, G, T 26 | 27 | destroy mark a dataset to be deleted 28 | 29 | move 30 | move a dataset from one host to another 31 | 32 | $ ./flocker-volumes.py version 33 | Client version: 1.0.0 34 | Server version: 1.0.0 35 | 36 | $ ./flocker-volumes.py list-nodes 37 | SERVER ADDRESS VOLUMES 38 | 0f72ae0c 1.2.3.4 3 39 | 6af074e4 1.2.3.5 2 40 | 41 | $ ./flocker-volumes.py create -h 6af074e4 -m name=postgresql_8 -s 20G 42 | 14f2fa0c1a14f2fa0c14f2fa0c14f2fa0 43 | 44 | $ ./flocker-volumes.py list [--deleted] 45 | DATASET SIZE METADATA STATUS SERVER 46 | 14f2fa0c 20GB name=postgresql_8 pending 6af074e4 (1.2.3.5) 47 | 1921edea 30GB name=postgresql_7 attached 6af074e4 (1.2.3.5) 48 | 4ba2a30d 30GB name=postgresql_8 unattached 49 | 50 | $ ssh 1.2.3.5 docker run -d -v postgresql_8:/data/db --volume-driver=flocker --name=pgsql 51 | 383ab293ac7a7d533d83ab293c77d533d 52 | 53 | [time passes...] 54 | 55 | $ ssh 1.2.3.5 docker rm -f -v pgsql 56 | 383ab293ac7a7d533d83ab293c77d533d 57 | 58 | $ ./flocker-volumes.py destroy 14f2fa0c 59 | Marking dataset 14f2fa0c to be destroyed. 60 | 61 | --- 62 | 63 | Later ideas: 64 | 65 | TODO: make it possible to see which containers are using which volumes via metadata updates. 
66 | 67 | $ ./flocker-volumes.py list 68 | DATASET SERVER CONTAINERS SIZE METADATA 69 | 1921edea 1.2.3.4 pgsql7,pgsql9 30GB name=postgresql_7 70 | 14f2fa0c 1.2.3.5 pgsql8 30GB name=postgresql_8 71 | b31a0311 30GB name=nonmanifest 72 | 73 | $ ./flocker-volumes.py destroy name=badger 74 | Volume c548725a is currently in use. Please stop it first. 75 | 76 | TODO: make metadata name be special/unique. 77 | 78 | $ ./flocker-volumes.py create --size 30g name=badger 79 | Volume "badger" already exists. Please choose another name. 80 | 81 | ``` 82 | -------------------------------------------------------------------------------- /go.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | do_install() { 3 | IMAGE="clusterhq/uft:latest" 4 | DEPRECATION_WARNING=$(cat </dev/null 21 | #!/bin/sh 22 | DEPRECATED="${DEPRECATED}" 23 | DEPRECATION_WARNING=\$(cat <&2 29 | echo "" >&2 30 | fi 31 | if docker version >/dev/null 2>&1; then 32 | SUDO_PREFIX="" 33 | elif sudo docker version >/dev/null 2>&1; then 34 | SUDO_PREFIX="sudo " 35 | else 36 | echo "===========================================================================" 37 | echo "Unable to reach docker daemon with or without sudo. Please check that" 38 | echo "docker is running and that DOCKER_HOST is set correctly." 39 | echo 40 | echo "If you use docker-machine (e.g. as part of docker toolbox) then" 41 | echo "'eval \\\$(docker-machine env default)' or similar may help." 42 | echo 43 | echo "In that case, also make sure your docker machine is running, using e.g." 44 | echo "'docker-machine start default'." 45 | echo "===========================================================================" 46 | exit 1 47 | fi 48 | 49 | if [ ! "\$IGNORE_NETWORK_CHECK" = "1" ]; then 50 | if ! 
\$SUDO_PREFIX docker run --rm gliderlabs/alpine wget -q -O /dev/null -T 5 http://check.clusterhq.com/uft.txt 51 | then 52 | echo "===========================================================================" 53 | echo "Unable to establish network connectivity from inside a container." 54 | echo 55 | echo "If you see an error message above, that may give you a clue how to fix it." 56 | echo 57 | echo "If you run docker in a VM, restarting the VM often helps, especially if" 58 | echo "you have changed network (and/or DNS servers) since starting the VM." 59 | echo 60 | echo "If you are using docker-machine (e.g. as part of docker toolbox), you can" 61 | echo "run the following command (or similar) to do that:" 62 | echo 63 | echo " docker-machine restart default && eval \\\$(docker-machine env default)" 64 | echo 65 | echo "To ignore this check, and proceed anyway (e.g. if you know you are offline)" 66 | echo "set IGNORE_NETWORK_CHECK=1" 67 | echo "===========================================================================" 68 | exit 1 69 | fi 70 | fi 71 | 72 | 73 | \$SUDO_PREFIX docker run -ti --rm -e FLOCKER_CERTS_PATH="\${FLOCKER_CERTS_PATH}" -e FLOCKER_USER="\${FLOCKER_USER}" -e FLOCKER_CONTROL_SERVICE="\${FLOCKER_CONTROL_SERVICE}" -e EARLY_DOCKER="\${EARLY_DOCKER}" -e TOKEN="\${TOKEN}" -e CUSTOM_REPO=\${CUSTOM_REPO} -e FORCE_DESTROY=\${FORCE_DESTROY} -e CONTAINERIZED=1 -v /:/host -v \$PWD:/pwd:z $IMAGE $CMD "\$@" 74 | EOF 75 | sudo chmod +x /usr/local/bin/${PREFIX}${CMD} 76 | if [ "${DEPRECATED}" = "TRUE" ]; then 77 | EXTRA=" (deprecated)" 78 | else 79 | EXTRA="" 80 | fi 81 | echo "Installed /usr/local/bin/${PREFIX}${CMD}${EXTRA}" 82 | done 83 | 84 | if docker version >/dev/null 2>&1; then 85 | SUDO_PREFIX="" 86 | elif sudo docker version >/dev/null 2>&1; then 87 | SUDO_PREFIX="sudo " 88 | else 89 | echo "===========================================================================" 90 | echo "Unable to reach docker daemon with or without sudo. 
Please check that" 91 | echo "docker is running and that DOCKER_HOST is set correctly." 92 | echo 93 | echo "If you use docker-machine (e.g. as part of docker toolbox) then" 94 | echo "'eval \$(docker-machine env default)' or similar may help." 95 | echo 96 | echo "In that case, also make sure your docker machine is running, using e.g." 97 | echo "'docker-machine start default'." 98 | echo "===========================================================================" 99 | exit 1 100 | fi 101 | 102 | echo "Verifying internet connectivity inside container..." 103 | if [ ! "$IGNORE_NETWORK_CHECK" = "1" ]; then 104 | if ! $SUDO_PREFIX docker run --rm gliderlabs/alpine wget -q -O /dev/null -T 5 http://check.clusterhq.com/uft-install.txt 105 | then 106 | echo "===========================================================================" 107 | echo "Unable to establish network connectivity from inside a container." 108 | echo 109 | echo "If you see an error message above, that may give you a clue how to fix it." 110 | echo 111 | echo "If you run docker in a VM, restarting the VM often helps, especially if" 112 | echo "you have changed network (and/or DNS servers) since starting the VM." 113 | echo 114 | echo "If you are using docker-machine (e.g. as part of docker toolbox), you can" 115 | echo "run the following command (or similar) to do that:" 116 | echo 117 | echo " docker-machine restart default && eval \$(docker-machine env default)" 118 | echo 119 | echo "To ignore this check, and proceed anyway (e.g. if you know you are offline)" 120 | echo "set IGNORE_NETWORK_CHECK=1" 121 | echo "===========================================================================" 122 | exit 1 123 | fi 124 | fi 125 | 126 | echo "Pulling Docker image for Flocker installer..." 
127 | $SUDO_PREFIX docker pull $IMAGE 128 | if [ -n "${DEPRECATION_WARNING}" ]; then 129 | echo "" >&2 130 | echo "WARNING: Some of these commands were ${DEPRECATION_WARNING}" >&2 131 | fi 132 | echo "" 133 | } 134 | 135 | # wrapped up in a function so that we have some protection against only getting 136 | # half the file during "curl | sh" 137 | do_install 138 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | PyYAML==3.11 2 | Twisted==15.2.1 3 | argparse==1.2.1 4 | cffi==1.1.2 5 | characteristic==14.3.0 6 | cryptography==0.9.1 7 | enum34==1.0.4 8 | idna==2.0 9 | ipaddress==1.0.7 10 | pyOpenSSL==0.15.1 11 | pyasn1==0.1.7 12 | pyasn1-modules==0.0.5 13 | pycparser==2.14 14 | requests==2.7.0 15 | service-identity==14.0.0 16 | six==1.9.0 17 | treq==15.0.0 18 | wsgiref==0.1.2 19 | zope.interface==4.1.2 20 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup( 4 | name="UnofficialFlockerTools", 5 | packages=[ 6 | "unofficial_flocker_tools", 7 | "unofficial_flocker_tools.txflocker", 8 | ], 9 | package_data={ 10 | "unofficial_flocker_tools": ["samples/*", "terraform_templates/*"], 11 | }, 12 | entry_points={ 13 | "console_scripts": [ 14 | "flocker-sample-files = unofficial_flocker_tools.sample_files:main", 15 | "flocker-config = unofficial_flocker_tools.config:_main", # async 16 | "flocker-install = unofficial_flocker_tools.install:_main", # async 17 | "flocker-plugin-install = unofficial_flocker_tools.plugin:_main", # async 18 | "flocker-volumes = unofficial_flocker_tools.flocker_volumes:_main", # async 19 | "flockerctl = unofficial_flocker_tools.flocker_volumes:_main", # async 20 | "flocker-get-nodes = unofficial_flocker_tools.get_nodes:main", 21 | "flocker-destroy-nodes 
= unofficial_flocker_tools.destroy_nodes:main", 22 | "flocker-get-diagnostics = unofficial_flocker_tools.diagnostics:_main", #async 23 | "volume-hub-agents-install = unofficial_flocker_tools.hub_agents:_main", # async 24 | ], 25 | }, 26 | version="0.6", 27 | description="Unofficial tools to make installing and using Flocker easier and more fun.", 28 | author="Luke Marsden", 29 | author_email="luke@clusterhq.com", 30 | url="https://github.com/ClusterHQ/unofficial-flocker-tools", 31 | install_requires=[ 32 | "PyYAML>=3", 33 | "Twisted>=14", 34 | "treq>=14", 35 | "pyasn1>=0.1", 36 | ], 37 | ) 38 | -------------------------------------------------------------------------------- /test_integration.py: -------------------------------------------------------------------------------- 1 | # Copyright ClusterHQ Inc. See LICENSE file for details. 2 | 3 | """ 4 | Test supported configurations of the installer. 5 | 6 | To run these tests, you must place a `terraform.tfvars.json` file in your home 7 | directory thusly: 8 | 9 | luke@tiny:~$ cat ~/terraform.tfvars.json 10 | {"aws_access_key": "XXX", 11 | "aws_secret_key": "YYY", 12 | "aws_region": "us-west-1", 13 | "aws_availability_zone": "us-west-1b", 14 | "aws_key_name": "luke2", 15 | "private_key_path": "/Users/luke/Downloads/luke2.pem"} 16 | """ 17 | 18 | from twisted.trial.unittest import TestCase 19 | import os 20 | from subprocess import check_output 21 | from twisted.python.filepath import FilePath 22 | import yaml, json 23 | 24 | SECRETS_FILE = FilePath(os.path.expanduser("~") + "/terraform.tfvars.json") 25 | SECRETS = json.load(SECRETS_FILE.open()) 26 | KEY = FilePath(SECRETS["private_key_path"]) 27 | GET_FLOCKER = "https://get.flocker.io/" 28 | # set to empty string if you want to test against local install of tools 29 | # (faster RTT than building docker image every time) 30 | UFT = "uft-" 31 | 32 | class UnofficialFlockerInstallerTests(TestCase): 33 | """ 34 | Complete spin-up tests. 
35 | """ 36 | # Slow builds because we're provisioning VMs. 37 | timeout = 60 * 60 38 | 39 | def _run_integration_test(self, configuration): 40 | test_dir = FilePath(self.mktemp()) 41 | test_dir.makedirs() 42 | v = dict(testdir=test_dir.path, get_flocker=GET_FLOCKER, 43 | configuration=configuration, uft=UFT, key=KEY.path) 44 | cleaned_up = False 45 | try: 46 | os.system("""curl -sSL %(get_flocker)s | sh && \ 47 | cd %(testdir)s && \ 48 | chmod 0600 %(key)s && \ 49 | %(uft)sflocker-sample-files""" % v) 50 | SECRETS_FILE.copyTo(test_dir.child("terraform").child("terraform.tfvars.json")) 51 | os.system("""cd %(testdir)s && \ 52 | %(uft)sflocker-get-nodes --%(configuration)s && \ 53 | %(uft)sflocker-install cluster.yml && \ 54 | %(uft)sflocker-config cluster.yml && \ 55 | %(uft)sflocker-plugin-install cluster.yml && \ 56 | echo "sleeping 130 seconds to let cluster settle..." && \ 57 | sleep 130""" % v) 58 | cluster_config = yaml.load(test_dir.child("cluster.yml").open()) 59 | node1 = cluster_config['agent_nodes'][0] 60 | node2 = cluster_config['agent_nodes'][1] 61 | self.assertNotEqual(node1, node2) 62 | node1public = node1['public'] 63 | node2public = node2['public'] 64 | print runSSHRaw(node1public, 65 | 'docker run -v foo:/data --volume-driver=flocker busybox ' 66 | 'sh -c \\"echo hello \\> /data/foo\\"') 67 | output = runSSHRaw(node2public, 68 | 'docker run -v foo:/data --volume-driver=flocker busybox ' 69 | 'cat /data/foo') 70 | self.assertTrue(output.strip().endswith("hello")) 71 | result = os.system("""cd %(testdir)s && \ 72 | %(uft)sflocker-volumes destroy --dataset=$(%(uft)sflocker-volumes list|tail -n 2 |head -n 1|awk -F ' ' '{print $1}') && \ 73 | while [ $(%(uft)sflocker-volumes list |wc -l) != "2" ]; do echo waiting for volumes to be deleted; sleep 1; done && \ 74 | FORCE_DESTROY=yes %(uft)sflocker-destroy-nodes""" % v) 75 | if result == 0: 76 | cleaned_up = True 77 | finally: 78 | if not cleaned_up: 79 | os.system("""cd %(testdir)s && \ 80 | 
FORCE_DESTROY=yes %(uft)sflocker-destroy-nodes""" % v) 81 | 82 | def test_ubuntu_aws(self): 83 | return self._run_integration_test("ubuntu-aws") 84 | 85 | def runSSHRaw(ip, command, username="root"): 86 | command = 'ssh -o LogLevel=error -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s %s@%s %s' % ( 87 | KEY.path, username, ip, command) 88 | return check_output(command, shell=True) 89 | -------------------------------------------------------------------------------- /ts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | optimization tool. see which parts of the install are taking the longest. 4 | 5 | 1. 'ts' is in moreutils package, install it. 6 | 2. trial test_integration.py | ts -s '%.s, ' > ~/log.txt 7 | 3. python ts.py ~/log.txt > ~/log.csv 8 | 4. open ~/log.csv 9 | """ 10 | import sys 11 | data = [s.split(", ") for s in open(sys.argv[1]).read().strip().split("\n")] 12 | relative_data = [] 13 | last_absolute_time = 0.0 14 | last_job = "" 15 | for absolute_time, job in data: 16 | absolute_time = float(absolute_time) 17 | relative_time = absolute_time - last_absolute_time 18 | relative_data.append((relative_time, job, last_job)) 19 | last_absolute_time = absolute_time 20 | last_job = job 21 | 22 | relative_data.sort() 23 | relative_data.reverse() 24 | for (time, job, previous_job) in relative_data: 25 | if time > 0.05: 26 | print "%.2f, %r, %r" % (time, previous_job.replace(',', '\,'), job.replace(',', '\,')) 27 | """ 28 | print "=== %.2f ===" % (time,) 29 | print "from:", previous_job 30 | print " to:", job 31 | print 32 | """ 33 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/unofficial_flocker_tools/__init__.py 
-------------------------------------------------------------------------------- /unofficial_flocker_tools/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This script will generate some certificates using flocker-ca and upload them 4 | # to the servers specified in a cluster.yml 5 | 6 | import sys 7 | import yaml 8 | import time 9 | from twisted.internet.task import react 10 | from twisted.internet.defer import inlineCallbacks, gatherResults 11 | 12 | from os import environ 13 | 14 | # when installing on k8s which has been set up with kube-aws, this is necc 15 | EARLY_DOCKER_PREFIX = "" 16 | if environ.get("EARLY_DOCKER") == "1": 17 | EARLY_DOCKER_PREFIX = "-H unix:///run/early-docker.sock " 18 | 19 | # Usage: deploy.py cluster.yml 20 | from utils import Configurator, log 21 | 22 | def report_completion(result, public_ip, message=""): 23 | log(message, public_ip) 24 | return result 25 | 26 | @inlineCallbacks 27 | def main(reactor, *args): 28 | c = Configurator(configFile=sys.argv[1]) 29 | c.run("flocker-ca initialize %s" % (c.config["cluster_name"],)) 30 | log("Initialized cluster CA.") 31 | c.run("flocker-ca create-control-certificate %s" % (c.config["control_node"],)) 32 | log("Created control cert.") 33 | node_mapping = {} 34 | for node in c.config["agent_nodes"]: 35 | public_ip = node["public"] 36 | # Created 8eab4b8d-c0a2-4ce2-80aa-0709277a9a7a.crt. Copy ... 37 | uuid = c.run("flocker-ca create-node-certificate").split(".")[0].split(" ")[1] 38 | node_mapping[public_ip] = uuid 39 | log("Generated", uuid, "for", public_ip) 40 | for user in c.config["users"]: 41 | c.run("flocker-ca create-api-certificate %s" % (user,)) 42 | log("Created user key for", user) 43 | 44 | # Dump agent_config into a file and scp it to /etc/flocker/agent.yml on the 45 | # nodes. 
46 | f = open("agent.yml", "w") 47 | yaml.dump(c.config["agent_config"], f) 48 | f.close() 49 | 50 | # Record the node mapping for later. 51 | f = open("node_mapping.yml", "w") 52 | yaml.dump(node_mapping, f) 53 | f.close() 54 | 55 | log("Making /etc/flocker directory on all nodes") 56 | deferreds = [] 57 | for node, uuid in node_mapping.iteritems(): 58 | deferreds.append(c.runSSHAsync(node, "mkdir -p /etc/flocker")) 59 | deferreds.append(c.runSSHAsync(c.config["control_node"], "mkdir -p /etc/flocker")) 60 | yield gatherResults(deferreds) 61 | 62 | log("Uploading keys to respective nodes:") 63 | deferreds = [] 64 | 65 | # Copy cluster cert, and control cert and key to control node. 66 | d = c.scp("cluster.crt", c.config["control_node"], "/etc/flocker/cluster.crt", async=True) 67 | d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded cluster cert to") 68 | deferreds.append(d) 69 | 70 | for ext in ("crt", "key"): 71 | d = c.scp("control-%s.%s" % (c.config["control_node"], ext), 72 | c.config["control_node"], "/etc/flocker/control-service.%s" % (ext,), async=True) 73 | d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Uploaded control %s to" % (ext,)) 74 | deferreds.append(d) 75 | log(" * Uploaded control cert & key to control node.") 76 | 77 | # Copy cluster cert, and agent cert and key to agent nodes. 
78 | deferreds = [] 79 | for node, uuid in node_mapping.iteritems(): 80 | d = c.scp("cluster.crt", node, "/etc/flocker/cluster.crt", async=True) 81 | d.addCallback(report_completion, public_ip=node, message=" * Uploaded cluster cert to") 82 | deferreds.append(d) 83 | 84 | d = c.scp("agent.yml", node, "/etc/flocker/agent.yml", async=True) 85 | d.addCallback(report_completion, public_ip=node, message=" * Uploaded agent.yml to") 86 | deferreds.append(d) 87 | 88 | for ext in ("crt", "key"): 89 | d = c.scp("%s.%s" % (uuid, ext), node, "/etc/flocker/node.%s" % (ext,), async=True) 90 | d.addCallback(report_completion, public_ip=node, message=" * Uploaded node %s to" % (ext,)) 91 | deferreds.append(d) 92 | 93 | yield gatherResults(deferreds) 94 | 95 | deferreds = [] 96 | for node, uuid in node_mapping.iteritems(): 97 | if c.config["os"] == "ubuntu": 98 | d = c.runSSHAsync(node, """echo "starting flocker-container-agent..." 99 | service flocker-container-agent start 100 | echo "starting flocker-dataset-agent..." 
101 | service flocker-dataset-agent start 102 | """) 103 | elif c.config["os"] == "centos": 104 | d = c.runSSHAsync(node, """if selinuxenabled; then setenforce 0; fi 105 | systemctl enable docker.service 106 | systemctl start docker.service 107 | """) 108 | elif c.config["os"] == "coreos": 109 | d = c.runSSHAsync(node, """echo 110 | echo > /tmp/flocker-command-log 111 | docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\ 112 | -v /etc/flocker:/etc/flocker \\ 113 | -v /var/run/docker.sock:/var/run/docker.sock \\ 114 | --name=flocker-container-agent \\ 115 | clusterhq/flocker-container-agent 116 | docker %(early_docker_prefix)s run --restart=always -d --net=host --privileged \\ 117 | -e DEBUG=1 \\ 118 | -v /tmp/flocker-command-log:/tmp/flocker-command-log \\ 119 | -v /flocker:/flocker -v /:/host -v /etc/flocker:/etc/flocker \\ 120 | -v /dev:/dev \\ 121 | --name=flocker-dataset-agent \\ 122 | clusterhq/flocker-dataset-agent 123 | """ % dict(early_docker_prefix=EARLY_DOCKER_PREFIX)) 124 | deferreds.append(d) 125 | 126 | if c.config["os"] == "ubuntu": 127 | d = c.runSSHAsync(c.config["control_node"], """cat < /etc/init/flocker-control.override 128 | start on runlevel [2345] 129 | stop on runlevel [016] 130 | EOF 131 | echo 'flocker-control-api 4523/tcp # Flocker Control API port' >> /etc/services 132 | echo 'flocker-control-agent 4524/tcp # Flocker Control Agent port' >> /etc/services 133 | service flocker-control restart 134 | ufw allow flocker-control-api 135 | ufw allow flocker-control-agent 136 | """) 137 | elif c.config["os"] == "centos": 138 | d = c.runSSHAsync(c.config["control_node"], """systemctl enable flocker-control 139 | systemctl start flocker-control 140 | firewall-cmd --permanent --add-service flocker-control-api 141 | firewall-cmd --add-service flocker-control-api 142 | firewall-cmd --permanent --add-service flocker-control-agent 143 | firewall-cmd --add-service flocker-control-agent 144 | """) 145 | elif c.config["os"] == 
"coreos": 146 | d = c.runSSHAsync(c.config["control_node"], """echo 147 | docker %(early_docker_prefix)s run --name=flocker-control-volume -v /var/lib/flocker clusterhq/flocker-control-service true 148 | docker %(early_docker_prefix)s run --restart=always -d --net=host -v /etc/flocker:/etc/flocker --volumes-from=flocker-control-volume --name=flocker-control-service clusterhq/flocker-control-service""" % dict(early_docker_prefix=EARLY_DOCKER_PREFIX)) 149 | 150 | deferreds.append(d) 151 | 152 | yield gatherResults(deferreds) 153 | 154 | if c.config["os"] == "ubuntu": 155 | # XXX INSECURE, UNSUPPORTED, UNDOCUMENTED EXPERIMENTAL OPTION 156 | # Usage: `uft-flocker-config --ubuntu-aws --swarm`, I guess 157 | if len(sys.argv) > 2 and sys.argv[2] == "--swarm": 158 | # Install swarm 159 | deferreds = [] 160 | clusterid = c.runSSH(c.config["control_node"], """ 161 | docker run swarm create""").strip() 162 | log("Created Swarm ID") 163 | for node in c.config["agent_nodes"]: 164 | d = c.runSSHAsync(node['public'], """ 165 | service docker stop 166 | docker daemon -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 >> /tmp/dockerlogs 2>&1 & 167 | """) 168 | # Let daemon come up 169 | time.sleep(3) 170 | d = c.runSSHAsync(node['public'], """ 171 | docker run -d swarm join --addr=%s:2375 token://%s 172 | """ % (node['private'], clusterid)) 173 | log("Started Swarm Agent for %s" % node['public']) 174 | deferreds.append(d) 175 | 176 | d = c.runSSHAsync(c.config["control_node"], """ 177 | docker run -d -p 2357:2375 swarm manage token://%s 178 | """ % clusterid) 179 | log("Starting Swarm Master") 180 | deferreds.append(d) 181 | yield gatherResults(deferreds) 182 | log("Swarm Master is at tcp://%s:2357" % c.config["control_node"]) 183 | 184 | def _main(): 185 | react(main, sys.argv[1:]) 186 | 187 | if __name__ == "__main__": 188 | _main() 189 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/destroy_nodes.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from twisted.python.filepath import FilePath 5 | 6 | def main(): 7 | terraform_templates = FilePath("terraform") 8 | if not terraform_templates.exists(): 9 | print "Please run uft-flocker-sample-files in the current directory first." 10 | os._exit(1) 11 | os.system("cd terraform && terraform destroy" 12 | + (" -force" if os.environ.get("FORCE_DESTROY") else "")) 13 | pass 14 | 15 | if __name__ == "__main__": 16 | main() 17 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/diagnostics.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | from twisted.internet.task import react 4 | from twisted.internet.defer import inlineCallbacks, gatherResults 5 | 6 | # Usage: diagnostics.py cluster.yml 7 | from utils import Configurator, log 8 | 9 | def report_completion(result, public_ip, message=""): 10 | log(message, public_ip) 11 | return result 12 | 13 | @inlineCallbacks 14 | def main(reactor, *args): 15 | c = Configurator(configFile=sys.argv[1]) 16 | 17 | # Run flocker-diagnostics 18 | deferreds = [] 19 | log("Running Flocker-diagnostics on agent nodes.") 20 | for node in c.config["agent_nodes"]: 21 | d = c.runSSHAsync(node["public"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") 22 | d.addCallback(report_completion, public_ip=node["public"], message=" * Ran diagnostics on agent node.") 23 | deferreds.append(d) 24 | d = c.runSSHAsync(c.config["control_node"], "rm -rf /tmp/diagnostics; mkdir /tmp/diagnostics; cd /tmp/diagnostics; flocker-diagnostics") 25 | d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Ran diagnostics on control node.") 26 | deferreds.append(d) 27 | yield gatherResults(deferreds) 28 | 29 | # Let flocker diagnostics run 30 | 
time.sleep(5) 31 | 32 | # Gather flocker-diagnostics 33 | deferreds = [] 34 | log("Gathering Flocker-diagnostics on agent nodes.") 35 | for node in c.config["agent_nodes"]: 36 | d = c.scp("./", node["public"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) 37 | d.addCallback(report_completion, public_ip=node["public"], message=" * Gathering diagnostics on agent node.") 38 | deferreds.append(d) 39 | d = c.scp("./", c.config["control_node"], "/tmp/diagnostics/clusterhq_flocker_logs_*.tar", async=True, reverse=True) 40 | d.addCallback(report_completion, public_ip=c.config["control_node"], message=" * Gathering diagnostics on control node.") 41 | deferreds.append(d) 42 | yield gatherResults(deferreds) 43 | 44 | def _main(): 45 | react(main, sys.argv[1:]) 46 | 47 | if __name__ == "__main__": 48 | _main() 49 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/flocker_volumes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | A prototype version of a CLI tool which shows off flocker's first class 5 | volumes capabilities. 6 | 7 | Run me from a directory containing a cluster.yml and appropriate cluster 8 | certificates, or specify --cluster-crt, --user-crt, --user-key, and 9 | --control-service. 
10 | """ 11 | 12 | from twisted.internet import defer 13 | from twisted.internet.task import react 14 | from twisted.python.usage import Options, UsageError 15 | from twisted.python import log 16 | from twisted.python.filepath import FilePath 17 | from txflocker.client import get_client as txflocker_get_client 18 | from txflocker.client import combined_state, parse_num, process_metadata 19 | import sys 20 | import yaml 21 | import treq 22 | import texttable 23 | import json 24 | import os 25 | 26 | def get_client(options): 27 | cluster_yml = options["cluster-yml"] 28 | if "CONTAINERIZED" in os.environ: 29 | if cluster_yml.startswith("/"): 30 | cluster_yml = "/host" + cluster_yml 31 | cluster = FilePath(cluster_yml) 32 | if cluster.exists(): 33 | config = yaml.load(cluster.open()) 34 | certificates_path = cluster.parent() 35 | user = config["users"][0] 36 | control_service = None # figure it out based on cluster.yml 37 | else: 38 | certs_path = options["certs-path"] 39 | if "CONTAINERIZED" in os.environ: 40 | if certs_path.startswith("/"): 41 | certs_path = "/host" + certs_path 42 | certificates_path = FilePath(certs_path) 43 | if options["user"] is None: 44 | raise UsageError("must specify --user") 45 | user = options["user"] 46 | if options["control-service"] is None: 47 | raise UsageError("must specify --control-service") 48 | control_service = options["control-service"] 49 | 50 | user_certificate_filename = "%s.crt" % (user,) 51 | user_key_filename = "%s.key" % (user,) 52 | 53 | return txflocker_get_client( 54 | certificates_path=certificates_path, 55 | user_certificate_filename=user_certificate_filename, 56 | user_key_filename=user_key_filename, 57 | target_hostname=control_service, 58 | ) 59 | 60 | 61 | def get_base_url(options): 62 | pwd = FilePath(options["certs-path"]) 63 | if options["control-service"] is not None: 64 | control_config = {"hostname": options["control-service"]} 65 | else: 66 | control_config = yaml.load( 67 | 
def get_uuid_length(long_):
    """
    Return how many characters of a UUID to display: effectively the whole
    UUID when ``long_`` is truthy, otherwise an 8-character prefix.
    """
    return 100 if long_ else 8
result["size"], 141 | result["meta"], 142 | result["status"], 143 | ("" if result["node"] is None else 144 | result["node"]["uuid"][:uuid_length] 145 | + " (" + result["node"]["host"] + ")")]) 146 | table = get_table() 147 | table.set_cols_align(["l", "l", "l", "l", "l"]) 148 | rows = [["", "", "", "", ""]] + [ 149 | ["DATASET", "SIZE", "METADATA", "STATUS", "SERVER"]] + rows 150 | table.add_rows(rows) 151 | print table.draw() + "\n" 152 | d.addCallback(got_results) 153 | return d 154 | 155 | 156 | class Create(Options): 157 | """ 158 | create a flocker dataset 159 | """ 160 | optParameters = [ 161 | ("node", "n", None, 162 | "Initial primary node for dataset " 163 | "(any unique prefix of node uuid, see " 164 | "flocker-volumes list-nodes)"), 165 | ("metadata", "m", None, 166 | "Set volume metadata (\"a=b,c=d\")"), 167 | ("size", "s", None, 168 | "Set size in bytes (default), k, M, G, T"), 169 | ] 170 | def run(self): 171 | if not self.get("node"): 172 | raise UsageError("must specify --node") 173 | self.client = get_client(self.parent) 174 | self.base_url = get_base_url(self.parent) 175 | 176 | d = self.client.get(self.base_url + "/state/nodes") 177 | d.addCallback(treq.json_content) 178 | def got_nodes(nodes): 179 | args = {} 180 | 181 | # size 182 | if self["size"]: 183 | args["maximum_size"] = parse_num(self["size"]) 184 | 185 | # primary node 186 | args["primary"] = filter_primary_node(self["node"], nodes) 187 | 188 | # metadata 189 | args["metadata"] = process_metadata(self["metadata"]) 190 | 191 | # TODO: don't allow non-unique name in metadata (by 192 | # convention) 193 | 194 | d = self.client.post( 195 | self.base_url + "/configuration/datasets", 196 | json.dumps(args), 197 | headers={'Content-Type': ['application/json']}) 198 | d.addCallback(treq.json_content) 199 | return d 200 | d.addCallback(got_nodes) 201 | def created_dataset(result): 202 | print "created dataset in configuration, manually poll", 203 | print "state with 'flocker-volumes list' to 
see it", 204 | print "show up." 205 | print 206 | # TODO: poll the API until it shows up, give the user a nice 207 | # progress bar. 208 | # TODO: investigate bug where all datasets go to pending 209 | # during waiting for a dataset to show up. 210 | d.addCallback(created_dataset) 211 | return d 212 | 213 | 214 | class Destroy(Options): 215 | """ 216 | mark a dataset to be deleted 217 | """ 218 | optParameters = [ 219 | ("dataset", "d", None, "Dataset to destroy"), 220 | ] 221 | def run(self): 222 | if not self.get("dataset"): 223 | raise UsageError("must specify --dataset") 224 | 225 | self.client = get_client(self.parent) 226 | self.base_url = get_base_url(self.parent) 227 | d = self.client.get(self.base_url + "/configuration/datasets") 228 | d.addCallback(treq.json_content) 229 | def got_configuration(datasets): 230 | victim = filter_datasets(self["dataset"], datasets) 231 | d = self.client.delete(self.base_url + 232 | "/configuration/datasets/%s" 233 | % (victim,)) 234 | d.addCallback(treq.json_content) 235 | return d 236 | d.addCallback(got_configuration) 237 | def done_deletion(result): 238 | print "marked dataset as deleted. poll list manually to see", 239 | print "it disappear." 
def filter_primary_node(prefix, nodes):
    """
    Return the uuid (as an ascii byte string) of the single node whose
    uuid starts with ``prefix``.  Raise UsageError if no node or more
    than one node matches.
    """
    matches = [node for node in nodes if node["uuid"].startswith(prefix)]
    if not matches:
        raise UsageError("no node uuids matching %s" %
                (prefix,))
    if len(matches) > 1:
        raise UsageError("%s is ambiguous node" %
                (prefix,))
    return matches[0]["uuid"].encode("ascii")


def filter_datasets(prefix, datasets):
    """
    Return the dataset_id (as an ascii byte string) of the single dataset
    whose id starts with ``prefix``.  Raise UsageError if no dataset or
    more than one dataset matches.
    """
    matches = [ds for ds in datasets if ds["dataset_id"].startswith(prefix)]
    if not matches:
        raise UsageError("no dataset uuids matching %s" %
                (prefix,))
    if len(matches) > 1:
        raise UsageError("%s is ambiguous dataset" % (prefix,))
    return matches[0]["dataset_id"].encode("ascii")
"/configuration/datasets/%s" % (dataset,), 300 | json.dumps(args), 301 | headers={'Content-Type': ['application/json']}) 302 | d.addCallback(treq.json_content) 303 | return d 304 | d = defer.gatherResults([d1, d2]) 305 | d.addCallback(got_results) 306 | def initiated_move(result): 307 | print "initiated move of dataset, please check state", 308 | print "to observe it actually move." 309 | print 310 | d.addCallback(initiated_move) 311 | return d 312 | 313 | 314 | commands = { 315 | "version": Version, 316 | "list-nodes": ListNodes, 317 | "list": List, 318 | "status": ListNodes, 319 | "ls": List, 320 | "create": Create, 321 | "destroy": Destroy, 322 | "move": Move, 323 | } 324 | 325 | 326 | class FlockerVolumesCommands(Options): 327 | optParameters = [ 328 | ("cluster-yml", None, "./cluster.yml", 329 | "Location of cluster.yml file " 330 | "(makes other options unnecessary)"), 331 | ("certs-path", None, ".", 332 | "Path to certificates folder"), 333 | ("user", None, "user", 334 | "Name of user for which .key and .crt files exist"), 335 | ("cluster-crt", None, "cluster.crt", 336 | "Name of cluster cert file"), 337 | ("control-service", None, None, 338 | "Hostname or IP of control service"), 339 | ("control-port", None, 4523, 340 | "Port for control service REST API"), 341 | ] 342 | subCommands = [ 343 | (cmd, None, cls, cls.__doc__) 344 | for cmd, cls 345 | in sorted(commands.iteritems())] 346 | 347 | 348 | def main(reactor, *argv): 349 | try: 350 | base = FlockerVolumesCommands() 351 | # Fake some commandline args based on env vars 352 | argv = list(argv) 353 | if os.environ.get("FLOCKER_CERTS_PATH"): 354 | argv = ["--certs-path", os.environ["FLOCKER_CERTS_PATH"]] + argv 355 | if os.environ.get("FLOCKER_USER"): 356 | argv = ["--user", os.environ["FLOCKER_USER"]] + argv 357 | if os.environ.get("FLOCKER_CONTROL_SERVICE"): 358 | argv = ["--control-service", os.environ["FLOCKER_CONTROL_SERVICE"]] + argv 359 | base.parseOptions(argv) 360 | if base.subCommand is not None: 
361 | d = defer.maybeDeferred(base.subOptions.run) 362 | else: 363 | raise UsageError("Please specify a command.") 364 | def usageError(failure): 365 | failure.trap(UsageError) 366 | print str(failure.value) 367 | return # skips verbose exception printing 368 | d.addErrback(usageError) 369 | def err(failure): 370 | log.err(failure) 371 | reactor.stop() 372 | d.addErrback(err) 373 | return d 374 | except UsageError, errortext: 375 | print errortext 376 | print 'Try --help for usage details.' 377 | sys.exit(1) 378 | 379 | 380 | def _main(): 381 | react(main, sys.argv[1:]) 382 | 383 | if __name__ == "__main__": 384 | _main() 385 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/get_nodes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from twisted.python.filepath import FilePath 5 | 6 | def main(): 7 | terraform = FilePath("terraform") 8 | if not terraform.exists(): 9 | print "Please run uft-flocker-sample-files in the current directory first." 10 | os._exit(1) 11 | os.system("cd terraform && terraform apply") 12 | cluster_yml = terraform.child("cluster.yml") 13 | if cluster_yml.exists(): 14 | cluster_yml.moveTo(FilePath(".").child("cluster.yml")) 15 | 16 | if __name__ == "__main__": 17 | main() 18 | 19 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/hub_agents.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Install catalog agents. 
import sys
from twisted.internet.task import react
from twisted.internet.defer import gatherResults, inlineCallbacks
from os import environ

# Usage: plugin.py cluster.yml
from utils import Configurator, log

def report_completion(result, public_ip,
        message="Completed volume hub catalog agents install for"):
    # Passthrough callback: log progress for one node, forward the result.
    log(message, public_ip)
    return result

@inlineCallbacks
def main(reactor, configFile):
    """
    Install volume hub catalog agents on the control node and every agent
    node listed in the cluster config, in parallel over SSH.
    """
    c = Configurator(configFile=configFile)
    control_ip = c.config["control_node"]

    # The installer script is driven entirely by env vars; TOKEN must be
    # present in our own environment.
    install_command = ('TOKEN="%s" '
            """sh -c 'curl -H "cache-control: max-age=0" -ssL https://get-volumehub.clusterhq.com/ |sh'""" %
            (environ["TOKEN"],))

    deferreds = [c.runSSHAsync(control_ip,
            "TARGET=control-service " + install_command)]

    for index, node in enumerate(c.config["agent_nodes"]):
        # Exactly one agent node (the first) also runs the flocker agent.
        prefix = "RUN_FLOCKER_AGENT_HERE=1 " if index == 0 else ""
        deferreds.append(c.runSSHAsync(node["public"],
                prefix + "TARGET=agent-node " + install_command))

    log("Installing volume hub catalog agents...")
    yield gatherResults(deferreds)
    log("Done!")

def _main():
    react(main, sys.argv[1:])

if __name__ == "__main__":
    _main()
def report_completion(result, public_ip, message="Completed install for"):
    # Passthrough callback: log progress for one node, forward the result.
    log(message, public_ip)
    return result

class UsageError(Exception):
    pass

@inlineCallbacks
def main(reactor, configFile):
    """
    Install flocker node software on every node named in the cluster
    config; for ZFS-backed clusters also install ZFS and distribute SSH
    keys between the agent nodes.
    """
    c = Configurator(configFile=configFile)

    # Check that key file is accessible. If it isn't, give an error that
    # doesn't include the container-wrapping `/host/` to avoid confusing the
    # user.
    if not FilePath(c.get_container_facing_key_path()).exists():
        raise UsageError(
            "Private key specified in private_key_path in config does not exist at: %s" %
                (c.get_user_facing_key_path(),))

    # Map OS to its default cloud login user (we SSH in as this user in
    # order to enable root access below).
    if c.config["os"] == "coreos":
        user = "core"
    elif c.config["os"] == "ubuntu":
        user = "ubuntu"
    elif c.config["os"] == "centos":
        user = "centos"
    else:
        # Previously an unrecognised os fell through and crashed later with
        # a NameError on `user`; fail early with a clear message instead.
        raise UsageError("Unsupported os in config: %r" % (c.config["os"],))

    # Gather IPs of all nodes
    nodes = c.config["agent_nodes"]
    node_public_ips = [n["public"] for n in nodes]
    node_public_ips.append(c.config["control_node"])

    # Wait for all nodes to boot (SSH reachable on port 22)
    yield gatherResults([verify_socket(ip, 22, timeout=600) for ip in node_public_ips])

    # Enable root access
    cmd1 = "sudo mkdir -p /root/.ssh"
    cmd2 = "sudo cp /home/%s/.ssh/authorized_keys /root/.ssh/authorized_keys" % (user,)
    deferreds = []
    for public_ip in node_public_ips:
        d = c.runSSHAsync(public_ip, cmd1 + " && " + cmd2, username=user)
        d.addCallback(report_completion, public_ip=public_ip, message="Enabled root login for")
        deferreds.append(d)
    yield gatherResults(deferreds)

    # Install flocker node software on all the nodes
    deferreds = []
    for public_ip in node_public_ips:
        if c.config["os"] == "ubuntu":
            log("Running install for", public_ip, "...")
            default = "https://clusterhq-archive.s3.amazonaws.com/ubuntu/$(lsb_release --release --short)/\$(ARCH)"
            repo = os.environ.get("CUSTOM_REPO", default)
            if not repo:
                repo = default
            d = c.runSSHAsync(public_ip, """apt-get -y install apt-transport-https software-properties-common
add-apt-repository -y "deb %s /"
apt-get update
curl -sSL https://get.docker.com/ | sh
apt-get -y --force-yes install clusterhq-flocker-node
""" % (repo,))
            d.addCallback(report_completion, public_ip=public_ip)
            deferreds.append(d)
        elif c.config["os"] == "centos":
            default = "https://clusterhq-archive.s3.amazonaws.com/centos/clusterhq-release$(rpm -E %dist).noarch.rpm"
            repo = os.environ.get("CUSTOM_REPO", default)
            if not repo:
                repo = default
            # NOTE(review): `yum update` below has no -y; presumably the
            # remote shell is non-interactive so it defaults to "no" —
            # confirm whether -y was intended.
            d = c.runSSHAsync(public_ip, """if selinuxenabled; then setenforce 0; fi
yum update
curl -sSL https://get.docker.com/ | sh
service docker start
test -e /etc/selinux/config && sed --in-place='.preflocker' 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
yum install -y %s
yum install -y clusterhq-flocker-node
""" % (repo,))
            d.addCallback(report_completion, public_ip=public_ip)
            deferreds.append(d)
    yield gatherResults(deferreds)

    # if the dataset.backend is ZFS then install ZFS and mount a flocker pool
    # then create and distribute SSH keys amongst the nodes
    if c.config["agent_config"]["dataset"]["backend"] == "zfs":
        # CentOS ZFS installation requires a restart
        # XXX todo - find out a way to handle a restart mid-script
        if c.config["os"] == "centos":
            log("Auto-install of ZFS on CentOS is not currently supported")
            sys.exit(1)
        if c.config["os"] == "coreos":
            log("Auto-install of ZFS on CoreOS is not currently supported")
            sys.exit(1)

        for node in c.config["agent_nodes"]:
            node_public_ip = node["public"]
            if c.config["os"] == "ubuntu":
                c.runSSH(node_public_ip, """echo installing-zfs
add-apt-repository -y ppa:zfs-native/stable
apt-get update
apt-get -y --force-yes install libc6-dev zfsutils
mkdir -p /var/opt/flocker
truncate --size 10G /var/opt/flocker/pool-vdev
zpool create flocker /var/opt/flocker/pool-vdev
""")

        """
        Loop over each node and generate SSH keys
        Then get the public key so we can distribute it to other nodes
        """
        for node in c.config["agent_nodes"]:
            node_public_ip = node["public"]
            log("Generating SSH Keys for %s" % (node_public_ip,))
            # BUGFIX: this was `cat < /tmp/genkeys.sh`, which tries to READ
            # a file that does not exist yet; the EOF terminator shows a
            # heredoc writing the script was intended.
            publicKey = c.runSSH(node_public_ip, """cat <<EOF > /tmp/genkeys.sh
#!/bin/bash
ssh-keygen -q -f /root/.ssh/id_rsa -N ""
EOF
bash /tmp/genkeys.sh
cat /root/.ssh/id_rsa.pub
rm /tmp/genkeys.sh
""")

            publicKey = publicKey.rstrip('\n')
            """
            Now we have the public key for the node we loop over all the other
            nodes and append it to /root/.ssh/authorized_keys
            """
            for othernode in c.config["agent_nodes"]:
                othernode_public_ip = othernode["public"]
                if othernode_public_ip != node_public_ip:
                    log("Copying %s key -> %s" % (node_public_ip, othernode_public_ip,))
                    # BUGFIX: same missing-heredoc problem as genkeys.sh above.
                    c.runSSH(othernode_public_ip, """cat <<EOF > /tmp/uploadkey.sh
#!/bin/bash
echo "%s" >> /root/.ssh/authorized_keys
EOF
bash /tmp/uploadkey.sh
rm /tmp/uploadkey.sh
""" % (publicKey,))

    log("Installed clusterhq-flocker-node on all nodes")

def _main():
    react(main, sys.argv[1:])

if __name__ == "__main__":
    _main()
and set up 8 | # startup scripts according to the platform 9 | 10 | import sys 11 | import os 12 | from twisted.internet.task import react 13 | from twisted.internet.defer import gatherResults, inlineCallbacks 14 | 15 | # Usage: plugin.py cluster.yml 16 | from utils import Configurator, log 17 | 18 | # a dict that holds the default values for each of the env vars 19 | # that can be overriden 20 | settings_defaults = { 21 | # skip installing the flocker plugin 22 | 'SKIP_INSTALL_PLUGIN': '' 23 | } 24 | 25 | # dict that holds our actual env vars once the overrides have been applied 26 | settings = {} 27 | 28 | # loop over each of the default vars and check to see if we have been 29 | # given an override in the environment 30 | for field in settings_defaults: 31 | value = os.environ.get(field) 32 | if value is None: 33 | value = settings_defaults[field] 34 | settings[field] = value 35 | 36 | @inlineCallbacks 37 | def main(reactor, configFile): 38 | c = Configurator(configFile=configFile) 39 | control_ip = c.config["control_node"] 40 | 41 | log("Generating plugin certs") 42 | # generate and upload plugin.crt and plugin.key for each node 43 | for node in c.config["agent_nodes"]: 44 | public_ip = node["public"] 45 | # use the node IP to name the local files 46 | # so they do not overwrite each other 47 | c.run("flocker-ca create-api-certificate %s-plugin" % (public_ip,)) 48 | log("Generated plugin certs for", public_ip) 49 | 50 | def report_completion(result, public_ip, message="Completed plugin install for"): 51 | log(message, public_ip) 52 | return result 53 | 54 | deferreds = [] 55 | log("Uploading plugin certs...") 56 | for node in c.config["agent_nodes"]: 57 | public_ip = node["public"] 58 | # upload the .crt and .key 59 | for ext in ("crt", "key"): 60 | d = c.scp("%s-plugin.%s" % (public_ip, ext,), 61 | public_ip, "/etc/flocker/plugin.%s" % (ext,), async=True) 62 | d.addCallback(report_completion, public_ip=public_ip, message=" * Uploaded plugin cert for") 63 | 
deferreds.append(d) 64 | yield gatherResults(deferreds) 65 | log("Uploaded plugin certs") 66 | 67 | log("Installing flocker plugin") 68 | # loop each agent and get the plugin installed/running 69 | # clone the plugin and configure an upstart/systemd unit for it to run 70 | 71 | deferreds = [] 72 | for node in c.config["agent_nodes"]: 73 | public_ip = node["public"] 74 | private_ip = node["private"] 75 | log("Using %s => %s" % (public_ip, private_ip)) 76 | 77 | # the full api path to the control service 78 | controlservice = 'https://%s:4523/v1' % (control_ip,) 79 | 80 | # perhaps the user has pre-compiled images with the plugin 81 | # downloaded and installed 82 | if not settings["SKIP_INSTALL_PLUGIN"]: 83 | if c.config["os"] == "ubuntu": 84 | log("Installing plugin for", public_ip, "...") 85 | d = c.runSSHAsync(public_ip, 86 | "apt-get install -y --force-yes clusterhq-flocker-docker-plugin && " 87 | "service flocker-docker-plugin restart") 88 | d.addCallback(report_completion, public_ip=public_ip) 89 | deferreds.append(d) 90 | elif c.config["os"] == "centos": 91 | log("Installing plugin for", public_ip, "...") 92 | d = c.runSSHAsync(public_ip, 93 | "yum install -y clusterhq-flocker-docker-plugin && " 94 | "systemctl enable flocker-docker-plugin && " 95 | "systemctl start flocker-docker-plugin") 96 | d.addCallback(report_completion, public_ip=public_ip) 97 | deferreds.append(d) 98 | else: 99 | log("Skipping installing plugin: %r" % (settings["SKIP_INSTALL_PLUGIN"],)) 100 | yield gatherResults(deferreds) 101 | 102 | for node in c.config["agent_nodes"]: 103 | public_ip = node["public"] 104 | private_ip = node["private"] 105 | # ensure that the /run/docker/plugins 106 | # folder exists 107 | log("Creating the /run/docker/plugins folder") 108 | c.runSSHRaw(public_ip, "mkdir -p /run/docker/plugins") 109 | if c.config["os"] == "coreos": 110 | log("Starting flocker-docker-plugin as docker container on CoreOS on %s" % (public_ip,)) 111 | c.runSSH(public_ip, """echo 112 | 
docker run --restart=always -d --net=host --privileged \\ 113 | -e FLOCKER_CONTROL_SERVICE_BASE_URL=%s \\ 114 | -e MY_NETWORK_IDENTITY=%s \\ 115 | -v /etc/flocker:/etc/flocker \\ 116 | -v /run/docker:/run/docker \\ 117 | --name=flocker-docker-plugin \\ 118 | clusterhq/flocker-docker-plugin""" % (controlservice, private_ip,)) 119 | 120 | log("Done!") 121 | 122 | def _main(): 123 | react(main, sys.argv[1:]) 124 | 125 | if __name__ == "__main__": 126 | _main() 127 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/sample_files.py: -------------------------------------------------------------------------------- 1 | import shutil, os 2 | from pkg_resources import resource_filename 3 | 4 | def main(): 5 | for backend in ["ebs", "openstack", "zfs"]: 6 | filename = "cluster.yml.%s.sample" % (backend,) 7 | resource = resource_filename("unofficial_flocker_tools", "samples/" + filename) 8 | shutil.copyfile(resource, filename) 9 | 10 | target_dir = "terraform" 11 | terraform_templates = resource_filename("unofficial_flocker_tools", "terraform_templates") 12 | os.system("mkdir -p %(target_dir)s && cp %(terraform_templates)s/* %(target_dir)s/" 13 | % dict(terraform_templates=terraform_templates, target_dir=target_dir)) 14 | print "Copied sample files into current directory." 
15 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/samples/cluster.yml.ebs.sample: -------------------------------------------------------------------------------- 1 | cluster_name: name 2 | agent_nodes: 3 | - {public: 1.2.3.4, private: 10.2.3.4} 4 | - {public: 1.2.3.5, private: 10.2.3.5} 5 | control_node: dns.name.for.control.node # or IP address (but you must always connect via TLS to the given name here) 6 | users: 7 | - user1 8 | os: XXX # ubuntu, centos or coreos 9 | private_key_path: XXX # the key used to SSH as root onto the nodes 10 | agent_config: 11 | version: 1 12 | control-service: 13 | hostname: XXX # control_node should get substituted in here 14 | port: 4524 15 | dataset: 16 | backend: "aws" 17 | region: "us-west-1" 18 | zone: "us-west-1a" 19 | access_key_id: "foo" 20 | secret_access_key: "bar" 21 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/samples/cluster.yml.openstack.sample: -------------------------------------------------------------------------------- 1 | cluster_name: name 2 | agent_nodes: 3 | - {public: 1.2.3.4, private: 10.2.3.4} 4 | - {public: 1.2.3.5, private: 10.2.3.5} 5 | control_node: dns.name.for.control.node # or IP address (but you must always connect via TLS to the given name here) 6 | users: 7 | - user1 8 | os: XXX # ubuntu, centos or coreos 9 | private_key_path: XXX # the key used to SSH as root onto the nodes 10 | agent_config: 11 | version: 1 12 | control-service: 13 | hostname: XXX # control_node should get substituted in here 14 | port: 4524 15 | dataset: 16 | backend: "openstack" 17 | region: "LON" 18 | auth_plugin: "rackspace" 19 | username: "joe.bloggs.rackspace" 20 | api_key: "aaa-bbb-ccc-ddd" 21 | auth_url: "https://lon.identity.api.rackspacecloud.com/v2.0" 22 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/samples/cluster.yml.zfs.sample: 
-------------------------------------------------------------------------------- 1 | cluster_name: name 2 | agent_nodes: 3 | - {public: 1.2.3.4, private: 10.2.3.4} 4 | - {public: 1.2.3.5, private: 10.2.3.5} 5 | control_node: dns.name.for.control.node # or IP address (but you must always connect via TLS to the given name here) 6 | users: 7 | - user1 8 | os: XXX # ubuntu, centos or coreos 9 | private_key_path: XXX # the key used to SSH as root onto the nodes 10 | agent_config: 11 | version: 1 12 | control-service: 13 | hostname: XXX # control_node should get substituted in here 14 | port: 4524 15 | dataset: 16 | backend: "zfs" 17 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/terraform_templates/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | access_key = "${var.aws_access_key}" 3 | secret_key = "${var.aws_secret_key}" 4 | region = "${var.aws_region}" 5 | } 6 | 7 | /* 8 | http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html#default-vpc-basics 9 | 10 | x Create a default subnet in each Availability Zone. 11 | x Create an Internet gateway and connect it to your default VPC. 12 | x Create a main route table for your default VPC with a rule that sends all traffic destined for the Internet to the Internet gateway. 13 | x Create a default security group and associate it with your default VPC. 14 | ? Create a default network access control list (ACL) and associate it with your default VPC. 15 | x Associate the default DHCP options set for your AWS account with your default VPC. 
16 | */ 17 | 18 | resource "aws_vpc" "cluster_vpc" { 19 | cidr_block = "10.0.0.0/16" 20 | enable_dns_support = "true" 21 | enable_dns_hostnames = "true" 22 | tags { 23 | Name = "Flocker VPC" 24 | } 25 | } 26 | resource "aws_subnet" "cluster_subnet" { 27 | vpc_id = "${aws_vpc.cluster_vpc.id}" 28 | cidr_block = "10.0.0.0/16" 29 | availability_zone = "${var.aws_availability_zone}" 30 | map_public_ip_on_launch = "true" 31 | tags { 32 | Name = "Flocker subnet" 33 | } 34 | } 35 | resource "aws_internet_gateway" "gateway" { 36 | vpc_id = "${aws_vpc.cluster_vpc.id}" 37 | } 38 | resource "aws_route_table" "public" { 39 | vpc_id = "${aws_vpc.cluster_vpc.id}" 40 | 41 | route { 42 | cidr_block = "0.0.0.0/0" 43 | gateway_id = "${aws_internet_gateway.gateway.id}" 44 | } 45 | } 46 | resource "aws_route_table_association" "public" { 47 | subnet_id = "${aws_subnet.cluster_subnet.id}" 48 | route_table_id = "${aws_route_table.public.id}" 49 | } 50 | 51 | resource "aws_security_group" "cluster_security_group" { 52 | name = "flocker_rules" 53 | description = "Allow SSH, HTTP, Flocker APIs" 54 | vpc_id = "${aws_vpc.cluster_vpc.id}" 55 | # ssh 56 | ingress { 57 | from_port = 22 58 | to_port = 22 59 | protocol = "tcp" 60 | cidr_blocks = ["0.0.0.0/0"] 61 | } 62 | # http for demo 63 | ingress { 64 | from_port = 80 65 | to_port = 80 66 | protocol = "tcp" 67 | cidr_blocks = ["0.0.0.0/0"] 68 | } 69 | # external flocker api 70 | ingress { 71 | from_port = 4523 72 | to_port = 4523 73 | protocol = "tcp" 74 | cidr_blocks = ["0.0.0.0/0"] 75 | } 76 | # internal flocker-control port 77 | ingress { 78 | from_port = 4524 79 | to_port = 4524 80 | protocol = "tcp" 81 | self = true 82 | } 83 | # swarm 84 | ingress { 85 | from_port = 2375 86 | to_port = 2375 87 | protocol = "tcp" 88 | self = true 89 | } 90 | # swarm 91 | ingress { 92 | from_port = 2357 93 | to_port = 2357 94 | protocol = "tcp" 95 | self = true 96 | } 97 | # allow outbound traffic 98 | egress { 99 | from_port = 0 100 | to_port = 0 101 | 
protocol = "-1" 102 | cidr_blocks = ["0.0.0.0/0"] 103 | } 104 | } 105 | resource "aws_instance" "master" { 106 | ami = "${lookup(var.aws_ubuntu_amis, var.aws_region)}" 107 | instance_type = "${var.aws_instance_type}" 108 | availability_zone = "${var.aws_availability_zone}" 109 | vpc_security_group_ids = ["${aws_security_group.cluster_security_group.id}"] 110 | subnet_id = "${aws_subnet.cluster_subnet.id}" 111 | key_name = "${var.aws_key_name}" 112 | tags { 113 | Name = "Flocker master node" 114 | } 115 | } 116 | resource "aws_instance" "nodes" { 117 | ami = "${lookup(var.aws_ubuntu_amis, var.aws_region)}" 118 | instance_type = "${var.aws_instance_type}" 119 | availability_zone = "${var.aws_availability_zone}" 120 | vpc_security_group_ids = ["${aws_security_group.cluster_security_group.id}"] 121 | subnet_id = "${aws_subnet.cluster_subnet.id}" 122 | key_name = "${var.aws_key_name}" 123 | count = "${var.agent_nodes}" 124 | tags { 125 | Name = "Flocker agent node" 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/terraform_templates/cluster.tf: -------------------------------------------------------------------------------- 1 | resource "template_file" "cluster_yml" { 2 | filename = "cluster.yml.template" 3 | vars { 4 | cluster_name = "cluster" 5 | operating_system = "ubuntu" 6 | aws_region = "${var.aws_region}" 7 | aws_availability_zone = "${var.aws_availability_zone}" 8 | aws_access_key = "${var.aws_access_key}" 9 | aws_secret_key = "${var.aws_secret_key}" 10 | private_key_path = "${var.private_key_path}" 11 | agent_nodes = "${join("", formatlist(" - {public: %v, private: %v}\n", aws_instance.nodes.*.public_ip, aws_instance.nodes.*.private_ip))}" 12 | master_dns_name = "${aws_instance.master.public_dns}" 13 | } 14 | provisioner "local-exec" { 15 | command = "echo '${self.rendered}' > cluster.yml" 16 | } 17 | } 18 | 
-------------------------------------------------------------------------------- /unofficial_flocker_tools/terraform_templates/cluster.yml.template: -------------------------------------------------------------------------------- 1 | cluster_name: ${cluster_name} 2 | agent_nodes: 3 | ${agent_nodes} 4 | # TODO make this use a floating IP 5 | control_node: ${master_dns_name} 6 | users: 7 | - user 8 | os: ${operating_system} 9 | private_key_path: ${private_key_path} 10 | agent_config: 11 | version: 1 12 | control-service: 13 | hostname: ${master_dns_name} 14 | port: 4524 15 | dataset: 16 | backend: "aws" 17 | region: "${aws_region}" 18 | zone: "${aws_availability_zone}" 19 | access_key_id: "${aws_access_key}" 20 | secret_access_key: "${aws_secret_key}" 21 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/terraform_templates/terraform.tfvars.sample: -------------------------------------------------------------------------------- 1 | # AWS keys 2 | aws_access_key = "your AWS access key" 3 | aws_secret_key = "your AWS secret key" 4 | 5 | # AWS region and zone 6 | aws_region = "region you want nodes deployed in e.g. us-east-1" 7 | aws_availability_zone = "zone you want nodes deployed in e.g. 
us-east-1a" 8 | 9 | # Key to authenticate to nodes via SSH 10 | aws_key_name = "name of EC2 keypair" 11 | private_key_path = "absolute path to EC2 key (.pem file) on your local machine" 12 | 13 | # Instance types and number of nodes (total launched = agent_nodes + 1, for master) 14 | aws_instance_type = "m3.large" 15 | agent_nodes = "2" 16 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/terraform_templates/variables.tf: -------------------------------------------------------------------------------- 1 | variable "private_key_path" {} 2 | variable "aws_key_name" {} 3 | variable "aws_access_key" {} 4 | variable "aws_secret_key" {} 5 | variable "agent_nodes" { 6 | default = "2" 7 | } 8 | variable "cloud_provider" { 9 | default = "aws" 10 | } 11 | variable "aws_region" { 12 | default = "us-east-1" 13 | } 14 | variable "aws_availability_zone" { 15 | default = "us-east-1a" 16 | } 17 | variable "aws_ubuntu_amis" { 18 | # HVM EBS Ubuntu 14.04 AMIs from 19 | # http://cloud-images.ubuntu.com/locator/ec2/ as of 15th September 2015 20 | default = { 21 | ap-northeast-1 = "ami-0841ca08" 22 | ap-southeast-1 = "ami-96c2c8c4" 23 | eu-central-1 = "ami-6265657f" 24 | eu-west-1 = "ami-9d2f0fea" 25 | sa-east-1 = "ami-4ddb4e50" 26 | us-east-1 = "ami-21630d44" 27 | us-west-1 = "ami-c52dd781" 28 | cn-north-1 = "ami-18980421" 29 | us-gov-west-1 = "ami-b1d9ba92" 30 | ap-southeast-2 = "ami-f32b64c9" 31 | us-west-2 = "ami-cf3c21ff" 32 | } 33 | } 34 | variable "aws_instance_type" { 35 | default = "m3.medium" 36 | } 37 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/texttable.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # texttable - module for creating simple ASCII tables 4 | # Copyright (C) 2003-2011 Gerome Fournier 5 | # 6 | # This library is free software; you can redistribute it and/or 7 | # 
modify it under the terms of the GNU Lesser General Public 8 | # License as published by the Free Software Foundation; either 9 | # version 2.1 of the License, or (at your option) any later version. 10 | # 11 | # This library is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 | # Lesser General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU Lesser General Public 17 | # License along with this library; if not, write to the Free Software 18 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 19 | 20 | """module for creating simple ASCII tables 21 | 22 | 23 | Example: 24 | 25 | table = Texttable() 26 | table.set_cols_align(["l", "r", "c"]) 27 | table.set_cols_valign(["t", "m", "b"]) 28 | table.add_rows([ ["Name", "Age", "Nickname"], 29 | ["Mr\\nXavier\\nHuon", 32, "Xav'"], 30 | ["Mr\\nBaptiste\\nClement", 1, "Baby"] ]) 31 | print table.draw() + "\\n" 32 | 33 | table = Texttable() 34 | table.set_deco(Texttable.HEADER) 35 | table.set_cols_dtype(['t', # text 36 | 'f', # float (decimal) 37 | 'e', # float (exponent) 38 | 'i', # integer 39 | 'a']) # automatic 40 | table.set_cols_align(["l", "r", "r", "r", "l"]) 41 | table.add_rows([["text", "float", "exp", "int", "auto"], 42 | ["abcd", "67", 654, 89, 128.001], 43 | ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023], 44 | ["lmn", 5e-78, 5e-78, 89.4, .000000000000128], 45 | ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]]) 46 | print table.draw() 47 | 48 | Result: 49 | 50 | +----------+-----+----------+ 51 | | Name | Age | Nickname | 52 | +==========+=====+==========+ 53 | | Mr | | | 54 | | Xavier | 32 | | 55 | | Huon | | Xav' | 56 | +----------+-----+----------+ 57 | | Mr | | | 58 | | Baptiste | 1 | | 59 | | Clement | | Baby | 60 | +----------+-----+----------+ 61 | 62 | text float exp int 
def len(iterable):
    """Length of *iterable*, overriding the builtin within this module.

    The Python 2 original tried to decode byte strings as UTF-8 so that
    multi-byte characters counted as one.  Under Python 3, ``str.__len__``
    already counts code points, and ``str(s, 'utf')`` on a ``str`` always
    raises TypeError, so that branch was dead code hidden behind a bare
    ``except``.  Delegating straight to ``__len__`` preserves the observable
    behaviour for every input while removing the dead path.
    """
    return iterable.__len__()
def get_color_string(type, string):
    """Wrap *string* in the ANSI escape sequence *type*.

    The terminating reset sequence (``bcolors.ENDC``) is appended unless
    *type* is ``bcolors.WHITE`` (the empty string), in which case the text
    is returned uncolored and unterminated.
    """
    terminator = '' if type == bcolors.WHITE else bcolors.ENDC
    return '%s%s%s' % (type, string, terminator)
    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":

            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right

        Raises ArraySizeError (via _check_row_size) when len(array) does not
        match the row size established by a previously added header or row.
        """

        self._check_row_size(array)
        self._align = array
For example: 269 | 270 | [10, 20, 5] 271 | """ 272 | 273 | self._check_row_size(array) 274 | try: 275 | array = list(map(int, array)) 276 | if reduce(min, array) <= 0: 277 | raise ValueError 278 | except ValueError: 279 | sys.stderr.write("Wrong argument in column width specification\n") 280 | raise 281 | self._width = array 282 | 283 | def set_precision(self, width): 284 | """Set the desired precision for float/exponential formats 285 | 286 | - width must be an integer >= 0 287 | 288 | - default value is set to 3 289 | """ 290 | 291 | if not type(width) is int or width < 0: 292 | raise ValueError('width must be an integer greater then 0') 293 | self._precision = width 294 | 295 | def header(self, array): 296 | """Specify the header of the table 297 | """ 298 | 299 | self._check_row_size(array) 300 | self._header = list(map(str, array)) 301 | 302 | def add_row(self, array): 303 | """Add a row in the rows stack 304 | 305 | - cells can contain newlines and tabs 306 | """ 307 | 308 | self._check_row_size(array) 309 | 310 | if not hasattr(self, "_dtype"): 311 | self._dtype = ["a"] * self._row_size 312 | 313 | cells = [] 314 | for i,x in enumerate(array): 315 | cells.append(self._str(i,x)) 316 | self._rows.append(cells) 317 | 318 | def add_rows(self, rows, header=True): 319 | """Add several rows in the rows stack 320 | 321 | - The 'rows' argument can be either an iterator returning arrays, 322 | or a by-dimensional array 323 | - 'header' specifies if the first row should be used as the header 324 | of the table 325 | """ 326 | 327 | # nb: don't use 'iter' on by-dimensional arrays, to get a 328 | # usable code for python 2.1 329 | if header: 330 | if hasattr(rows, '__iter__') and hasattr(rows, 'next'): 331 | self.header(next(rows)) 332 | else: 333 | self.header(rows[0]) 334 | rows = rows[1:] 335 | for row in rows: 336 | self.add_row(row) 337 | 338 | 339 | def draw(self): 340 | """Draw the table 341 | 342 | - the table is returned as a whole string 343 | """ 344 | 345 | 
if not self._header and not self._rows: 346 | return 347 | self._compute_cols_width() 348 | self._check_align() 349 | out = "" 350 | if self._has_border(): 351 | out += self._hline() 352 | if self._header: 353 | out += self._draw_line(self._header, isheader=True) 354 | if self._has_header(): 355 | out += self._hline_header() 356 | length = 0 357 | for row in self._rows: 358 | length += 1 359 | out += self._draw_line(row) 360 | if self._has_hlines() and length < len(self._rows): 361 | out += self._hline() 362 | if self._has_border(): 363 | out += self._hline() 364 | return out[:-1] 365 | 366 | def _str(self, i, x): 367 | """Handles string formatting of cell data 368 | 369 | i - index of the cell datatype in self._dtype 370 | x - cell data to format 371 | """ 372 | if type(x) is str: 373 | return x 374 | else: 375 | if x is None: 376 | return str(x) 377 | else: 378 | return str(x.encode('utf-8')) 379 | 380 | def _check_row_size(self, array): 381 | """Check that the specified array fits the previous rows size 382 | """ 383 | 384 | if not self._row_size: 385 | self._row_size = len(array) 386 | elif self._row_size != len(array): 387 | raise ArraySizeError("array should contain %d elements" \ 388 | % self._row_size) 389 | 390 | def _has_vlines(self): 391 | """Return a boolean, if vlines are required or not 392 | """ 393 | 394 | return self._deco & Texttable.VLINES > 0 395 | 396 | def _has_hlines(self): 397 | """Return a boolean, if hlines are required or not 398 | """ 399 | 400 | return self._deco & Texttable.HLINES > 0 401 | 402 | def _has_border(self): 403 | """Return a boolean, if border is required or not 404 | """ 405 | 406 | return self._deco & Texttable.BORDER > 0 407 | 408 | def _has_header(self): 409 | """Return a boolean, if header line is required or not 410 | """ 411 | 412 | return self._deco & Texttable.HEADER > 0 413 | 414 | def _hline_header(self): 415 | """Print header's horizontal line 416 | """ 417 | 418 | return self._build_hline(True) 419 | 420 | def 
_hline(self): 421 | """Print an horizontal line 422 | """ 423 | 424 | if not self._hline_string: 425 | self._hline_string = self._build_hline() 426 | return self._hline_string 427 | 428 | def _build_hline(self, is_header=False): 429 | """Return a string used to separated rows or separate header from 430 | rows 431 | """ 432 | horiz = self._char_horiz 433 | if (is_header): 434 | horiz = self._char_header 435 | # compute cell separator 436 | s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()], 437 | horiz) 438 | # build the line 439 | l = s.join([horiz * n for n in self._width]) 440 | # add border if needed 441 | if self._has_border(): 442 | l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz, 443 | self._char_corner) 444 | else: 445 | l += "\n" 446 | return l 447 | 448 | def _len_cell(self, cell): 449 | """Return the width of the cell 450 | 451 | Special characters are taken into account to return the width of the 452 | cell, such like newlines and tabs 453 | """ 454 | 455 | for attr in bcolors_public_props(): 456 | cell = cell.replace(getattr(bcolors, attr), '').replace(bcolors.ENDC,'') 457 | 458 | cell_lines = cell.split('\n') 459 | maxi = 0 460 | for line in cell_lines: 461 | length = 0 462 | parts = line.split('\t') 463 | for part, i in zip(parts, list(range(1, len(parts) + 1))): 464 | for attr in bcolors_public_props(): 465 | part = part.replace(getattr(bcolors, attr), '') 466 | length = length + len(part) 467 | if i < len(parts): 468 | length = (length//8 + 1) * 8 469 | maxi = max(maxi, length) 470 | return maxi 471 | 472 | def _compute_cols_width(self): 473 | """Return an array with the width of each column 474 | 475 | If a specific width has been specified, exit. If the total of the 476 | columns width exceed the table desired width, another width will be 477 | computed to fit, and cells will be wrapped. 
478 | """ 479 | 480 | if hasattr(self, "_width"): 481 | return 482 | maxi = [] 483 | if self._header: 484 | maxi = [ self._len_cell(x) for x in self._header ] 485 | for row in self._rows: 486 | for cell,i in zip(row, list(range(len(row)))): 487 | try: 488 | maxi[i] = max(maxi[i], self._len_cell(cell)) 489 | except (TypeError, IndexError): 490 | maxi.append(self._len_cell(cell)) 491 | items = len(maxi) 492 | length = reduce(lambda x,y: x+y, maxi) 493 | if self._max_width and length + items * 3 + 1 > self._max_width: 494 | max_lengths = maxi 495 | maxi = [(self._max_width - items * 3 -1) // items \ 496 | for n in range(items)] 497 | 498 | # free space to distribute 499 | free = 0 500 | 501 | # how many columns are oversized 502 | oversized = 0 503 | 504 | # reduce size of columns that need less space and calculate how 505 | # much space is freed 506 | for col, max_len in enumerate(max_lengths): 507 | current_length = maxi[col] 508 | 509 | # column needs less space, adjust and 510 | # update free space 511 | if current_length > max_len: 512 | free += current_length - max_len 513 | maxi[col] = max_len 514 | 515 | # column needs more space, count it 516 | elif max_len > current_length: 517 | oversized += 1 518 | 519 | # as long as free space is available, distribute it 520 | while free > 0: 521 | # available free space for each oversized column 522 | free_part = int(math.ceil(float(free) / float(oversized))) 523 | 524 | for col, max_len in enumerate(max_lengths): 525 | current_length = maxi[col] 526 | 527 | # column needs more space 528 | if current_length < max_len: 529 | 530 | # how much space is needed 531 | needed = max_len - current_length 532 | 533 | # enough free space for column 534 | if needed <= free_part: 535 | maxi[col] = max_len 536 | free -= needed 537 | oversized -= 1 538 | 539 | # still oversized after re-sizing 540 | else: 541 | maxi[col] = maxi[col] + free_part 542 | free -= free_part 543 | self._width = maxi 544 | 545 | def _check_align(self): 546 | 
"""Check if alignment has been specified, set default one if not 547 | """ 548 | 549 | if not hasattr(self, "_align"): 550 | self._align = ["l"] * self._row_size 551 | if not hasattr(self, "_valign"): 552 | self._valign = ["t"] * self._row_size 553 | 554 | def _draw_line(self, line, isheader=False): 555 | """Draw a line 556 | 557 | Loop over a single cell length, over all the cells 558 | """ 559 | 560 | line = self._splitit(line, isheader) 561 | space = " " 562 | out = "" 563 | for i in range(len(line[0])): 564 | if self._has_border(): 565 | out += "%s " % self._char_vert 566 | length = 0 567 | for cell, width, align in zip(line, self._width, self._align): 568 | length += 1 569 | cell_line = cell[i] 570 | lost_color = bcolors.WHITE 571 | original_cell = cell_line 572 | for attr in bcolors_public_props(): 573 | cell_line = cell_line.replace( 574 | getattr(bcolors, attr), '').replace(bcolors.ENDC,'' 575 | ) 576 | if cell_line.replace(bcolors.ENDC,'') != original_cell.replace( 577 | bcolors.ENDC,'') and attr != 'ENDC': 578 | if not lost_color: 579 | lost_color = attr 580 | fill = width - len(cell_line) 581 | try: 582 | cell_line = get_color_string( 583 | getattr(bcolors, lost_color),cell_line 584 | ) 585 | except AttributeError: 586 | pass 587 | if isheader: 588 | align = "c" 589 | if align == "r": 590 | out += "%s " % (fill * space + cell_line) 591 | elif align == "c": 592 | out += "%s " % (fill//2 * space + cell_line \ 593 | + (fill//2 + fill%2) * space) 594 | else: 595 | out += "%s " % (cell_line + fill * space) 596 | if length < len(line): 597 | out += "%s " % [space, self._char_vert][self._has_vlines()] 598 | out += "%s\n" % ['', self._char_vert][self._has_border()] 599 | return out 600 | 601 | def _splitit(self, line, isheader): 602 | """Split each element of line to fit the column width 603 | 604 | Each element is turned into a list, result of the wrapping of the 605 | string to the desired width 606 | """ 607 | 608 | line_wrapped = [] 609 | for cell, width in 
zip(line, self._width): 610 | array = [] 611 | original_cell = cell 612 | lost_color = bcolors.WHITE 613 | for attr in bcolors_public_props(): 614 | cell = cell.replace( 615 | getattr(bcolors, attr), '').replace(bcolors.ENDC,'') 616 | if cell.replace(bcolors.ENDC,'') != original_cell.replace( 617 | bcolors.ENDC,'') and attr != 'ENDC': 618 | if not lost_color: 619 | lost_color = attr 620 | for c in cell.split('\n'): 621 | if type(c) is not str: 622 | try: 623 | c = str(c, 'utf') 624 | except UnicodeDecodeError as strerror: 625 | sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror)) 626 | c = str(c, 'utf', 'replace') 627 | try: 628 | array.extend( 629 | [get_color_string( 630 | getattr(bcolors, lost_color),x 631 | ) for x in textwrap.wrap(c, width) 632 | ] 633 | ) 634 | except AttributeError: 635 | array.extend(textwrap.wrap(c, width)) 636 | line_wrapped.append(array) 637 | max_cell_lines = reduce(max, list(map(len, line_wrapped))) 638 | for cell, valign in zip(line_wrapped, self._valign): 639 | if isheader: 640 | valign = "t" 641 | if valign == "m": 642 | missing = max_cell_lines - len(cell) 643 | cell[:0] = [""] * (missing // 2) 644 | cell.extend([""] * (missing // 2 + missing % 2)) 645 | elif valign == "b": 646 | cell[:0] = [""] * (max_cell_lines - len(cell)) 647 | else: 648 | cell.extend([""] * (max_cell_lines - len(cell))) 649 | return line_wrapped 650 | 651 | if __name__ == '__main__': 652 | table = Texttable() 653 | table.set_cols_align(["l", "r", "c"]) 654 | table.set_cols_valign(["t", "m", "b"]) 655 | table.add_rows([ [get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"], 656 | ["Mr\nXavier\nHuon", 32, "Xav'"], 657 | [get_color_string(bcolors.BLUE,"Mr\nBaptiste\nClement"), 1, get_color_string(bcolors.RED,"Baby")] ]) 658 | print(table.draw() + "\n") 659 | 660 | table = Texttable() 661 | table.set_deco(Texttable.HEADER) 662 | table.set_cols_dtype(['t', # text 663 | 'f', # float (decimal) 664 | 'e', # float 
(exponent) 665 | 'i', # integer 666 | 'a']) # automatic 667 | table.set_cols_align(["l", "r", "r", "r", "l"]) 668 | table.add_rows([['text', "float", "exp", "int", "auto"], 669 | ["abcd", "67", 654, 89, 128.001], 670 | ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023], 671 | ["lmn", 5e-78, 5e-78, 89.4, .000000000000128], 672 | ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]]) 673 | print(table.draw()) 674 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/txflocker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/unofficial_flocker_tools/txflocker/__init__.py -------------------------------------------------------------------------------- /unofficial_flocker_tools/txflocker/client.py: -------------------------------------------------------------------------------- 1 | """ 2 | A collection of utilities for using the flocker REST API. 
3 | """ 4 | 5 | from treq.client import HTTPClient 6 | 7 | from twisted.internet import reactor, ssl, defer 8 | from twisted.python.usage import UsageError 9 | from twisted.python.filepath import FilePath 10 | from twisted.web.client import Agent 11 | 12 | import yaml 13 | import treq 14 | import copy 15 | 16 | def process_metadata(metadata_str): 17 | if not metadata_str: 18 | return {} 19 | metadata = {} 20 | try: 21 | for pair in metadata_str.split(","): 22 | k, v = pair.split("=") 23 | metadata[k] = v 24 | except: 25 | raise UsageError("malformed metadata specification " 26 | "'%s', please use format 'a=b,c=d'" % 27 | (metadata_str,)) 28 | return metadata 29 | 30 | def parse_num(expression): 31 | if not expression: 32 | return None 33 | expression = expression.encode("ascii") 34 | unit = expression.translate(None, "1234567890.") 35 | num = expression.replace(unit, "") 36 | unit = unit.lower() 37 | if unit == 'tb' or unit == 't' or unit =='tib': 38 | return int(float(num)*1024*1024*1024*1024) 39 | elif unit == 'gb' or unit == 'g' or unit =='gib': 40 | return int(float(num)*1024*1024*1024) 41 | elif unit == 'mb' or unit == 'm' or unit =='mib': 42 | return int(float(num)*1024*1024) 43 | elif unit == 'kb' or unit == 'k' or unit =='kib': 44 | return int(float(num)*1024) 45 | else: 46 | return int(float(num)) 47 | 48 | def combined_state(client, base_url, deleted): 49 | d1 = client.get(base_url + "/configuration/datasets") 50 | d1.addCallback(treq.json_content) 51 | 52 | d2 = client.get(base_url + "/state/datasets") 53 | d2.addCallback(treq.json_content) 54 | 55 | d3 = client.get(base_url + "/state/nodes") 56 | d3.addCallback(treq.json_content) 57 | 58 | ds = [d1, d2, d3] 59 | 60 | d = defer.gatherResults(ds) 61 | def got_results(results): 62 | configuration_datasets, state_datasets, state_nodes = results 63 | 64 | # build up a table, based on which datasets are in the 65 | # configuration, adding data from the state as necessary 66 | configuration_map = 
dict((d["dataset_id"], d) for d in 67 | configuration_datasets) 68 | state_map = dict((d["dataset_id"], d) for d in state_datasets) 69 | nodes_map = dict((n["uuid"], n) for n in state_nodes) 70 | 71 | #print "got state:" 72 | #pprint.pprint(state_datasets) 73 | #print 74 | 75 | objects = [] 76 | 77 | for (key, dataset) in configuration_map.iteritems(): 78 | dataset = copy.copy(dataset) 79 | if dataset["deleted"]: 80 | if key in state_map: 81 | status = "deleting" 82 | else: 83 | status = "deleted" 84 | if not deleted: 85 | # we are hiding deleted datasets 86 | continue 87 | else: 88 | if key in state_map: 89 | if ("primary" in state_map[key] and 90 | state_map[key]["primary"] in nodes_map): 91 | status = u"attached \u2705" 92 | else: 93 | status = u"detached" 94 | else: 95 | # not deleted, not in state, probably waiting for it to 96 | # show up. 97 | status = u"pending \u231b" 98 | 99 | dataset["status"] = status 100 | 101 | meta = [] 102 | if dataset["metadata"]: 103 | for k, v in dataset["metadata"].iteritems(): 104 | meta.append("%s=%s" % (k, v)) 105 | 106 | dataset["meta"] = ",".join(meta) 107 | 108 | if dataset["primary"] in nodes_map: 109 | primary = nodes_map[dataset["primary"]] 110 | node = dict(uuid=primary["uuid"], host=primary["host"]) 111 | else: 112 | node = None 113 | 114 | dataset["node"] = node 115 | 116 | dataset["short_dataset_id"] = dataset["dataset_id"][:8] 117 | 118 | if dataset.get("maximum_size"): 119 | size = "%.2fG" % (dataset["maximum_size"] 120 | / (1024 * 1024 * 1024.),) 121 | else: 122 | # must be a backend with quotas instead of sizes 123 | size = "" 124 | 125 | dataset["size"] = size 126 | objects.append(dataset) 127 | return objects 128 | d.addCallback(got_results) 129 | return d 130 | 131 | def get_client(reactor=reactor, certificates_path=FilePath("/etc/flocker"), 132 | user_certificate_filename="plugin.crt", user_key_filename="plugin.key", 133 | cluster_certificate_filename="cluster.crt", target_hostname=None): 134 | """ 135 | 
Create a ``treq``-API object that implements the REST API TLS 136 | authentication. 137 | 138 | That is, validating the control service as well as presenting a 139 | certificate to the control service for authentication. 140 | 141 | :return: ``treq`` compatible object. 142 | """ 143 | if target_hostname is None: 144 | config = certificates_path.child("agent.yml") 145 | if config.exists(): 146 | agent_config = yaml.load(config.open()) 147 | target_hostname = agent_config["control-service"]["hostname"] 148 | 149 | user_crt = certificates_path.child(user_certificate_filename) 150 | user_key = certificates_path.child(user_key_filename) 151 | cluster_crt = certificates_path.child(cluster_certificate_filename) 152 | 153 | if (user_crt.exists() and user_key.exists() and cluster_crt.exists() 154 | and target_hostname is not None): 155 | # we are installed on a flocker node with a certificate, try to reuse 156 | # it for auth against the control service 157 | cert_data = cluster_crt.getContent() 158 | auth_data = user_crt.getContent() + user_key.getContent() 159 | 160 | authority = ssl.Certificate.loadPEM(cert_data) 161 | client_certificate = ssl.PrivateCertificate.loadPEM(auth_data) 162 | 163 | class ContextFactory(object): 164 | def getContext(self, hostname, port): 165 | context = client_certificate.options(authority).getContext() 166 | return context 167 | 168 | return HTTPClient(Agent(reactor, contextFactory=ContextFactory())) 169 | else: 170 | raise Exception("Not enough information to construct TLS context: " 171 | "user_crt: %s, cluster_crt: %s, user_key: %s, target_hostname: %s" % ( 172 | user_crt, cluster_crt, user_key, target_hostname)) 173 | -------------------------------------------------------------------------------- /unofficial_flocker_tools/utils.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from pipes import quote 3 | import yaml 4 | import os 5 | import time 6 | from contextlib import 
def append_to_install_log(s):
    """Append a timestamped entry to install-log.txt in the working dir.

    Each line has the form ``<unix-epoch-seconds>, <message>``.

    Fix: the previous open/write/close sequence leaked the file handle if
    the write raised; the with-statement guarantees the file is closed.
    """
    with open('install-log.txt', 'a') as fp:
        fp.write(str(int(time.time())) + ", " + s + "\n")
def verify_socket(host, port, timeout=None, connect_timeout=5):
    """
    Wait until the destination can be connected to.

    :param bytes host: Host to connect to.
    :param int port: Port to connect to.
    :param timeout: Overall number of seconds to keep retrying before the
        returned Deferred fails, or None to retry forever.
    :param int connect_timeout: Timeout (seconds) for each individual
        connection attempt.

    :return Deferred: Firing when connection is possible.
    """
    def can_connect():
        # connect_ex returns 0 on success instead of raising
        with closing(socket()) as s:
            s.settimeout(connect_timeout)
            conn = s.connect_ex((host, port))
            return conn == 0

    log("Attempting to connect to %s:%s..." % (host, port))
    dl = loop_until(can_connect, timeout=timeout)
    then = time.time()
    def success(result, ip, port):
        log("Connected to %s:%s after %.2f seconds!" % (ip, port, time.time() - then))
    def failure(result, ip, port):
        log("Failed to connect to %s:%s after %.2f seconds :(" % (ip, port, time.time() - then))
    dl.addCallback(success, ip=host, port=port)
    dl.addErrback(failure, ip=host, port=port)
    return dl
% (message, failure.getErrorMessage()) 115 | d = deferLater(reactor, 1.0, predicate) 116 | d.addErrback(loop) 117 | return d 118 | d.addErrback(loop) 119 | return d 120 | 121 | def loop_until(predicate, timeout=None, message="task"): 122 | """ 123 | Call predicate every second, until it returns something ``Truthy``. 124 | 125 | :param predicate: Callable returning termination condition. 126 | :type predicate: 0-argument callable returning a Deferred. 127 | 128 | :return: A ``Deferred`` firing with the first ``Truthy`` response from 129 | ``predicate``, or, if predicate didn't fire truthfully within the 130 | timeout, raise TimeoutError(). 131 | """ 132 | d = maybeDeferred(predicate) 133 | then = time.time() 134 | def loop(result): 135 | if timeout and time.time() - then > timeout: 136 | raise TimeoutError() 137 | if not result: 138 | print "Retrying %s given result %r..." % (message, result) 139 | d = deferLater(reactor, 1.0, predicate) 140 | d.addCallback(loop) 141 | return d 142 | return result 143 | d.addCallback(loop) 144 | return d 145 | 146 | class UsageError(Exception): 147 | pass 148 | 149 | class Configurator(object): 150 | def __init__(self, configFile): 151 | self.configFile = configFile 152 | self.config = yaml.load(open(self.configFile)) 153 | # set some defaults 154 | self.config["private_key_path"] = self.get_container_facing_key_path() 155 | self.config["remote_server_username"] = self.config.get("remote_server_username", "root") 156 | 157 | def get_user_facing_key_path(self): 158 | config = yaml.load(open(self.configFile)) 159 | return config["private_key_path"] 160 | 161 | def get_container_facing_key_path(self): 162 | private_key_path = self.get_user_facing_key_path() 163 | if "CONTAINERIZED" in os.environ: 164 | private_key_path = "/host" + private_key_path 165 | return private_key_path 166 | 167 | def runSSH(self, ip, command, username=None): 168 | command = 'ssh -o ServerAliveInterval=10 -o LogLevel=error -o UserKnownHostsFile=/dev/null -o 
StrictHostKeyChecking=no -i %s %s@%s %s' % (self.config["private_key_path"], 169 | username if username is not None else self.config["remote_server_username"], 170 | ip, " ".join(map(quote, ["bash", "-c", "echo; " + command]))) 171 | verbose_log("runSSH:", command) 172 | result = subprocess.check_output(command, shell=True) 173 | verbose_log("runSSH result of", command, " - ", result) 174 | return result 175 | 176 | def runSSHAsync(self, ip, command, username=None, retry_with_timeout=600): 177 | """ 178 | Use Twisted APIs, assuming a reactor is running, to return a deferred 179 | which fires with the result. 180 | """ 181 | executable = "/usr/bin/ssh" 182 | command = ['-o', 'ServerAliveInterval=10', '-o', 'LogLevel=error', '-o', 'UserKnownHostsFile=/dev/null', '-o', 183 | 'StrictHostKeyChecking=no', '-i', 184 | self.config["private_key_path"], "%s@%s" % ( 185 | username if username is not None else self.config["remote_server_username"], ip), 186 | " ".join(map(quote, ["bash", "-c", "echo; " + command]))] 187 | verbose_log("runSSHAsync:", command) 188 | if retry_with_timeout is not None: 189 | d = loop_until_success(lambda: getSensibleProcessOutput(executable, command), 190 | timeout=retry_with_timeout, 191 | message="running %s on %s" % (command, ip)) 192 | else: 193 | d = getSensibleProcessOutput(executable, command) 194 | d.addBoth(verbose_log_callback, message="runSSHAsync result of %s - " % (command,)) 195 | return d 196 | 197 | def runSSHRaw(self, ip, command, username=None): 198 | command = 'ssh -o ServerAliveInterval=10 -o LogLevel=error -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s %s@%s %s' % (self.config["private_key_path"], 199 | username if username is not None else self.config["remote_server_username"], 200 | ip, command) 201 | verbose_log("runSSHRaw:", command) 202 | result = subprocess.check_output(command, shell=True) 203 | verbose_log("runSSHRaw result of", command, " - ", result) 204 | return result 205 | 206 | def run(self, 
command): 207 | verbose_log("run:", command) 208 | result = subprocess.check_output(command, shell=True) 209 | verbose_log("run result of", command, " - ", result) 210 | return result 211 | 212 | def scp(self, local_path, external_ip, remote_path, 213 | private_key_path=None, remote_server_username=None, async=False, 214 | retry_with_timeout=600, reverse=False): 215 | if retry_with_timeout and not async: 216 | raise UsageError("Can't retry_with_timeout if not async") 217 | if private_key_path is not None: 218 | private_key_path = self.config["private_key_path"] 219 | if remote_server_username is not None: 220 | remote_server_username = self.config["remote_server_username"] 221 | if reverse: 222 | scp = ("scp -o LogLevel=error -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %(private_key_path)s " 223 | "%(remote_server_username)s@%(external_ip)s:%(remote_path)s %(local_path)s") % dict( 224 | private_key_path=self.config["private_key_path"], 225 | remote_server_username=self.config["remote_server_username"], 226 | external_ip=external_ip, remote_path=remote_path, 227 | local_path=local_path) 228 | else: 229 | scp = ("scp -o LogLevel=error -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %(private_key_path)s %(local_path)s " 230 | "%(remote_server_username)s@%(external_ip)s:%(remote_path)s") % dict( 231 | private_key_path=self.config["private_key_path"], 232 | remote_server_username=self.config["remote_server_username"], 233 | external_ip=external_ip, remote_path=remote_path, 234 | local_path=local_path) 235 | if async: 236 | verbose_log("scp async:", scp) 237 | if retry_with_timeout is not None: 238 | d = loop_until_success(lambda: getSensibleProcessOutput("/bin/bash", ["-c", scp]), 239 | timeout=retry_with_timeout, 240 | message="uploading %s to %s" % (local_path, external_ip)) 241 | else: 242 | d = getSensibleProcessOutput("/bin/bash", ["-c", scp]) 243 | d.addBoth(verbose_log_callback, message="scp async result of %s - " % (scp,)) 244 | 
return d 245 | else: 246 | verbose_log("scp sync:", scp) 247 | result = subprocess.check_output(scp, shell=True) 248 | verbose_log("scp sync result of", scp, " - ", result) 249 | return result 250 | -------------------------------------------------------------------------------- /web/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | # Last build date - this can be updated whenever there are security updates so 4 | # that everything is rebuilt 5 | ENV security_updates_as_of 2014-07-06 6 | 7 | # Install security updates and required packages 8 | RUN apt-get -qy update 9 | RUN apt-get -qy upgrade 10 | RUN apt-get -qy install python-pip 11 | RUN apt-get -qy install python-dev 12 | RUN apt-get -qy install python-pyasn1 13 | RUN apt-get -qy install libyaml-dev 14 | RUN apt-get -qy install libffi-dev 15 | RUN apt-get -qy install libssl-dev 16 | 17 | ADD . /app 18 | 19 | WORKDIR /app 20 | 21 | # Install requirements from the project's setup.py 22 | RUN pip install . 23 | 24 | #CMD ["twistd", "-noy", "server.tac"] 25 | CMD ["./start.sh"] 26 | -------------------------------------------------------------------------------- /web/app/config.js: -------------------------------------------------------------------------------- 1 | /*global angular*/ 2 | (function () { 3 | "use strict"; 4 | 5 | var DEBUG = false; 6 | //var BASE_URL = 'https://test.labs.clusterhq.com:4523/v1' 7 | //var BASE_URL = 'v1/' 8 | //var BASE_URL = 'http://192.168.1.102:8088/v1/' 9 | var BASE_URL = '/v1/' 10 | 11 | var app = angular.module('myApp', ['ng-admin']); 12 | 13 | function getUrlMapper(base){ 14 | return function(entityName, viewType, identifierValue, identifierName) { 15 | 16 | var url = base 17 | 18 | if(identifierValue){ 19 | url += '/' + identifierValue 20 | } 21 | 22 | return url 23 | //return '/comments/' + entityName + '_' + viewType + '?' 
+ identifierName + '=' + identifierValue; // Can be absolute or relative 24 | } 25 | } 26 | 27 | app.config(['NgAdminConfigurationProvider', 'RestangularProvider', function (NgAdminConfigurationProvider, RestangularProvider) { 28 | var nga = NgAdminConfigurationProvider; 29 | 30 | // truncate a long uuid to a short version 31 | function short_uuid(value) { 32 | return value.split('-')[0] 33 | } 34 | 35 | // use the custom query parameters function to format the API request correctly 36 | /* 37 | RestangularProvider.addFullRequestInterceptor(function(element, operation, what, url, headers, params) { 38 | if (operation == "getList") { 39 | // custom pagination params 40 | if (params._page) { 41 | params._start = (params._page - 1) * params._perPage; 42 | params._end = params._page * params._perPage; 43 | } 44 | delete params._page; 45 | delete params._perPage; 46 | // custom sort params 47 | if (params._sortField) { 48 | params._sort = params._sortField; 49 | delete params._sortField; 50 | } 51 | // custom filters 52 | if (params._filters) { 53 | for (var filter in params._filters) { 54 | params[filter] = params._filters[filter]; 55 | } 56 | delete params._filters; 57 | } 58 | } 59 | return { params: params }; 60 | });*/ 61 | 62 | var admin = nga.application('Flocker GUI') // application main title 63 | .debug(DEBUG) // debug disabled 64 | .baseApiUrl(BASE_URL); // main API endpoint 65 | 66 | // define all entities at the top to allow references between them 67 | var node = nga.entity('nodes') 68 | .baseApiUrl(BASE_URL) 69 | .identifier(nga.field('uuid')) 70 | .readOnly() 71 | 72 | var volume = nga.entity('datasets') 73 | .baseApiUrl(BASE_URL) 74 | .identifier(nga.field('dataset_id')) 75 | .url(getUrlMapper('datasets')) 76 | 77 | var configuration = nga.entity('configuration') 78 | .baseApiUrl(BASE_URL) 79 | .identifier(nga.field('dataset_id')) 80 | .readOnly() 81 | .url(getUrlMapper('configuration/datasets')) 82 | 83 | var state = nga.entity('state') 84 | 
.baseApiUrl(BASE_URL) 85 | .identifier(nga.field('dataset_id')) 86 | .readOnly() 87 | .url(getUrlMapper('state/datasets')) 88 | 89 | // set the application entities 90 | admin 91 | .addEntity(node) 92 | .addEntity(volume) 93 | .addEntity(configuration) 94 | .addEntity(state) 95 | 96 | // customize entities and views 97 | /* 98 | node.dashboardView() // customize the dashboard panel for this entity 99 | .name('nodes') 100 | .title('Your nodes') 101 | .order(1) // display the post panel first in the dashboard 102 | .perPage(5) // limit the panel to the 5 latest posts 103 | .fields([ 104 | nga.field('host').isDetailLink(true), 105 | nga.field('uuid').label('uuid').map(short_uuid) 106 | ]); // fields() called with arguments add fields to the view 107 | 108 | volume.dashboardView() // customize the dashboard panel for this entity 109 | .name('volumes') 110 | .title('Your datasets') 111 | .order(1) // display the post panel first in the dashboard 112 | .perPage(5) // limit the panel to the 5 latest posts 113 | .fields([ 114 | nga.field('short_dataset_id').label('Dataset ID').isDetailLink(true), 115 | nga.field('primary', 'reference') // ReferenceMany translates to a select multiple 116 | .label('Primary') 117 | .targetEntity(node) 118 | .targetField(nga.field('host')), 119 | nga.field('status'), 120 | nga.field('deleted', 'boolean'), 121 | nga.field('meta'), 122 | nga.field('size') 123 | ]); 124 | */ 125 | /* 126 | configuration.dashboardView() // customize the dashboard panel for this entity 127 | .name('configuration') 128 | .title('Your configuration') 129 | .order(1) // display the post panel first in the dashboard 130 | .perPage(5) // limit the panel to the 5 latest posts 131 | .fields([ 132 | nga.field('dataset_id').label('dataset_id').map(short_uuid), 133 | nga.field('deleted', 'boolean') 134 | ]); 135 | 136 | state.dashboardView() // customize the dashboard panel for this entity 137 | .name('state') 138 | .title('Your state') 139 | .order(1) // display the post 
panel first in the dashboard 140 | .perPage(5) // limit the panel to the 5 latest posts 141 | .fields([ 142 | nga.field('dataset_id').label('dataset_id').map(short_uuid) 143 | ]); 144 | */ 145 | node.listView() 146 | .title('All nodes') // default title is "[Entity_name] list" 147 | .description('Show the nodes in your cluster') // description appears under the title 148 | .infinitePagination(true) // load pages as the user scrolls 149 | .fields([ 150 | nga.field('host'), 151 | nga.field('uuid').label('uuid').map(short_uuid) 152 | 153 | ]) 154 | .listActions(['show']); 155 | 156 | volume.listView() 157 | .title('All datasets') // default title is "[Entity_name] list" 158 | .description('Show the datasets in your cluster') // description appears under the title 159 | .infinitePagination(true) // load pages as the user scrolls 160 | .fields([ 161 | nga.field('short_dataset_id').label('Dataset ID').isDetailLink(true), 162 | nga.field('primary', 'reference') // ReferenceMany translates to a select multiple 163 | .label('Primary') 164 | .targetEntity(node) 165 | .targetField(nga.field('host')), 166 | nga.field('status'), 167 | // nga.field('deleted', 'boolean'), 168 | nga.field('meta'), 169 | nga.field('size') 170 | //status 171 | //meta 172 | //node 173 | //size 174 | ]) 175 | .listActions(['show', 'edit'/*, 'delete'*/]); 176 | 177 | configuration.listView() 178 | .title('All configuration') // default title is "[Entity_name] list" 179 | .description('Show the configuration of datasets in your cluster') // description appears under the title 180 | .infinitePagination(true) // load pages as the user scrolls 181 | .fields([ 182 | nga.field('dataset_id').label('dataset_id').map(short_uuid), 183 | //nga.field('deleted', 'boolean'), 184 | nga.field('maximum_size'), 185 | nga.field('primary', 'reference') // ReferenceMany translates to a select multiple 186 | .label('Node') 187 | .targetEntity(node) 188 | .targetField(nga.field('host')) 189 | ]) 190 | .listActions(['show', 
'edit', 'delete']); 191 | 192 | state.listView() 193 | .title('All state') // default title is "[Entity_name] list" 194 | .description('Show the state of datasets in your cluster') // description appears under the title 195 | .infinitePagination(true) // load pages as the user scrolls 196 | .fields([ 197 | nga.field('dataset_id').label('dataset_id').map(short_uuid) 198 | ]) 199 | .listActions(['show']); 200 | 201 | node.showView() // a showView displays one entry in full page - allows to display more data than in a a list 202 | .fields([ 203 | nga.field('host'), 204 | nga.field('uuid').label('uuid').map(short_uuid) 205 | 206 | ]); 207 | 208 | volume.showView() // a showView displays one entry in full page - allows to display more data than in a a list 209 | .fields([ 210 | nga.field('short_dataset_id').label('Dataset ID'), 211 | // nga.field('deleted', 'boolean'), 212 | nga.field('size'), 213 | nga.field('meta'), 214 | nga.field('status') 215 | ]); 216 | 217 | configuration.showView() // a showView displays one entry in full page - allows to display more data than in a a list 218 | .fields([ 219 | nga.field('dataset_id').label('dataset_id').map(short_uuid), 220 | //nga.field('deleted', 'boolean'), 221 | nga.field('maximum_size') 222 | ]); 223 | 224 | 225 | state.showView() // a showView displays one entry in full page - allows to display more data than in a a list 226 | .fields([ 227 | nga.field('dataset_id').label('dataset_id').map(short_uuid) 228 | ]); 229 | 230 | 231 | volume.creationView() 232 | .fields([ 233 | nga.field('primary', 'reference') // ReferenceMany translates to a select multiple 234 | .label('Node') 235 | .targetEntity(node) 236 | .targetField(nga.field('host')), 237 | nga.field('size').label('Maximum Size'), 238 | nga.field('meta').label('Metadata') 239 | ]); 240 | 241 | volume.editionView() 242 | .title('Edit dataset "{{ entry.values.short_dataset_id }}"') 243 | .fields([ 244 | nga.field('primary', 'reference') // ReferenceMany translates to a 
select multiple 245 | .label('Node') 246 | .targetEntity(node) 247 | .targetField(nga.field('host')) 248 | ]); 249 | 250 | // customize header 251 | var customHeaderTemplate = 252 | ''; 259 | admin.header(customHeaderTemplate); 260 | 261 | // customize menu 262 | admin.menu(nga.menu() 263 | /* 264 | .addChild( 265 | nga.menu() 266 | .title('Dashboard') 267 | .link('dashboard') 268 | .icon('') 269 | )*/ 270 | .addChild( 271 | nga.menu(node) 272 | .title('Nodes') 273 | .icon('') 274 | ) 275 | .addChild( 276 | nga.menu(volume) 277 | .title('Datasets') 278 | .icon('') 279 | )/* 280 | .addChild( 281 | nga.menu() 282 | .title('Debug') 283 | .icon('') 284 | .addChild( 285 | nga.menu(configuration) 286 | .title('Configuration') 287 | .icon('') 288 | ) 289 | .addChild( 290 | nga.menu(state) 291 | .title('State') 292 | .icon('') 293 | ) 294 | )*/ 295 | ); 296 | 297 | nga.configure(admin); 298 | }]); 299 | 300 | }()); 301 | 302 | // a total hack to reload lists every 5 seconds 303 | function reloadData(){ 304 | var time = new Date().getTime() 305 | var url = document.location.toString() 306 | 307 | var hashParts = url.split('#') 308 | if((hashParts[1] || '').match(/^\/\w+\/list/)){ 309 | var urlParts = url.split('?') 310 | var query = urlParts[1] || '' 311 | var currentValue = 'ASC' 312 | var pairs = query.split('&').filter(function(pair){ 313 | var parts = pair.split('=') 314 | if(parts[0]=='sortDir'){ 315 | currentValue = parts[1] 316 | return false 317 | } 318 | return true 319 | }) 320 | 321 | var newValue = currentValue=='ASC' ? 'DESC' : 'ASC' 322 | pairs.push('sortDir=' + newValue) 323 | var newURL = urlParts[0] + '?' 
+ pairs.join('&') 324 | document.location = newURL 325 | } 326 | 327 | } 328 | 329 | setInterval(reloadData, 10000) 330 | 331 | -------------------------------------------------------------------------------- /web/app/example.js: -------------------------------------------------------------------------------- 1 | /*global angular*/ 2 | (function () { 3 | "use strict"; 4 | 5 | var app = angular.module('myApp', ['ng-admin']); 6 | 7 | app.config(['NgAdminConfigurationProvider', 'RestangularProvider', function (NgAdminConfigurationProvider, RestangularProvider) { 8 | var nga = NgAdminConfigurationProvider; 9 | 10 | function truncate(value) { 11 | if (!value) { 12 | return ''; 13 | } 14 | 15 | return value.length > 50 ? value.substr(0, 50) + '...' : value; 16 | } 17 | 18 | // use the custom query parameters function to format the API request correctly 19 | RestangularProvider.addFullRequestInterceptor(function(element, operation, what, url, headers, params) { 20 | if (operation == "getList") { 21 | // custom pagination params 22 | if (params._page) { 23 | params._start = (params._page - 1) * params._perPage; 24 | params._end = params._page * params._perPage; 25 | } 26 | delete params._page; 27 | delete params._perPage; 28 | // custom sort params 29 | if (params._sortField) { 30 | params._sort = params._sortField; 31 | delete params._sortField; 32 | } 33 | // custom filters 34 | if (params._filters) { 35 | for (var filter in params._filters) { 36 | params[filter] = params._filters[filter]; 37 | } 38 | delete params._filters; 39 | } 40 | } 41 | return { params: params }; 42 | }); 43 | 44 | var admin = nga.application('ng-admin backend demo') // application main title 45 | .debug(false) // debug disabled 46 | .baseApiUrl('http://localhost:3000/'); // main API endpoint 47 | 48 | // define all entities at the top to allow references between them 49 | var post = nga.entity('posts'); // the API endpoint for posts will be http://localhost:3000/posts/:id 50 | 51 | var comment = 
nga.entity('comments') 52 | .baseApiUrl('http://localhost:3000/') // The base API endpoint can be customized by entity 53 | .identifier(nga.field('id')); // you can optionally customize the identifier used in the api ('id' by default) 54 | 55 | var tag = nga.entity('tags') 56 | .readOnly(); // a readOnly entity has disabled creation, edition, and deletion views 57 | 58 | // set the application entities 59 | admin 60 | .addEntity(post) 61 | .addEntity(tag) 62 | .addEntity(comment); 63 | 64 | // customize entities and views 65 | 66 | post.dashboardView() // customize the dashboard panel for this entity 67 | .name('posts') 68 | .title('Recent posts') 69 | .order(1) // display the post panel first in the dashboard 70 | .perPage(5) // limit the panel to the 5 latest posts 71 | .fields([nga.field('title').isDetailLink(true).map(truncate)]); // fields() called with arguments add fields to the view 72 | 73 | post.listView() 74 | .title('All posts') // default title is "[Entity_name] list" 75 | .description('List of posts with infinite pagination') // description appears under the title 76 | .infinitePagination(true) // load pages as the user scrolls 77 | .fields([ 78 | nga.field('id').label('id'), // The default displayed name is the camelCase field name. 
label() overrides id 79 | nga.field('title'), // the default list field type is "string", and displays as a string 80 | nga.field('published_at', 'date'), // Date field type allows date formatting 81 | nga.field('average_note', 'float'), // Float type also displays decimal digits 82 | nga.field('views', 'number'), 83 | nga.field('tags', 'reference_many') // a Reference is a particular type of field that references another entity 84 | .targetEntity(tag) // the tag entity is defined later in this file 85 | .targetField(nga.field('name')) // the field to be displayed in this list 86 | ]) 87 | .listActions(['show', 'edit', 'delete']); 88 | 89 | post.creationView() 90 | .fields([ 91 | nga.field('title') // the default edit field type is "string", and displays as a text input 92 | .attributes({ placeholder: 'the post title' }) // you can add custom attributes, too 93 | .validation({ required: true, minlength: 3, maxlength: 100 }), // add validation rules for fields 94 | nga.field('teaser', 'text'), // text field type translates to a textarea 95 | nga.field('body', 'wysiwyg'), // overriding the type allows rich text editing for the body 96 | nga.field('published_at', 'date') // Date field type translates to a datepicker 97 | ]); 98 | 99 | var subCategories = [ 100 | { category: 'tech', label: 'Computers', value: 'computers' }, 101 | { category: 'tech', label: 'Gadgets', value: 'gadgets' }, 102 | { category: 'lifestyle', label: 'Travel', value: 'travel' }, 103 | { category: 'lifestyle', label: 'Fitness', value: 'fitness' } 104 | ]; 105 | 106 | post.editionView() 107 | .title('Edit post "{{ entry.values.title }}"') // title() accepts a template string, which has access to the entry 108 | .actions(['list', 'show', 'delete']) // choose which buttons appear in the top action bar. Show is disabled by default 109 | .fields([ 110 | post.creationView().fields(), // fields() without arguments returns the list of fields. 
That way you can reuse fields from another view to avoid repetition 111 | nga.field('category', 'choice') // a choice field is rendered as a dropdown in the edition view 112 | .choices([ // List the choice as object literals 113 | { label: 'Tech', value: 'tech' }, 114 | { label: 'Lifestyle', value: 'lifestyle' } 115 | ]), 116 | nga.field('subcategory', 'choice') 117 | .choices(function(entry) { // choices also accepts a function to return a list of choices based on the current entry 118 | return subCategories.filter(function (c) { 119 | return c.category === entry.values.category 120 | }); 121 | }), 122 | nga.field('tags', 'reference_many') // ReferenceMany translates to a select multiple 123 | .targetEntity(tag) 124 | .targetField(nga.field('name')) 125 | .cssClasses('col-sm-4'), // customize look and feel through CSS classes 126 | nga.field('pictures', 'json'), 127 | nga.field('views', 'number') 128 | .cssClasses('col-sm-4'), 129 | nga.field('average_note', 'float') 130 | .cssClasses('col-sm-4'), 131 | nga.field('comments', 'referenced_list') // display list of related comments 132 | .targetEntity(comment) 133 | .targetReferenceField('post_id') 134 | .targetFields([ 135 | nga.field('created_at').label('Posted'), 136 | nga.field('body').label('Comment') 137 | ]) 138 | .sortField('created_at') 139 | .sortDir('DESC'), 140 | nga.field('', 'template').label('') 141 | .template('') 142 | ]); 143 | 144 | post.showView() // a showView displays one entry in full page - allows to display more data than in a a list 145 | .fields([ 146 | nga.field('id'), 147 | post.editionView().fields(), // reuse fields from another view in another order 148 | nga.field('custom_action', 'template') 149 | .label('') 150 | .template('') 151 | ]); 152 | 153 | comment.dashboardView() 154 | .title('Last comments') 155 | .order(2) // display the comment panel second in the dashboard 156 | .perPage(5) 157 | .fields([ 158 | nga.field('id'), 159 | nga.field('body', 'wysiwyg') 160 | .label('Comment') 
161 | .stripTags(true) 162 | .map(truncate), 163 | nga.field(null, 'template') // template fields don't need a name in dashboard view 164 | .label('') 165 | .template('') // you can use custom directives, too 166 | ]); 167 | 168 | comment.listView() 169 | .title('Comments') 170 | .perPage(10) // limit the number of elements displayed per page. Default is 30. 171 | .fields([ 172 | nga.field('created_at', 'date') 173 | .label('Posted'), 174 | nga.field('author'), 175 | nga.field('body', 'wysiwyg') 176 | .stripTags(true) 177 | .map(truncate), 178 | nga.field('post_id', 'reference') 179 | .label('Post') 180 | .map(truncate) 181 | .targetEntity(post) 182 | .targetField(nga.field('title').map(truncate)) 183 | ]) 184 | .filters([ 185 | nga.field('q', 'string').label('').attributes({'placeholder': 'Global Search'}), 186 | nga.field('created_at', 'date') 187 | .label('Posted') 188 | .attributes({'placeholder': 'Filter by date'}), 189 | nga.field('today', 'boolean').map(function() { 190 | var now = new Date(), 191 | year = now.getFullYear(), 192 | month = now.getMonth() + 1, 193 | day = now.getDate(); 194 | month = month < 10 ? '0' + month : month; 195 | day = day < 10 ? '0' + day : day; 196 | return { 197 | created_at: [year, month, day].join('-') // ?created_at=... 
will be appended to the API call 198 | }; 199 | }), 200 | nga.field('post_id', 'reference') 201 | .label('Post') 202 | .targetEntity(post) 203 | .targetField(nga.field('title')) 204 | ]) 205 | .listActions(['edit', 'delete']); 206 | 207 | comment.creationView() 208 | .fields([ 209 | nga.field('created_at', 'date') 210 | .label('Posted') 211 | .defaultValue(new Date()), // preset fields in creation view with defaultValue 212 | nga.field('author'), 213 | nga.field('body', 'wysiwyg'), 214 | nga.field('post_id', 'reference') 215 | .label('Post') 216 | .map(truncate) 217 | .targetEntity(post) 218 | .targetField(nga.field('title')) 219 | .validation({ required: true }), 220 | ]); 221 | 222 | comment.editionView() 223 | .fields(comment.creationView().fields()) 224 | .fields([nga.field(null, 'template') 225 | .label('') 226 | .template('') // template() can take a function or a string 227 | ]); 228 | 229 | comment.deletionView() 230 | .title('Deletion confirmation'); // customize the deletion confirmation message 231 | 232 | tag.dashboardView() 233 | .title('Recent tags') 234 | .order(3) 235 | .perPage(10) 236 | .fields([ 237 | nga.field('id'), 238 | nga.field('name'), 239 | nga.field('published', 'boolean').label('Is published ?') 240 | ]); 241 | 242 | tag.listView() 243 | .infinitePagination(false) // by default, the list view uses infinite pagination. 
Set to false to use regulat pagination 244 | .fields([ 245 | nga.field('id').label('ID'), 246 | nga.field('name'), 247 | nga.field('published', 'boolean').cssClasses(function(entry) { // add custom CSS classes to inputs and columns 248 | if (entry.values.published) { 249 | return 'bg-success text-center'; 250 | } 251 | return 'bg-warning text-center'; 252 | }), 253 | nga.field('custom', 'template') 254 | .label('Upper name') 255 | .template('{{ entry.values.name.toUpperCase() }}') 256 | ]) 257 | .batchActions([]) // disable checkbox column and batch delete 258 | .listActions(['show']); 259 | 260 | tag.showView() 261 | .fields([ 262 | nga.field('name'), 263 | nga.field('published', 'boolean') 264 | ]); 265 | 266 | // customize header 267 | var customHeaderTemplate = 268 | '' + 271 | ''; 274 | admin.header(customHeaderTemplate); 275 | 276 | // customize menu 277 | admin.menu(nga.menu() 278 | .addChild(nga.menu(post).icon('')) // customize the entity menu icon 279 | .addChild(nga.menu(comment).icon('')) // you can even use utf-8 symbols! 280 | .addChild(nga.menu(tag).icon('')) 281 | .addChild(nga.menu().title('Other') 282 | .addChild(nga.menu().title('Stats').icon('').link('/stats')) 283 | ) 284 | ); 285 | 286 | nga.configure(admin); 287 | }]); 288 | 289 | app.directive('postLink', ['$location', function ($location) { 290 | return { 291 | restrict: 'E', 292 | scope: { entry: '&' }, 293 | template: '

View post

', 294 | link: function (scope) { 295 | scope.displayPost = function () { 296 | $location.path('/posts/show/' + scope.entry().values.post_id); 297 | }; 298 | } 299 | }; 300 | }]); 301 | 302 | app.directive('sendEmail', ['$location', function ($location) { 303 | return { 304 | restrict: 'E', 305 | scope: { post: '&' }, 306 | template: 'Send post by email', 307 | link: function (scope) { 308 | scope.send = function () { 309 | $location.path('/sendPost/' + scope.post().values.id); 310 | }; 311 | } 312 | }; 313 | }]); 314 | 315 | // custom 'send post by email' page 316 | 317 | function sendPostController($stateParams, notification) { 318 | this.postId = $stateParams.id; 319 | // notification is the service used to display notifications on the top of the screen 320 | this.notification = notification; 321 | } 322 | sendPostController.prototype.sendEmail = function() { 323 | if (this.email) { 324 | this.notification.log('Email successfully sent to ' + this.email, {addnCls: 'humane-flatty-success'}); 325 | } else { 326 | this.notification.log('Email is undefined', {addnCls: 'humane-flatty-error'}); 327 | } 328 | }; 329 | sendPostController.$inject = ['$stateParams', 'notification']; 330 | 331 | var sendPostControllerTemplate = 332 | '
' + 333 | '' + 334 | '' + 338 | '
' + 339 | '
' + 340 | '
' + 341 | '' + 342 | '
'; 343 | 344 | app.config(['$stateProvider', function ($stateProvider) { 345 | $stateProvider.state('send-post', { 346 | parent: 'main', 347 | url: '/sendPost/:id', 348 | params: { id: null }, 349 | controller: sendPostController, 350 | controllerAs: 'controller', 351 | template: sendPostControllerTemplate 352 | }); 353 | }]); 354 | 355 | // custom page with menu item 356 | var customPageTemplate = '
' + 357 | '' + 358 | '' + 362 | '
'; 363 | app.config(['$stateProvider', function ($stateProvider) { 364 | $stateProvider.state('stats', { 365 | parent: 'main', 366 | url: '/stats', 367 | template: customPageTemplate 368 | }); 369 | }]); 370 | 371 | }()); -------------------------------------------------------------------------------- /web/app/page.css: -------------------------------------------------------------------------------- 1 | .navbar-header, #header-nav{ 2 | background-color:#fff; 3 | } 4 | 5 | a.navbar-brand{ 6 | margin:0px; 7 | padding:0px; 8 | } 9 | 10 | .navbar-header img{ 11 | float:left; 12 | margin-top:2%; 13 | height:80%; 14 | } 15 | 16 | .experiment{ 17 | float:left; 18 | padding-top:15px; 19 | color:#999; 20 | } -------------------------------------------------------------------------------- /web/build/ng-admin-only.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Start Bootstrap - SB Admin 2 Bootstrap Admin Theme (http://startbootstrap.com) 3 | * Code licensed under the Apache License v2.0. 4 | * For details, see http://www.apache.org/licenses/LICENSE-2.0. 
5 | */.grid .label-default,.grid td a.multiple:hover{text-decoration:none}body{background-color:#f8f8f8}#wrapper{width:100%}#page-wrapper{padding:0 15px;min-height:568px;background-color:#fff}@media (min-width:768px){#page-wrapper{position:inherit;margin:0 0 0 250px;padding:0 30px;border-left:1px solid #e7e7e7}}.navbar-top-links{margin-right:0}.navbar-top-links li{display:inline-block}.flot-chart,.navbar-top-links .dropdown-menu li{display:block}.navbar-top-links li:last-child{margin-right:15px}.navbar-top-links li a{padding:15px;min-height:50px}.navbar-top-links .dropdown-menu li:last-child{margin-right:0}.navbar-top-links .dropdown-menu li a{padding:3px 20px;min-height:0}.navbar-top-links .dropdown-menu li a div{white-space:normal}.navbar-top-links .dropdown-alerts,.navbar-top-links .dropdown-messages,.navbar-top-links .dropdown-tasks{width:310px;min-width:0}.navbar-top-links .dropdown-messages{margin-left:5px}.navbar-top-links .dropdown-tasks{margin-left:-59px}.navbar-top-links .dropdown-alerts{margin-left:-123px}.navbar-top-links .dropdown-user{right:0;left:auto}.sidebar .sidebar-nav.navbar-collapse{padding-right:0;padding-left:0}.sidebar .sidebar-search{padding:15px}.sidebar ul li{border-bottom:1px solid #e7e7e7}.sidebar ul li a.active{background-color:#eee}.sidebar .arrow{float:right}.sidebar .fa.arrow:before{content:"\f104"}.sidebar .active>a>.fa.arrow:before{content:"\f107"}.sidebar .nav-second-level li,.sidebar .nav-third-level li{border-bottom:0!important}.sidebar .nav-second-level li a{padding-left:37px}.sidebar .nav-third-level li a{padding-left:52px}@media (min-width:768px){.sidebar{z-index:1;position:absolute;width:250px;margin-top:51px}.navbar-top-links .dropdown-alerts,.navbar-top-links .dropdown-messages,.navbar-top-links .dropdown-tasks{margin-left:auto}}.btn-outline{color:inherit;background-color:transparent;transition:all 
.5s}.btn-primary.btn-outline{color:#428bca}.btn-success.btn-outline{color:#5cb85c}.btn-info.btn-outline{color:#5bc0de}.btn-warning.btn-outline{color:#f0ad4e}.btn-danger.btn-outline{color:#d9534f}.btn-danger.btn-outline:hover,.btn-info.btn-outline:hover,.btn-primary.btn-outline:hover,.btn-success.btn-outline:hover,.btn-warning.btn-outline:hover{color:#fff}.chat{margin:0;padding:0;list-style:none}.chat li{margin-bottom:10px;padding-bottom:5px;border-bottom:1px dotted #999}.chat li.left .chat-body{margin-left:60px}.chat li.right .chat-body{margin-right:60px}.chat li .chat-body p{margin:0}.chat .glyphicon,.panel .slidedown .glyphicon{margin-right:5px}.chat-panel .panel-body{height:350px;overflow-y:scroll}.login-panel{margin-top:25%}.flot-chart{height:400px}.flot-chart-content{width:100%;height:100%}table.dataTable thead .sorting,table.dataTable thead .sorting_asc,table.dataTable thead .sorting_asc_disabled,table.dataTable thead .sorting_desc,table.dataTable thead .sorting_desc_disabled{background:0 0}table.dataTable thead .sorting_asc:after{content:"\f0de";float:right;font-family:fontawesome}table.dataTable thead .sorting_desc:after{content:"\f0dd";float:right;font-family:fontawesome}table.dataTable thead .sorting:after{content:"\f0dc";float:right;font-family:fontawesome;color:rgba(50,50,50,.5)}.btn-circle{width:30px;height:30px;padding:6px 0;border-radius:15px;text-align:center;font-size:12px;line-height:1.428571429}.btn-circle.btn-lg{width:50px;height:50px;padding:10px 16px;border-radius:25px;font-size:18px;line-height:1.33}.btn-circle.btn-xl{width:70px;height:70px;padding:10px 16px;border-radius:35px;font-size:24px;line-height:1.33}.show-grid [class^=col-]{padding-top:10px;padding-bottom:10px;border:1px solid #ddd;background-color:#eee!important}.show-grid{margin:15px 0}#wrapper .navbar-static-top,.dashboard-content .panel-default table{margin-bottom:0}.huge{font-size:40px}.panel-green{border-color:#5cb85c}.panel-green 
.panel-heading{border-color:#5cb85c;color:#fff;background-color:#5cb85c}.panel-green a{color:#5cb85c}.panel-green a:hover{color:#3d8b3d}.panel-red{border-color:#d9534f}.panel-red .panel-heading{border-color:#d9534f;color:#fff;background-color:#d9534f}.panel-red a{color:#d9534f}.panel-red a:hover{color:#b52b27}.panel-yellow{border-color:#f0ad4e}.panel-yellow .panel-heading{border-color:#f0ad4e;color:#fff;background-color:#f0ad4e}.panel-yellow a{color:#f0ad4e}.panel-yellow a:hover{color:#df8a13}#nprogress div.spinner{width:18px;margin-left:-9px;left:50%}#wrapper .navbar-static-top .navbar-right{margin-right:15px}pre.ascii{border:none;background:0 0}menu{margin:0;padding:0}ul.collapsible.collapsed{max-height:0;transition:max-height .15s ease-out}ul.collapsible{max-height:500px;transition:max-height .25s ease-in;overflow-y:hidden}[data-ng-click],[ng-click],[x-ng-click]{cursor:pointer}.dashboard-content .panel-default .panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.dashboard-content .grid{border:none}.dashboard-invite{margin:3em;padding:1em;text-align:center;background-color:#DFF0D8}.dashboard-invite p{padding:.5em;margin:0}#page-wrapper .page-header{margin:10px 0 15px}#page-wrapper .page-header .lead{margin-bottom:0}ma-view-actions{margin:25px 0 15px;float:right}.list-header .filters .filter{margin-right:5px}.list-header .filters .filter .input-group-btn{width:auto}.list-header .filters .filter select{width:250px}.list-header .filters .datepicker .form-control{border-top-left-radius:0;border-bottom-left-radius:0}.list-header .filters .datepicker .btn-default{height:34px}.list-header .filters .form-control{width:auto}.grid{background-color:#fff}.grid .label-default{margin-right:5px;font-weight:400;font-size:12px;padding-top:4px}.grid thead tr .glyphicon{font-size:13px;color:#aaa}div.bottom-loader{margin-top:40px;position:inherit;width:auto;height:auto}div.bottom-loader:after{position:relative;display:inherit;margin:0 
auto}div.bottom-loader:before{display:none}.pagination-bar{text-align:right;margin:20px 0}.pagination-bar .pagination{margin:0 0 0 20px}.pagination-bar .total{display:inline-block;padding:5px}.form-horizontal textarea{height:150px}.form-horizontal input[type=checkbox],.form-horizontal input[type=radio]{max-width:16px;box-shadow:none;cursor:pointer;margin:0}.form-horizontal .border-around{margin-top:2px;background-color:#FFF;background-image:none;border:1px solid #CCC;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;width:100%}.form-horizontal .ta-toolbar button{font-size:12px;padding:5px 8px}.form-horizontal .ta-toolbar button.active{z-index:1}.form-horizontal#show-view .label-default{margin-right:5px;font-weight:400;font-size:12px;padding-top:4px}.form-horizontal#show-view .control-label{padding-top:0}.form-horizontal#show-view .show-value .table{margin:0}.form-horizontal#show-view .show-value .table td{border-top:0}.form-horizontal .CodeMirror{border:1px solid #CCC;border-radius:4px} -------------------------------------------------------------------------------- /web/cluster.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Angular admin 6 | 7 | 8 | 9 | 10 | 11 |
12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /web/fixtures/configuration.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "deleted": false, 4 | "dataset_id": "5c0ee5ce-8b20-47b1-810c-2dbb0edee524", 5 | "maximum_size": 107374182400, 6 | "primary": "e7186cc3-b82d-447d-91c1-f453a94c48ca", 7 | "metadata": { 8 | "name": "apples" 9 | } 10 | }, 11 | { 12 | "deleted": false, 13 | "dataset_id": "1b2b662c-4185-4957-9317-fdd60dc52104", 14 | "maximum_size": 107374182400, 15 | "primary": "aba44c05-c0eb-4386-9c77-45657afbf0a4", 16 | "metadata": { 17 | "name": "oranges" 18 | } 19 | } 20 | ] -------------------------------------------------------------------------------- /web/fixtures/nodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "host": "10.231.209.44", 4 | "uuid": "e7186cc3-b82d-447d-91c1-f453a94c48ca" 5 | }, 6 | { 7 | "host": "10.230.11.106", 8 | "uuid": "aba44c05-c0eb-4386-9c77-45657afbf0a4" 9 | }, 10 | { 11 | "host": "10.93.140.171", 12 | "uuid": "f4e3eadc-443f-4949-8f70-c4aa4e5e2df0" 13 | } 14 | ] -------------------------------------------------------------------------------- /web/fixtures/state.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "primary": "e7186cc3-b82d-447d-91c1-f453a94c48ca", 4 | "path": "/flocker/5c0ee5ce-8b20-47b1-810c-2dbb0edee524", 5 | "dataset_id": "5c0ee5ce-8b20-47b1-810c-2dbb0edee524", 6 | "maximum_size": 107374182400 7 | }, 8 | { 9 | "primary": "aba44c05-c0eb-4386-9c77-45657afbf0a4", 10 | "path": "/flocker/1b2b662c-4185-4957-9317-fdd60dc52104", 11 | "dataset_id": "1b2b662c-4185-4957-9317-fdd60dc52104", 12 | "maximum_size": 107374182400 13 | } 14 | ] -------------------------------------------------------------------------------- /web/fixtures/volume.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "deleted": false, 4 | "short_dataset_id": "5c0ee5ce", 5 | "dataset_id": "5c0ee5ce-8b20-47b1-810c-2dbb0edee524", 6 | "maximum_size": 107374182400, 7 | "primary": "e7186cc3-b82d-447d-91c1-f453a94c48ca", 8 | "metadata": { 9 | "name": "apples" 10 | }, 11 | "meta":"name=apples", 12 | "size":"100Gb" 13 | }, 14 | { 15 | "deleted": false, 16 | "short_dataset_id": "1b2b662c", 17 | "dataset_id": "1b2b662c-4185-4957-9317-fdd60dc52104", 18 | "maximum_size": 107374182400, 19 | "primary": "aba44c05-c0eb-4386-9c77-45657afbf0a4", 20 | "metadata": { 21 | "name": "oranges" 22 | }, 23 | "meta":"name=oranges", 24 | "size":"100Gb" 25 | } 26 | ] -------------------------------------------------------------------------------- /web/flockerclient.py: -------------------------------------------------------------------------------- 1 | from twisted.python.filepath import FilePath 2 | from twisted.python import log 3 | from twisted.web import resource, server 4 | from twisted.web.static import File 5 | from twisted.internet import defer 6 | from txflocker.client import combined_state, parse_num, process_metadata 7 | from txflocker.client import get_client as txflocker_get_client 8 | import json 9 | import os 10 | import treq 11 | 12 | """ 13 | Supported verb/url tuples: 14 | 15 | GET /v1/nodes 16 | GET /v1/nodes/:uuid 17 | 18 | GET /v1/datasets 19 | GET /v1/datasets/:uuid 20 | 21 | POST /v1/datasets 22 | PUT /v1/datasets/:uuid (maps to POST) 23 | 24 | DELETE /v1/datasets/:uuid TODO 25 | 26 | For debugging: 27 | 28 | GET /v1/version 29 | GET /v1/configuration/datasets 30 | GET /v1/state/datasets 31 | """ 32 | 33 | class BaseResource(resource.Resource): 34 | def __init__(self, *args, **kw): 35 | self.base_url = get_base_url() 36 | self.client = get_client() 37 | return resource.Resource.__init__(self, *args, **kw) 38 | 39 | class ChildProxyResource(BaseResource): 40 | def __init__(self, child_id, 
proxy_path, *args, **kw): 41 | self.child_id = child_id 42 | self.proxy_path = proxy_path 43 | return BaseResource.__init__(self, *args, **kw) 44 | 45 | def render_GET(self, request): 46 | d = self.client.get(self.base_url + self.proxy_path) 47 | d.addCallback(treq.json_content) 48 | def got_result(results): 49 | request.setHeader("content-type", "application/json") 50 | request.setHeader("access-control-allow-origin", "*") 51 | for result in results: 52 | if result["uuid"] == self.child_id: 53 | request.write(json.dumps(result)) 54 | request.finish() 55 | return 56 | request.setResponseCode(400) 57 | request.write(json.dumps(dict(error="unable to find child %s" % 58 | (self.child_id,)))) 59 | d.addCallback(got_result) 60 | def handle_failure(failure): 61 | request.setResponseCode(400) 62 | request.setHeader("content-type", "application/json") 63 | request.setHeader("access-control-allow-origin", "*") 64 | request.write(json.dumps(dict(error=str(failure)))) 65 | request.finish() 66 | return failure 67 | d.addErrback(handle_failure) 68 | d.addErrback(log.err, "while trying to query backend" + self.base_url + 69 | self.proxy_path + "/" + self.child_id) 70 | return server.NOT_DONE_YET 71 | 72 | def simpleProxyFactory(proxy_path): 73 | """ 74 | GET and POST proxy factory (POST assumes it returns JSON too). 
75 | """ 76 | class ProxyResource(BaseResource): 77 | def __init__(self, *args, **kw): 78 | self.proxy_path = proxy_path 79 | return BaseResource.__init__(self, *args, **kw) 80 | 81 | def getChild(self, path, request): 82 | fragments = request.uri.split("/") 83 | return ChildProxyResource(child_id=fragments.pop().encode("ascii"), 84 | proxy_path=self.proxy_path) 85 | 86 | def render_GET(self, request): 87 | d = self.client.get(self.base_url + self.proxy_path) 88 | d.addCallback(treq.json_content) 89 | def got_result(result): 90 | # proxy straight thru 91 | request.setHeader("content-type", "application/json") 92 | request.setHeader("access-control-allow-origin", "*") 93 | request.write(json.dumps(result)) 94 | request.finish() 95 | d.addCallback(got_result) 96 | def handle_failure(failure): 97 | request.setResponseCode(400) 98 | request.setHeader("content-type", "application/json") 99 | request.setHeader("access-control-allow-origin", "*") 100 | request.write(json.dumps(dict(error=str(failure)))) 101 | request.finish() 102 | return failure 103 | d.addErrback(handle_failure) 104 | d.addErrback(log.err, "while trying to query backend") 105 | return server.NOT_DONE_YET 106 | 107 | return ProxyResource() 108 | 109 | def get_root(): 110 | state = resource.Resource() 111 | state.putChild("datasets", simpleProxyFactory("/state/datasets")) 112 | 113 | configuration = resource.Resource() 114 | configuration.putChild("datasets", 115 | simpleProxyFactory("/configuration/datasets")) 116 | 117 | v1 = resource.Resource() 118 | # passthru endpoints: 119 | v1.putChild("configuration", configuration) 120 | v1.putChild("state", state) 121 | v1.putChild("version", simpleProxyFactory("/version")) 122 | 123 | # top level synthesized endpoints: 124 | v1.putChild("nodes", simpleProxyFactory("/state/nodes")) 125 | 126 | v1.putChild("datasets", CombinedDatasets()) 127 | 128 | root = resource.Resource() 129 | root.putChild("v1", v1) 130 | root.putChild("client", File(".")) 131 | return root 
132 | 133 | def get_hostname(): 134 | return os.environ["CONTROL_SERVICE"] 135 | 136 | def get_user(): 137 | return os.environ.get("USERNAME", "user") 138 | 139 | def get_certificates_path(): 140 | return FilePath(os.environ.get("CERTS_PATH", "..")) 141 | 142 | def get_client(): 143 | certificates_path = get_certificates_path() 144 | user_certificate_filename = "%s.crt" % (get_user(),) 145 | user_key_filename = "%s.key" % (get_user(),) 146 | return txflocker_get_client( 147 | certificates_path=certificates_path, 148 | user_certificate_filename=user_certificate_filename, 149 | user_key_filename=user_key_filename, 150 | target_hostname=get_hostname(), 151 | ) 152 | 153 | def get_base_url(): 154 | return "https://%(hostname)s:4523/v1" % dict( 155 | hostname=get_hostname(),) 156 | 157 | class DatasetResource(resource.Resource): 158 | def __init__(self, dataset_id, *args, **kw): 159 | self.dataset_id = dataset_id 160 | return resource.Resource.__init__(self, *args, **kw) 161 | 162 | def render_GET(self, request): 163 | d = combined_state(get_client(), get_base_url(), deleted=False) 164 | def got_state(results): 165 | request.setHeader("content-type", "application/json") 166 | request.setHeader("access-control-allow-origin", "*") 167 | for result in results: 168 | if result["dataset_id"] == self.dataset_id: 169 | request.write(json.dumps(result)) 170 | request.finish() 171 | return 172 | request.setResponseCode(400) 173 | request.write(json.dumps(dict( 174 | error="unable to find %s" % (self.dataset_id,)))) 175 | request.finish() 176 | d.addCallback(got_state) 177 | d.addErrback(log.err, "while trying to GET child dataset") 178 | return server.NOT_DONE_YET 179 | 180 | def render_PUT(self, request): 181 | return self.render_POST(request) 182 | 183 | def render_POST(self, request): 184 | request_raw = json.loads(request.content.read()) 185 | client = get_client() 186 | if "primary" not in request_raw: 187 | d = defer.fail(Exception("must specify primary")) 188 | else: 189 
| d = client.post(get_base_url() + "/configuration/datasets/%s" % 190 | (self.dataset_id,), json.dumps({"primary": request_raw["primary"]}), 191 | headers={"content-type": "application/json"}) 192 | d.addCallback(treq.json_content) 193 | def got_result(result): 194 | request.setHeader("content-type", "application/json") 195 | request.setHeader("access-control-allow-origin", "*") 196 | request.write(json.dumps(dict(result="success"))) 197 | request.finish() 198 | d.addCallback(got_result) 199 | def handle_failure(failure): 200 | request.setResponseCode(400) 201 | request.setHeader("content-type", "application/json") 202 | request.setHeader("access-control-allow-origin", "*") 203 | request.write(json.dumps(dict(error=str(failure)))) 204 | request.finish() 205 | return failure 206 | d.addErrback(handle_failure) 207 | d.addErrback(log.err, "while trying to POST combined state") 208 | return server.NOT_DONE_YET 209 | 210 | class CombinedDatasets(resource.Resource): 211 | def getChild(self, path, request): 212 | fragments = request.uri.split("/") 213 | return DatasetResource(dataset_id=fragments.pop().encode("ascii")) 214 | def render_GET(self, request): 215 | d = combined_state(get_client(), get_base_url(), deleted=False) 216 | def got_state(result): 217 | request.setHeader("content-type", "application/json") 218 | request.setHeader("access-control-allow-origin", "*") 219 | request.write(json.dumps(result)) 220 | request.finish() 221 | d.addCallback(got_state) 222 | d.addErrback(log.err, "while trying to GET combined state") 223 | return server.NOT_DONE_YET 224 | def render_POST(self, request): 225 | request_raw = json.loads(request.content.read()) 226 | try: 227 | if "meta" in request_raw: 228 | request_raw["metadata"] = process_metadata(request_raw.pop("meta")) 229 | if "size" in request_raw: 230 | request_raw["maximum_size"] = parse_num(request_raw.pop("size")) 231 | except Exception, e: 232 | request.setHeader("content-type", "application/json") 233 | 
request.setHeader("access-control-allow-origin", "*") 234 | request.setResponseCode(400) 235 | return json.dumps(dict(error=str(e))) 236 | client = get_client() 237 | d = client.post(get_base_url() + "/configuration/datasets", 238 | json.dumps(request_raw), headers={ 239 | "content-type": "application/json"}) 240 | d.addCallback(treq.json_content) 241 | def got_result(result): 242 | request.setHeader("content-type", "application/json") 243 | request.setHeader("access-control-allow-origin", "*") 244 | request.write(json.dumps(result)) 245 | request.finish() 246 | d.addCallback(got_result) 247 | def handle_failure(failure): 248 | request.setResponseCode(400) 249 | request.setHeader("content-type", "application/json") 250 | request.setHeader("access-control-allow-origin", "*") 251 | request.write(json.dumps(dict(error=str(failure)))) 252 | request.finish() 253 | return failure 254 | d.addErrback(handle_failure) 255 | d.addErrback(log.err, "while trying to POST combined state") 256 | return server.NOT_DONE_YET 257 | -------------------------------------------------------------------------------- /web/images/clusterhq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/web/images/clusterhq.png -------------------------------------------------------------------------------- /web/images/clusterhq@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/web/images/clusterhq@2x.png -------------------------------------------------------------------------------- /web/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/web/images/logo.png 
-------------------------------------------------------------------------------- /web/images/logo@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/web/images/logo@2x.png -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Angular admin 6 | 7 | 8 | 9 | 10 | 11 |
12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /web/index.js: -------------------------------------------------------------------------------- 1 | var http = require('http'); 2 | var concat = require('concat-stream') 3 | var ecstatic = require('ecstatic') 4 | var Router = require('routes-router') 5 | 6 | var router = Router() 7 | 8 | var server = http.createServer(router) 9 | 10 | var nodes = require('./fixtures/nodes.json') 11 | var state = require('./fixtures/state.json') 12 | var configuration = require('./fixtures/configuration.json') 13 | var volume = require('./fixtures/volume.json') 14 | 15 | var fileServer = ecstatic({ root: __dirname }) 16 | 17 | function crud(route, idfield, data){ 18 | 19 | router.addRoute("/v1/" + route, { 20 | GET: function (req, res) { 21 | res.end(JSON.stringify(data)) 22 | } 23 | }) 24 | 25 | router.addRoute("/v1/" + route + "/:id", { 26 | GET: function (req, res, opts) { 27 | var results = data.filter(function(entry){ 28 | return entry[idfield].indexOf(opts.params.id)==0 29 | }) 30 | res.end(JSON.stringify(results[0])) 31 | } 32 | }) 33 | } 34 | 35 | crud('nodes', 'uuid', nodes) 36 | crud('configuration/datasets', 'dataset_id', configuration) 37 | crud('state/datasets', 'dataset_id', state) 38 | crud('datasets', 'dataset_id', volume) 39 | 40 | router.addRoute("/*", fileServer) 41 | 42 | server.listen(8081, function(){ 43 | console.log('server listening on port 8081') 44 | }) -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "version": "1.0.0", 4 | "description": "Prerequisites:", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "concat-stream": "^1.4.10", 13 | "ecstatic": "^0.8.0", 14 | 
"routes-router": "^4.1.2" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /web/server.tac: -------------------------------------------------------------------------------- 1 | # Copyright ClusterHQ Inc. See LICENSE file for details. 2 | 3 | import flockerclient 4 | 5 | from twisted.application import service, internet 6 | from twisted.web import server 7 | 8 | def getAdapter(): 9 | root = flockerclient.get_root() 10 | site = server.Site(root) 11 | return site 12 | 13 | application = service.Application("Insecure Flocker Client REST API") 14 | adapterServer = internet.TCPServer(80, getAdapter()) 15 | adapterServer.setServiceParent(application) 16 | -------------------------------------------------------------------------------- /web/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright ClusterHQ Limited. See LICENSE file for details. 2 | 3 | """ 4 | Setup Flocker Experimental GUI. 5 | """ 6 | from setuptools import setup 7 | 8 | description = "Experimental Flocker GUI" 9 | 10 | setup( 11 | # This is the human-targetted name of the software being packaged. 12 | name="Flocker Experimental GUI", 13 | # This is a string giving the version of the software being packaged. For 14 | # simplicity it should be something boring like X.Y.Z. 15 | version="0.1", 16 | # This identifies the creators of this software. This is left symbolic for 17 | # ease of maintenance. 18 | author="ClusterHQ Labs", 19 | # This is contact information for the authors. 20 | author_email="labs@clusterhq.com", 21 | # Here is a website where more information about the software is available. 22 | url="https://clusterhq.com/", 23 | 24 | # A short identifier for the license under which the project is released. 25 | license="Apache License, Version 2.0", 26 | 27 | # Some details about what Flocker is. Synchronized with the README.rst to 28 | # keep it up to date more easily. 
29 | long_description=description, 30 | 31 | install_requires=[ 32 | "Twisted == 14.0.0", 33 | "PyYAML == 3.10", 34 | "treq == 0.2.1", 35 | "service_identity", 36 | "pycrypto", 37 | ], 38 | # Some "trove classifiers" which are relevant. 39 | classifiers=[ 40 | "License :: OSI Approved :: Apache Software License", 41 | ], 42 | ) 43 | -------------------------------------------------------------------------------- /web/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Set USERNAME and CONTROL_SERVICE env vars 3 | PYTHONPATH=..:$PYTHONPATH twistd -noy server.tac 4 | -------------------------------------------------------------------------------- /web/test.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /web/txflocker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/unofficial-flocker-tools/4d649b0fca3226b73920fb9e7b23ca15b18f521e/web/txflocker/__init__.py -------------------------------------------------------------------------------- /web/txflocker/client.py: -------------------------------------------------------------------------------- 1 | """ 2 | A collection of utilities for using the flocker REST API. 
3 | """ 4 | 5 | from treq.client import HTTPClient 6 | 7 | from twisted.internet import reactor, ssl, defer 8 | from twisted.python.usage import UsageError 9 | from twisted.python.filepath import FilePath 10 | from twisted.web.client import Agent 11 | 12 | import yaml 13 | import treq 14 | import copy 15 | 16 | def process_metadata(metadata_str): 17 | if not metadata_str: 18 | return {} 19 | metadata = {} 20 | try: 21 | for pair in metadata_str.split(","): 22 | k, v = pair.split("=") 23 | metadata[k] = v 24 | except: 25 | raise UsageError("malformed metadata specification " 26 | "'%s', please use format 'a=b,c=d'" % 27 | (metadata_str,)) 28 | return metadata 29 | 30 | def parse_num(expression): 31 | if not expression: 32 | return None 33 | expression = expression.encode("ascii") 34 | unit = expression.translate(None, "1234567890.") 35 | num = expression.replace(unit, "") 36 | unit = unit.lower() 37 | if unit == 'tb' or unit == 't' or unit =='tib': 38 | return int(float(num)*1024*1024*1024*1024) 39 | elif unit == 'gb' or unit == 'g' or unit =='gib': 40 | return int(float(num)*1024*1024*1024) 41 | elif unit == 'mb' or unit == 'm' or unit =='mib': 42 | return int(float(num)*1024*1024) 43 | elif unit == 'kb' or unit == 'k' or unit =='kib': 44 | return int(float(num)*1024) 45 | else: 46 | return int(float(num)) 47 | 48 | def combined_state(client, base_url, deleted): 49 | d1 = client.get(base_url + "/configuration/datasets") 50 | d1.addCallback(treq.json_content) 51 | 52 | d2 = client.get(base_url + "/state/datasets") 53 | d2.addCallback(treq.json_content) 54 | 55 | d3 = client.get(base_url + "/state/nodes") 56 | d3.addCallback(treq.json_content) 57 | 58 | ds = [d1, d2, d3] 59 | 60 | d = defer.gatherResults(ds) 61 | def got_results(results): 62 | configuration_datasets, state_datasets, state_nodes = results 63 | 64 | # build up a table, based on which datasets are in the 65 | # configuration, adding data from the state as necessary 66 | configuration_map = 
dict((d["dataset_id"], d) for d in 67 | configuration_datasets) 68 | state_map = dict((d["dataset_id"], d) for d in state_datasets) 69 | nodes_map = dict((n["uuid"], n) for n in state_nodes) 70 | 71 | #print "got state:" 72 | #pprint.pprint(state_datasets) 73 | #print 74 | 75 | objects = [] 76 | 77 | for (key, dataset) in configuration_map.iteritems(): 78 | dataset = copy.copy(dataset) 79 | if dataset["deleted"]: 80 | # the user has asked to see deleted datasets 81 | if deleted: 82 | if key in state_map: 83 | status = "deleting" 84 | else: 85 | status = "deleted" 86 | # we are hiding deleted datasets 87 | else: 88 | continue 89 | else: 90 | if key in state_map: 91 | if ("primary" in state_map[key] and 92 | state_map[key]["primary"] in nodes_map): 93 | status = u"attached \u2705" 94 | else: 95 | status = u"detached" 96 | else: 97 | # not deleted, not in state, probably waiting for it to 98 | # show up. 99 | status = u"pending \u231b" 100 | 101 | dataset["status"] = status 102 | 103 | meta = [] 104 | if dataset["metadata"]: 105 | for k, v in dataset["metadata"].iteritems(): 106 | meta.append("%s=%s" % (k, v)) 107 | 108 | dataset["meta"] = ",".join(meta) 109 | 110 | if dataset["primary"] in nodes_map: 111 | primary = nodes_map[dataset["primary"]] 112 | node = dict(uuid=primary["uuid"], host=primary["host"]) 113 | else: 114 | node = None 115 | 116 | dataset["node"] = node 117 | 118 | dataset["short_dataset_id"] = dataset["dataset_id"][:8] 119 | 120 | if dataset.get("maximum_size"): 121 | size = "%.2fG" % (dataset["maximum_size"] 122 | / (1024 * 1024 * 1024.),) 123 | else: 124 | # must be a backend with quotas instead of sizes 125 | size = "" 126 | 127 | dataset["size"] = size 128 | objects.append(dataset) 129 | return objects 130 | d.addCallback(got_results) 131 | return d 132 | 133 | def get_client(reactor=reactor, certificates_path=FilePath("/etc/flocker"), 134 | user_certificate_filename="node.crt", user_key_filename="node.key", 135 | 
cluster_certificate_filename="cluster.crt", target_hostname=None): 136 | """ 137 | Create a ``treq``-API object that implements the REST API TLS 138 | authentication. 139 | 140 | That is, validating the control service as well as presenting a 141 | certificate to the control service for authentication. 142 | 143 | :return: ``treq`` compatible object. 144 | """ 145 | if target_hostname is None: 146 | config = certificates_path.child("agent.yml") 147 | if config.exists(): 148 | agent_config = yaml.load(config.open()) 149 | target_hostname = agent_config["control-service"]["hostname"] 150 | 151 | user_crt = certificates_path.child(user_certificate_filename) 152 | user_key = certificates_path.child(user_key_filename) 153 | cluster_crt = certificates_path.child(cluster_certificate_filename) 154 | 155 | if (user_crt.exists() and user_key.exists() and cluster_crt.exists() 156 | and target_hostname is not None): 157 | # we are installed on a flocker node with a certificate, try to reuse 158 | # it for auth against the control service 159 | cert_data = cluster_crt.getContent() 160 | auth_data = user_crt.getContent() + user_key.getContent() 161 | 162 | authority = ssl.Certificate.loadPEM(cert_data) 163 | client_certificate = ssl.PrivateCertificate.loadPEM(auth_data) 164 | 165 | class ContextFactory(object): 166 | def getContext(self, hostname, port): 167 | context = client_certificate.options(authority).getContext() 168 | return context 169 | 170 | return HTTPClient(Agent(reactor, contextFactory=ContextFactory())) 171 | else: 172 | raise Exception("Not enough information to construct TLS context: " 173 | "user_crt: %s, cluster_crt: %s, user_key: %s, target_hostname: %s" % ( 174 | user_crt, cluster_crt, user_key, target_hostname)) 175 | --------------------------------------------------------------------------------