├── docs ├── monitoring.md ├── dns.md ├── logging-monitoring.md ├── networks.md ├── replicators.md ├── volumes.md ├── clustering.md └── pods.md ├── examples ├── cassandra │ ├── cassandra-app-cleanup.sh │ └── cassandra-app-create.sh ├── zookeeper │ ├── zk-app-cleanup.sh │ └── zk-app-create.sh └── wordpress-mysql.sh ├── docker ├── zookeeper │ ├── Dockerfile │ └── start.sh └── cassandra │ ├── Dockerfile │ └── docker-entrypoint.sh ├── vagrant └── Vagrantfile ├── README.md └── LICENSE /docs/monitoring.md: -------------------------------------------------------------------------------- 1 | # Statistics & Monitoring 2 | 3 | Statistics & monitoring is enabled by default in Project6. Container metrics 4 | are collected periodically in a centralized time series database. The metrics 5 | can be viewed in grafana dashboard at the virtual-ip url: http://<virtual-ip>:8000 6 | 7 | This is a work in progress. Expect more improvements to the grafana dashboard 8 | soon. 9 | 10 | -------------------------------------------------------------------------------- /examples/cassandra/cassandra-app-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DWCTL=/home/dws/bin/dwctl 4 | 5 | # Cassandra prefix 6 | NAME_PREFIX=${1:-cassandra} 7 | 8 | # Number of instances to configure 9 | NUM_INSTANCES=${2:-3} 10 | 11 | # Delete volumes 12 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 13 | do 14 | ${DWCTL} volume delete ${NAME_PREFIX}-vg${INSTANCE} 15 | ${DWCTL} pod delete ${NAME_PREFIX}-${INSTANCE} 16 | done 17 | -------------------------------------------------------------------------------- /docker/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER Chakravarthy Nelluri 3 | 4 | RUN yum -y install java wget tar && yum clean all 5 | 6 | RUN wget -q -O - http://mirror.cc.columbia.edu/pub/software/apache/zookeeper/stable/zookeeper-3.4.6.tar.gz | tar -zx -C /opt 7 | 8 | RUN ln -s /opt/zookeeper-3.4.6 /opt/zookeeper 9 | 10 | EXPOSE 2181 2888 3888 11 | 12 | WORKDIR /opt/zookeeper 13 | 14 | ADD start.sh /start.sh 15 | RUN chmod +x /start.sh 16 | 17 | CMD ["/start.sh"] 18 | -------------------------------------------------------------------------------- /examples/zookeeper/zk-app-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Name prefix 4 | NAME_PREFIX="zk" 5 | 6 | # Number of instances to configure 7 | NUM_INSTANCES=3 8 | 9 | # dwctl binary 10 | DWCTL=/home/dws/bin/dwctl 11 | 12 | # Create volumes 13 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 14 | do 15 | ${DWCTL} volume delete ${NAME_PREFIX}-vg${INSTANCE} 16 | done 17 | 18 | # Create pods 19 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 20 | do 21 | ${DWCTL} pod delete ${NAME_PREFIX}-${INSTANCE} 22 | done 23 | -------------------------------------------------------------------------------- /examples/wordpress-mysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dwctl volume create wp-vol -v vol1,1G,ext4 3 | dwctl pod create mysql -i docker.io/mysql:5.5 -v wp-vol/vol1:/var/lib/mysql,rw -n blue -c 1250 -m 750M -l mysql -e MYSQL_ROOT_PASSWORD=root,MYSQL_DATABASE=blog,MYSQL_USER=mysql,MYSQL_PASSWORD=mysql 4 | dwctl pod create wordpress -i docker.io/wordpress:4.3.1 -v /var/www/html,rw,100M,ext4 -n blue -c 1250 -m 100M -l wordpress -e WORDPRESS_DB_HOST=mysql.mycluster.datawise.io,WORDPRESS_DB_USER=mysql,WORDPRESS_DB_PASSWORD=mysql,WORDPRESS_DB_NAME=blog 5 | 6 | -------------------------------------------------------------------------------- /docs/dns.md: -------------------------------------------------------------------------------- 1 | # DNS 2 | 3 | In Project6, cluster wide DNS can be configured during cluster create. It 4 | allows applications & services running on containers to discover each other 5 | with out the need of external DNS. DNS entries are automatically updated when a 6 | new container is created or deleted. 7 | 8 | ## Enabling DNS 9 | DNS is enabled by specifying --poddns option during cluster create. 10 | Example: 11 | ``` 12 | $ dwctl cluster create mycluster 192.168.30.11,192.168.30.12,192.168.30.13 \ 13 | --vip 192.168.30.10 --poddns mycluster.datawise.io 14 | ``` 15 | 16 | DNS service is highly available and serves requests on cluster virtual 17 | IP. 18 | -------------------------------------------------------------------------------- /docker/cassandra/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | 3 | RUN apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys 514A2AD631A57A16DD0047EC749D6EEC0353B12C 4 | 5 | RUN echo 'deb http://www.apache.org/dist/cassandra/debian 21x main' >> /etc/apt/sources.list.d/cassandra.list 6 | 7 | RUN apt-get update \ 8 | && apt-get install -y cassandra \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | ENV CASSANDRA_CONFIG /etc/cassandra 12 | 13 | # listen to all rpc 14 | RUN sed -ri ' \ 15 | s/^(rpc_address:).*/\1 0.0.0.0/; \ 16 | ' "$CASSANDRA_CONFIG/cassandra.yaml" 17 | 18 | COPY docker-entrypoint.sh /docker-entrypoint.sh 19 | ENTRYPOINT ["/docker-entrypoint.sh"] 20 | 21 | CMD ["cassandra", "-f"] 22 | -------------------------------------------------------------------------------- /docker/cassandra/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # first arg is `-f` or `--some-option` 5 | if [ "${1:0:1}" = '-' ]; then 6 | set -- cassandra -f "$@" 7 | fi 8 | 9 | if [ "$1" = 'cassandra' ]; then 10 | CASSANDRA_LISTEN_ADDRESS=`/sbin/ip -o -f inet addr show $CASSANDRA_LISTEN_INTERFACE | cut -d" " -f7 | cut -d"/" -f 1` 11 | 12 | if [ "$CASSANDRA_SEEDS" = "" ]; then 13 | CASSANDRA_SEEDS="$CASSANDRA_LISTEN_ADDRESS" 14 | fi 15 | 16 | sed -ri 's/(- seeds:) "127.0.0.1"/\1 '$CASSANDRA_SEEDS'/' "$CASSANDRA_CONFIG/cassandra.yaml" 17 | 18 | CASSANDRA_RPC_ADDRESS="$CASSANDRA_LISTEN_ADDRESS" 19 | 20 | for yaml in \ 21 | listen_address \ 22 | rpc_address \ 23 | ; do 24 | var="CASSANDRA_${yaml^^}" 25 | val="${!var}" 26 | if [ "$val" ]; then 27 | sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CASSANDRA_CONFIG/cassandra.yaml" 28 | fi 29 | done 30 | fi 31 | 32 | exec "$@" 33 | -------------------------------------------------------------------------------- /docker/zookeeper/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PREFIX=${PREFIX:-zookeeper} 4 | TICK_TIME=${TICK_TIME:-2000} 5 | DATA_DIR=${DATA_DIR:-/data} 6 | CLIENT_PORT=${CLIENT_PORT:-2181} 7 | INIT_LIMIT=${INIT_LIMIT:-5} 8 | SYNC_LIMIT=${SYNC_LIMIT:-2} 9 | NUM_INSTANCES=${NUM_INSTANCES:-1} 10 | MYID=${MYID:-1} 11 | 12 | ZOOKEEPER_CONF_FILE=${ZOOKEEPER_CONF_FILE:-/opt/zookeeper/conf/zoo.cfg} 13 | 14 | echo "tickTime=${TICK_TIME}" >> ${ZOOKEEPER_CONF_FILE} 15 | echo "dataDir=${DATA_DIR}" >> ${ZOOKEEPER_CONF_FILE} 16 | echo "clientPort=${CLIENT_PORT}" >> ${ZOOKEEPER_CONF_FILE} 17 | echo "initLimit=${INIT_LIMIT}" >> ${ZOOKEEPER_CONF_FILE} 18 | echo "syncLimit=${SYNC_LIMIT}" >> ${ZOOKEEPER_CONF_FILE} 19 | 20 | for IDX in `seq 1 ${NUM_INSTANCES}` 21 | do 22 | if [ "${IDX}" = "${MYID}" ]; then 23 | echo "server.${IDX}=0.0.0.0:2888:3888" >> ${ZOOKEEPER_CONF_FILE} 24 | else 25 | echo "server.${IDX}=${PREFIX}-${IDX}:2888:3888" >> ${ZOOKEEPER_CONF_FILE} 26 | fi 27 | done 28 | 29 | if [ ! -d ${DATA_DIR} ]; then 30 | mkdir ${DATA_DIR} 31 | fi 32 | 33 | echo ${MYID} > ${DATA_DIR}/myid 34 | 35 | /opt/zookeeper/bin/zkServer.sh start-foreground 36 | -------------------------------------------------------------------------------- /docs/logging-monitoring.md: -------------------------------------------------------------------------------- 1 | # Logging 2 | 3 | In Project6, centralized log management for pods is enabled by default. 4 | Logs from all the pods are collected into elastic search backend and 5 | are readily available for monitoring at the url: http://kibana-logging-0001. 6 | The services to collect the logs from the pods are themselves configured to run 7 | as default cluster-service pods. These pods are also listed in ``dwctl 8 | pod list`` 9 | 10 | To resolve the hostnames, configure your client to point to Skydns running on 11 | cluster virtual ip. 12 | 13 | Note: Logging service requires cluster DNS service to be up and running. 14 | 15 | ##Disable Logging: 16 | Logging can be disabled at run time using: 17 | ``` 18 | $ dwctl cluster configure -l disable 19 | ``` 20 | 21 | ##Enable logging 22 | Logging can be enabled at run time using: 23 | ``` 24 | $ dwctl cluster configure -l enable 25 | ``` 26 | 27 | # Statistics & Monitoring 28 | 29 | Statistics & monitoring is enabled by default in Project6. Container metrics 30 | are collected periodically in a centralized time series database. The metrics 31 | can be viewed in grafana dashboard at the virtual-ip url: http://<virtual-ip>:8000 32 | 33 | This is a work in progress. Expect more improvements to the grafana dashboard 34 | soon. 35 | 36 | -------------------------------------------------------------------------------- /examples/zookeeper/zk-app-create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default 256MB memory 4 | MEM=256M 5 | 6 | # 1 CPU Core/1000 CPU milli cores 7 | CPU=1000 8 | 9 | # Volume size 10 | VOLSIZE=1G 11 | 12 | # Zookeeper prefix 13 | NAME_PREFIX=${1:-zk} 14 | 15 | # Number of instances to configure 16 | NUM_INSTANCES=${2:-3} 17 | 18 | # dwctl binary 19 | DWCTL=/home/dws/bin/dwctl 20 | 21 | # Create volumes 22 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 23 | do 24 | ${DWCTL} volume create ${NAME_PREFIX}-vg${INSTANCE} -v vol,${VOLSIZE},ext4 -l ${NAME_PREFIX} -x ${NAME_PREFIX} 25 | if [ $? -ne 0 ]; then 26 | echo "Failed to create volume: ${NAME_PREFIX}-vg${INSTANCE}" 27 | exit 1 28 | else 29 | echo "Successfully created volume: ${NAME_PREFIX}-vg${INSTANCE}" 30 | fi 31 | done 32 | 33 | # Create pods 34 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 35 | do 36 | # Env vars 37 | ENV_ARGS="-e MYID=${INSTANCE},NUM_INSTANCES=${NUM_INSTANCES},PREFIX=${NAME_PREFIX}" 38 | # Other args 39 | ARGS="-v ${NAME_PREFIX}-vg${INSTANCE}/vol:/data,rw -c ${CPU} -m ${MEM}" 40 | 41 | # Create Pod 42 | ${DWCTL} pod create ${NAME_PREFIX}-${INSTANCE} ${ENV_ARGS} ${ARGS} -i datawiseio/zookeeper 43 | if [ $? -ne 0 ]; then 44 | echo "Failed to create pod: ${NAME_PREFIX}-${INSTANCE}" 45 | exit 1 46 | else 47 | echo "Successfully created pod: ${NAME_PREFIX}-${INSTANCE}" 48 | fi 49 | done 50 | -------------------------------------------------------------------------------- /examples/cassandra/cassandra-app-create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default 512MB memory 4 | MEM=512M 5 | 6 | # 1 CPU Core/1000 CPU milli cores 7 | CPU=1000 8 | 9 | # Volume size 10 | VOLSIZE=1G 11 | 12 | # Cassandra prefix 13 | NAME_PREFIX=${1:-cassandra} 14 | 15 | # Number of instances to configure 16 | NUM_INSTANCES=${2:-3} 17 | 18 | # dwctl binary 19 | DWCTL=/home/dws/bin/dwctl 20 | 21 | # Create volumes 22 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 23 | do 24 | ${DWCTL} volume create ${NAME_PREFIX}-vg${INSTANCE} -v vol,${VOLSIZE},ext4 -l ${NAME_PREFIX} -x ${NAME_PREFIX} 25 | if [ $? -ne 0 ]; then 26 | echo "Failed to create volume: ${NAME_PREFIX}-vg${INSTANCE}" 27 | exit 1 28 | else 29 | echo "Successfully created volume: ${NAME_PREFIX}-vg${INSTANCE}" 30 | fi 31 | done 32 | 33 | # Create pods 34 | for INSTANCE in `seq 1 ${NUM_INSTANCES}` 35 | do 36 | # Env vars 37 | ENV_ARGS="-e CASSANDRA_LISTEN_INTERFACE=macvlan0" 38 | if [ $INSTANCE -ne 1 ]; then 39 | # Configure first instance as seed 40 | ENV_ARGS="${ENV_ARGS},CASSANDRA_SEEDS=${NAME_PREFIX}-1" 41 | fi 42 | # Other args 43 | ARGS="-v ${NAME_PREFIX}-vg${INSTANCE}/vol:/data,rw -c ${CPU} -m ${MEM}" 44 | 45 | # Create Pod 46 | ${DWCTL} pod create ${NAME_PREFIX}-${INSTANCE} ${ENV_ARGS} ${ARGS} -i datawiseio/cassandra 47 | if [ $? -ne 0 ]; then 48 | echo "Failed to create pod: ${NAME_PREFIX}-${INSTANCE}" 49 | exit 1 50 | else 51 | echo "Successfully created pod: ${NAME_PREFIX}-${INSTANCE}" 52 | fi 53 | done 54 | -------------------------------------------------------------------------------- /docs/networks.md: -------------------------------------------------------------------------------- 1 | #Networking 2 | 3 | Containers are first class network endpoints with Project 6 and get an IP address 4 | allocated to them. There is no overhead of configuring and managing port mappings. 5 | 6 | Creation of a network is needed for selecting IP addresses for each of the Pods. 7 | The IP address assignment is stable (not freed until the pod is deleted) and is 8 | also backed by a builtin cluster DNS. At the time of creating a cluster, a pod DNS 9 | domain can be specified. Cluster DNS has an entry for each pod in the format 10 | "pod-name.pod-DNS-domain". 11 | 12 | Pods can connect to each other using pod DNS names. To connect external applications 13 | to pods by DNS names, point your external DNS server to the Virtual IP of the 14 | cluster for the pod DNS domain. 15 | 16 | Current implementation makes the following assumptions and is targeted at small, 17 | trial deployments. 18 | - Layer 2 Top of Rack switch 19 | - vlan spans all nodes 20 | 21 | There are many planned enhancements that will be announced in the near future 22 | which will overcome these limitations. 23 | 24 | ##Networks 25 | 26 | The below command creates a network called "blue". 27 | 28 | ``` 29 | $ dwctl network create blue -s 192.168.30.0/24 --start 192.168.30.101 \ 30 | --end 192.168.30.200 -g 192.168.30.1 -v 10 31 | NAME START ADDRESS TOTAL USED GATEWAY V[X]LAN 32 | blue 192.168.30.101 100 0 192.168.30.1 10 33 | ``` 34 | 35 | ##MAC pools 36 | There is a default MAC pool already created that is used to assign MAC addresses 37 | to each container. A new MAC pool can be creating using the command below. 38 | 39 | ``` 40 | dwctl macpool create pool0 E8:66:C6:00:00:00 1000 41 | NAME START ADDRESS TOTAL USED 42 | ---- ------------- ----- ---- 43 | pool0 E8:66:C6:00:00:00 1000 0 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/replicators.md: -------------------------------------------------------------------------------- 1 | #Replicator 2 | 3 | Replicator is primarily used to run multiple instances of stateless applications. 4 | It follows the pod create semantics with a couple of additional options. 5 | - Replicas (how many copies to run) 6 | - Selector (label used to identify pods controlled by this replicator) 7 | 8 | The replicator numbering is different from the default Kubernetes implementation. 9 | This is a nominal replication controller that will number pods from 0001 to X, X 10 | being the number of replicas. 11 | 12 | ##Replicator create 13 | ``` 14 | $ dwctl replicator create nginx -i nginx:latest -r 3 -s app=nginx 15 | CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS 16 | nginx nginx nginx:latest app=nginx 3 17 | 18 | $ dwctl pod list -l app=nginx 19 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 20 | nginx-0001 192.168.30.201 nginx nginx:latest 192.168.30.11 app=nginx Running 15 seconds 21 | nginx-0002 192.168.30.203 nginx nginx:latest 192.168.30.13 app=nginx Running 15 seconds 22 | nginx-0003 192.168.30.202 nginx nginx:latest 192.168.30.12 app=nginx Running 15 seconds 23 | ``` 24 | 25 | By default, the scheduler attempts to spread the pods belonging to a replicator. 26 | If strict spreading is desired, use the -x option to specify conflict. 27 | 28 | ##Replicator update 29 | 30 | To increase/decrease the number of instances of the application, use the update 31 | command. 32 | 33 | ``` 34 | $ dwctl replicator update nginx -r 4 35 | CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS 36 | nginx nginx nginx:latest app=nginx 4 37 | 38 | $ dwctl pod list -l app=nginx 39 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 40 | nginx-0001 192.168.30.201 nginx nginx:latest 192.168.30.11 app=nginx Running 4 minutes 41 | nginx-0002 192.168.30.203 nginx nginx:latest 192.168.30.13 app=nginx Running 4 minutes 42 | nginx-0003 192.168.30.202 nginx nginx:latest 192.168.30.12 app=nginx Running 4 minutes 43 | nginx-0004 192.168.30.106 nginx nginx:latest 192.168.30.11 app=nginx Running 10 seconds 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/volumes.md: -------------------------------------------------------------------------------- 1 | #Volumes 2 | 3 | Storage is a first class object in Project6. Most other orchestration systems 4 | are focused on using the storage that is already provisioned. Instead, Project6 5 | pools storage from disks of all nodes in the cluster. It is meant for bare 6 | metal on premises deployment. To try this outside of the vagrant setup, please 7 | contact us at project6@datawise.io. 8 | 9 | Volumes are scheduled to a node based on storage requirement and the available 10 | storage capacity on that node. 11 | 12 | #Types of volumes 13 | There are two types of volumes - persistent and temporary. 14 | 15 | Persistent volumes are explicitly created using ```dwctl volume create```. They 16 | are named and scheduled to a node. A pod wanting to use a persistent volume needs 17 | to refer to the volume at the time of creation. 18 | 19 | Temporary volumes are tied to the lifecycle of the pod and dont need to managed 20 | by the user. 21 | 22 | ##Persistent volume creation 23 | ``` 24 | $ dwctl volume create wp-vol -v vol1,1G,ext4 25 | GROUP VOLUME SIZE FSTYPE HOST LABELS STATUS CREATED 26 | wp-vol vol1 1G ext4 Pending Less than a second 27 | 28 | $ dwctl volume list 29 | GROUP VOLUME SIZE FSTYPE HOST LABELS STATUS CREATED 30 | wp-vol vol1 1G ext4 192.168.30.11 Available 24 seconds 31 | ``` 32 | 33 | Multiple volumes can be created together on a node by using the -v option multiple 34 | times. The aggregate storage capacity of all these volumes is used to schedule the 35 | volume group. Once the volume is scheduled and created, it transitions to Available 36 | status. If there is no storage available, the volume remains in Pending state. 37 | 38 | Volumes can be scheduled to different nodes using the same labels and conflicts 39 | concept as pods. This is useful for deploying clustered apps like zookeeper & 40 | cassandra. 41 | 42 | ##Using a persistent volume 43 | ``` 44 | dwctl pod create mysql -i docker.io/mysql:latest \ 45 | -v wp-vol/vol1:/var/lib/mysql,rw -e MYSQL_ROOT_PASSWORD=root 46 | ``` 47 | 48 | As of now, this pod can only be scheduled to a node where its persistent volume is 49 | scheduled. If for some reason, the persistent volume is not scheduled, the pod 50 | remains in pending state until the persistent volume is scheduled. 51 | 52 | ##Using a temporary volume 53 | 54 | When a pod needs temporary storage, it can use a temporary volume. Storage capacity 55 | also becomes a scheduling criterion for selecting a node for this pod. 56 | 57 | ``` 58 | dwctl pod create wordpress -i docker.io/wordpress:4.2.2 \ 59 | -v /var/www/html,rw,100M,ext4 \ 60 | -e WORDPRESS_DB_HOST=172.16.1.106,WORDPRESS_DB_USER=root,WORDPRESS_DB_PASSWORD=root 61 | ``` 62 | 63 | In the above example, a 100M volume is created with ext4 filesystem and is mounted 64 | at /var/www/html in read-write mode inside the container. 65 | -------------------------------------------------------------------------------- /docs/clustering.md: -------------------------------------------------------------------------------- 1 | #Clustering Guide 2 | 3 | ##Creating a cluster 4 | A cluster is created by logging in to a node and adding other nodes to the cluster. 5 | ``` 6 | $ vagrant ssh node-1 7 | 8 | $ dwctl cluster create mycluster 192.168.30.11,192.168.30.12,192.168.30.13 \ 9 | --vip 192.168.30.10 --poddns mycluster.datawise.io 10 | ``` 11 | 12 | ##Checking the status of the cluster 13 | ``` 14 | $ dwctl cluster status 15 | Name : mycluster 16 | Id : 6643793367141168237 17 | Version : 0.1 18 | Master : 192.168.30.11 19 | Virtual IP : 192.168.30.10 20 | NTP server(s) : [pool.ntp.org] 21 | DNS server(s) : [8.8.8.8] 22 | Logging service : enabled 23 | Pod DNS Domain : mycluster.datawise.io 24 | 25 | IP ADDRESS STATE CONDITION MILLICORES MEMORY STORAGE VNICS LABELS 26 | 192.168.30.11 Joined Ready 0/2000 0/993.5M 0/10G 0/64 27 | 192.168.30.12 Joined Ready 0/2000 0/993.5M 0/10G 0/64 28 | 192.168.30.13 Joined Ready 0/2000 0/993.5M 0/10G 0/64 29 | ``` 30 | 31 | There is an election to determine which of these nodes runs the scheduler and API 32 | end points. In the above example, ```192.168.30.11``` is the master. The cluster is 33 | always reachable using the virtual IP of ```192.168.30.10``` from this point onwards. 34 | dwctl commands could be run on any node by setting the environment variable VIRTUAL_IP 35 | 36 | ``` 37 | $ export VIRTUAL_IP=192.168.30.10 38 | ``` 39 | 40 | State indicates whether the node successfully joined the cluster. Ready condition 41 | indicates the node is ready to accept workloads and is in healthy state. Resource 42 | capacity and utilization for all nodes are listed in the next columns. 43 | 44 | ##Removing a node from the cluster 45 | 46 | ``` 47 | $ dwctl cluster remove 192.168.30.13 48 | ``` 49 | 50 | Check cluster status to ensure it was removed. Since the cluster has only three nodes, 51 | only one node can be removed from the cluster to ensure quorum is not lost. 52 | 53 | ##Adding a node to the cluster 54 | 55 | ``` 56 | $ dwctl cluster add 192.168.30.13 57 | ``` 58 | 59 | ##Change cluster configuration 60 | 61 | ``` 62 | $ dwctl cluster configure --ntp time.ubuntu.com 63 | ``` 64 | 65 | ##High availability 66 | The cluster can tolerate node, master, pod failures. 67 | 68 | ###Node failure 69 | No new pods are scheduled to the failed node. Existing pods are relocated to other 70 | nodes(after a timeout) unless they have hard constraints (like persistent volumes) 71 | that prevent them from being moved. 72 | 73 | Note: The volume portability restriction will go away in the future. 74 | 75 | ###Master failure 76 | A new node is elected to be the master and hosts the Virtual IP for the cluster. 77 | Cluster DNS server moves to the new master. All the pods are moved as described 78 | above in Node failure. 79 | 80 | ###Pod failure 81 | If a pod fails, it is automatically restarted. At this time, application level 82 | health monitoring is not supported. 83 | -------------------------------------------------------------------------------- /docs/pods.md: -------------------------------------------------------------------------------- 1 | #Pods 2 | 3 | ##What is a Pod 4 | Pod is a collection of one or more containers that need to be scheduled together. 5 | For a more detailed description of pods, see this Kubernetes [link](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md) 6 | 7 | ##Creating a Pod 8 | For creating a pod, these are the attributes that can be specified. Name and Image 9 | attributes are mandatory, the rest are optional. 10 | 11 | * Name 12 | * Image 13 | * CPU (shares or millcores) 14 | * Memory (can be expressed in bytes, KB, MB, GB) 15 | * Network (if no network is configured, the pod remains in pending state) 16 | * Storage (persistent or temporary and where to mount it) 17 | * Labels (labels are useful to search pods or to drive scheduling decision) 18 | * Selectors (Used to match nodes that have these labels) 19 | * Conflicts (Used to prevent conflicting pods from being scheduled together) 20 | * Env Vars (Environment variables that are passed to the application) 21 | 22 | A pod can be created using ```dwctl pod create``` or ```dwctl run```. 23 | 24 | ###A simple example 25 | ``` 26 | $ dwctl run nginx -i nginx:latest 27 | ``` 28 | 29 | To check where the pod is scheduled and its status: 30 | ``` 31 | $ dwctl pod list 32 | ``` 33 | OR 34 | ``` 35 | $ dwctl pod get nginx 36 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 37 | nginx 192.168.30.106 nginx nginx:latest 192.168.30.13 Pending 7 seconds 38 | ``` 39 | 40 | Note that each Pod gets a unique IP for itself. This IP is allocated from a 41 | pre-created network pool. 42 | 43 | If the pod is in pending state, it means that some of the resources it needs are 44 | not available. This could be an image download in progress, a missing network or 45 | exhaustion of storage capacity. Use ```dwctl event list -p nginx``` to list events 46 | related to this pod. 47 | 48 | ###Scheduling a pod to a specific set of nodes 49 | Lets say there are a few nodes with SSDs in the cluster and a pod needs to be 50 | scheduled to those nodes. 51 | 52 | Step 1 - tag a node 53 | ``` 54 | $ dwctl node tag 192.168.30.13 ssd 55 | NAME LABELS STATUS 56 | 192.168.30.13 ssd Schedulable,Ready 57 | ``` 58 | 59 | Step 2 - specify a selector with pod creation. 60 | ``` 61 | $ dwctl run nginx -i nginx:latest -s ssd 62 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 63 | nginx nginx nginx:latest Pending Less than a second 64 | 65 | $ dwctl pod get nginx 66 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 67 | nginx 192.168.30.106 nginx nginx:latest 192.168.30.13 Pending 7 seconds 68 | 69 | ``` 70 | 71 | ###Scheduling two pods away from each other 72 | If two pods need to be scheduled on different nodes, conflicts can be used to 73 | achieve that. 74 | ``` 75 | $ dwctl run nginx1 -i nginx:latest -l app=nginx -x app=nginx 76 | $ dwctl run nginx2 -i nginx:latest -l app=nginx -x app=nginx 77 | 78 | $ dwctl pod list -l app=nginx 79 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 80 | nginx1 192.168.30.106 nginx1 nginx:latest 192.168.30.11 app=nginx Running 17 seconds 81 | nginx2 192.168.30.107 nginx2 nginx:latest 192.168.30.12 app=nginx Pending 12 seconds 82 | ``` 83 | 84 | ###Listing pods 85 | ```dwctl pod list``` lists all the running pods in the cluster including the 86 | builtin service pods. -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # The number of nodes to provision 5 | $num_node = (ENV['NUM_NODES'] || 3).to_i 6 | 7 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 8 | # configures the configuration version (we support older styles for 9 | # backwards compatibility). Please don't change it unless you know what 10 | # you're doing. 11 | Vagrant.configure(2) do |config| 12 | # The most common configuration options are documented and commented below. 13 | # For a complete reference, please see the online documentation at 14 | # https://docs.vagrantup.com. 15 | 16 | # Every Vagrant development environment requires a box. You can search for 17 | # boxes at https://atlas.hashicorp.com/search. 18 | config.vm.box = "datawiseio/project6" 19 | config.ssh.username = 'root' 20 | config.ssh.password = 'vagrant' 21 | config.ssh.insert_key = 'true' 22 | # Disable automatic box update checking. If you disable this, then 23 | # boxes will only be checked for updates when the user runs 24 | # `vagrant box outdated`. This is not recommended. 25 | # config.vm.box_check_update = false 26 | $num_node.times do |i| 27 | config.vm.define node_name = "node-#{i+1}" do |config| 28 | config.vm.hostname = node_name 29 | # Create a forwarded port mapping which allows access to a specific port 30 | # within the machine from a port on the host machine. In the example below, 31 | # accessing "localhost:8080" will access port 80 on the guest machine. 32 | # config.vm.network "forwarded_port", guest: 80, host: 8080 33 | 34 | # Create a private network, which allows host-only access to the machine 35 | # using a specific IP. 36 | ip = "192.168.30.#{i+11}" 37 | config.vm.network :private_network, ip: ip 38 | 39 | # Create a public network, which generally matched to bridged network. 40 | # Bridged networks make the machine appear as another physical device on 41 | # your network. 42 | # config.vm.network "public_network" 43 | 44 | # Share an additional folder to the guest VM. The first argument is 45 | # the path on the host to the actual folder. The second argument is 46 | # the path on the guest to mount the folder. And the optional third 47 | # argument is a set of non-required options. 48 | # config.vm.synced_folder "../data", "/vagrant_data" 49 | 50 | # Provider-specific configuration so you can fine-tune various 51 | # backing providers for Vagrant. These expose provider-specific options. 52 | # Example for VirtualBox: 53 | # 54 | config.vm.provider "virtualbox" do |vb| 55 | # Display the VirtualBox GUI when booting the machine 56 | # vb.gui = true 57 | 58 | # # Customize the amount of memory on the VM: 59 | vb.memory = "1024" 60 | vb.cpus = "2" 61 | vb.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] 62 | # If number of drives is increased/decreased, please update drives.cfg 63 | # to reflect the change and run 'vagrant reload --provision' 64 | drives = 2 65 | idx = 1 66 | while idx <= drives do 67 | disk = "disk#{i}#{idx}.vmdk" 68 | unless File.exist?(disk) 69 | vb.customize ['createhd', '--filename', disk, '--size', 8192, '--format', 'VMDK'] 70 | end 71 | vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', idx, '--device', 0, '--type', 'hdd', '--medium', disk] 72 | idx += 1 73 | end 74 | end 75 | # 76 | # View the documentation for the provider you are using for more 77 | # information on available options. 78 | 79 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies 80 | # such as FTP and Heroku are also available. See the documentation at 81 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 82 | # config.push.define "atlas" do |push| 83 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 84 | # end 85 | 86 | # Enable provisioning with a shell script. Additional provisioners such as 87 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 88 | # documentation for more information about their specific syntax and use. 89 | config.vm.provision "shell", inline: <<-SHELL 90 | sudo ifconfig enp0s8 promisc 91 | SHELL 92 | 93 | end 94 | end 95 | end 96 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #Project 6 2 | ##Kubernetes + Simplified Networking and Storage 3 | ![alt tag](https://cloud.githubusercontent.com/assets/3618819/8199045/1a612582-1464-11e5-96ca-69ed66a399ab.png) 4 | 5 | ##About 6 | Project 6 is software for deploying and managing Docker containers across a cluster of hosts, with a focus on simplifying network and storage configurations for on-premises environments. It builds on Google's Kubernetes, Docker and CoreOS's Etcd. Get your Kubernetes cluster up and running in just 3 simple steps in less than 10 minutes. 7 | 8 | ##Setup 9 | 10 | Install Vagrant and 11 | VirtualBox 12 | 13 | Note: Please download the latest vagrant >1.7.x from http://www.vagrantup.com/downloads.html 14 | 15 | ##Quick start guide 16 | 17 | Clone the repository and bring up the VMs. 18 | ``` 19 | $ git clone https://github.com/DatawiseIO/Project6.git 20 | $ cd Project6/vagrant 21 | $ vagrant up 22 | ``` 23 | By default 3 VMs with 1G RAM & 2 CPU cores are created. Each VM is assigned a 24 | 192.168.30.x IP address. Datawise.io software is already installed in these VMs. 25 | dwctl is the command line tool to interact with the cluster. Every dwctl command 26 | has help available with the -h option. 27 | 28 | Here are the three simple steps to create a cluster, provision a network and 29 | start nginx/mysql. 30 | 31 | ####Step 1 - Create a cluster 32 | ``` 33 | $ vagrant ssh node-1 34 | 35 | $ dwctl cluster create mycluster 192.168.30.11,192.168.30.12,192.168.30.13 \ 36 | --vip 192.168.30.10 --poddns mycluster.datawise.io 37 | ``` 38 | 39 | Check cluster status 40 | ``` 41 | $ dwctl cluster status 42 | ``` 43 | ####Step 2 - Create a network 44 | This network is used to assign IP addresses to pods. 45 | ``` 46 | $ dwctl network create blue -s 192.168.30.0/24 --start 192.168.30.101 \ 47 | --end 192.168.30.200 -g 192.168.30.1 48 | ``` 49 | ####Step 3 - Create a stateless or stateful pod 50 | 51 | #####Stateless example - nginx 52 | ``` 53 | $ dwctl pod create nginx -i docker.io/nginx:latest -n blue 54 | ``` 55 | 56 | Get status of the pod. 57 | 58 | ``` 59 | [root@node-1 ~]# dwctl pod get nginx 60 | POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED 61 | nginx 192.168.30.106 nginx docker.io/nginx:latest 192.168.30.12 Running 10 seconds 62 | ``` 63 | 64 | Connect to nginx using the url http://192.168.30.106 65 | 66 | #####Stateful example - mysql 67 | ``` 68 | dwctl volume create wp-vol -v vol1,1G,ext4 69 | 70 | dwctl pod create mysql -i docker.io/mysql:5.5 \ 71 | -v wp-vol/vol1:/var/lib/mysql,rw -n blue -c 1250 -m 750M \ 72 | -e MYSQL_ROOT_PASSWORD=root 73 | ``` 74 | 75 | ###Advanced use cases 76 | Please refer to the docs and examples directories for more details on these use cases. 77 | * Multi tier applications ([wordpress & mysql](https://github.com/DatawiseIO/Project6/blob/master/examples/wordpress-mysql.sh)) 78 | * Clustered applications (like [zookeeper](https://github.com/DatawiseIO/Project6/tree/master/examples/zookeeper), [cassandra](https://github.com/DatawiseIO/Project6/tree/master/examples/cassandra)) 79 | * [Multi instance pods](https://github.com/DatawiseIO/Project6/blob/master/docs/replicators.md) 80 | 81 | ##Features 82 | ###Cluster management 83 | * Single command to set up a kubernetes cluster 84 | * Easy to grow/shrink the cluster 85 | * Self healing control plane with no single point of failure 86 | * Process management through systemd 87 | * Virtual IP for stable API endpoint access 88 | * NTP for cluster wide time synchronization 89 | * DNS server provisioning for external connectivity 90 | * MDNS based discovery of free nodes 91 | * Built in DNS server for pods 92 | * Integrated logging & monitoring services 93 | * Command line tool and REST interfaces 94 | 95 | ###Docker container management 96 | ####Kubernetes features 97 | * Pods 98 | * Replication controllers 99 | * Labels 100 | * Events 101 | * CPU, memory, constraint scheduler 102 | * cadvisor, heapster for statistics 103 | * Cluster DNS server for inter pod connectivity 104 | 105 | ####Datawise extensions 106 | * Direct attached storage scheduling & provisioning 107 | * Durable & temporary storage 108 | * Cluster wide network resource management (IPs & MACs) 109 | * [Anti affinity scheduler](https://github.com/GoogleCloudPlatform/kubernetes/pull/9560) 110 | * Agent container per node 111 | 112 | ##Roadmap 113 | This is a preliminary implementation (version 0.1), based on Docker (version 1.5.0) and Google’s Kubernetes (version 0.16), with plans to stay current with the new releases and make open source contributions back to those projects. Based on community input, we will add more enterprise grade network and storage features over time, and also extend compatibility to other orchestration layers such as Docker Swarm and Apache Mesos. 114 | 115 | ##Feedback & Support 116 | Your feedback, suggestions and questions are very important to us. Please let us know what you think via our discussion forum datawiseio-project6@googlegroups.com or by email project6@datawise.io. 117 | 118 | A Vagrant and VirtualBox configuration is available via the Setup steps above for quick download and evaluation. If you would like to install the software in a different environment, or bare-metal servers please get in touch and we would be happy to help. 119 | 120 | Disclaimer: this is an alpha/preview release. We are happy to answer questions and provide help on a best effort basis, but would not recommend production usage yet. 121 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | --------------------------------------------------------------------------------