├── .dockerignore ├── .gitignore ├── zookeeper-env.sh ├── k8s ├── helm │ ├── values.yaml │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ └── templates │ │ └── zookeeper.yaml ├── zk.yaml ├── README.md ├── zk-persistent.yaml └── main.sh ├── zookeeper-download.sh ├── Dockerfile ├── zkBootstrap.sh ├── openshift ├── buildconfig.yaml ├── main.sh ├── zk.yaml ├── README.md └── zk-persistent.yaml ├── Makefile ├── README.md ├── .circleci └── config.yml └── LICENSE /.dockerignore: -------------------------------------------------------------------------------- 1 | *.* 2 | !Dockerfile 3 | !*.sh -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.circleci 3 | !.helmignore 4 | !.dockerignore 5 | !.gitignore 6 | 7 | minikube 8 | minishift 9 | kubectl 10 | oc -------------------------------------------------------------------------------- /zookeeper-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | export ZK_dataDir=${ZK_dataDir:-$ZOO_HOME/data} 4 | export ZK_dataLogDir=${ZK_dataLogDir:-$ZOO_HOME/data-log} 5 | export ZK_clientPort=${ZK_clientPort:-2181} 6 | 7 | export ZOOPIDFILE=$ZK_dataDir/myid 8 | -------------------------------------------------------------------------------- /k8s/helm/values.yaml: -------------------------------------------------------------------------------- 1 | Name: "zk" 2 | Replicas: 1 3 | Image: "engapa/zookeeper" 4 | ImageTag: "3.7.0" 5 | ImagePullPolicy: "IfNotPresent" 6 | Cpu: "1" 7 | Memory: "512M" 8 | MaxCpu: "1" 9 | MaxMemory: "1G" 10 | Storage: "1Gi" 11 | Component: "zookeeper" -------------------------------------------------------------------------------- /k8s/helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj -------------------------------------------------------------------------------- /k8s/helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Apache ZooKeeper enables highly reliable distributed coordination system. 3 | name: zookeeper 4 | version: 3.7.0 5 | keywords: 6 | - zookeeper 7 | home: https://zookeeper.apache.org/ 8 | sources: 9 | - https://github.com/engapa/zookeeper-k8s-openshift/k8s/helm 10 | - https://github.com/engapa/zookeeper-k8s-openshift 11 | maintainers: 12 | - name: Enrique Garcia - engapa 13 | email: engapa@gmail.com 14 | icon: http://zookeeper.apache.org/images/zookeeper_small.gif -------------------------------------------------------------------------------- /zookeeper-download.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | if [[ -z $ZOO_VERSION || -z $ZOO_HOME ]]; then 4 | echo 'ZOO_VERSION and ZOO_HOME are required values.' 
&& exit 1; 5 | fi 6 | 7 | function download_zoo_release () { 8 | 9 | URL_BASE="https://dist.apache.org/repos/dist/release/zookeeper" 10 | URL_DIST="${URL_BASE}/zookeeper-${ZOO_VERSION}/apache-zookeeper-${ZOO_VERSION}-bin.tar.gz" 11 | 12 | wget -q -O /tmp/zookeeper.tar.gz "${URL_DIST}" 13 | wget -q -O /tmp/zookeeper.tar.gz.asc "${URL_DIST}.asc" 14 | 15 | wget -q -O /tmp/KEYS "${URL_BASE}/KEYS" 16 | gpg -q --import /tmp/KEYS 17 | 18 | gpg -q --verify /tmp/zookeeper.tar.gz.asc /tmp/zookeeper.tar.gz 19 | tar -xzf /tmp/zookeeper.tar.gz --strip-components 1 -C $ZOO_HOME 20 | 21 | rm -rf /tmp/zookeeper.tar.{gz,gz.asc} \ 22 | /tmp/KEYS 23 | rm -rf $ZOO_HOME/{NOTICE.txt,LICENSE.txt,README*} \ 24 | $ZOO_HOME/docs \ 25 | $ZOO_HOME/bin/{README*,*.cmd} 26 | } 27 | 28 | function download_utils() { 29 | wget -q -O $ZOO_HOME/common_functions.sh https://raw.githubusercontent.com/engapa/utils-docker/master/common-functions.sh 30 | chmod a+x $ZOO_HOME/common_functions.sh 31 | } 32 | 33 | download_utils && download_zoo_release -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:11-jre-slim-buster 2 | 3 | MAINTAINER Enrique Garcia 4 | 5 | ARG ZOO_HOME=/opt/zookeeper 6 | ARG ZOO_USER=zookeeper 7 | ARG ZOO_GROUP=zookeeper 8 | ARG ZOO_VERSION="3.7.0" 9 | 10 | ENV ZOO_HOME=$ZOO_HOME \ 11 | ZOO_VERSION=$ZOO_VERSION \ 12 | ZOO_REPLICAS=1 \ 13 | ZOO_USER=$ZOO_USER \ 14 | ZOO_GROUP=$ZOO_GROUP \ 15 | ZOOCFGDIR=$ZOO_HOME/conf \ 16 | PATH=$ZOO_HOME/bin:${PATH} 17 | 18 | # Required packages 19 | RUN apt update && \ 20 | apt install -y tar gnupg openssl ca-certificates wget netcat sudo 21 | 22 | # User and group 23 | RUN groupadd -g 1001 $ZOO_GROUP \ 24 | && useradd -d $ZOO_HOME -g $ZOO_GROUP -u 1001 -G sudo -m $ZOO_USER \ 25 | && echo "${ZOO_USER} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers 26 | 27 | # Download zookeeper distribution under ZOO_HOME directory 28 | ADD zookeeper-download.sh /tmp/ 29 | 30 | RUN chmod a+x /tmp/zookeeper-download.sh \ 31 | && /tmp/zookeeper-download.sh 32 | 33 | # Add custom files.
34 | ADD zkBootstrap.sh $ZOO_HOME/bin 35 | ADD zookeeper-env.sh $ZOOCFGDIR 36 | 37 | # Permissions 38 | RUN chown -R $ZOO_USER:$ZOO_GROUP $ZOO_HOME && \ 39 | chmod a+x $ZOO_HOME/bin/* && \ 40 | chmod -R a+w $ZOO_HOME && \ 41 | ln -s $ZOO_HOME/bin/zk_*.sh /usr/bin 42 | 43 | USER $ZOO_USER 44 | 45 | # Workdir for docker images is the same that ZOOBINDIR env variable for zookeeper process 46 | WORKDIR $ZOO_HOME/bin/ 47 | 48 | EXPOSE ${ZK_clientPort:-2181} ${ZOO_SERVER_PORT:-2888} ${ZOO_ELECTION_PORT:-3888} 49 | 50 | HEALTHCHECK --interval=10s --retries=10 CMD zkServer.sh status 51 | 52 | CMD zkBootstrap.sh && zkServer.sh --config $ZOOCFGDIR start-foreground 53 | -------------------------------------------------------------------------------- /k8s/zk.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zk 5 | labels: 6 | zk-name: zk 7 | component: zk 8 | annotations: 9 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 10 | spec: 11 | ports: 12 | - port: 2181 13 | name: zkclient 14 | - port: 2888 15 | name: zkserver 16 | - port: 3888 17 | name: zkleader 18 | clusterIP: None 19 | selector: 20 | zk-name: zk 21 | --- 22 | apiVersion: apps/v1 23 | kind: StatefulSet 24 | metadata: 25 | name: zk 26 | labels: 27 | zk-name: zk 28 | component: zk 29 | spec: 30 | serviceName: zk 31 | selector: 32 | matchLabels: 33 | zk-name: zk 34 | component: zk 35 | replicas: 3 36 | podManagementPolicy: "Parallel" 37 | template: 38 | metadata: 39 | labels: 40 | zk-name: zk 41 | component: zk 42 | spec: 43 | securityContext: 44 | runAsUser: 1001 45 | fsGroup: 1001 46 | containers: 47 | - name: zk 48 | imagePullPolicy: IfNotPresent 49 | image: engapa/zookeeper:3.7.0 50 | resources: 51 | requests: 52 | memory: 512M 53 | cpu: 300m 54 | limits: 55 | memory: 512M 56 | cpu: 300m 57 | ports: 58 | - containerPort: 2181 59 | name: zkclient 60 | - containerPort: 2888 61 | name: zkserver 62 | - containerPort: 3888 63 | name: zkleader 64 | env: 65 | - name: ZOO_REPLICAS 66 | value: "3" 67 | - name: JAVA_ZK_JVMFLAGS 68 | value: "\"-Xmx512M -Xms512M\"" 69 | readinessProbe: 70 | exec: 71 | command: 72 | - zkServer.sh 73 | - status 74 | initialDelaySeconds: 20 75 | timeoutSeconds: 10 76 | livenessProbe: 77 | exec: 78 | command: 79 | - zkServer.sh 80 | - status 81 | initialDelaySeconds: 20 82 | timeoutSeconds: 10 83 | -------------------------------------------------------------------------------- /zkBootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | . $ZOO_HOME/common_functions.sh 4 | . $ZOOCFGDIR/zookeeper-env.sh 5 | 6 | function zk_local_cluster() { 7 | 8 | # Required envs for replicated mode 9 | export ZK_tickTime=${ZK_tickTime:-2000} 10 | export ZK_initLimit=${ZK_initLimit:-5} 11 | export ZK_syncLimit=${ZK_syncLimit:-2} 12 | 13 | ZOO_SERVER_PORT=${ZOO_SERVER_PORT:-2888} 14 | ZOO_ELECTION_PORT=${ZOO_ELECTION_PORT:-3888} 15 | 16 | for (( i=1; i<=$ZOO_REPLICAS; i++ )); do 17 | export ZK_server_$i="$NAME-$((i-1)).$DOMAIN:$ZOO_SERVER_PORT:$ZOO_ELECTION_PORT" 18 | done 19 | 20 | } 21 | 22 | HOST=`hostname -s` 23 | DOMAIN=`hostname -d` 24 | 25 | if [[ $ZOO_REPLICAS -gt 1 ]];then 26 | if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then 27 | NAME=${BASH_REMATCH[1]} 28 | ORD=${BASH_REMATCH[2]} 29 | zk_local_cluster 30 | export MYID=$((ORD+1)) 31 | else 32 | echo "Unable to create local Zookeeper. Name of host doesn't match with pattern: (.*)-([0-9]+). 
Consider using StatefulSets." 33 | exit 1 34 | fi 35 | fi 36 | 37 | if [[ -f $ZOOCFGDIR/zoo_sample.cfg ]]; then 38 | mv $ZOOCFGDIR/zoo_sample.cfg $ZOOCFGDIR/zoo.cfg 39 | else 40 | touch $ZOOCFGDIR/zoo.cfg 41 | fi 42 | 43 | # Dynamic setup from environment variables to files 44 | for dir in $ZOOCFGDIR $ZK_dataDir $ZK_dataLogDir;do 45 | if [[ ! -d $dir ]]; then 46 | echo "Creating directory $dir ..." 47 | mkdir -p $dir 48 | else 49 | # Ensure that we can write on directories (possible persistent volumes) 50 | echo "Ensuring permission for directory $dir ..." 51 | sudo chown -R $ZOO_USER:$ZOO_GROUP $dir 52 | fi 53 | done 54 | 55 | DEBUG=${SETUP_DEBUG:-false} 56 | LOWER=${SETUP_LOWER:-false} 57 | 58 | # Zookeeper config 59 | PREFIX=ZK_ DEST_FILE=${ZOOCFGDIR}/zoo.cfg env_vars_in_file 60 | 61 | # Tools log4j 62 | PREFIX=LOG4J_ DEST_FILE=${ZOOCFGDIR}/log4j.properties env_vars_in_file 63 | 64 | # Java 65 | PREFIX=JAVA_ZK_ DEST_FILE=${ZOOCFGDIR}/java.env env_vars_in_file 66 | 67 | # The myid for each node 68 | export MYID=${MYID:-1} 69 | 70 | zkServer-initialize.sh --configfile $ZOOCFGDIR/zoo.cfg --myid $MYID --force -------------------------------------------------------------------------------- /k8s/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes resources 2 | 3 | Here are some example resources that may be deployed on your kubernetes environment. 4 | 5 | ## Requirements 6 | 7 | - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 8 | - Kubernetes 9 | 10 | ## Launch a cluster 11 | 12 | Adjust the contents of the `zk.yaml` file and run the following command to create a zookeeper cluster: 13 | 14 | If you have a kubernetes cluster ready, then: 15 | 16 | ```bash 17 | $ kubectl create -f zk[-persistent].yaml 18 | ``` 19 | >NOTE: choose either zk.yaml or zk-persistent.yaml. 20 | 21 | ## Local environment 22 | 23 | We recommend using "minikube" in order to quickly get a ready kubernetes cluster. 24 | 25 | The "main.sh" script may help you do that on your local workstation (checked on Debian/Ubuntu distributions): 26 | 27 | ```bash 28 | $ ./main.sh 29 | ``` 30 | 31 | Install kubectl: 32 | ```bash 33 | $ ./main.sh kubectl-install 34 | ``` 35 | 36 | Install and run minikube: 37 | ```bash 38 | $ ./main.sh minikube-install 39 | $ ./main.sh minikube-run 40 | ``` 41 | 42 | Deploy zookeeper cluster: 43 | ```bash 44 | $ ./main.sh test 45 | ``` 46 | 47 | Deploy zookeeper cluster with persistent storage: 48 | ```bash 49 | $ ./main.sh test-persistent 50 | ``` 51 | 52 | Clean all resources and delete minikube cluster: 53 | ```bash 54 | $ ./main.sh clean-all 55 | $ ./main.sh minikube-delete 56 | ``` 57 | 58 | ## Production environment 59 | 60 | We recommend using the resources with the **persistent** suffix because they use persistent storage. 61 | This means that even if pods are destroyed, all data remain safe on persistent volumes, and when pods are recreated the volumes are attached again. 62 | 63 | The statefulset object has an (optional, commented out) "antiaffinity" pod scheduler policy so that pods will be allocated on separate nodes. 64 | This requires at least as many nodes as the value of the `ZOO_REPLICAS` parameter.
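As a quick check before deploying (a sketch; it assumes `kubectl` is already pointing at the target cluster), you can count the schedulable nodes and compare the result with `ZOO_REPLICAS`:

```bash
# List nodes and count them; the total should be >= ZOO_REPLICAS
kubectl get nodes --no-headers | wc -l
```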
65 | 66 | ```bash 67 | $ kubectl create -f zk-persistent.yaml 68 | $ kubectl get all,pv,pvc -l component=zk 69 | ``` 70 | 71 | ## Cleanup 72 | 73 | This command removes all resources belonging to the zookeeper cluster: 74 | 75 | ```bash 76 | kubectl delete -f zk[-persistent].yaml 77 | ``` 78 | -------------------------------------------------------------------------------- /openshift/buildconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Template 3 | metadata: 4 | name: zk-builder 5 | annotations: 6 | description: Zookeeper image build template 7 | openshift.io/display-name: Zookeeper Image Builder 8 | iconClass: icon-database 9 | tags: database,zookeeper 10 | labels: 11 | template: zk-builder 12 | component: zk 13 | 14 | parameters: 15 | - name: GITHUB_REPOSITORY 16 | value: "https://github.com/engapa/zookeeper-k8s-openshift" 17 | description: Source code repository (Github) 18 | required: true 19 | - name: GITHUB_REF 20 | description: Github source ref 21 | value: "master" 22 | required: true 23 | - name: GITHUB_HOOK_SECRET 24 | description: Secret to notify a change from Github. 25 | value: "secretito" 26 | required: false 27 | - name: IMAGE_STREAM_NAME 28 | description: Name of the image stream 29 | value: "zookeeper" 30 | required: true 31 | - name: IMAGE_STREAM_VERSION 32 | description: Tag of the image stream 33 | value: "latest" 34 | required: true 35 | - name: HTTP_PROXY 36 | description: Any required HTTP Proxy 37 | value: "" 38 | required: false 39 | - name: HTTPS_PROXY 40 | description: Any required HTTPS Proxy 41 | value: "" 42 | - name: NO_PROXY 43 | description: Any required NO_PROXY 44 | value: "" 45 | required: false 46 | 47 | objects: 48 | - apiVersion: v1 49 | kind: ImageStream 50 | metadata: 51 | name: ${IMAGE_STREAM_NAME} 52 | component: zk 53 | spec: 54 | lookupPolicy: 55 | local: true 56 | 57 | - kind: BuildConfig 58 | apiVersion: v1 59 | metadata: 60 | name: zk-builder 61 | component: zk 62 | spec: 63 | runPolicy: Serial 64 | triggers: 65 | - type: GitHub 66 | github: 67 | secret: ${GITHUB_HOOK_SECRET} 68 | - type: ConfigChange 69 | source: 70 | git: 71 | uri: ${GITHUB_REPOSITORY} 72 | ref: ${GITHUB_REF} 73 | httpProxy: ${HTTP_PROXY} 74 | httpsProxy: ${HTTPS_PROXY} 75 | noProxy: ${NO_PROXY} 76 | strategy: 77 | type: Docker 78 | dockerStrategy: 79 | env: 80 | - name: "HTTP_PROXY" 81 | value: ${HTTP_PROXY} 82 | - name: "HTTPS_PROXY" 83 | value: ${HTTPS_PROXY} 84 | - name: "NO_PROXY" 85 | value: ${NO_PROXY} 86 | - name: "http_proxy" 87 | value: ${HTTP_PROXY} 88 | - name: "https_proxy" 89 | value: ${HTTPS_PROXY} 90 | - name: "no_proxy" 91 | value: ${NO_PROXY} 92 | output: 93 | to: 94 | kind: ImageStreamTag 95 | name: "${IMAGE_STREAM_NAME}:${IMAGE_STREAM_VERSION}" -------------------------------------------------------------------------------- /k8s/helm/README.md: -------------------------------------------------------------------------------- 1 | # Apache Zookeeper Helm Chart 2 | 3 | ## Pre Requisites: 4 | 5 | * Kubernetes 1.20 (this is the version we've tested) 6 | 7 | * More than 1 node (if replicas is greater than 1) because of an anti-affinity scheduler policy 8 | 9 | ### Installing the Chart 10 | 11 | To install the chart with the release name `zookeeper-<version>` in the default 12 | namespace: 13 | 14 | ```bash 15 | $ helm repo add engapa http://storage.googleapis.com/kubernetes-charts-incubator 16 | $ helm install --name zookeeper-3.7.0 engapa/zookeeper 17 | ``` 18 | 19 | If you're using a dedicated namespace (recommended) then make sure the namespace 20 |
exists: 21 | 22 | ```bash 23 | $ kubectl create ns zookeeper 24 | $ helm install --name zookeeper-3.7.0 --set global.namespace=zookeeper engapa/zookeeper 25 | ``` 26 | 27 | The chart can be customized using the 28 | following configurable parameters: 29 | 30 | | Parameter | Description | Default | 31 | | ----------------------- | ----------------------------------- | ---------------------------------------------------------- | 32 | | `Name` | Zookeeper resource names | `zk` | 33 | | `Image` | Zookeeper container image name | `engapa/zookeeper` | 34 | | `ImageTag` | Zookeeper container image tag | `3.7.0` | 35 | | `ImagePullPolicy` | Zookeeper container pull policy | `IfNotPresent` | 36 | | `Replicas` | Zookeeper replicas | `3` | 37 | | `Component` | Zookeeper k8s selector key | `zk` | 38 | | `Cpu` | Zookeeper container requested cpu | `500m` | 39 | | `Memory` | Zookeeper container requested memory| `512Mi` | 40 | | `MaxCpu` | Zookeeper container cpu limit | `2` | 41 | | `MaxMemory` | Zookeeper container memory limit | `1Gi` | 42 | 43 | Specify parameters using `--set key=value[,key=value]` argument to `helm install` 44 | 45 | Alternatively a YAML file that specifies the values for the parameters can be provided like this: 46 | 47 | ```bash 48 | $ helm install --name zookeeper-3.7.0 -f values.yaml engapa/zookeeper 49 | ``` 50 | 51 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := help 2 | 3 | DOCKER_ORG ?= engapa 4 | DOCKER_IMAGE ?= zookeeper 5 | 6 | ZK_VERSION ?= 3.7.0 7 | 8 | .PHONY: help 9 | help: ## Show this help 10 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 11 | 12 | .PHONY: clean 13 | clean: ## Clean docker containers and images 14 | @docker rm -f $$(docker ps -a -f "ancestor=$(DOCKER_ORG)/$(DOCKER_IMAGE):$(ZK_VERSION)" --format '{{.Names}}') > /dev/null 2>&1 || true 15 | @docker rmi -f $(DOCKER_ORG)/$(DOCKER_IMAGE):$(ZK_VERSION) > /dev/null 2>&1 || true 16 | 17 | .PHONY: docker-build 18 | docker-build: ## Build the docker image 19 | @docker build --no-cache \ 20 | -t $(DOCKER_ORG)/$(DOCKER_IMAGE):$(ZK_VERSION) . 
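# Example usage (hypothetical values): the variables above are declared with ?=,
# so they can be overridden per invocation, e.g.:
#   make docker-build docker-test ZK_VERSION=3.7.0 DOCKER_ORG=myorg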
21 | 22 | .PHONY: docker-run 23 | docker-run: ## Create a docker container 24 | @docker run -d --rm --name zk $(DOCKER_ORG)/$(DOCKER_IMAGE):$(ZK_VERSION) 25 | 26 | .PHONY: docker-test 27 | docker-test: docker-run ## Test for docker container 28 | @until [ "$$(docker ps --filter 'name=zk' --filter 'health=healthy' --format '{{.Names}}')" == "zk" ]; do \ 29 | sleep 10; \ 30 | (docker ps --filter 'name=zk' --format '{{.Names}}' | grep zk > /dev/null 2>&1) || exit $$?; \ 31 | echo "Checking healthy status of zookeeper ..."; \ 32 | done 33 | 34 | .PHONY: docker-push 35 | docker-push: ## Publish docker images 36 | @docker push $(DOCKER_ORG)/$(DOCKER_IMAGE):$(ZK_VERSION) 37 | 38 | .PHONY: minikube-install 39 | minikube-install: ## Install minikube and kubectl 40 | @k8s/main.sh minikube-install 41 | @k8s/main.sh kubectl-install 42 | 43 | .PHONY: minikube-run 44 | minikube-run: ## Run minikube 45 | @k8s/main.sh minikube-run 46 | 47 | .PHONY: minikube-test 48 | minikube-test: ## Launch tests on minikube 49 | @k8s/main.sh test 50 | 51 | .PHONY: minikube-test-persistent 52 | minikube-test-persistent: ## Launch tests on minikube with persistent volumes 53 | @k8s/main.sh test-persistent 54 | 55 | .PHONY: minikube-clean 56 | minikube-clean: ## Remove kubernetes resources 57 | @k8s/main.sh clean-all 58 | 59 | .PHONY: minikube-delete 60 | minikube-delete: ## Remove minikube cluster 61 | @k8s/main.sh minikube-delete 62 | 63 | .PHONY: oc-install 64 | oc-install: ## Install oc tools 65 | @openshift/main.sh oc-install 66 | 67 | .PHONY: oc-cluster-run 68 | oc-cluster-run: ## Run a cluster through oc command 69 | @openshift/main.sh oc-cluster-run 70 | 71 | .PHONY: oc-cluster-test 72 | oc-cluster-test: ## Launch tests on our local openshift cluster 73 | # Test with 3 replicas 74 | @openshift/main.sh test 3 75 | 76 | .PHONY: oc-clean-resources 77 | oc-clean-resources: ## Clean zk resources 78 | @openshift/main.sh clean-resources 79 | 80 | .PHONY: oc-cluster-test-persistent 81 | oc-cluster-test-persistent: ## Launch tests on our local openshift cluster with persistence 82 | # Test with 3 replicas 83 | @openshift/main.sh test-persistent 3 84 | 85 | .PHONY: oc-cluster-clean 86 | oc-cluster-clean: ## Remove openshift cluster 87 | @openshift/main.sh oc-cluster-clean 88 | 89 | .PHONY: version 90 | version: ## Get version 91 | @echo $(ZK_VERSION) 92 | 93 | ## TODO: helm, ksonnet for deploy on kubernetes -------------------------------------------------------------------------------- /k8s/helm/templates/zookeeper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{.Values.Name}} 5 | labels: 6 | app: {{.Values.Name}} 7 | heritage: {{.Release.Service | quote }} 8 | release: {{.Release.Name | quote }} 9 | chart: "{{.Chart.Name}}-{{.Chart.Version}}" 10 | component: "{{.Release.Name}}-{{.Values.Component}}" 11 | annotations: 12 | "helm.sh/created": {{.Release.Time.Seconds | quote }} 13 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 14 | spec: 15 | ports: 16 | - port: 2181 17 | name: zkclient 18 | - port: 2888 19 | name: zkserver 20 | - port: 3888 21 | name: zkleader 22 | clusterIP: None 23 | selector: 24 | app: {{.Values.Name}} 25 | --- 26 | apiVersion: apps/v1beta1 27 | kind: StatefulSet 28 | metadata: 29 | name: {{.Values.Name}} 30 | labels: 31 | app: {{.Values.Name}} 32 | heritage: {{.Release.Service | quote }} 33 | release: {{.Release.Name | quote }} 34 | chart: "{{.Chart.Name}}-{{.Chart.Version}}" 35 | 
component: "{{.Release.Name}}-{{.Values.Component}}" 36 | annotations: 37 | "helm.sh/created": {{.Release.Time.Seconds | quote }} 38 | spec: 39 | serviceName: {{.Values.Name}} 40 | replicas: {{ default 1 .Values.Replicas }} 41 | template: 42 | metadata: 43 | labels: 44 | app: {{.Values.Name}} 45 | component: "{{.Release.Name}}-{{.Values.Component}}" 46 | annotations: 47 | scheduler.alpha.kubernetes.io/affinity: > 48 | { 49 | "podAntiAffinity": { 50 | "requiredDuringSchedulingIgnoredDuringExecution": [{ 51 | "labelSelector": { 52 | "matchExpressions": [{ 53 | "key": "app", 54 | "operator": "In", 55 | "values": ["{{.Values.Name}}"] 56 | }] 57 | }, 58 | "topologyKey": "kubernetes.io/hostname" 59 | }] 60 | } 61 | } 62 | spec: 63 | containers: 64 | - name: {{.Values.Name}} 65 | imagePullPolicy: "{{.Values.ImagePullPolicy}}" 66 | image: "{{.Values.Image}}:{{.Values.ImageTag}}" 67 | resources: 68 | limits: 69 | cpu: "{{ .Values.MaxCpu }}" 70 | memory: "{{ .Values.MaxMemory }}" 71 | requests: 72 | cpu: "{{ .Values.Cpu }}" 73 | memory: "{{ .Values.Memory }}" 74 | ports: 75 | - containerPort: 2181 76 | name: zkclient 77 | - containerPort: 2888 78 | name: zkserver 79 | - containerPort: 3888 80 | name: zkleader 81 | env: 82 | - name: ZOO_REPLICAS 83 | value: {{ default 1 .Values.Replicas | quote }} 84 | - name: JAVA_ZK_JVMFLAGS 85 | value: "\"-Xmx{{ .Values.MaxMemory }} -Xms{{ .Values.Memory }}\"" 86 | readinessProbe: 87 | exec: 88 | command: 89 | - zk_status.sh 90 | initialDelaySeconds: 15 91 | timeoutSeconds: 5 92 | livenessProbe: 93 | exec: 94 | command: 95 | - zk_status.sh 96 | initialDelaySeconds: 15 97 | timeoutSeconds: 5 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Zookeeper Docker Image 2 | [![Build status](https://circleci.com/gh/engapa/zookeeper-k8s-openshift/tree/master.svg?style=svg "Build status")](https://circleci.com/gh/engapa/zookeeper-k8s-openshift/tree/master) 3 | [![Docker Pulls](https://img.shields.io/docker/pulls/engapa/zookeeper.svg)](https://hub.docker.com/r/engapa/zookeeper/) 4 | [![Docker image version](https://images.microbadger.com/badges/version/engapa/zookeeper.svg)](https://microbadger.com/images/engapa/zookeeper) 5 | ![OSS](https://badges.frapsoft.com/os/v1/open-source.svg?v=103 "We love OpenSource") 6 | 7 | This project aims to provide zookeeper docker images and prepare them to be deployed as 'statefulsets' on kubernetes (openshift). 8 | 9 | These scripts are used to build/run the docker image/container: 10 | 11 | * **zookeeper-env.sh**: Exports needed env variables for other scripts. 12 | * **zookeeper-download.sh**: Downloads the suitable release of zookeeper (version `ZOO_VERSION`). 13 | * **zkBootstrap.sh**: Initializes zookeeper dynamically, based on [utils-docker project](https://github.com/engapa/utils-docker).
14 | 15 | ## Build and push the docker image 16 | 17 | Set env variables DOCKER_ORG (defaults to `engapa`), DOCKER_IMAGE (defaults to `zookeeper`) and ZK_VERSION (the zookeeper version, which should match the `ZOO_VERSION` build arg in the Dockerfile) 18 | to tag the docker image as you wish and then build, test and push it: 19 | 20 | ```bash 21 | $ make clean docker-build docker-test docker-push 22 | ``` 23 | 24 | ## Run a container 25 | 26 | Let's run a zookeeper container with default environment variables: 27 | 28 | ```bash 29 | $ docker run -it --name zk engapa/zookeeper:${ZOO_VERSION} 30 | ``` 31 | 32 | ## Setting up 33 | 34 | Users may configure parameters in config files just by adding environment variables with specific name patterns. 35 | 36 | This table collects the patterns of variable names which will be written to the suitable file: 37 | 38 | PREFIX | FILE (${ZOO_HOME}/conf) | Example 39 | -----------|-----------------------------|----------------------------- 40 | ZK_ | zoo.cfg | ZK_maxClientCnxns=0 --> maxClientCnxns=0 41 | LOG4J_ | log4j.properties | LOG4J_zookeeper_root_logger=INFO, CONSOLE --> zookeeper.root.logger=INFO, CONSOLE 42 | JAVA_ZK_ | java.env | JAVA_ZK_JVMFLAGS="-Xmx1G -Xms1G" --> JVMFLAGS="-Xmx1G -Xms1G" 43 | 44 | So we can configure our zookeeper server by adding environment variables: 45 | 46 | ```bash 47 | $ docker run -it -d --name zk -e "SETUP_DEBUG=true" -e "LOG4J_zookeeper_root_logger=DEBUG, CONSOLE" engapa/zookeeper:${ZOO_VERSION} 48 | ``` 49 | 50 | > NOTE: We've passed a SETUP_DEBUG environment variable with value 'true' to view the setup process of config files. 51 | > Show the logs with the command `docker logs zk`. 52 | 53 | Also you may use the `--env-file` option to load these variables from a file. 54 | 55 | And, of course, you could provide your own properties files directly through volumes with the `-v` option. 56 | 57 | ## k8s 58 | 59 | In the [k8s directory](k8s) there are some resources for Kubernetes. 60 | 61 | Thanks to the kubernetes team for the [contrib](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper). 62 | 63 | ## Openshift 64 | 65 | In the [openshift directory](openshift) you can find some Openshift templates. 66 | 67 | ## Author 68 | 69 | Enrique Garcia **engapa@gmail.com** 70 | -------------------------------------------------------------------------------- /k8s/zk-persistent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zk-persistent 5 | labels: 6 | zk-name: zk-persistent 7 | component: zk 8 | annotations: 9 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 10 | spec: 11 | ports: 12 | - port: 2181 13 | name: zkclient 14 | - port: 2888 15 | name: zkserver 16 | - port: 3888 17 | name: zkleader 18 | clusterIP: None 19 | selector: 20 | zk-name: zk-persistent 21 | --- 22 | apiVersion: apps/v1 23 | kind: StatefulSet 24 | metadata: 25 | name: zk-persistent 26 | labels: 27 | zk-name: zk-persistent 28 | component: zk 29 | spec: 30 | serviceName: zk-persistent 31 | selector: 32 | matchLabels: 33 | zk-name: zk-persistent 34 | component: zk 35 | replicas: 3 36 | podManagementPolicy: "Parallel" 37 | template: 38 | metadata: 39 | labels: 40 | zk-name: zk-persistent 41 | component: zk 42 | # annotations: 43 | ## Use this annotation if you want to allocate each pod on a different node 44 | ## Note the number of nodes must be at least the REPLICAS parameter.
45 | # scheduler.alpha.kubernetes.io/affinity: > 46 | # { 47 | # "podAntiAffinity": { 48 | # "requiredDuringSchedulingIgnoredDuringExecution": [{ 49 | # "labelSelector": { 50 | # "matchExpressions": [{ 51 | # "key": "zk-name", 52 | # "operator": "In", 53 | # "values": ["zk"] 54 | # }] 55 | # }, 56 | # "topologyKey": "kubernetes.io/hostname" 57 | # }] 58 | # } 59 | # } 60 | spec: 61 | securityContext: 62 | runAsUser: 1001 63 | fsGroup: 1001 64 | containers: 65 | - name: zk-persistent 66 | imagePullPolicy: IfNotPresent 67 | image: engapa/zookeeper:3.7.0 68 | resources: 69 | requests: 70 | memory: 512M 71 | cpu: 300m 72 | limits: 73 | memory: 512M 74 | cpu: 300m 75 | ports: 76 | - containerPort: 2181 77 | name: zkclient 78 | - containerPort: 2888 79 | name: zkserver 80 | - containerPort: 3888 81 | name: zkleader 82 | env: 83 | - name: ZOO_REPLICAS 84 | value: "3" 85 | - name: JAVA_ZK_JVMFLAGS 86 | value: "\"-Xmx512M -Xms512M\"" 87 | readinessProbe: 88 | exec: 89 | command: 90 | - zkServer.sh 91 | - status 92 | initialDelaySeconds: 20 93 | timeoutSeconds: 10 94 | livenessProbe: 95 | exec: 96 | command: 97 | - zkServer.sh 98 | - status 99 | initialDelaySeconds: 20 100 | timeoutSeconds: 10 101 | volumeMounts: 102 | - name: datadir 103 | mountPath: /opt/zookeeper/data 104 | - name: datalogdir 105 | mountPath: /opt/zookeeper/data-log 106 | volumeClaimTemplates: 107 | - metadata: 108 | name: datadir 109 | spec: 110 | accessModes: [ "ReadWriteOnce" ] 111 | storageClassName: "standard" 112 | resources: 113 | requests: 114 | storage: 1Gi 115 | - metadata: 116 | name: datalogdir 117 | spec: 118 | accessModes: [ "ReadWriteOnce" ] 119 | storageClassName: "standard" 120 | resources: 121 | requests: 122 | storage: 1Gi -------------------------------------------------------------------------------- /k8s/main.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | MINIKUBE_VERSION=${MINIKUBE_VERSION:-"v1.19.0"} 6 | KUBE_VERSION=${KUBE_VERSION:-"v1.20.0"} 7 | 8 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 9 | 10 | DISTRO=$(uname -s | tr '[:upper:]' '[:lower:]') 11 | 12 | function kubectl-install() 13 | { 14 | 15 | if [[ "${KUBE_VERSION}" == 'latest' ]]; then 16 | KUBE_VERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) 17 | fi 18 | 19 | # Download kubectl 20 | curl -L -o kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/$DISTRO/amd64/kubectl 21 | chmod +x kubectl 22 | sudo mv kubectl /usr/local/bin/ 23 | mkdir -p ${HOME}/.kube 24 | touch ${HOME}/.kube/config 25 | 26 | } 27 | 28 | function minikube-install() 29 | { 30 | # Download minikube 31 | curl -L -o minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-$DISTRO-amd64 32 | chmod +x minikube 33 | sudo mv minikube /usr/local/bin/ 34 | 35 | } 36 | 37 | function minikube-run() 38 | { 39 | 40 | export MINIKUBE_WANTUPDATENOTIFICATION=false 41 | export MINIKUBE_WANTREPORTERRORPROMPT=false 42 | export MINIKUBE_HOME=$HOME 43 | export CHANGE_MINIKUBE_NONE_USER=true 44 | export KUBECONFIG=$HOME/.kube/config 45 | 46 | sudo -E minikube start --vm-driver=none --cpus 2 --memory 3062 --kubernetes-version=${KUBE_VERSION} 47 | 48 | # this for loop waits until kubectl can access the api server that Minikube has created 49 | for i in {1..150}; do # timeout for 5 minutes 50 | kubectl version &> /dev/null 51 | if [[ $? 
-ne 1 ]]; then 52 | break 53 | fi 54 | sleep 2 55 | done 56 | 57 | # Check kubernetes info 58 | kubectl cluster-info 59 | # RBAC 60 | kubectl create clusterrolebinding add-on-cluster-admin --clusterrole cluster-admin --serviceaccount=kube-system:default 61 | # Install Helm 62 | # curl https://raw.githubusercontent.com/helm/helm/master/scripts/get | bash 63 | } 64 | 65 | # $1 : file 66 | # $2 : Number of replicas 67 | function check() 68 | { 69 | SLEEP_TIME=10 70 | MAX_ATTEMPTS=50 71 | ATTEMPTS=0 72 | READY_REPLICAS="0" 73 | until [[ "$READY_REPLICAS" == "$2" ]]; do 74 | sleep $SLEEP_TIME 75 | ATTEMPTS=`expr $ATTEMPTS + 1` 76 | if [[ $ATTEMPTS -gt $MAX_ATTEMPTS ]]; then 77 | echo "ERROR: Max number of attempts was reached (${MAX_ATTEMPTS})" 78 | exit 1 79 | fi 80 | READY_REPLICAS=$(kubectl get -f $1 -o jsonpath='{.items[?(@.kind=="StatefulSet")].status.readyReplicas}' 2>&1) 81 | echo "[${ATTEMPTS}/${MAX_ATTEMPTS}] - Ready zookeeper replicas : ${READY_REPLICAS:-0}/$2 ... " 82 | done 83 | kubectl get all 84 | } 85 | 86 | function test() 87 | { 88 | # Given 89 | file=$DIR/zk.yaml 90 | # When 91 | kubectl create -f $file 92 | # Then 93 | check $file 3 94 | 95 | } 96 | 97 | function test-persistent() 98 | { 99 | # Given, 3 replicas 100 | file=$DIR/zk-persistent.yaml 101 | # When 102 | kubectl create -f $file 103 | # Then 104 | check $file 3 105 | 106 | kubectl get pvc,pv 107 | } 108 | 109 | function test-all() 110 | { 111 | test && kubectl delete -l component=zk all 112 | test-persistent && kubectl delete -l component=zk all,pv,pvc 113 | } 114 | 115 | function clean-all() 116 | { 117 | echo "Cleaning resources ...." 118 | kubectl delete -l component=zk all,pv,pvc 119 | } 120 | 121 | function minikube-delete(){ 122 | 123 | echo "Deleting minikube cluster ...." 
124 | minikube delete 125 | 126 | } 127 | function help() # Show a list of functions 128 | { 129 | declare -F -p | cut -d " " -f 3 130 | } 131 | 132 | if [[ "_$1" = "_" ]]; then 133 | help 134 | else 135 | "$@" 136 | fi 137 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | docker: 4 | docker: 5 | - image: docker 6 | steps: 7 | - setup_remote_docker 8 | - checkout 9 | - run: 10 | name: Docker login 11 | command: | 12 | docker login -u=${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} 13 | - run: 14 | name: Install essentials 15 | command: | 16 | apk add --no-cache --virtual .build-deps make gcc 17 | - run: 18 | name: build, test and push images 19 | command: | 20 | if [ "$CIRCLE_BRANCH" == "master" ]; then 21 | docker build -t ${DOCKER_USERNAME}/zookeeper:latest .; 22 | docker push ${DOCKER_USERNAME}/zookeeper:latest; 23 | else 24 | make docker-build docker-test docker-push; 25 | fi 26 | k8s: 27 | machine: 28 | image: ubuntu-2004:202101-01 29 | steps: 30 | - checkout 31 | - run: 32 | name: update pkgs 33 | command: sudo apt-get update && sudo apt-get install -f -y conntrack 34 | - run: 35 | name: install minikube and kubectl 36 | command: make minikube-install 37 | - run: 38 | name: run minikube 39 | command: make minikube-run 40 | - run: 41 | name: run zookeeper tests 42 | command: make minikube-test 43 | - run: 44 | name: delete resources 45 | command: make minikube-clean 46 | - run: 47 | name: run zookeeper tests persistent 48 | command: make minikube-test-persistent 49 | - run: 50 | name: delete minikube 51 | command: make minikube-delete 52 | openshift: 53 | machine: 54 | image: ubuntu-2004:202101-01 55 | steps: 56 | - checkout 57 | - run: 58 | name: install oc 59 | command: make oc-install 60 | - run: 61 | name: run local openshift cluster 62 | command: make oc-cluster-run 63 | - run: 64 | name: run zookeeper tests 65 | command: make oc-cluster-test 66 | - run: 67 | name: clean resources 68 | command: make oc-clean-resources 69 | - run: 70 | name: run zookeeper persistent tests 71 | command: make oc-cluster-test-persistent 72 | - run: 73 | name: delete local openshift cluster 74 | command: make oc-cluster-clean 75 | gh-tag-release: 76 | docker: 77 | - image: cibuilds/github:0.13 78 | steps: 79 | - checkout 80 | - run: 81 | name: Install essentials 82 | command: | 83 | apk add --no-cache --virtual .build-deps make gcc curl 84 | - run: 85 | name: Create a new tag 86 | command: | 87 | VERSION=v$(make version) 88 | git tag -f ${VERSION} 89 | git remote set-url origin https://${CIRCLE_PROJECT_USERNAME}:${GITHUB_TOKEN}@github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME} 90 | git push -f --tags 91 | - run: 92 | name: Create a tag release on github 93 | command: | 94 | VERSION=v$(make version) 95 | ghr -t ${GITHUB_TOKEN} -u ${CIRCLE_PROJECT_USERNAME} -r ${CIRCLE_PROJECT_REPONAME} -c ${CIRCLE_SHA1} -b "Zookeeper version ${VERSION}" -delete ${VERSION} ./k8s 96 | 97 | workflows: 98 | version: 2 99 | build: 100 | jobs: 101 | - docker: 102 | filters: 103 | branches: 104 | only: 105 | - /^\d+\.\d+$/ 106 | - master 107 | - k8s: 108 | requires: 109 | - docker 110 | filters: 111 | branches: 112 | only: 113 | - /^\d+\.\d+$/ 114 | - openshift: 115 | requires: 116 | - docker 117 | filters: 118 | branches: 119 | only: 120 | - /^\d+\.\d+$/ 121 | - gh-tag-release: 122 | requires: 123 | - k8s 124 | - openshift 125 | filters: 126 | branches: 
127 | only: 128 | - /^\d+\.\d+$/ -------------------------------------------------------------------------------- /openshift/main.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ZK_VERSION=${ZK_VERSION:-"3.7.0"} 6 | ZK_IMAGE="engapa/zookeeper:${ZK_VERSION}" 7 | 8 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 9 | 10 | 11 | function oc-install() 12 | { 13 | # Download oc 14 | curl -LO https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz 15 | tar -xvzf openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz 16 | mv openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit/oc ./oc 17 | rm -rf openshift-origin-client-tools* 18 | chmod a+x oc 19 | sudo mv oc /usr/local/bin/oc 20 | } 21 | 22 | function oc-cluster-run() 23 | { 24 | 25 | # Add internal insecure registry, CAUTION: this command depends on Linux VM and docker version 26 | sudo sed -i 's#/usr/bin/dockerd#/usr/bin/dockerd --insecure-registry '172.30.0.0/16'#' /etc/systemd/system/docker.service.d/10-machine.conf 27 | sudo systemctl daemon-reload 28 | sudo systemctl restart docker 29 | 30 | # Run openshift cluster 31 | oc cluster up --enable=[*] 32 | 33 | # Waiting for cluster 34 | for i in {1..150}; do # timeout for 5 minutes 35 | oc cluster status &> /dev/null 36 | if [[ $? -ne 1 ]]; then 37 | break 38 | fi 39 | sleep 2 40 | done 41 | 42 | oc login -u system:admin 43 | oc adm policy add-scc-to-group privileged system:serviceaccounts:myproject 44 | oc create -f $DIR/zk.yaml 45 | oc create -f $DIR/zk-persistent.yaml 46 | 47 | } 48 | 49 | function build_local_image() 50 | { 51 | 52 | oc new-build --name zk --strategy docker --binary --docker-image "openjdk:8-jre-alpine" 53 | oc start-build zk --from-dir $DIR/.. --follow 54 | 55 | } 56 | 57 | # $1 : Number of replicas 58 | function check() 59 | { 60 | 61 | SLEEP_TIME=10 62 | MAX_ATTEMPTS=50 63 | ATTEMPTS=0 64 | READY_REPLICAS="0" 65 | until [[ "$READY_REPLICAS" == "$1" ]]; do 66 | sleep $SLEEP_TIME 67 | ATTEMPTS=`expr $ATTEMPTS + 1` 68 | if [[ $ATTEMPTS -gt $MAX_ATTEMPTS ]]; then 69 | echo "ERROR: Max number of attempts was reached (${MAX_ATTEMPTS})" 70 | exit 1 71 | fi 72 | READY_REPLICAS=$(oc get statefulset -l component=zk -o jsonpath='{.items[?(@.kind=="StatefulSet")].status.readyReplicas}' 2>&1) 73 | echo "[${ATTEMPTS}/${MAX_ATTEMPTS}] - Ready zookeeper replicas : ${READY_REPLICAS:-0}/$1 ... 
" 74 | done 75 | oc get all 76 | } 77 | 78 | function test() 79 | { 80 | # Given 81 | ZOO_REPLICAS=${1:-1} 82 | # When 83 | oc new-app --template=zk -p ZOO_REPLICAS=${ZOO_REPLICAS} -p SOURCE_IMAGE="engapa/zookeeper" 84 | # Then 85 | check ${ZOO_REPLICAS} 86 | 87 | } 88 | 89 | function test-persistent() 90 | { 91 | # Given 92 | ZOO_REPLICAS=${1:-1} 93 | for i in $(seq 1 ${ZOO_REPLICAS});do 94 | cat << PV | oc create -f - 95 | apiVersion: v1 96 | kind: PersistentVolume 97 | metadata: 98 | name: zk-persistent-data-disk-$i 99 | contents: data 100 | labels: 101 | component: zk 102 | spec: 103 | capacity: 104 | storage: 1Gi 105 | accessModes: 106 | - ReadWriteOnce 107 | hostPath: 108 | path: /tmp/oc/zk-persistent-data-disk-$i 109 | PV 110 | cat << PVLOG | oc create -f - 111 | apiVersion: v1 112 | kind: PersistentVolume 113 | metadata: 114 | name: zk-persistent-datalog-disk-$i 115 | contents: datalog 116 | labels: 117 | component: zk 118 | spec: 119 | capacity: 120 | storage: 1Gi 121 | accessModes: 122 | - ReadWriteOnce 123 | hostPath: 124 | path: /tmp/oc/zk-persistent-datalog-disk-$i 125 | PVLOG 126 | done 127 | # When 128 | oc new-app --template=zk-persistent -p ZOO_REPLICAS=${ZOO_REPLICAS} -p SOURCE_IMAGE="engapa/zookeeper" 129 | # Then 130 | check ${ZOO_REPLICAS} 131 | oc get pv,pvc 132 | 133 | } 134 | 135 | function test-all() 136 | { 137 | ZOO_REPLICAS=$1 138 | test $ZOO_REPLICAS && oc delete -l component=zk all 139 | test-persistent $ZOO_REPLICAS && oc delete -l component=zk all,pv,pvc 140 | } 141 | 142 | function clean-resources() 143 | { 144 | echo "Cleaning resources ...." 145 | oc delete -l component=zk all,pv,pvc 146 | } 147 | 148 | function oc-cluster-clean() 149 | { 150 | echo "Cleaning ...." 151 | oc cluster down 152 | } 153 | 154 | function help() # Show a list of functions 155 | { 156 | declare -F -p | cut -d " " -f 3 157 | } 158 | 159 | if [[ "_$1" = "_" ]]; then 160 | help 161 | else 162 | "$@" 163 | fi 164 | -------------------------------------------------------------------------------- /openshift/zk.yaml: -------------------------------------------------------------------------------- 1 | kind: Template 2 | apiVersion: v1 3 | metadata: 4 | name: zk 5 | annotations: 6 | openshift.io/display-name: Zookeeper (Ephemeral) 7 | description: Create a replicated Zookeeper server 8 | iconClass: icon-database 9 | tags: database,zookeeper 10 | labels: 11 | template: zk 12 | component: zk 13 | parameters: 14 | - name: NAME 15 | value: zk 16 | required: true 17 | - name: SOURCE_IMAGE 18 | description: Container image 19 | value: zookeeper 20 | required: true 21 | - name: ZOO_VERSION 22 | description: Version 23 | value: "3.7.0" 24 | required: true 25 | - name: ZOO_REPLICAS 26 | description: Number of nodes 27 | value: "3" 28 | required: true 29 | - name: ZOO_TICK_TIME 30 | description: The number of milliseconds of each tick 31 | value: "2000" 32 | required: true 33 | - name: ZOO_INIT_LIMIT 34 | description: The number of ticks that the initial synchronization phase can take 35 | value: "5" 36 | required: true 37 | - name: ZOO_SYNC_LIMIT 38 | description: The number of ticks that can pass between sending a request and getting an acknowledgement 39 | value: "2" 40 | required: true 41 | - name: ZOO_CLIENT_PORT 42 | description: The port at which the clients will connect 43 | value: "2181" 44 | required: true 45 | - name: ZOO_SERVER_PORT 46 | description: Server port 47 | value: "2888" 48 | required: true 49 | - name: ZOO_ELECTION_PORT 50 | description: Election port 51 | value: "3888" 52 | 
required: true 53 | - name: ZOO_MAX_CLIENT_CNXNS 54 | description: The maximum number of client connections 55 | value: "60" 56 | required: true 57 | - name: ZOO_SNAP_RETAIN_COUNT 58 | description: The number of snapshots to retain in dataDir 59 | value: "3" 60 | required: true 61 | - name: ZOO_PURGE_INTERVAL 62 | description: Purge task interval in hours. Set to 0 to disable auto purge feature 63 | value: "1" 64 | required: true 65 | - name: ZOO_MEMORY 66 | description: JVM heap size 67 | value: "-Xmx512M -Xms512M" 68 | required: true 69 | - name: RESOURCE_MEMORY_REQ 70 | description: The memory resource request. 71 | value: "512M" 72 | required: true 73 | - name: RESOURCE_MEMORY_LIMIT 74 | description: The limits for memory resource. 75 | value: "512M" 76 | required: true 77 | - name: RESOURCE_CPU_REQ 78 | description: The CPU resource request. 79 | value: "300m" 80 | required: true 81 | - name: RESOURCE_CPU_LIMIT 82 | description: The limits for CPU resource. 83 | value: "300m" 84 | required: true 85 | 86 | objects: 87 | - apiVersion: v1 88 | kind: Service 89 | metadata: 90 | name: ${NAME} 91 | labels: 92 | zk-name: ${NAME} 93 | component: zk 94 | annotations: 95 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 96 | spec: 97 | ports: 98 | - port: ${ZOO_CLIENT_PORT} 99 | name: client 100 | - port: ${ZOO_SERVER_PORT} 101 | name: server 102 | - port: ${ZOO_ELECTION_PORT} 103 | name: election 104 | clusterIP: None 105 | selector: 106 | zk-name: ${NAME} 107 | - apiVersion: apps/v1 108 | kind: StatefulSet 109 | metadata: 110 | name: ${NAME} 111 | labels: 112 | zk-name: ${NAME} 113 | component: zk 114 | spec: 115 | podManagementPolicy: "Parallel" 116 | serviceName: ${NAME} 117 | selector: 118 | matchLabels: 119 | zk-name: ${NAME} 120 | component: zk 121 | replicas: ${ZOO_REPLICAS} 122 | template: 123 | metadata: 124 | labels: 125 | zk-name: ${NAME} 126 | template: zk 127 | component: zk 128 | spec: 129 | containers: 130 | - name: ${NAME} 131 | imagePullPolicy: IfNotPresent 132 | image: ${SOURCE_IMAGE}:${ZOO_VERSION} 133 | resources: 134 | requests: 135 | memory: ${RESOURCE_MEMORY_REQ} 136 | cpu: ${RESOURCE_CPU_REQ} 137 | limits: 138 | memory: ${RESOURCE_MEMORY_LIMIT} 139 | cpu: ${RESOURCE_CPU_LIMIT} 140 | ports: 141 | - containerPort: ${ZOO_CLIENT_PORT} 142 | name: client 143 | - containerPort: ${ZOO_SERVER_PORT} 144 | name: server 145 | - containerPort: ${ZOO_ELECTION_PORT} 146 | name: election 147 | env: 148 | - name : SETUP_DEBUG 149 | value: "true" 150 | - name : ZOO_REPLICAS 151 | value: ${ZOO_REPLICAS} 152 | - name : ZK_HEAP_SIZE 153 | value: ${ZOO_HEAP_SIZE} 154 | - name : ZK_tickTime 155 | value: ${ZOO_TICK_TIME} 156 | - name : ZK_initLimit 157 | value: ${ZOO_INIT_LIMIT} 158 | - name : ZK_syncLimit 159 | value: ${ZOO_SYNC_LIMIT} 160 | - name : ZK_maxClientCnxns 161 | value: ${ZOO_MAX_CLIENT_CNXNS} 162 | - name : ZK_autopurge_snapRetainCount 163 | value: ${ZOO_SNAP_RETAIN_COUNT} 164 | - name : ZK_autopurge_purgeInterval 165 | value: ${ZOO_PURGE_INTERVAL} 166 | - name : ZK_clientPort 167 | value: ${ZOO_CLIENT_PORT} 168 | - name : ZOO_SERVER_PORT 169 | value: ${ZOO_SERVER_PORT} 170 | - name : ZOO_ELECTION_PORT 171 | value: ${ZOO_ELECTION_PORT} 172 | - name : JAVA_ZK_JVMFLAGS 173 | value: "\"${ZOO_MEMORY}\"" 174 | readinessProbe: 175 | exec: 176 | command: 177 | - zkServer.sh 178 | - status 179 | initialDelaySeconds: 15 180 | timeoutSeconds: 5 181 | livenessProbe: 182 | exec: 183 | command: 184 | - zkServer.sh 185 | - status 186 | initialDelaySeconds: 15 187 | 
timeoutSeconds: 5 188 | securityContext: 189 | runAsUser: 1001 190 | fsGroup: 1001 -------------------------------------------------------------------------------- /openshift/README.md: -------------------------------------------------------------------------------- 1 | # Zookeeper cluster 2 | 3 | Zookeeper cluster deployment. 4 | 5 | The resources found here are templates for the Openshift catalog. 6 | 7 | It isn't necessary to clone this repo; you can use the resource URLs directly. 8 | 9 | ## Requirements 10 | 11 | - [oc](https://github.com/openshift/origin/releases) (v3.11) 12 | - [minishift](https://github.com/minishift/minishift) (v1.33.0) 13 | 14 | ### DEV environment 15 | 16 | We'll use only open source software, that is, 'openshift origin'. 17 | 18 | [Minishift](https://github.com/minishift/minishift) is the simplest way to get a local Openshift installation on our workstation. 19 | After installing the command client, check that everything is alright before continuing: 20 | 21 | ```bash 22 | $ minishift version 23 | minishift v1.33.0+ba29431 24 | $ minishift start [options] 25 | ... 26 | $ minishift openshift version 27 | openshift v3.11.0+57f8760-31 28 | ``` 29 | >NOTE: minishift configures the oc client to connect to the local Openshift cluster properly. 30 | 31 | With the `oc` command client it is possible to bring up a cluster as well; take a look at: https://github.com/openshift/origin/blob/master/docs/cluster_up_down.md 32 | 33 | Check that our cluster is ready: 34 | 35 | ```bash 36 | $ oc version 37 | oc v3.11.0+0cbc58b 38 | kubernetes v1.17.0+d4cacc0 39 | features: Basic-Auth 40 | 41 | Server https://192.168.2.32:8443 42 | kubernetes v1.17.0+d4cacc0 43 | ``` 44 | 45 | You may use the Openshift dashboard (`minishift console`) if you prefer to do those steps through the web interface, 46 | otherwise use the `oc` command client: 47 | 48 | ```bash 49 | oc login -u system:admin 50 | ``` 51 | 52 | ### PROD environment 53 | 54 | To connect to an external cluster you need to know its URL and log in with your credentials. 55 | 56 | For production environments we'll use zookeeper deployments with persistence (zk-persistent.yaml). 57 | 58 | We recommend using **zk-persistent.yaml**. 59 | This means that even if pods are destroyed, all data remain safe on persistent volumes, and when pods are recreated the volumes will be attached again. 60 | 61 | The statefulset object has an "antiaffinity" pod scheduler policy so pods will be allocated on separate nodes (uncomment those lines in the `zk-persistent.yaml` file to activate it). 62 | It requires at least as many nodes as the value of the `ZOO_REPLICAS` parameter. 63 | 64 | ## Building the image 65 | 66 | This is a recommended step, although you can always use the [public images at dockerhub](https://hub.docker.com/r/engapa/zookeeper), which are automatically uploaded by this project's CI.
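If you only need the published image (for example to try the templates without building anything), you can pull it directly from Docker Hub:

```bash
# Pull the prebuilt image; 3.7.0 is the zookeeper version used throughout this repo
docker pull engapa/zookeeper:3.7.0
```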
67 | 68 | To build local docker images of zookeeper in your private Openshift registry just follow these instructions: 69 | 70 | 1 - Create an image builder and build the container image locally 71 | 72 | ```bash 73 | $ oc create -f buildconfig.yaml 74 | $ oc new-app zk-builder -p GITHUB_REF="v3.7.0" -p IMAGE_STREAM_VERSION="v3.7.0" 75 | ``` 76 | 77 | If you want to get an image from another git commit: 78 | 79 | ```bash 80 | $ oc start-build zk-builder --commit=master 81 | ``` 82 | 83 | Or build a local docker image from source directly: 84 | ```bash 85 | $ ./main.sh build_local_image 86 | ``` 87 | 88 | **NOTE**: If you want to use this local/private image from containers on other projects then use the "<project>/NAME" value as the `SOURCE_IMAGE` parameter value, and use one value of "TAGS" as the `ZOO_VERSION` parameter value (e.g. test/zookeeper:3.7.0). 89 | 90 | ## Deploying zookeeper cluster 91 | 92 | Just run the following command to create a zookeeper cluster by using a statefulset on Openshift: 93 | 94 | ```bash 95 | $ oc create -f zk[-persistent].yaml 96 | $ oc new-app zk -p ZOO_REPLICAS=1 -p SOURCE_IMAGE="172.30.1.1:5000/myproject/zookeeper" -p ZOO_VERSION="3.7.0" 97 | ``` 98 | > NOTE: select zk.yaml or zk-persistent.yaml, and set parameter values 99 | 100 | For example, if you deployed a persistent zookeeper with ZOO_REPLICAS=1: 101 | 102 | ```bash 103 | $ oc get all,pvc,pv -l component=zk 104 | NAME READY STATUS RESTARTS AGE 105 | pod/zk-persistent-0 1/1 Running 0 53s 106 | 107 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 108 | service/zk-persistent ClusterIP None <none> 2181/TCP,2888/TCP,3888/TCP 53s 109 | 110 | NAME DESIRED CURRENT AGE 111 | statefulset.apps/zk-persistent 1 1 53s 112 | 113 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 114 | persistentvolumeclaim/datadir-zk-persistent-0 Bound zk-persistent-datalog-disk-1 1Gi RWO 53s 115 | persistentvolumeclaim/datalogdir-zk-persistent-0 Bound zk-persistent-data-disk-1 1Gi RWO 53s 116 | 117 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 118 | persistentvolume/zk-persistent-data-disk-1 1Gi RWO Retain Bound myproject/datalogdir-zk-persistent-0 54s 119 | persistentvolume/zk-persistent-datalog-disk-1 1Gi RWO Retain Bound myproject/datadir-zk-persistent-0 53s 120 | ``` 121 | 122 | You may use the `main.sh` script in this directory: 123 | ```bash 124 | $ ./main.sh test <replicas> 125 | ``` 126 | or 127 | ```bash 128 | $ ./main.sh test-persistent <replicas> 129 | ``` 130 | > NOTE: Where `<replicas>` is the number of replicas you want, 1 by default.
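Once the pods are ready, a quick smoke test (a sketch, assuming the default `zk-persistent` name from the persistent template) is to ask zookeeper for its status inside the first pod:

```bash
# zkServer.sh is the same script used by the readiness/liveness probes;
# it reports "standalone" for 1 replica or "leader"/"follower" for an ensemble
oc exec zk-persistent-0 -- zkServer.sh status
```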
131 | 132 | ## Cleaning up 133 | 134 | To remove all resources related to the zookeeper cluster deployment launch this command: 135 | 136 | ```bash 137 | $ oc delete all -l component=zk [-n |--all-namespaces] 138 | ``` 139 | 140 | And finally, you want to remove the template as well: 141 | 142 | ```bash 143 | $ oc delete template zk-builder [-n |--all-namespaces] 144 | $ oc delete template zk[-persistent] [-n |--all-namespaces] 145 | ``` 146 | 147 | You may use the `main.sh` script on this directory: 148 | ```bash 149 | $ ./main clean-resources 150 | ``` -------------------------------------------------------------------------------- /openshift/zk-persistent.yaml: -------------------------------------------------------------------------------- 1 | kind: Template 2 | apiVersion: v1 3 | metadata: 4 | name: zk-persistent 5 | annotations: 6 | openshift.io/display-name: Zookeeper (Persistent) 7 | description: Create a replicated Zookeeper server with persistent storage 8 | iconClass: icon-database 9 | tags: database,zookeeper 10 | labels: 11 | template: zk-persistent 12 | component: zk 13 | parameters: 14 | - name: NAME 15 | value: zk-persistent 16 | required: true 17 | - name: SOURCE_IMAGE 18 | description: Container image 19 | value: zookeeper 20 | required: true 21 | - name: ZOO_VERSION 22 | description: Version 23 | value: "3.7.0" 24 | required: true 25 | - name: ZOO_REPLICAS 26 | description: Number of nodes 27 | value: "3" 28 | required: true 29 | - name: VOLUME_DATA_CAPACITY 30 | description: Persistent volume capacity for zookeeper dataDir directory (e.g. 512Mi, 2Gi) 31 | value: 1Gi 32 | required: true 33 | - name: VOLUME_DATALOG_CAPACITY 34 | description: Persistent volume capacity for zookeeper dataLogDir directory (e.g. 512Mi, 2Gi) 35 | value: 1Gi 36 | required: true 37 | - name: ZOO_TICK_TIME 38 | description: The number of milliseconds of each tick 39 | value: "2000" 40 | required: true 41 | - name: ZOO_INIT_LIMIT 42 | description: The number of ticks that the initial synchronization phase can take 43 | value: "5" 44 | required: true 45 | - name: ZOO_SYNC_LIMIT 46 | description: The number of ticks that can pass between sending a request and getting an acknowledgement 47 | value: "2" 48 | required: true 49 | - name: ZOO_CLIENT_PORT 50 | description: The port at which the clients will connect 51 | value: "2181" 52 | required: true 53 | - name: ZOO_SERVER_PORT 54 | description: Server port 55 | value: "2888" 56 | required: true 57 | - name: ZOO_ELECTION_PORT 58 | description: Election port 59 | value: "3888" 60 | required: true 61 | - name: ZOO_MAX_CLIENT_CNXNS 62 | description: The maximum number of client connections 63 | value: "60" 64 | required: true 65 | - name: ZOO_SNAP_RETAIN_COUNT 66 | description: The number of snapshots to retain in dataDir 67 | value: "3" 68 | required: true 69 | - name: ZOO_PURGE_INTERVAL 70 | description: Purge task interval in hours. Set to 0 to disable auto purge feature 71 | value: "1" 72 | required: true 73 | - name: ZOO_MEMORY 74 | description: JVM heap size 75 | value: "-Xmx512M -Xms512M" 76 | required: true 77 | - name: RESOURCE_MEMORY_REQ 78 | description: The memory resource request. 79 | value: "512M" 80 | required: true 81 | - name: RESOURCE_MEMORY_LIMIT 82 | description: The limits for memory resource. 83 | value: "512M" 84 | required: true 85 | - name: RESOURCE_CPU_REQ 86 | description: The CPU resource request. 87 | value: "300m" 88 | required: true 89 | - name: RESOURCE_CPU_LIMIT 90 | description: The limits for CPU resource. 
91 | value: "300m" 92 | required: true 93 | 94 | objects: 95 | - apiVersion: v1 96 | kind: Service 97 | metadata: 98 | name: ${NAME} 99 | labels: 100 | zk-name: ${NAME} 101 | component: zk 102 | annotations: 103 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 104 | spec: 105 | ports: 106 | - port: ${ZOO_CLIENT_PORT} 107 | name: client 108 | - port: ${ZOO_SERVER_PORT} 109 | name: server 110 | - port: ${ZOO_ELECTION_PORT} 111 | name: election 112 | clusterIP: None 113 | selector: 114 | zk-name: ${NAME} 115 | - apiVersion: apps/v1 116 | kind: StatefulSet 117 | metadata: 118 | name: ${NAME} 119 | labels: 120 | zk-name: ${NAME} 121 | component: zk 122 | spec: 123 | podManagementPolicy: "Parallel" 124 | serviceName: ${NAME} 125 | selector: 126 | matchLabels: 127 | zk-name: ${NAME} 128 | component: zk 129 | replicas: ${ZOO_REPLICAS} 130 | template: 131 | metadata: 132 | labels: 133 | zk-name: ${NAME} 134 | template: zk-persistent 135 | component: zk 136 | # annotations: 137 | ## Use this annotation if you want allocate each pod on different node 138 | ## Note the number of nodes must be upper than REPLICAS parameter. 139 | # scheduler.alpha.kubernetes.io/affinity: > 140 | # { 141 | # "podAntiAffinity": { 142 | # "requiredDuringSchedulingIgnoredDuringExecution": [{ 143 | # "labelSelector": { 144 | # "matchExpressions": [{ 145 | # "key": "zk-name", 146 | # "operator": "In", 147 | # "values": ["zk"] 148 | # }] 149 | # }, 150 | # "topologyKey": "kubernetes.io/hostname" 151 | # }] 152 | # } 153 | # } 154 | spec: 155 | containers: 156 | - name: ${NAME} 157 | imagePullPolicy: IfNotPresent 158 | image: ${SOURCE_IMAGE}:${ZOO_VERSION} 159 | resources: 160 | requests: 161 | memory: ${RESOURCE_MEMORY_REQ} 162 | cpu: ${RESOURCE_CPU_REQ} 163 | limits: 164 | memory: ${RESOURCE_MEMORY_LIMIT} 165 | cpu: ${RESOURCE_CPU_LIMIT} 166 | ports: 167 | - containerPort: ${ZOO_CLIENT_PORT} 168 | name: client 169 | - containerPort: ${ZOO_SERVER_PORT} 170 | name: server 171 | - containerPort: ${ZOO_ELECTION_PORT} 172 | name: election 173 | env: 174 | - name : SETUP_DEBUG 175 | value: "true" 176 | - name : ZOO_REPLICAS 177 | value: ${ZOO_REPLICAS} 178 | - name : ZK_HEAP_SIZE 179 | value: ${ZOO_HEAP_SIZE} 180 | - name : ZK_tickTime 181 | value: ${ZOO_TICK_TIME} 182 | - name : ZK_initLimit 183 | value: ${ZOO_INIT_LIMIT} 184 | - name : ZK_syncLimit 185 | value: ${ZOO_SYNC_LIMIT} 186 | - name : ZK_maxClientCnxns 187 | value: ${ZOO_MAX_CLIENT_CNXNS} 188 | - name : ZK_autopurge_snapRetainCount 189 | value: ${ZOO_SNAP_RETAIN_COUNT} 190 | - name : ZK_autopurge_purgeInterval 191 | value: ${ZOO_PURGE_INTERVAL} 192 | - name : ZK_clientPort 193 | value: ${ZOO_CLIENT_PORT} 194 | - name : ZOO_SERVER_PORT 195 | value: ${ZOO_SERVER_PORT} 196 | - name : ZOO_ELECTION_PORT 197 | value: ${ZOO_ELECTION_PORT} 198 | - name : JAVA_ZK_JVMFLAGS 199 | value: "\"${ZOO_MEMORY}\"" 200 | readinessProbe: 201 | exec: 202 | command: 203 | - zkServer.sh 204 | - status 205 | initialDelaySeconds: 20 206 | timeoutSeconds: 10 207 | livenessProbe: 208 | exec: 209 | command: 210 | - zkServer.sh 211 | - status 212 | initialDelaySeconds: 20 213 | timeoutSeconds: 10 214 | securityContext: 215 | runAsUser: 1001 216 | fsGroup: 1001 217 | volumeMounts: 218 | - name: datadir 219 | mountPath: /opt/zookeeper/data 220 | - name: datalogdir 221 | mountPath: /opt/zookeeper/data-log 222 | volumeClaimTemplates: 223 | - metadata: 224 | name: datadir 225 | spec: 226 | accessModes: [ "ReadWriteOnce" ] 227 | resources: 228 | requests: 229 | storage: 
222 | volumeClaimTemplates: 223 | - metadata: 224 | name: datadir 225 | spec: 226 | accessModes: [ "ReadWriteOnce" ] 227 | resources: 228 | requests: 229 | storage: ${VOLUME_DATA_CAPACITY} 230 | selector: 231 | matchLabels: 232 | component: zk 233 | contents: data 234 | - metadata: 235 | name: datalogdir 236 | spec: 237 | accessModes: [ "ReadWriteOnce" ] 238 | resources: 239 | requests: 240 | storage: ${VOLUME_DATALOG_CAPACITY} 241 | selector: 242 | matchLabels: 243 | component: zk 244 | contents: datalog -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner.
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------