├── .gitignore ├── README.md ├── hadoop ├── Makefile ├── README.md ├── conf │ ├── core-site.xml │ ├── hdfs-site.xml │ ├── id_rsa │ ├── id_rsa.pub │ ├── masters │ ├── slaves │ └── yarn-site.xml ├── hadoop-base │ ├── Dockerfile │ └── Makefile ├── hadoop-datanode │ ├── Dockerfile │ ├── LICENSE │ ├── Makefile │ ├── README.md │ └── docker-entrypoint.sh ├── hadoop-journalnode │ ├── Dockerfile │ ├── Makefile │ └── docker-entrypoint.sh ├── hadoop-namenode │ ├── Dockerfile │ ├── LICENSE │ ├── Makefile │ ├── README.md │ └── docker-entrypoint.sh ├── yaml │ ├── datanode.yaml │ ├── journalnode.yaml │ ├── namenode0.yaml │ └── namenode1.yaml └── yarn │ ├── Dockerfile │ ├── Makefile │ └── docker-entrypoint.sh ├── hbase-image ├── .gitignore ├── Dockerfile ├── Makefile ├── hbase-site.xml └── start-k8s-hbase.sh ├── hmaster.yaml ├── opentsdb-pod.yaml ├── opentsdb ├── .gitignore ├── Dockerfile ├── README.md ├── core-site.xml ├── create_table.sh ├── hbase-service.sh ├── hbase-site.xml ├── hdfs-site.xml ├── opentsdb-service.sh ├── opentsdb.yaml ├── query.sh └── tmp.sh ├── region.yaml └── zookeeper ├── Dockerfile ├── Makefile ├── README.md ├── config-and-run.sh ├── petset.yaml ├── zoo.cfg └── zookeeper.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | hbase-1.2.3-bin.tar.gz 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hbase-kubernetes 2 | 3 | Distributed HBase on HDFS, running on Kubernetes, with 2 masters and 2 regionservers. 4 | 5 | - hadoop: hadoop cluster running on kubernetes with high availability: 2 namenodes/3 journalnodes/2 datanodes 6 | - zookeeper: a 3-node zookeeper cluster running on kubernetes. 7 | - opentsdb: runs on hbase 8 | 9 | ## How to run 10 | 11 | Download hbase from `https://archive.apache.org/dist/hbase/1.2.3/hbase-1.2.3-bin.tar.gz` and copy it into `hbase-image` 12 | ``` 13 | # cd hbase-image 14 | # IMAGE_BASE_URL=cargo.caicloudprivatetest.com/caicloud IMAGE_TAG=latest make 15 | # cd .. 16 | # kubectl create -f hmaster.yaml 17 | # kubectl create -f region.yaml 18 | ``` 19 | 20 | Feel free to file an issue -------------------------------------------------------------------------------- /hadoop/Makefile: -------------------------------------------------------------------------------- 1 | all: hadoop-base hadoop-datanode hadoop-journalnode hadoop-namenode 2 | 3 | .PHONY: hadoop-base hadoop-datanode hadoop-journalnode hadoop-namenode 4 | 5 | hadoop-base: 6 | $(MAKE) -C hadoop-base 7 | 8 | hadoop-datanode: 9 | $(MAKE) -C hadoop-datanode 10 | 11 | hadoop-journalnode: 12 | $(MAKE) -C hadoop-journalnode 13 | 14 | hadoop-namenode: 15 | $(MAKE) -C hadoop-namenode -------------------------------------------------------------------------------- /hadoop/README.md: -------------------------------------------------------------------------------- 1 | ## k8s-hadoop ## 2 | 3 | - hadoop-base: hadoop base image 4 | - hadoop-namenode: namenode image based on hadoop-base 5 | - hadoop-datanode: datanode image based on hadoop-base 6 | - hadoop-journalnode: journalnode image based on hadoop-base 7 | - yarn: why should we use it?
8 | - yaml: yamls for creating the k8s-hadoop cluster 9 | 10 | ## Building Images 11 | 12 | ``` 13 | IMAGE_BASE_URL=your_base_url IMAGE_TAG=latest make 14 | ``` 15 | 16 | ## Run hadoop 17 | ``` 18 | kubectl create -f ../zookeeper/zookeeper.yaml 19 | kubectl create -f yaml/journalnode.yaml 20 | # Wait until the journalnodes are ready 21 | kubectl create -f yaml/namenode0.yaml 22 | kubectl create -f yaml/namenode1.yaml 23 | # Wait until the namenodes are ready 24 | kubectl create -f yaml/datanode.yaml 25 | ``` 26 | -------------------------------------------------------------------------------- /hadoop/conf/core-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | 4 | <configuration> 5 | <property> 6 | <name>fs.defaultFS</name> 7 | <value>hdfs://mycluster</value> 8 | </property> 9 | <property> 10 | <name>io.compression.codecs</name> 11 | <value> 12 | org.apache.hadoop.io.compress.GzipCodec, 13 | org.apache.hadoop.io.compress.DefaultCodec, 14 | com.hadoop.compression.lzo.LzoCodec, 15 | com.hadoop.compression.lzo.LzopCodec, 16 | org.apache.hadoop.io.compress.BZip2Codec 17 | </value> 18 | </property> 19 | <property> 20 | <name>io.compression.codec.lzo.class</name> 21 | <value>com.hadoop.compression.lzo.LzoCodec</value> 22 | </property> 23 | <property> 24 | <name>ha.zookeeper.quorum</name> 25 | <value>zookeeper-3:2181,zookeeper-1:2181,zookeeper-2:2181</value> 26 | </property> 27 | <property> 28 | <name>ipc.server.tcpnodelay</name> 29 | <value>true</value> 30 | </property> 31 | <property> 32 | <name>ipc.client.tcpnodelay</name> 33 | <value>true</value> 34 | </property> 35 | </configuration> 36 | -------------------------------------------------------------------------------- /hadoop/conf/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | <configuration> 4 | <property> 5 | <name>dfs.nameservices</name> 6 | <value>mycluster</value> 7 | </property> 8 | <property> 9 | <name>dfs.ha.namenodes.mycluster</name> 10 | <value>nn0,nn1</value> 11 | </property> 12 | <property> 13 | <name>dfs.namenode.rpc-address.mycluster.nn0</name> 14 | <value>hadoop-namenode-0:8020</value> 15 | </property> 16 | <property> 17 | <name>dfs.namenode.rpc-address.mycluster.nn1</name> 18 | <value>hadoop-namenode-1:8020</value> 19 | </property> 20 | <property> 21 | <name>dfs.namenode.http-address.mycluster.nn0</name> 22 | <value>hadoop-namenode-0:50070</value> 23 | </property> 24 | <property> 25 | <name>dfs.namenode.http-address.mycluster.nn1</name> 26 | <value>hadoop-namenode-1:50070</value> 27 | </property> 28 | <property> 29 | <name>dfs.namenode.shared.edits.dir</name> 30 | <value>qjournal://hadoop-journalnode-0:8485;hadoop-journalnode-1:8485;hadoop-journalnode-2:8485/mycluster</value> 31 | </property> 32 | <property> 33 | <name>dfs.client.failover.proxy.provider.mycluster</name> 34 | <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value> 35 | </property> 36 | <property> 37 | <name>dfs.journalnode.edits.dir</name> 38 | <value>/home/hadoop/journaldata</value> 39 | </property> 40 | <property> 41 | <name>dfs.ha.automatic-failover.enabled</name> 42 | <value>true</value> 43 | </property> 44 | <property> 45 | <name>dfs.ha.fencing.methods</name> 46 | <value>shell(/bin/true)</value> 47 | </property> 48 | <property> 49 | <name>dfs.replication</name> 50 | <value>2</value> 51 | </property> 52 | <property> 53 | <name>dfs.permissions</name> 54 | <value>false</value> 55 | </property> 56 | <property> 57 | <name>dfs.name.dir</name> 58 | <value>file:///var/hdfs/name</value> 59 | </property> 60 | <property> 61 | <name>dfs.namenode.datanode.registration.ip-hostname-check</name> 62 | <value>false</value> 63 | </property> 64 | <property> 65 | <name>dfs.namenode.avoid.read.stale.datanode</name> 66 | <value>true</value> 67 | </property> 68 | <property> 69 | <name>dfs.namenode.avoid.write.stale.datanode</name> 70 | <value>true</value> 71 | </property> 72 | <property> 73 | <name>dfs.datanode.max.xcievers</name> 74 | <value>8192</value> 75 | </property> 76 | <property> 77 | <name>dfs.datanode.handler.count</name> 78 | <value>32</value> 79 | </property> 80 | </configuration> 81 | -------------------------------------------------------------------------------- /hadoop/conf/id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAstO1WivsgRQpIarPMEDc38lVzqcfE2+cVcirDZIENDkBehDN 3 | g4E62b0893GNeNchqurP2oAEuhxqdiUt42s/4mhCyctxDU2DEGUJe4ckdGBehJfa 4 | SIz0FPb6n8guzf3ur9c6ymaOI9uhvQRcbHI3CH/54ddtFFRjBFVfnq6bVl4sWaDf 5 | Go1DdxW+x5lhLfy82rV1WbEQjWEdIKbJh8nFONEAAEQ5wCj4ifnjHEU8s825osIz 6 | 9IqReZ9zd8mOrowVzwEyFWiLGj6CPSi/JdHBwT88RCObF2Bed+dd/ROmIeHtz6B4 7 | EcAzs68rw8wKW3sitd7AUcy8bh9kSFNGXZ2PgwIDAQABAoIBAFIEe4bR4nUJNBw2 8 |
5TPya9tP0kUnrT8Sz5cgpCNjNv5kruK3VircWIuJGa87q3Ei6lQjwhUq5gAeDE4H 9 | m1ZU2EYVH5zB7CX3yQb58DFxuCyQDomtw/XGgCQlaqlh/OWJc7G+ez5dbw/PKg0x 10 | NNqgP9FsMMV+Mo9CbO+N4H89Ag3H+kp404VcIH1levmXmtXoamKOq9BoNRR1W49M 11 | Rbapo93pzJUusdYLX1HvDbdU0qYX84ppw2t+cX4dVa/CmPU7s91nLM+scP+q0R16 12 | 7OiQ4zVPECSJNZ4s5RwyjsqLkSxSElr6sX02vMie+Jk8hgioFTXFMFLqS3JtQ6O/ 13 | upaAvCECgYEA68INJJ2kspkPzdE0qoFL0t+5b/RJqnmscBdDbavB1aVP6Bns+S9Q 14 | 2gI12LcbFbkZX3I+gOspgcw6L2VXwlsWm85oOiCyRRqz6OKyYy+1W7hxEQ9y5ScS 15 | 0/UO02ZPpTjW7qWTMMzWFYmMf6RDbC1VztYRq4S9Ds1uOxsNdUYNNUkCgYEAwi5Q 16 | 7Basy7kPbQbtoIjtNf5h2HgOP6BMaYseVFtQBKI5aUL1my3RG1V6oNEaU/3eH0RH 17 | jf6db/s3NVh3wPuk9Q/IY3/rBWwiDmx3do4sPesURGtOUpGxczMdILcB6BN6/N9v 18 | Z99f3z9YMvGEF9eYrmSKph1rz4TI42Xl8rKX+msCgYEApW4mUNI5toUG/PJXFWni 19 | 7dbFRZTSeEgmwwTf8L38JtF4T7KZgYU9I603YZmi7MxUzI/a4hWhq43GKmnfeeb2 20 | 51+f7WHyFeGV6s3gRQ8+IF09Ia6Ifdm8MXUc8SQysQpAb3dJ4I1rj/NT3mFCWk85 21 | jgjSn1tmWlO2jMz/1ZHRRNkCgYEAmCGTUrQdIurU9BU2fMxqFCA9ZkwA1cZvt0zA 22 | MVTs5wR1uEs2gyItLp4UmgPY/a7qpFJpQQ0XKZGG2mt6Px6oSTrZQ2MHrVLfh69h 23 | FOgkL0qKsWPkk/oDLX8BNg1LGBiTBaTD8nxl71EAd8ESPV+zBgZNPgHU4ltkzAuc 24 | UT2spgsCgYAEoZvZ6xhpDiJb6gzkS5VVgKWrsboHVT6F3aa2AEjzbsqF43EfHcVE 25 | 7YTA8K9VnwI0Goq4wXLJj2BhNvPabew+CAgek3x4vLMsQqdPakf0rU3PPgyCc91h 26 | /vEBZGYlyVBJLJbSdbBx+Dmz3Y2BYbhLkiJVGxowWSKI2GZbVy9P7w== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /hadoop/conf/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy07VaK+yBFCkhqs8wQNzfyVXOpx8Tb5xVyKsNkgQ0OQF6EM2DgTrZvTz3cY141yGq6s/agAS6HGp2JS3jaz/iaELJy3ENTYMQZQl7hyR0YF6El9pIjPQU9vqfyC7N/e6v1zrKZo4j26G9BFxscjcIf/nh120UVGMEVV+erptWXixZoN8ajUN3Fb7HmWEt/LzatXVZsRCNYR0gpsmHycU40QAARDnAKPiJ+eMcRTyzzbmiwjP0ipF5n3N3yY6ujBXPATIVaIsaPoI9KL8l0cHBPzxEI5sXYF535139E6Yh4e3PoHgRwDOzryvDzApbeyK13sBRzLxuH2RIU0ZdnY+D root@01f83bc90985 2 | -------------------------------------------------------------------------------- /hadoop/conf/masters: -------------------------------------------------------------------------------- 1 | hadoop-namenode 2 | -------------------------------------------------------------------------------- /hadoop/conf/slaves: -------------------------------------------------------------------------------- 1 | hadoop-dn01 2 | hadoop-dn02 3 | -------------------------------------------------------------------------------- /hadoop/conf/yarn-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | 4 | <configuration> 5 | <property> 6 | <name>yarn.resourcemanager.cluster-id</name> 7 | <value>yarncluster</value> 8 | </property> 9 | <property> 10 | <name>yarn.resourcemanager.zk-address</name> 11 | <value>zookeeper-0:2181,zookeeper-1:2181,zookeeper-2:2181</value> 12 | </property> 13 | <property> 14 | <name>yarn.resourcemanager.ha.enabled</name> 15 | <value>true</value> 16 | </property> 17 | <property> 18 | <name>yarn.resourcemanager.ha.rm-ids</name> 19 | <value>rm0,rm1</value> 20 | </property> 21 | <property> 22 | <name>yarn.resourcemanager.hostname.rm0</name> 23 | <value>yarn-resourcemanager-0</value> 24 | </property> 25 | <property> 26 | <name>yarn.resourcemanager.hostname.rm1</name> 27 | <value>yarn-resourcemanager-1</value> 28 | </property> 29 | </configuration> 30 | -------------------------------------------------------------------------------- /hadoop/hadoop-base/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM java:7 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | ENV HADOOP_VERSION 2.7.1 6 | ENV HADOOP_INSTALL_DIR /opt/hadoop 7 | 8 | # init base os 9 | RUN sed -i "s/httpredir.debian.org/mirrors.163.com/g" /etc/apt/sources.list 10 | RUN apt-get update && \ 11 | apt-get install -y --no-install-recommends curl tar ssh && \ 12 |
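# (added note, not in the original Dockerfile) full-line comments are stripped by the Dockerfile parser even inside a \-continued RUN, so this remains a single instruction; keeping the install and the apt cleanup below in one layer means the package cache never persists in the final image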
apt-get clean autoclean && \ 13 | apt-get autoremove --yes && \ 14 | rm -rf /var/lib/{apt,dpkg,cache,log}/ 15 | 16 | # download hadoop 17 | RUN mkdir -p ${HADOOP_INSTALL_DIR} && \ 18 | curl -L http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz | tar -xz --strip=1 -C ${HADOOP_INSTALL_DIR} 19 | 20 | # build LZO 21 | WORKDIR /tmp 22 | RUN apt-get update && \ 23 | apt-get install -y build-essential maven lzop liblzo2-2 && \ 24 | wget http://www.oberhumer.com/opensource/lzo/download/lzo-2.09.tar.gz && \ 25 | tar zxvf lzo-2.09.tar.gz && \ 26 | cd lzo-2.09 && \ 27 | ./configure --enable-shared --prefix /usr/local/lzo-2.09 && \ 28 | make && make install && \ 29 | cd .. && git clone https://github.com/twitter/hadoop-lzo.git && cd hadoop-lzo && \ 30 | git checkout release-0.4.20 && \ 31 | C_INCLUDE_PATH=/usr/local/lzo-2.09/include LIBRARY_PATH=/usr/local/lzo-2.09/lib mvn clean package && \ 32 | apt-get remove -y build-essential maven && \ 33 | apt-get clean autoclean && \ 34 | apt-get autoremove --yes && \ 35 | rm -rf /var/lib/{apt,dpkg,cache,log}/ && \ 36 | cd target/native/Linux-amd64-64 && \ 37 | tar -cBf - -C lib . | tar -xBvf - -C /tmp && \ 38 | cp /tmp/libgplcompression* ${HADOOP_INSTALL_DIR}/lib/native/ && \ 39 | cd /tmp/hadoop-lzo && cp target/hadoop-lzo-0.4.20.jar ${HADOOP_INSTALL_DIR}/share/hadoop/common/ && \ 40 | echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lzo-2.09/lib" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 41 | rm -rf /tmp/lzo-2.09* hadoop-lzo lib libgplcompression* 42 | 43 | # Enable jmx by default 44 | WORKDIR ${HADOOP_INSTALL_DIR} 45 | 46 | RUN echo "# Extra Java runtime options. Empty by default." >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 47 | echo "export HADOOP_OPTS=\"-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 48 | echo "# Command specific options appended to HADOOP_OPTS when specified" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 49 | echo "export HADOOP_NAMENODE_OPTS=\"-Dcom.sun.management.jmxremote \$HADOOP_NAMENODE_OPTS -Dcom.sun.management.jmxremote.port=8004\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 50 | echo "export HADOOP_SECONDARYNAMENODE_OPTS=\"-Dcom.sun.management.jmxremote \$HADOOP_SECONDARYNAMENODE_OPTS -Dcom.sun.management.jmxremote.port=8005\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 51 | echo "export HADOOP_DATANODE_OPTS=\"-Dcom.sun.management.jmxremote \$HADOOP_DATANODE_OPTS -Dcom.sun.management.jmxremote.port=8006\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 52 | echo "export HADOOP_BALANCER_OPTS=\"-Dcom.sun.management.jmxremote \$HADOOP_BALANCER_OPTS -Dcom.sun.management.jmxremote.port=8007\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 53 | echo "export HADOOP_JOBTRACKER_OPTS=\"-Dcom.sun.management.jmxremote \$HADOOP_JOBTRACKER_OPTS -Dcom.sun.management.jmxremote.port=8008\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh && \ 54 | echo "export HADOOP_TASKTRACKER_OPTS=\"-Dcom.sun.management.jmxremote.port=8009\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh 55 | -------------------------------------------------------------------------------- /hadoop/hadoop-base/Makefile: -------------------------------------------------------------------------------- 1 | all: push 2 | 3 | IMAGE_NAME=${IMAGE_BASE_URL}/hadoop-base 4 | 5 | build: 6 | docker build -t $(IMAGE_NAME):$(IMAGE_TAG) .
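# (added note, not in the original Makefile) 'all: push' and 'push: build' chain the targets, so a bare 'make' (as invoked by the parent hadoop/Makefile) builds the image and then pushes $(IMAGE_NAME):$(IMAGE_TAG)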
7 | 8 | push: build 9 | docker push $(IMAGE_NAME):$(IMAGE_TAG) 10 | -------------------------------------------------------------------------------- /hadoop/hadoop-datanode/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM $BASE_IMAGE 2 | 3 | ENV HDFS_DATANODE_ROOT_DIR=/var/hdfs/datanode 4 | ENV HDFS_NAMENODE_RPC_HOST=0.0.0.0 5 | ENV HDFS_NAMENODE_RPC_PORT=8020 6 | ENV JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 7 | 8 | ADD conf/core-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 9 | ADD conf/hdfs-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 10 | ADD conf/yarn-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/yarn-site.xml 11 | COPY conf/id_rsa.pub /root/.ssh/authorized_keys 12 | 13 | ADD docker-entrypoint.sh /usr/local/sbin/docker-entrypoint.sh 14 | 15 | RUN \ 16 | mkdir /var/run/sshd && \ 17 | chmod 0755 /var/run/sshd && \ 18 | service ssh start 19 | 20 | VOLUME ["${HDFS_DATANODE_ROOT_DIR}"] 21 | 22 | 23 | # TCP 50010 dfs.datanode.address port for data transfer 24 | # TCP 50020 dfs.datanode.ipc.address ipc server 25 | # TCP 50075 dfs.datanode.http.address http server 26 | # TCP 50475 dfs.datanode.https.address https server 27 | 28 | EXPOSE 50010 50020 50075 50475 29 | 30 | RUN chmod a+x /usr/local/sbin/docker-entrypoint.sh 31 | 32 | ENTRYPOINT ["/usr/local/sbin/docker-entrypoint.sh"] 33 | CMD ["bin/hdfs", "datanode"] 34 | -------------------------------------------------------------------------------- /hadoop/hadoop-datanode/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /hadoop/hadoop-datanode/Makefile: -------------------------------------------------------------------------------- 1 | all: push clean 2 | 3 | IMAGE_NAME=${IMAGE_BASE_URL}/hadoop-datanode 4 | BASE_IMAGE=$(IMAGE_BASE_URL)/hadoop-base:${IMAGE_TAG} 5 | addconf: 6 | cp -rf ../conf . 7 | 8 | replace: 9 | sed -i '1 s|$$BASE_IMAGE|${BASE_IMAGE}|g' Dockerfile 10 | 11 | build: addconf replace 12 | docker build -t $(IMAGE_NAME):$(IMAGE_TAG) . 
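# (added note, not in the original Makefile) build depends on addconf and replace: addconf copies ../conf into the build context and replace substitutes the literal $BASE_IMAGE placeholder in the FROM line with the concrete $(BASE_IMAGE); the clean target afterwards restores the placeholder and deletes the copied conf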
13 | 14 | push: build 15 | docker push $(IMAGE_NAME):$(IMAGE_TAG) 16 | 17 | clean: addconf 18 | sed -i '1 s|.*|FROM $$BASE_IMAGE|g' Dockerfile 19 | rm -rf conf 20 | -------------------------------------------------------------------------------- /hadoop/hadoop-datanode/README.md: -------------------------------------------------------------------------------- 1 | # hdfs-datanode 2 | 3 | hdfs datanode in docker 4 | 5 | ## How to use it 6 | 7 | ```bash 8 | docker run -e HDFS_NAMENODE_RPC_HOST=$NAMENODEHOST -v /data:/var/hdfs/datanode --net=host --name hdfs-dn -d dataman/hdfs-datanode:2.7.1 9 | ``` 10 | 11 | ## Exposed ports 12 | 13 | * TCP 50010 dfs.datanode.address port for data transfer 14 | * TCP 50020 dfs.datanode.ipc.address ipc server 15 | * TCP 50075 dfs.datanode.http.address http server 16 | * TCP 50475 dfs.datanode.https.address https server 17 | 18 | ## FAQ 19 | 20 | Q1: The **Overview** tab of the namenode web page shows 3 live datanodes, while the **Datanodes** tab shows only 1 live datanode. Why? 21 | 22 | A1: Please make sure each of your datanode servers has a different **hostname**. 23 | -------------------------------------------------------------------------------- /hadoop/hadoop-datanode/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #sed "s/HDFS_NAMENODE_RPC_HOST/$HDFS_NAMENODE_RPC_HOST/" ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml > ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.new.xml 4 | #mv ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.new.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 5 | 6 | cat ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 7 | 8 | # set fqdn 9 | for i in $(seq 1 10) 10 | do 11 | if grep --quiet $CLUSTER_DOMAIN /etc/hosts; then 12 | break 13 | elif grep --quiet $POD_NAME /etc/hosts; then 14 | cat /etc/hosts | sed "s/$POD_NAME/${POD_NAME}.${POD_NAMESPACE}.svc.${CLUSTER_DOMAIN} $POD_NAME/g" > /etc/hosts.bak 15 | cat /etc/hosts.bak > /etc/hosts 16 | break 17 | else 18 | echo "waiting for /etc/hosts ready" 19 | sleep 1 20 | fi 21 | done 22 | 23 | exec "$@" 24 | -------------------------------------------------------------------------------- /hadoop/hadoop-journalnode/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM $BASE_IMAGE 2 | 3 | ADD conf/core-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 4 | ADD conf/hdfs-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 5 | ADD conf/yarn-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/yarn-site.xml 6 | COPY conf/id_rsa.pub /root/.ssh/authorized_keys 7 | 8 | ADD docker-entrypoint.sh /usr/local/sbin/docker-entrypoint.sh 9 | 10 | # Enable jmx for journalnode 11 | RUN echo "export HADOOP_OPTS=\"-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=8010 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false\"" >> ${HADOOP_INSTALL_DIR}/etc/hadoop/hadoop-env.sh 12 | 13 | EXPOSE 8485 14 | 15 | RUN chmod a+x /usr/local/sbin/docker-entrypoint.sh 16 | 17 | ENTRYPOINT ["/usr/local/sbin/docker-entrypoint.sh"] 18 | CMD ["bin/hdfs", "journalnode"] 19 | -------------------------------------------------------------------------------- /hadoop/hadoop-journalnode/Makefile: -------------------------------------------------------------------------------- 1 | all: push clean 2 | 3 | IMAGE_NAME=${IMAGE_BASE_URL}/hadoop-journalnode 4 | BASE_IMAGE=$(IMAGE_BASE_URL)/hadoop-base:${IMAGE_TAG} 5 | addconf: 6 | cp -rf ../conf .
7 | 8 | replace: 9 | sed -i '1 s|$$BASE_IMAGE|${BASE_IMAGE}|g' Dockerfile 10 | 11 | build: addconf replace 12 | docker build -t $(IMAGE_NAME):$(IMAGE_TAG) . 13 | 14 | push: build 15 | docker push $(IMAGE_NAME):$(IMAGE_TAG) 16 | 17 | clean: addconf 18 | sed -i '1 s|.*|FROM $$BASE_IMAGE|g' Dockerfile 19 | rm -rf conf 20 | -------------------------------------------------------------------------------- /hadoop/hadoop-journalnode/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sed "s/HDFS_NAMENODE_RPC_HOST/$HDFS_NAMENODE_RPC_HOST/" ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml > ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.new.xml 4 | #mv ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.new.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 5 | 6 | cat ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 7 | 8 | # set fqdn 9 | for i in $(seq 1 10) 10 | do 11 | if grep --quiet $CLUSTER_DOMAIN /etc/hosts; then 12 | break 13 | elif grep --quiet $POD_NAME /etc/hosts; then 14 | cat /etc/hosts | sed "s/$POD_NAME/${POD_NAME}.${POD_NAMESPACE}.svc.${CLUSTER_DOMAIN} $POD_NAME/g" > /etc/hosts.bak 15 | cat /etc/hosts.bak > /etc/hosts 16 | break 17 | else 18 | echo "waiting for /etc/hosts ready" 19 | sleep 1 20 | fi 21 | done 22 | 23 | exec "$@" 24 | -------------------------------------------------------------------------------- /hadoop/hadoop-namenode/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM $BASE_IMAGE 2 | 3 | ENV HDFS_NAMENODE_ROOT_DIR=/var/hdfs/name 4 | ENV JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 5 | 6 | ADD conf/core-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 7 | ADD conf/hdfs-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 8 | ADD conf/yarn-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/yarn-site.xml 9 | COPY conf/id_rsa /root/.ssh/id_rsa 10 | COPY conf/id_rsa.pub /root/.ssh/id_rsa.pub 11 | 12 | ADD docker-entrypoint.sh /usr/local/sbin/docker-entrypoint.sh 13 | 14 | VOLUME ["${HDFS_NAMENODE_ROOT_DIR}"] 15 | 16 | RUN \ 17 | mkdir /var/run/sshd && \ 18 | chmod 0755 /var/run/sshd && \ 19 | service ssh start 20 | 21 | # TCP 8020 fs.defaultFS IPC: ClientProtocol 22 | # TCP 50070 dfs.namenode.http-address HTTP connector 23 | # TCP 50470 dfs.namenode.https-address HTTPS connector 24 | 25 | EXPOSE 22 8020 50070 50470 26 | 27 | RUN chmod a+x /usr/local/sbin/docker-entrypoint.sh 28 | 29 | ENTRYPOINT ["/usr/local/sbin/docker-entrypoint.sh"] 30 | #CMD ["/usr/sbin/sshd"] 31 | CMD ["bin/hdfs", "namenode"] 32 | #CMD ["./sbin/start-dfs.sh"] 33 | -------------------------------------------------------------------------------- /hadoop/hadoop-namenode/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /hadoop/hadoop-namenode/Makefile: -------------------------------------------------------------------------------- 1 | all: push clean 2 | 3 | IMAGE_NAME=${IMAGE_BASE_URL}/hadoop-namenode 4 | BASE_IMAGE=$(IMAGE_BASE_URL)/hadoop-base:${IMAGE_TAG} 5 | addconf: 6 | cp -rf ../conf . 7 | 8 | replace: 9 | sed -i '1 s|$$BASE_IMAGE|${BASE_IMAGE}|g' Dockerfile 10 | 11 | build: addconf replace 12 | docker build -t $(IMAGE_NAME):$(IMAGE_TAG) . 13 | 14 | push: build 15 | docker push $(IMAGE_NAME):$(IMAGE_TAG) 16 | 17 | clean: addconf 18 | sed -i '1 s|.*|FROM $$BASE_IMAGE|g' Dockerfile 19 | rm -rf conf 20 | -------------------------------------------------------------------------------- /hadoop/hadoop-namenode/README.md: -------------------------------------------------------------------------------- 1 | # hdfs-namenode 2 | 3 | hdfs namenode in docker 4 | 5 | ## How to use it 6 | 7 | ```bash 8 | docker run -v /data:/var/hdfs/namenode -d --name hdfs-nn -p 8020:8020 -p 50070:50070 dataman/hdfs-namenode:2.7.1 9 | ``` 10 | 11 | ## Exposed ports 12 | 13 | * TCP 8020 fs.defaultFS IPC: ClientProtocol 14 | * TCP 50070 dfs.namenode.http-address HTTP connector 15 | * TCP 50470 dfs.namenode.https-address HTTPS connector 16 | -------------------------------------------------------------------------------- /hadoop/hadoop-namenode/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # set fqdn 4 | for i in $(seq 1 10) 5 | do 6 | if grep --quiet $CLUSTER_DOMAIN /etc/hosts; then 7 | break 8 | elif grep --quiet $POD_NAME /etc/hosts; then 9 | cat /etc/hosts | sed "s/$POD_NAME/${POD_NAME}.${POD_NAMESPACE}.svc.${CLUSTER_DOMAIN} $POD_NAME/g" > /etc/hosts.bak 10 | cat /etc/hosts.bak > /etc/hosts 11 | break 12 | else 13 | echo "waiting for /etc/hosts ready" 14 | sleep 1 15 | fi 16 | done 17 | 18 | if [ ! -f ${HDFS_NAMENODE_ROOT_DIR}/current/VERSION ]; then 19 | echo Formatting namenode root fs in ${HDFS_NAMENODE_ROOT_DIR} 20 | bin/hdfs namenode -format -nonInteractive 21 | fi 22 | 23 | if [ "${HDFS_INIT_NAMENODE}" = "true" ]; then 24 | echo forcing initialize shared edits... 25 | bin/hdfs namenode -initializeSharedEdits -nonInteractive 26 | else 27 | echo booting standby... 
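# (added note, not in the original script) -bootstrapStandby copies the formatted namespace from the active namenode so this standby starts from the same fsimage; the pod started with HDFS_INIT_NAMENODE=true instead runs -initializeSharedEdits above to seed the journalnodes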
28 | bin/hdfs namenode -bootstrapStandby -nonInteractive 29 | fi 30 | 31 | bin/hdfs zkfc -formatZK -nonInteractive 32 | sbin/hadoop-daemon.sh --script bin/hdfs start zkfc 33 | 34 | exec "$@" 35 | -------------------------------------------------------------------------------- /hadoop/yaml/datanode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hadoop-datanode-2 5 | spec: 6 | clusterIP: None 7 | selector: 8 | component: hadoop-datanode-2 9 | ports: 10 | - name: p1 11 | port: 50010 12 | - name: p2 13 | port: 50020 14 | - name: p3 15 | port: 50075 16 | - name: p4 17 | port: 50475 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: hadoop-datanode-1 23 | spec: 24 | clusterIP: None 25 | selector: 26 | component: hadoop-datanode-1 27 | ports: 28 | - name: p1 29 | port: 50010 30 | - name: p2 31 | port: 50020 32 | - name: p3 33 | port: 50075 34 | - name: p4 35 | port: 50475 36 | --- 37 | apiVersion: v1 38 | kind: Pod 39 | metadata: 40 | name: hadoop-datanode-1 41 | labels: 42 | component: hadoop-datanode-1 43 | spec: 44 | # nodeSelector: 45 | # kubernetes.io/hostname: kube-node-6 46 | containers: 47 | - name: hadoop-datanode-1 48 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-datanode 49 | imagePullPolicy: Always 50 | volumeMounts: 51 | - name: hdfs-dn 52 | mountPath: /var/hdfs/data 53 | ports: 54 | - containerPort: 50010 55 | - containerPort: 50020 56 | - containerPort: 50075 57 | - containerPort: 50475 58 | env: 59 | - name: POD_NAMESPACE 60 | valueFrom: 61 | fieldRef: 62 | fieldPath: metadata.namespace 63 | - name: POD_NAME 64 | valueFrom: 65 | fieldRef: 66 | fieldPath: metadata.name 67 | volumes: 68 | - hostPath: 69 | path: /var/hdfs/data 70 | name: hdfs-dn 71 | --- 72 | apiVersion: v1 73 | kind: Pod 74 | metadata: 75 | name: hadoop-datanode-2 76 | labels: 77 | component: hadoop-datanode-2 78 | spec: 79 | # nodeSelector: 80 | # kubernetes.io/hostname: kube-node-7 81 | containers: 82 | - name: hadoop-datanode-2 83 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-datanode 84 | imagePullPolicy: Always 85 | volumeMounts: 86 | - name: hdfs-dn 87 | mountPath: /var/hdfs/data 88 | ports: 89 | - containerPort: 50010 90 | - containerPort: 50020 91 | - containerPort: 50075 92 | - containerPort: 50475 93 | env: 94 | - name: POD_NAMESPACE 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: metadata.namespace 98 | - name: POD_NAME 99 | valueFrom: 100 | fieldRef: 101 | fieldPath: metadata.name 102 | volumes: 103 | - hostPath: 104 | path: /var/hdfs/data 105 | name: hdfs-dn 106 | -------------------------------------------------------------------------------- /hadoop/yaml/journalnode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hadoop-journalnode-1 5 | spec: 6 | clusterIP: None 7 | selector: 8 | component: hadoop-journalnode-1 9 | ports: 10 | - name: web 11 | port: 8480 12 | - name: ipc 13 | port: 8485 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: hadoop-journalnode-2 19 | spec: 20 | clusterIP: None 21 | selector: 22 | component: hadoop-journalnode-2 23 | ports: 24 | - name: web 25 | port: 8480 26 | - name: ipc 27 | port: 8485 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: hadoop-journalnode-0 33 | spec: 34 | clusterIP: None 35 | selector: 36 | component: hadoop-journalnode-0 37 | ports: 38 | - name: web 39 | port: 8480 40 | 
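# (added note, not in the original yaml) 8480 serves the journalnode web UI; 8485 below is the RPC port the namenodes write shared edits to, matching the qjournal:// URI in hdfs-site.xml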
- name: ipc 41 | port: 8485 42 | --- 43 | apiVersion: v1 44 | kind: Pod 45 | metadata: 46 | name: hadoop-journalnode-1 47 | labels: 48 | component: hadoop-journalnode-1 49 | spec: 50 | containers: 51 | - name: journalnode 52 | env: 53 | - name: POD_NAMESPACE 54 | valueFrom: 55 | fieldRef: 56 | fieldPath: metadata.namespace 57 | - name: POD_NAME 58 | valueFrom: 59 | fieldRef: 60 | fieldPath: metadata.name 61 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-journalnode 62 | imagePullPolicy: Always 63 | ports: 64 | - containerPort: 8485 65 | - containerPort: 8480 66 | --- 67 | apiVersion: v1 68 | kind: Pod 69 | metadata: 70 | name: hadoop-journalnode-2 71 | labels: 72 | component: hadoop-journalnode-2 73 | spec: 74 | containers: 75 | - name: journalnode 76 | env: 77 | - name: POD_NAMESPACE 78 | valueFrom: 79 | fieldRef: 80 | fieldPath: metadata.namespace 81 | - name: POD_NAME 82 | valueFrom: 83 | fieldRef: 84 | fieldPath: metadata.name 85 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-journalnode 86 | imagePullPolicy: Always 87 | ports: 88 | - containerPort: 8485 89 | - containerPort: 8480 90 | --- 91 | apiVersion: v1 92 | kind: Pod 93 | metadata: 94 | name: hadoop-journalnode-0 95 | labels: 96 | component: hadoop-journalnode-0 97 | spec: 98 | containers: 99 | - name: journalnode 100 | env: 101 | - name: POD_NAMESPACE 102 | valueFrom: 103 | fieldRef: 104 | fieldPath: metadata.namespace 105 | - name: POD_NAME 106 | valueFrom: 107 | fieldRef: 108 | fieldPath: metadata.name 109 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-journalnode 110 | imagePullPolicy: Always 111 | ports: 112 | - containerPort: 8485 113 | - containerPort: 8480 114 | -------------------------------------------------------------------------------- /hadoop/yaml/namenode0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hadoop-namenode-0 5 | spec: 6 | clusterIP: None 7 | selector: 8 | component: hadoop-namenode-0 9 | ports: 10 | - name: rpc 11 | port: 8020 12 | - name: p1 13 | port: 50070 14 | - name: p2 15 | port: 50470 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: hadoop-namenode-0 21 | labels: 22 | component: hadoop-namenode-0 23 | spec: 24 | # nodeSelector: 25 | # kubernetes.io/hostname: kube-node-7 26 | containers: 27 | - name: namenode 28 | image: cargo.caicloudprivatetest.com/caicloud/hadoop-namenode 29 | imagePullPolicy: Always 30 | ports: 31 | - containerPort: 8020 32 | - containerPort: 50070 33 | - containerPort: 50470 34 | env: 35 | - name: HDFS_INIT_NAMENODE 36 | value: "true" 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | - name: POD_NAME 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: metadata.name 45 | -------------------------------------------------------------------------------- /hadoop/yaml/namenode1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hadoop-namenode-1 5 | spec: 6 | clusterIP: None 7 | selector: 8 | component: hadoop-namenode-1 9 | ports: 10 | - name: rpc 11 | port: 8020 12 | - name: p1 13 | port: 50070 14 | - name: p2 15 | port: 50470 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: hadoop-namenode-1 21 | labels: 22 | component: hadoop-namenode-1 23 | spec: 24 | # nodeSelector: 25 | # kubernetes.io/hostname: kube-node-6 26 | containers: 27 | - name: namenode 28 | image: 
cargo.caicloudprivatetest.com/caicloud/hadoop-namenode 29 | imagePullPolicy: Always 30 | ports: 31 | - containerPort: 8020 32 | - containerPort: 50070 33 | - containerPort: 50470 34 | env: 35 | - name: HDFS_INIT_NAMENODE 36 | value: "false" 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | - name: POD_NAME 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: metadata.name 45 | -------------------------------------------------------------------------------- /hadoop/yarn/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM index.caicloud.io/caicloud/hadoop-base:sysinfra 2 | 3 | ADD conf/core-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/core-site.xml 4 | ADD conf/hdfs-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/hdfs-site.xml 5 | ADD conf/yarn-site.xml ${HADOOP_INSTALL_DIR}/etc/hadoop/yarn-site.xml 6 | COPY conf/id_rsa.pub /root/.ssh/authorized_keys 7 | 8 | ADD docker-entrypoint.sh /usr/local/sbin/docker-entrypoint.sh 9 | 10 | RUN chmod a+x /usr/local/sbin/docker-entrypoint.sh 11 | 12 | ENTRYPOINT ["/usr/local/sbin/docker-entrypoint.sh"] 13 | CMD ["bin/yarn", "-h"] 14 | -------------------------------------------------------------------------------- /hadoop/yarn/Makefile: -------------------------------------------------------------------------------- 1 | all: push clean 2 | 3 | NAME=${IMAGE_BASE_URL}/yarn 4 | addconf: 5 | cp -rf ../conf . 6 | 7 | build: addconf 8 | docker build -t $(NAME):$(IMAGE_TAG) . 9 | 10 | push: build 11 | docker push $(NAME):$(IMAGE_TAG) 12 | 13 | clean: addconf 14 | rm -rf conf 15 | -------------------------------------------------------------------------------- /hadoop/yarn/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set fqdn 4 | for i in $(seq 1 10) 5 | do 6 | if grep --quiet $CLUSTER_DOMAIN /etc/hosts; then 7 | break 8 | elif grep --quiet $POD_NAME /etc/hosts; then 9 | cat /etc/hosts | sed "s/$POD_NAME/${POD_NAME}.${POD_NAMESPACE}.svc.${CLUSTER_DOMAIN} $POD_NAME/g" > /etc/hosts.bak 10 | cat /etc/hosts.bak > /etc/hosts 11 | break 12 | else 13 | echo "waiting for /etc/hosts ready" 14 | sleep 1 15 | fi 16 | done 17 | 18 | exec "$@" 19 | -------------------------------------------------------------------------------- /hbase-image/.gitignore: -------------------------------------------------------------------------------- 1 | hbase-1.2.3-bin.tar.gz 2 | -------------------------------------------------------------------------------- /hbase-image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM java:7 2 | 3 | ENV HBASE_VERSION 1.2.6 4 | ENV HBASE_INSTALL_DIR /opt/hbase 5 | 6 | ENV JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 7 | 8 | RUN mkdir -p ${HBASE_INSTALL_DIR} && \ 9 | curl -L https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/${HBASE_VERSION}/hbase-${HBASE_VERSION}-bin.tar.gz | tar -xz --strip=1 -C ${HBASE_INSTALL_DIR} 10 | 11 | RUN sed -i "s/httpredir.debian.org/mirrors.163.com/g" /etc/apt/sources.list 12 | # build LZO 13 | WORKDIR /tmp 14 | RUN apt-get update && \ 15 | apt-get install -y build-essential maven lzop liblzo2-2 && \ 16 | wget http://www.oberhumer.com/opensource/lzo/download/lzo-2.09.tar.gz && \ 17 | tar zxvf lzo-2.09.tar.gz && \ 18 | cd lzo-2.09 && \ 19 | ./configure --enable-shared --prefix /usr/local/lzo-2.09 && \ 20 | make && make install && \ 21 | cd .. 
&& git clone https://github.com/twitter/hadoop-lzo.git && cd hadoop-lzo && \ 22 | git checkout release-0.4.20 && \ 23 | C_INCLUDE_PATH=/usr/local/lzo-2.09/include LIBRARY_PATH=/usr/local/lzo-2.09/lib mvn clean package && \ 24 | apt-get remove -y build-essential maven && \ 25 | apt-get clean autoclean && \ 26 | apt-get autoremove --yes && \ 27 | rm -rf /var/lib/{apt,dpkg,cache,log}/ && \ 28 | cd target/native/Linux-amd64-64 && \ 29 | tar -cBf - -C lib . | tar -xBvf - -C /tmp && \ 30 | mkdir -p ${HBASE_INSTALL_DIR}/lib/native && \ 31 | cp /tmp/libgplcompression* ${HBASE_INSTALL_DIR}/lib/native/ && \ 32 | cd /tmp/hadoop-lzo && cp target/hadoop-lzo-0.4.20.jar ${HBASE_INSTALL_DIR}/lib/ && \ 33 | echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lzo-2.09/lib" >> ${HBASE_INSTALL_DIR}/conf/hbase-env.sh && \ 34 | rm -rf /tmp/lzo-2.09* hadoop-lzo lib libgplcompression* 35 | 36 | ADD hbase-site.xml /opt/hbase/conf/hbase-site.xml 37 | ADD core-site.xml /opt/hbase/conf/core-site.xml 38 | ADD hdfs-site.xml /opt/hbase/conf/hdfs-site.xml 39 | ADD start-k8s-hbase.sh /opt/hbase/bin/start-k8s-hbase.sh 40 | RUN chmod +x /opt/hbase/bin/start-k8s-hbase.sh 41 | 42 | WORKDIR ${HBASE_INSTALL_DIR} 43 | RUN echo "export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"" >> conf/hbase-env.sh && \ 44 | echo "export HBASE_MASTER_OPTS=\"\$HBASE_MASTER_OPTS \$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101\"" >> conf/hbase-env.sh && \ 45 | echo "export HBASE_REGIONSERVER_OPTS=\"\$HBASE_REGIONSERVER_OPTS \$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102\"" >> conf/hbase-env.sh && \ 46 | echo "export HBASE_THRIFT_OPTS=\"\$HBASE_THRIFT_OPTS \$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"" >> conf/hbase-env.sh && \ 47 | echo "export HBASE_ZOOKEEPER_OPTS=\"\$HBASE_ZOOKEEPER_OPTS \$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"" >> conf/hbase-env.sh && \ 48 | echo "export HBASE_REST_OPTS=\"\$HBASE_REST_OPTS \$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105\"" >> conf/hbase-env.sh 49 | 50 | ENV PATH=$PATH:/opt/hbase/bin 51 | 52 | CMD /opt/hbase/bin/start-k8s-hbase.sh 53 | -------------------------------------------------------------------------------- /hbase-image/Makefile: -------------------------------------------------------------------------------- 1 | all: push clean 2 | 3 | IMAGE_NAME=${IMAGE_BASE_URL}/hbase 4 | 5 | addconf: 6 | cp -rf ../hadoop/conf/*.xml . 7 | 8 | build: addconf 9 | docker build -t $(IMAGE_NAME):$(IMAGE_TAG) .
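# A hypothetical invocation of this Makefile (registry URL and tag below are placeholders, not values from this repo):
#   IMAGE_BASE_URL=registry.example.com/infra IMAGE_TAG=latest make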
10 | 11 | push: build 12 | docker push $(IMAGE_NAME):$(IMAGE_TAG) 13 | 14 | clean: 15 | rm core-site.xml hdfs-site.xml yarn-site.xml 16 | -------------------------------------------------------------------------------- /hbase-image/hbase-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | hbase.cluster.distributed 6 | true 7 | 8 | 9 | hbase.rootdir 10 | hdfs://@HDFS_PATH@/hbase/ 11 | 12 | 13 | hbase.zookeeper.quorum 14 | @ZOOKEEPER_IP_LIST@ 15 | 16 | 17 | hbase.zookeeper.property.clientPort 18 | @ZOOKEEPER_PORT@ 19 | 20 | 21 | zookeeper.znode.parent 22 | /@ZNODE_PARENT@ 23 | 24 | 25 | zookeeper.session.timeout 26 | 30000 27 | 28 | 29 | hbase.ipc.client.tcpnodelay 30 | true 31 | 32 | 33 | hbase.ipc.server.tcpnodelay 34 | true 35 | 36 | 37 | dfs.namenode.avoid.read.stale.datanode 38 | true 39 | 40 | 41 | dfs.namenode.avoid.write.stale.datanode 42 | true 43 | 44 | 45 | hbase.regionserver.handler.count 46 | 64 47 | 48 | 49 | hbase.hstore.min.locality.to.skip.major.compact 50 | 0.7 51 | 52 | 53 | -------------------------------------------------------------------------------- /hbase-image/start-k8s-hbase.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HBASE_CONF_FILE=/opt/hbase/conf/hbase-site.xml 4 | export HADOOP_USER_NAME=root 5 | export HBASE_MANAGES_ZK=false 6 | 7 | sed -i "s/@HDFS_PATH@/$HDFS_PATH/g" $HBASE_CONF_FILE 8 | sed -i "s/@ZOOKEEPER_IP_LIST@/$ZOOKEEPER_SERVICE_LIST/g" $HBASE_CONF_FILE 9 | sed -i "s/@ZOOKEEPER_PORT@/$ZOOKEEPER_PORT/g" $HBASE_CONF_FILE 10 | sed -i "s/@ZNODE_PARENT@/$ZNODE_PARENT/g" $HBASE_CONF_FILE 11 | 12 | # set fqdn 13 | for i in $(seq 1 10) 14 | do 15 | if grep --quiet "$CLUSTER_DOMAIN" /etc/hosts; then 16 | break 17 | elif grep --quiet "$POD_NAME" /etc/hosts; then 18 | sed "s/$POD_NAME/${POD_NAME}.${POD_NAMESPACE}.svc.${CLUSTER_DOMAIN} $POD_NAME/g" /etc/hosts > /etc/hosts.bak 19 | cat /etc/hosts.bak > /etc/hosts 20 | break 21 | else 22 | echo "waiting for /etc/hosts ready" 23 | sleep 1 24 | fi 25 | done 26 | 27 | if [ "$HBASE_SERVER_TYPE" = "master" ]; then 28 | exec /opt/hbase/bin/hbase master start 29 | elif [ "$HBASE_SERVER_TYPE" = "regionserver" ]; then 30 | exec /opt/hbase/bin/hbase regionserver start 31 | fi 32 | -------------------------------------------------------------------------------- /hmaster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hbase-master-a 5 | spec: 6 | clusterIP: None 7 | selector: 8 | app: hbase-master-a 9 | ports: 10 | - name: p1 11 | port: 16000 12 | - name: p2 13 | port: 16010 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: hbase-master-b 19 | spec: 20 | clusterIP: None 21 | selector: 22 | app: hbase-master-b 23 | ports: 24 | - name: p1 25 | port: 16000 26 | - name: p2 27 | port: 16010 28 | --- 29 | apiVersion: v1 30 | kind: Pod 31 | metadata: 32 | labels: 33 | app: hbase-master-a 34 | name: hbase-master-a 35 | spec: 36 | containers: 37 | - env: 38 | - name: POD_NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.namespace 42 | - name: POD_NAME 43 | valueFrom: 44 | fieldRef: 45 | fieldPath: metadata.name 46 | - name: HBASE_SERVER_TYPE 47 | value: master 48 | - name: HDFS_PATH 49 | value: mycluster 50 | - name: ZOOKEEPER_SERVICE_LIST 51 | value: zookeeper-1,zookeeper-2,zookeeper-3 52 | - name: ZOOKEEPER_PORT 53 | value: "2181" 54 | - name: ZNODE_PARENT 55 | value: hbase 56 |
image: cargo.caicloudprivatetest.com/caicloud/hbase 57 | imagePullPolicy: Always 58 | name: hbase-master-a 59 | ports: 60 | - containerPort: 16000 61 | protocol: TCP 62 | - containerPort: 16010 63 | protocol: TCP 64 | --- 65 | apiVersion: v1 66 | kind: Pod 67 | metadata: 68 | labels: 69 | app: hbase-master-b 70 | name: hbase-master-b 71 | spec: 72 | containers: 73 | - env: 74 | - name: POD_NAMESPACE 75 | valueFrom: 76 | fieldRef: 77 | fieldPath: metadata.namespace 78 | - name: POD_NAME 79 | valueFrom: 80 | fieldRef: 81 | fieldPath: metadata.name 82 | - name: HBASE_SERVER_TYPE 83 | value: master 84 | - name: HDFS_PATH 85 | value: mycluster 86 | - name: ZOOKEEPER_SERVICE_LIST 87 | value: zookeeper-1,zookeeper-2,zookeeper-3 88 | - name: ZOOKEEPER_PORT 89 | value: "2181" 90 | - name: ZNODE_PARENT 91 | value: hbase 92 | image: cargo.caicloudprivatetest.com/caicloud/hbase 93 | imagePullPolicy: Always 94 | name: hbase-master-b 95 | ports: 96 | - containerPort: 16000 97 | protocol: TCP 98 | - containerPort: 16010 99 | protocol: TCP 100 | -------------------------------------------------------------------------------- /opentsdb-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: opentsdb-svc 5 | spec: 6 | selector: 7 | app: opentsdb 8 | ports: 9 | - name: port1 10 | port: 4242 11 | targetPort: 4242 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: opentsdb 17 | labels: 18 | app: opentsdb 19 | spec: 20 | containers: 21 | - name: opentsdb-node 22 | image: index.caicloud.io/caicloud/opentsdb 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - containerPort: 4242 26 | name: port1 27 | -------------------------------------------------------------------------------- /opentsdb/.gitignore: -------------------------------------------------------------------------------- 1 | opentsdb-2.2.0_all.deb 2 | -------------------------------------------------------------------------------- /opentsdb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM java:8-jre 2 | 3 | ADD opentsdb-2.2.0_all.deb . 
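# Note: the .deb added above is intentionally not checked in (see opentsdb/.gitignore);
# download it into this directory before building. One plausible source, stated here as
# an assumption rather than a verified URL:
#   curl -LO https://github.com/OpenTSDB/opentsdb/releases/download/v2.2.0/opentsdb-2.2.0_all.deb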
4 | RUN dpkg -i opentsdb-2.2.0_all.deb 5 | RUN rm ./opentsdb-2.2.0_all.deb 6 | 7 | 8 | RUN echo "tsd.storage.hbase.zk_quorum = zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181" >> /etc/opentsdb/opentsdb.conf 9 | ADD hbase-service.sh /opt/hbase-service.sh 10 | ADD opentsdb-service.sh /opt/opentsdb-service.sh 11 | 12 | ENV HBASE_VERSION 1.2.3 13 | ENV HBASE_HOME /opt/hbase 14 | COPY ./hbase-1.2.3-bin.tar.gz /hbase-setup/hbase-1.2.3-bin.tar.gz 15 | RUN tar zxf /hbase-setup/hbase-1.2.3-bin.tar.gz -C /opt/ \ 16 | && ln -s /opt/hbase-1.2.3 /opt/hbase 17 | RUN rm /hbase-setup/hbase-1.2.3-bin.tar.gz 18 | 19 | ADD create_table.sh /usr/share/opentsdb/tools/create_table.sh 20 | ADD hbase-site.xml ${HBASE_HOME}/conf/hbase-site.xml 21 | ADD hdfs-site.xml ${HBASE_HOME}/conf/hdfs-site.xml 22 | ADD core-site.xml ${HBASE_HOME}/conf/core-site.xml 23 | 24 | EXPOSE 4242 25 | CMD ["/opt/opentsdb-service.sh"] 26 | -------------------------------------------------------------------------------- /opentsdb/README.md: -------------------------------------------------------------------------------- 1 | opentsdb 2 | ======== 3 | opentsdb docker on distributed hbase and zookeeper 4 | -------------------------------------------------------------------------------- /opentsdb/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | fs.defaultFS 7 | hdfs://mycluster 8 | 9 | 10 | io.compression.codecs 11 | 12 | org.apache.hadoop.io.compress.GzipCodec, 13 | org.apache.hadoop.io.compress.DefaultCodec, 14 | com.hadoop.compression.lzo.LzoCodec, 15 | com.hadoop.compression.lzo.LzopCodec, 16 | org.apache.hadoop.io.compress.BZip2Codec 17 | 18 | 19 | 20 | io.compression.codec.lzo.class 21 | com.hadoop.compression.lzo.LzoCodec 22 | 23 | 24 | ha.zookeeper.quorum 25 | zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 26 | 27 | 28 | -------------------------------------------------------------------------------- /opentsdb/create_table.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Small script to setup the HBase tables used by OpenTSDB. 3 | 4 | export HBASE_CONF_FILE=/opt/hbase/conf/hbase-site.xml 5 | 6 | sed -i "s/@HDFS_PATH@/$HDFS_PATH/g" $HBASE_CONF_FILE 7 | sed -i "s/@ZOOKEEPER_IP_LIST@/$ZOOKEEPER_SERVICE_LIST/g" $HBASE_CONF_FILE 8 | sed -i "s/@ZOOKEEPER_PORT@/$ZOOKEEPER_PORT/g" $HBASE_CONF_FILE 9 | sed -i "s/@ZNODE_PARENT@/$ZNODE_PARENT/g" $HBASE_CONF_FILE 10 | cat $HBASE_CONF_FILE 11 | 12 | test -n "$HBASE_HOME" || { 13 | echo >&2 'The environment variable HBASE_HOME must be set' 14 | exit 1 15 | } 16 | test -d "$HBASE_HOME" || { 17 | echo >&2 "No such directory: HBASE_HOME=$HBASE_HOME" 18 | exit 1 19 | } 20 | 21 | TSDB_TABLE=${TSDB_TABLE-'tsdb'} 22 | UID_TABLE=${UID_TABLE-'tsdb-uid'} 23 | TREE_TABLE=${TREE_TABLE-'tsdb-tree'} 24 | META_TABLE=${META_TABLE-'tsdb-meta'} 25 | BLOOMFILTER=${BLOOMFILTER-'ROW'} 26 | # LZO requires lzo2 64bit to be installed + the hadoop-gpl-compression jar. 27 | COMPRESSION=${COMPRESSION-'LZO'} 28 | # All compression codec names are upper case (NONE, LZO, SNAPPY, etc). 29 | COMPRESSION=`echo "$COMPRESSION" | tr a-z A-Z` 30 | 31 | case $COMPRESSION in 32 | (NONE|LZO|GZIP|SNAPPY) :;; # Known good. 33 | (*) 34 | echo >&2 "warning: compression codec '$COMPRESSION' might not be supported." 
35 | ;; 36 | esac 37 | 38 | # HBase scripts also use a variable named `HBASE_HOME', and having this 39 | # variable in the environment with a value somewhat different from what 40 | # they expect can confuse them in some cases. So rename the variable. 41 | hbh=$HBASE_HOME 42 | unset HBASE_HOME 43 | echo $hbh 44 | "$hbh/bin/hbase" shell <<EOF 45 | create '$UID_TABLE', 46 | {NAME => 'id', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}, 47 | {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 48 | 49 | create '$TSDB_TABLE', 50 | {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 51 | 52 | create '$TREE_TABLE', 53 | {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 54 | 55 | create '$META_TABLE', 56 | {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 57 | EOF 58 | -------------------------------------------------------------------------------- /opentsdb/hbase-service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cat <<EOF > /opt/hbase/conf/hbase-site.xml 4 | <?xml version="1.0"?> 5 | <configuration> 6 | <property> 7 | <name>hbase.rootdir</name> 8 | <value>file:///${HBASE_DATA}/hbase\${user.name}/hbase</value> 9 | </property> 10 | </configuration> 11 | EOF 12 | 13 | exec /opt/hbase/bin/hbase master start 14 | -------------------------------------------------------------------------------- /opentsdb/hbase-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | hbase.cluster.distributed 6 | true 7 | 8 | 9 | hbase.rootdir 10 | hdfs://@HDFS_PATH@/hbase/ 11 | 12 | 13 | hbase.zookeeper.quorum 14 | @ZOOKEEPER_IP_LIST@ 15 | 16 | 17 | hbase.zookeeper.property.clientPort 18 | @ZOOKEEPER_PORT@ 19 | 20 | 21 | zookeeper.znode.parent 22 | /@ZNODE_PARENT@ 23 | 24 | 25 | -------------------------------------------------------------------------------- /opentsdb/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | dfs.nameservices 6 | mycluster 7 | 8 | 9 | dfs.ha.namenodes.mycluster 10 | nn0,nn1 11 | 12 | 13 | dfs.namenode.rpc-address.mycluster.nn0 14 | hadoop-namenode-0:8020 15 | 16 | 17 | dfs.namenode.rpc-address.mycluster.nn1 18 | hadoop-namenode-1:8020 19 | 20 | 21 | dfs.namenode.http-address.mycluster.nn0 22 | hadoop-namenode-0:50070 23 | 24 | 25 | dfs.namenode.http-address.mycluster.nn1 26 | hadoop-namenode-1:50070 27 | 28 | 29 | dfs.namenode.shared.edits.dir 30 | qjournal://hadoop-journalnode-0:8485;hadoop-journalnode-1:8485;hadoop-journalnode-2:8485/mycluster 31 | 32 | 33 | dfs.client.failover.proxy.provider.mycluster 34 | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider 35 | 36 | 37 | dfs.journalnode.edits.dir 38 | /home/hadoop/journaldata 39 | 40 | 41 | dfs.ha.automatic-failover.enabled 42 | true 43 | 44 | 45 | dfs.ha.fencing.methods 46 | shell(/bin/true) 47 | 48 | 49 | dfs.replication 50 | 2 51 | 52 | 53 | dfs.permissions 54 | false 55 | 56 | 57 | dfs.name.dir 58 | file:///var/hdfs/name 59 | 60 | 61 | dfs.namenode.datanode.registration.ip-hostname-check 62 | false 63 | 64 | 65 | -------------------------------------------------------------------------------- /opentsdb/opentsdb-service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$CREATE_TABLE" = "Y" ]; then 4 | sleep 20 5 | sh /usr/share/opentsdb/tools/create_table.sh 6 | rm /usr/share/opentsdb/tools/create_table.sh 7 | fi 8 | 9 | exec /usr/share/opentsdb/bin/tsdb tsd \ 10 | --port=4242
\ 11 | --staticroot=/usr/share/opentsdb/static \ 12 | --cachedir=/tmp \ 13 | --auto-metric 14 | -------------------------------------------------------------------------------- /opentsdb/opentsdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: opentsdb-svc 5 | spec: 6 | selector: 7 | app: opentsdb 8 | ports: 9 | - name: port1 10 | port: 4242 11 | targetPort: 4242 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: opentsdb 17 | labels: 18 | app: opentsdb 19 | spec: 20 | containers: 21 | - name: opentsdb-node 22 | image: index.caicloud.io/caicloud/opentsdb:createtable 23 | imagePullPolicy: Always 24 | ports: 25 | - containerPort: 4242 26 | name: port1 27 | env: 28 | - name: CREATE_TABLE 29 | value: "Y" 30 | - name: HDFS_PATH 31 | value: mycluster 32 | - name: ZOOKEEPER_SERVICE_LIST 33 | value: zookeeper-1,zookeeper-2,zookeeper-3 34 | - name: ZOOKEEPER_PORT 35 | value: "2181" 36 | - name: ZNODE_PARENT 37 | value: hbase 38 | # command: ["tail","-f","/dev/null"] 39 | -------------------------------------------------------------------------------- /opentsdb/query.sh: -------------------------------------------------------------------------------- 1 | create 'tsdb-uid', 2 | {NAME => 'id', COMPRESSION => 'lzo', BLOOMFILTER => 'ROW'}, 3 | {NAME => 'name', COMPRESSION => 'lzo', BLOOMFILTER => 'ROW'} 4 | 5 | create 'tsdb', 6 | {NAME => 't', VERSIONS => 1, COMPRESSION => 'lzo', BLOOMFILTER => 'ROW'} 7 | 8 | create 'tsdb-tree', 9 | {NAME => 't', VERSIONS => 1, COMPRESSION => 'lzo', BLOOMFILTER => 'ROW'} 10 | 11 | create 'tsdb-meta', 12 | {NAME => 'name', COMPRESSION => 'lzo', BLOOMFILTER => 'ROW'} 13 | 14 | create 'test', 'cf' 15 | 16 | put 'test', 'row1', 'cf:a', 'value1' 17 | put 'test', 'row2', 'cf:b', 'value2' 18 | put 'test', 'row3', 'cf:c', 'value3' 19 | 20 | scan 'test' 21 | 22 | 1474945928013 23 | sys.cpu.user 1474945928013 42.5 host=webserver01 cpu=0 24 | 25 | 26 | curl -H "Content-Type: application/json" -X POST -d '[ 27 | { 28 | "metric": "sys.cpu.nice", 29 | "timestamp": 1474945928016, 30 | "value": 22, 31 | "tags": { 32 | "host": "web01", 33 | "dc": "ff" 34 | } 35 | }, 36 | { 37 | "metric": "sys.cpu.nice", 38 | "timestamp": 1474945928017, 39 | "value": 29, 40 | "tags": { 41 | "host": "web02", 42 | "dc": "lga" 43 | } 44 | } 45 | ]' 192.168.67.8:4242/api/put 46 | 47 | curl "192.168.67.8:4242/api/query?start=2d-ago&m=sum:sys.cpu.nice\{host=web01\}" 48 | -------------------------------------------------------------------------------- /opentsdb/tmp.sh: -------------------------------------------------------------------------------- 1 | TSDB_TABLE=${TSDB_TABLE-'tsdb'} 2 | UID_TABLE=${UID_TABLE-'tsdb-uid'} 3 | TREE_TABLE=${TREE_TABLE-'tsdb-tree'} 4 | META_TABLE=${META_TABLE-'tsdb-meta'} 5 | BLOOMFILTER=${BLOOMFILTER-'ROW'} 6 | # LZO requires lzo2 64bit to be installed + the hadoop-gpl-compression jar. 7 | COMPRESSION=${COMPRESSION-'LZO'} 8 | # All compression codec names are upper case (NONE, LZO, SNAPPY, etc). 9 | COMPRESSION=`echo "$COMPRESSION" | tr a-z A-Z` 10 | 11 | case $COMPRESSION in 12 | (NONE|LZO|GZIP|SNAPPY) :;; # Known good. 13 | (*) 14 | echo >&2 "warning: compression codec '$COMPRESSION' might not be supported." 15 | ;; 16 | esac 17 | 18 | # HBase scripts also use a variable named `HBASE_HOME', and having this 19 | # variable in the environment with a value somewhat different from what 20 | # they expect can confuse them in some cases. So rename the variable. 
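# The heredoc below only writes the table-creation DDL into tmp.txt; to actually apply it,
# the file can be replayed through the HBase shell, e.g. (install path is an assumption):
#   /opt/hbase/bin/hbase shell tmp.txt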
21 | 22 | cat <<EOF > tmp.txt 23 | create '$UID_TABLE', 24 | {NAME => 'id', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}, 25 | {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 26 | 27 | create '$TSDB_TABLE', 28 | {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 29 | 30 | create '$TREE_TABLE', 31 | {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 32 | 33 | create '$META_TABLE', 34 | {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} 35 | EOF 36 | -------------------------------------------------------------------------------- /region.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hbase-region-a 5 | spec: 6 | clusterIP: None 7 | selector: 8 | app: hbase-region-a 9 | ports: 10 | - name: p1 11 | port: 16020 12 | - name: p2 13 | port: 16030 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: hbase-region-b 19 | spec: 20 | clusterIP: None 21 | selector: 22 | app: hbase-region-b 23 | ports: 24 | - name: p1 25 | port: 16020 26 | - name: p2 27 | port: 16030 28 | --- 29 | apiVersion: v1 30 | kind: Pod 31 | metadata: 32 | labels: 33 | app: hbase-region-a 34 | name: hbase-region-a 35 | spec: 36 | containers: 37 | - env: 38 | - name: POD_NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.namespace 42 | - name: POD_NAME 43 | valueFrom: 44 | fieldRef: 45 | fieldPath: metadata.name 46 | - name: HBASE_SERVER_TYPE 47 | value: regionserver 48 | - name: HDFS_PATH 49 | value: mycluster 50 | - name: ZOOKEEPER_SERVICE_LIST 51 | value: zookeeper-1,zookeeper-2,zookeeper-3 52 | - name: ZOOKEEPER_PORT 53 | value: "2181" 54 | - name: ZNODE_PARENT 55 | value: hbase 56 | image: cargo.caicloudprivatetest.com/caicloud/hbase 57 | imagePullPolicy: Always 58 | name: hbase-region-a 59 | ports: 60 | - containerPort: 16020 61 | protocol: TCP 62 | - containerPort: 16030 63 | protocol: TCP 64 | --- 65 | apiVersion: v1 66 | kind: Pod 67 | metadata: 68 | labels: 69 | app: hbase-region-b 70 | name: hbase-region-b 71 | spec: 72 | containers: 73 | - env: 74 | - name: POD_NAMESPACE 75 | valueFrom: 76 | fieldRef: 77 | fieldPath: metadata.namespace 78 | - name: POD_NAME 79 | valueFrom: 80 | fieldRef: 81 | fieldPath: metadata.name 82 | - name: HBASE_SERVER_TYPE 83 | value: regionserver 84 | - name: HDFS_PATH 85 | value: mycluster 86 | - name: ZOOKEEPER_SERVICE_LIST 87 | value: zookeeper-1,zookeeper-2,zookeeper-3 88 | - name: ZOOKEEPER_PORT 89 | value: "2181" 90 | - name: ZNODE_PARENT 91 | value: hbase 92 | image: cargo.caicloudprivatetest.com/caicloud/hbase 93 | imagePullPolicy: Always 94 | name: hbase-region-b 95 | ports: 96 | - containerPort: 16020 97 | protocol: TCP 98 | - containerPort: 16030 99 | protocol: TCP 100 | -------------------------------------------------------------------------------- /zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jboss/base-jdk:7 2 | 3 | MAINTAINER iocanel@gmail.com 4 | 5 | USER root 6 | 7 | ENV ZOOKEEPER_VERSION 3.4.9 8 | EXPOSE 2181 2888 3888 9 | 10 | RUN yum -y install wget bind-utils && yum clean all \ 11 | && wget -q -O - http://apache.mirrors.pair.com/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/zookeeper-${ZOOKEEPER_VERSION}.tar.gz | tar -xzf - -C /opt \ 12 | && mv /opt/zookeeper-${ZOOKEEPER_VERSION} /opt/zookeeper \ 13 | && cp
/opt/zookeeper/conf/zoo_sample.cfg /opt/zookeeper/conf/zoo.cfg \ 14 | && mkdir -p /opt/zookeeper/{data,log} 15 | 16 | WORKDIR /opt/zookeeper 17 | VOLUME ["/opt/zookeeper/conf", "/opt/zookeeper/data", "/opt/zookeeper/log"] 18 | 19 | COPY config-and-run.sh ./bin/ 20 | COPY zoo.cfg ./conf/ 21 | 22 | CMD ["/opt/zookeeper/bin/config-and-run.sh"] 23 | -------------------------------------------------------------------------------- /zookeeper/Makefile: -------------------------------------------------------------------------------- 1 | all: push 2 | 3 | NAME=$(IMAGE_BASE_URL)/zookeeper 4 | 5 | build: 6 | docker build -t $(NAME):$(IMAGE_TAG) . 7 | 8 | push: build 9 | docker push $(NAME):$(IMAGE_TAG) 10 | -------------------------------------------------------------------------------- /zookeeper/README.md: -------------------------------------------------------------------------------- 1 | Fabric8 - ZooKeeper Docker Image 2 | ================================ 3 | 4 | A ZooKeeper Docker Image for use with Kubernetes. 5 | 6 | The image supports the following ZooKeeper modes: 7 | 8 | * Standalone 9 | * Clustered 10 | 11 | # Standalone Mode 12 | To start the image in standalone mode you can simply use: 13 | 14 | docker run fabric8/zookeeper 15 | 16 | # Clustered Mode 17 | To start the image in clustered mode you need to specify a couple of environment variables for the container. 18 | 19 | | Environment Variable | Description | 20 | | --------------------------------------------- | --------------------------------------| 21 | | SERVER_ID | The id of the server | 22 | | MAX_SERVERS | The number of servers in the ensemble | 23 | 24 | 25 | Each container started with both of the above variables will be configured with the following server list: 26 | 27 | server.1=zookeeper-1:2888:3888 28 | server.2=zookeeper-2:2888:3888 29 | server.3=zookeeper-3:2888:3888 30 | ... 31 | server.N=zookeeper-N:2888:3888 32 | 33 | Ensuring that zookeeper-1, zookeeper-2 ... zookeeper-N can be resolved is beyond the scope of this image. 34 | You can use DNS, Kubernetes services, etc., depending on your environment (see below). 35 | 36 | ## Inside Kubernetes 37 | 38 | Inside Kubernetes you can use a pod setup that looks like: 39 | 40 | { 41 | "kind": "Pod", 42 | "apiVersion": "v1beta3", 43 | "metadata": { 44 | "name": "zookeeper-1", 45 | "labels": { 46 | "name": "zookeeper", 47 | "server-id": "1" 48 | } 49 | }, 50 | "spec": { 51 | "containers": [ 52 | { 53 | "name": "server", 54 | "image": "fabric8/zookeeper", 55 | "env":[ 56 | { "name": "SERVER_ID", "value": "1" }, 57 | { "name": "MAX_SERVERS", "value": "3" } 58 | ], 59 | "ports":[ 60 | { 61 | "containerPort": 2181 62 | }, 63 | { 64 | "containerPort": 2888 65 | }, 66 | { 67 | "containerPort": 3888 68 | } 69 | ] 70 | } 71 | ] 72 | } 73 | } 74 | In the example above we are creating a pod that creates a container using this image. The container is configured to use the environment variables required for a clustered setup. 75 | Last but not least, the pod is carefully named (as zookeeper-${SERVER_ID}) so that the other ZooKeeper servers can easily find it by hostname. -------------------------------------------------------------------------------- /zookeeper/config-and-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | echo "$SERVER_ID / $MAX_SERVERS" 5 | if [ ! -z "$SERVER_ID" ] && [ !
-z "$MAX_SERVERS" ]; then 6 | echo "Starting up in clustered mode" 7 | echo "" >> /opt/zookeeper/conf/zoo.cfg 8 | echo "#Server List" >> /opt/zookeeper/conf/zoo.cfg 9 | for i in $( eval echo {1..$MAX_SERVERS});do 10 | if [ "$SERVER_ID" = "$i" ];then 11 | echo "server.$i=0.0.0.0:2888:3888" >> /opt/zookeeper/conf/zoo.cfg 12 | else 13 | echo "server.$i=${ZOOKEEPER_MEMBER}-${i}:2888:3888" >> /opt/zookeeper/conf/zoo.cfg 14 | fi 15 | done 16 | cat /opt/zookeeper/conf/zoo.cfg 17 | 18 | # Persists the ID of the current instance of Zookeeper 19 | echo ${SERVER_ID} > /opt/zookeeper/data/myid 20 | else 21 | echo "Starting up in standalone mode" 22 | fi 23 | 24 | exec /opt/zookeeper/bin/zkServer.sh start-foreground -------------------------------------------------------------------------------- /zookeeper/petset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1alpha1 2 | kind: PetSet 3 | metadata: 4 | name: zookeeper 5 | spec: 6 | serviceName: "zookeeper-cluster" 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | app: zookeeper 12 | spec: 13 | hostname: "zookeeper" 14 | containers: 15 | - name: zookeeper 16 | image: index.caicloud.io/caicloud/zookeeper:sysinfra2 17 | ports: 18 | - containerPort: 2181 19 | - containerPort: 2888 20 | - containerPort: 3888 21 | env: 22 | - name: POD_NAME 23 | valueFrom: 24 | fieldRef: 25 | fieldPath: metadata.name 26 | - name: MAX_SERVERS 27 | value: '3' 28 | -------------------------------------------------------------------------------- /zookeeper/zoo.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | # the directory where the snapshot is stored. 10 | dataDir=/opt/zookeeper/data 11 | # This option will direct the machine to write the transaction log to the dataLogDir rather than the dataDir. This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. 12 | dataLogDir=/opt/zookeeper/log 13 | 14 | # the port at which the clients will connect 15 | clientPort=2181 16 | # 17 | # Be sure to read the maintenance section of the 18 | # administrator guide before turning on autopurge.
19 | # 20 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 21 | # 22 | # The number of snapshots to retain in dataDir 23 | autopurge.snapRetainCount=3 24 | # Purge task interval in hours 25 | # Set to "0" to disable auto purge feature 26 | autopurge.purgeInterval=1 27 | -------------------------------------------------------------------------------- /zookeeper/zookeeper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zookeeper-3 5 | labels: 6 | name: zookeeper-3 7 | spec: 8 | clusterIP: None 9 | ports: 10 | - name: client 11 | port: 2181 12 | targetPort: 2181 13 | - name: followers 14 | port: 2888 15 | targetPort: 2888 16 | - name: election 17 | port: 3888 18 | targetPort: 3888 19 | selector: 20 | name: zookeeper-3 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: zookeeper-2 26 | labels: 27 | name: zookeeper-2 28 | spec: 29 | clusterIP: None 30 | ports: 31 | - name: client 32 | port: 2181 33 | targetPort: 2181 34 | - name: followers 35 | port: 2888 36 | targetPort: 2888 37 | - name: election 38 | port: 3888 39 | targetPort: 3888 40 | selector: 41 | name: zookeeper-2 42 | --- 43 | apiVersion: v1 44 | kind: Service 45 | metadata: 46 | name: zookeeper-1 47 | labels: 48 | name: zookeeper-1 49 | spec: 50 | clusterIP: None 51 | ports: 52 | - name: client 53 | port: 2181 54 | targetPort: 2181 55 | - name: followers 56 | port: 2888 57 | targetPort: 2888 58 | - name: election 59 | port: 3888 60 | targetPort: 3888 61 | selector: 62 | name: zookeeper-1 63 | --- 64 | apiVersion: v1 65 | kind: Pod 66 | metadata: 67 | name: zookeeper-3 68 | labels: 69 | name: zookeeper-3 70 | spec: 71 | containers: 72 | - name: server 73 | image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra 74 | imagePullPolicy: Always 75 | env: 76 | - name: POD_NAMESPACE 77 | valueFrom: 78 | fieldRef: 79 | fieldPath: metadata.namespace 80 | - name: POD_NAME 81 | valueFrom: 82 | fieldRef: 83 | fieldPath: metadata.name 84 | - name: SERVER_ID 85 | value: '3' 86 | - name: MAX_SERVERS 87 | value: '3' 88 | - name: ZOOKEEPER_MEMBER 89 | value: 'zookeeper' 90 | ports: 91 | - containerPort: 2181 92 | - containerPort: 2888 93 | - containerPort: 3888 94 | --- 95 | apiVersion: v1 96 | kind: Pod 97 | metadata: 98 | name: zookeeper-2 99 | labels: 100 | name: zookeeper-2 101 | spec: 102 | containers: 103 | - name: server 104 | image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra 105 | imagePullPolicy: Always 106 | env: 107 | - name: POD_NAMESPACE 108 | valueFrom: 109 | fieldRef: 110 | fieldPath: metadata.namespace 111 | - name: POD_NAME 112 | valueFrom: 113 | fieldRef: 114 | fieldPath: metadata.name 115 | - name: SERVER_ID 116 | value: '2' 117 | - name: MAX_SERVERS 118 | value: '3' 119 | - name: ZOOKEEPER_MEMBER 120 | value: 'zookeeper' 121 | ports: 122 | - containerPort: 2181 123 | - containerPort: 2888 124 | - containerPort: 3888 125 | --- 126 | apiVersion: v1 127 | kind: Pod 128 | metadata: 129 | name: zookeeper-1 130 | labels: 131 | name: zookeeper-1 132 | spec: 133 | containers: 134 | - name: server 135 | image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra 136 | imagePullPolicy: Always 137 | env: 138 | - name: POD_NAMESPACE 139 | valueFrom: 140 | fieldRef: 141 | fieldPath: metadata.namespace 142 | - name: POD_NAME 143 | valueFrom: 144 | fieldRef: 145 | fieldPath: metadata.name 146 | - name: SERVER_ID 147 | value: '1' 148 | - name: MAX_SERVERS 149 | value: 
'3' 150 | - name: ZOOKEEPER_MEMBER 151 | value: 'zookeeper' 152 | ports: 153 | - containerPort: 2181 154 | - containerPort: 2888 155 | - containerPort: 3888 156 | --------------------------------------------------------------------------------
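A minimal smoke test for the cluster defined above, assuming all manifests have been applied and the pods are Running. The ZooKeeper and HBase paths come from the Dockerfiles in this repo; the /opt/hadoop prefix for the namenode binaries is an assumption about the hadoop-base image, so adjust it to your layout:

```
# ZooKeeper: exactly one pod should report "leader", the others "follower"
for i in 1 2 3; do
  kubectl exec zookeeper-$i -- /opt/zookeeper/bin/zkServer.sh status
done

# HDFS HA: expect one "active" and one "standby" (nn0/nn1 as named in hadoop/conf/hdfs-site.xml)
# NOTE: /opt/hadoop is an assumed install prefix, not taken from this repo.
kubectl exec hadoop-namenode-0 -- /opt/hadoop/bin/hdfs haadmin -getServiceState nn0
kubectl exec hadoop-namenode-0 -- /opt/hadoop/bin/hdfs haadmin -getServiceState nn1

# HBase: list the regionservers registered under the configured znode parent
kubectl exec hbase-master-a -- /opt/hbase/bin/hbase zkcli ls /hbase/rs
```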