├── .gitignore
├── LICENSE
├── README.md
├── client
│   ├── README.md
│   ├── Vagrantfile
│   ├── configure.sh
│   ├── hadoop
│   │   ├── core-site.xml.template
│   │   └── hdfs-site.xml.template
│   ├── install_software.sh
│   └── spark
│       ├── spark-defaults.conf
│       └── spark-env.sh
├── coreos
│   ├── cloud-config
│   │   ├── init.yml
│   │   ├── utility-cloud-config.yml
│   │   └── worker-cloud-config.yml
│   └── fleet
│       ├── README.md
│       ├── bin
│       │   ├── remove-framework.sh
│       │   ├── remove-unused-containers.sh
│       │   ├── reset-marathon.sh
│       │   ├── reset-mesos-master.sh
│       │   ├── reset-mesos-slave.sh
│       │   ├── reset-zookeeper.sh
│       │   ├── rolling-restart.sh
│       │   ├── shutdown-mesos.sh
│       │   ├── shutdown-zookeeper.sh
│       │   └── start-mesos.sh
│       ├── docker-registry.service
│       ├── hdfs
│       │   ├── README.md
│       │   ├── datanode.service
│       │   ├── httpfs.service
│       │   ├── journalnode.service
│       │   ├── namenode1.service
│       │   └── namenode2.service
│       ├── hue.service
│       ├── kafka-manager.service
│       ├── kafka.service
│       ├── mesosphere
│       │   ├── bamboo.service
│       │   ├── marathon.service
│       │   ├── mesos-consul.service
│       │   ├── mesos-master.service
│       │   ├── mesos-slave.service
│       │   └── spark-mesos-dispatcher.service
│       └── zookeeper
│           ├── zookeeper1.service
│           ├── zookeeper2.service
│           ├── zookeeper3.service
│           ├── zookeeper4.service
│           └── zookeeper5.service
├── docker
│   ├── .gitignore
│   ├── README.md
│   ├── bamboo
│   │   ├── README.txt
│   │   ├── bamboo-reload.sh
│   │   ├── bamboo-remove.sh
│   │   ├── haproxy_template.cfg
│   │   └── launch-bamboo.sh
│   ├── cloudera-hue
│   │   ├── Dockerfile
│   │   ├── build.sh
│   │   ├── push.sh
│   │   └── run.sh
│   ├── docker-registry-nginx
│   │   ├── auth
│   │   │   └── htpasswd
│   │   ├── certs
│   │   │   └── domain.crt
│   │   ├── registry_config.yml
│   │   └── run.sh
│   ├── docker-registry-open
│   │   ├── Dockerfile
│   │   ├── cert-creation.png
│   │   ├── certs
│   │   │   └── domain.crt
│   │   ├── domain.crt
│   │   ├── registry_config.yml
│   │   └── run.sh
│   ├── docker-registry-secure
│   │   ├── Dockerfile
│   │   ├── auth
│   │   │   └── htpasswd
│   │   ├── cert-creation.png
│   │   ├── certs
│   │   │   └── domain.crt
│   │   ├── domain.crt
│   │   ├── registry_config.yml
│   │   ├── run.sh
│   │   └── run.sh.bak
│   ├── docker-registry
│   │   ├── auth
│   │   │   └── htpasswd
│   │   ├── certs
│   │   │   └── domain.crt
│   │   ├── registry_config.yml
│   │   └── run.sh
│   ├── dsra-hue
│   │   ├── Dockerfile
│   │   ├── bootstrap.sh
│   │   ├── build.sh
│   │   ├── hue.ini.template
│   │   ├── push.sh
│   │   └── run.sh
│   ├── file-server
│   │   ├── Dockerfile
│   │   ├── build.sh
│   │   ├── run.sh
│   │   └── server.xml
│   ├── hadoop
│   │   ├── README.md
│   │   ├── hdfs
│   │   │   ├── Dockerfile
│   │   │   ├── README.md
│   │   │   ├── bootstrap.sh
│   │   │   ├── core-site.xml.template
│   │   │   └── hdfs-site.xml.template
│   │   ├── httpfs
│   │   │   ├── Dockerfile
│   │   │   ├── bootstrap.sh
│   │   │   └── httpfs-site.xml
│   │   └── zookeeper
│   │       ├── Dockerfile
│   │       ├── README.md
│   │       ├── start.sh
│   │       └── zoo.cfg.template
│   ├── haproxy
│   │   ├── Dockerfile
│   │   ├── build.sh
│   │   ├── dsra.haproxy.cfg
│   │   ├── run.sh
│   │   ├── run2.sh
│   │   └── test
│   │       ├── bamboo-reload.sh
│   │       ├── bamboo-remove.sh
│   │       ├── haproxy_template.cfg
│   │       ├── haproxy_template.cfg.orig
│   │       ├── launch-bamboo.sh
│   │       ├── launch-bamboo2.sh
│   │       └── test.sh
│   ├── joshua
│   │   ├── Dockerfile
│   │   ├── build.sh
│   │   ├── output.txt
│   │   └── push.sh
│   ├── kafka-manager
│   │   └── Dockerfile
│   ├── kafka
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   └── bootstrap.sh
│   ├── mesos-dns
│   │   ├── Dockerfile
│   │   ├── bootstrap.sh
│   │   ├── build.sh
│   │   ├── config.json
│   │   ├── install.sh
│   │   └── push.sh
│   ├── mesos-master
│   │   ├── Dockerfile
│   │   ├── build.sh
│   │   └── push.sh
│   ├── mesos-slave
│   │   ├── Dockerfile
│   │   ├── bootstrap.sh
│   │   ├── build.sh
│   │   ├── core-site.xml.template
│   │   ├── hdfs-site.xml.template
│   │   └── push.sh
│   ├── nginx
│   │   └── run.sh
│   └── spark
│       ├── Dockerfile
│       ├── build.sh
│       ├── run.sh
│       ├── spark-env-cluster-mode.sh
│       └── spark-env.sh
├── docs
│   ├── architecture.odp
│   ├── architecture.png
│   └── marathon3.png
├── git
│   └── parker.sh
└── marathon
    ├── README.md
    ├── atsd
    │   ├── atsd.json
    │   └── hue.json
    ├── bamboo.json
    ├── bin
    │   ├── create-new-app.sh
    │   ├── delete-deployment.sh
    │   ├── destroy-app.sh
    │   ├── event-stream.sh
    │   ├── event-stream.sh.save
    │   └── framework-teardown.sh
    ├── chronos.json
    ├── confluence
    │   └── confluence-atlassian.json
    ├── httpfs.json
    ├── hue.json
    ├── kafka-manager.json
    ├── kafka-mesos.json
    ├── python3
    │   ├── python3.json
    │   └── python3.sh
    ├── registry-ui
    │   └── dsra-registry-ui.json
    ├── remove-qntfy-containers.sh
    ├── remove-unused-containers.json
    ├── spark-mesos-dispatcher.json
    └── zk-web.json
/.gitignore:
--------------------------------------------------------------------------------
1 | # Vi swap files
2 | *.swp
3 |
4 | # Compiled source #
5 | ###################
6 | *.com
7 | *.class
8 | *.dll
9 | *.exe
10 | *.o
11 | *.so
12 | target/
13 |
14 | # Eclipse project files #
15 | #########################
16 | .classpath
17 | .project
18 | .settings/
19 | .springBeans
20 |
21 | # Packages #
22 | ############
23 | # it's better to unpack these files and commit the raw source
24 | # git has its own built in compression methods
25 | *.7z
26 | *.dmg
27 | *.gz
28 | *.iso
29 | *.jar
30 | *.rar
31 | *.tar
32 | *.tgz
33 | *.zip
34 | .vagrant
35 |
36 | # Logs and databases #
37 | ######################
38 | *.log
39 | *.sql
40 | *.sqlite
41 |
42 | # OS generated files #
43 | ######################
44 | .DS_Store
45 | .DS_Store?
46 | ._*
47 | .Spotlight-V100
48 | .Trashes
49 | Icon?
50 | ehthumbs.db
51 | Thumbs.db
52 | /.idea/
53 | /*.iml
54 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Data Science Research Architecture, Data Center OS
2 |
3 | ### Overview
4 |
5 | The newest version of the DSRA migrates the system to a Mesos-centric architecture. It provides
6 | significant distributed processing capabilities out of the box, supporting a variety of
7 | distributed frameworks. The distributed processing architecture has been abstracted so that engineers can
8 | develop their own frameworks in a wide range of programming languages, including Java, Python, Ruby,
9 | C, and Go. Additional system information and examples are detailed in the [wiki](https://github.com/aglahe/dsra-dcos/wiki).
10 |
11 | 
12 |
13 | ### Future Releases
14 |
15 | Mesos also supports deploying applications as Docker containers. Long-running processes are
16 | managed by Marathon, which can run commands across the Mesos cluster and deploy containers
17 | from public repositories such as Docker Hub, or from an organization's private Docker registry.
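
A hedged sketch of such a deployment (the Marathon host and the image below are illustrative placeholders, not values taken from this repository): a Docker app definition is POSTed to Marathon's `/v2/apps` REST endpoint, and Marathon then keeps the requested number of instances running across the cluster.

```
# Deploy a Docker container through Marathon's v2 REST API.
curl -X POST http://marathon.dsra.local:8080/v2/apps \
  -H "Content-Type: application/json" \
  -d '{
    "id": "/demo/nginx",
    "cpus": 0.5,
    "mem": 256,
    "instances": 1,
    "container": {
      "type": "DOCKER",
      "docker": { "image": "nginx:latest", "network": "HOST" }
    }
  }'
```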
18 |
19 | Distributed storage is supported in the first software release, utilizing HDFS.
20 |
21 | Future releases and updates will focus on adding more software capabilities, including, but not limited to, the following:
22 |
23 | * ElasticSearch
24 | * Kafka
25 | * HBase
26 | * Myriad/Yarn
27 | * Mongo
28 | * HDFS (Federated)
29 | * Security
30 |
31 | ### License
32 |
33 | DSRA DCOS is licensed under the [Apache 2 License](http://www.apache.org/licenses/LICENSE-2.0)
34 |
35 |
36 |
--------------------------------------------------------------------------------
/client/README.md:
--------------------------------------------------------------------------------
1 | # DSRA Client Vagrant VM
2 |
3 | Please make sure that vbguest is installed (https://github.com/dotless-de/vagrant-vbguest/). It keeps the VirtualBox Guest Additions up to date. Install it from the command line:
4 | ```
5 | vagrant plugin install vagrant-vbguest
6 | ```
7 | When starting this on Linux, do not install Vagrant from your distribution's package. Install it from https://www.vagrantup.com to ensure you are using the latest version of Vagrant; old versions will result in errors.
8 |
9 | The VM currently includes (a quick smoke test is sketched below):
10 | 1. Hadoop 2.7.1 with -site files pointing to the DSRA cluster
11 | 2. Kafka 0.9.0.0
12 | 3. Spark 1.6
13 | 4. fleetctl and etcdctl for CoreOS
14 | 5. libmesos
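
A quick smoke test after provisioning (a sketch; the version strings assume the install scripts above ran cleanly):

```
vagrant up && vagrant ssh
# inside the VM:
hadoop version           # expect Hadoop 2.7.1
spark-submit --version   # expect Spark 1.6.0
kafka-topics.sh          # Kafka 0.9.0.0 CLI on the PATH (prints usage)
fleetctl list-machines   # should reach the DSRA etcd endpoints
```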
15 |
--------------------------------------------------------------------------------
/client/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
5 | VAGRANTFILE_API_VERSION = "2"
6 |
7 | VAGRANT_COMMAND = ARGV[0]
8 |
9 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
10 |
11 | config.vm.provider "virtualbox" do |v|
12 | v.memory = 4096
13 | v.cpus = 2
14 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
15 | end
16 |
17 | # Original starting point of the base box, and deploy script
18 | config.vm.box = "ubuntu/trusty64"
19 | config.vm.hostname = "dsra-client"
20 |
21 | # Get all the software installed on the base box
22 | config.vm.provision :shell, :path => "install_software.sh"
23 |
24 | # Setup configuration for DSRA
25 | config.vm.provision "file", source: "./hadoop/hdfs-site.xml.template", destination: "/tmp/hdfs-site.xml.template"
26 | config.vm.provision "file", source: "./hadoop/core-site.xml.template", destination: "/tmp/core-site.xml.template"
27 | config.vm.provision :shell, :path => "configure.sh"
28 |
29 | config.vm.synced_folder "./spark", "/opt/spark/conf"
30 |
31 | # iPython Notebook forwarded port
32 | #config.vm.network "forwarded_port", guest: 8888, host: 18888
33 |
34 | # To use the same network as the host OS
35 | #config.vm.network "public_network"
36 |
37 | end
38 |
--------------------------------------------------------------------------------
/client/configure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Setup Envs to be used in this script as default
4 | CLUSTER_NAME=dsra
5 | NNODE1_IP=r105u01.dsra.local
6 | NNODE2_IP=r105u03.dsra.local
7 | ZK_IPS=r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181
8 | JN_IPS="r105u01.dsra.local:8485;r105u03.dsra.local:8485;r105u05.dsra.local:8485;r105u07.dsra.local:8485;r105u09.dsra.local:8485"
9 | FLEETCTL_ENDPOINT="http://r105u01.dsra.local:2379,http://r105u03.dsra.local:2379,http://r105u05.dsra.local:2379,http://r105u07.dsra.local:2379,http://r105u09.dsra.local:2379"
10 | ETCDCTL_ENDPOINT="http://r105u01.dsra.local:2379,http://r105u03.dsra.local:2379,http://r105u05.dsra.local:2379,http://r105u07.dsra.local:2379,http://r105u09.dsra.local:2379"
11 |
12 | # Now get fleetctl and etcdctl setup
13 | sudo echo "export FLEETCTL_ENDPOINT=$FLEETCTL_ENDPOINT" >> /etc/profile.d/fleet.sh
14 | sudo echo "export ETCDCTL_ENDPOINT=$ETCDCTL_ENDPOINT" >> /etc/profile.d/etcd.sh
15 |
16 | # Replace all the variables in hdfs-site.xml
17 | sudo sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /tmp/hdfs-site.xml.template \
18 | | sed "s/NNODE1_IP/$NNODE1_IP/" \
19 | | sed "s/NNODE2_IP/$NNODE2_IP/" \
20 | | sed "s/JNODES/$JN_IPS/" \
21 | > /opt/hadoop/etc/hadoop/hdfs-site.xml
22 |
23 | # Replace all the variables in core-site.xml
24 | sudo sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /tmp/core-site.xml.template \
25 | | sed "s/ZK_IPS/$ZK_IPS/" \
26 | > /opt/hadoop/etc/hadoop/core-site.xml
27 |
--------------------------------------------------------------------------------
/client/hadoop/core-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>fs.defaultFS</name>
5 |     <value>hdfs://CLUSTER_NAME</value>
6 |   </property>
7 |   <property>
8 |     <name>ha.zookeeper.quorum</name>
9 |     <value>ZK_IPS</value>
10 |   </property>
11 |
12 |   <property>
13 |     <name>fs.trash.interval</name>
14 |     <value>30</value>
15 |   </property>
16 |   <property>
17 |     <name>fs.trash.checkpoint.interval</name>
18 |     <value>15</value>
19 |   </property>
20 |
21 |   <property>
22 |     <name>hadoop.proxyuser.mapred.hosts</name>
23 |     <value>*</value>
24 |   </property>
25 |   <property>
26 |     <name>hadoop.proxyuser.mapred.groups</name>
27 |     <value>*</value>
28 |   </property>
29 |
30 |   <property>
31 |     <name>hadoop.proxyuser.httpfs.hosts</name>
32 |     <value>*</value>
33 |   </property>
34 |   <property>
35 |     <name>hadoop.proxyuser.httpfs.groups</name>
36 |     <value>*</value>
37 |   </property>
38 |
39 |   <property>
40 |     <name>hadoop.proxyuser.hue.hosts</name>
41 |     <value>*</value>
42 |   </property>
43 |   <property>
44 |     <name>hadoop.proxyuser.hue.groups</name>
45 |     <value>*</value>
46 |   </property>
47 | </configuration>
48 |
--------------------------------------------------------------------------------
/client/hadoop/hdfs-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>dfs.nameservices</name>
5 |     <value>CLUSTER_NAME</value>
6 |   </property>
7 |   <property>
8 |     <name>dfs.ha.namenodes.CLUSTER_NAME</name>
9 |     <value>nn1,nn2</value>
10 |   </property>
11 |
12 |   <property>
13 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn1</name>
14 |     <value>NNODE1_IP:8020</value>
15 |   </property>
16 |   <property>
17 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn2</name>
18 |     <value>NNODE2_IP:8020</value>
19 |   </property>
20 |   <property>
21 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn1</name>
22 |     <value>NNODE1_IP:8022</value>
23 |   </property>
24 |   <property>
25 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn2</name>
26 |     <value>NNODE2_IP:8022</value>
27 |   </property>
28 |   <property>
29 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn1</name>
30 |     <value>NNODE1_IP:50070</value>
31 |   </property>
32 |   <property>
33 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn2</name>
34 |     <value>NNODE2_IP:50070</value>
35 |   </property>
36 |   <property>
37 |     <name>dfs.namenode.name.dir</name>
38 |     <value>file:///data/hdfs/nn</value>
39 |     <description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
40 |   </property>
41 |
42 |   <property>
43 |     <name>dfs.namenode.shared.edits.dir</name>
44 |     <value>qjournal://JNODES/CLUSTER_NAME</value>
45 |   </property>
46 |   <property>
47 |     <name>dfs.journalnode.edits.dir</name>
48 |     <value>/data/hdfs/journal</value>
49 |   </property>
50 |
51 |   <property>
52 |     <name>dfs.datanode.data.dir</name>
53 |     <value>file:///data/hdfs/dn</value>
54 |     <description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
55 |   </property>
56 |
57 |   <property>
58 |     <name>dfs.ha.automatic-failover.enabled</name>
59 |     <value>true</value>
60 |   </property>
61 |   <property>
62 |     <name>dfs.client.failover.proxy.provider.CLUSTER_NAME</name>
63 |     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
64 |   </property>
65 |   <property>
66 |     <name>dfs.ha.fencing.methods</name>
67 |     <value>shell(/bin/true)</value>
68 |   </property>
69 |
70 |   <property>
71 |     <name>dfs.namenode.replication.min</name>
72 |     <value>3</value>
73 |     <final>true</final>
74 |   </property>
75 |   <property>
76 |     <name>dfs.replication.max</name>
77 |     <value>10</value>
78 |     <final>true</final>
79 |   </property>
80 |   <property>
81 |     <name>mapreduce.client.submit.file.replication</name>
82 |     <value>3</value>
83 |     <final>true</final>
84 |   </property>
85 |
86 |   <property>
87 |     <name>dfs.webhdfs.enabled</name>
88 |     <value>true</value>
89 |   </property>
90 |   <property>
91 |     <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
92 |     <value>true</value>
93 |   </property>
94 | </configuration>
95 |
--------------------------------------------------------------------------------
/client/install_software.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # install the little things
4 | sudo apt-get update
5 | sudo apt-get upgrade -y
6 | sudo apt-get install -y wget
7 |
8 | # Install Java
9 | sudo apt-get install -y openjdk-7-jdk
10 | sudo echo "JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64" >> /etc/environment
11 |
12 | # Get pip and virtualenv
13 | echo "Install pip, virtualenv, ipython"
14 | sudo wget https://bootstrap.pypa.io/get-pip.py
15 | sudo python get-pip.py
16 | sudo pip install virtualenv
17 | sudo pip install ipython
18 |
19 | # Now install golang
20 | echo "Install Go"
21 | sudo wget -q -O - https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz | tar -xzf - -C /opt
22 | sudo echo "export GOROOT=/opt/go" >> /etc/profile.d/go.sh
23 | sudo echo "export PATH=/opt/go/bin:\$PATH" >> /etc/profile.d/go.sh
24 |
25 | # Get Hadoop, and "install" it
26 | echo "Install Hadoop"
27 | sudo wget -q -O - http://apache.mirrors.pair.com/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz | tar -xzf - -C /opt
28 | sudo /usr/sbin/groupadd -r hdfs
29 | sudo /usr/sbin/useradd -r -g hdfs hdfs
30 | sudo /usr/sbin/groupadd -r hadoop
31 | sudo /usr/sbin/useradd -r -g hadoop hadoop
32 | sudo /bin/chown hadoop.hadoop -R /opt/hadoop-2.7.1
33 | sudo /bin/ln -s /opt/hadoop-2.7.1 /opt/hadoop
34 | sudo echo "export HADOOP_HOME=/opt/hadoop" >> /etc/profile.d/hdfs.sh
35 | sudo echo "export PATH=/opt/hadoop/bin:\$PATH" >> /etc/profile.d/hdfs.sh
36 |
37 | # Get Docker installed
38 | echo "Install Docker"
39 | sudo curl -sSL https://get.docker.com/ | sh
40 |
41 | # Now get fleetctl and etcdctl and set them up
42 | echo "Install fleetctl and etcdctl"
43 | sudo apt-get install -y git
44 | sudo git clone https://github.com/coreos/fleet.git /opt/fleet
45 | sudo git clone https://github.com/coreos/etcd.git /opt/etcd
46 | cd /opt/fleet && sudo env "PATH=$PATH:/opt/go/bin" /opt/fleet/build
47 | cd /opt/etcd && sudo env "PATH=$PATH:/opt/go/bin" /opt/etcd/build
48 | sudo echo "export PATH=/opt/etcd/bin:\$PATH" >> /etc/profile.d/etcd.sh
49 | sudo echo "export PATH=/opt/fleet/bin:\$PATH" >> /etc/profile.d/fleet.sh
50 |
51 | # Get Scala 2.11.7
52 | # sudo wget -q -O - http://www.scala-lang.org/files/archive/scala-2.11.7.tgz | tar -xzf - -C /usr/lib
53 | # sudo ln -s /usr/lib/scala-2.11.7 /usr/lib/scala
54 | # sudo echo "export SCALA_HOME=/usr/lib/scala" >> /etc/profile.d/scala.sh
55 | # sudo echo "export PATH=\$SCALA_HOME/bin:\$PATH" >> /etc/profile.d/scala.sh
56 |
57 | # Spark 1.6
58 | echo "Install Spark 1.6"
59 | sudo wget -q -O - http://d3kbcqa49mib13.cloudfront.net/spark-1.6.0-bin-hadoop2.6.tgz | tar -xzf - -C /opt
60 | sudo ln -s /opt/spark-1.6.0-bin-hadoop2.6 /opt/spark
61 | sudo echo "export PATH=/opt/spark/bin:\$PATH" >> /etc/profile.d/spark.sh
62 |
63 | # Get MESOS libs installed
64 | echo "Install Mesos Libs"
65 | sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
66 | echo "deb http://repos.mesosphere.com/ubuntu trusty main" | sudo tee /etc/apt/sources.list.d/mesosphere.list
67 | sudo apt-get -y install mesos
68 |
69 | # Kafka
70 | echo "Install Kafka"
71 | sudo wget -q -O - http://apache.arvixe.com/kafka/0.9.0.0/kafka_2.11-0.9.0.0.tgz | tar -xzf - -C /opt
72 | sudo ln -s /opt/kafka_2.11-0.9.0.0 /opt/kafka
73 | sudo mkdir -p /var/log/kafka
74 | sudo echo "export PATH=/opt/kafka/bin:\$PATH" >> /etc/profile.d/kafka.sh
75 |
76 | #Mongodb-org
77 | echo "Install Mongodb"
78 | sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
79 | echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.0.list
80 | sudo apt-get update
81 | sudo apt-get install -y mongodb-org
82 |
--------------------------------------------------------------------------------
/client/spark/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Find the Mesos Master in Zookeeper
2 | spark.master=mesos://zk://r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181/mesos
3 |
4 | # Hadoop site files to be used on the mesos-slaves
5 | spark.files=file:///usr/local/hadoop/etc/hadoop/core-site.xml,file:///usr/local/hadoop/etc/hadoop/hdfs-site.xml
6 |
7 | # Log events
8 | spark.eventLog.enabled=true
9 | spark.eventLog.dir=hdfs://dsra/user/spark/applicationHistory
10 |
--------------------------------------------------------------------------------
/client/spark/spark-env.sh:
--------------------------------------------------------------------------------
1 | # Set the python shell to use ipython
2 | export PYSPARK_DRIVER_PYTHON=ipython
3 |
4 | # Location of Hadoop conf for the Driver
5 | export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
6 |
7 | # Location of the MESOS Library
8 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
9 |
10 | # Where the Spark Dist lives
11 | export SPARK_EXECUTOR_URI=hdfs://dsra/user/spark/dist/spark-1.6.0-bin-2.6.0.tgz
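
# Usage sketch (hedged; paths assume the client-VM layout set up by
# install_software.sh). With the Mesos master already set in
# spark-defaults.conf, an interactive shell needs no extra flags:
#   /opt/spark/bin/pyspark
# and a batch job is submitted the same way:
#   /opt/spark/bin/spark-submit my_job.py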
12 |
--------------------------------------------------------------------------------
/coreos/cloud-config/init.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 |
4 | #
5 | # Work around given that $private_ipv4 isn't supported when performing bare metal installs.
6 | # Formats and mounts the ephemeral and data drives at first boot.
7 | #
8 | coreos:
9 | units:
10 | - name: format-var-lib-docker.service
11 | command: start
12 | content: |
13 | [Unit]
14 | Description=Formats the ephemeral drive
15 | [Service]
16 | Type=oneshot
17 | RemainAfterExit=yes
18 | ExecStart=/usr/sbin/wipefs -f /dev/sda
19 | ExecStart=/usr/sbin/mkfs.btrfs -m single -f /dev/sda
20 | - name: format-data.service
21 | command: start
22 | content: |
23 | [Unit]
24 | Description=Formats the data drive
25 | [Service]
26 | Type=oneshot
27 | RemainAfterExit=yes
28 | ExecStart=/usr/sbin/wipefs -f /dev/sdb
29 | ExecStart=/usr/sbin/mkfs.btrfs -m single -f /dev/sdb
30 | - name: var-lib-docker.mount
31 | command: start
32 | content: |
33 | [Unit]
34 | Description=Mount ephemeral to /var/lib/docker
35 | Before=docker.service
36 | [Mount]
37 | What=/dev/sda
38 | Where=/var/lib/docker
39 | Type=btrfs
40 | - name: data.mount
41 | command: start
42 | content: |
43 | [Unit]
44 | Description=Mount data
45 | [Mount]
46 | What=/dev/sdb
47 | Where=/data
48 | Type=btrfs
49 | ssh_authorized_keys:
50 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFDeCKt/GYFLig9AViMGsCADyXpt3F/D62m2eJPmxUs4EQAT/R3KKMb34T5JIoTfD/AMMffFO4aiRey1YP9ezkbpCBgA6PSU3DO2E75Kb9Y4P+W0iJetGv0pfr8D1nI0bc1wCF96cvODrGwzpSu2tyHeh2IGNYZc3EiExvXb3GhhZ17sXTW5RfBYqgZiV1SJBCbCajeDP4c4ZAI2mW87nJGWXocvZltSKW/GJlNSzNxz6u4mKsKl8YgF812UGDZwbJdI46vFBQ/LKVd0A3nKhh7zMdZU/kYYc1xgUJGHgXr5LM8UK7EhLeHOwLazQPw2mL3EF6kQn6g8FGmQgdeFrb
51 |
--------------------------------------------------------------------------------
/coreos/cloud-config/utility-cloud-config.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | #
4 | # Work around given that $private_ipv4 isn't supported when performing bare metal installs.
5 | # Generates unit drop-ins for etcd2.service and fleet.service.
6 | #
7 | write_files:
8 | - path: /tmp/get_ip.sh
9 | permissions: 0700
10 | owner: root:root
11 | content: |
12 | #!/bin/bash
13 | logger "Create the etcd and fleet config files"
14 | PRIV_IP=`ifconfig eno1 | sed -n 2p | awk '{ print $2 }'`
15 | TARGET_DIR=/run/systemd/system/etcd2.service.d/
16 | TARGET="$TARGET_DIR/20-settings.conf"
17 | HOST=`/usr/bin/hostname`
18 | mkdir -p $TARGET_DIR
19 |
20 | echo "[Service]" > $TARGET
21 | echo "Environment=\"ETCD_NAME=$HOST\"" >> $TARGET
22 | #echo "Environment=\"ETCD_DISCOVERY_SRV=dsra.local\"" >> $TARGET
23 | echo "Environment=\"ETCD_INITIAL_ADVERTISE_PEER_URLS=http://$PRIV_IP:2380\"" >> $TARGET
24 | #echo "Environment=\"ETCD_INITIAL_CLUSTER_TOKEN=dsra-cluster\"" >> $TARGET
25 | echo "Environment=\"ETCD_INITIAL_CLUSTER_STATE=new\"" >> $TARGET
26 | echo "Environment=\"ETCD_INITIAL_CLUSTER=r105u01=http://10.105.0.1:2380,r105u03=http://10.105.0.3:2380,r105u05=http://10.105.0.5:2380,r105u07=http://10.105.0.7:2380,r105u09=http://10.105.0.9:2380\"" >> $TARGET
27 | echo "Environment=\"ETCD_ADVERTISE_CLIENT_URLS=http://$PRIV_IP:2379\"" >> $TARGET
28 | echo "Environment=\"ETCD_LISTEN_CLIENT_URLS=http://$PRIV_IP:2379,http://127.0.0.1:2379\"" >> $TARGET
29 | echo "Environment=\"ETCD_LISTEN_PEER_URLS=http://$PRIV_IP:2380\"" >> $TARGET
30 |
31 | TARGET_DIR=/run/systemd/system/fleet.service.d
32 | TARGET="$TARGET_DIR/20-settings.conf"
33 | mkdir -p $TARGET_DIR
34 | echo "[Service]" > $TARGET
35 | echo "Environment=\"FLEET_ETCD_SERVERS=http://10.105.0.1:2379,http://10.105.0.3:2379,http://10.105.0.5:2379,http://10.105.0.7:2379,http://10.105.0.9:2379\"" >> $TARGET
36 | echo "Environment=\"FLEET_PUBLIC_IP=$PRIV_IP\"" >> $TARGET
37 | echo "Environment=\"FLEET_METADATA=role=service,ip=$PRIV_IP\"" >> $TARGET
38 | logger "etcd and fleet config files written"
39 |
40 | #
41 | # Required to pull private docker images via marathon.
42 | #
43 |
44 | - path: /etc/docker/certs.d/hub.dsra.local:5000/ca.crt
45 | permissions: 0700
46 | owner: root:root
47 | content: |
48 | -----BEGIN CERTIFICATE-----
49 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
50 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
51 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
52 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
53 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
54 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
55 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
56 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
57 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
58 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
59 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
60 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
61 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
62 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
63 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
64 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
65 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
66 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
67 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
68 | -----END CERTIFICATE-----
69 |
70 | #
71 | # Required to pull private docker registry images via fleet.
72 | #
73 |
74 | - path: /home/core/.docker/config.json
75 | permissions: 0700
76 | owner: core:core
77 | content: |
78 | {
79 | "auths": {
80 | "hub.dsra.local:5000": {
81 | "auth": "bXBhcmtlcjpYREB0YTYxNm1w",
82 | "email": ""
83 | }
84 | }
85 | }
86 |
87 | - path: /tmp/get-docker-registry-login.sh
88 | permissions: 0700
89 | owner: root:root
90 | content: |
91 | #!/bin/bash
92 | cd /etc
93 | wget http://10.105.255.249:8088/dsra/repo/registry/docker.tar.gz
94 |
95 | - path: /tmp/reset-interfaces
96 | permissions: 0700
97 | owner: root:root
98 | content: |
99 | #!/bin/bash
100 | logger "Resetting network interfaces for bonding...."
101 | ip link set eno1 down
102 | ip link set eno2 down
103 | systemctl restart systemd-networkd
104 | logger "Done resetting network, now with bonding."
105 |
106 | - path: /etc/systemd/network/10-eno1.network
107 | permissions: 0644
108 | owner: root
109 | content: |
110 | [Match]
111 | Name=eno1
112 |
113 | [Network]
114 | Bond=bond0
115 |
116 | - path: /etc/systemd/network/11-eno2.network
117 | permissions: 0644
118 | owner: root
119 | content: |
120 | [Match]
121 | Name=eno2
122 |
123 | [Network]
124 | Bond=bond0
125 |
126 | - path: /etc/systemd/network/20-bond.netdev
127 | permissions: 0644
128 | owner: root
129 | content: |
130 | [NetDev]
131 | Name=bond0
132 | Kind=bond
133 |
134 | - path: /etc/systemd/network/30-setup-bonded-dhcp.network
135 | permissions: 0644
136 | owner: root
137 | content: |
138 | [Match]
139 | Name=bond0
140 |
141 | [Network]
142 | DHCP=true
143 |
144 | [DHCP]
145 | UseDomains=true
146 | - path: /etc/modprobe.d/bonding.conf
147 | permissions: 0644
148 | owner: root
149 | content: |
150 | options bonding mode=6 miimon=100
151 | - path: /etc/modules-load.d/bonding.conf
152 | permissions: 0644
153 | owner: root
154 | content: |
155 | bonding
156 |
157 | coreos:
158 | units:
159 | - name: config-etcd2.service
160 | command: start
161 | content: |
162 | [Unit]
163 | Description=Config ETCD2
164 | Before=etcd2.service reset-interfaces.service
165 |
166 | [Service]
167 | User=root
168 | Type=oneshot
169 | ExecStart=/tmp/get_ip.sh
170 |
171 | - name: setup-docker-registry-login.service
172 | command: start
173 | content: |
174 | [Unit]
175 | Description=Setup Docker Registry Login for Marathon
176 | Before=docker.service
177 |
178 | [Service]
179 | User=root
180 | Type=oneshot
181 | ExecStart=/tmp/get-docker-registry-login.sh
182 |
183 | - name: reset-interfaces.service
184 | command: start
185 | content: |
186 | [Unit]
187 | Description=Setting interfaces to down and restarting networking post boot.
188 | Requires=config-etcd2.service
189 | Before=etcd2.service
190 | After=config-etcd2.service network-online.target
191 |
192 | [Service]
193 | User=root
194 | Type=oneshot
195 | ExecStart=/tmp/reset-interfaces
196 | - name: etcd2.service
197 | command: start
198 | - name: fleet.service
199 | command: start
200 | - name: var-lib-docker.mount
201 | command: start
202 | content: |
203 | [Unit]
204 | Description=Mount ephemeral to /var/lib/docker
205 | Before=docker.service
206 | [Mount]
207 | What=/dev/sda
208 | Where=/var/lib/docker
209 | Type=btrfs
210 | - name: var-lib-docker-volumes.mount
211 | command: start
212 | content: |
213 | [Unit]
214 | Description=Mount Docker Volumes
215 | [Mount]
216 | What=/dev/sdb
217 | Where=/var/lib/docker/volumes
218 | Type=btrfs
219 |
220 | ssh_authorized_keys:
221 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFDeCKt/GYFLig9AViMGsCADyXpt3F/D62m2eJPmxUs4EQAT/R3KKMb34T5JIoTfD/AMMffFO4aiRey1YP9ezkbpCBgA6PSU3DO2E75Kb9Y4P+W0iJetGv0pfr8D1nI0bc1wCF96cvODrGwzpSu2tyHeh2IGNYZc3EiExvXb3GhhZ17sXTW5RfBYqgZiV1SJBCbCajeDP4c4ZAI2mW87nJGWXocvZltSKW/GJlNSzNxz6u4mKsKl8YgF812UGDZwbJdI46vFBQ/LKVd0A3nKhh7zMdZU/kYYc1xgUJGHgXr5LM8UK7EhLeHOwLazQPw2mL3EF6kQn6g8FGmQgdeFrb
222 |
--------------------------------------------------------------------------------
/coreos/cloud-config/worker-cloud-config.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | write_files:
3 |
4 | #
5 | # Required to pull private docker images via marathon.
6 | #
7 |
8 | - path: /etc/docker/certs.d/hub.dsra.local:5000/ca.crt
9 | permissions: 0700
10 | owner: root
11 | content: |
12 | -----BEGIN CERTIFICATE-----
13 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
14 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
15 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
16 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
17 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
18 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
19 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
20 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
21 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
22 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
23 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
24 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
25 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
26 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
27 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
28 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
29 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
30 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
31 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
32 | -----END CERTIFICATE-----
33 |
34 |
35 | #
36 | # Required to pull private docker images via fleet.
37 | #
38 |
39 | - path: /home/core/.docker/config.json
40 | permissions: 0700
41 | owner: core:core
42 | content: |
43 | {
44 | "auths": {
45 | "hub.dsra.local:5000": {
46 | "auth": "bXBhcmtlcjpYREB0YTYxNm1w",
47 | "email": ""
48 | }
49 | }
50 | }
51 |
52 | - path: /tmp/get-docker-registry-login.sh
53 | permissions: 0700
54 | owner: root:root
55 | content: |
56 | #!/bin/bash
57 | cd /etc
58 | wget http://10.105.255.249:8088/dsra/repo/registry/docker.tar.gz
59 |
60 | coreos:
61 | etcd2:
62 | proxy: on
63 | discovery-srv: dsra.local
64 | listen-client-urls: http://0.0.0.0:2379
65 |
66 | fleet:
67 | etcd_servers: http://10.105.0.1:2379,http://10.105.0.3:2379,http://10.105.0.5:2379,http://10.105.0.7:2379,http://10.105.0.9:2379
68 | metadata: role=worker
69 |
70 | units:
71 | - name: etcd2.service
72 | command: start
73 |
74 | - name: fleet.service
75 | command: start
76 |
77 | - name: setup-docker-registry-login.service
78 | command: start
79 | content: |
80 | [Unit]
81 | Description=Setup Docker Registry Login for Marathon
82 | Before=docker.service
83 |
84 | [Service]
85 | User=root
86 | Type=oneshot
87 | ExecStart=/tmp/get-docker-registry-login.sh
88 |
89 | - name: var-lib-docker.mount
90 | command: start
91 | content: |
92 | [Unit]
93 | Description=Mount ephemeral to /var/lib/docker
94 | Before=docker.service
95 | [Mount]
96 | What=/dev/sda
97 | Where=/var/lib/docker
98 | Type=btrfs
99 |
100 | - name: var-lib-docker-volumes.mount
101 | command: start
102 | content: |
103 | [Unit]
104 | Description=Mount Docker Volumes
105 | [Mount]
106 | What=/dev/sdb
107 | Where=/var/lib/docker/volumes
108 | Type=btrfs
109 |
110 | - name: reset-interfaces.service
111 | command: start
112 | content: |
113 | [Unit]
114 | Description=Setting interfaces to down and restarting networking post boot.
115 | After=network-online.target
116 |
117 | [Service]
118 | Type=oneshot
119 | ExecStart=/tmp/reset-interfaces
120 |
121 | write_files:
122 | - path: /tmp/reset-interfaces
123 | permissions: 0700
124 | owner: root
125 | content: |
126 | #!/bin/bash
127 | logger "Resetting network interfaces for bonding."
128 | ip link set eno1 down
129 | ip link set eno2 down
130 | systemctl restart systemd-networkd
131 | logger "Network interfaces have been reset"
132 | rm /tmp/reset-interfaces
133 | - path: /etc/systemd/network/10-eno1.network
134 | permissions: 0644
135 | owner: root
136 | content: |
137 | [Match]
138 | Name=eno1
139 |
140 | [Network]
141 | Bond=bond0
142 | - path: /etc/systemd/network/11-eno2.network
143 | permissions: 0644
144 | owner: root
145 | content: |
146 | [Match]
147 | Name=eno2
148 |
149 | [Network]
150 | Bond=bond0
151 | - path: /etc/systemd/network/20-bond.netdev
152 | permissions: 0644
153 | owner: root
154 | content: |
155 | [NetDev]
156 | Name=bond0
157 | Kind=bond
158 |
159 | [Bond]
160 | Mode=6
161 | - path: /etc/systemd/network/30-setup-bonded-dhcp.network
162 | permissions: 0644
163 | owner: root
164 | content: |
165 | [Match]
166 | Name=bond0
167 |
168 | [Network]
169 | DHCP=true
170 |
171 | [DHCP]
172 | UseDomains=true
173 | - path: /etc/modprobe.d/bonding.conf
174 | permissions: 0644
175 | owner: root
176 | content: |
177 | options bonding mode=6 miimon=100
178 | - path: /etc/modules-load.d/bonding.conf
179 | permissions: 0644
180 | owner: root
181 | content: |
182 | bonding
183 |
184 | ssh_authorized_keys:
185 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFDeCKt/GYFLig9AViMGsCADyXpt3F/D62m2eJPmxUs4EQAT/R3KKMb34T5JIoTfD/AMMffFO4aiRey1YP9ezkbpCBgA6PSU3DO2E75Kb9Y4P+W0iJetGv0pfr8D1nI0bc1wCF96cvODrGwzpSu2tyHeh2IGNYZc3EiExvXb3GhhZ17sXTW5RfBYqgZiV1SJBCbCajeDP4c4ZAI2mW87nJGWXocvZltSKW/GJlNSzNxz6u4mKsKl8YgF812UGDZwbJdI46vFBQ/LKVd0A3nKhh7zMdZU/kYYc1xgUJGHgXr5LM8UK7EhLeHOwLazQPw2mL3EF6kQn6g8FGmQgdeFrb
186 |
--------------------------------------------------------------------------------
/coreos/fleet/README.md:
--------------------------------------------------------------------------------
1 | # Fleet Notes
2 | *from https://github.com/coreos/fleet/tree/master/Documentation*
3 |
4 | * **Clone and Build Fleet**
5 |
6 | * git clone https://github.com/coreos/fleet.git
7 |
8 | * ./build
9 |
10 | * **Add this to your .profile**
11 |
12 | * export FLEETCTL_ENDPOINT="http://r105u01.dsra.local:2379,http://r105u03.dsra.local:2379,http://r105u05.dsra.local:2379,http://r105u07.dsra.local:2379,http://r105u09.dsra.local:2379"
13 |
14 | ## Fleet Unit States
15 | *from https://github.com/coreos/fleet/blob/master/Documentation/states.md*
16 |
17 | fleet uses a _declarative model_ to evaluate unit state. This means that operations to change the state of units (e.g. `fleetctl` commands, or calls to the fleet API) change the desired state, rather than directly performing any state change. There are currently three cluster-level states for a unit:
18 |
19 | - `inactive`: known by fleet, but not assigned to a machine
20 | - `loaded`: assigned to a machine and loaded into systemd there, but not started
21 | - `launched`: loaded into systemd, and fleet has called the equivalent of `systemctl start`
22 |
23 | Units may only transition directly between these states. For example, for a unit to transition from `inactive` to `launched` it must first pass through the `loaded` state.
24 |
25 | The desired and last known states are exposed in the `DSTATE` and `STATE` columns of the output from `fleetctl list-unit-files`.
26 |
27 | The `fleetctl` commands to act on units change the *desired state* of a unit. fleet itself is then responsible for performing the necessary state transitions to move a unit to the desired state. The following table explains the relationship between each `fleetctl` command and unit states.
28 |
29 | | Command | Description | Desired State | Valid Previous States | Is an alias for |
30 | |---------|-------------|--------------|-----|----|
31 | | `fleetctl submit` | Submits unit file into etcd storage | `inactive` | `none` | |
32 | | `fleetctl load` | Submits and schedules unit file into machines' systemd but doesn't start it | `loaded` | `none` or `inactive` | `submit+load` |
33 | | `fleetctl start` | Submits, schedules and starts unit file| `launched` | `none` or `inactive` or `loaded` | `submit+load+start` |
34 | | `fleetctl stop` | Stops scheduled unit file | `loaded` | `launched` | |
35 | | `fleetctl unload` | Stops and unschedules unit file from machines' systemd | `inactive`| `launched` or `loaded` | `stop+unload` |
36 | | `fleetctl destroy` | Stops, unschedules and removes unit file from etcd storage| `none` | `launched` or `loaded` or `inactive` | `stop+unload+destroy` |
37 |
38 | `none` indicates that the unit has not yet been submitted to fleet at all (or it previously existed in fleet but was destroyed).
39 |
40 | For example:
41 | - if a unit is `inactive`, then `fleetctl start` will cause it to be `loaded` and then `launched`
42 | - if a unit is `loaded`, then `fleetctl destroy` will cause it to be `inactive` and then `none`
43 | - if a unit is `inactive`, then `fleetctl stop` is an invalid action
44 |
45 |
46 | ## Example of how to deploy a service on CoreOS using Fleetctl
47 |
48 | *from https://coreos.com/fleet/docs/latest/using-the-client.html*
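
A minimal lifecycle walkthrough, assuming a unit file `myapp.service` in the current directory (the unit name is a placeholder):

```
fleetctl submit myapp.service    # desired state: inactive
fleetctl load myapp.service      # desired state: loaded (scheduled, not started)
fleetctl start myapp.service     # desired state: launched
fleetctl list-unit-files         # DSTATE/STATE columns show the transitions
fleetctl stop myapp.service      # back to loaded
fleetctl destroy myapp.service   # stop + unload + remove from etcd
```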
49 |
50 | # Etcd Notes
51 | * **Clone and Build Etcd**
52 |
53 | * git clone https://github.com/coreos/etcd.git
54 |
55 | * ./build
56 |
57 | * **Add this to your .profile**
58 | * export ETCDCTL_ENDPOINT="http://r105u01.dsra.local:2379,http://r105u03.dsra.local:2379,http://r105u05.dsra.local:2379,http://r105u07.dsra.local:2379,http://r105u09.dsra.local:2379"
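
With `ETCDCTL_ENDPOINT` exported, a quick sanity check of the cluster (etcdctl v2 commands):

```
etcdctl cluster-health
etcdctl ls / --recursive
```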
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/remove-framework.sh:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/remove-unused-containers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | for containerId in `docker ps -a | grep Exited | grep -Po '^([\d\w])+'`;
4 | do
5 | docker rm -v $containerId
6 | done
7 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/reset-marathon.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl list-unit-files
3 |
4 | fleetctl destroy marathon.service
5 | fleetctl submit ../mesosphere/marathon.service
6 | fleetctl start marathon.service
7 |
8 | fleetctl list-unit-files
9 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/reset-mesos-master.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl list-unit-files
3 | fleetctl destroy mesos-master@{1..3}.service
4 | fleetctl destroy mesos-master@.service
5 | fleetctl submit ../mesosphere/mesos-master\@.service
6 | fleetctl start mesos-master@{1..3}.service
7 | fleetctl list-unit-files
8 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/reset-mesos-slave.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl list-unit-files
3 | fleetctl stop mesos-slave.service
4 | fleetctl destroy mesos-slave.service
5 | fleetctl submit ../marathon/mesos-slave.service
6 | fleetctl start mesos-slave.service
7 | fleetctl list-unit-files
8 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/reset-zookeeper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl list-unit-files
3 | fleetctl destroy zookeeper{1..5}.service
4 | fleetctl submit ../zookeeper/zookeeper*.service
5 | fleetctl start zookeeper{1..5}.service
6 | fleetctl list-unit-files
7 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/rolling-restart.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # usage: ./rolling-restart.sh [service unit name prefix][@]
4 | #
5 | # reference: http://engineering.rainchasers.com/coreos/fleet/2015/03/03/rolling-unit-restart.html
6 | #
7 |
8 | if [ "$#" -ne 1 ]; then
9 | echo "Usage: $0 your-service-prefix" >&2
10 | exit 1
11 | fi
12 |
13 | echo "Executing..."
14 |
15 | fleetctl list-units | grep $1 | cut -f1 -d. | while read -r unit ; do
16 | unit_index=`echo $unit | cut -f2 -d@`
17 |
18 | printf "unit:> %s index:> %s\n" $unit $unit_index
19 |
20 | printf "stopping:> %s\n" $unit
21 | fleetctl stop $unit
22 |
23 | printf "waiting:> for %s to stop " $unit;
24 | is_running=1
25 | while [ $is_running -ne 0 ]; do
26 | is_running=`fleetctl list-units | grep running | grep $unit | wc -l`;
27 | sleep 1;
28 | printf ".";
29 | done
30 | printf "\n"
31 |
32 | printf "starting:> %s\n" $unit
33 | fleetctl start $unit
34 |
35 | printf "waiting:> for %s to start " $unit;
36 | while [ $is_running -eq 0 ]; do
37 | is_running=`fleetctl list-units | grep running | grep $unit | wc -l`;
38 | sleep 1;
39 | printf ".";
40 | done
41 | printf "\n"
42 |
43 | fleetctl list-units | grep $unit
44 |
45 | done
46 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/shutdown-mesos.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl list-unit-files
3 |
4 | fleetctl stop marathon1.service
5 | fleetctl stop marathon2.service
6 |
7 | fleetctl stop mesos-slave.service
8 | fleetctl stop mesos-master@{1..3}
9 |
10 | fleetctl destroy marathon1.service
11 | fleetctl destroy marathon2.service
12 |
13 |
14 | fleetctl destroy mesos-master@{1..3}
15 |
16 | fleetctl destroy mesos-slave.service
17 | fleetctl destroy mesos-master@.service
18 |
19 | fleetctl list-unit-files
20 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/shutdown-zookeeper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | fleetctl stop zookeeper{1..5}.service
3 | fleetctl destroy zookeeper{1..5}.service
4 |
--------------------------------------------------------------------------------
/coreos/fleet/bin/start-mesos.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | fleetctl list-unit-files
4 |
5 | fleetctl submit ../mesosphere/marathon1.service
6 | fleetctl submit ../mesosphere/marathon2.service
7 |
8 | fleetctl submit ../mesosphere/mesos-slave.service
9 | fleetctl submit ../mesosphere/mesos-master@.service
10 |
11 | fleetctl start mesos-master@{1..3}
12 | fleetctl start mesos-slave.service
13 |
14 | fleetctl start marathon1.service
15 | fleetctl start marathon2.service
16 |
17 | fleetctl list-unit-files
18 |
19 |
--------------------------------------------------------------------------------
/coreos/fleet/docker-registry.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=docker-registry
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=-/usr/bin/docker rm %p
9 | ExecStartPre=/usr/bin/docker pull registry:2
10 | ExecStart=/usr/bin/docker run --net=host --name %p -p 5000:5000 --restart=always -v /data/docker-registry:/var/lib/registry registry:2
11 | ExecStop=/usr/bin/docker stop %p
12 | TimeoutStartSec=900
13 |
14 | [X-Fleet]
15 | MachineMetadata=ip=10.105.0.1
16 |
17 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/README.md:
--------------------------------------------------------------------------------
1 | This assumes the ZooKeeper cluster is up; double-check that it is before proceeding. The first-time startup steps are also consolidated into a shell sketch after the list below.
2 |
3 | # Starting for the first time
4 | 1. **Submit all the *.service* files**
5 | * fleetctl submit journalnode.service namenode1.service namenode2.service datanode.service
6 | 2. **Start the journal nodes, confirm they are running on the utility nodes**
7 | * fleetctl start journalnode.service
8 | 3. **Start namenode1**
9 | * fleetctl start namenode1.service
10 | * This should be running on r105u01, which you can check: http://r105u01.dsra.local:50070
11 | 4. **Start namenode2**
12 | * fleetctl start namenode2.service
13 | * This should be running on r105u03, which you can check: http://r105u03.dsra.local:50070
14 | 5. **Start the datanodes**
15 | * fleetctl start datanode.service
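
The same first-time startup, consolidated into a shell sketch (run from this directory; waiting for each stage to come up is left to the operator):

```
fleetctl submit journalnode.service namenode1.service namenode2.service datanode.service
fleetctl start journalnode.service   # confirm running on the utility nodes first
fleetctl start namenode1.service     # check http://r105u01.dsra.local:50070
fleetctl start namenode2.service     # check http://r105u03.dsra.local:50070
fleetctl start datanode.service
```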
16 |
17 | # Starting an existing cluster
18 | If the NameNode has already been formatted (as has the ZooKeeper failover znode), you can simply run steps 2-5 above.
19 |
20 | # If Zookeeper Fails (e.g. we lose Zookeeper Data containers)
21 | HDFS itself should be fine, but the ZooKeeper failover znode will need to be reformatted.
22 | 1. Start the journal nodes
23 | 2. Start the namenode1 service
24 | 3. "exec" into the namenode1 container (sketched after this list)
25 | * ssh to core@r105u01.dsra.local
26 | * docker exec -i -t namenode bash
27 | * In the /usr/local/hadoop DIR run the command: *bin/hdfs zkfc -formatZK*
28 | * In the /usr/local/hadoop DIR run the command: *sbin/hadoop-daemon.sh start zkfc*
29 | 4. Start namenode2 service
30 | 5. Start the datanode services
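
Step 3 as a shell sketch:

```
ssh core@r105u01.dsra.local        # the node running namenode1
docker exec -i -t namenode bash    # enter the namenode container
cd /usr/local/hadoop
bin/hdfs zkfc -formatZK            # reformat the failover znode
sbin/hadoop-daemon.sh start zkfc   # restart the failover controller
```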
31 |
32 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/datanode.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=datanode
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/hdfs
9 | ExecStartPre=-/usr/bin/docker create --name datanode-data aarongdocker/hdfs /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v %p
11 | ExecStart=/usr/bin/docker run --name %p --hostname %H --restart on-failure:5 --log-driver=journald --volumes-from datanode-data -e CLUSTER_NAME=dsra -e NNODE1_IP=r105u01 -e NNODE2_IP=r105u03 -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181 -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485 -p 1004:1004 -p 1006:1006 -p 8022:8022 -p 50010:50010 -p 50020:50020 -p 50075:50075 -p 14000:14000 aarongdocker/hdfs datanode
12 | ExecStop=/usr/bin/docker stop %p
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | Global=true
17 | MachineMetadata=role=worker
18 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/httpfs.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=httpfs
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/httpfs
9 | ExecStartPre=-/usr/bin/docker rm -v %p
10 | ExecStart=/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald -e CLUSTER_NAME=dsra -e NNODE1_IP=r105u01 -e NNODE2_IP=r105u03 -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181 -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485 -p 14000:14000 aarongdocker/httpfs start
11 | ExecStop=/usr/bin/docker stop %p
12 | TimeoutStartSec=900s
13 |
14 | [X-Fleet]
15 | MachineMetadata=role=service
16 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/journalnode.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=journalnode
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/hdfs
9 | ExecStartPre=-/usr/bin/docker create --name journalnode-data aarongdocker/hdfs /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v %p
11 | ExecStart=/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald --volumes-from journalnode-data -e CLUSTER_NAME=dsra -e NNODE1_IP=r105u01 -e NNODE2_IP=r105u03 -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181 -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485 -p 8485:8485 -p 8480:8480 aarongdocker/hdfs journalnode
12 | ExecStop=/usr/bin/docker stop %p
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | Global=true
17 | MachineMetadata=role=service
18 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/namenode1.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=namenode1
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill namenode
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/hdfs
9 | ExecStartPre=-/usr/bin/docker create --name namenode-data aarongdocker/hdfs /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v namenode
11 | ExecStart=/usr/bin/docker run --name namenode --restart on-failure:5 --log-driver=journald --volumes-from namenode-data -e CLUSTER_NAME=dsra -e NNODE1_IP=r105u01 -e NNODE2_IP=r105u03 -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181 -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485 --net=host aarongdocker/hdfs active
12 | ExecStop=/usr/bin/docker stop namenode
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.1
17 | Conflicts=namenode2
18 |
--------------------------------------------------------------------------------
/coreos/fleet/hdfs/namenode2.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=namenode2
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill namenode
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/hdfs
9 | ExecStartPre=-/usr/bin/docker create --name namenode-data aarongdocker/hdfs /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v namenode
11 | ExecStart=/usr/bin/docker run --name namenode --restart on-failure:5 --log-driver=journald --volumes-from namenode-data -e CLUSTER_NAME=dsra -e NNODE1_IP=r105u01 -e NNODE2_IP=r105u03 -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181 -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485 --net=host aarongdocker/hdfs standby
12 | ExecStop=/usr/bin/docker stop namenode
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.3
17 | Conflicts=namenode1
18 |
--------------------------------------------------------------------------------
/coreos/fleet/hue.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=hue
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull hub.dsra.local:5000/dsra/hue:3.9.0
9 | ExecStartPre=-/usr/bin/docker rm -v %p
10 | ExecStart=/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald -e CLUSTER_NAME=dsra -e HTTPFS_SERVER=httpfs.dsra.local -e HTTPFS_PORT=32000 -p 8000:8000 hub.dsra.local:5000/dsra/hue:3.9.0 start
11 | ExecStop=/usr/bin/docker stop %p
12 | TimeoutStartSec=900s
13 |
14 | [X-Fleet]
15 | MachineMetadata=role=service
16 |
--------------------------------------------------------------------------------
/coreos/fleet/kafka-manager.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka-manager
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/kafka-manager
9 | ExecStartPre=-/usr/bin/docker rm -v %p
10 | ExecStart=/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald \
11 | -e ZK_HOSTS=r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181 \
12 | -e APPLICATION_SECRET=pleaseletmeplaywiththeothers \
13 | -p 9000:9000 aarongdocker/kafka-manager
14 | ExecStop=/usr/bin/docker stop %p
15 | TimeoutStartSec=900s
16 |
17 | [X-Fleet]
18 | MachineMetadata=ip=10.105.0.1
19 |
--------------------------------------------------------------------------------
/coreos/fleet/kafka.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kafka
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=/usr/bin/docker pull aarongdocker/kafka
9 | ExecStartPre=-/usr/bin/docker create --name kafka-data aarongdocker/kafka /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v %p
11 | ExecStart=/usr/bin/bash -c "/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald \
12 | --volumes-from kafka-data \
13 | -e BROKER_ID=`/usr/bin/ifconfig bond0 | /usr/bin/sed -n 2p | /usr/bin/awk '{ print $2 }' | cut -d . -f 4` \
14 | -e DELETE_TOPIC_ENABLE=true \
15 | -e CLUSTER_NAME=dsra \
16 | -e ZK_IPS=r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181 \
17 | -e ADVERTISED_HOST_NAME=%H.dsra.local \
18 | -p 9092:9092 -p 9999:9999 aarongdocker/kafka"
19 | ExecStop=/usr/bin/docker stop %p
20 | TimeoutStartSec=900s
21 |
22 | [X-Fleet]
23 | Global=true
24 | MachineMetadata=role=worker
25 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/bamboo.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=bamboo
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=-/usr/bin/docker rm -v %p
9 | ExecStartPre=/usr/bin/docker pull hub.dsra.local:5000/dsra/bamboo:0.2.16
10 | ExecStart=/usr/bin/docker run --name %p --rm -p 8000:8000 -p 80:80 \
11 | -e MARATHON_ENDPOINT=http://marathon \
12 | -e BAMBOO_ENDPOINT=http://bamboo:8000 \
13 | -e BAMBOO_ZK_HOST=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181 \
14 | -e BAMBOO_ZK_PATH=/bamboo \
15 | -e BIND=":8000" \
16 | -e CONFIG_PATH="config/production.example.json" \
17 | -e BAMBOO_DOCKER_AUTO_HOST=true \
18 | hub.dsra.local:5000/dsra/bamboo:0.2.16
19 | ExecStop=/usr/bin/docker stop %p
20 | TimeoutStartSec=900s
21 |
22 | [X-Fleet]
23 | Global=true
24 | MachineMetadata=role=worker
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/marathon.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=marathon
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=-/usr/bin/docker rm -v %p
9 | ExecStartPre=/usr/bin/docker pull mesosphere/marathon:v0.15.0-RC3
10 | ExecStart=/usr/bin/docker run --name %p --rm --net=host \
11 | -e MARATHON_EVENT_SUBSCRIBER=http_callback \
12 | -e MARATHON_HOSTNAME=%H.dsra.local \
13 | -e MARATHON_HTTP_PORT=80 \
14 | -e MARATHON_ZK=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/marathon \
15 | -e MARATHON_MASTER=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos \
16 | -e MARATHON_TASK_LAUNCH_CONFIRM_TIMEOUT=60000 \
17 | -e MARATHON_TASK_LAUNCH_TIMEOUT=300000 \
18 | mesosphere/marathon:v0.15.0-RC3 --checkpoint
19 | ExecStop=/usr/bin/docker stop %p
20 | TimeoutStartSec=900s
21 |
22 | [X-Fleet]
23 | Global=true
24 | MachineMetadata=role=service
25 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/mesos-consul.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=mesos-consul
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill %p
8 | ExecStartPre=-/usr/bin/docker rm -v %p
9 | ExecStartPre=/usr/bin/docker pull ciscocloud/mesos-consul:latest
10 | ExecStart=/usr/bin/docker run --name %p -p 8500:8500 --rm ciscocloud/mesos-consul:latest \
11 | --zk=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos
12 | ExecStop=/usr/bin/docker stop %p
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | Global=true
17 | # MachineMetadata=role=worker
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/mesos-master.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=mesos-master
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | User=core
8 | ExecStartPre=-/usr/bin/docker kill %p
9 | ExecStartPre=-/usr/bin/docker rm -v %p
10 | ExecStartPre=/usr/bin/docker pull hub.dsra.local:5000/dsra/mesos-master:0.27.1-2.0.226.ubuntu1404
11 | ExecStart=/usr/bin/docker run --net=host --name %p \
12 | -v /var/log/mesos \
13 | -e MESOS_HOSTNAME=%H.dsra.local \
14 | -e MESOS_LOG_DIR=/var/log/mesos \
15 | -e MESOS_ZK=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos \
16 | -e MESOS_CLUSTER=DSRA \
17 | -e MESOS_QUORUM=5 \
18 | hub.dsra.local:5000/dsra/mesos-master:0.27.1-2.0.226.ubuntu1404
19 | ExecStop=/usr/bin/docker stop %p
20 | TimeoutStartSec=900s
21 |
22 | [X-Fleet]
23 | Global=true
24 | MachineMetadata=role=service
25 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/mesos-slave.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=mesos-slave
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | User=core
8 | ExecStartPre=-/usr/bin/docker kill %p
9 | ExecStartPre=-/usr/bin/docker rm -v %p
10 | ExecStartPre=/usr/bin/docker pull hub.dsra.local:5000/dsra/mesos-slave:0.27.1-2.0.226.ubuntu1404
11 | ExecStart=/usr/bin/docker run --privileged --net=host --name %p --restart=always \
12 | -v /etc/docker.tar.gz:/etc/docker.tar.gz \
13 | -v /sys:/sys -v /tmp:/tmp -v /tmp/mesos:/tmp/mesos \
14 | -v /usr/bin/docker:/bin/docker \
15 | -v /var/run/docker.sock:/var/run/docker.sock \
16 | -v /lib64/libdevmapper.so.1.02:/usr/lib/libdevmapper.so.1.02:ro \
17 | -v /lib64/libpthread.so.0:/lib/libpthread.so.0:ro \
18 | -v /lib64/libsqlite3.so.0:/lib/libsqlite3.so.0:ro \
19 | -v /lib64/libudev.so.1:/lib/libudev.so.1:ro \
20 | -v /lib64/libsystemd.so:/usr/lib/libsystemd.so.0:ro \
21 | -v /lib64/libcrypt.so:/usr/lib/libcrypt.so.20:ro \
22 | -v /lib64/libgcrypt.so:/usr/lib/libgcrypt.so.20:ro \
23 | -e MESOS_SWITCH_USER=0 \
24 | -e MESOS_LOG_DIR=/tmp/log \
25 | -e MESOS_LOG_LEVEL=INFO \
26 | -e MESOS_WORK_DIR=/tmp/mesos \
27 | -e MESOS_DOCKER_SOCK=/var/run/docker.sock \
28 | -e MESOS_ISOLATION=cgroups/cpu,cgroups/mem \
29 | -e MESOS_MASTER=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos \
30 | -e MESOS_CONTAINERIZERS=docker,mesos \
31 | -e MESOS_HOSTNAME=%H.dsra.local \
32 | -e MESOS_EXECUTOR_REGISTRATION_TIMEOUT=5mins \
33 | -e MESOS_HADOOP_HOME=/usr/local/hadoop \
34 | -e CLUSTER_NAME=dsra \
35 | -e NNODE1_IP=r105u01.dsra.local \
36 | -e NNODE2_IP=r105u03.dsra.local \
37 | -e ZK_IPS=r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181 \
38 | -e JN_IPS=r105u01.dsra.local:8485,r105u03.dsra.local:8485,r105u05.dsra.local:8485,r105u07.dsra.local:8485,r105u09.dsra.local:8485 \
39 | hub.dsra.local:5000/dsra/mesos-slave:0.27.1-2.0.226.ubuntu1404
40 | ExecStop=/usr/bin/docker stop %p
41 | TimeoutStartSec=900s
42 |
43 | [X-Fleet]
44 | Global=true
45 | MachineMetadata=role=worker
46 |
--------------------------------------------------------------------------------
/coreos/fleet/mesosphere/spark-mesos-dispatcher.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=spark-mesos-dispatcher
3 | After=mesos-master.service
4 | Requires=mesos-master.service
5 |
6 | [Service]
7 | ExecStart=/usr/local/spark/sbin/start-mesos-dispatcher.sh --master=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos
8 | #TimeoutStartSec=900s
9 |
10 | [X-Fleet]
11 | MachineMetadata=ip=10.105.0.3
12 |
--------------------------------------------------------------------------------
/coreos/fleet/zookeeper/zookeeper1.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper1
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill zookeeper1
8 | ExecStartPre=/usr/bin/docker pull endocode/zookeeper
9 | ExecStartPre=-/usr/bin/docker create --name zookeeper-data endocode/zookeeper /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v zookeeper1
11 | ExecStart=/usr/bin/docker run --name zookeeper1 --log-driver=journald --restart on-failure:5 --volumes-from zookeeper-data -e JVMFLAGS='-Xmx64m -Xms64M' -e ZK_SERVERS='server.1=r105u01.dsra.local:2888:3888 server.2=r105u03.dsra.local:2888:3888 server.3=r105u05.dsra.local:2888:3888 server.4=r105u07.dsra.local:2888:3888 server.5=r105u09.dsra.local:2888:3888' -e ZK_ID=1 --hostname r105u01 -p 2181:2181 -p 2888:2888 -p 3888:3888 endocode/zookeeper
12 | ExecStop=/usr/bin/docker stop zookeeper1
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.1
17 |
--------------------------------------------------------------------------------
/coreos/fleet/zookeeper/zookeeper2.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper2
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill zookeeper2
8 | ExecStartPre=/usr/bin/docker pull endocode/zookeeper
9 | ExecStartPre=-/usr/bin/docker create --name zookeeper-data endocode/zookeeper /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v zookeeper2
11 | ExecStart=/usr/bin/docker run --name zookeeper2 --log-driver=journald --restart on-failure:5 --volumes-from zookeeper-data -e JVMFLAGS='-Xmx64m -Xms64M' -e ZK_SERVERS='server.1=r105u01.dsra.local:2888:3888 server.2=r105u03.dsra.local:2888:3888 server.3=r105u05.dsra.local:2888:3888 server.4=r105u07.dsra.local:2888:3888 server.5=r105u09.dsra.local:2888:3888' -e ZK_ID=2 --hostname r105u03 -p 2181:2181 -p 2888:2888 -p 3888:3888 endocode/zookeeper
12 | ExecStop=/usr/bin/docker stop zookeeper2
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.3
17 |
--------------------------------------------------------------------------------
/coreos/fleet/zookeeper/zookeeper3.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper3
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill zookeeper3
8 | ExecStartPre=/usr/bin/docker pull endocode/zookeeper
9 | ExecStartPre=-/usr/bin/docker create --name zookeeper-data endocode/zookeeper /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v zookeeper3
11 | ExecStart=/usr/bin/docker run --name zookeeper3 --log-driver=journald --restart on-failure:5 --volumes-from zookeeper-data -e JVMFLAGS='-Xmx64m -Xms64M' -e ZK_SERVERS='server.1=r105u01.dsra.local:2888:3888 server.2=r105u03.dsra.local:2888:3888 server.3=r105u05.dsra.local:2888:3888 server.4=r105u07.dsra.local:2888:3888 server.5=r105u09.dsra.local:2888:3888' -e ZK_ID=3 --hostname r105u05 -p 2181:2181 -p 2888:2888 -p 3888:3888 endocode/zookeeper
12 | ExecStop=/usr/bin/docker stop zookeeper3
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.5
17 |
--------------------------------------------------------------------------------
/coreos/fleet/zookeeper/zookeeper4.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper4
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill zookeeper4
8 | ExecStartPre=/usr/bin/docker pull endocode/zookeeper
9 | ExecStartPre=-/usr/bin/docker create --name zookeeper-data endocode/zookeeper /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v zookeeper4
11 | ExecStart=/usr/bin/docker run --name zookeeper4 --log-driver=journald --restart on-failure:5 --volumes-from zookeeper-data -e JVMFLAGS='-Xmx64m -Xms64M' -e ZK_SERVERS='server.1=r105u01.dsra.local:2888:3888 server.2=r105u03.dsra.local:2888:3888 server.3=r105u05.dsra.local:2888:3888 server.4=r105u07.dsra.local:2888:3888 server.5=r105u09.dsra.local:2888:3888' -e ZK_ID=4 --hostname r105u07 -p 2181:2181 -p 2888:2888 -p 3888:3888 endocode/zookeeper
12 | ExecStop=/usr/bin/docker stop zookeeper4
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.7
17 |
--------------------------------------------------------------------------------
/coreos/fleet/zookeeper/zookeeper5.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=zookeeper5
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | ExecStartPre=-/usr/bin/docker kill zookeeper5
8 | ExecStartPre=/usr/bin/docker pull endocode/zookeeper
9 | ExecStartPre=-/usr/bin/docker create --name zookeeper-data endocode/zookeeper /bin/true
10 | ExecStartPre=-/usr/bin/docker rm -v zookeeper5
11 | ExecStart=/usr/bin/docker run --name zookeeper5 --log-driver=journald --restart on-failure:5 --volumes-from zookeeper-data -e JVMFLAGS='-Xmx64m -Xms64M' -e ZK_SERVERS='server.1=r105u01.dsra.local:2888:3888 server.2=r105u03.dsra.local:2888:3888 server.3=r105u05.dsra.local:2888:3888 server.4=r105u07.dsra.local:2888:3888 server.5=r105u09.dsra.local:2888:3888' -e ZK_ID=5 --hostname r105u09 -p 2181:2181 -p 2888:2888 -p 3888:3888 endocode/zookeeper
12 | ExecStop=/usr/bin/docker stop zookeeper5
13 | TimeoutStartSec=900s
14 |
15 | [X-Fleet]
16 | MachineMetadata=ip=10.105.0.9
17 |
--------------------------------------------------------------------------------
/docker/.gitignore:
--------------------------------------------------------------------------------
1 | #ignore following files
2 | #private key
3 | domain.key
4 | #database
5 | *.sqlite
6 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | #Doing Docker
2 |
3 | ##Docker Data Container Setup
4 | from *https://docs.docker.com/engine/userguide/containers/dockervolumes/*
5 |
6 | For both HDFS and Kafka we make use of two containers on each node:
7 |
8 | 1. one for data
9 | 2. one for running the service
10 |
11 | The data container uses the VOLUME instruction from the [Dockerfile](https://github.com/aglahe/dsra-dcos/blob/master/docker/kafka/Dockerfile):
12 |
13 | ```
14 | VOLUME ["/data/kafka"]
15 |
16 | ```
17 | ###Example Unit File:
18 | ```
19 | [Unit]
20 | Description=kafka
21 | After=docker.service
22 | Requires=docker.service
23 |
24 | [Service]
25 | ExecStartPre=-/usr/bin/docker kill %p
26 | ExecStartPre=/usr/bin/docker pull aarongdocker/kafka
27 | ExecStartPre=-/usr/bin/docker create --name kafka-data aarongdocker/kafka /bin/true
28 | ExecStartPre=-/usr/bin/docker rm -v %p
29 | ExecStart=/usr/bin/bash -c "/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald \
30 | --volumes-from kafka-data \
31 | -e BROKER_ID=`/usr/bin/ifconfig bond0 | /usr/bin/sed -n 2p | /usr/bin/awk '{ print $2 }' | cut -d . -f 4` \
32 | -e DELETE_TOPIC_ENABLE=true \
33 | -e CLUSTER_NAME=dsra \
34 | -e ZK_IPS=r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181 \
35 | -e ADVERTISED_HOST_NAME=%H.dsra.local \
36 | -p 9092:9092 -p 9999:9999 aarongdocker/kafka"
37 | ExecStop=/usr/bin/docker stop %p
38 | TimeoutStartSec=900s
39 |
40 | [X-Fleet]
41 | Global=true
42 | MachineMetadata=role=worker
43 | ```
44 |
45 | In the example service file above, you'll see a couple of key lines:
46 |
47 | 1. ExecStartPre=-/usr/bin/docker create --name kafka-data aarongdocker/kafka /bin/true
48 | 2. ExecStart=/usr/bin/bash -c "/usr/bin/docker run --name %p --restart on-failure:5 --log-driver=journald \
49 | --volumes-from kafka-data \
50 |
51 |
52 | The 1st line tries to create a docker container but doesn't run it (the *-* at the start of the line tells fleet it can continue even if that command fails, e.g. when the container already exists). The **--volumes-from kafka-data** tells the 2nd container to *link* to the volumes owned by the container with that name (in our example *kafka-data*), as sketched below.
53 |
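Boiled down, the pattern looks like this outside of fleet (a minimal sketch you can run by hand on any docker host with the image; the environment variables from the unit file are omitted for brevity):

```
# 1. Create (but never start) a container whose only job is to own the volume.
docker create --name kafka-data aarongdocker/kafka /bin/true

# 2. Run the real service, linking in the volumes owned by kafka-data.
docker run -d --name kafka --volumes-from kafka-data aarongdocker/kafka

# 3. The data outlives the service container: removing kafka (even with -v)
#    leaves /data/kafka alone, because kafka-data still references the volume.
docker rm -f -v kafka
docker run -d --name kafka --volumes-from kafka-data aarongdocker/kafka
```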
54 | ##Docker Notes..little notes that made my life easier
55 |
56 | ###Remove docker containers that you don't want anymore
57 | ```
58 | $ pssh -vi -l core -h clusters/workers "docker rm -v \$(docker ps -a | grep "XXXXXXX" | cut -d' ' -f1)"
59 | ```
60 | where "XXXXXXX" is the filter for the containers you want removed. The *cut -d' ' -f1* part extracts the container ID.
61 |
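An equivalent that leans on docker's built-in filtering instead of grep/cut (*docker ps -aq --filter "name=..."* prints just the matching container IDs):

```
$ pssh -vi -l core -h clusters/workers 'docker rm -v $(docker ps -aq --filter "name=XXXXXXX")'
```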
62 |
--------------------------------------------------------------------------------
/docker/bamboo/README.txt:
--------------------------------------------------------------------------------
1 | Instructions to build Bamboo
2 | ============================
3 |
4 | 1. Download bamboo code base.
5 | 2. Replace haproxy_template.cfg in bamboo's config directory.
6 | 3. docker login hub.dsra.local:5000
7 | 4. docker build -t hub.dsra.local:5000/dsra/bamboo:[VERSION] .
8 | 5. docker push hub.dsra.local:5000/dsra/bamboo:[VERSION]
9 | 6. Use marathon configuration to deploy on all worker nodes.
10 |
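Steps 3-5 as a single snippet (for example VERSION=0.2.16, the tag the fleet bamboo.service pulls):

    VERSION=0.2.16
    docker login hub.dsra.local:5000
    docker build -t hub.dsra.local:5000/dsra/bamboo:$VERSION .
    docker push hub.dsra.local:5000/dsra/bamboo:$VERSION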
11 |
12 |
13 |
--------------------------------------------------------------------------------
/docker/bamboo/bamboo-reload.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | bamboo su - haproxy -c 'haproxy -c -f {{.}}'
3 |
4 |
5 |
--------------------------------------------------------------------------------
/docker/bamboo/bamboo-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker stop bamboo
3 | docker rm -v bamboo
4 |
--------------------------------------------------------------------------------
/docker/bamboo/haproxy_template.cfg:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | chroot /var/lib/haproxy
5 | stats socket /run/haproxy/admin.sock mode 660 level admin
6 | stats timeout 30s
7 | user haproxy
8 | group haproxy
9 | daemon
10 |
11 | # Default SSL material locations
12 | ca-base /etc/ssl/certs
13 | crt-base /etc/ssl/private
14 |
15 | # Default ciphers to use on SSL-enabled listening sockets.
16 | # For more information, see ciphers(1SSL).
17 | # ssl-default-bind-ciphers kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL
18 |
19 | defaults
20 | log global
21 | mode http
22 | option httplog
23 | option dontlognull
24 | timeout connect 5000
25 | timeout client 50000
26 | timeout server 50000
27 |
28 | errorfile 400 /etc/haproxy/errors/400.http
29 | errorfile 403 /etc/haproxy/errors/403.http
30 | errorfile 408 /etc/haproxy/errors/408.http
31 | errorfile 500 /etc/haproxy/errors/500.http
32 | errorfile 502 /etc/haproxy/errors/502.http
33 | errorfile 503 /etc/haproxy/errors/503.http
34 | errorfile 504 /etc/haproxy/errors/504.http
35 |
36 | {{
37 | import "strings"
38 |
39 | func reverse_format( id string ) string {
40 |
41 | var named_components = strings.Split(id[1:], "/")
42 | var num_components = len(named_components)
43 |
44 | var temp = make([]string, num_components)
45 |
46 | for i:=0; i < num_components; i++ {
--------------------------------------------------------------------------------
/docker/cloudera-hue/Dockerfile:
--------------------------------------------------------------------------------
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | RUN apt-get update && apt-get install -q -y build-essential libkrb5-dev libldap2-dev \
6 | libgmp3-dev libmysqlclient-dev libsasl2-dev libsasl2-modules-gssapi-mit \
7 | libsqlite3-dev libssl-dev libtidy-0.99-0 libxml2-dev libxslt-dev mysql-server \
8 | python-dev python-setuptools python-simplejson
9 |
10 | RUN ln -s /usr/lib/python2.7/plat-*/_sysconfigdata_nd.py /usr/lib/python2.7/
11 |
12 | RUN git clone https://github.com/cloudera/hue.git && groupadd -r hue && useradd -r -g hue hue
13 | WORKDIR hue
14 | RUN git checkout tags/release-3.9.0 && make apps && rm -rf .git
15 | RUN chown -R hue:hue /local/git/hue
16 |
17 | EXPOSE 8000
18 |
--------------------------------------------------------------------------------
/docker/cloudera-hue/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker build -t hub.dsra.local:5000/cloudera/hue:3.9.0 .
4 |
--------------------------------------------------------------------------------
/docker/cloudera-hue/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker push hub.dsra.local:5000/cloudera/hue:3.9.0
4 |
--------------------------------------------------------------------------------
/docker/cloudera-hue/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -t -i --rm --name=cloudera-hue -p 8000:8000 -p 8020:8020 -p 50070:50070 -p 9999:9999 hub.dsra.local:5000/cloudera/hue:3.9.0
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-nginx/auth/htpasswd:
--------------------------------------------------------------------------------
1 | mparker:$2y$05$8yi4zgMi0pnu4FJUUSm2Nu1h.rNacudgD51mt51CSoV360XNKEazi
2 |
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-nginx/certs/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
3 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
4 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
5 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
6 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
7 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
8 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
9 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
10 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
11 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
12 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
13 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
14 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
15 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
16 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
17 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
18 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
19 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
20 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
21 | -----END CERTIFICATE-----
22 |
--------------------------------------------------------------------------------
/docker/docker-registry-nginx/registry_config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: debug
4 |
5 | common:
6 | search_backend: sqlalchemy
7 | sqlalchemy_index_database: sqlite:////tmp/docker-registry.db
8 |
9 | storage:
10 | filesystem:
11 | rootdirectory: /data
12 |
13 | http:
14 | addr: 10.105.255.249:5000
15 | net: tcp
16 | secret: askdravherqp305u235lkadjfalsdjkfsasdfasdf
17 | tls:
18 | certificate: /certs/domain.crt
19 | key: /certs/domain.key
20 | debug:
21 | addr: 10.105.255.249:5001
22 |
23 | auth:
24 | htpasswd:
25 | realm: basic-realm
26 | path: /auth/htpasswd
27 |
--------------------------------------------------------------------------------
/docker/docker-registry-nginx/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --name docker-registry --restart=always -v /opt/registry/data:/registry/data -e STORAGE_PATH=/registry/data -e SETTINGS_FLAVOR=local -e LOGLEVEL=DEBUG registry
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-open/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM registry:2
2 | MAINTAINER matt.parker
3 | ADD domain.crt /
4 | ADD domain.key /
5 | EXPOSE 5000
6 | CMD ["/etc/docker/registry/config.yml"]
7 |
--------------------------------------------------------------------------------
/docker/docker-registry-open/cert-creation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aglahe/dsra-dcos/9d99ae8b0e693026f2287934dfcbe5186e28d523/docker/docker-registry-open/cert-creation.png
--------------------------------------------------------------------------------
/docker/docker-registry-open/certs/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
3 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
4 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
5 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
6 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
7 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
8 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
9 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
10 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
11 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
12 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
13 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
14 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
15 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
16 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
17 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
18 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
19 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
20 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
21 | -----END CERTIFICATE-----
22 |
--------------------------------------------------------------------------------
/docker/docker-registry-open/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIGFTCCA/2gAwIBAgIJAN4LjgfupTBgMA0GCSqGSIb3DQEBCwUAMIGgMQswCQYD
3 | VQQGEwJVUzERMA8GA1UECAwIVklSR0lOSUExDzANBgNVBAcMBk1DTEVBTjELMAkG
4 | A1UECgwCTDMxGTAXBgNVBAsMEEdMT0JBTCBTT0xVVElPTlMxGzAZBgNVBAMMEnIx
5 | MDV1MDEuZHNyYS5sb2NhbDEoMCYGCSqGSIb3DQEJARYZbWF0dGhldy5wYXJrZXJA
6 | bC0zY29tLmNvbTAeFw0xNTA4MTYwMzMyMzRaFw0xNjA4MTUwMzMyMzRaMIGgMQsw
7 | CQYDVQQGEwJVUzERMA8GA1UECAwIVklSR0lOSUExDzANBgNVBAcMBk1DTEVBTjEL
8 | MAkGA1UECgwCTDMxGTAXBgNVBAsMEEdMT0JBTCBTT0xVVElPTlMxGzAZBgNVBAMM
9 | EnIxMDV1MDEuZHNyYS5sb2NhbDEoMCYGCSqGSIb3DQEJARYZbWF0dGhldy5wYXJr
10 | ZXJAbC0zY29tLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUs
11 | JkqpHkEPNHUHYUq5OkV7aAochMBi2TuBDf9BCxWPsoGOMPjjLobv3djHmmdqJXPH
12 | U8HJdTvTvUx7tsfV6tmxgCFUx2Gt9UnIksRr0xZQjPj4cxrMMbyFVtKh9EBOn99Q
13 | 8x8M1D7xKbo3VF+kdkKso1yta+07Q/crwrav7HEt2D/Wld7+Ydv60fMYoYUSjMOf
14 | fPKTGUuVWKA604rTjC8guUOJjST+VIH/F/zJrtGqgPU1BbjDMGX3lgFcCCrKQ7Fq
15 | tUfw+nxBYVRNcDyfdmt3F6lbDROJo8ki3PfhqEaB+aYkUJREU287IiC5dCO1nwjD
16 | 7K21OxS6uXA+yb01GSB1Bs6IUIez/QONBbXEYAQYm7pDROGTVdA7HGRQYOwAGMop
17 | rYiF62o/kRF+3nveevfFeSgcW6Bqp8o5V0LlVPFr3+5pAiuhDyP5X3mZwrSY5Hyl
18 | obhpsJUx13AN6EFI1ff7/ycjbDHHbArL+dsf2OnYw/f8BdYw+KWsL3MPQ5RTc1+U
19 | Cu/Wa3ldY7aZfBDEmO2iWsF+jeSxGAElIwhN1Kg4TrV3m2aTD9RkhNeyrvXwgNqp
20 | 8aemzW4UTSlf7IfnurQXwkOF9JVbTX21eFD8W2HQYjntYXeNWLiXk1Fo8YJEcniV
21 | OTYmZYdIjTlh++SjRF2ctKBLwnLESvxK4YOijkubAgMBAAGjUDBOMB0GA1UdDgQW
22 | BBR9hbfW62yQyFhErV+b/LpOxnruQzAfBgNVHSMEGDAWgBR9hbfW62yQyFhErV+b
23 | /LpOxnruQzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAW3VkDuMq5
24 | aWZLFQvvW7G6NFTYdasuNEirW+UgscLsCdlx7tlOasNKCZWoBrQFQckBeD4ZMR3x
25 | 86VucRyPYC6evLXZrhuNlOASzjr1YeGTdwcKVXoy9RcCzlX+28T5KPx880Vfg14m
26 | X63qbQtRbe4lW9aYkAIfxJ/3P4Kat6mxPOc4tdDwEL5ncQApwOjEc2DNZESy0e7P
27 | zrtLhX7TriQ8V3ji+gZ+QlCwyHXi+COeIGTOYPQm4NyTqckPsnf2jUdZaIEazlQP
28 | 8vBxE18toAxPM+hCEtnhUcLDbL/ZK4kXdcW+V4HkSp+GB3ZVmfV9JoMgl0dB0HWY
29 | eYEDx8rjhxNz984xRtbXcwyxotZm8nbX+nFJ/4PBZfCwYixJ6x65xzou1C4w0lGC
30 | +ckVr6/UtvE2EecU9DtOa6ZgGEakRGEhuusqjV7cvwd3m8T2L+UsYU6g6n/gx1hj
31 | 3wM6ffT070x5LLpnHVelMSMM7QnwsnY1HJlFYZ1jZPMBgimsiianUVBDaSY12hE/
32 | K7bGck/T8cP4KBFpDyXcxz5+1yTvgoiIZY2Kzt30yT+YYYZZhdoer2IXecV5/v7F
33 | uCJCV+LtsGWpQpGH7epRhUge0S2zhXrQ84jXfDZ9LkX0u1F1iGpcvi/ez74Ob1fg
34 | k3phmjUDULy28rvWiOmMZEd/P+P74dDrqg==
35 | -----END CERTIFICATE-----
36 |
--------------------------------------------------------------------------------
/docker/docker-registry-open/registry_config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: debug
4 |
5 | common:
6 | search_backend: sqlalchemy
7 | sqlalchemy_index_database: sqlite:////tmp/docker-registry.db
8 |
9 | storage:
10 | filesystem:
11 | rootdirectory: /data
12 |
13 | http:
14 | addr: 0.0.0.0:5000
15 | net: tcp
16 | secret: askdravherqp305u235lkadjfalsdjkfsasdfasdf
17 | tls:
18 | certificate: /certs/domain.crt
19 | key: /certs/domain.key
20 | debug:
21 | addr: 0.0.0.0:5001
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docker/docker-registry-open/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --net=host --name docker-registry-open --restart=always -v /opt/registry/registry_config.yml:/etc/docker/registry/config.yml -v /opt/registry/certs:/certs -v /opt/registry/data:/data -e SETTINGS_FLAVOR=local -e STORAGE_PATH=/data -e LOGLEVEL=DEBUG registry:2.1.1
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM registry:2
2 | MAINTAINER matt.parker
3 | ADD domain.crt /
4 | ADD domain.key /
5 | EXPOSE 5000
6 | CMD ["/etc/docker/registry/config.yml"]
7 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/auth/htpasswd:
--------------------------------------------------------------------------------
1 | mparker:$2y$05$8yi4zgMi0pnu4FJUUSm2Nu1h.rNacudgD51mt51CSoV360XNKEazi
2 |
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/cert-creation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aglahe/dsra-dcos/9d99ae8b0e693026f2287934dfcbe5186e28d523/docker/docker-registry-secure/cert-creation.png
--------------------------------------------------------------------------------
/docker/docker-registry-secure/certs/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
3 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
4 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
5 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
6 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
7 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
8 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
9 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
10 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
11 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
12 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
13 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
14 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
15 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
16 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
17 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
18 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
19 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
20 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
21 | -----END CERTIFICATE-----
22 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIGFTCCA/2gAwIBAgIJAN4LjgfupTBgMA0GCSqGSIb3DQEBCwUAMIGgMQswCQYD
3 | VQQGEwJVUzERMA8GA1UECAwIVklSR0lOSUExDzANBgNVBAcMBk1DTEVBTjELMAkG
4 | A1UECgwCTDMxGTAXBgNVBAsMEEdMT0JBTCBTT0xVVElPTlMxGzAZBgNVBAMMEnIx
5 | MDV1MDEuZHNyYS5sb2NhbDEoMCYGCSqGSIb3DQEJARYZbWF0dGhldy5wYXJrZXJA
6 | bC0zY29tLmNvbTAeFw0xNTA4MTYwMzMyMzRaFw0xNjA4MTUwMzMyMzRaMIGgMQsw
7 | CQYDVQQGEwJVUzERMA8GA1UECAwIVklSR0lOSUExDzANBgNVBAcMBk1DTEVBTjEL
8 | MAkGA1UECgwCTDMxGTAXBgNVBAsMEEdMT0JBTCBTT0xVVElPTlMxGzAZBgNVBAMM
9 | EnIxMDV1MDEuZHNyYS5sb2NhbDEoMCYGCSqGSIb3DQEJARYZbWF0dGhldy5wYXJr
10 | ZXJAbC0zY29tLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMUs
11 | JkqpHkEPNHUHYUq5OkV7aAochMBi2TuBDf9BCxWPsoGOMPjjLobv3djHmmdqJXPH
12 | U8HJdTvTvUx7tsfV6tmxgCFUx2Gt9UnIksRr0xZQjPj4cxrMMbyFVtKh9EBOn99Q
13 | 8x8M1D7xKbo3VF+kdkKso1yta+07Q/crwrav7HEt2D/Wld7+Ydv60fMYoYUSjMOf
14 | fPKTGUuVWKA604rTjC8guUOJjST+VIH/F/zJrtGqgPU1BbjDMGX3lgFcCCrKQ7Fq
15 | tUfw+nxBYVRNcDyfdmt3F6lbDROJo8ki3PfhqEaB+aYkUJREU287IiC5dCO1nwjD
16 | 7K21OxS6uXA+yb01GSB1Bs6IUIez/QONBbXEYAQYm7pDROGTVdA7HGRQYOwAGMop
17 | rYiF62o/kRF+3nveevfFeSgcW6Bqp8o5V0LlVPFr3+5pAiuhDyP5X3mZwrSY5Hyl
18 | obhpsJUx13AN6EFI1ff7/ycjbDHHbArL+dsf2OnYw/f8BdYw+KWsL3MPQ5RTc1+U
19 | Cu/Wa3ldY7aZfBDEmO2iWsF+jeSxGAElIwhN1Kg4TrV3m2aTD9RkhNeyrvXwgNqp
20 | 8aemzW4UTSlf7IfnurQXwkOF9JVbTX21eFD8W2HQYjntYXeNWLiXk1Fo8YJEcniV
21 | OTYmZYdIjTlh++SjRF2ctKBLwnLESvxK4YOijkubAgMBAAGjUDBOMB0GA1UdDgQW
22 | BBR9hbfW62yQyFhErV+b/LpOxnruQzAfBgNVHSMEGDAWgBR9hbfW62yQyFhErV+b
23 | /LpOxnruQzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAW3VkDuMq5
24 | aWZLFQvvW7G6NFTYdasuNEirW+UgscLsCdlx7tlOasNKCZWoBrQFQckBeD4ZMR3x
25 | 86VucRyPYC6evLXZrhuNlOASzjr1YeGTdwcKVXoy9RcCzlX+28T5KPx880Vfg14m
26 | X63qbQtRbe4lW9aYkAIfxJ/3P4Kat6mxPOc4tdDwEL5ncQApwOjEc2DNZESy0e7P
27 | zrtLhX7TriQ8V3ji+gZ+QlCwyHXi+COeIGTOYPQm4NyTqckPsnf2jUdZaIEazlQP
28 | 8vBxE18toAxPM+hCEtnhUcLDbL/ZK4kXdcW+V4HkSp+GB3ZVmfV9JoMgl0dB0HWY
29 | eYEDx8rjhxNz984xRtbXcwyxotZm8nbX+nFJ/4PBZfCwYixJ6x65xzou1C4w0lGC
30 | +ckVr6/UtvE2EecU9DtOa6ZgGEakRGEhuusqjV7cvwd3m8T2L+UsYU6g6n/gx1hj
31 | 3wM6ffT070x5LLpnHVelMSMM7QnwsnY1HJlFYZ1jZPMBgimsiianUVBDaSY12hE/
32 | K7bGck/T8cP4KBFpDyXcxz5+1yTvgoiIZY2Kzt30yT+YYYZZhdoer2IXecV5/v7F
33 | uCJCV+LtsGWpQpGH7epRhUge0S2zhXrQ84jXfDZ9LkX0u1F1iGpcvi/ez74Ob1fg
34 | k3phmjUDULy28rvWiOmMZEd/P+P74dDrqg==
35 | -----END CERTIFICATE-----
36 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/registry_config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: debug
4 |
5 | common:
6 | search_backend: sqlalchemy
7 | sqlalchemy_index_database: sqlite:////tmp/docker-registry.db
8 |
9 | storage:
10 | filesystem:
11 | rootdirectory: /data
12 |
13 | http:
14 | addr: 10.105.255.249:5000
15 | net: tcp
16 | secret: askdravherqp305u235lkadjfalsdjkfsasdfasdf
17 | tls:
18 | certificate: /certs/domain.crt
19 | key: /certs/domain.key
20 | debug:
21 | addr: 10.105.255.249:5001
22 |
23 | auth:
24 | htpasswd:
25 | realm: basic-realm
26 | path: /auth/htpasswd
27 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --net=host --name docker-registry --restart=always -v /opt/registry/registry_config.yml:/etc/docker/registry/config.yml -v /opt/registry/auth:/auth -v /opt/registry/certs:/certs -v /opt/registry/data:/data -e SETTINGS_FLAVOR=local -e STORAGE_PATH=/data -e LOGLEVEL=DEBUG registry:2.1.1
3 |
--------------------------------------------------------------------------------
/docker/docker-registry-secure/run.sh.bak:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --net=host --name registry --restart=always -v /opt/registry/registry_config.yml:/etc/docker/registry/config.yml -v /opt/registry/auth:/auth -v /opt/registry/certs:/certs -v /opt/registry/data:/data registry:2.1.1
3 |
--------------------------------------------------------------------------------
/docker/docker-registry/auth/htpasswd:
--------------------------------------------------------------------------------
1 | mparker:$2y$05$8yi4zgMi0pnu4FJUUSm2Nu1h.rNacudgD51mt51CSoV360XNKEazi
2 |
3 |
--------------------------------------------------------------------------------
/docker/docker-registry/certs/domain.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDdTCCAl2gAwIBAgIJAK0Y/ATHLBNyMA0GCSqGSIb3DQEBCwUAMFExCzAJBgNV
3 | BAYTAlVTMQswCQYDVQQIDAJWQTEPMA0GA1UEBwwGTUNMRUFOMQswCQYDVQQKDAJM
4 | MzEXMBUGA1UEAwwOaHViLmRzcmEubG9jYWwwHhcNMTUwODI2MDEzODI0WhcNMTYw
5 | ODI1MDEzODI0WjBRMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExDzANBgNVBAcM
6 | Bk1DTEVBTjELMAkGA1UECgwCTDMxFzAVBgNVBAMMDmh1Yi5kc3JhLmxvY2FsMIIB
7 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmrNGpzIsk8THRgVKWhZw82jg
8 | RcP8uG7foMKwxSUNU9TUbGG05Mgq85QY83EmVycNgwTFnxYVP4BiY+o0yV7I2giq
9 | sTFUevCQfeG74vs58+jYns18fjreBRWZvGTQZ1Skruyg3f62x4RYcxuYAwwqrkrt
10 | /Y1+fBFTKkkX9Fy33yyU08D5EaO9XFbXcWuzB0A9g7kD5jCPW+JsCK4vcy851t4P
11 | aC/w8D8o946O3dJxCuG+VUFfPbw4pMDdZOh6oHuCP/ZxEebuzkXvQ6X6RM0Z8Lba
12 | dzntQeKRr/pZL7NlmicBVwnI4CX2ztLUTSQROmekl8Rh69YBhNEK+sv6WYVH0QID
13 | AQABo1AwTjAdBgNVHQ4EFgQUmHHAztO+Yl36Cp0fZ0eMFlzQr0owHwYDVR0jBBgw
14 | FoAUmHHAztO+Yl36Cp0fZ0eMFlzQr0owDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
15 | AQsFAAOCAQEAIz7Izzeom7HKDcyXzg0GpE2mFNFnsdYRImMLx/y6IRQoy6fWtIGi
16 | 7uqsGfDxoFcdrR1PNbwsLEKHs+e8dKCVDx0tZPguVsyusK+vtwkB4AEg6YpgCxVU
17 | zjFnNTDxOxwVVTF1F2zLdei8I4GkYIfhdi1Bj4Rj7r1MZmV2Z6eIXI+dYbcURoyy
18 | zunSyCOIzvB/jfhGXKv8iilkMl57ciSZT4ktvIHN4XiqebwuC+62uwJ7o6uztJOQ
19 | g1bwK8+oC+FGc5Wf74rDU+VEvfdY+Kd5ezlYnE6/sLEBt+i0WM4EdsjoGJtaEvvT
20 | t+/Cha7Fv0OpCUHLPI0ScF0YWbU4nF2LSw==
21 | -----END CERTIFICATE-----
22 |
--------------------------------------------------------------------------------
/docker/docker-registry/registry_config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: debug
4 |
5 | common:
6 | search_backend: sqlalchemy
7 | sqlalchemy_index_database: sqlite:////tmp/docker-registry.db
8 |
9 | storage:
10 | filesystem:
11 | rootdirectory: /data
12 |
13 | http:
14 | addr: 0.0.0.0:5000
15 | net: tcp
16 | secret: askdravherqp305u235lkadjfalsdjkfsasdfasdf
17 | tls:
18 | certificate: /certs/domain.crt
19 | key: /certs/domain.key
20 | debug:
21 | addr: 0.0.0.0:5001
22 |
23 | auth:
24 | htpasswd:
25 | realm: basic-realm
26 | path: /auth/htpasswd
27 |
--------------------------------------------------------------------------------
/docker/docker-registry/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --name registry -p 10.105.255.249:5000:5000 --restart=always -v /opt/registry/data:/data -v /opt/registry/index/docker-registry.db:/tmp/docker-registry.db -v /opt/registry/auth:/auth -v /opt/registry/certs:/certs -v /opt/registry/registry_config.yml:/etc/docker/registry/config.yml registry:2.1.1
3 |
--------------------------------------------------------------------------------
/docker/dsra-hue/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM hub.dsra.local:5000/cloudera/hue:3.9.0
2 | MAINTAINER Matt Parker
3 |
4 | # Copy the hue template
5 | COPY hue.ini.template /tmp/hue.ini.template
6 |
7 | # Copy the bootstrap shell
8 | COPY bootstrap.sh /bin/bootstrap.sh
9 |
10 | USER hue
11 | ENTRYPOINT ["/bin/bootstrap.sh"]
12 | CMD ["bash"]
13 |
--------------------------------------------------------------------------------
/docker/dsra-hue/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # make sure we have everything we need
4 | if [ -z "$CLUSTER_NAME" ] || [ -z "$HTTPFS_SERVER" ] || [ -z "$HTTPFS_PORT" ]; then
5 | echo "CLUSTER_NAME, HTTPFS_SERVER, and HTTPFS_PORT need to be set as environment variables to be able to run."
6 | exit 1
7 | fi
8 |
9 | # Replace all the variables in hue.ini.template
10 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /tmp/hue.ini.template \
11 | | sed "s/HTTPFS_SERVER/$HTTPFS_SERVER/" \
12 | | sed "s/HTTPFS_PORT/$HTTPFS_PORT/" \
13 | > /local/git/hue/desktop/conf/pseudo-distributed.ini
14 |
15 | # Read the 1st arg, and based upon that, act accordingly
16 | case "$1" in
17 | start)
18 | build/env/bin/hue runserver 0.0.0.0:8000
19 | ;;
20 | bash)
21 | /bin/bash
22 | ;;
23 | *)
24 | echo $"Usage: {start|bash}"
25 | eval $*
26 | esac
27 |
--------------------------------------------------------------------------------
/docker/dsra-hue/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker build -t hub.dsra.local:5000/dsra/hue:3.9.0 .
4 |
--------------------------------------------------------------------------------
/docker/dsra-hue/hue.ini.template:
--------------------------------------------------------------------------------
1 | # Hue configuration file
2 | # ===================================
3 | #
4 | # For complete documentation about the contents of this file, run
5 | # $ <hue_root>/build/env/bin/hue config_help
6 | #
7 | # All .ini files under the current directory are treated equally. Their
8 | # contents are merged to form the Hue configuration, which can
9 | # be viewed on the Hue at
10 | # http://<hue_host>:<hue_port>/dump_config
11 |
12 |
13 | ###########################################################################
14 | # General configuration for core Desktop features (authentication, etc)
15 | ###########################################################################
16 |
17 | [desktop]
18 |
19 | # Set this to a random string, the longer the better.
20 | # This is used for secure hashing in the session store.
21 | secret_key=AS#D5Gqe&alkr*gnase!!lkfjgwr)GTASFG$Sdafg@sadfg^jpo+iasu
22 |
23 | # Webserver listens on this address and port
24 | http_host=0.0.0.0
25 | http_port=8000
26 |
27 | # Time zone name
28 | time_zone=America/New_York
29 |
30 | # Enable or disable Django debug mode.
31 | django_debug_mode=false
32 |
33 | # Enable or disable backtrace for server error
34 | http_500_debug_mode=false
35 |
36 | # Enable or disable memory profiling.
37 | ## memory_profiler=false
38 |
39 | # Server email for internal error messages
40 | ## django_server_email='hue@localhost.localdomain'
41 |
42 | # Email backend
43 | ## django_email_backend=django.core.mail.backends.smtp.EmailBackend
44 |
45 | # Webserver runs as this user
46 | ## server_user=hue
47 | ## server_group=hue
48 |
49 | # This should be the Hue admin and proxy user
50 | default_user=hue
51 |
52 | # This should be the hadoop cluster admin
53 | default_hdfs_superuser=hdfs
54 |
55 | # If set to false, runcpserver will not actually start the web server.
56 | # Used if Apache is being used as a WSGI container.
57 | ## enable_server=yes
58 |
59 | # Number of threads used by the CherryPy web server
60 | ## cherrypy_server_threads=10
61 |
62 | # Filename of SSL Certificate
63 | ## ssl_certificate=
64 |
65 | # Filename of SSL RSA Private Key
66 | ## ssl_private_key=
67 |
68 | # List of allowed and disallowed ciphers in cipher list format.
69 | # See http://www.openssl.org/docs/apps/ciphers.html for more information on cipher list format.
70 | ## ssl_cipher_list=DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2
71 |
72 | # LDAP username and password of the hue user used for LDAP authentications.
73 | # Set it to use LDAP Authentication with HiveServer2 and Impala.
74 | ## ldap_username=hue
75 | ## ldap_password=
76 |
77 | # Default encoding for site data
78 | ## default_site_encoding=utf-8
79 |
80 | # Help improve Hue with anonymous usage analytics.
81 | # Use Google Analytics to see how many times an application or specific section of an application is used, nothing more.
82 | collect_usage=false
83 |
84 | # Support for HTTPS termination at the load-balancer level with SECURE_PROXY_SSL_HEADER.
85 | ## secure_proxy_ssl_header=false
86 |
87 | # Comma-separated list of Django middleware classes to use.
88 | # See https://docs.djangoproject.com/en/1.4/ref/middleware/ for more details on middlewares in Django.
89 | ## middleware=desktop.auth.backend.LdapSynchronizationBackend
90 |
91 | # Comma-separated list of regular expressions, which match the redirect URL.
92 | # For example, to restrict to your local domain and FQDN, the following value can be used:
93 | # ^\/.*$,^http:\/\/www.mydomain.com\/.*$
94 | ## redirect_whitelist=^\/.*$
95 |
96 | # Comma separated list of apps to not load at server startup.
97 | # e.g.: beeswax,impala,security,filebrowser,jobbrowser,rdbms,jobsub,pig,hbase,sqoop,zookeeper,metastore,spark,oozie,indexer
98 | app_blacklist=beeswax,impala,jobbrowser,rdbms,jobsub,pig,hbase,sqoop,security,zookeeper,metastore,search,spark,oozie,indexer,yarn
99 |
100 | # The directory in which to store the auditing logs. Auditing is disabled if the value is empty.
101 | # e.g. /var/log/hue/audit.log
102 | ## audit_event_log_dir=
103 |
104 | # Size in KB/MB/GB for audit log to rollover.
105 | ## audit_log_max_file_size=100MB
106 |
107 | # Administrators
108 | # ----------------
109 | [[django_admins]]
110 | ## [[[admin1]]]
111 | ## name=john
112 | ## email=john@doe.com
113 |
114 | # UI customizations
115 | # -------------------
116 | [[custom]]
117 |
118 | # Top banner HTML code
119 | # e.g. <H4>Test Lab A2 Hue Services</H4>
120 | banner_top_html=DSRA Hue Services
121 |
122 | # Configuration options for user authentication into the web application
123 | # ------------------------------------------------------------------------
124 | [[auth]]
125 |
126 | # Authentication backend. Common settings are:
127 | # - django.contrib.auth.backends.ModelBackend (entirely Django backend)
128 | # - desktop.auth.backend.AllowAllBackend (allows everyone)
129 | # - desktop.auth.backend.AllowFirstUserDjangoBackend
130 | # (Default. Relies on Django and user manager, after the first login)
131 | # - desktop.auth.backend.LdapBackend
132 | # - desktop.auth.backend.PamBackend
133 | # - desktop.auth.backend.SpnegoDjangoBackend
134 | # - desktop.auth.backend.RemoteUserDjangoBackend
135 | # - libsaml.backend.SAML2Backend
136 | # - libopenid.backend.OpenIDBackend
137 | # - liboauth.backend.OAuthBackend
138 | # (Supports Twitter, Facebook, Google+ and Linkedin)
139 | backend=desktop.auth.backend.LdapBackend
140 |
141 | # The service to use when querying PAM.
142 | ## pam_service=login
143 |
144 | # When using the desktop.auth.backend.RemoteUserDjangoBackend, this sets
145 | # the normalized name of the header that contains the remote user.
146 | # The HTTP header in the request is converted to a key by converting
147 | # all characters to uppercase, replacing any hyphens with underscores
148 | # and adding an HTTP_ prefix to the name. So, for example, if the header
149 | # is called Remote-User that would be configured as HTTP_REMOTE_USER
150 | #
151 | # Defaults to HTTP_REMOTE_USER
152 | ## remote_user_header=HTTP_REMOTE_USER
153 |
154 | # Synchronize a user's groups when they login
155 | ## sync_groups_on_login=false
156 |
157 | # Ignore the case of usernames when searching for existing users.
158 | # Only supported in remoteUserDjangoBackend.
159 | ## ignore_username_case=false
160 |
161 | # Ignore the case of usernames when searching for existing users to authenticate with.
162 | # Only supported in remoteUserDjangoBackend.
163 | ## force_username_lowercase=false
164 |
165 | # Users will expire after they have not logged in for 'n' amount of seconds.
166 | # A negative number means that users will never expire.
167 | ## expires_after=-1
168 |
169 | # Apply 'expires_after' to superusers.
170 | ## expire_superusers=true
171 |
172 | # Configuration options for connecting to LDAP and Active Directory
173 | # -------------------------------------------------------------------
174 | [[ldap]]
175 |
176 | # The search base for finding users and groups
177 | ## base_dn="DC=mycompany,DC=com"
178 | base_dn="cn=users,cn=accounts,dc=xdata,dc=data-tactics-corp,dc=com"
179 |
180 | # URL of the LDAP server
181 | ## ldap_url=ldap://auth.mycompany.com
182 | ldap_url="ldap://10.1.90.11"
183 |
184 | # A PEM-format file containing certificates for the CA's that
185 | # Hue will trust for authentication over TLS.
186 | # The certificate for the CA that signed the
187 | # LDAP server certificate must be included among these certificates.
188 | # See more here http://www.openldap.org/doc/admin24/tls.html.
189 | ## ldap_cert=
190 | ## use_start_tls=true
191 |
192 | # Distinguished name of the user to bind as -- not necessary if the LDAP server
193 | # supports anonymous searches
194 | ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
195 | bind_dn="uid=rouser,cn=users,cn=accounts,dc=xdata,dc=data-tactics-corp,dc=com"
196 |
197 | # Password of the bind user -- not necessary if the LDAP server supports
198 | # anonymous searches
199 | ## bind_password=
200 | bind_password="D@rpa123$"
201 |
202 | # Pattern for searching for usernames -- Use <username> for the parameter
203 | # For use when using LdapBackend for Hue authentication
204 | ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
205 | ldap_username_pattern="uid=<username>,CN=users,CN=accounts,DC=xdata,DC=data-tactics-corp,DC=com"
206 |
207 | # Create users in Hue when they try to login with their LDAP credentials
208 | # For use when using LdapBackend for Hue authentication
209 | ## create_users_on_login = true
210 |
211 | # Ignore the case of usernames when searching for existing users in Hue.
212 | ## ignore_username_case=false
213 |
214 | # Force usernames to lowercase when creating new users from LDAP.
215 | ## force_username_lowercase=false
216 |
217 | # Use search bind authentication.
218 | ## search_bind_authentication=true
219 |
220 | # Choose which kind of subgrouping to use: nested or suboordinate (deprecated).
221 | ## subgroups=suboordinate
222 |
223 | # Define the number of levels to search for nested members.
224 | ## nested_members_search_depth=10
225 |
226 | [[[users]]]
227 |
228 | # Base filter for searching for users
229 | ## user_filter="objectclass=*"
230 | user_filter="(memberOf=CN=memexUsers,CN=groups,CN=accounts,DC=xdata,DC=data-tactics-corp,DC=com)"
231 |
232 | # The username attribute in the LDAP schema
233 | ## user_name_attr=sAMAccountName
234 | user_name_attr=uid
235 |
236 | [[[groups]]]
237 |
238 | # Base filter for searching for groups
239 | ## group_filter="objectclass=*"
240 | group_filter="objectclass=groupOfNames"
241 |
242 | # The group name attribute in the LDAP schema
243 | ## group_name_attr=cn
244 |
245 | # The attribute of the group object which identifies the members of the group
246 | ## group_member_attr=members
247 | group_member_attr="member"
248 |
249 | [[[ldap_servers]]]
250 |
251 | ## [[[[mycompany]]]]
252 |
253 | # The search base for finding users and groups
254 | ## base_dn="DC=mycompany,DC=com"
255 |
256 | # URL of the LDAP server
257 | ## ldap_url=ldap://auth.mycompany.com
258 |
259 | # A PEM-format file containing certificates for the CA's that
260 | # Hue will trust for authentication over TLS.
261 | # The certificate for the CA that signed the
262 | # LDAP server certificate must be included among these certificates.
263 | # See more here http://www.openldap.org/doc/admin24/tls.html.
264 | ## ldap_cert=
265 | ## use_start_tls=true
266 |
267 | # Distinguished name of the user to bind as -- not necessary if the LDAP server
268 | # supports anonymous searches
269 | ## bind_dn="CN=ServiceAccount,DC=mycompany,DC=com"
270 |
271 | # Password of the bind user -- not necessary if the LDAP server supports
272 | # anonymous searches
273 | ## bind_password=
274 |
275 | # Pattern for searching for usernames -- Use <username> for the parameter
276 | # For use when using LdapBackend for Hue authentication
277 | ## ldap_username_pattern="uid=<username>,ou=People,dc=mycompany,dc=com"
278 |
279 | ## Use search bind authentication.
280 | ## search_bind_authentication=true
281 |
282 | ## [[[[[users]]]]]
283 |
284 | # Base filter for searching for users
285 | ## user_filter="objectclass=Person"
286 | #user_filter="(memberOf=CN=memexUsers,CN=groups,CN=accounts,DC=xdata,DC=data-tactics-corp,DC=com)"
287 |
288 | # The username attribute in the LDAP schema
289 | ## user_name_attr=sAMAccountName
290 | #user_name_attr=uid
291 |
292 | ## [[[[[groups]]]]]
293 |
294 | # Base filter for searching for groups
295 | ## group_filter="objectclass=groupOfNames"
296 | #group_filter="objectclass=groupOfNames"
297 |
298 | # The group name attribute in the LDAP schema
299 | ## group_name_attr=cn
300 |
301 | # Configuration options for specifying the Desktop Database. For more info,
302 | # see http://docs.djangoproject.com/en/1.4/ref/settings/#database-engine
303 | # ------------------------------------------------------------------------
304 | [[database]]
305 | # Database engine is typically one of:
306 | # postgresql_psycopg2, mysql, sqlite3 or oracle.
307 | #
308 | # Note that for sqlite3, 'name' below is a path to the filename. For other backends, it is the database name.
309 | # Note for Oracle, options={'threaded':true} must be set in order to avoid crashes.
310 | # Note for Oracle, you can use the Oracle Service Name by setting "port=0" and then "name=<host>:<port>/<service_name>".
311 | ## engine=sqlite3
312 | ## host=
313 | ## port=
314 | ## user=
315 | ## password=
316 | ## name=desktop/desktop.db
317 | ## options={}
318 |
319 | # Configuration options for specifying the Desktop session.
320 | # For more info, see https://docs.djangoproject.com/en/1.4/topics/http/sessions/
321 | # ------------------------------------------------------------------------
322 | [[session]]
323 | # The cookie containing the users' session ID will expire after this amount of time in seconds.
324 | # Default is 2 weeks.
325 | ## ttl=1209600
326 |
327 | # The cookie containing the users' session ID will be secure.
328 | # Should only be enabled with HTTPS.
329 | ## secure=false
330 |
331 | # The cookie containing the users' session ID will use the HTTP only flag.
332 | ## http_only=false
333 |
334 | # Use session-length cookies. Logs out the user when she closes the browser window.
335 | ## expire_at_browser_close=false
336 |
337 |
338 | # Configuration options for connecting to an external SMTP server
339 | # ------------------------------------------------------------------------
340 | [[smtp]]
341 |
342 | # The SMTP server information for email notification delivery
343 | host=localhost
344 | port=25
345 | user=
346 | password=
347 |
348 | # Whether to use a TLS (secure) connection when talking to the SMTP server
349 | tls=no
350 |
351 | # Default email address to use for various automated notification from Hue
352 | ## default_from_email=hue@localhost
353 |
354 |
355 | # Configuration options for Kerberos integration for secured Hadoop clusters
356 | # ------------------------------------------------------------------------
357 | [[kerberos]]
358 |
359 | # Path to Hue's Kerberos keytab file
360 | ## hue_keytab=
361 | # Kerberos principal name for Hue
362 | ## hue_principal=hue/hostname.foo.com
363 | # Path to kinit
364 | ## kinit_path=/path/to/kinit
365 |
366 |
367 | # Configuration options for using OAuthBackend (core) login
368 | # ------------------------------------------------------------------------
369 | [[oauth]]
370 | # The Consumer key of the application
371 | ## consumer_key=XXXXXXXXXXXXXXXXXXXXX
372 |
373 | # The Consumer secret of the application
374 | ## consumer_secret=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
375 |
376 | # The Request token URL
377 | ## request_token_url=https://api.twitter.com/oauth/request_token
378 |
379 | # The Access token URL
380 | ## access_token_url=https://api.twitter.com/oauth/access_token
381 |
382 | # The Authorize URL
383 | ## authenticate_url=https://api.twitter.com/oauth/authorize
384 |
385 |
386 | ###########################################################################
387 | # Settings to configure SAML
388 | ###########################################################################
389 |
390 | [libsaml]
391 | # Xmlsec1 binary path. This program should be executable by the user running Hue.
392 | ## xmlsec_binary=/usr/local/bin/xmlsec1
393 |
394 | # Entity ID for Hue acting as service provider.
395 | # Can also accept a pattern where '<base_url>' will be replaced with server URL base.
396 | ## entity_id="<base_url>/saml2/metadata/"
397 |
398 | # Create users from SSO on login.
399 | ## create_users_on_login=true
400 |
401 | # Required attributes to ask for from IdP.
402 | # This requires a comma separated list.
403 | ## required_attributes=uid
404 |
405 | # Optional attributes to ask for from IdP.
406 | # This requires a comma separated list.
407 | ## optional_attributes=
408 |
409 | # IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates.
410 | ## metadata_file=
411 |
412 | # Private key to encrypt metadata with.
413 | ## key_file=
414 |
415 | # Signed certificate to send along with encrypted metadata.
416 | ## cert_file=
417 |
418 | # A mapping from attributes in the response from the IdP to django user attributes.
419 | ## user_attribute_mapping={'uid':'username'}
420 |
421 | # Have Hue initiated authn requests be signed and provide a certificate.
422 | ## authn_requests_signed=false
423 |
424 | # Have Hue initiated logout requests be signed and provide a certificate.
425 | ## logout_requests_signed=false
426 |
427 | ## Username can be sourced from 'attributes' or 'nameid'.
428 | ## username_source=attributes
429 |
430 | # Performs the logout or not.
431 | ## logout_enabled=true
432 |
433 |
434 | ###########################################################################
435 | # Settings to configure OPENID
436 | ###########################################################################
437 |
438 | [libopenid]
439 | # (Required) OpenId SSO endpoint url.
440 | ## server_endpoint_url=https://www.google.com/accounts/o8/id
441 |
442 | # OpenId 1.1 identity url prefix to be used instead of SSO endpoint url
443 | # This is only supported if you are using an OpenId 1.1 endpoint
444 | ## identity_url_prefix=https://app.onelogin.com/openid/your_company.com/
445 |
446 | # Create users from OPENID on login.
447 | ## create_users_on_login=true
448 |
449 | # Use email for username
450 | ## use_email_for_username=true
451 |
452 |
453 | ###########################################################################
454 | # Settings to configure OAuth
455 | ###########################################################################
456 |
457 | [liboauth]
458 | # NOTE:
459 | # To work, each of the active (i.e. uncommented) services must have
460 | # an application created on the social network.
461 | # Then the "consumer key" and "consumer secret" must be provided here.
462 | #
463 | # The addresses where to do so are:
464 | # Twitter: https://dev.twitter.com/apps
465 | # Google+ : https://cloud.google.com/
466 | # Facebook: https://developers.facebook.com/apps
467 | # Linkedin: https://www.linkedin.com/secure/developer
468 | #
469 | # Additionally, the following must be set in the application settings:
470 | # Twitter: Callback URL (aka Redirect URL) must be set to http://YOUR_HUE_IP_OR_DOMAIN_NAME/oauth/social_login/oauth_authenticated
471 | # Google+ : CONSENT SCREEN must have email address
472 | # Facebook: Sandbox Mode must be DISABLED
473 | # Linkedin: "In OAuth User Agreement", r_emailaddress is REQUIRED
474 |
475 | # The Consumer key of the application
476 | ## consumer_key_twitter=
477 | ## consumer_key_google=
478 | ## consumer_key_facebook=
479 | ## consumer_key_linkedin=
480 |
481 | # The Consumer secret of the application
482 | ## consumer_secret_twitter=
483 | ## consumer_secret_google=
484 | ## consumer_secret_facebook=
485 | ## consumer_secret_linkedin=
486 |
487 | # The Request token URL
488 | ## request_token_url_twitter=https://api.twitter.com/oauth/request_token
489 | ## request_token_url_google=https://accounts.google.com/o/oauth2/auth
490 | ## request_token_url_linkedin=https://www.linkedin.com/uas/oauth2/authorization
491 | ## request_token_url_facebook=https://graph.facebook.com/oauth/authorize
492 |
493 | # The Access token URL
494 | ## access_token_url_twitter=https://api.twitter.com/oauth/access_token
495 | ## access_token_url_google=https://accounts.google.com/o/oauth2/token
496 | ## access_token_url_facebook=https://graph.facebook.com/oauth/access_token
497 | ## access_token_url_linkedin=https://api.linkedin.com/uas/oauth2/accessToken
498 |
499 | # The Authenticate URL
500 | ## authenticate_url_twitter=https://api.twitter.com/oauth/authorize
501 | ## authenticate_url_google=https://www.googleapis.com/oauth2/v1/userinfo?access_token=
502 | ## authenticate_url_facebook=https://graph.facebook.com/me?access_token=
503 | ## authenticate_url_linkedin=https://api.linkedin.com/v1/people/~:(email-address)?format=json&oauth2_access_token=
504 |
505 | # Username Map. Json Hash format.
506 | # Replaces username parts in order to simplify usernames obtained
507 | # Example: {"@sub1.domain.com":"_S1", "@sub2.domain.com":"_S2"}
508 | # converts 'email@sub1.domain.com' to 'email_S1'
509 | ## username_map={}
510 |
511 | # Whitelisted domains (only applies to Google OAuth). CSV format.
512 | ## whitelisted_domains_google=
513 |
514 | ###########################################################################
515 | # Settings for the RDBMS application
516 | ###########################################################################
517 |
518 | [librdbms]
519 | # The RDBMS app can have any number of databases configured in the databases
520 | # section. A database is known by its section name
521 | # (i.e. sqlite, mysql, psql, and oracle in the list below).
522 |
523 | [[databases]]
524 | # sqlite configuration.
525 | ## [[[sqlite]]]
526 | # Name to show in the UI.
527 | ## nice_name=SQLite
528 |
529 | # For SQLite, name defines the path to the database.
530 | ## name=/tmp/sqlite.db
531 |
532 | # Database backend to use.
533 | ## engine=sqlite
534 |
535 | # Database options to send to the server when connecting.
536 | # https://docs.djangoproject.com/en/1.4/ref/databases/
537 | ## options={}
538 |
539 | # mysql, oracle, or postgresql configuration.
540 | ## [[[mysql]]]
541 | # Name to show in the UI.
542 | ## nice_name="My SQL DB"
543 |
544 | # For MySQL and PostgreSQL, name is the name of the database.
545 | # For Oracle, name is the instance of the Oracle server. For Express Edition,
546 | # this is 'xe' by default.
547 | ## name=mysqldb
548 |
549 | # Database backend to use. This can be:
550 | # 1. mysql
551 | # 2. postgresql
552 | # 3. oracle
553 | ## engine=mysql
554 |
555 | # IP or hostname of the database to connect to.
556 | ## host=localhost
557 |
558 | # Port the database server is listening to. Defaults are:
559 | # 1. MySQL: 3306
560 | # 2. PostgreSQL: 5432
561 | # 3. Oracle Express Edition: 1521
562 | ## port=3306
563 |
564 | # Username to authenticate with when connecting to the database.
565 | ## user=example
566 |
567 | # Password matching the username to authenticate with when
568 | # connecting to the database.
569 | ## password=example
570 |
571 | # Database options to send to the server when connecting.
572 | # https://docs.djangoproject.com/en/1.4/ref/databases/
573 | ## options={}
574 |
575 | ###########################################################################
576 | # Settings to configure your Hadoop cluster.
577 | ###########################################################################
578 |
579 | [hadoop]
580 |
581 | # Configuration for HDFS NameNode
582 | # ------------------------------------------------------------------------
583 | [[hdfs_clusters]]
584 | # HA support by using HttpFs
585 |
586 | [[[default]]]
587 | # Enter the filesystem uri
588 | fs_defaultfs=hdfs://CLUSTER_NAME
589 |
590 | # NameNode logical name.
591 | ## logical_name=
592 |
593 | # Use WebHdfs/HttpFs as the communication mechanism.
594 | # Domain should be the NameNode or HttpFs host.
595 | # Default port is 14000 for HttpFs.
596 | webhdfs_url=http://HTTPFS_SERVER:HTTPFS_PORT/webhdfs/v1
597 |
598 | # Change this if your HDFS cluster is Kerberos-secured
599 | ## security_enabled=false
600 |
601 | # Default umask for file and directory creation, specified in an octal value.
602 | ## umask=022
603 |
604 | # Directory of the Hadoop configuration
605 | ## hadoop_conf_dir=$HADOOP_CONF_DIR when set or '/etc/hadoop/conf'
606 |
607 | # Configuration for YARN (MR2)
608 | # ------------------------------------------------------------------------
609 | [[yarn_clusters]]
610 |
611 | [[[default]]]
612 | # Enter the host on which you are running the ResourceManager
613 | ## resourcemanager_host=localhost
614 |
615 | # The port where the ResourceManager IPC listens on
616 | ## resourcemanager_port=8032
617 |
618 | # Whether to submit jobs to this cluster
619 | submit_to=false
620 |
621 | # Resource Manager logical name (required for HA)
622 | ## logical_name=
623 |
624 | # Change this if your YARN cluster is Kerberos-secured
625 | ## security_enabled=false
626 |
627 | # URL of the ResourceManager API
628 | ## resourcemanager_api_url=http://localhost:8088
629 |
630 | # URL of the ProxyServer API
631 | ## proxy_api_url=http://localhost:8088
632 |
633 | # URL of the HistoryServer API
634 | ## history_server_api_url=http://localhost:19888
635 |
636 | # In secure mode (HTTPS), if SSL certificates from Resource Manager's
637 | # Rest Server have to be verified against certificate authority
638 | ## ssl_cert_ca_verify=False
639 |
640 | # HA support by specifying multiple clusters
641 | # e.g.
642 |
643 | # [[[ha]]]
644 | # Resource Manager logical name (required for HA)
645 | ## logical_name=my-rm-name
646 |
647 | # Configuration for MapReduce (MR1)
648 | # ------------------------------------------------------------------------
649 | [[mapred_clusters]]
650 |
651 | [[[default]]]
652 | # Enter the host on which you are running the Hadoop JobTracker
653 | ## jobtracker_host=localhost
654 |
655 | # The port where the JobTracker IPC listens on
656 | ## jobtracker_port=8021
657 |
658 | # JobTracker logical name for HA
659 | ## logical_name=
660 |
661 | # Thrift plug-in port for the JobTracker
662 | ## thrift_port=9290
663 |
664 | # Whether to submit jobs to this cluster
665 | submit_to=false
666 |
667 | # Change this if your MapReduce cluster is Kerberos-secured
668 | ## security_enabled=false
669 |
670 | # HA support by specifying multiple clusters
671 | # e.g.
672 |
673 | # [[[ha]]]
674 | # Enter the logical name of the JobTrackers
675 | ## logical_name=my-jt-name
676 |
677 |
678 | ###########################################################################
679 | # Settings to configure the Filebrowser app
680 | ###########################################################################
681 |
682 | [filebrowser]
683 | # Location on local filesystem where the uploaded archives are temporary stored.
684 | ## archive_upload_tempdir=/tmp
685 |
686 | ###########################################################################
687 | # Settings to configure liboozie
688 | ###########################################################################
689 |
690 | [liboozie]
691 | # The URL where the Oozie service runs on. This is required in order for
692 | # users to submit jobs. Empty value disables the config check.
693 | ## oozie_url=http://localhost:11000/oozie
694 |
695 | # Requires FQDN in oozie_url if enabled
696 | ## security_enabled=false
697 |
698 | # Location on HDFS where the workflows/coordinator are deployed when submitted.
699 | ## remote_deployement_dir=/user/hue/oozie/deployments
700 |
701 |
702 | ###########################################################################
703 | # Settings to configure the Oozie app
704 | ###########################################################################
705 |
706 | [oozie]
707 | # Location on local FS where the examples are stored.
708 | ## local_data_dir=..../examples
709 |
710 | # Location on local FS where the data for the examples is stored.
711 | ## sample_data_dir=...thirdparty/sample_data
712 |
713 | # Location on HDFS where the oozie examples and workflows are stored.
714 | ## remote_data_dir=/user/hue/oozie/workspaces
715 |
716 | # Maximum number of Oozie workflows or coordinators to retrieve in one API call.
717 | ## oozie_jobs_count=100
718 |
719 | # Use Cron format for defining the frequency of a Coordinator instead of the old frequency number/unit.
720 | ## enable_cron_scheduling=true
721 |
722 |
723 | ###########################################################################
724 | # Settings to configure Beeswax with Hive
725 | ###########################################################################
726 |
727 | [beeswax]
728 |
729 | # Host where HiveServer2 is running.
730 | # If Kerberos security is enabled, use fully-qualified domain name (FQDN).
731 | ## hive_server_host=localhost
732 |
733 | # Port where HiveServer2 Thrift server runs on.
734 | ## hive_server_port=10000
735 |
736 | # Hive configuration directory, where hive-site.xml is located
737 | ## hive_conf_dir=/etc/hive/conf
738 |
739 | # Timeout in seconds for thrift calls to Hive service
740 | ## server_conn_timeout=120
741 |
742 | # Choose whether Hue uses the GetLog() thrift call to retrieve Hive logs.
743 | # If false, Hue will use the FetchResults() thrift call instead.
744 | ## use_get_log_api=true
745 |
746 | # Set a LIMIT clause when browsing a partitioned table.
747 | # A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.
748 | ## browse_partitioned_table_limit=250
749 |
750 | # A limit to the number of rows that can be downloaded from a query.
751 | # A value of -1 means there will be no limit.
752 | # A maximum of 65,000 is applied to XLS downloads.
753 | ## download_row_limit=1000000
754 |
755 | # Hue will try to close the Hive query when the user leaves the editor page.
756 | # This will free all the query resources in HiveServer2, but also make its results inaccessible.
757 | ## close_queries=false
758 |
759 | # Thrift version to use when communicating with HiveServer2
760 | ## thrift_version=5
761 |
762 | [[ssl]]
763 | # SSL communication enabled for this server.
764 | ## enabled=false
765 |
766 | # Path to Certificate Authority certificates.
767 | ## cacerts=/etc/hue/cacerts.pem
768 |
769 | # Path to the private key file.
770 | ## key=/etc/hue/key.pem
771 |
772 | # Path to the public certificate file.
773 | ## cert=/etc/hue/cert.pem
774 |
775 | # Choose whether Hue should validate certificates received from the server.
776 | ## validate=true
777 |
778 |
779 | ###########################################################################
780 | # Settings to configure Impala
781 | ###########################################################################
782 |
783 | [impala]
784 | # Host of the Impala Server (one of the Impalad)
785 | ## server_host=localhost
786 |
787 | # Port of the Impala Server
788 | ## server_port=21050
789 |
790 | # Kerberos principal
791 | ## impala_principal=impala/hostname.foo.com
792 |
793 | # Turn on/off impersonation mechanism when talking to Impala
794 | ## impersonation_enabled=False
795 |
796 | # Number of initial rows of a result set to ask Impala to cache in order
797 | # to support re-fetching them for downloading them.
798 | # Set to 0 for disabling the option and backward compatibility.
799 | ## querycache_rows=50000
800 |
801 | # Timeout in seconds for thrift calls
802 | ## server_conn_timeout=120
803 |
804 | # Hue will try to close the Impala query when the user leaves the editor page.
805 | # This will free all the query resources in Impala, but also make its results inaccessible.
806 | ## close_queries=true
807 |
808 | # If QUERY_TIMEOUT_S > 0, the query will be timed out (i.e. cancelled) if Impala does not do any work
809 | # (compute or send back results) for that query within QUERY_TIMEOUT_S seconds.
810 | ## query_timeout_s=600
811 |
812 | [[ssl]]
813 | # SSL communication enabled for this server.
814 | ## enabled=false
815 |
816 | # Path to Certificate Authority certificates.
817 | ## cacerts=/etc/hue/cacerts.pem
818 |
819 | # Path to the private key file.
820 | ## key=/etc/hue/key.pem
821 |
822 | # Path to the public certificate file.
823 | ## cert=/etc/hue/cert.pem
824 |
825 | # Choose whether Hue should validate certificates received from the server.
826 | ## validate=true
827 |
828 |
829 | ###########################################################################
830 | # Settings to configure Pig
831 | ###########################################################################
832 |
833 | [pig]
834 | # Location of piggybank.jar on local filesystem.
835 | ## local_sample_dir=/usr/share/hue/apps/pig/examples
836 |
837 | # Location piggybank.jar will be copied to in HDFS.
838 | ## remote_data_dir=/user/hue/pig/examples
839 |
840 |
841 | ###########################################################################
842 | # Settings to configure Sqoop
843 | ###########################################################################
844 |
845 | [sqoop]
846 | # For autocompletion, fill out the librdbms section.
847 |
848 | # Sqoop server URL
849 | ## server_url=http://localhost:12000/sqoop
850 |
851 |
852 | ###########################################################################
853 | # Settings to configure Proxy
854 | ###########################################################################
855 |
856 | [proxy]
857 | # Comma-separated list of regular expressions,
858 | # which match 'host:port' of requested proxy target.
859 | ## whitelist=(localhost|127\.0\.0\.1):(50030|50070|50060|50075)
860 |
861 | # Comma-separated list of regular expressions,
862 | # which match any prefix of 'host:port/path' of requested proxy target.
863 | # This does not support matching GET parameters.
864 | ## blacklist=
865 |
866 |
867 | ###########################################################################
868 | # Settings to configure HBase Browser
869 | ###########################################################################
870 |
871 | [hbase]
872 | # Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.
873 | # Use full hostname with security.
874 | ## hbase_clusters=(Cluster|localhost:9090)
875 |
876 | # HBase configuration directory, where hbase-site.xml is located.
877 | ## hbase_conf_dir=/etc/hbase/conf
878 |
879 | # Hard limit of rows or columns per row fetched before truncating.
880 | ## truncate_limit = 500
881 |
882 | # 'buffered' is the default of the HBase Thrift Server and supports security.
883 | # 'framed' can be used to chunk up responses,
884 | # which is useful when used in conjunction with the nonblocking server in Thrift.
885 | ## thrift_transport=buffered
886 |
887 |
888 | ###########################################################################
889 | # Settings to configure Solr Search
890 | ###########################################################################
891 |
892 | [search]
893 |
894 | # URL of the Solr Server
895 | ## solr_url=http://localhost:8983/solr/
896 |
897 | # Requires FQDN in solr_url if enabled
898 | ## security_enabled=false
899 |
900 | ## Query sent when no term is entered
901 | ## empty_query=*:*
902 |
903 |
904 | ###########################################################################
905 | # Settings to configure Solr Indexer
906 | ###########################################################################
907 |
908 | [indexer]
909 |
910 | # Location of the solrctl binary.
911 | ## solrctl_path=/usr/bin/solrctl
912 |
913 | # Zookeeper ensemble.
914 | ## solr_zk_ensemble=localhost:2181/solr
915 |
916 |
917 | ###########################################################################
918 | # Settings to configure Job Designer
919 | ###########################################################################
920 |
921 | [jobsub]
922 |
923 | # Location on local FS where examples and template are stored.
924 | ## local_data_dir=..../data
925 |
926 | # Location on local FS where sample data is stored
927 | ## sample_data_dir=...thirdparty/sample_data
928 |
929 |
930 | ###########################################################################
931 | # Settings to configure Job Browser.
932 | ###########################################################################
933 |
934 | [jobbrowser]
935 | # Share submitted jobs information with all users. If set to false,
936 | # submitted jobs are visible only to the owner and administrators.
937 | ## share_jobs=true
938 |
939 |
940 | ###########################################################################
941 | # Settings to configure the Zookeeper application.
942 | ###########################################################################
943 |
944 | [zookeeper]
945 |
946 | [[clusters]]
947 |
948 | [[[default]]]
949 | # Zookeeper ensemble. Comma separated list of Host/Port.
950 | # e.g. localhost:2181,localhost:2182,localhost:2183
951 | ## host_ports=localhost:2181
952 |
953 | # The URL of the REST contrib service (required for znode browsing)
954 | ## rest_url=http://localhost:9998
955 |
956 |
957 | ###########################################################################
958 | # Settings to configure the Spark application.
959 | ###########################################################################
960 |
961 | [spark]
962 | # URL of the REST Spark Job Server.
963 | ## server_url=http://localhost:8090/
964 |
965 |
966 | ###########################################################################
967 | # Settings for the User Admin application
968 | ###########################################################################
969 |
970 | [useradmin]
971 | # The name of the default user group that users will be a member of
972 | ## default_user_group=default
973 |
974 | [[password_policy]]
975 | # Set password policy for all users. The default policy requires a password to be at least 8 characters long,
976 | # and to contain both uppercase and lowercase letters, numbers, and special characters.
977 |
978 | ## is_enabled=false
979 | ## pwd_regex="^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"
980 | ## pwd_hint="The password must be at least 8 characters long, and must contain both uppercase and lowercase letters, at least one number, and at least one special character."
981 | ## pwd_error_message="The password must be at least 8 characters long, and must contain both uppercase and lowercase letters, at least one number, and at least one special character."
982 |
983 | ###########################################################################
984 | # Settings for the Sentry lib
985 | ###########################################################################
986 |
987 | [libsentry]
988 | # Hostname or IP of server.
989 | ## hostname=localhost
990 |
991 | # Port the sentry service is running on.
992 | ## port=8038
993 |
994 | # Sentry configuration directory, where sentry-site.xml is located.
995 | ## sentry_conf_dir=/etc/sentry/conf
996 |
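The [[ldap]] settings above can be sanity-checked from any host with the OpenLDAP client tools before restarting Hue. A minimal sketch, reusing the bind_dn, base_dn, and user_filter values from this template (ldapsearch prompts for bind_password because of -W):
```bash
# Should list the uid of every account matched by Hue's user_filter
ldapsearch -H ldap://10.1.90.11 \
  -D "uid=rouser,cn=users,cn=accounts,dc=xdata,dc=data-tactics-corp,dc=com" -W \
  -b "cn=users,cn=accounts,dc=xdata,dc=data-tactics-corp,dc=com" \
  "(memberOf=CN=memexUsers,CN=groups,CN=accounts,DC=xdata,DC=data-tactics-corp,DC=com)" uid
```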
--------------------------------------------------------------------------------
/docker/dsra-hue/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker push hub.dsra.local:5000/dsra/hue:3.9.0
4 |
--------------------------------------------------------------------------------
/docker/dsra-hue/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -t -i --rm --name=hue -p 8000:8000 -e CLUSTER_NAME=dsra -e HTTPFS_SERVER=httpfs.marathon.mesos -e HTTPFS_PORT=32000 hub.dsra.local:5000/dsra/hue:3.9.0 start
3 |
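Hue's file browser only works if the HttpFS endpoint substituted into webhdfs_url is reachable, so it is worth probing it with the same HTTPFS_SERVER/HTTPFS_PORT values run.sh passes in. A hedged check, assuming the hue proxy user configured in httpfs-site.xml:
```bash
# A JSON FileStatuses listing of / means HttpFS is up and the proxy user works
curl -s "http://httpfs.marathon.mesos:32000/webhdfs/v1/?op=LISTSTATUS&user.name=hue"
```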
--------------------------------------------------------------------------------
/docker/file-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tutum/tomcat:latest
2 | MAINTAINER Matt Parker
3 |
4 | ADD server.xml $CATALINA_HOME/conf/server.xml
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/docker/file-server/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t dsra/file-server .
3 |
--------------------------------------------------------------------------------
/docker/file-server/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -t -i --restart=always --net=host --name=hub -v /opt/file-serve:/opt/file-serve -p 8088:8088 dsra/file-server /run.sh
3 |
--------------------------------------------------------------------------------
/docker/file-server/server.xml:
--------------------------------------------------------------------------------
[server.xml content lost in extraction: the markup was stripped, leaving only blank numbered lines; the original Tomcat server.xml copied into $CATALINA_HOME/conf by the Dockerfile above is not recoverable from this dump]
--------------------------------------------------------------------------------
/docker/hadoop/README.md:
--------------------------------------------------------------------------------
1 | Following the docs on https://coreos.com/os/docs/latest/booting-on-vagrant.html
2 | ```
3 | git clone https://github.com/coreos/coreos-vagrant.git
4 | cd coreos-vagrant
5 | ```
6 | We use the "alpha" update channel, which is Vagrant's current default, but we need more instances.
7 | 1. Save the user-data.sample -> user-data
8 | 2. Save the config.rb.sample -> config.rb.
9 | 3. In the config.rb, change from 1->3:
10 | ```
11 | $num_instances = 3
12 | ```
13 |
14 | We also want to make sure that the /etc/hosts file on each VM is updated, so we are going to brute-force it. Add the code below right after the line
15 | ```
16 | config.vm.network :private_network, ip: ip
17 | ```
18 | which is at approximately line 124 in the Vagrantfile. The following code updates the hosts file on each VM.
19 | ```
20 | # Update hosts file
21 | (1..$num_instances).each do|v|
22 | config.vm.provision "shell" do |s|
23 | s.inline = "echo $1 $2 >> /etc/hosts"
24 | s.args = ["172.17.8.#{v+100}", "%s-%02d" % [$instance_name_prefix, v]]
25 | end
26 | end
27 | ```
28 |
29 | Then connect to the Vagrant guest (where X is 1, 2, or 3):
30 | ```
31 | vagrant ssh core-0X
32 | ```
33 |
34 | ## coreos-01
35 | ```
36 | sudo docker run -d --name zk --env ZK_ID=1 --env ZK_SERVERS=core-01:2888:3888 -p 2181:2181 -p 2888:2888 -p 3888:3888 aarongdocker/zookeeper
37 | sudo docker create --name journalnode-data aarongdocker/hdfs /bin/true
38 | sudo docker run -it --name jn --volumes-from journalnode-data -e NNODE1_IP=core-02 -e NNODE2_IP=core-03 -e ZK_IPS=core-01:2181 -e JN_IPS=core-01:8485 -p 8485:8485 -p 8480:8480 aarongdocker/hdfs journalnode
39 | ```
40 | ### coreos-01 after the NNs have started
41 | ```
42 | sudo docker create --name datanode-data aarongdocker/hdfs /bin/true
43 | sudo docker run -it --name dn --volumes-from datanode-data -e NNODE1_IP=core-02 -e NNODE2_IP=core-03 -e ZK_IPS=core-01:2181 -e JN_IPS=core-01:8485 -p 1004:1004 -p 1006:1006 -p 8022:8022 -p 50010:50010 -p 50020:50020 -p 50075:50075 aarongdocker/hdfs datanode
44 | ```
45 | #### coreos-02
46 | ```
47 | sudo docker create --name namenode-data aarongdocker/hdfs /bin/true
48 | sudo docker run -it --name nn --volumes-from namenode-data -e NNODE1_IP=core-02 -e NNODE2_IP=core-03 -e ZK_IPS=core-01:2181 -e JN_IPS=core-01:8485 --net=host aarongdocker/hdfs active
49 | ```
50 | #### coreos-03
51 | ```
52 | sudo docker create --name namenode-data aarongdocker/hdfs /bin/true
53 | sudo docker run -it --name nn --volumes-from namenode-data -e NNODE1_IP=core-02 -e NNODE2_IP=core-03 -e ZK_IPS=core-01:2181 -e JN_IPS=core-01:8485 --net=host aarongdocker/hdfs standby
54 | ```
55 |
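Once both namenodes are running, the HA roles can be verified from either namenode container. A sketch using the container name nn from above and the HADOOP_PREFIX baked into the hdfs image:
```bash
# One namenode should report "active", the other "standby"
sudo docker exec nn /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
sudo docker exec nn /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
```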
--------------------------------------------------------------------------------
/docker/hadoop/hdfs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM java:8
2 | MAINTAINER Aaron Glahe
3 |
4 | # Setup env
5 | USER root
6 | ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
7 |
8 | ENV HADOOP_USER hdfs
9 |
10 | ENV HADOOP_PREFIX /usr/local/hadoop
11 | ENV HADOOP_COMMON_HOME /usr/local/hadoop
12 | ENV HADOOP_HDFS_HOME /usr/local/hadoop
13 | ENV HADOOP_CONF_DIR /usr/local/hadoop/etc/hadoop
14 |
15 | # download hadoop
16 | RUN wget -q -O - http://apache.mirrors.pair.com/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz | tar -xzf - -C /usr/local \
17 | && ln -s /usr/local/hadoop-2.7.1 /usr/local/hadoop \
18 | && mkdir -p /data/hdfs/nn \
19 | && mkdir -p /data/hdfs/dn \
20 | && mkdir -p /data/hdfs/journal \
21 | && groupadd -r hadoop \
22 | && groupadd -r $HADOOP_USER && useradd -r -g $HADOOP_USER -G hadoop $HADOOP_USER
23 |
24 | # Copy the Site files up
25 | WORKDIR /usr/local/hadoop
26 | COPY core-site.xml.template etc/hadoop/core-site.xml.template
27 | COPY hdfs-site.xml.template etc/hadoop/hdfs-site.xml.template
28 |
29 | # Setup permissions and ownership (httpfs tomcat conf for 600 permissions)
30 | RUN chown -R $HADOOP_USER:hadoop /data/hdfs /usr/local/hadoop-2.7.1 && chmod -R 775 $HADOOP_CONF_DIR
31 |
32 | #Ports: Namenode DataNode Journal Node WebHDFS
33 | EXPOSE 8020 8022 50070 50470 50090 50495 1006 1004 50010 50020 50075 50475 8485 8480 14000
34 |
35 | # Location to store data
36 | VOLUME ["/data/hdfs/dn", "/data/hdfs/journal", "/data/hdfs/nn"]
37 |
38 | # Copy the bootstrap shell
39 | COPY bootstrap.sh /bin/bootstrap.sh
40 |
41 | # Entry point for the container
42 | USER $HADOOP_USER
43 | ENTRYPOINT ["/bin/bootstrap.sh"]
44 | CMD ["bash"]
45 |
--------------------------------------------------------------------------------
/docker/hadoop/hdfs/README.md:
--------------------------------------------------------------------------------
1 | ## Description:
2 |
3 | Meant to stand up a Hadoop 2.7.1 HDFS HA cluster on multiple machines inside Docker containers. On the "active" namenode, the first time it starts up, it will format the namenode as well as format ZK for failover. The "standby" namenode will bootstrapStandby.
4 |
5 | #### Runtime options:
6 |
7 | * __active__: This is meant to start the 1st namenode. This will also start a zkfc service.
8 | * __standby__: This is meant to start the 2nd namenode, bootstrapping from an already formatted/running namenode. This will also start a zkfc service.
9 | * __zkfc__: If you choose to start the zkfc as a separate service
10 | * __journalnode__: starts a journalnode
11 | * __datanode__: starts a datanode
12 | * __bash__: allows you to jump in and check things out
13 |
14 | #### Environment Variables
15 |
16 | __CLUSTER_NAME__: the HDFS URI default filesystem name
17 |
18 | __NNODE1_IP__: Namenode #1 IP/hostname
19 |
20 | __NNODE2_IP__: Namenode #2 IP/hostname
21 |
22 | __NNODE_ID__: the id of the particular namenode either: *nn1* or *nn2*
23 |
24 | __JN_IPS__: comma separated list of journal node IPs, e.g. jn01:8485,jn02:8485,jn03:8485,jn04:8485,jn05:8485
25 |
26 | __ZK_IPS__: comma separated list of zookeeper IPs, e.g. zk01:2181,zk02:2181,zk03:2181,zk04:2181,zk05:2181
27 |
28 | #### Volumes:
29 |
30 | /data/hdfs/nn: inside the container, where the fsimage/namenode metadata exists
31 |
32 | /data/hdfs/journal: inside the container, where the journal node keeps the edits
33 |
34 | /data/hdfs/dn: inside the container, where the datanode keeps the blocks
35 |
36 | #### Command Line examples
37 |
38 | To see examples of how to start, please see the startup-cmdlines files one directory "up."
39 |
40 | * sudo docker run -it --name nn01 --net=host --env-file hdfs-envs --env NNODE_ID=nn1 hdfs active
41 |
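The hdfs-envs file passed via --env-file is not included in this directory; a hypothetical example matching the variables documented above (adjust the hosts for your cluster):
```bash
# hdfs-envs: one KEY=VALUE per line, consumed by docker run --env-file
CLUSTER_NAME=dsra
NNODE1_IP=core-02
NNODE2_IP=core-03
JN_IPS=core-01:8485
ZK_IPS=core-01:2181
```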
--------------------------------------------------------------------------------
/docker/hadoop/hdfs/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # make sure we have everything
4 | if [ -z "$CLUSTER_NAME" ] || [ -z "$NNODE1_IP" ] || [ -z "$NNODE2_IP" ] || [ -z "$ZK_IPS" ] || [ -z "$JN_IPS" ]; then
5 |   echo "CLUSTER_NAME, NNODE1_IP, NNODE2_IP, JN_IPS and ZK_IPS need to be set as environment variables to be able to run."
6 |   exit 1
7 | fi
8 |
9 | # convert the commas to semicolons..if they exist
10 | JNODES=$(echo $JN_IPS | tr "," ";")
11 |
12 | # Replace all the variables in hdfs-site.xml
13 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/hdfs-site.xml.template \
14 | | sed "s/NNODE1_IP/$NNODE1_IP/" \
15 | | sed "s/NNODE2_IP/$NNODE2_IP/" \
16 | | sed "s/JNODES/$JNODES/" \
17 | > /usr/local/hadoop/etc/hadoop/hdfs-site.xml
18 |
19 | # Replace all the variables in core-site.xml
20 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/core-site.xml.template \
21 | | sed "s/ZK_IPS/$ZK_IPS/" \
22 | > /usr/local/hadoop/etc/hadoop/core-site.xml
23 |
24 | # Read the 1st arg, and based upon one of the five: format or bootstrap or start the particular service
25 | # NN and ZKFC stick together
26 | case "$1" in
27 | active)
28 | if [[ ! -a /data/hdfs/nn/current/VERSION ]]; then
29 | echo "Format Namenode.."
30 | $HADOOP_PREFIX/bin/hdfs namenode -format
31 |
32 | echo "Format Zookeeper for Fast failover.."
33 | $HADOOP_PREFIX/bin/hdfs zkfc -formatZK
34 | fi
35 | $HADOOP_PREFIX/sbin/hadoop-daemon.sh start zkfc
36 | $HADOOP_PREFIX/bin/hdfs namenode
37 | ;;
38 | standby)
39 | if [[ ! -a /data/hdfs/nn/current/VERSION ]]; then
40 | echo "Bootstrap Standby Namenode.."
41 | $HADOOP_PREFIX/bin/hdfs namenode -bootstrapStandby
42 | fi
43 | $HADOOP_PREFIX/sbin/hadoop-daemon.sh start zkfc
44 | $HADOOP_PREFIX/bin/hdfs namenode
45 | ;;
46 | zkfc)
47 | $HADOOP_PREFIX/bin/hdfs zkfc
48 | ;;
49 | journalnode)
50 | $HADOOP_PREFIX/bin/hdfs journalnode
51 | ;;
52 | datanode)
53 | $HADOOP_PREFIX/bin/hdfs datanode
54 | ;;
55 | bash)
56 | /bin/bash
57 | ;;
58 | *)
59 | echo $"Usage: {active|standby|zkfc|journalnode|datanode|bash}"
60 | eval $*
61 | esac
62 |
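The tr call above exists because a qjournal URI separates journal nodes with semicolons, while JN_IPS uses commas. For example:
```bash
echo "jn01:8485,jn02:8485,jn03:8485" | tr "," ";"
# -> jn01:8485;jn02:8485;jn03:8485, used as qjournal://jn01:8485;jn02:8485;jn03:8485/CLUSTER_NAME
```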
--------------------------------------------------------------------------------
/docker/hadoop/hdfs/core-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <property>
4 |     <name>fs.defaultFS</name>
5 |     <value>hdfs://CLUSTER_NAME</value>
6 |   </property>
7 |
8 |   <property>
9 |     <name>ha.zookeeper.quorum</name>
10 |     <value>ZK_IPS</value>
11 |   </property>
12 |
13 |   <!-- Trash configuration (values in minutes) -->
14 |   <property>
15 |     <name>fs.trash.interval</name>
16 |     <value>30</value>
17 |   </property>
18 |   <property>
19 |     <name>fs.trash.checkpoint.interval</name>
20 |     <value>15</value>
21 |   </property>
22 |
23 |   <!-- Proxy user settings for HttpFS and Hue -->
24 |   <property>
25 |     <name>hadoop.proxyuser.httpfs.hosts</name>
26 |     <value>*</value>
27 |   </property>
28 |   <property>
29 |     <name>hadoop.proxyuser.httpfs.groups</name>
30 |     <value>*</value>
31 |   </property>
32 |   <property>
33 |     <name>hadoop.proxyuser.hue.hosts</name>
34 |     <value>*</value>
35 |   </property>
36 |   <property>
37 |     <name>hadoop.proxyuser.hue.groups</name>
38 |     <value>*</value>
39 |   </property>
40 | </configuration>
--------------------------------------------------------------------------------
/docker/hadoop/hdfs/hdfs-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <!-- HA nameservice and namenode IDs -->
4 |   <property>
5 |     <name>dfs.nameservices</name>
6 |     <value>CLUSTER_NAME</value>
7 |   </property>
8 |   <property>
9 |     <name>dfs.ha.namenodes.CLUSTER_NAME</name>
10 |     <value>nn1,nn2</value>
11 |   </property>
12 |
13 |   <!-- RPC, service-RPC and HTTP addresses for both namenodes -->
14 |   <property>
15 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn1</name>
16 |     <value>NNODE1_IP:8020</value>
17 |   </property>
18 |   <property>
19 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn2</name>
20 |     <value>NNODE2_IP:8020</value>
21 |   </property>
22 |   <property>
23 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn1</name>
24 |     <value>NNODE1_IP:8022</value>
25 |   </property>
26 |   <property>
27 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn2</name>
28 |     <value>NNODE2_IP:8022</value>
29 |   </property>
30 |   <property>
31 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn1</name>
32 |     <value>NNODE1_IP:50070</value>
33 |   </property>
34 |   <property>
35 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn2</name>
36 |     <value>NNODE2_IP:50070</value>
37 |   </property>
38 |
39 |   <property>
40 |     <name>dfs.namenode.name.dir</name>
41 |     <value>file:///data/hdfs/nn</value>
42 |     <description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
43 |   </property>
44 |
45 |   <!-- Quorum journal -->
46 |   <property>
47 |     <name>dfs.namenode.shared.edits.dir</name>
48 |     <value>qjournal://JNODES/CLUSTER_NAME</value>
49 |   </property>
50 |   <property>
51 |     <name>dfs.journalnode.edits.dir</name>
52 |     <value>/data/hdfs/journal</value>
53 |   </property>
54 |
55 |   <property>
56 |     <name>dfs.datanode.data.dir</name>
57 |     <value>file:///data/hdfs/dn</value>
58 |     <description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
59 |   </property>
60 |
61 |   <!-- Automatic failover -->
62 |   <property>
63 |     <name>dfs.ha.automatic-failover.enabled</name>
64 |     <value>true</value>
65 |   </property>
66 |   <property>
67 |     <name>dfs.client.failover.proxy.provider.CLUSTER_NAME</name>
68 |     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
69 |   </property>
70 |   <property>
71 |     <name>dfs.ha.fencing.methods</name>
72 |     <value>shell(/bin/true)</value>
73 |   </property>
74 |
75 |   <!-- Replication -->
76 |   <property>
77 |     <name>dfs.namenode.replication.min</name>
78 |     <value>3</value>
79 |     <final>true</final>
80 |   </property>
81 |   <property>
82 |     <name>dfs.replication.max</name>
83 |     <value>10</value>
84 |     <final>true</final>
85 |   </property>
86 |   <property>
87 |     <name>mapreduce.client.submit.file.replication</name>
88 |     <value>3</value>
89 |     <final>true</final>
90 |   </property>
91 |
92 |   <property>
93 |     <name>dfs.webhdfs.enabled</name>
94 |     <value>true</value>
95 |   </property>
96 |   <property>
97 |     <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
98 |     <value>true</value>
99 |   </property>
100 | </configuration>
--------------------------------------------------------------------------------
/docker/hadoop/httpfs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM aarongdocker/hdfs
2 | MAINTAINER Aaron Glahe
3 |
4 | # Setup env
5 | USER root
6 | ENV HADOOP_USER httpfs
7 |
8 | # Copy the httpfs-site file to add Hue
9 | WORKDIR /usr/local/hadoop
10 | COPY httpfs-site.xml etc/hadoop/httpfs-site.xml
11 |
12 | # Add HADOOP_USER and setup permissions and ownership
13 | RUN groupadd -r $HADOOP_USER && useradd -r -g $HADOOP_USER -G hadoop $HADOOP_USER \
14 | && chown -R $HADOOP_USER:hadoop /usr/local/hadoop/
15 |
16 | #Ports: WebHDFS
17 | EXPOSE 14000
18 |
19 | # Copy the bootstrap shell
20 | COPY bootstrap.sh /bin/bootstrap.sh
21 |
22 | USER $HADOOP_USER
23 | ENTRYPOINT ["/bin/bootstrap.sh"]
24 | CMD ["bash"]
25 |
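A hedged build-and-run example, assuming the image is tagged aarongdocker/httpfs and reusing the same env-file as the hdfs containers (HttpFS serves WebHDFS on the exposed port 14000):
```bash
docker build -t aarongdocker/httpfs .
docker run -d --name httpfs --net=host --env-file hdfs-envs aarongdocker/httpfs start
```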
--------------------------------------------------------------------------------
/docker/hadoop/httpfs/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # make sure we have everything
4 | if [ -z "$CLUSTER_NAME" ] || [ -z "$NNODE1_IP" ] || [ -z "$NNODE2_IP" ] || [ -z "$ZK_IPS" ] || [ -z "$JN_IPS" ]; then
5 |   echo "CLUSTER_NAME, NNODE1_IP, NNODE2_IP, JN_IPS and ZK_IPS need to be set as environment variables to be able to run."
6 |   exit 1
7 | fi
8 |
9 | # convert the commas to semicolons..if they exist
10 | JNODES=$(echo $JN_IPS | tr "," ";")
11 |
12 | # Replace all the variables in hdfs-site.xml
13 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/hdfs-site.xml.template \
14 | | sed "s/NNODE1_IP/$NNODE1_IP/" \
15 | | sed "s/NNODE2_IP/$NNODE2_IP/" \
16 | | sed "s/JNODES/$JNODES/" \
17 | > /usr/local/hadoop/etc/hadoop/hdfs-site.xml
18 |
19 | # Replace all the variables in core-site.xml
20 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/core-site.xml.template \
21 | | sed "s/ZK_IPS/$ZK_IPS/" \
22 | > /usr/local/hadoop/etc/hadoop/core-site.xml
23 |
24 | # Read the 1st arg and execute
25 | case "$1" in
26 | start)
27 | $HADOOP_PREFIX/sbin/httpfs.sh run
28 | ;;
29 | bash)
30 | /bin/bash
31 | ;;
32 | *)
33 | echo $"Usage: {httpfs|bash}"
34 | eval $*
35 | esac
36 |
--------------------------------------------------------------------------------
/docker/hadoop/httpfs/httpfs-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!-- (license header stripped during extraction) -->
3 | <configuration>
4 |
5 |   <!-- Allow Hue to proxy users through HttpFS -->
6 |   <property>
7 |     <name>httpfs.proxyuser.hue.hosts</name>
8 |     <value>*</value>
9 |   </property>
10 |   <property>
11 |     <name>httpfs.proxyuser.hue.groups</name>
12 |     <value>*</value>
13 |   </property>
14 |
15 | </configuration>
--------------------------------------------------------------------------------
/docker/hadoop/zookeeper/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM java:8
2 | MAINTAINER Aaron Glahe
3 |
4 | # Setup env
5 | USER root
6 | ENV JAVA_HOME /usr/lib/jvm/java-1.8.0-openjdk-amd64
7 | ENV ZK_VERSION 3.4.7
8 |
9 | # Download Apache Zookeeper, untar, setup zookeeper user, log/snapshot DIR
10 | RUN wget -q -O - http://apache.mirrors.pair.com/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz | tar -xzf - -C /opt \
11 | && ln -s /opt/zookeeper-${ZK_VERSION} /opt/zookeeper \
12 | && groupadd -r zookeeper && useradd -r -g zookeeper zookeeper \
13 | && mkdir -p /var/lib/zookeeper
14 |
15 | # Configure initial zookeeper settings
16 | WORKDIR /opt/zookeeper
17 | COPY start.sh bin/start.sh
18 | COPY zoo.cfg.template conf/zoo.cfg
19 |
20 | # Have zookeeper own everything
21 | RUN chown -R zookeeper:zookeeper /var/lib/zookeeper /opt/zookeeper-${ZK_VERSION}
22 |
23 | # Zookeeper client port, peer port, and leader (election) port
24 | EXPOSE 2181 2888 3888
25 |
26 | # Save the snapshot/log data outside of Zookeeper
27 | VOLUME ["/var/lib/zookeeper"]
28 |
29 | USER zookeeper
30 | ENTRYPOINT ["/opt/zookeeper/bin/start.sh"]
31 | CMD ["/opt/zookeeper/bin/zkServer.sh", "start-foreground"]
32 |
--------------------------------------------------------------------------------
/docker/hadoop/zookeeper/README.md:
--------------------------------------------------------------------------------
1 | ## Description:
2 |
3 | Meant to start up a zookeeper node
4 |
5 | #### Runtime options:
6 |
7 | * __bash__: allows you to jump in and check things out
8 |
9 | #### Environment Variables
10 |
11 | __ZK_ID__: the ID of this particular server
12 |
13 | __ZK_SERVERS__: comma separated list of zookeeper servers in host:peerPort:electionPort form, e.g. zk01:2888:3888,zk02:2888:3888,zk03:2888:3888
14 |
15 | #### Volumes:
16 |
17 | /var/lib/zookeeper: inside the container, where the data and logs exist
18 |
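A running node can be smoke-tested with ZooKeeper's four-letter-word commands over the client port from zoo.cfg:
```bash
# "imok" means the server is up and serving requests
echo ruok | nc localhost 2181
```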
--------------------------------------------------------------------------------
/docker/hadoop/zookeeper/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [[ -z "${ZK_ID}" || -z "${ZK_SERVERS}" ]]; then
4 | echo "Please set ZK_ID and ZK_SERVERS environment variables first."
5 | exit 1
6 | fi
7 |
8 | # Store the id
9 | echo $ZK_ID > /var/lib/zookeeper/myid
10 |
11 | # add server list if given
12 | if [ ! -z "$ZK_SERVERS" ]; then
13 | # explode into array
14 | IFS=',' read -a arr <<< "$ZK_SERVERS"
15 | # if the count is even
16 | if [ $(expr ${#arr[@]} % 2) == 0 ]; then
17 | echo "Number of servers must be odd."
18 | exit 1
19 | else # odd count
20 | # remove current server entries
21 | sed '/^server\.[0-9]*=.*$/d' -i /opt/zookeeper/conf/zoo.cfg
22 | # add entries from array
23 | for i in ${!arr[@]}; do
24 | #echo "$i ${arr[i]}"
25 | echo "server.$(expr 1 + $i)=${arr[i]}" >> /opt/zookeeper/conf/zoo.cfg
26 | done
27 | fi
28 | fi
29 |
30 | eval $*
31 |
--------------------------------------------------------------------------------
/docker/hadoop/zookeeper/zoo.cfg.template:
--------------------------------------------------------------------------------
1 | maxClientCnxns=50
2 | tickTime=2000
3 | initLimit=10
4 | syncLimit=5
5 | clientPort=2181
6 | dataDir=/var/lib/zookeeper
7 |
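start.sh appends the server list to this template at container start. For example, a hypothetical ZK_SERVERS=core-01:2888:3888,core-02:2888:3888,core-03:2888:3888 yields:
```
server.1=core-01:2888:3888
server.2=core-02:2888:3888
server.3=core-03:2888:3888
```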
--------------------------------------------------------------------------------
/docker/haproxy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM haproxy:1.5
2 | MAINTAINER Matt Parker
3 |
4 | COPY dsra.haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
5 |
6 |
--------------------------------------------------------------------------------
/docker/haproxy/build.sh:
--------------------------------------------------------------------------------
1 | docker build -t hub.dsra.local:5000/dsra/haproxy:0.3 .
2 |
3 | docker push hub.dsra.local:5000/dsra/haproxy:0.3
4 |
--------------------------------------------------------------------------------
/docker/haproxy/dsra.haproxy.cfg:
--------------------------------------------------------------------------------
1 | #
2 | # AUTHOR: Matt Parker
3 | # DESCRIPTION: DSRA Marathon HAProxy configuration.
4 | # Configuration based on https://serversforhackers.com/load-balancing-with-haproxy
5 | # LAST UPDATED: 9/22/2015
6 | #
7 |
8 | global
9 | maxconn 100
10 | debug
11 |
12 | defaults
13 | mode http
14 |
15 | timeout connect 30s
16 | timeout client 2m
17 | timeout server 2m
18 |
19 | frontend hub
20 | bind *:80
21 |
22 | option tcplog
23 | option forwardfor
24 |
25 | log global
26 | log 127.0.0.1 local0 debug
27 |
28 | # log /dev/log local0 debug
29 |
30 | acl is_marathon_app hdr_end(host) -i marathon.mesos
31 | use_backend bamboo_haproxy if is_marathon_app
32 |
33 | acl is_mesos_uri hdr(host) -i mesos
34 | acl is_marathon_uri hdr(host) -i marathon
35 |
36 | use_backend mesos_ui if is_mesos_uri
37 | use_backend marathon_uis if is_marathon_uri
38 |
39 | backend marathon_uis
40 | balance roundrobin
41 | server marathon1 10.105.0.1
42 | server marathon2 10.105.0.3
43 | server marathon3 10.105.0.5
44 | server marathon4 10.105.0.7
45 | server marathon5 10.105.0.9
46 |
47 | backend mesos_ui
48 | balance roundrobin
49 | option forwardfor
50 | option httpclose
51 | server mesos5 10.105.0.5:5050 check
52 | server mesos7 10.105.0.7:5050 backup
53 | server mesos9 10.105.0.9:5050 backup
54 | server mesos3 10.105.0.3:5050 backup
55 | server mesos1 10.105.0.1:5050 backup
56 |
57 | backend bamboo_haproxy
58 | mode http
59 | balance roundrobin
60 | option forwardfor
61 | option httpclose
62 | # server worker01 10.105.0.11:31180 check
63 | #server worker02 10.105.0.13:31180 check
64 | server worker03 10.105.0.15:31180 check
65 | #server worker04 10.105.0.17:31180 check
66 | #server worker05 10.105.0.19:31180 check
67 | #server worker06 10.105.0.21:31180 check
68 | #server worker07 10.105.0.25:31180 check
69 | #server worker08 10.105.0.27:31180 check
70 | #server worker09 10.105.0.29:31180 check
71 | #server worker10 10.105.0.31:31180 check
72 | #server worker11 10.105.0.33:31180 check
73 |
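Routing in this config is driven entirely by the Host header, so each ACL can be exercised with curl against the proxy. A sketch assuming the proxy answers on localhost:80:
```bash
# Should land on mesos_ui, marathon_uis, and bamboo_haproxy respectively
curl -sI -H "Host: mesos" http://localhost/
curl -sI -H "Host: marathon" http://localhost/
curl -sI -H "Host: myapp.marathon.mesos" http://localhost/
```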
--------------------------------------------------------------------------------
/docker/haproxy/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -t -i --restart=always --name=haproxy -p 80:80 -p 8080:8080 hub.dsra.local:5000/dsra/haproxy:0.3 haproxy -f /usr/local/etc/haproxy/haproxy.cfg
3 |
--------------------------------------------------------------------------------
/docker/haproxy/run2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker stop haproxy
4 |
5 | docker rm -v haproxy
6 |
7 | docker run -d --name=haproxy -p 80:80 -v /home/mparker/dsra-dcos/docker/haproxy/dsra.haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro haproxy:1.5
8 |
9 | docker logs haproxy
10 |
--------------------------------------------------------------------------------
/docker/haproxy/test/bamboo-reload.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker exec bamboo su - haproxy -c 'haproxy -c -f {{.}}'
3 |
4 |
--------------------------------------------------------------------------------
/docker/haproxy/test/bamboo-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker stop bamboo
3 | docker rm -v bamboo
4 |
--------------------------------------------------------------------------------
/docker/haproxy/test/haproxy_template.cfg:
--------------------------------------------------------------------------------
1 | global
2 | debug
3 |
4 | chroot /var/lib/haproxy
5 | stats socket /run/haproxy/admin.sock mode 660 level admin
6 | stats timeout 30s
7 | user haproxy
8 | group haproxy
9 | daemon
10 |
11 | # Default SSL material locations
12 | ca-base /etc/ssl/certs
13 | crt-base /etc/ssl/private
14 |
15 | # Default ciphers to use on SSL-enabled listening sockets.
16 | # For more information, see ciphers(1SSL).
17 | # ssl-default-bind-ciphers kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL
18 |
19 | defaults
20 | mode http
21 | option httplog
22 | option dontlognull
23 | timeout connect 30000
24 | timeout client 50000
25 | timeout server 50000
26 |
27 | errorfile 400 /etc/haproxy/errors/400.http
28 | errorfile 403 /etc/haproxy/errors/403.http
29 | errorfile 408 /etc/haproxy/errors/408.http
30 | errorfile 500 /etc/haproxy/errors/500.http
31 | errorfile 502 /etc/haproxy/errors/502.http
32 | errorfile 503 /etc/haproxy/errors/503.http
33 | errorfile 504 /etc/haproxy/errors/504.http
34 |
35 | # Template Customization
36 | frontend http-in
37 | log global
38 | log 127.0.0.1 local0
39 |
40 | bind *:80
41 | {{ $services := .Services }}
42 | {{ range $index, $app := .Apps }} {{ if hasKey $services $app.Id }} {{ $service := getService $services $app.Id }}
43 | acl {{ $app.EscapedId }}-aclrule {{ $service.Acl}}
44 | use_backend {{ $app.EscapedId }}-cluster if {{ $app.EscapedId }}-aclrule
45 | {{ else }}
46 | acl {{ $app.EscapedId }}-aclrule hdr_beg(host) -i {{ getMesosDnsAppName $app.Id "" }}
47 | use_backend {{ $app.EscapedId }}-cluster if {{ $app.EscapedId }}-aclrule
48 | {{ end }} {{ end }}
49 |
50 | stats enable
51 | # CHANGE: Your stats credentials
52 | stats auth admin:admin
53 | stats uri /haproxy_stats
54 |
55 | {{ range $index, $app := .Apps }} {{ if $app.Env.BAMBOO_TCP_PORT }}
56 | listen {{ $app.EscapedId }}-cluster-tcp :{{ $app.Env.BAMBOO_TCP_PORT }}
57 | mode tcp
58 | option tcplog
59 | balance roundrobin
60 | {{ range $page, $task := .Tasks }}
61 | server {{ $app.EscapedId}}-{{ $task.Host }}-{{ $task.Port }} {{ $task.Host }}:{{ $task.Port }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
62 | {{ end }}
63 | backend {{ $app.EscapedId }}-cluster{{ if $app.HealthCheckPath }}
64 | option httpchk GET {{ $app.HealthCheckPath }}
65 | {{ end }}
66 | balance leastconn
67 | option httpclose
68 | option forwardfor
69 | {{ range $page, $task := .Tasks }}
70 | server {{ $app.EscapedId}}-{{ $task.Host }}-{{ $task.Port }} {{ $task.Host }}:{{ $task.Port }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
71 | {{ end }}
72 |
73 | ##
74 | ## map service ports of marathon apps
75 | ## ( see https://mesosphere.github.io/marathon/docs/service-discovery-load-balancing.html#ports-assignment )
76 | ## to haproxy frontend port
77 | ##
78 | ## {{ range $index, $app := .Apps }}
79 | ## {{ range $serviceIndex, $servicePort := $app.ServicePorts }}
80 | ## listen {{ $app.EscapedId }}_{{ $servicePort }}
81 | ## bind *:{{ $servicePort }}
82 | ## mode http
83 | ## {{ if $app.HealthCheckPath }}
84 | ## # option httpchk GET {{ $app.HealthCheckPath }}
85 | ## {{ end }}
86 | ## balance leastconn
87 | ## option forwardfor
88 | ## {{ range $page, $task := $app.Tasks }}
89 | ## server {{ $app.EscapedId }}-{{ $task.Host }}-{{ index $task.Ports $serviceIndex }} {{ $task.Host }}:{{ index $task.Ports $serviceIndex }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
90 | ## {{ end }}
91 | ## {{ end }}
92 |
--------------------------------------------------------------------------------
/docker/haproxy/test/haproxy_template.cfg.orig:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | chroot /var/lib/haproxy
5 | stats socket /run/haproxy/admin.sock mode 660 level admin
6 | stats timeout 30s
7 | user haproxy
8 | group haproxy
9 | daemon
10 |
11 | # Default SSL material locations
12 | ca-base /etc/ssl/certs
13 | crt-base /etc/ssl/private
14 |
15 | # Default ciphers to use on SSL-enabled listening sockets.
16 | # For more information, see ciphers(1SSL).
17 | # ssl-default-bind-ciphers kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL
18 |
19 | defaults
20 | log global
21 | mode http
22 | option httplog
23 | option dontlognull
24 | timeout connect 5000
25 | timeout client 50000
26 | timeout server 50000
27 |
28 | errorfile 400 /etc/haproxy/errors/400.http
29 | errorfile 403 /etc/haproxy/errors/403.http
30 | errorfile 408 /etc/haproxy/errors/408.http
31 | errorfile 500 /etc/haproxy/errors/500.http
32 | errorfile 502 /etc/haproxy/errors/502.http
33 | errorfile 503 /etc/haproxy/errors/503.http
34 | errorfile 504 /etc/haproxy/errors/504.http
35 |
36 | # Template Customization
37 | frontend http-in
38 | bind *:80
39 | {{ $services := .Services }}
40 | {{ range $index, $app := .Apps }} {{ if hasKey $services $app.Id }} {{ $service := getService $services $app.Id }}
41 | acl {{ $app.EscapedId }}-aclrule {{ $service.Acl}}
42 | use_backend {{ $app.EscapedId }}-cluster if {{ $app.EscapedId }}-aclrule
43 | {{ else }}
44 | acl {{ $app.EscapedId }}-aclrule hdr_beg(host) -i {{ getMesosDnsAppName $app.Id "" }}
45 | use_backend {{ $app.EscapedId }}-cluster if {{ $app.EscapedId }}-aclrule
46 | {{ end }} {{ end }}
47 |
48 | stats enable
49 | # CHANGE: Your stats credentials
50 | stats auth admin:admin
51 | stats uri /haproxy_stats
52 |
53 | {{ range $index, $app := .Apps }} {{ if $app.Env.BAMBOO_TCP_PORT }}
54 | listen {{ $app.EscapedId }}-cluster-tcp :{{ $app.Env.BAMBOO_TCP_PORT }}
55 | mode tcp
56 | option tcplog
57 | balance roundrobin
58 | {{ range $page, $task := .Tasks }}
59 | server {{ $app.EscapedId}}-{{ $task.Host }}-{{ $task.Port }} {{ $task.Host }}:{{ $task.Port }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
60 | {{ end }}
61 | backend {{ $app.EscapedId }}-cluster{{ if $app.HealthCheckPath }}
62 | option httpchk GET {{ $app.HealthCheckPath }}
63 | {{ end }}
64 | balance leastconn
65 | option httpclose
66 | option forwardfor
67 | {{ range $page, $task := .Tasks }}
68 | server {{ $app.EscapedId}}-{{ $task.Host }}-{{ $task.Port }} {{ $task.Host }}:{{ $task.Port }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
69 | {{ end }}
70 |
71 | ##
72 | ## map service ports of marathon apps
73 | ## ( see https://mesosphere.github.io/marathon/docs/service-discovery-load-balancing.html#ports-assignment )
74 | ## to haproxy frontend port
75 | ##
76 | ## {{ range $index, $app := .Apps }}
77 | ## {{ range $serviceIndex, $servicePort := $app.ServicePorts }}
78 | ## listen {{ $app.EscapedId }}_{{ $servicePort }}
79 | ## bind *:{{ $servicePort }}
80 | ## mode http
81 | ## {{ if $app.HealthCheckPath }}
82 | ## # option httpchk GET {{ $app.HealthCheckPath }}
83 | ## {{ end }}
84 | ## balance leastconn
85 | ## option forwardfor
86 | ## {{ range $page, $task := $app.Tasks }}
87 | ## server {{ $app.EscapedId }}-{{ $task.Host }}-{{ index $task.Ports $serviceIndex }} {{ $task.Host }}:{{ index $task.Ports $serviceIndex }} {{ if $app.HealthCheckPath }} check inter 30000 {{ end }} {{ end }}
88 | ## {{ end }}
89 | ## {{ end }}
90 |
--------------------------------------------------------------------------------
/docker/haproxy/test/launch-bamboo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker stop bamboo
4 |
5 | docker rm -v bamboo
6 |
7 | docker run -d --name bamboo -t -p 31000:8000 -p 31180:80 \
8 | -v /home/core/haproxy_template.cfg:/opt/go/src/github.com/QubitProducts/bamboo/config/haproxy_template.cfg \
9 | -e MARATHON_ENDPOINT=http://10.105.0.1,http://10.105.0.3,http://10.105.0.5,http://10.105.0.7,http://10.105.0.9 \
10 | -e MARATHON_USE_EVENT_STREAM=true \
11 | -e BAMBOO_ENDPOINT=http://localhost:8000 \
12 | -e BAMBOO_ZK_HOST=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181 \
13 | -e BAMBOO_ZK_PATH=/bamboo \
14 | -e BIND=":8000" \
15 | -e CONFIG_PATH="config/production.example.json" \
16 | -e BAMBOO_DOCKER_AUTO_HOST=true \
17 | -e STATSD_ENABLED=false \
18 | hub.dsra.local:5000/dsra/bamboo:0.2.16.9
19 |
--------------------------------------------------------------------------------
/docker/haproxy/test/launch-bamboo2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker stop bamboo
4 |
5 | docker rm -v bamboo
6 |
7 | docker run -d --name bamboo -t -p 31000:8000 -p 31180:80 \
8 | -v /home/core/haproxy_template.cfg:/opt/go/src/github.com/QubitProducts/bamboo/config/haproxy_template.cfg \
9 | -e MARATHON_ENDPOINT=http://10.105.0.1,http://10.105.0.3,http://10.105.0.5,http://10.105.0.7,http://10.105.0.9 \
10 | -e BAMBOO_ENDPOINT=http://localhost:8000 \
11 | -e BAMBOO_ZK_HOST=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181 \
12 | -e BAMBOO_ZK_PATH=/bamboo \
13 | -e BIND=":8000" \
14 | -e CONFIG_PATH="config/production.example.json" \
15 | -e BAMBOO_DOCKER_AUTO_HOST=true \
16 | hub.dsra.local:5000/dsra/bamboo:0.2.16.9
17 |
--------------------------------------------------------------------------------
/docker/haproxy/test/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for containerId in `docker ps -a | grep Exited | grep -Po '^([\d\w])+'`;
3 | do
4 | docker rm -v $containerId
5 | done
6 |
7 |
--------------------------------------------------------------------------------
/docker/joshua/Dockerfile:
--------------------------------------------------------------------------------
1 |
2 | FROM ingensi/oracle-jdk:latest
3 |
4 | LABEL version=6.0.4
5 | LABEL description="Dockerized version of Joshua MT"
6 | LABEL tag="dsra/joshua:6.0.4"
7 |
8 | MAINTAINER Matt Parker
9 |
10 | RUN yum install --assumeyes make boost boost-devel gcc-c++ zlib-devel nano ant wget tar
11 | RUN mkdir /opt/tmp
12 | ADD http://cs.jhu.edu/~post/files/joshua-v6.0.4.tgz /opt/
13 |
14 | WORKDIR /opt
15 |
16 | RUN tar -xf joshua-v6.0.4.tgz
17 | ENV JOSHUA=/opt/joshua-v6.0.4
18 |
19 | WORKDIR ${JOSHUA}
20 | RUN ant
21 |
22 |
--------------------------------------------------------------------------------
/docker/joshua/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t dsra/joshua:6.0.4 .
3 |
--------------------------------------------------------------------------------
/docker/joshua/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker push dsra/joshua:6.0.4
3 |
--------------------------------------------------------------------------------
/docker/kafka-manager/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM hseeberger/scala-sbt
2 | MAINTAINER Aaron Glahe
3 |
4 | ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64 \
5 | KM_VERSION=1.3.0.7 \
6 | KM_REVISION=4b57fc9b65e6f9ac88fff4391994fd06bb782663
7 |
8 | RUN mkdir -p /tmp && \
9 | cd /tmp && \
10 | git clone https://github.com/yahoo/kafka-manager && \
11 | cd /tmp/kafka-manager && \
12 | git checkout ${KM_REVISION}
13 |
14 | WORKDIR /tmp/kafka-manager
15 | RUN /bin/echo 'scalacOptions ++= Seq("-Xmax-classfile-name", "200")' >> build.sbt
16 | RUN sbt clean dist && \
17 | unzip -d / ./target/universal/kafka-manager-${KM_VERSION}.zip && \
18 | rm -fr /tmp/*
19 |
20 | EXPOSE 9000
21 | WORKDIR /kafka-manager-${KM_VERSION}
22 | ENTRYPOINT ["./bin/kafka-manager","-Dconfig.file=conf/application.conf"]
23 |
--------------------------------------------------------------------------------
/docker/kafka/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM java:8
2 | MAINTAINER Aaron Glahe
3 |
4 | # Setup env
5 | USER root
6 | ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
7 | ENV SCALA_VERSION 2.11
8 | ENV KAFKA_VERSION 0.9.0.0
9 | ENV KAFKA_HOME /opt/kafka
10 | ENV JMX_PORT 9999
11 |
12 | # Install Kafka
13 | RUN apt-get update && \
14 | apt-get install -y wget && \
15 | rm -rf /var/lib/apt/lists/* && \
16 | apt-get clean && \
17 | wget -q -O - http://apache.mirrors.hoobly.com/kafka/"$KAFKA_VERSION"/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION".tgz | tar -xzf - -C /opt && \
18 | ln -s /opt/kafka_"$SCALA_VERSION"-"$KAFKA_VERSION" /opt/kafka
19 |
20 | #Ports
21 | EXPOSE 9092 ${JMX_PORT}
22 |
23 | # Location to store data
24 | VOLUME ["/data/kafka"]
25 |
26 | # Copy the bootstrap shell
27 | COPY bootstrap.sh /bin/bootstrap.sh
28 | ENTRYPOINT ["/bin/bootstrap.sh"]
29 |
--------------------------------------------------------------------------------
/docker/kafka/README.md:
--------------------------------------------------------------------------------
1 | ## Description:
2 |
3 | Currently set up to start a Kafka 0.9.0.0 broker
4 |
5 | #### Runtime options:
6 |
7 | None.
8 |
9 | #### Environment Variables
10 |
11 | __BROKER_ID__: A non-negative integer
12 |
13 | __CLUSTER_NAME__: Name used for the Kafka znode in Zookeeper; keeps Kafka data off the root znode
14 |
15 | __ZK_IPS__: Comma-separated list of Zookeeper IPs, e.g. zk01:2181,zk02:2181,zk03:2181,zk04:2181,zk05:2181
16 |
17 | __ADVERTISED_HOST_NAME__: Optional hostname, if not using --net=host in the docker run command
18 |
19 | #### Volumes:
20 |
21 | /data/kafka: where the broker stores its log information
22 |
23 | #### Ports:
24 |
25 | __9092__: default kafka port
26 |
27 | __9999__: JMX Port
28 |
29 | #### Command Line examples
30 | ```
31 | sudo docker run --name kafka --restart on-failure:5 --log-driver=journald \
32 | -e BROKER_ID=1 \
33 | -e CLUSTER_NAME=dsra \
34 | -e ZK_IPS=zk01:2181,zk02:2181,zk03:2181,zk04:2181,zk05:2181 \
35 | -p 9092:9092 -p 9999:9999 aarongdocker/kafka
36 | ```
37 |
38 | #### Example Broker ID, using the last octet of the IP:
39 | ```
40 | BROKER_ID=`/usr/bin/ifconfig bond0 | /usr/bin/sed -n 2p | /usr/bin/awk '{ print $2 }' | cut -d . -f 4`
41 | ```
42 |
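43 | #### Optional tuning variables
44 |
45 | bootstrap.sh also honors __DELETE_TOPIC_ENABLE__, __ADVERTISED_PORT__, __LOG_RETENTION_HOURS__, __LOG_RETENTION_BYTES__ and __NUM_PARTITIONS__. A sketch with illustrative values (the retention and partition numbers below are examples only):
46 | ```
47 | sudo docker run --name kafka --restart on-failure:5 --log-driver=journald \
48 |   -e BROKER_ID=1 \
49 |   -e CLUSTER_NAME=dsra \
50 |   -e ZK_IPS=zk01:2181,zk02:2181,zk03:2181,zk04:2181,zk05:2181 \
51 |   -e DELETE_TOPIC_ENABLE=true \
52 |   -e LOG_RETENTION_HOURS=168 \
53 |   -e NUM_PARTITIONS=3 \
54 |   -p 9092:9092 -p 9999:9999 aarongdocker/kafka
55 | ```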
--------------------------------------------------------------------------------
/docker/kafka/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # make sure we have everything
4 | if [ -z $CLUSTER_NAME ] || [ -z $ZK_IPS ]; then
5 | echo CLUSTER_NAME \(which becomes the ZK root\) and ZK_IPS need to be set as environment variables to be able to run
6 | exit 1
7 | fi
8 |
9 | # Set the Logs DIR
10 | sed -r -i "s/(log.dirs)=(.*)/\1=\/data\/kafka/g" $KAFKA_HOME/config/server.properties
11 |
12 | # Add the delete logs property if it is set, put it right after the log.dirs setting
13 | if [ ! -z "$DELETE_TOPIC_ENABLE" ]; then
14 | echo "allow topic delete: $DELETE_TOPIC_ENABLE"
15 | sed -r -i "/(log.dirs)=/a delete.topic.enable=$DELETE_TOPIC_ENABLE" $KAFKA_HOME/config/server.properties
16 | fi
17 |
18 | # Set the ZK_IPS and CLUSTER_NAME
19 | sed -r -i "s/(zookeeper.connect)=(.*)/\1=$ZK_IPS\/$CLUSTER_NAME-kafka/g" $KAFKA_HOME/config/server.properties
20 |
21 | # Set the broker id; if not provided, pick a random one between 1 and 100
22 | if [[ -z $BROKER_ID ]]; then
23 | export BROKER_ID=$(( (RANDOM % 100) + 1 ))
24 | fi
25 | sed -r -i "s/(broker.id)=(.*)/\1=$BROKER_ID/g" $KAFKA_HOME/config/server.properties
26 |
27 | # Set the external host and port
28 | if [ ! -z "$ADVERTISED_HOST_NAME" ]; then
29 | echo "advertised host: $ADVERTISED_HOST"
30 | sed -r -i "s/#(advertised.host.name)=(.*)/\1=$ADVERTISED_HOST_NAME/g" $KAFKA_HOME/config/server.properties
31 | fi
32 | if [ ! -z "$ADVERTISED_PORT" ]; then
33 | echo "advertised port: $ADVERTISED_PORT"
34 | sed -r -i "s/#(advertised.port)=(.*)/\1=$ADVERTISED_PORT/g" $KAFKA_HOME/config/server.properties
35 | fi
36 |
37 | if [ ! -z "$LOG_RETENTION_HOURS" ]; then
38 | echo "log retention hours: $LOG_RETENTION_HOURS"
39 | sed -r -i "s/(log.retention.hours)=(.*)/\1=$LOG_RETENTION_HOURS/g" $KAFKA_HOME/config/server.properties
40 | fi
41 | if [ ! -z "$LOG_RETENTION_BYTES" ]; then
42 | echo "log retention bytes: $LOG_RETENTION_BYTES"
43 | sed -r -i "s/#(log.retention.bytes)=(.*)/\1=$LOG_RETENTION_BYTES/g" $KAFKA_HOME/config/server.properties
44 | fi
45 |
46 | if [ ! -z "$NUM_PARTITIONS" ]; then
47 | echo "default number of partition: $NUM_PARTITIONS"
48 | sed -r -i "s/(num.partitions)=(.*)/\1=$NUM_PARTITIONS/g" $KAFKA_HOME/config/server.properties
49 | fi
50 |
51 | # Stolen from ches/docker-kafka to help enable JMX monitoring
52 | if [ -z $KAFKA_JMX_OPTS ]; then
53 | KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true"
54 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
55 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.ssl=false"
56 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
57 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=${JAVA_RMI_SERVER_HOSTNAME:-$ADVERTISED_HOST_NAME} "
58 | export KAFKA_JMX_OPTS
59 | fi
60 |
61 | # Run Kafka
62 | $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
63 |
--------------------------------------------------------------------------------
/docker/mesos-dns/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # Lifted from tobilg/mesos-dns on Docker Hub
3 | #
4 |
5 | FROM sillelien/base-alpine:0.10
6 | MAINTAINER tobilg
7 |
8 | ENV MESOS_DNS_VERSION v0.5.2
9 | ENV MESOS_DNS_FILENAME mesos-dns-$MESOS_DNS_VERSION-linux-amd64
10 | ENV MESOS_DNS_PATH /usr/local/mesos-dns
11 |
12 | ADD /config.json $MESOS_DNS_PATH/config.json
13 | ADD install.sh .
14 | ADD bootstrap.sh .
15 |
16 | RUN chmod +x install.sh
17 | RUN chmod +x bootstrap.sh
18 |
19 | RUN ./install.sh
20 |
21 | ENTRYPOINT ["./bootstrap.sh"]
22 |
--------------------------------------------------------------------------------
/docker/mesos-dns/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check for MESOS_ZK parameter
4 | if [ -z ${MESOS_ZK+x} ]; then
5 | echo "Please supply at least a Zookeeper connection string!"
6 | exit 1
7 | else
8 | ZK=$(echo ${MESOS_ZK//\//\\/})
9 | fi
10 |
11 | # Check for local ip address parameter
12 | if [ -z ${LOCAL_IP+x} ]; then
13 | IP=$(nslookup `hostname -f` | tail -1 | head -2 | awk '{print $3}')
14 | else
15 | IP="${LOCAL_IP}"
16 | fi
17 |
18 | # Check for EXTERNAL_DNS_SERVERS parameter
19 | if [ -z ${MESOS_DNS_EXTERNAL_SERVERS+x} ]; then
20 | DNS_SERVERS="8.8.8.8"
21 | else
22 | IFS=',' read -a dnshosts <<< "$MESOS_DNS_EXTERNAL_SERVERS"
23 | for index in "${!dnshosts[@]}"
24 | do
25 | DNS_SERVER_STRINGS[(index+1)]="\"${dnshosts[index]}\""
26 | done
27 | # Produce correct env variables for DNS servers
28 | IFS=','
29 | DNS_SERVERS="[${DNS_SERVER_STRINGS[*]}]"
30 | fi
31 |
32 | # Check for MESOS_IP_SOURCES parameter
33 | if [ -z ${MESOS_IP_SOURCES+x} ]; then
34 | IP_SOURCES="[\"netinfo\", \"host\", \"mesos\"]"
35 | else
36 | IFS=',' read -a ipsources <<< "$MESOS_IP_SOURCES"
37 | for index in "${!ipsources[@]}"
38 | do
39 | IP_SOURCES_STRINGS[(index+1)]="\"${ipsources[index]}\""
40 | done
41 | # Produce correct env variables for IP sources
42 | IFS=','
43 | IP_SOURCES="[${IP_SOURCES_STRINGS[*]}]"
44 | fi
45 |
46 | # Check for HTTP_PORT parameter
47 | if [ -z ${MESOS_DNS_HTTP_PORT+x} ]; then
48 | PORT="8123"
49 | else
50 | PORT="${MESOS_DNS_HTTP_PORT}"
51 | fi
52 |
53 | # Check for HTTP_ENABLED parameter
54 | if [ -z ${MESOS_DNS_HTTP_ENABLED+x} ]; then
55 | HTTP_ENABLED="false"
56 | else
57 | HTTP_ENABLED="${MESOS_DNS_HTTP_ENABLED}"
58 | fi
59 |
60 | # Check for REFRESH parameter
61 | if [ -z ${MESOS_DNS_REFRESH+x} ]; then
62 | REFRESH="60"
63 | else
64 | REFRESH="${MESOS_DNS_REFRESH}"
65 | fi
66 |
67 | # Check for TIMEOUT parameter
68 | if [ -z ${MESOS_DNS_TIMEOUT+x} ]; then
69 | TIMEOUT="5"
70 | else
71 | TIMEOUT="${MESOS_DNS_TIMEOUT}"
72 | fi
73 |
74 | # Check for verbose logging settings
75 | if [ -z ${VERBOSITY_LEVEL+x} ]; then
76 | VERBOSITY=""
77 | else
78 | VERBOSITY="-v=${VERBOSITY_LEVEL}"
79 | fi
80 |
81 | # Fill in the config.json placeholders
82 | sed -i -e "s/%%MESOS_ZK%%/${ZK}/" \
83 | -e "s/%%IP%%/${IP}/" \
84 | -e "s/%%HTTP_PORT%%/${PORT}/" \
85 | -e "s/%%EXTERNAL_DNS_SERVERS%%/${DNS_SERVERS}/" \
86 | -e "s/%%HTTP_ON%%/${HTTP_ENABLED}/" \
87 | -e "s/%%IP_SOURCES%%/${IP_SOURCES}/" \
88 | -e "s/%%REFRESH%%/${REFRESH}/" \
89 | -e "s/%%TIMEOUT%%/${TIMEOUT}/" \
90 | $MESOS_DNS_PATH/config.json
91 |
92 | exec $MESOS_DNS_PATH/mesos-dns -config=$MESOS_DNS_PATH/config.json $VERBOSITY
93 |
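94 | # Example invocation (a sketch; values and the --net=host choice are
95 | # illustrative -- only MESOS_ZK is required, everything else falls back
96 | # to the defaults handled above):
97 | #
98 | # docker run -d --name mesos-dns --net=host \
99 | #   -e MESOS_ZK=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181/mesos \
100 | #   -e LOCAL_IP=10.105.0.1 \
101 | #   -e MESOS_DNS_EXTERNAL_SERVERS=8.8.8.8,8.8.4.4 \
102 | #   hub.dsra.local:5000/dsra/mesos-dns:0.5.2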
--------------------------------------------------------------------------------
/docker/mesos-dns/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t hub.dsra.local:5000/dsra/mesos-dns:0.5.2 .
3 |
--------------------------------------------------------------------------------
/docker/mesos-dns/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "zk": "%%MESOS_ZK%%",
3 | "refreshSeconds": %%REFRESH%%,
4 | "ttl": %%REFRESH%%,
5 | "domain": "mesos",
6 | "port": 53,
7 | "resolvers": %%EXTERNAL_DNS_SERVERS%%,
8 | "timeout": %%TIMEOUT%%,
9 | "httpon": %%HTTP_ON%%,
10 | "dnson": true,
11 | "httpport": %%HTTP_PORT%%,
12 | "externalon": true,
13 | "listener": "%%IP%%",
14 | "SOAMname": "ns1.mesos",
15 | "SOARname": "root.ns1.mesos",
16 | "SOARefresh": %%REFRESH%%,
17 | "SOARetry": 600,
18 | "SOAExpire": 86400,
19 | "SOAMinttl": %%REFRESH%%,
20 | "IPSources": %%IP_SOURCES%%
21 | }
22 |
--------------------------------------------------------------------------------
/docker/mesos-dns/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | set -ex
4 |
5 | apk upgrade
6 | apk update
7 | apk add wget gzip sed bash
8 |
9 | # Install Mesos DNS
10 | wget https://github.com/mesosphere/mesos-dns/releases/download/$MESOS_DNS_VERSION/$MESOS_DNS_FILENAME && \
11 | mkdir -p $MESOS_DNS_PATH && \
12 | mv $MESOS_DNS_FILENAME $MESOS_DNS_PATH/mesos-dns && \
13 | chmod +x $MESOS_DNS_PATH/mesos-dns
14 |
15 | apk del wget gzip
16 |
--------------------------------------------------------------------------------
/docker/mesos-dns/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker push hub.dsra.local:5000/dsra/mesos-dns:0.5.2
3 |
4 |
--------------------------------------------------------------------------------
/docker/mesos-master/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mesosphere/mesos-master:0.27.1-2.0.226.ubuntu1404
2 |
3 | MAINTAINER Matt Parker
4 |
--------------------------------------------------------------------------------
/docker/mesos-master/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t hub.dsra.local:5000/dsra/mesos-master:0.27.1-2.0.226.ubuntu1404 .
3 |
--------------------------------------------------------------------------------
/docker/mesos-master/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker push hub.dsra.local:5000/dsra/mesos-master:0.27.1-2.0.226.ubuntu1404
3 |
--------------------------------------------------------------------------------
/docker/mesos-slave/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mesosphere/mesos-slave:0.27.1-2.0.226.ubuntu1404
2 | MAINTAINER Matt Parker
3 |
4 | # Setup env
5 | USER root
6 |
7 | # Define commonly used JAVA_HOME variable
8 | ENV JAVA_HOME /usr/lib/jvm/java-7-openjdk-amd64
9 |
10 | ENV HADOOP_PREFIX /usr/local/hadoop
11 | ENV HADOOP_COMMON_HOME /usr/local/hadoop
12 | ENV HADOOP_HDFS_HOME /usr/local/hadoop
13 | ENV HADOOP_CONF_DIR /usr/local/hadoop/etc/hadoop
14 |
15 | # install wget & java, then download hadoop and spark
16 | RUN apt-get -y update && \
17 | apt-get install -y openjdk-7-jdk wget && \
18 | wget -q -O - http://apache.mirrors.pair.com/hadoop/common/hadoop-2.7.1/hadoop-2.7.1.tar.gz | tar -xzf - -C /usr/local && \
19 | wget -q -O - http://d3kbcqa49mib13.cloudfront.net/spark-1.6.0-bin-hadoop2.6.tgz | tar -xzf - -C /usr/local && \
20 | ln -s /usr/local/hadoop-2.7.1 /usr/local/hadoop && \
21 | ln -s /usr/local/spark-1.6.0-bin-hadoop2.6 /usr/local/spark
22 |
23 | # Copy the Site files up
24 | WORKDIR /usr/local/hadoop
25 | COPY core-site.xml.template etc/hadoop/core-site.xml.template
26 | COPY hdfs-site.xml.template etc/hadoop/hdfs-site.xml.template
27 |
28 | # Copy the bootstrap shell
29 | COPY bootstrap.sh /bin/bootstrap.sh
30 |
31 | # Location to store data
32 | VOLUME ["/tmp/mesos"]
33 |
34 | # Entry point for our bootstrap script
35 | ENV PATH $PATH:/usr/local/hadoop/bin
36 | ENTRYPOINT ["/bin/bootstrap.sh"]
37 | CMD ["mesos-slave"]
38 |
--------------------------------------------------------------------------------
/docker/mesos-slave/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # make sure we have everything
4 | if [ -z $CLUSTER_NAME ] || [ -z $NNODE1_IP ] || [ -z $NNODE2_IP ] || [ -z $ZK_IPS ] || [ -z $JN_IPS ]; then
5 | echo CLUSTER_NAME, NNODE1_IP, NNODE2_IP, JN_IPS and ZK_IPS need to be set as environment variables to be able to run.
6 | exit 1
7 | fi
8 |
9 | # convert the commas to semicolons..if they exist
10 | JNODES=$(echo $JN_IPS | tr "," ";")
11 |
12 | # Replace all the variables in hdfs-site.xml
13 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/hdfs-site.xml.template \
14 | | sed "s/NNODE1_IP/$NNODE1_IP/" \
15 | | sed "s/NNODE2_IP/$NNODE2_IP/" \
16 | | sed "s/JNODES/$JNODES/" \
17 | > /usr/local/hadoop/etc/hadoop/hdfs-site.xml
18 |
19 | # Replace all the variables in core-site.xml
20 | sed "s/CLUSTER_NAME/$CLUSTER_NAME/" /usr/local/hadoop/etc/hadoop/core-site.xml.template \
21 | | sed "s/ZK_IPS/$ZK_IPS/" \
22 | > /usr/local/hadoop/etc/hadoop/core-site.xml
23 |
24 | eval $*
25 |
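26 | # Example invocation (a sketch; values are illustrative and mirror the env
27 | # block used by marathon/httpfs.json, and --net=host is an assumption):
28 | #
29 | # docker run --net=host \
30 | #   -e CLUSTER_NAME=dsra \
31 | #   -e NNODE1_IP=r105u01.dsra.local \
32 | #   -e NNODE2_IP=r105u03.dsra.local \
33 | #   -e ZK_IPS=r105u01:2181,r105u03:2181,r105u05:2181 \
34 | #   -e JN_IPS=r105u01:8485,r105u03:8485,r105u05:8485 \
35 | #   hub.dsra.local:5000/dsra/mesos-slave:0.27.1-2.0.226.ubuntu1404 mesos-slave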
--------------------------------------------------------------------------------
/docker/mesos-slave/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t hub.dsra.local:5000/dsra/mesos-slave:0.27.1-2.0.226.ubuntu1404 .
3 |
--------------------------------------------------------------------------------
/docker/mesos-slave/core-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>fs.defaultFS</name>
5 |     <value>hdfs://CLUSTER_NAME</value>
6 |   </property>
7 |   <property>
8 |     <name>ha.zookeeper.quorum</name>
9 |     <value>ZK_IPS</value>
10 |   </property>
11 |   <property>
12 |     <name>fs.trash.interval</name>
13 |     <value>30</value>
14 |   </property>
15 |   <property>
16 |     <name>fs.trash.checkpoint.interval</name>
17 |     <value>15</value>
18 |   </property>
19 |   <property>
20 |     <name>hadoop.proxyuser.httpfs.hosts</name>
21 |     <value>*</value>
22 |   </property>
23 |   <property>
24 |     <name>hadoop.proxyuser.httpfs.groups</name>
25 |     <value>*</value>
26 |   </property>
27 |   <property>
28 |     <name>hadoop.proxyuser.hue.hosts</name>
29 |     <value>*</value>
30 |   </property>
31 |   <property>
32 |     <name>hadoop.proxyuser.hue.groups</name>
33 |     <value>*</value>
34 |   </property>
35 | </configuration>
--------------------------------------------------------------------------------
/docker/mesos-slave/hdfs-site.xml.template:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>dfs.nameservices</name>
5 |     <value>CLUSTER_NAME</value>
6 |   </property>
7 |   <property>
8 |     <name>dfs.ha.namenodes.CLUSTER_NAME</name>
9 |     <value>nn1,nn2</value>
10 |   </property>
11 |   <property>
12 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn1</name>
13 |     <value>NNODE1_IP:8020</value>
14 |   </property>
15 |   <property>
16 |     <name>dfs.namenode.rpc-address.CLUSTER_NAME.nn2</name>
17 |     <value>NNODE2_IP:8020</value>
18 |   </property>
19 |   <property>
20 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn1</name>
21 |     <value>NNODE1_IP:8022</value>
22 |   </property>
23 |   <property>
24 |     <name>dfs.namenode.servicerpc-address.CLUSTER_NAME.nn2</name>
25 |     <value>NNODE2_IP:8022</value>
26 |   </property>
27 |   <property>
28 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn1</name>
29 |     <value>NNODE1_IP:50070</value>
30 |   </property>
31 |   <property>
32 |     <name>dfs.namenode.http-address.CLUSTER_NAME.nn2</name>
33 |     <value>NNODE2_IP:50070</value>
34 |   </property>
35 |   <property>
36 |     <name>dfs.namenode.name.dir</name>
37 |     <value>file:///data/hdfs/nn</value>
38 |     <description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
39 |   </property>
40 |   <property>
41 |     <name>dfs.namenode.shared.edits.dir</name>
42 |     <value>qjournal://JNODES/CLUSTER_NAME</value>
43 |   </property>
44 |   <property>
45 |     <name>dfs.journalnode.edits.dir</name>
46 |     <value>/data/hdfs/journal</value>
47 |   </property>
48 |   <property>
49 |     <name>dfs.datanode.data.dir</name>
50 |     <value>file:///data/hdfs/dn</value>
51 |     <description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
52 |   </property>
53 |   <property>
54 |     <name>dfs.ha.automatic-failover.enabled</name>
55 |     <value>true</value>
56 |   </property>
57 |   <property>
58 |     <name>dfs.client.failover.proxy.provider.CLUSTER_NAME</name>
59 |     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
60 |   </property>
61 |   <property>
62 |     <name>dfs.ha.fencing.methods</name>
63 |     <value>shell(/bin/true)</value>
64 |   </property>
65 |   <property>
66 |     <name>dfs.namenode.replication.min</name>
67 |     <value>3</value>
68 |     <final>true</final>
69 |   </property>
70 |   <property>
71 |     <name>dfs.replication.max</name>
72 |     <value>10</value>
73 |     <final>true</final>
74 |   </property>
75 |   <property>
76 |     <name>mapreduce.client.submit.file.replication</name>
77 |     <value>3</value>
78 |     <final>true</final>
79 |   </property>
80 |   <property>
81 |     <name>dfs.webhdfs.enabled</name>
82 |     <value>true</value>
83 |   </property>
84 |   <property>
85 |     <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
86 |     <value>true</value>
87 |   </property>
88 | </configuration>
--------------------------------------------------------------------------------
/docker/mesos-slave/push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker push hub.dsra.local:5000/dsra/mesos-slave:0.27.1-2.0.226.ubuntu1404
3 |
--------------------------------------------------------------------------------
/docker/nginx/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d -p 443:443 -v /opt/registry/external:/etc/nginx/external --restart=always --link registry:registry --name nginx-registry-proxy marvambass/nginx-registry-proxy
3 |
--------------------------------------------------------------------------------
/docker/spark/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mesosphere/mesos:0.23.0-1.0.ubuntu1404
2 |
3 | # airdock/oracle-jdk:1.7
4 |
5 | MAINTAINER Matt Parker
6 |
7 | # LABEL mesos.version="0.23.0"
8 | # LABEL docker.version="1.5"
9 | # LABEL hadoop.version="2.6"
10 | # LABEL description="Spark 1.5 Hadoop 2.6 Docker image for use with DSRA MESOS Cluster"
11 |
12 | # ADD http://apache.arvixe.com/spark/spark-1.5.0/spark-1.5.0-bin-hadoop2.6.tgz /opt
13 |
14 | ADD spark-1.5.0-bin-hadoop2.6.tgz /opt
15 | RUN ln -s /opt/spark-1.5.0-bin-hadoop2.6 /opt/spark
16 |
17 | ENV SPARK_HOME /opt/spark
18 | ENV PATH $SPARK_HOME/bin:$JAVA_HOME/bin:$PATH
19 |
20 | COPY spark-env.sh /opt/spark-1.5.0-bin-hadoop2.6/conf/
21 | RUN chmod 755 /opt/spark-1.5.0-bin-hadoop2.6/conf/spark-env.sh
22 |
23 | EXPOSE 4040
24 |
25 | RUN echo $PATH
26 |
--------------------------------------------------------------------------------
/docker/spark/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build -t dsra/spark-1.5-hadoop-2.6 .
3 |
--------------------------------------------------------------------------------
/docker/spark/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -i --net=host -t dsra/spark-1.5-hadoop-2.6 /bin/bash
3 |
--------------------------------------------------------------------------------
/docker/spark/spark-env-cluster-mode.sh:
--------------------------------------------------------------------------------
1 | export MESOS_JAVA_NATIVE_LIBRARY=/usr/local/lib/libmesos.so
2 | export SPARK_EXECUTOR_URI=http://hub.dsra.local:8088/dsra/repo/frameworks/spark-1.5.0-bin-hadoop2.6.tgz
3 | export MASTER=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos
4 |
--------------------------------------------------------------------------------
/docker/spark/spark-env.sh:
--------------------------------------------------------------------------------
1 | export MESOS_JAVA_NATIVE_LIBRARY=/usr/local/lib/libmesos.so
2 | export SPARK_EXECUTOR_URI=http://hub.dsra.local:8088/dsra/repo/frameworks/spark-1.5.0-bin-hadoop2.6.tgz
3 | export MASTER=mesos://10.105.0.3:5050,10.105.0.5:5050,10.105.0.7:5050
4 |
--------------------------------------------------------------------------------
/docs/architecture.odp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aglahe/dsra-dcos/9d99ae8b0e693026f2287934dfcbe5186e28d523/docs/architecture.odp
--------------------------------------------------------------------------------
/docs/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aglahe/dsra-dcos/9d99ae8b0e693026f2287934dfcbe5186e28d523/docs/architecture.png
--------------------------------------------------------------------------------
/docs/marathon3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aglahe/dsra-dcos/9d99ae8b0e693026f2287934dfcbe5186e28d523/docs/marathon3.png
--------------------------------------------------------------------------------
/git/parker.sh:
--------------------------------------------------------------------------------
1 | git config --global user.name "Matt Parker"
2 | git config --global user.email "parker20121@gmail.com"
3 |
--------------------------------------------------------------------------------
/marathon/README.md:
--------------------------------------------------------------------------------
1 | ### Deploying Marathon Services
2 |
3 | To deploy a Marathon service using its JSON configuration file:
4 |
5 | ```
6 | ./deploy-marathon-service.sh [JSON file path]
7 | ```
8 |
9 | This will submit the service to Marathon's REST API and it should appear in the web interface.
10 |
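11 | A minimal sketch of what `deploy-marathon-service.sh` presumably wraps (the Marathon address below is illustrative; compare `bin/create-new-app.sh`):
12 |
13 | ```
14 | #!/bin/bash
15 | # POST the JSON app definition to Marathon's /v2/apps endpoint
16 | curl -X POST \
17 |   -H "Accept: application/json" \
18 |   -H "Content-Type: application/json" \
19 |   10.105.0.5/v2/apps -d @"$1"
20 | ```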
--------------------------------------------------------------------------------
/marathon/atsd/atsd.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "atsd",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "ports": [5022, 8081, 8088, 8084, 8082],
6 | "container": {
7 | "type": "DOCKER",
8 | "docker": {
9 | "image": "axibase/atsd:latest",
10 | "network": "BRIDGE",
11 | "portMappings": [
12 | { "containerPort": 22, "hostPort": 5022 },
13 | { "containerPort": 8081, "hostPort": 8081 },
14 | { "containerPort": 8088, "hostPort": 8088 },
15 | { "containerPort": 8081, "hostPort": 8084 },
16 | { "containerPort": 8082, "hostPort": 8082, "protcol": "tcp" }
17 | ]
18 | }
19 | },
20 | "env": {
21 | "AXIBASE_USER_PASSWORD": "atsd1234",
22 | "ATSD_USER_NAME": "atsd",
23 | "ATSD_USER_PASSWORD": "atsd1234"
24 | },
25 | "maxLaunchDelaySeconds": 7200
26 | }
27 |
28 |
--------------------------------------------------------------------------------
/marathon/atsd/hue.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "hue",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "cmd": "build/env/bin/hue runserver 0.0.0.0:8000",
6 | "ports": [50070],
7 | "container": {
8 | "type": "DOCKER",
9 | "docker": {
10 | "image": "hub.dsra.local:5000/dsra/hue:3.7.0.2",
11 | "network": "BRIDGE",
12 | "portMappings": [
13 | { "containerPort": 8000, "hostPort": 0 },
14 | { "containerPort": 50070, "hostPort": 50070 }
15 | ]
16 | }
17 | },
18 | "uris": [
19 | "file:///etc/docker.tar.gz"
20 | ],
21 | "maxLaunchDelaySeconds": 7200
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/marathon/bamboo.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/bamboo",
3 | "cpus": 1.0,
4 | "mem": 1024.0,
5 | "instances": 11,
6 | "constraints": [["hostname","UNIQUE",""]],
7 | "env": {
8 | "MARATHON_ENDPOINT" : "http://10.105.0.1,http://10.105.0.3,http://10.105.0.5,http://10.105.0.7,http://10.105.0.9",
9 | "MARATHON_USE_EVENT_STREAM" : "ture",
10 | "BAMBOO_ENDPOINT" : "http://localhost:8000",
11 | "BAMBOO_ZK_HOST" : "10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181",
12 | "BAMBOO_ZK_PATH" : "/bamboo",
13 | "BIND" : ":8000",
14 | "CONFIG_PATH": "config/production.example.json",
15 | "BAMBOO_DOCKER_AUTOHOST" : "true",
16 | "STATSD_ENABLED" : "false"
17 | },
18 |
19 | "container": {
20 | "type": "DOCKER",
21 | "docker": {
22 | "image": "hub.dsra.local:5000/dsra/bamboo:0.2.16.4",
23 | "network" : "BRIDGE",
24 | "portMappings": [
25 | {
26 | "containerPort": 8000,
27 | "hostPort": 31000,
28 | "servicePort": 31000
29 | },{
30 | "containerPort": 80,
31 | "hostPort": 31080,
32 | "servicePort": 31080
33 | }
34 | ]
35 | }
36 | },
37 |
38 | "uris": [
39 | "file:///etc/docker.tar.gz"
40 | ]
41 | }
42 |
--------------------------------------------------------------------------------
/marathon/bin/create-new-app.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | curl -X POST -H "Accept: application/json" -H "Content-Type: application/json" 10.105.0.5/v2/apps -d @$1
3 |
--------------------------------------------------------------------------------
/marathon/bin/delete-deployment.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # usage: bin/delete-deployment.sh [DEPLOYMENT_ID]
4 | #
5 | curl -X DELETE -H "Accept: application/json" -H "Content-Type: application/json" 10.105.0.5/v2/deployments/$1
6 |
--------------------------------------------------------------------------------
/marathon/bin/destroy-app.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # usage: bin/destroy-app.sh [APPID]
4 | #
5 | curl -X DELETE -H "Accept: application/json" -H "Content-Type: application/json" 10.105.0.5/v2/apps/$1
6 |
--------------------------------------------------------------------------------
/marathon/bin/event-stream.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | curl -X GET -H "Accept: application/json" -H "Content-Type: application/json" 10.105.0.5/v2/events
3 |
--------------------------------------------------------------------------------
/marathon/bin/framework-teardown.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "frameworkId=$1" | curl -d@- -X POST http://10.105.0.5:5050/master/teardown
3 |
--------------------------------------------------------------------------------
/marathon/chronos.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/chronos",
3 | "instances": 1,
4 | "cpus": 1.0,
5 | "mem": 2048,
6 | "uris": [ ],
7 | "args": [
8 | "/usr/bin/chronos", "run_jar",
9 | "--master", "zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos",
10 | "--zk_hosts", "10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181",
11 | "--http_port", "4400"
12 | ],
13 | "container": {
14 | "type":"DOCKER",
15 | "volumes": [],
16 | "docker":{
17 | "image": "mesosphere/chronos",
18 |
19 | "network": "BRIDGE",
20 | "portMappings" : [
21 | {
22 | "containerPort": 4400,
23 | "hostPort": 0,
24 | "protocol": "tcp"
25 | }
26 | ]
27 | }
28 | },
29 | "healthChecks": [
30 | {
31 | "protocol": "HTTP",
32 | "portIndex": 0,
33 | "path": "/",
34 | "gracePeriodSeconds": 500,
35 | "maxConsecutiveFailures": 3
36 | }
37 | ],
38 | "maxLauchDelaySeconds": 15400
39 | }
40 |
--------------------------------------------------------------------------------
/marathon/confluence/confluence-atlassian.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/confluence",
3 | "cpus": 1,
4 | "mem": 2048,
5 | "container": {
6 | "type": "DOCKER",
7 | "docker": {
8 | "image": "cptactionhank/atlassian-confluence:latest",
9 | "network": "BRIDGE",
10 | "portMappings" : [
11 | { "containerPort": 8090, "hostPort": 8090, "protocol": "tcp" }
12 | ]
13 | }
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/marathon/httpfs.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/httpfs",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "instances": 1,
6 | "env": {
7 | "CLUSTER_NAME": "dsra",
8 | "NNODE1_IP": "r105u01.dsra.local",
9 | "NNODE2_IP": "r105u03.dsra.local",
10 | "ZK_IPS": "r105u01:2181,r105u03:2181,r105u05:2181,r105u07:2181,r105u09:2181",
11 | "JN_IPS": "r105u01:8485,r105u03:8485,r105u05:8485,r105u07:8485,r105u09:8485"
12 | },
13 | "args": ["start"],
14 | "container": {
15 | "type": "DOCKER",
16 | "docker": {
17 | "image": "aarongdocker/httpfs",
18 | "forcePullImage": true,
19 | "network": "BRIDGE",
20 | "portMappings": [
21 | { "containerPort": 14000, "hostPort": 31400, "servicePort": 14000, "protocol": "tcp" }
22 | ],
23 | "parameters": [
24 | { "key": "name", "value": "httpfs" }
25 | ]
26 | }
27 | },
28 | "uris": [
29 | "file:///etc/docker.tar.gz"
30 | ],
31 | "maxLaunchDelaySeconds": 7200
32 | }
33 |
--------------------------------------------------------------------------------
/marathon/hue.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/hue",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "instances": 1,
6 | "args": ["start"],
7 | "env": {
8 | "CLUSTER_NAME": "dsra",
9 | "HTTPFS_SERVER": "httpfs-dsra.marathon.slave.mesos",
10 | "HTTPFS_PORT": "31400"
11 | },
12 | "container": {
13 | "type": "DOCKER",
14 | "docker": {
15 | "image": "hub.dsra.local:5000/dsra/hue:3.9.0",
16 | "forcePullImage": true,
17 | "network": "BRIDGE",
18 | "portMappings": [
19 | { "containerPort": 8000, "hostPort": 0, "protocol": "tcp" }
20 | ]
21 | }
22 | },
23 | "uris": [
24 | "file:///etc/docker.tar.gz"
25 | ],
26 | "maxLaunchDelaySeconds": 7200
27 | }
28 |
--------------------------------------------------------------------------------
/marathon/kafka-manager.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "kafka-manager",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "instances": 1,
6 | "env": {
7 | "ZK_HOSTS": "r105u01.dsra.local:2181,r105u03.dsra.local:2181,r105u05.dsra.local:2181,r105u07.dsra.local:2181,r105u09.dsra.local:2181",
8 | "APPLICATION_SECRET":"pleaseletmeplaywiththeothers"
9 | },
10 | "container": {
11 | "type": "DOCKER",
12 | "docker": {
13 | "image": "sheepkiller/kafka-manager",
14 | "forcePullImage": true,
15 | "network": "BRIDGE",
16 | "portMappings": [
17 | { "containerPort": 9000, "hostPort": 31900, "servicePort": 9000, "protocol": "tcp" }
18 | ],
19 | "parameters": [
20 | { "key": "name", "value": "kafka-manager" }
21 | ]
22 | }
23 | },
24 | "uris": [
25 | "file:///etc/docker.tar.gz"
26 | ],
27 | "maxLaunchDelaySeconds": 7200
28 | }
29 |
--------------------------------------------------------------------------------
/marathon/kafka-mesos.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "kafka-mesos-scheduler",
3 | "cpus": 1.0,
4 | "mem": 1024.0,
5 | "instances": 1,
6 | "ports": [31700],
7 | "cmd": "./kafka-mesos.sh scheduler --debug=true --master=zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos --zk=10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181 --api=http://kafka-mesos-scheduler.marathon.mesos:31700 --storage=zk:/kafka-mesos",
8 | "container": {
9 | "type": "DOCKER",
10 | "docker": {
11 | "image": "aarongdocker/kafka-mesos:0.9.3.0",
12 | "network": "HOST",
13 | "parameters": [
14 | { "key": "name", "value": "kafka-mesos-scheduler" }
15 | ]
16 | }
17 | },
18 | "uris": [
19 | "file:///etc/docker.tar.gz"
20 | ],
21 | "maxLaunchDelaySeconds": 7200
22 | }
23 |
--------------------------------------------------------------------------------
/marathon/python3/python3.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "python3",
3 | "cmd": "python3 -m http.server 8080",
4 | "cpus": 0.5,
5 | "mem": 32.0,
6 | "container": {
7 | "type": "DOCKER",
8 | "docker": {
9 | "image": "python:3",
10 | "network": "BRIDGE",
11 | "portMappings": [
12 | { "containerPort": 8080, "hostPort": 0 }
13 | ]
14 | }
15 | }
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/marathon/python3/python3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | curl -X POST -H "Accept: application/json" -H "Content-Type: application/json" 10.105.0.$1:8080/v2/apps -d @python3.json
3 |
--------------------------------------------------------------------------------
/marathon/registry-ui/dsra-registry-ui.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "registry",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "container": {
6 | "type": "DOCKER",
7 | "docker": {
8 | "image": "atcol/docker-registry-ui",
9 | "network": "BRIDGE",
10 | "portMappings": [
11 | { "containerPort": 8080, "hostPort": 0 }
12 | ]
13 | }
14 | },
15 | "env": {
16 | "REG1": "http://hub.dsra.local:5000/v1/"
17 | },
18 | "maxLaunchDelaySeconds": 7200
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/marathon/remove-qntfy-containers.sh:
--------------------------------------------------------------------------------
1 | {
2 | "id": "dsra/remove-qntfy-running-containers",
3 | "cmd": "chmod u+x remove-qntfy-running-containers.sh && ./remove-qntfy-running-containers.sh",
4 | "cpus": 1,
5 | "mem": 10.0,
6 | "instances": 11,
7 | "constraints": [["hostname", "UNIQUE"]],
8 | "uris": [
9 | "http://hub.dsra.local:8088/dsra/repo/scripts/remove-qntfy-running-containers.sh"
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/marathon/remove-unused-containers.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "dsra/remove-unused-containers",
3 | "cmd": "chmod u+x remove-unused-containers.sh && ./remove-unused-containers.sh",
4 | "cpus": 1,
5 | "mem": 10.0,
6 | "instances": 11,
7 | "constraints": [["hostname", "UNIQUE"]],
8 | "uris": [
9 | "http://hub.dsra.local:8080/dsra/repo/scripts/remove-unused-containers.sh"
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/marathon/spark-mesos-dispatcher.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/spark-mesos-dispatcher",
3 | "cmd": "/usr/local/spark/sbin/start-mesos-dispatcher.sh --master zk://10.105.0.1:2181,10.105.0.3:2181,10.105.0.5:2181,10.105.0.7:2181,10.105.0.9:2181/mesos",
4 | "cpus": 1,
5 | "mem": 4,
6 | "instances": 1
7 | }
8 |
--------------------------------------------------------------------------------
/marathon/zk-web.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "/dsra/zk-web",
3 | "cpus": 1.0,
4 | "mem": 2048.0,
5 | "instances": 1,
6 | "cmd": "lein run",
7 | "container": {
8 | "type": "DOCKER",
9 | "docker": {
10 | "image": "goodguide/zk-web",
11 | "forcePullImage": true,
12 | "network": "BRIDGE",
13 | "portMappings": [
14 | { "containerPort": 8080, "hostPort": 0, "protocol": "tcp" }
15 | ]
16 | }
17 | },
18 | "maxLaunchDelaySeconds": 7200
19 | }
20 |
--------------------------------------------------------------------------------