├── .gitattributes
├── .gitignore
├── .vagrant
└── .gitkeep
├── LICENSE.md
├── README.md
├── basic-containers
├── curl
│ └── Dockerfile
└── sample-webapp
│ ├── Dockerfile
│ └── app.py
├── basic-weave-example
├── .vagrant
├── Vagrantfile
├── config.rb
└── user-data
├── coreos-vagrant
├── .gitattributes
├── .gitignore
├── CONTRIBUTING.md
├── DCO
├── LICENSE
├── MAINTAINERS
├── NOTICE
├── README.md
├── Vagrantfile
├── config.rb.sample
└── user-data.sample
├── coreos-vagrant_subtree_pull.sh
├── felix
├── .gitignore
├── .vagrant
├── README.md
├── Vagrantfile
├── cloud-config.yaml
└── config.rb
├── hello-apps
└── elasticsearch-js
│ ├── .gitignore
│ ├── README.md
│ ├── myapp
│ ├── Dockerfile
│ ├── index.js
│ └── package.json
│ ├── myapp_lb
│ ├── Dockerfile
│ └── myapp.conf
│ ├── scripts
│ ├── bootstrap.sh
│ └── prepare_demo_vms.sh
│ └── tldr.md
├── java-containers
├── elasticsearch-river-twitter
│ └── Dockerfile
├── elasticsearch
│ ├── Dockerfile
│ └── logging.yml
├── lein
│ └── Dockerfile
├── mvn
│ ├── Dockerfile
│ └── settings.xml
├── sbt
│ └── Dockerfile
└── spark
│ ├── base
│ └── Dockerfile
│ ├── master
│ └── Dockerfile
│ ├── shell
│ └── Dockerfile
│ └── worker
│ └── Dockerfile
├── marathon-atomic
├── .gitignore
├── README.md
├── Vagrantfile
├── marathon.service
├── mesos-master.service
├── mesos-slave.service
├── test1.sh
├── weave
├── weave.service
├── weave.target
├── weavedns.service
├── weaveproxy.service
└── zookeeper.service
├── misc
├── gce-basic-two-nodes
│ ├── build1.sh
│ ├── build2.sh
│ ├── build3.sh
│ └── up.sh
└── ubuntu-kernels
│ └── Vagrantfile
├── poseidon
├── .gitignore
├── .vagrant
├── README.md
├── Vagrantfile
├── config.rb
├── kubernetes-cluster.yaml
├── network.graffle
├── network.png
├── ubuntu-trusty
│ └── Vagrantfile
└── weave-helper
├── quartet
├── README.md
├── app
│ ├── Dockerfile
│ ├── app.py
│ ├── build.sh
│ ├── docker-compose.yml
│ └── requirements.txt
├── scripts
│ ├── defaults.sh
│ ├── on-each-host.sh
│ ├── setup-cluster-dev.sh
│ ├── setup-cluster.sh
│ ├── setup-swarm-only-cluster.sh
│ ├── weave
│ ├── weave-dev
│ └── weave-run-on-swarm.sh
├── simple_machine_demo.md
└── simple_swarm_demo.md
├── sparkles
├── .gitignore
├── .vagrant
├── DEMO.md
├── Vagrantfile
├── cloud
│ ├── cloud-config.yaml
│ ├── genenv.sh
│ ├── infra
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── providers.tf
│ │ └── variables.tf
│ └── victorious_repl_session.tty
├── config.rb
├── elasticsearch-spark-test
│ ├── .gitignore
│ ├── build.sbt
│ ├── build.sh
│ └── project
│ │ └── plugins.sbt
├── join-remote-weave-cluster.rb
└── local-cluster.rb
├── subnet-per-host
├── .vagrant
├── Vagrantfile
├── config.rb
└── user-data
├── terraform-example
├── .gitignore
├── cloud-config.yaml
├── connections.graffle
│ ├── data.plist
│ ├── image2.tiff
│ └── image3.tiff
├── connections.png
├── genenv-aws-only.sh
├── genenv.sh
├── gensshwrapper.sh
├── infra-aws-only
│ ├── main.tf
│ ├── outputs.tf
│ ├── providers.tf
│ └── variables.tf
├── infra
│ ├── main.tf
│ ├── outputs.tf
│ ├── providers.tf
│ └── variables.tf
└── terrainfra-gce-aws.png
└── ubuntu-snappy
├── .gitignore
├── Vagrantfile
├── docker.profile
└── weave.profile
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Detect text files automatically
2 | * text=auto
3 |
4 | # Force Unix-style line endings on these files
5 | user-data* text eol=lf
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant/machines/
2 | log/
3 | .*.sw?
4 | .DS_Store
5 | npm-debug.log
6 |
--------------------------------------------------------------------------------
/.vagrant/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/.vagrant/.gitkeep
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Apache License
2 | ==============
3 |
4 | _Version 2.0, January 2004_
5 | _<http://www.apache.org/licenses/>_
6 |
7 | ### Terms and Conditions for use, reproduction, and distribution
8 |
9 | #### 1. Definitions
10 |
11 | “License” shall mean the terms and conditions for use, reproduction, and
12 | distribution as defined by Sections 1 through 9 of this document.
13 |
14 | “Licensor” shall mean the copyright owner or entity authorized by the copyright
15 | owner that is granting the License.
16 |
17 | “Legal Entity” shall mean the union of the acting entity and all other entities
18 | that control, are controlled by, or are under common control with that entity.
19 | For the purposes of this definition, “control” means **(i)** the power, direct or
20 | indirect, to cause the direction or management of such entity, whether by
21 | contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
22 | outstanding shares, or **(iii)** beneficial ownership of such entity.
23 |
24 | “You” (or “Your”) shall mean an individual or Legal Entity exercising
25 | permissions granted by this License.
26 |
27 | “Source” form shall mean the preferred form for making modifications, including
28 | but not limited to software source code, documentation source, and configuration
29 | files.
30 |
31 | “Object” form shall mean any form resulting from mechanical transformation or
32 | translation of a Source form, including but not limited to compiled object code,
33 | generated documentation, and conversions to other media types.
34 |
35 | “Work” shall mean the work of authorship, whether in Source or Object form, made
36 | available under the License, as indicated by a copyright notice that is included
37 | in or attached to the work (an example is provided in the Appendix below).
38 |
39 | “Derivative Works” shall mean any work, whether in Source or Object form, that
40 | is based on (or derived from) the Work and for which the editorial revisions,
41 | annotations, elaborations, or other modifications represent, as a whole, an
42 | original work of authorship. For the purposes of this License, Derivative Works
43 | shall not include works that remain separable from, or merely link (or bind by
44 | name) to the interfaces of, the Work and Derivative Works thereof.
45 |
46 | “Contribution” shall mean any work of authorship, including the original version
47 | of the Work and any modifications or additions to that Work or Derivative Works
48 | thereof, that is intentionally submitted to Licensor for inclusion in the Work
49 | by the copyright owner or by an individual or Legal Entity authorized to submit
50 | on behalf of the copyright owner. For the purposes of this definition,
51 | “submitted” means any form of electronic, verbal, or written communication sent
52 | to the Licensor or its representatives, including but not limited to
53 | communication on electronic mailing lists, source code control systems, and
54 | issue tracking systems that are managed by, or on behalf of, the Licensor for
55 | the purpose of discussing and improving the Work, but excluding communication
56 | that is conspicuously marked or otherwise designated in writing by the copyright
57 | owner as “Not a Contribution.”
58 |
59 | “Contributor” shall mean Licensor and any individual or Legal Entity on behalf
60 | of whom a Contribution has been received by Licensor and subsequently
61 | incorporated within the Work.
62 |
63 | #### 2. Grant of Copyright License
64 |
65 | Subject to the terms and conditions of this License, each Contributor hereby
66 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
67 | irrevocable copyright license to reproduce, prepare Derivative Works of,
68 | publicly display, publicly perform, sublicense, and distribute the Work and such
69 | Derivative Works in Source or Object form.
70 |
71 | #### 3. Grant of Patent License
72 |
73 | Subject to the terms and conditions of this License, each Contributor hereby
74 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
75 | irrevocable (except as stated in this section) patent license to make, have
76 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where
77 | such license applies only to those patent claims licensable by such Contributor
78 | that are necessarily infringed by their Contribution(s) alone or by combination
79 | of their Contribution(s) with the Work to which such Contribution(s) was
80 | submitted. If You institute patent litigation against any entity (including a
81 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a
82 | Contribution incorporated within the Work constitutes direct or contributory
83 | patent infringement, then any patent licenses granted to You under this License
84 | for that Work shall terminate as of the date such litigation is filed.
85 |
86 | #### 4. Redistribution
87 |
88 | You may reproduce and distribute copies of the Work or Derivative Works thereof
89 | in any medium, with or without modifications, and in Source or Object form,
90 | provided that You meet the following conditions:
91 |
92 | * **(a)** You must give any other recipients of the Work or Derivative Works a copy of
93 | this License; and
94 | * **(b)** You must cause any modified files to carry prominent notices stating that You
95 | changed the files; and
96 | * **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
97 | all copyright, patent, trademark, and attribution notices from the Source form
98 | of the Work, excluding those notices that do not pertain to any part of the
99 | Derivative Works; and
100 | * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
101 | Derivative Works that You distribute must include a readable copy of the
102 | attribution notices contained within such NOTICE file, excluding those notices
103 | that do not pertain to any part of the Derivative Works, in at least one of the
104 | following places: within a NOTICE text file distributed as part of the
105 | Derivative Works; within the Source form or documentation, if provided along
106 | with the Derivative Works; or, within a display generated by the Derivative
107 | Works, if and wherever such third-party notices normally appear. The contents of
108 | the NOTICE file are for informational purposes only and do not modify the
109 | License. You may add Your own attribution notices within Derivative Works that
110 | You distribute, alongside or as an addendum to the NOTICE text from the Work,
111 | provided that such additional attribution notices cannot be construed as
112 | modifying the License.
113 |
114 | You may add Your own copyright statement to Your modifications and may provide
115 | additional or different license terms and conditions for use, reproduction, or
116 | distribution of Your modifications, or for any such Derivative Works as a whole,
117 | provided Your use, reproduction, and distribution of the Work otherwise complies
118 | with the conditions stated in this License.
119 |
120 | #### 5. Submission of Contributions
121 |
122 | Unless You explicitly state otherwise, any Contribution intentionally submitted
123 | for inclusion in the Work by You to the Licensor shall be under the terms and
124 | conditions of this License, without any additional terms or conditions.
125 | Notwithstanding the above, nothing herein shall supersede or modify the terms of
126 | any separate license agreement you may have executed with Licensor regarding
127 | such Contributions.
128 |
129 | #### 6. Trademarks
130 |
131 | This License does not grant permission to use the trade names, trademarks,
132 | service marks, or product names of the Licensor, except as required for
133 | reasonable and customary use in describing the origin of the Work and
134 | reproducing the content of the NOTICE file.
135 |
136 | #### 7. Disclaimer of Warranty
137 |
138 | Unless required by applicable law or agreed to in writing, Licensor provides the
139 | Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
140 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
141 | including, without limitation, any warranties or conditions of TITLE,
142 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
143 | solely responsible for determining the appropriateness of using or
144 | redistributing the Work and assume any risks associated with Your exercise of
145 | permissions under this License.
146 |
147 | #### 8. Limitation of Liability
148 |
149 | In no event and under no legal theory, whether in tort (including negligence),
150 | contract, or otherwise, unless required by applicable law (such as deliberate
151 | and grossly negligent acts) or agreed to in writing, shall any Contributor be
152 | liable to You for damages, including any direct, indirect, special, incidental,
153 | or consequential damages of any character arising as a result of this License or
154 | out of the use or inability to use the Work (including but not limited to
155 | damages for loss of goodwill, work stoppage, computer failure or malfunction, or
156 | any and all other commercial damages or losses), even if such Contributor has
157 | been advised of the possibility of such damages.
158 |
159 | #### 9. Accepting Warranty or Additional Liability
160 |
161 | While redistributing the Work or Derivative Works thereof, You may choose to
162 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or
163 | other liability obligations and/or rights consistent with this License. However,
164 | in accepting such obligations, You may act only on Your own behalf and on Your
165 | sole responsibility, not on behalf of any other Contributor, and only if You
166 | agree to indemnify, defend, and hold each Contributor harmless for any liability
167 | incurred by, or claims asserted against, such Contributor by reason of your
168 | accepting any such warranty or additional liability.
169 |
170 | _END OF TERMS AND CONDITIONS_
171 |
172 | # Copyright 2014 Weaveworks, Inc.
173 |
174 | Licensed under the Apache License, Version 2.0 (the "License");
175 | you may not use this file except in compliance with the License.
176 | You may obtain a copy of the License at
177 |
178 | http://www.apache.org/licenses/LICENSE-2.0
179 |
180 | Unless required by applicable law or agreed to in writing, software
181 | distributed under the License is distributed on an "AS IS" BASIS,
182 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
183 | See the License for the specific language governing permissions and
184 | limitations under the License.
185 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Weave Demos
2 |
3 | This repository contains various bits of code which go along with [blog posts I write][blog].
4 |
5 | [blog]: http://weaveblog.com/author/errordeveloper/
6 |
7 | ## Key Figures
8 |
9 | - [Felix](felix/README.md), the happy cat of prototyping, big friends with Vagrant
10 | - [Poseidon](poseidon/README.md), mythical god of the mighty ocean, he can call for a storm, mostly to please the other Greek fella... It's a long story!
11 |
12 | ## Copyright
13 |
14 | Unless explicitly stated, all code included in this repository is under Apache licence (see [LICENSE.md](LICENSE.md)).
15 |
--------------------------------------------------------------------------------
/basic-containers/curl/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gliderlabs/alpine
2 |
3 | RUN apk --update add curl jq
4 |
5 | ENTRYPOINT [ "/bin/sh", "-l" ]
6 |
--------------------------------------------------------------------------------
/basic-containers/sample-webapp/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM progrium/webapp
2 |
3 | ADD ./app.py /app.py
4 |
5 | EXPOSE 5000
6 | CMD ["/usr/bin/python", "/app.py"]
7 |
--------------------------------------------------------------------------------
/basic-containers/sample-webapp/app.py:
--------------------------------------------------------------------------------
1 | import os, socket, redis
2 | from flask import Flask
3 |
4 | app = Flask(__name__)
5 |
6 | @app.route("/")
7 | def hello():
8 | return "Hello, Weave!\n"
9 |
10 | if __name__ == '__main__':
11 | port = int(os.environ.get('PORT', 5000))
12 | app.run(host='0.0.0.0', port=port)
13 |
--------------------------------------------------------------------------------
/basic-weave-example/.vagrant:
--------------------------------------------------------------------------------
1 | ../.vagrant/
--------------------------------------------------------------------------------
/basic-weave-example/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../coreos-vagrant/Vagrantfile
--------------------------------------------------------------------------------
/basic-weave-example/config.rb:
--------------------------------------------------------------------------------
1 | $num_instances=2
2 |
--------------------------------------------------------------------------------
/basic-weave-example/user-data:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | write_files:
3 | - path: /etc/weave.core-01.env
4 | permissions: 0644
5 | owner: root
6 | content: |
7 | WEAVE_LAUNCH_ARGS=""
8 | PINGER_LOCAL="10.0.1.1/24"
9 | PINGER_REMOTE="10.0.1.2"
10 | GREETER_ADDRESS="10.0.2.1/24"
11 | GREETER_MESSAGE="Hello from #1"
12 |
13 | - path: /etc/weave.core-02.env
14 | permissions: 0644
15 | owner: root
16 | content: |
17 | WEAVE_LAUNCH_ARGS="172.17.8.101"
18 | PINGER_LOCAL="10.0.1.2/24"
19 | PINGER_REMOTE="10.0.1.1"
20 | GREETER_ADDRESS="10.0.2.2/24"
21 | GREETER_MESSAGE="Hello from #2"
22 |
23 |
24 | coreos:
25 | units:
26 | - name: 10-weave.network
27 | runtime: false
28 | content: |
29 | [Match]
30 | Type=bridge
31 | Name=weave*
32 |
33 | [Network]
34 |
35 | - name: install-weave.service
36 | command: start
37 | enable: true
38 | content: |
39 | [Unit]
40 | After=network-online.target
41 | After=docker.service
42 | Description=Install Weave
43 | Documentation=http://zettio.github.io/weave/
44 | Requires=network-online.target
45 | Requires=docker.service
46 |
47 | [Service]
48 | Type=oneshot
49 | RemainAfterExit=yes
50 | ExecStartPre=/bin/mkdir -p /opt/bin/
51 | ExecStartPre=/usr/bin/curl \
52 | --silent \
53 | --location \
54 | https://github.com/zettio/weave/releases/download/latest_release/weave \
55 | --output /opt/bin/weave
56 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
57 | ExecStartPre=/usr/bin/docker pull zettio/weave:latest
58 | ExecStart=/bin/echo Weave Installed
59 |
60 | - name: weave.service
61 | command: start
62 | enable: true
63 | content: |
64 | [Unit]
65 | After=install-weave.service
66 | Description=Weave Network
67 | Documentation=http://zettio.github.io/weave/
68 | Requires=install-weave.service
69 |
70 | [Service]
71 | EnvironmentFile=/etc/weave.%H.env
72 | ExecStartPre=/opt/bin/weave launch $WEAVE_LAUNCH_ARGS
73 | ExecStart=/usr/bin/docker logs -f weave
74 | SuccessExitStatus=2
75 | ExecStop=/opt/bin/weave stop
76 |
77 | - name: pinger.service
78 | command: start
79 | enable: true
80 | content: |
81 | [Unit]
82 | After=weave.service
83 | After=install-busybox.service
84 | Description=Weave Network Test Monitor
85 | Documentation=http://zettio.github.io/weave/
86 | Requires=weave.service
87 | Requires=install-busybox.service
88 |
89 | [Service]
90 | EnvironmentFile=/etc/weave.%H.env
91 | Type=oneshot
92 | RemainAfterExit=yes
93 | ExecStart=/opt/bin/weave \
94 | run $PINGER_LOCAL \
95 | --name=pinger busybox:latest \
96 | ping $PINGER_REMOTE
97 |
98 | - name: greeter.service
99 | command: start
100 | enable: true
101 | content: |
102 | [Unit]
103 | After=weave.service
104 | After=install-busybox.service
105 | Description=Weave Network Test Service
106 | Documentation=http://zettio.github.io/weave/
107 | Requires=weave.service
108 | Requires=install-busybox.service
109 |
110 | [Service]
111 | EnvironmentFile=/etc/weave.%H.env
112 | Type=oneshot
113 | RemainAfterExit=yes
114 | ExecStart=/opt/bin/weave \
115 | run $GREETER_ADDRESS \
116 | --name=greeter busybox:latest \
117 | nc -ll -p 2000 0.0.0.0 -e /bin/echo $GREETER_MESSAGE
118 |
119 | - name: install-busybox.service
120 | command: start
121 | enable: true
122 | content: |
123 | [Unit]
124 | After=network-online.target
125 | After=docker.service
126 | Description=Install BusyBox
127 | Documentation=http://zettio.github.io/weave/
128 | Requires=network-online.target
129 | Requires=docker.service
130 |
131 | [Service]
132 | Type=oneshot
133 | RemainAfterExit=yes
134 | ExecStart=/usr/bin/docker pull busybox:latest
135 |
--------------------------------------------------------------------------------
/coreos-vagrant/.gitattributes:
--------------------------------------------------------------------------------
1 | # Detect text files automatically
2 | * text=auto
3 |
4 | # Force Unix-style line endings on these files
5 | user-data* text eol=lf
6 |
--------------------------------------------------------------------------------
/coreos-vagrant/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant/
2 | log/
3 | user-data
4 | config.rb
5 |
--------------------------------------------------------------------------------
/coreos-vagrant/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
4 | GitHub pull requests. This document outlines some of the conventions on
5 | development workflow, commit message formatting, contact points and other
6 | resources to make it easier to get your contribution accepted.
7 |
8 | # Certificate of Origin
9 |
10 | By contributing to this project you agree to the Developer Certificate of
11 | Origin (DCO). This document was created by the Linux Kernel community and is a
12 | simple statement that you, as a contributor, have the legal right to make the
13 | contribution. See the [DCO](DCO) file for details.
14 |
15 | # Email and Chat
16 |
17 | The project currently uses the general CoreOS email list and IRC channel:
18 | - Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
19 | - IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
20 |
21 | Please avoid emailing maintainers found in the MAINTAINERS file directly. They
22 | are very busy and read the mailing lists.
23 |
24 | ## Getting Started
25 |
26 | - Fork the repository on GitHub
27 | - Read the [README](README.md) for build and test instructions
28 | - Play with the project, submit bugs, submit patches!
29 |
30 | ## Contribution Flow
31 |
32 | This is a rough outline of what a contributor's workflow looks like:
33 |
34 | - Create a topic branch from where you want to base your work (usually master).
35 | - Make commits of logical units.
36 | - Make sure your commit messages are in the proper format (see below).
37 | - Push your changes to a topic branch in your fork of the repository.
38 | - Make sure the tests pass, and add any new tests as appropriate.
39 | - Submit a pull request to the original repository.
40 |
41 | Thanks for your contributions!
42 |
43 | ### Format of the Commit Message
44 |
45 | We follow a rough convention for commit messages that is designed to answer two
46 | questions: what changed and why. The subject line should feature the what and
47 | the body of the commit should describe the why.
48 |
49 | ```
50 | scripts: add the test-cluster command
51 |
52 | this uses tmux to setup a test cluster that you can easily kill and
53 | start for debugging.
54 |
55 | Fixes #38
56 | ```
57 |
58 | The format can be described more formally as follows:
59 |
60 | ```
61 | <subsystem>: <what changed>
62 | <BLANK LINE>
63 | <why this change was made>
64 | <BLANK LINE>
65 | <footer>
66 | ```
67 |
68 | The first line is the subject and should be no longer than 70 characters, the
69 | second line is always blank, and other lines should be wrapped at 80 characters.
70 | This allows the message to be easier to read on GitHub as well as in various
71 | git tools.
72 |
--------------------------------------------------------------------------------
/coreos-vagrant/DCO:
--------------------------------------------------------------------------------
1 | Developer Certificate of Origin
2 | Version 1.1
3 |
4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
5 | 660 York Street, Suite 102,
6 | San Francisco, CA 94110 USA
7 |
8 | Everyone is permitted to copy and distribute verbatim copies of this
9 | license document, but changing it is not allowed.
10 |
11 |
12 | Developer's Certificate of Origin 1.1
13 |
14 | By making a contribution to this project, I certify that:
15 |
16 | (a) The contribution was created in whole or in part by me and I
17 | have the right to submit it under the open source license
18 | indicated in the file; or
19 |
20 | (b) The contribution is based upon previous work that, to the best
21 | of my knowledge, is covered under an appropriate open source
22 | license and I have the right under that license to submit that
23 | work with modifications, whether created in whole or in part
24 | by me, under the same open source license (unless I am
25 | permitted to submit under a different license), as indicated
26 | in the file; or
27 |
28 | (c) The contribution was provided directly to me by some other
29 | person who certified (a), (b) or (c) and I have not modified
30 | it.
31 |
32 | (d) I understand and agree that this project and the contribution
33 | are public and that a record of the contribution (including all
34 | personal information I submit with it, including my sign-off) is
35 | maintained indefinitely and may be redistributed consistent with
36 | this project or the open source license(s) involved.
37 |
--------------------------------------------------------------------------------
/coreos-vagrant/MAINTAINERS:
--------------------------------------------------------------------------------
1 | Jon Boulle (@jonboulle)
2 | Brian Waldon (@bcwaldon)
3 |
--------------------------------------------------------------------------------
/coreos-vagrant/NOTICE:
--------------------------------------------------------------------------------
1 | CoreOS Project
2 | Copyright 2014 CoreOS, Inc
3 |
4 | This product includes software developed at CoreOS, Inc.
5 | (http://www.coreos.com/).
6 |
--------------------------------------------------------------------------------
/coreos-vagrant/README.md:
--------------------------------------------------------------------------------
1 | # CoreOS Vagrant
2 |
3 | This repo provides a template Vagrantfile to create a CoreOS virtual machine using the VirtualBox software hypervisor.
4 | After setup is complete you will have a single CoreOS virtual machine running on your local machine.
5 |
6 | ## Streamlined setup
7 |
8 | 1) Install dependencies
9 |
10 | * [VirtualBox][virtualbox] 4.3.10 or greater.
11 | * [Vagrant][vagrant] 1.6 or greater.
12 |
13 | 2) Clone this project and get it running!
14 |
15 | ```
16 | git clone https://github.com/coreos/coreos-vagrant/
17 | cd coreos-vagrant
18 | ```
19 |
20 | 3) Startup and SSH
21 |
22 | There are two "providers" for Vagrant with slightly different instructions.
23 | Follow one of the following two options:
24 |
25 | **VirtualBox Provider**
26 |
27 | The VirtualBox provider is the default Vagrant provider. Use this if you are unsure.
28 |
29 | ```
30 | vagrant up
31 | vagrant ssh
32 | ```
33 |
34 | **VMware Provider**
35 |
36 | The VMware provider is a commercial addon from Hashicorp that offers better stability and speed.
37 | If you use this provider follow these instructions.
38 |
39 | VMware Fusion:
40 | ```
41 | vagrant up --provider vmware_fusion
42 | vagrant ssh
43 | ```
44 |
45 | VMware Workstation:
46 | ```
47 | vagrant up --provider vmware_workstation
48 | vagrant ssh
49 | ```
50 |
51 | ``vagrant up`` triggers vagrant to download the CoreOS image (if necessary) and (re)launch the instance
52 |
53 | ``vagrant ssh`` connects you to the virtual machine.
54 | Configuration is stored in the directory so you can always return to this machine by executing vagrant ssh from the directory where the Vagrantfile was located.
55 |
56 | 4) Get started [using CoreOS][using-coreos]
57 |
58 | [virtualbox]: https://www.virtualbox.org/
59 | [vagrant]: https://www.vagrantup.com/downloads.html
60 | [using-coreos]: http://coreos.com/docs/using-coreos/
61 |
62 | #### Shared Folder Setup
63 |
64 | There is optional shared folder setup.
65 | You can try it out by adding a section to your Vagrantfile like this.
66 |
67 | ```
68 | config.vm.network "private_network", ip: "172.17.8.150"
69 | config.vm.synced_folder ".", "/home/core/share", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp']
70 | ```
71 |
72 | After a 'vagrant reload' you will be prompted for your local machine password.
73 |
74 | #### Provisioning with user-data
75 |
76 | The Vagrantfile will provision your CoreOS VM(s) with [coreos-cloudinit][coreos-cloudinit] if a `user-data` file is found in the project directory.
77 | coreos-cloudinit simplifies the provisioning process through the use of a script or cloud-config document.
78 |
79 | To get started, copy `user-data.sample` to `user-data` and make any necessary modifications.
80 | Check out the [coreos-cloudinit documentation][coreos-cloudinit] to learn about the available features.
81 |
82 | [coreos-cloudinit]: https://github.com/coreos/coreos-cloudinit
83 |
84 | #### Configuration
85 |
86 | The Vagrantfile will parse a `config.rb` file containing a set of options used to configure your CoreOS cluster.
87 | See `config.rb.sample` for more information.
88 |
89 | ## Cluster Setup
90 |
91 | Launching a CoreOS cluster on Vagrant is as simple as configuring `$num_instances` in a `config.rb` file to 3 (or more!) and running `vagrant up`.
92 | Make sure you provide a fresh discovery URL in your `user-data` if you wish to bootstrap etcd in your cluster.
93 |
94 | ## New Box Versions
95 |
96 | CoreOS is a rolling release distribution and versions that are out of date will automatically update.
97 | If you want to start from the most up to date version you will need to make sure that you have the latest box file of CoreOS.
98 | Simply remove the old box file and vagrant will download the latest one the next time you `vagrant up`.
99 |
100 | ```
101 | vagrant box remove coreos --provider vmware_fusion
102 | vagrant box remove coreos --provider vmware_workstation
103 | vagrant box remove coreos --provider virtualbox
104 | ```
105 |
106 | ## Docker Forwarding
107 |
108 | By setting the `$expose_docker_tcp` configuration value you can forward a local TCP port to docker on
109 | each CoreOS machine that you launch. The first machine will be available on the port that you specify
110 | and each additional machine will increment the port by 1.
111 |
112 | Follow the [Enable Remote API instructions][coreos-enabling-port-forwarding] to get the CoreOS VM setup to work with port forwarding.
113 |
114 | [coreos-enabling-port-forwarding]: https://coreos.com/docs/launching-containers/building/customizing-docker/#enable-the-remote-api-on-a-new-socket
115 |
116 | Then you can then use the `docker` command from your local shell by setting `DOCKER_HOST`:
117 |
118 | export DOCKER_HOST=tcp://localhost:2375
119 |
--------------------------------------------------------------------------------
/coreos-vagrant/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # # vi: set ft=ruby :
3 |
4 | require 'fileutils'
5 |
6 | Vagrant.require_version ">= 1.6.0"
7 |
8 | CLOUD_CONFIG_PATH = File.join(File.dirname(__FILE__), "user-data")
9 | CONFIG = File.join(File.dirname(__FILE__), "config.rb")
10 |
11 | # Defaults for config options defined in CONFIG
12 | $num_instances = 1
13 | $instance_name_prefix = "core"
14 | $update_channel = "alpha"
15 | $image_version = "current"
16 | $enable_serial_logging = false
17 | $share_home = false
18 | $vm_gui = false
19 | $vm_memory = 1024
20 | $vm_cpus = 1
21 | $shared_folders = {}
22 | $forwarded_ports = {}
23 |
24 | # Attempt to apply the deprecated environment variable NUM_INSTANCES to
25 | # $num_instances while allowing config.rb to override it
26 | if ENV["NUM_INSTANCES"].to_i > 0 && ENV["NUM_INSTANCES"]
27 | $num_instances = ENV["NUM_INSTANCES"].to_i
28 | end
29 |
30 | if File.exist?(CONFIG)
31 | require CONFIG
32 | end
33 |
34 | # Use old vb_xxx config variables when set
35 | def vm_gui
36 | $vb_gui.nil? ? $vm_gui : $vb_gui
37 | end
38 |
39 | def vm_memory
40 | $vb_memory.nil? ? $vm_memory : $vb_memory
41 | end
42 |
43 | def vm_cpus
44 | $vb_cpus.nil? ? $vm_cpus : $vb_cpus
45 | end
46 |
47 | Vagrant.configure("2") do |config|
48 | # always use Vagrants insecure key
49 | config.ssh.insert_key = false
50 |
51 | config.vm.box = "coreos-%s" % $update_channel
52 | if $image_version != "current"
53 | config.vm.box_version = $image_version
54 | end
55 | config.vm.box_url = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/%s/coreos_production_vagrant.json" % [$update_channel, $image_version]
56 |
57 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
58 | config.vm.provider vmware do |v, override|
59 | override.vm.box_url = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/%s/coreos_production_vagrant_vmware_fusion.json" % [$update_channel, $image_version]
60 | end
61 | end
62 |
63 | config.vm.provider :virtualbox do |v|
64 | # On VirtualBox, we don't have guest additions or a functional vboxsf
65 | # in CoreOS, so tell Vagrant that so it can be smarter.
66 | v.check_guest_additions = false
67 | v.functional_vboxsf = false
68 | end
69 |
70 | # plugin conflict
71 | if Vagrant.has_plugin?("vagrant-vbguest") then
72 | config.vbguest.auto_update = false
73 | end
74 |
75 | (1..$num_instances).each do |i|
76 | config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
77 | config.vm.hostname = vm_name
78 |
79 | if $enable_serial_logging
80 | logdir = File.join(File.dirname(__FILE__), "log")
81 | FileUtils.mkdir_p(logdir)
82 |
83 | serialFile = File.join(logdir, "%s-serial.txt" % vm_name)
84 | FileUtils.touch(serialFile)
85 |
86 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
87 | config.vm.provider vmware do |v, override|
88 | v.vmx["serial0.present"] = "TRUE"
89 | v.vmx["serial0.fileType"] = "file"
90 | v.vmx["serial0.fileName"] = serialFile
91 | v.vmx["serial0.tryNoRxLoss"] = "FALSE"
92 | end
93 | end
94 |
95 | config.vm.provider :virtualbox do |vb, override|
96 | vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"]
97 | vb.customize ["modifyvm", :id, "--uartmode1", serialFile]
98 | end
99 | end
100 |
101 | if $expose_docker_tcp
102 | config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
103 | end
104 |
105 | $forwarded_ports.each do |guest, host|
106 | config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
107 | end
108 |
109 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
110 | config.vm.provider vmware do |v|
111 | v.gui = vm_gui
112 | v.vmx['memsize'] = vm_memory
113 | v.vmx['numvcpus'] = vm_cpus
114 | end
115 | end
116 |
117 | config.vm.provider :virtualbox do |vb|
118 | vb.gui = vm_gui
119 | vb.memory = vm_memory
120 | vb.cpus = vm_cpus
121 | end
122 |
123 | ip = "172.17.8.#{i+100}"
124 | config.vm.network :private_network, ip: ip
125 |
126 | # Uncomment below to enable NFS for sharing the host machine into the coreos-vagrant VM.
127 | #config.vm.synced_folder ".", "/home/core/share", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp']
128 | $shared_folders.each_with_index do |(host_folder, guest_folder), index|
129 | config.vm.synced_folder host_folder.to_s, guest_folder.to_s, id: "core-share%02d" % index, nfs: true, mount_options: ['nolock,vers=3,udp']
130 | end
131 |
132 | if $share_home
133 | config.vm.synced_folder ENV['HOME'], ENV['HOME'], id: "home", :nfs => true, :mount_options => ['nolock,vers=3,udp']
134 | end
135 |
136 | if File.exist?(CLOUD_CONFIG_PATH)
137 | config.vm.provision :file, :source => "#{CLOUD_CONFIG_PATH}", :destination => "/tmp/vagrantfile-user-data"
138 | config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
139 | end
140 |
141 | end
142 | end
143 | end
144 |
--------------------------------------------------------------------------------
/coreos-vagrant/config.rb.sample:
--------------------------------------------------------------------------------
1 | # Size of the CoreOS cluster created by Vagrant
2 | $num_instances=1
3 |
4 | # Used to fetch a new discovery token for a cluster of size $num_instances
5 | $new_discovery_url="https://discovery.etcd.io/new?size=#{$num_instances}"
6 |
7 | # Automatically replace the discovery token on 'vagrant up'
8 |
9 | if File.exists?('user-data') && ARGV[0].eql?('up')
10 | require 'open-uri'
11 | require 'yaml'
12 |
13 | token = open($new_discovery_url).read
14 |
15 | data = YAML.load(IO.readlines('user-data')[1..-1].join)
16 |
17 | if data.key? 'coreos' and data['coreos'].key? 'etcd'
18 | data['coreos']['etcd']['discovery'] = token
19 | end
20 |
21 | if data.key? 'coreos' and data['coreos'].key? 'etcd2'
22 | data['coreos']['etcd2']['discovery'] = token
23 | end
24 |
25 | # Fix for YAML.load() converting reboot-strategy from 'off' to `false`
26 | if data.key? 'coreos' and data['coreos'].key? 'update' and data['coreos']['update'].key? 'reboot-strategy'
27 | if data['coreos']['update']['reboot-strategy'] == false
28 | data['coreos']['update']['reboot-strategy'] = 'off'
29 | end
30 | end
31 |
32 | yaml = YAML.dump(data)
33 | File.open('user-data', 'w') { |file| file.write("#cloud-config\n\n#{yaml}") }
34 | end
35 |
36 | #
37 | # coreos-vagrant is configured through a series of configuration
38 | # options (global ruby variables) which are detailed below. To modify
39 | # these options, first copy this file to "config.rb". Then simply
40 | # uncomment the necessary lines, leaving the $, and replace everything
41 | # after the equals sign.
42 |
43 | # Change basename of the VM
44 | # The default value is "core", which results in VMs named starting with
45 | # "core-01" through to "core-${num_instances}".
46 | #$instance_name_prefix="core"
47 |
48 | # Change the version of CoreOS to be installed
49 | # To deploy a specific version, simply set $image_version accordingly.
50 | # For example, to deploy version 709.0.0, set $image_version="709.0.0".
51 | # The default value is "current", which points to the current version
52 | # of the selected channel
53 | #$image_version = "current"
54 |
55 | # Official CoreOS channel from which updates should be downloaded
56 | #$update_channel='alpha'
57 |
58 | # Log the serial consoles of CoreOS VMs to log/
59 | # Enable by setting value to true, disable with false
60 | # WARNING: Serial logging is known to result in extremely high CPU usage with
61 | # VirtualBox, so should only be used in debugging situations
62 | #$enable_serial_logging=false
63 |
64 | # Enable port forwarding of Docker TCP socket
65 | # Set to the TCP port you want exposed on the *host* machine, default is 2375
66 | # If 2375 is used, Vagrant will auto-increment (e.g. in the case of $num_instances > 1)
67 | # You can then use the docker tool locally by setting the following env var:
68 | # export DOCKER_HOST='tcp://127.0.0.1:2375'
69 | #$expose_docker_tcp=2375
70 |
71 | # Enable NFS sharing of your home directory ($HOME) to CoreOS
72 | # It will be mounted at the same path in the VM as on the host.
73 | # Example: /Users/foobar -> /Users/foobar
74 | #$share_home=false
75 |
76 | # Customize VMs
77 | #$vm_gui = false
78 | #$vm_memory = 1024
79 | #$vm_cpus = 1
80 |
81 | # Share additional folders to the CoreOS VMs
82 | # For example,
83 | # $shared_folders = {'/path/on/host' => '/path/on/guest', '/home/foo/app' => '/app'}
84 | # or, to map host folders to guest folders of the same name,
85 | # $shared_folders = Hash[*['/home/foo/app1', '/home/foo/app2'].map{|d| [d, d]}.flatten]
86 | #$shared_folders = {}
87 |
88 | # Enable port forwarding from guest(s) to host machine, syntax is: { 80 => 8080 }, auto correction is enabled by default.
89 | #$forwarded_ports = {}
90 |
--------------------------------------------------------------------------------
/coreos-vagrant/user-data.sample:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | coreos:
4 | etcd2:
5 | #generate a new token for each unique cluster from https://discovery.etcd.io/new
6 | #discovery: https://discovery.etcd.io/
7 | # multi-region and multi-cloud deployments need to use $public_ipv4
8 | advertise-client-urls: http://$public_ipv4:2379
9 | initial-advertise-peer-urls: http://$private_ipv4:2380
10 | # listen on both the official ports and the legacy ports
11 | # legacy ports can be omitted if your application doesn't depend on them
12 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
13 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
14 | fleet:
15 | public-ip: $public_ipv4
16 | flannel:
17 | interface: $public_ipv4
18 | units:
19 | - name: etcd2.service
20 | command: start
21 | - name: fleet.service
22 | command: start
23 | - name: flanneld.service
24 | drop-ins:
25 | - name: 50-network-config.conf
26 | content: |
27 | [Service]
28 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16" }'
29 | command: start
30 | - name: docker-tcp.socket
31 | command: start
32 | enable: true
33 | content: |
34 | [Unit]
35 | Description=Docker Socket for the API
36 |
37 | [Socket]
38 | ListenStream=2375
39 | Service=docker.service
40 | BindIPv6Only=both
41 |
42 | [Install]
43 | WantedBy=sockets.target
44 |
--------------------------------------------------------------------------------
/coreos-vagrant_subtree_pull.sh:
--------------------------------------------------------------------------------
1 | git ls-remote https://github.com/coreos/coreos-vagrant master \
2 | | awk '{ system( "git subtree pull --message=\"Subtree merge of coreos/coreos-vagrant@" $1 "\" --prefix=coreos-vagrant https://github.com/coreos/coreos-vagrant " $1) }'
3 |
--------------------------------------------------------------------------------
/felix/.gitignore:
--------------------------------------------------------------------------------
1 | user-data
2 | config-override.rb
3 |
--------------------------------------------------------------------------------
/felix/.vagrant:
--------------------------------------------------------------------------------
1 | ../.vagrant
--------------------------------------------------------------------------------
/felix/README.md:
--------------------------------------------------------------------------------
1 | # Felix, a simple and flexible Vagrant template for Weave+CoreOS
2 |
3 | 
4 |
5 | ## About this cat
6 |
7 | This is just a friendly and helpful cat; you can use him to prototype projects with Weave on Vagrant running CoreOS.
8 |
9 | It's based on [coreos/coreos-vagrant](https://github.com/coreos/coreos-vagrant/), which is included within this repository. All Felix does is add `config.rb`.
10 |
11 | With some basic logic in `config.rb`, he can append any number of `/etc/weave.#{HOSTNAME}.env` files to his own `cloud-config.yaml` and write out `user-data`, which he passes to Vagrant. He also generates some random strings that are used as weave network crypto salt for the lifetime of the Vagrant VMs. It's all kinda simple.
12 |
13 | ## How to use it
14 |
15 | ### Basic usage
16 |
17 | ```
18 | git clone https://github.com/errordeveloper/weave-demos
19 | cd weave-demos/felix
20 | vagrant up
21 | ```
22 |
23 | ### Configuration
24 |
25 | Felix will bring 3 boxes up by default. Each will have 1 CPU and 2G of RAM.
26 |
27 | If you wish to override the number of boxes:
28 | ```
29 | echo '$num_instances=4' > config-override.rb
30 | ```
31 |
32 | If your host doesn't have 8G of RAM to spare for the 4 boxes you are wanting:
33 | ```
34 | echo '$vb_memory=512' >> config-override.rb
35 | ```
36 |
37 | All other defaults are defined by the upstream [`Vagrantfile`](../coreos-vagrant/Vagrantfile#L11-L17).
38 |
39 | ## Next Steps
40 |
41 | Once the VMs are up, you can proceed to deploy Docker containers on Weave.
42 |
43 | Here is something simple you can try.
44 |
45 | Firstly, launch a web server on one machine:
46 | ```
47 | $ vagrant ssh core-01 -c 'eval $(weave env) ; docker run -d --name=hola errordeveloper/hello-weave'
48 | Unable to find image 'errordeveloper/hello-weave:latest' locally
49 | Pulling repository errordeveloper/hello-weave
50 | ...
51 | Status: Downloaded newer image for errordeveloper/hello-weave:latest
52 | 007f00c857bb2559ed29cb713ba8cb88ff7ce2d23ec7f16231052bc0d6e92acc
53 | Connection to 127.0.0.1 closed.
54 | ```
55 | Then, attach a client container on the other and test it like so:
56 | ```
57 | 0 %> vagrant ssh core-02
58 | CoreOS stable (557.2.0)
59 | Update Strategy: No Reboots
60 | core@core-02 ~ $ eval $(weave env)
61 | core@core-02 ~ $ docker run -ti errordeveloper/curl
62 | Unable to find image 'errordeveloper/curl:latest' locally
63 | Pulling repository errordeveloper/curl
64 | ...
65 | Status: Downloaded newer image for errordeveloper/curl:latest
66 |
67 | / # ping -c 3 hola.weave.local
68 | PING hola.weave.local (10.0.0.1): 56 data bytes
69 | 64 bytes from 10.0.0.1: seq=0 ttl=64 time=2.635 ms
70 | 64 bytes from 10.0.0.1: seq=1 ttl=64 time=2.522 ms
71 | 64 bytes from 10.0.0.1: seq=2 ttl=64 time=3.134 ms
72 |
73 | --- hola.weave.local ping statistics ---
74 | 3 packets transmitted, 3 packets received, 0% packet loss
75 | round-trip min/avg/max = 2.522/2.763/3.134 ms
76 | / #
77 | / # curl hola.weave.local:5000
78 | Hello, Weave!
79 | ```
80 |
81 | — Happy weaving with Felix!
82 |
--------------------------------------------------------------------------------
/felix/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../coreos-vagrant/Vagrantfile
--------------------------------------------------------------------------------
/felix/cloud-config.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | coreos:
4 | update:
5 | reboot-strategy: 'off'
6 | units:
7 | - name: weave-network.target
8 | enable: true
9 | command: start
10 | content: |
11 | [Unit]
12 | Description=Weave Network Setup Complete
13 | Documentation=man:systemd.special(7)
14 | RefuseManualStart=no
15 | After=network-online.target
16 | [Install]
17 | WantedBy=multi-user.target
18 |
19 | - name: 10-weave.network
20 | runtime: false
21 | content: |
22 | [Match]
23 | Type=bridge
24 | Name=weave*
25 | [Network]
26 |
27 | - name: install-weave.service
28 | enable: true
29 | content: |
30 | [Unit]
31 | After=network-online.target
32 | After=docker.service
33 | Before=weave.service
34 | Description=Install Weave
35 | Documentation=http://docs.weave.works/
36 | Requires=network-online.target
37 | [Service]
38 | EnvironmentFile=-/etc/weave.%H.env
39 | EnvironmentFile=-/etc/weave.env
40 | Type=oneshot
41 | RemainAfterExit=yes
42 | TimeoutStartSec=0
43 | ExecStartPre=/bin/mkdir -p /opt/bin/
44 | ExecStartPre=/usr/bin/curl \
45 | --silent \
46 | --location \
47 | git.io/weave \
48 | --output /opt/bin/weave
49 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
50 | ExecStart=/opt/bin/weave setup
51 | [Install]
52 | WantedBy=weave-network.target
53 | WantedBy=weave.service
54 |
55 | - name: weaveproxy.service
56 | enable: true
57 | content: |
58 | [Unit]
59 | After=install-weave.service
60 | After=docker.service
61 | Description=Weave proxy for Docker API
62 | Documentation=http://docs.weave.works/
63 | Requires=docker.service
64 | Requires=install-weave.service
65 | [Service]
66 | EnvironmentFile=-/etc/weave.%H.env
67 | EnvironmentFile=-/etc/weave.env
68 | ExecStartPre=/opt/bin/weave launch-proxy $WEAVEPROXY_ARGS
69 | ExecStart=/usr/bin/docker attach weaveproxy
70 | Restart=on-failure
71 | ExecStop=/opt/bin/weave stop-proxy
72 | [Install]
73 | WantedBy=weave-network.target
74 |
75 | - name: weave.service
76 | enable: true
77 | content: |
78 | [Unit]
79 | After=install-weave.service
80 | After=docker.service
81 | Description=Weave Network Router
82 | Documentation=http://docs.weave.works/
83 | Requires=docker.service
84 | Requires=install-weave.service
85 | [Service]
86 | TimeoutStartSec=0
87 | EnvironmentFile=-/etc/weave.%H.env
88 | EnvironmentFile=-/etc/weave.env
89 | ExecStartPre=/opt/bin/weave launch-router $WEAVE_PEERS
90 | ExecStart=/usr/bin/docker attach weave
91 | Restart=on-failure
92 | ExecStop=/opt/bin/weave stop-router
93 | [Install]
94 | WantedBy=weave-network.target
95 |
--------------------------------------------------------------------------------
/felix/config.rb:
--------------------------------------------------------------------------------
1 | $num_instances=3
2 | $vb_memory=2048
3 | $update_channel = 'stable'
4 |
5 | begin
6 | require File.join(File.dirname(__FILE__), 'config-override.rb')
7 | rescue LoadError => e
8 | end
9 |
10 | require 'securerandom'
11 | WEAVE_PASSWORD = SecureRandom.uuid
12 |
13 | def genenv_content(count)
14 | case count
15 | when 0
16 | weave_peers=''
17 | else
18 | weave_peers='172.17.8.101'
19 | end
20 |
21 | %W(
22 | WEAVE_PEERS="#{weave_peers}"
23 | WEAVE_PASSWORD="#{WEAVE_PASSWORD}"
24 | ).join("\n")
25 | end
26 |
27 | def genenv(count)
28 | {
29 | 'path' => sprintf("/etc/weave.core-%.2d.env", count+1),
30 | 'permissions' => '0600',
31 | 'owner' => 'root',
32 | 'content' => genenv_content(count),
33 | }
34 | end
35 |
36 | if File.exists?('cloud-config.yaml') && ARGV[0].eql?('up')
37 | require 'yaml'
38 | open('cloud-config.yaml', 'r') do |f|
39 | data = YAML.load(f)
40 |
41 | data['write_files'] = $num_instances.times.map { |x| genenv(x) }
42 |
43 | open('user-data', 'w') do |f|
44 | lines = YAML.dump(data).split("\n")
45 | lines[0] = '#cloud-config'
46 | f.puts(lines)
47 | end
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Elasticsearch, Weave and Docker
3 | tags: nodejs, iojs, javascript, docker, coreos, guide, usecase, elasticsearch, vagrant, coreos, bigdata
4 | published: true
5 | ---
6 |
7 | This guide will demonstrate how to deploy an [Elasticsearch](http://www.elasticsearch.org/) cluster on [Weave](https://github.com/zettio/weave#weave---the-docker-network) as well as a JavaScript microservice application for it.
8 |
9 | There are a few major advantages of using Weave for Elasticsearch. Firstly, you will gain [Zen discovery](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html) working out of the box on any infrastructure. Secondly, you can encrypt the communication and avoid having to setup authentication and an HTTPS proxy. Additionally, you can also run a number of clusters across any number of hosts, which can be particularly useful in development, where teams want to have their own cluster instances but the number of physical machines is limited. There are other advantages of using Weave, however the above are the most critical for Elasticsearch.
10 |
11 | I will first show step-by-step how to run Elasticsearch on Weave, and then deploy a simple JavaScript app container.
12 |
13 | ## Setup an Elasticsearch cluster
14 |
15 | To run an Elasticsearch cluster, you will need 2 or more hosts. There are different ways to provision these and we are committed to guiding you on [tooling](http://weaveblog.com/tag/provisioning/), but for this post I have prepared something simple that will get you up and running with 3 VMs on Vagrant.
16 |
17 | Let's get started!
18 |
19 | ```
20 | $ git clone https://github.com/errordeveloper/weave-demos
21 | $ cd weave-demos/felix
22 | $ vagrant up
23 | ```
24 |
25 | This should bring up 3 VMs with weave installed out of the box. Now you can login to each of these and start Elasticsearch like so:
26 |
27 | ```
28 | sudo weave run \
29 | --with-dns 10.10.1.X/24 \
30 | --hostname=es-X.weave.local \
31 | errordeveloper/weave-elasticsearch-minimal:latest
32 | ```
33 |
34 | > _NOTE: the above command uses a container of my own, however you can use any other, given you have set configuration options correctly. Please refer to my Dockerfile lines [16](https://github.com/errordeveloper/weave-demos/blob/d2c2a00/java-containers/elasticsearch/Dockerfile#L16) and [33-35](https://github.com/errordeveloper/weave-demos/blob/d2c2a00/java-containers/elasticsearch/Dockerfile#L33-L35) for details._
35 |
36 | You would substitute `X` with 1, 2 and 3 for each of the nodes. To keep it simple for you, I have created a little shell script that starts all 3 of these:
37 |
38 | ```
39 | cd ../hello-apps/elasticsearch-js/
40 | ./scripts/run_elasticsearch.sh
41 | ```
42 |
43 | Once all 3 nodes are set-up, let's verify we have a cluster. The easiest way to do so is by exposing the Docker host to the Weave network with the `weave expose <ip>` command, where `<ip>` is a free IP address on the same subnet the app is on. We have used `10.10.1.X/24` for the Elasticsearch nodes, so let's pick a free IP in this subnet.
44 | ```
45 | vagrant ssh core-01
46 | core@core-01 ~ $ sudo weave expose 10.10.1.100/24
47 | core@core-01 ~ $ curl 10.10.1.1:9200/_cat/nodes
48 | es-2.weave.local 10.10.1.2 4 17 0.01 d m Hobgoblin II
49 | es-3.weave.local 10.10.1.3 4 17 0.00 d m Chtylok
50 | es-1.weave.local 10.10.1.1 5 17 0.07 d * Madame MacEvil
51 | core@core-01 ~ $ exit
52 | ```
53 |
54 | Ok, this looks pretty good so far. Let's move on to the next step!
55 |
56 | ## Deploying an app
57 |
58 | For the purpose of this post, I have written a little JavaScript demo app that talks to Elasticsearch. [It's rather simple](https://github.com/errordeveloper/weave-demos/blob/master/hello-apps/elasticsearch-js/index.js), yet capable of creating and retrieving documents in Elasticsearch.
59 |
60 | The app refers to ES nodes by their Weave DNS names like so:
61 | ```
62 | var es = new elasticsearch.Client({
63 | hosts: [ 'es-1.weave.local:9200'
64 | , 'es-2.weave.local:9200'
65 | , 'es-3.weave.local:9200' ],
66 | log: 'trace'
67 | });
68 | ```
69 |
70 | DNS in Weave is much more dynamic than traditional DNS: any container on the network gets a DNS record automatically, and no management tools or in-app self-registration are required. It will make your app config more human-friendly and the underlying IP addresses can be re-organised transparently when needed.
71 |
72 | Let's deploy it on `core-01`:
73 |
74 | ```
75 | git clone https://github.com/errordeveloper/weave-demos
76 | cd weave-demos/hello-apps/elasticsearch-js/
77 | ```
78 |
79 | First, run a build script that will install the dependencies and create a new container that you can run.
80 |
81 | ```
82 | ./scripts/build.sh
83 | ```
84 |
85 | > _NOTE: I am using an IO.js container [image of my own](https://registry.hub.docker.com/u/errordeveloper/iojs-minimal-runtime/), but you can use anything else._
86 |
87 | Now you have built the app into a local container image, which is ready to be deployed on the weave network.
88 |
89 | ```
90 | sudo weave run --with-dns 10.10.1.11/24 \
91 | --name hello-es-app-instance \
92 | -h hello-es-app.weave.local \
93 | -p 80:80 \
94 | hello-es-app
95 | docker logs -f hello-es-app-instance
96 | ```
97 |
98 | As you can see, port 80 will be exposed to the world. The IP address of Vagrant VM `core-01` is `172.17.8.101`.
99 |
100 | The API defined by our app is pretty simple:
101 |
102 | - `GET /` will give you some basic info about the database cluster
103 | - `POST /hello/:title` will store body in a document with title `:title`
104 | - `GET /hello/:title` will retrieve contents of document with title `:title`
105 | - `GET /search/:title` will search by title
106 |
107 | So let's create our first document:
108 |
109 | ```
110 | curl -s \
111 | --request POST \
112 | --data '{"a": 1}' \
113 | --header 'Content-type: application/json' \
114 | http://172.17.8.101/hello/sample1
115 | {
116 | "msg": {
117 | "_index": "hello",
118 | "_type": "json",
119 | "_id": "AUsB9l_6iEcqWz_eIw5X",
120 | "_version": 1,
121 | "created": true
122 | }
123 | }
124 | ```
125 |
126 | And fetch it:
127 | ```
128 | curl -s \
129 | --request GET \
130 | http://172.17.8.101/hello/sample1
131 | {
132 | "msg": {
133 | "a": 1
134 | }
135 | }
136 | ```
137 |
138 | Now, we can also post another document with the same title:
139 | ```
140 | curl -s \
141 | --request POST \
142 | --data '{"a": 2}' \
143 | --header 'Content-type: application/json' \
144 | http://172.17.8.101/hello/sample1
145 | {
146 | "msg": {
147 | "_index": "hello",
148 | "_type": "json",
149 | "_id": "AUsB9quZiEcqWz_eIw5Y",
150 | "_version": 1,
151 | "created": true
152 | }
153 | }
154 | ```
155 |
156 | Try to fetch it:
157 | ```
158 | curl -s \
159 | --request GET \
160 | http://172.17.8.101/hello/sample1
161 | {
162 | "msg": "There're too many of those, I'm sorry! But you can try `/hello/_search/:title` ;)"
163 | }
164 | ```
165 |
166 | So we can no longer use `GET /hello/:title`, however search comes to the rescue:
167 |
168 | ```
169 | curl -s \
170 | --request GET \
171 | http://172.17.8.101/hello/_search/sample1
172 | {
173 | "msg": "Found 2 matching documents...",
174 | "hits": [
175 | {
176 | "title": "sample1",
177 | "text": {
178 | "a": 1
179 | },
180 | "id": "AUsB9l_6iEcqWz_eIw5X"
181 | },
182 | {
183 | "title": "sample1",
184 | "text": {
185 | "a": 2
186 | },
187 | "id": "AUsB9quZiEcqWz_eIw5Y"
188 | }
189 | ]
190 | }
191 | ```
192 |
193 | All done, have fun weaving Elasticsearch and Node.js (or IO.js) apps!
194 |
195 | ## Conclusion
196 |
197 | In this post, I have demonstrated how Weave helps with deploying a distributed database engine and an example of a microservice to go along with it. Weave makes it very easy to run containerised applications on any network, in particular, for Elasticsearch it enables out-of-the-box discovery mechanism to work and provides DNS for apps to find the database wherever it lives on the network. We would love to hear about your usecases, do get in touch with [team@weave.works](mailto:team@weave.works) and make sure to follow [@weaveworks](https://twitter.com/weaveworks) on twitter.
198 |
199 | ## Appendix
200 |
201 | ### Using Kibana or plugins
202 |
203 | I have shown how you can access Elasticsearch API on Weave network from the Docker host by running `weave expose 10.10.1.100/24`. If you want to access it from your own machine and hook-up Kibana or use BigDesk or other plugins through your browser, you will need to setup port forwarding.
204 |
205 | ```
206 | vagrant ssh core-01 -- -L localhost:9200:10.10.1.1:9200
207 | core@core-01 ~ $ sudo weave expose 10.10.1.100/24
208 | ```
209 |
210 | The above will forward port 9200 of container `es-1` as `localhost:9200`, which will persist until you exit the ssh session.
211 |
212 | In a new terminal window try
213 | ```
214 | curl localhost:9200/_cat/nodes
215 | ```
216 | and you will see the same list of nodes as shown above.
217 |
218 | You can now [download and extract the Kibana release](http://www.elasticsearch.org/overview/kibana/installation/) and use it with the default URL. To use a plugin, you would need to install it on the container image, which is outside of the scope here to go into detail, but if you are not sure you can look at [one of my other examples](https://github.com/errordeveloper/weave-demos/blob/d2c2a00/java-containers/elasticsearch-river-twitter/Dockerfile).
219 |
220 | ### Running multiple clusters
221 |
222 | There could be different reasons why you may wish to run multiple clusters of Elasticsearch on the infrastructure. It might happen that someone wants to try something for the project of their own and you'd prefer they don't mess with your data, or you may wish to utilise Weave's isolation for spinning up a new version of Elasticsearch in production, while keeping previous version running. In any case, it's rather simple to do with Weave and you can see how it can be done with a simple shell script ([run_elasticsearch_2_clusters.sh](https://github.com/errordeveloper/weave-demos/blob/d2c2a00/hello-apps/elasticsearch-js/scripts/run_elasticsearch_2_clusters.sh)).
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/myapp/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node
2 | EXPOSE 80
3 | ADD ./ /app/
4 | WORKDIR /app
5 | RUN npm install
6 | CMD [ "npm", "start" ]
7 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/myapp/index.js:
--------------------------------------------------------------------------------
1 | var elasticsearch = require('elasticsearch');
2 | var es = new elasticsearch.Client({
3 | hosts: [ 'es-1.weave.local:9200', /* 'es-2.weave.local:9200', 'es-3.weave.local:9200', */ ],
4 | log: 'trace'
5 | });
6 |
7 | var restify = require('restify');
8 |
9 | var server = restify.createServer({
10 | name: 'Hello, ElasticSearch on Weave!',
11 | });
12 |
13 | server.use(restify.bodyParser({ mapParams: false }));
14 |
15 | server.listen(80);
16 |
17 | es.ping({
18 | requestTimeout: 1000,
19 | hello: "elasticsearch!"
20 | }, function (error) {
21 | if (error) {
22 | console.error('elasticsearch cluster is down!');
23 | } else {
24 | console.log('All is well');
25 | }
26 | });
27 |
28 | server.post('/hello', function (req, res, next) {
29 | es.indices.create({
30 | index: "hello",
31 | }, function (error) {
32 | if (error) {
33 | res.send(500, { msg: error.message });
34 | } else {
35 | res.send(200, es_res);
36 | }
37 | });
38 | return next();
39 | });
40 |
41 | server.get('/', function (req, res, next) {
42 | es.nodes.info({
43 | human: true,
44 | metrics: [ 'host', 'ip' ],
45 | }, function (error, es_res) {
46 | if (error) {
47 | res.send(500, { msg: error.message });
48 | } else {
49 | res.send(200, es_res);
50 | }
51 | });
52 | return next();
53 | });
54 |
// GET /hello/_search/:title -- search documents by title; an empty title
// falls back to the `*` wildcard (match all).
server.get('/hello/_search/:title', function (req, res, next) {
  // Bug fix: `t` (and the loop index `i` below) were assigned without `var`,
  // leaking globals that concurrent requests could clobber.
  var t = req.params.title !== "" ? req.params.title : "*";
  es.search({
    index: 'hello',
    type: 'json',
    q: "title:"+t,
  }, function (error, es_res) {
    if (error) {
      res.send(500, { msg: error.message });
    } else {
      if (es_res.hits.total >= 1) {
        // Project each hit down to the fields the client cares about.
        var hits = [];
        for (var i in es_res.hits.hits) {
          hits.push({
            title: es_res.hits.hits[i]._source.title,
            text: es_res.hits.hits[i]._source.text,
            id: es_res.hits.hits[i]._id
          });
        }
        res.send(200, {
          msg: "Found " + es_res.hits.total + " matching documents...",
          hits: hits
        });
      } else if (es_res.hits.total === 0) {
        res.send(404, { msg: "There're none of those, I'm afraid!" });
      }
    }
  });
  return next();
});
89 |
// POST /hello/:title -- index the request body as a new document with the
// given title; responds 201 with the Elasticsearch result on success.
server.post('/hello/:title', function (req, res, next) {
  var doc = {
    index: 'hello',
    type: 'json',
    body: {
      title: req.params.title,
      published: true,
      text: req.body,
    },
  };
  es.create(doc, function (error, es_res) {
    if (!error) {
      res.send(201, { msg: es_res });
    } else {
      res.send(500, { msg: error.message });
    }
  });
  return next();
});
108 |
// GET /hello/:title -- fetch a single document by exact title match.
// 0 hits -> 404; exactly 1 -> its text; more than 1 -> redirect to search.
server.get('/hello/:title', function (req, res, next) {
  function redirect() {
    res.header('Location', '/hello/_search/'+req.params.title);
    res.send(302, { msg: "There're too many of those, I'm sorry! But you can try `GET /hello/_search/:title` ;)" });
  }

  // An empty title cannot identify a single document; send to search.
  if (req.params.title === "") {
    redirect();
    return next();
  }

  es.search({
    index: 'hello',
    type: 'json',
    q: "title:"+req.params.title,
  }, function (error, es_res) {
    if (error) {
      res.send(500, { msg: error.message });
      return;
    }
    var total = es_res.hits.total;
    if (total === 1) {
      res.send(200, { msg: es_res.hits.hits[0]._source.text });
    } else if (total === 0) {
      res.send(404, { msg: "There're none of those, I'm afraid!" });
    } else if (total > 1) {
      redirect();
    }
  });
  return next();
});
139 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/myapp/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "elasticsearch-iojs-hello",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "dependencies": {
7 | "elasticsearch": "^10.0.1",
8 | "restify": "^4.0.3"
9 | },
10 | "devDependencies": {},
11 | "scripts": {
12 | "start": "node index.js",
13 | "test": "echo \"Error: no test specified\" && exit 1"
14 | },
15 | "author": "",
16 | "license": "ISC"
17 | }
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/myapp_lb/Dockerfile:
--------------------------------------------------------------------------------
# Load balancer for myapp: NGINX Plus resolving backends via WeaveDNS.
FROM nginxplus

# Address of the WeaveDNS resolver, passed at build time (see bootstrap.sh,
# which extracts the docker0 interface address).
ARG weavedns_addr

# Point nginx's resolver at WeaveDNS so `*.weave.local` names resolve;
# ipv6=off skips AAAA lookups -- presumably WeaveDNS is IPv4-only; confirm.
RUN echo "resolver ${weavedns_addr} ipv6=off;" > /etc/nginx/conf.d/weavedns.conf

ADD myapp.conf /etc/nginx/conf.d/default.conf
8 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/myapp_lb/myapp.conf:
--------------------------------------------------------------------------------
# Upstream group of app instances registered under myapp.weave.local.
upstream myapp_backends {
    # Shared-memory zone, required for the runtime-resolved server below.
    zone myapp_backends 64k;
    # `resolve` re-queries DNS at runtime so backends can come and go
    # (NGINX Plus feature; resolver is set in weavedns.conf).
    server myapp.weave.local resolve;
}

server {
    listen 80 default_server;
    location / {
        proxy_pass http://myapp_backends;
    }
}
12 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/scripts/bootstrap.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Bootstrap the elasticsearch-js demo across the three `felix` Vagrant VMs.
# For each VM we compose one remote command string that builds the images and
# starts Elasticsearch, two app instances and an NGINX Plus load balancer,
# all attached to the Weave network (via `weave env`).

cd $(git rev-parse --show-toplevel)/felix

# NGINX Plus credentials are expected in the NGINXPLUS_CREDS environment
# variable (see tldr.md); the commented line is the positional-args variant.
#NGINXPLUS_CREDS="--build-arg nginxplus_license_cookie=${1} --build-arg nginxplus_license_secret=${2}"

for i in 1 2 3 ; do

# Base NGINX Plus image, built straight from the upstream git repository.
build_nginxplus_base_image=" \
docker build -t nginxplus $NGINXPLUS_CREDS https://github.com/errordeveloper/dockerfile-nginxplus.git \
"

# The demo app image, built from the myapp subdirectory of this repo.
build_myapp_image="\
docker build -t myapp https://github.com/errordeveloper/weave-demos.git#:hello-apps/elasticsearch-js/myapp \
"

# One app instance; the fixed hostname is what the LB resolves via WeaveDNS.
run_myapp="\
docker run -d --hostname=myapp.weave.local myapp \
"

# One Elasticsearch node per VM, named es-1/es-2/es-3.
run_elasticsearch="\
docker run -d --name='es-${i}' errordeveloper/weave-elasticsearch-minimal:latest \
"

# LB image; the --build-arg extracts the docker0 IPv4 address to use as the
# nginx resolver -- presumably WeaveDNS listens there; confirm.
build_myapp_lb_image="\
docker build -t myapp_lb \
--build-arg \$(ip -4 addr show dev docker0 | grep -m1 -o 'inet [.0-9]*' | sed 's/inet \([.0-9]*\)/weavedns_addr=\1/') \
https://github.com/errordeveloper/weave-demos.git#:hello-apps/elasticsearch-js/myapp_lb \
"

# The LB runs on the host network so it is reachable on the VM's own IP.
run_my_app_lb="\
docker run -d --net=host myapp_lb \
"

# Full remote command. `\$(weave env)` is escaped so it evaluates on the VM,
# and `run_myapp` is deliberately launched twice (two app instances per VM).
cmd="weave expose \
&& eval \$(weave env) \
&& ${build_nginxplus_base_image} \
&& ${build_myapp_image} \
&& ${build_myapp_lb_image} \
&& ${run_elasticsearch} \
&& ${run_myapp} \
&& ${run_myapp} \
&& ${run_my_app_lb} \
"

vm="core-0${i}"
log="/tmp/vagrant_ssh_${vm}"
echo "Bootstrapping ${vm}..."
vagrant ssh $vm --command "${cmd}" &> $log && echo " - done" || echo " - fail (see $log)"
done
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/scripts/prepare_demo_vms.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Pre-pull the demo images on all three VMs so bootstrap.sh does not spend
# time downloading them, and clone this repository on the first VM.
# Fix: the script previously had no shebang, so its interpreter depended on
# the invoking shell.

for m in 'core-01' 'core-02' 'core-03'
do vagrant ssh $m --command 'docker pull errordeveloper/weave-elasticsearch-minimal:latest'
done

vagrant ssh 'core-01' --command 'docker pull errordeveloper/iojs-minimal-runtime:v1.0.1'
vagrant ssh 'core-01' --command 'git clone https://github.com/errordeveloper/weave-demos'
7 |
--------------------------------------------------------------------------------
/hello-apps/elasticsearch-js/tldr.md:
--------------------------------------------------------------------------------
## Bootstrap
2 | ```bash
3 | git clone https://github.com/errordeveloper/weave-demos
4 | cd weave-demos/felix
5 | vagrant up
6 | cd ../hello-apps/elasticsearch-js
7 | ```
Now you need to obtain the NGINX Plus license credentials from `cs.nginx.com`
9 | ```bash
10 | export NGINXPLUS_CREDS=" \
11 | --build-arg nginxplus_license_cookie=<...> \
12 | --build-arg nginxplus_license_secret=<...> \
13 | "
14 | ```
15 | Next, let's fire-up the containers!
16 | ```bash
17 | ./scripts/bootstrap.sh
18 | ```
19 |
20 | ## Does it work?
21 |
22 | Our app should be accessible via:
23 |
24 | - `172.17.8.101:80`
25 | - `172.17.8.102:80`
26 | - `172.17.8.103:80`
27 |
28 | The API defined by this app is pretty simple:
29 |
30 | - `GET /` will give you some basic info about the database cluster
31 | - `POST /hello/:title` will store body in a document with title `:title`
32 | - `GET /hello/:title` will retrieve contents of document with title `:title`
- `GET /hello/_search/:title` will search by title
34 |
35 | So let's create our first document:
36 |
37 | ```bash
38 | curl -s \
39 | --request POST \
40 | --data '{"a": 1}' \
41 | --header 'Content-type: application/json' \
42 | http://172.17.8.102/hello/sample1 | jq .
43 | ```
44 |
45 | ```javascript
46 | {
47 | "msg": {
48 | "_index": "hello",
49 | "_type": "json",
50 | "_id": "AUsB9l_6iEcqWz_eIw5X",
51 | "_version": 1,
52 | "created": true
53 | }
54 | }
55 | ```
56 |
57 | And fetch it:
58 | ```bash
59 | curl -s \
60 | --request GET \
61 | http://172.17.8.101/hello/sample1 | jq .
62 | ```
63 | ```javascript
64 | {
65 | "msg": {
66 | "a": 1
67 | }
68 | }
69 | ```
70 |
71 | Now, we can also post another document with the same title:
72 | ```bash
73 | curl -s \
74 | --request POST \
75 | --data '{"a": 2}' \
76 | --header 'Content-type: application/json' \
77 | http://172.17.8.102/hello/sample1 | jq .
78 | ```
79 | ```javascript
80 | {
81 | "msg": {
82 | "_index": "hello",
83 | "_type": "json",
84 | "_id": "AUsB9quZiEcqWz_eIw5Y",
85 | "_version": 1,
86 | "created": true
87 | }
88 | }
89 | ```
90 |
91 | Try to fetch it:
92 | ```bash
93 | curl -s \
94 | --request GET \
95 | http://172.17.8.103/hello/sample1 | jq .
96 | ```
97 | ```javascript
98 | {
  "msg": "There're too many of those, I'm sorry! But you can try `GET /hello/_search/:title` ;)"
100 | }
101 | ```
102 |
103 | So we no longer can use `GET /hello/:title`, however search comes to rescue:
104 |
105 | ```bash
106 | curl -s \
107 | --request GET \
108 | http://172.17.8.102/hello/_search/sample1 | jq .
109 | ```
110 | ```javascript
111 | {
112 | "msg": "Found 2 matching documents...",
113 | "hits": [
114 | {
115 | "title": "sample1",
116 | "text": {
117 | "a": 1
118 | },
119 | "id": "AUsB9l_6iEcqWz_eIw5X"
120 | },
121 | {
122 | "title": "sample1",
123 | "text": {
124 | "a": 2
125 | },
126 | "id": "AUsB9quZiEcqWz_eIw5Y"
127 | }
128 | ]
129 | }
130 | ```
131 |
132 | All done, have fun weaving Nginx+, Elasticsearch and Node.js apps!
133 |
--------------------------------------------------------------------------------
/java-containers/elasticsearch-river-twitter/Dockerfile:
--------------------------------------------------------------------------------
# Elasticsearch image with the Twitter river plugin pre-installed.
FROM errordeveloper/weave-elasticsearch-minimal

# Install the plugin at build time via the PluginManager class from the
# bundled ES libs, with a small heap since this is a one-shot build step.
RUN [ "java", "-Xmx64m", "-Xms16m", "-Delasticsearch", "-Des.path.home=/usr/elasticsearch", \
     "-cp", "/usr/elasticsearch/lib/*", "org.elasticsearch.plugins.PluginManager", \
     "--install", "elasticsearch/elasticsearch-river-twitter/2.4.2" ]
6 |
--------------------------------------------------------------------------------
/java-containers/elasticsearch/Dockerfile:
--------------------------------------------------------------------------------
# Minimal Elasticsearch image on top of an Oracle JRE base.
FROM errordeveloper/oracle-jre

ENV ELASTICSEARCH_BINARY_RELEASE 2.1.0

# Download and unpack the release tarball, then create the directories
# Elasticsearch writes to and hand them over to the unprivileged `nobody`.
RUN curl \
    --silent \
    --location \
    --retry 3 \
    --cacert /etc/ssl/certs/Go_Daddy_Class_2_CA.crt \
    https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-$ELASTICSEARCH_BINARY_RELEASE.tar.gz \
    | gunzip \
    | tar x -C /usr/ \
    && ln -s /usr/elasticsearch-$ELASTICSEARCH_BINARY_RELEASE /usr/elasticsearch \
    && mkdir /data && chown nobody /data \
    && mkdir /usr/elasticsearch/logs && chown nobody /usr/elasticsearch/logs \
    && mkdir /usr/elasticsearch/plugins && chown nobody /usr/elasticsearch/plugins \
    && mkdir /usr/elasticsearch/config/scripts && chown nobody /usr/elasticsearch/config/scripts

VOLUME [ "/data" ]

# Install the multicast discovery plugin at build time (small one-shot heap).
RUN [ "java", "-Xmx64m", "-Xms16m", "-Delasticsearch", "-Des.path.home=/usr/elasticsearch", \
    "-cp", "/usr/elasticsearch/lib/*", "org.elasticsearch.plugins.PluginManager", \
    "--install", "discovery-multicast" ]

USER nobody

ADD logging.yml /usr/elasticsearch/config/logging.yml

# These are arguments to the base image's ENTRYPOINT (presumably `java`,
# given they are JVM flags plus a main class -- confirm against the base).
# Binding and multicast discovery use the weave interface (_ethwe:ipv4_).
# NOTE(review): the jar version below is hard-coded because exec-form CMD
# does not expand $ELASTICSEARCH_BINARY_RELEASE; keep the two in sync.
CMD [ \
    "-Xms256m", "-Xmx1g", \
    "-Djava.awt.headless=true", \
    "-Djna.nosys=true", \
    "-Dfile.encoding=UTF-8", \
    "-XX:+UseParNewGC", \
    "-XX:+UseConcMarkSweepGC", \
    "-XX:CMSInitiatingOccupancyFraction=75", \
    "-XX:+UseCMSInitiatingOccupancyOnly", \
    "-XX:+HeapDumpOnOutOfMemoryError", \
    "-XX:+DisableExplicitGC", \
    "-Delasticsearch", \
    "-Des.foreground=yes", \
    "-Des.path.home=/usr/elasticsearch", \
    "-Des.path.data=/data", \
    "-Des.network.bind_host=_ethwe:ipv4_", \
    "-Des.network.publish_host=_ethwe:ipv4_", \
    "-Des.discovery.zen.ping.multicast.address=_ethwe:ipv4_", \
    "-Des.cluster.name=elasticsearch", \
    "-Des.http.cors.enabled=true", \
    "-cp", "/usr/elasticsearch/lib/elasticsearch-2.1.0.jar:/usr/elasticsearch/lib/*", \
    "org.elasticsearch.bootstrap.Elasticsearch", \
    "start" \
]
53 |
--------------------------------------------------------------------------------
/java-containers/elasticsearch/logging.yml:
--------------------------------------------------------------------------------
1 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG
2 | es.logger.level: INFO
3 | rootLogger: ${es.logger.level}, console
4 | logger:
5 | # log action execution errors for easier debugging
6 | action: DEBUG
7 |
8 | # deprecation logging, turn to DEBUG to see them
9 | deprecation: INFO, deprecation_log
10 |
11 | # reduce the logging for aws, too much is logged under the default INFO
12 | com.amazonaws: WARN
13 | # aws will try to do some sketchy JMX stuff, but its not needed.
14 | com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
15 | com.amazonaws.metrics.AwsSdkMetrics: ERROR
16 |
17 | org.apache.http: INFO
18 |
19 | # gateway
20 | #gateway: DEBUG
21 | #index.gateway: DEBUG
22 |
23 | # peer shard recovery
24 | #indices.recovery: DEBUG
25 |
26 | # discovery
27 | #discovery: TRACE
28 |
29 | index.search.slowlog: TRACE, index_search_slow_log
30 | index.indexing.slowlog: TRACE, index_indexing_slow_log
31 |
32 | additivity:
33 | index.search.slowlog: false
34 | index.indexing.slowlog: false
35 | deprecation: false
36 |
37 | appender:
38 | console:
39 | type: console
40 | layout:
41 | type: consolePattern
42 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
43 |
44 | deprecation_log:
45 | type: console
46 | layout:
47 | type: consolePattern
48 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
49 |
50 | index_search_slow_log:
51 | type: console
52 | layout:
53 | type: consolePattern
54 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
55 |
56 | index_indexing_slow_log:
57 | type: console
58 | layout:
59 | type: consolePattern
60 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
61 |
--------------------------------------------------------------------------------
/java-containers/lein/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM errordeveloper/oracle-jdk
2 |
3 | ENV LEIN_ROOT 1
4 | ENV HTTP_CLIENT curl -k -s -f -L -o
5 |
6 | RUN curl \
7 | --silent \
8 | --location \
9 | --retry 3 \
10 | --insecure \
11 | https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein \
12 | --output /usr/bin/lein \
13 | && chmod 0755 /usr/bin/lein
14 |
15 | RUN opkg-install bash ; /usr/bin/lein upgrade
16 |
17 | VOLUME [ "/io" ]
18 | WORKDIR /io
19 |
20 |
21 | ENTRYPOINT [ "lein" ]
22 |
--------------------------------------------------------------------------------
/java-containers/mvn/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM errordeveloper/oracle-jdk
2 |
3 | ENV MVN_BINARY_RELEASE 3.2.3
4 |
5 | RUN curl \
6 | --silent \
7 | --location \
8 | --retry 3 \
9 | http://mirror.vorboss.net/apache/maven/maven-3/$MVN_BINARY_RELEASE/binaries/apache-maven-$MVN_BINARY_RELEASE-bin.tar.gz \
10 | | gunzip \
11 | | tar x -C /usr/ \
12 | && ln -s /usr/apache-maven-$MVN_BINARY_RELEASE /usr/maven
13 |
14 | ADD settings.xml /usr/maven/conf/
15 |
16 | ENV PATH $PATH:$SPARK_HOME/bin:/usr/maven/bin/
17 |
18 | VOLUME [ "/io" ]
19 |
20 | WORKDIR /io
21 |
22 |
23 | ENTRYPOINT [ "mvn" ]
24 |
--------------------------------------------------------------------------------
/java-containers/mvn/settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
21 |
22 |
46 |
49 |
54 | /io/.m2/cached_repository
55 |
56 |
64 |
65 |
72 |
73 |
78 |
79 |
83 |
84 |
85 |
90 |
91 |
105 |
106 |
107 |
111 |
112 |
125 |
126 |
133 |
134 |
135 |
146 |
147 |
159 |
160 |
161 |
182 |
183 |
212 |
213 |
247 |
248 |
249 |
257 |
258 |
--------------------------------------------------------------------------------
/java-containers/sbt/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM errordeveloper/oracle-jdk
2 |
3 | ENV SBT_BINARY_RELEASE 0.13.7
4 |
5 | RUN curl \
6 | --silent \
7 | --location \
8 | --retry 3 \
9 | --insecure \
10 | https://repo.typesafe.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/$SBT_BINARY_RELEASE/sbt-launch.jar \
11 | --output /usr/lib/sbt-launch.jar \
12 | && java -Xms512M -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -jar /usr/lib/sbt-launch.jar
13 |
14 | VOLUME [ "/io" ]
15 |
16 | WORKDIR /io
17 |
18 | ENTRYPOINT [ \
19 | "java", "-Xms512M", "-Xmx1536M", "-Xss1M", "-XX:+CMSClassUnloadingEnabled", \
20 | "-jar", "/usr/lib/sbt-launch.jar" \
21 | ]
22 |
--------------------------------------------------------------------------------
/java-containers/spark/base/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM errordeveloper/oracle-jre
2 |
3 | RUN opkg-install bash libstdcpp zlib
4 |
5 | ENV SPARK_BINARY_RELEASE 1.2.1-bin-cdh4
6 |
7 | RUN curl \
8 | --silent \
9 | --location \
10 | --retry 3 \
11 | --insecure \
12 | https://d3kbcqa49mib13.cloudfront.net/spark-$SPARK_BINARY_RELEASE.tgz \
13 | | gunzip \
14 | | tar x -C /usr/ \
15 | && ln -s /usr/spark-$SPARK_BINARY_RELEASE /usr/spark
16 |
17 | RUN curl \
18 | --silent \
19 | --location \
20 | --retry 3 \
21 | --insecure \
22 | https://github.com/errordeveloper/weave-demos/releases/download/sparkles-demo-1/python-2.7.6-gce-x86_64.txz \
23 | | xzcat \
24 | | tar x -C /usr/
25 |
26 | RUN curl \
27 | --silent \
28 | --location \
29 | --retry 3 \
30 | http://central.maven.org/maven2/org/elasticsearch/elasticsearch-spark_2.10/2.1.0.Beta3/elasticsearch-spark_2.10-2.1.0.Beta3.jar \
31 | --output /usr/spark/lib/elasticsearch-spark_2.10-2.1.0.Beta3.jar
32 |
33 | ## Currently we need to tweak nsswitch.conf(5), mainly due to zettio/weave#68
34 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf
35 |
36 | ENV SPARK_HOME /usr/spark-$SPARK_BINARY_RELEASE
37 | ENV PATH $PATH:$SPARK_HOME/bin:/usr/python/bin/
38 |
39 | #RUN sed 's/^\(log4j.rootCategory=\)INFO\(, console\)$/\1DEBUG\2/' \
40 | # $SPARK_HOME/conf/log4j.properties.template \
41 | # > $SPARK_HOME/conf/log4j.properties
42 |
--------------------------------------------------------------------------------
/java-containers/spark/master/Dockerfile:
--------------------------------------------------------------------------------
# Spark standalone master on top of the shared weave-spark base image.
FROM errordeveloper/weave-spark-base-minimal

# Launch the Master class directly with a fixed 512m heap; the classpath
# includes Spark's conf directory and all bundled jars.
ENTRYPOINT [ \
    "java", \
    "-Dspark.akka.logLifecycleEvents=true", \
    "-Xms512m", "-Xmx512m", \
    "-cp", "::/usr/spark/conf:/usr/spark/lib/*", \
    "org.apache.spark.deploy.master.Master" \
]
10 |
--------------------------------------------------------------------------------
/java-containers/spark/shell/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM errordeveloper/oracle-jdk
2 |
3 | RUN opkg-install bash libstdcpp zlib
4 |
5 | ENV SPARK_BINARY_RELEASE 1.2.1-bin-cdh4
6 |
7 | RUN curl \
8 | --silent \
9 | --location \
10 | --retry 3 \
11 | --insecure \
12 | https://d3kbcqa49mib13.cloudfront.net/spark-$SPARK_BINARY_RELEASE.tgz \
13 | | gunzip \
14 | | tar x -C /usr/ \
15 | && ln -s /usr/spark-$SPARK_BINARY_RELEASE /usr/spark
16 |
17 | RUN curl \
18 | --silent \
19 | --location \
20 | --retry 3 \
21 | --insecure \
22 | https://github.com/errordeveloper/weave-demos/releases/download/sparkles-demo-1/python-2.7.6-gce-x86_64.txz \
23 | | xzcat \
24 | | tar x -C /usr/
25 |
26 | RUN curl \
27 | --silent \
28 | --location \
29 | --retry 3 \
30 | http://central.maven.org/maven2/org/elasticsearch/elasticsearch-spark_2.10/2.1.0.Beta3/elasticsearch-spark_2.10-2.1.0.Beta3.jar \
31 | --output /usr/spark/lib/elasticsearch-spark_2.10-2.1.0.Beta3.jar
32 |
33 | ## Currently we need to tweak nsswitch.conf(5), mainly due to zettio/weave#68
34 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf
35 |
36 | ENV SPARK_HOME /usr/spark-$SPARK_BINARY_RELEASE
37 | ENV PATH $PATH:$SPARK_HOME/bin:/usr/python/bin/
38 |
39 | ENTRYPOINT [ "spark-shell" ]
40 |
--------------------------------------------------------------------------------
/java-containers/spark/worker/Dockerfile:
--------------------------------------------------------------------------------
# Spark standalone worker on top of the shared weave-spark base image.
FROM errordeveloper/weave-spark-base-minimal

# Launch the Worker class directly with a fixed 512m heap; the classpath
# includes Spark's conf directory and all bundled jars.
ENTRYPOINT [ \
    "java", \
    "-Dspark.akka.logLifecycleEvents=true", \
    "-Xms512m", "-Xmx512m", \
    "-cp", "::/usr/spark/conf:/usr/spark/lib/*", \
    "org.apache.spark.deploy.worker.Worker" \
]
10 |
--------------------------------------------------------------------------------
/marathon-atomic/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 |
--------------------------------------------------------------------------------
/marathon-atomic/README.md:
--------------------------------------------------------------------------------
1 | # Running Marathon on Fedora Atomic with Weave
2 |
3 | ## Install Vagrant plugin for Atomic
4 |
5 | sudo vagrant plugin install vagrant-atomic
6 |
## Bring the VM up

    vagrant up

--------------------------------------------------------------------------------
/marathon-atomic/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | require 'base64'
5 | require 'yaml'
6 |
7 | FEDORA_RELEASE = 22
8 | BUILD_VERSION = 20150521
9 | MIRROR = 'https://download.fedoraproject.org'
10 | MIRROR_PREFIX = "#{MIRROR}/pub/fedora/linux/releases/#{FEDORA_RELEASE}/Cloud/x86_64/Images"
11 |
12 | CONFIG = File.join(File.dirname(__FILE__), 'config.rb')
13 |
## The variables below can be overridden in the `config.rb` file
15 | $mesos_slaves = 1
16 | $memory = 1024*2
17 | $cpus = 2
18 | $network = [172, 17, 85]
19 |
20 | if File.exist?(CONFIG)
21 | require CONFIG
22 | end
23 |
## XXX: Maybe this could be turned into a generator script that shall be
## checked into the repo? ...it could also be turned into a Vagrant plugin
## or a more general-purpose cloud-config generator. The point is that it
## rather looks like a thing only an advanced user could read, but I'm not
## sure, it might be okay actually.
29 |
30 | cloud_config = {
31 | 'write_files' => %w(
32 | weave weave.target weave.service weavedns.service weaveproxy.service
33 | zookeeper.service mesos-master.service mesos-slave.service marathon.service
34 | ).map do |fn|
35 | {
36 | 'encoding' => 'b64',
37 | 'content' => Base64.encode64(File.open(fn).readlines.join),
38 | 'path' => fn == 'weave' ? "/usr/local/bin/#{fn}" : "/etc/systemd/system/#{fn}",
39 | 'permissions' => fn == 'weave' ? '0755' : '0644',
40 | }
41 | end
42 | }
43 |
44 | vms = (0..$mesos_slaves).map{ |a| 'mesos-%02d' % [a] }
45 |
46 | ips = {}
47 |
48 | vms.each_with_index{ |i, x| ips[i] = ($network + [x+100]).join('.') }
49 |
50 | cloud_config['write_files'] += vms.map do |vm_name|
51 | ## Compile a list of IP addresses modulo one for this VM
52 | weave_peers = ips.select{|host, addr| addr if host != vm_name}.values
## Create an environment file that will get loaded by `weave.service`
## with the `%H` expansion for each given host
55 | content = [ "WEAVE_PEERS=\"#{weave_peers}\"" ]
56 | content << "WEAVE_PASSWORD=\"#$weave_password\"" unless $weave_password.nil?
57 | {
58 | 'content' => content.join("\n"),
59 | 'path' => "/etc/weave.#{vm_name}.env",
60 | 'permissions' => '0640',
61 | }
62 | end
63 |
64 | cloud_init_ds = '/var/lib/cloud/seed/nocloud'
65 |
66 | $do_cloud_config = <<-SCRIPT
67 | mkdir -p #{cloud_init_ds}
68 |
69 | echo "instance-id: id-local-${HOSTNAME}" \
70 | > #{cloud_init_ds}/meta-data
71 |
72 | cat <<- EOF > #{cloud_init_ds}/user-data
73 | #cloud-config
74 | #{cloud_config.to_yaml}
75 | EOF
76 |
77 | systemctl -q unmask cloud-init-local.service cloud-config.service cloud-final.service
78 | systemctl -q --no-block start cloud-config.service cloud-final.service
79 | SCRIPT
80 |
81 | docker_images = %w(
82 | weaveworks/weave:1.0.1 weaveworks/weavedns:1.0.1 weaveworks/weaveexec:1.0.1
83 | mesoscloud/zookeeper:3.4.6-centos-7 mesoscloud/marathon:0.9.1-centos-7
84 | mesoscloud/mesos-master:0.23.0-centos-7 mesoscloud/mesos-slave:0.23.0-centos-7
85 | )
86 |
87 | $do_marathon_setup = <<-SCRIPT
88 | for i in #{docker_images.join(' ')}
89 | do docker pull $i > /dev/null
90 | done
91 |
92 | systemctl -q --no-block enable weave.target weave.service weaveproxy.service
93 | systemctl -q --no-block start weave.service weaveproxy.service
94 |
95 | systemctl -q --no-block start zookeeper.service
96 | ## give ZK some time... TODO: still need to figure out what to do in a cluster setup, may be just launch ZK hosts first
97 | systemctl -q --no-block start mesos-master.service mesos-slave.service marathon.service
98 | SCRIPT
99 |
100 | Vagrant.configure('2') do |config|
101 | config.vm.box = "fedora-atomic-#{FEDORA_RELEASE}-#{BUILD_VERSION}"
102 | config.vm.box_url = "#{MIRROR_PREFIX}/Fedora-Cloud-Atomic-Vagrant-#{FEDORA_RELEASE}-#{BUILD_VERSION}.x86_64.vagrant-virtualbox.box"
103 |
104 | #config.vm.guest = :atomic
105 |
106 | vms.each do |i|
107 |
108 | config.vm.define vm_name = i do |config|
109 |
110 | config.vm.network :private_network, ip: ips[vm_name]
111 |
112 | config.vm.provider :virtualbox do |vb|
113 | vb.gui = false
114 | vb.check_guest_additions = false
115 | vb.functional_vboxsf = false
116 |
117 | vb.memory = $memory
118 | vb.cpus = $cpus
119 | end
120 |
121 | config.vm.provision :shell, inline: "hostnamectl set-hostname #{vm_name}"
122 | config.vm.provision :shell, inline: $do_cloud_config
123 | config.vm.provision :shell, inline: $do_marathon_setup
124 | end
125 | end
126 | end
127 |
--------------------------------------------------------------------------------
/marathon-atomic/marathon.service:
--------------------------------------------------------------------------------
[Unit]
Description=Marathon
After=weave.target zookeeper.service
Requires=docker.service weave.target zookeeper.service
[Service]
TimeoutStartSec=0
Restart=on-failure
# Run Marathon via the Weave proxy (localhost:12375) so the container's name
# is registered in WeaveDNS. Consistency fix: use the 0.9.1 image that the
# Vagrantfile pre-pulls (was 0.8.2, forcing a fresh pull at service start).
ExecStart=/usr/bin/docker \
    -H localhost:12375 \
    run --rm --name=marathon-00.mesos \
    -p 8080:8080 \
    mesoscloud/marathon:0.9.1-centos-7 \
    sh -c ' \
    sed "s/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/" -i /etc/nsswitch.conf ; \
    echo zk://zookeeper-00.mesos.weave.local:2181/mesos > /etc/mesos/zk ; \
    marathon --no-logger ; \
    '
ExecStop=/usr/bin/docker rm -f marathon-00.mesos
19 |
--------------------------------------------------------------------------------
/marathon-atomic/mesos-master.service:
--------------------------------------------------------------------------------
[Unit]
Description=Mesos Master
After=weave.target zookeeper.service
Requires=docker.service weave.target zookeeper.service
[Service]
TimeoutStartSec=0
Restart=on-failure
# Run the master via the Weave proxy (localhost:12375). Consistency fix: use
# the 0.23.0 image that the Vagrantfile pre-pulls (was 0.22.1).
ExecStart=/usr/bin/docker \
    -H localhost:12375 \
    run --rm --name=master-00.mesos \
    mesoscloud/mesos-master:0.23.0-centos-7 \
    sh -c ' \
    sed "s/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/" -i /etc/nsswitch.conf ; \
    mesos-master --registry=in_memory --zk=zk://zookeeper-00.mesos.weave.local:2181/mesos ; \
    '
ExecStop=/usr/bin/docker rm -f master-00.mesos
17 |
--------------------------------------------------------------------------------
/marathon-atomic/mesos-slave.service:
--------------------------------------------------------------------------------
[Unit]
Description=Mesos Slave
After=weave.target
Requires=docker.service weave.target
[Service]
TimeoutStartSec=0
Restart=on-failure
# Run the slave via the Weave proxy; tasks it launches also go through the
# proxy (DOCKER_HOST below). Consistency fix: use the 0.23.0 image that the
# Vagrantfile pre-pulls (was 0.22.1).
ExecStart=/usr/bin/docker \
    -H localhost:12375 \
    run --rm --name=slave-00.mesos \
    -v /sys:/sys --privileged=true --pid=host \
    -e MESOS_CONTAINERIZERS=docker \
    -e DOCKER_HOST=172.17.42.1:12375 \
    mesoscloud/mesos-slave:0.23.0-centos-7 \
    sh -c ' \
    sed "s/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/" -i /etc/nsswitch.conf ; \
    mesos-slave --master=zk://zookeeper-00.mesos.weave.local:2181/mesos ; \
    '
ExecStop=/usr/bin/docker rm -f slave-00.mesos
20 |
--------------------------------------------------------------------------------
/marathon-atomic/test1.sh:
--------------------------------------------------------------------------------
#!/bin/sh -x
# Submit a sample Dockerized Python web server to Marathon as app "basic-3".
# Bug fix: the Content-type header must be one quoted argument; unquoted,
# the shell split it and curl treated "application/json" as a second URL.
curl http://localhost:8080/v2/apps \
  -X POST \
  -H 'Content-type: application/json' \
  -d '{
    "id": "basic-3",
    "cmd": "python3 -m http.server 8080",
    "cpus": 0.5,
    "mem": 32.0,
    "container": {
      "type": "DOCKER",
      "docker": {
        "image": "python:3",
        "network": "BRIDGE",
        "portMappings": [
          { "containerPort": 8080, "hostPort": 0 }
        ]
      }
    }
  }'
21 |
--------------------------------------------------------------------------------
/marathon-atomic/weave.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Weave Net
3 | Documentation=http://docs.weave.works/
4 | After=docker.service
5 | Requires=docker.service
6 | [Service]
7 | TimeoutStartSec=0
8 | EnvironmentFile=-/etc/weave.env
9 | EnvironmentFile=-/etc/weave.%H.env
10 | ExecStartPre=/usr/local/bin/weave launch $WEAVE_PEERS
11 | ExecStart=/usr/bin/docker attach weave
12 | Restart=on-failure
13 | ExecStop=/usr/local/bin/weave stop
14 | [Install]
15 | WantedBy=weave.target
16 |
--------------------------------------------------------------------------------
/marathon-atomic/weave.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Weave
3 | Documentation=man:systemd.special(7)
4 | RefuseManualStart=no
5 | After=network-online.target
6 | Requires=weave.service weavedns.service weaveproxy.service
7 | [Install]
8 | WantedBy=multi-user.target
9 |
--------------------------------------------------------------------------------
/marathon-atomic/weavedns.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Weave Run - DNS
3 | Documentation=http://docs.weave.works/
4 | After=weave.service
5 | Requires=docker.service weave.service
6 | [Service]
7 | TimeoutStartSec=0
8 | EnvironmentFile=-/etc/weave.env
9 | EnvironmentFile=-/etc/weave.%H.env
10 | ExecStartPre=/usr/local/bin/weave launch-dns $WEAVE_DNS_ADDR
11 | ExecStart=/usr/bin/docker attach weavedns
12 | Restart=on-failure
13 | ExecStop=/usr/local/bin/weave stop-dns
14 | [Install]
15 | WantedBy=weave.target
16 |
--------------------------------------------------------------------------------
/marathon-atomic/weaveproxy.service:
--------------------------------------------------------------------------------
1 | [Unit]
Description=Weave Run - Proxy
3 | Documentation=http://docs.weave.works/
4 | After=weave.service
5 | Requires=docker.service weave.service weavedns.service
6 | [Service]
7 | TimeoutStartSec=0
8 | EnvironmentFile=-/etc/weave.env
9 | EnvironmentFile=-/etc/weave.%H.env
10 | ExecStartPre=/usr/local/bin/weave launch-proxy --with-dns
11 | ExecStart=/usr/bin/docker attach weaveproxy
12 | Restart=on-failure
13 | ExecStop=/usr/local/bin/weave stop-proxy
14 | [Install]
15 | WantedBy=weave.target
16 |
--------------------------------------------------------------------------------
/marathon-atomic/zookeeper.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Zookeeper
3 | After=weave.target
4 | Requires=docker.service weave.target
5 | [Service]
6 | TimeoutStartSec=0
7 | Restart=on-failure
8 | ExecStart=/usr/bin/docker \
9 | -H localhost:12375 \
10 | run --rm --name=zookeeper-00.mesos \
11 | mesoscloud/zookeeper:3.4.6-centos-7 \
12 | sh -c ' \
13 | sed "s/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/" -i /etc/nsswitch.conf ; \
14 | /opt/zookeeper/bin/zkServer.sh start-foreground ; \
15 | '
16 | ExecStop=/usr/bin/docker rm -f zookeeper-00.mesos
17 |
--------------------------------------------------------------------------------
/misc/gce-basic-two-nodes/build1.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# First node: zone europe-west1-c, VM weave-01.
# Bug fix: Linux passes everything after the shebang interpreter as a single
# argument, so `#!/bin/bash -x -e` is rejected as one invalid option;
# use `set` instead.
set -x -e

export DC='europe-west1-c'

export VM='weave-01'

./up.sh
8 |
--------------------------------------------------------------------------------
/misc/gce-basic-two-nodes/build2.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Second node: zone us-central1-a, VM weave-02.
# Bug fix: Linux passes everything after the shebang interpreter as a single
# argument, so `#!/bin/bash -x -e` is rejected as one invalid option;
# use `set` instead.
set -x -e

export DC='us-central1-a'

export VM='weave-02'

./up.sh
8 |
--------------------------------------------------------------------------------
/misc/gce-basic-two-nodes/build3.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Third node: zone us-central1-a, VM weave-03.
# Bug fix: Linux passes everything after the shebang interpreter as a single
# argument, so `#!/bin/bash -x -e` is rejected as one invalid option;
# use `set` instead.
set -x -e

export DC='us-central1-a'

export VM='weave-03'

./up.sh
8 |
--------------------------------------------------------------------------------
/misc/gce-basic-two-nodes/up.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Create a GCE instance, install Docker on it, then open an interactive SSH
# session. Expects DC (zone) and VM (instance name) in the environment; the
# build*.sh wrappers set and export both.
#
# Fix: multi-option shebangs (`#!/bin/bash -x -e`) are unreliable on Linux,
# so options are set with `set` instead.
set -x -e

# Fail early with a clear message if the wrappers were bypassed.
: "${DC:?DC (zone) must be set}"
: "${VM:?VM (instance name) must be set}"

# An array keeps the flag, zone and name as separate words without relying
# on unquoted word-splitting.
args=(--zone "$DC" "$VM")

gcloud compute instances create "${args[@]}"

# Give the instance time to boot and start accepting SSH connections.
sleep 30

gcloud compute ssh "${args[@]}" \
  --command 'curl https://get.docker.io/ | sudo bash'

# Drop into a shell on the new instance.
gcloud compute ssh "${args[@]}"
13 |
--------------------------------------------------------------------------------
/misc/ubuntu-kernels/Vagrantfile:
--------------------------------------------------------------------------------
# Two-VM rig for testing Weave against several Ubuntu kernel versions.
# Once the machine is rebooted, you get the latest kernel that we installed.
# You can test that manually, then remove the kernel you have just tested and
# the next reboot brings you to the older one.

# Kernel image packages to install; GRUB boots the newest installed one by
# default, so removing the current kernel steps back one version per reboot.
kernels = %w(
  linux-image-3.2.0-70-generic
  linux-image-3.5.0-54-generic
  linux-image-3.8.0-44-generic
  linux-image-3.11.0-26-generic
  linux-image-3.13.0-39-generic
)

Vagrant.configure("2") do |config|
  config.vm.box = 'phusion/ubuntu-12.04-amd64'
  config.vm.box_url = 'https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-12.04-amd64-vbox.box'

  config.vm.provider :virtualbox do |vb|
    vb.memory = 2048
    vb.cpus = 2
  end

  # Provisioning common to both VMs: install Docker, the weave script, the
  # weave image, and all test kernels plus the tools weave needs
  # (ethtool, conntrack).
  [
    'curl -s https://get.docker.io/ | sudo bash > /dev/null 2>&1',
    'curl -s -L https://raw.github.com/zettio/weave/master/weave | sudo install --mode=0755 /dev/fd/0 /usr/bin/weave',
    'sudo docker pull zettio/weave:latest',
    "sudo apt-get -qq -y --force-yes --no-install-recommends install ethtool conntrack #{kernels.join(' ')}",
  ].each do |cmd|
    config.vm.provision :shell, :inline => cmd
  end

  # Per-VM weave steps: the first node launches standalone and exposes an
  # address on the weave subnet; the second peers with the first
  # (172.17.8.100), exposes its own address, and pings the first to verify
  # connectivity.
  weave_cmds = [
    [
      'sudo weave launch',
      'sudo weave expose 10.9.0.1/24',
    ],
    [
      'sudo weave launch 172.17.8.100',
      'sudo weave expose 10.9.0.2/24',
      'ping -c 5 10.9.0.1',
    ],
  ]

  # Two VMs, weave-00 at 172.17.8.100 and weave-01 at 172.17.8.101.
  2.times do |i|
    config.vm.define vm_name = 'weave-%02d' % i do |config|
      config.vm.hostname = vm_name
      ip = "172.17.8.#{i+100}"
      config.vm.network :private_network, ip: ip

      weave_cmds[i].each do |cmd|
        config.vm.provision :shell, :inline => cmd
      end
    end
  end
end
55 |
--------------------------------------------------------------------------------
/poseidon/.gitignore:
--------------------------------------------------------------------------------
1 | user-data
2 |
--------------------------------------------------------------------------------
/poseidon/.vagrant:
--------------------------------------------------------------------------------
1 | ../.vagrant/
--------------------------------------------------------------------------------
/poseidon/README.md:
--------------------------------------------------------------------------------
1 | This is an extract from our [blog post](http://weaveblog.com/2014/11/11/weave-for-kubernetes/), the post itself contains more technical details and the motivation.
2 |
3 | ## Quick Start
4 |
Here is the gist of what you need to do in order to fire up 3 CoreOS VMs. As a result you will get a Kubernetes cluster running over a Weave network, with the `core-01` VM as the master.
6 |
7 | ```Bash
8 | git clone https://github.com/errordeveloper/weave-demos/
9 | cd weave-demos/poseidon
10 | vagrant up
11 | vagrant ssh core-01
12 | ```
13 |
14 | Now, on the master machine (`core-01`), let’s try deploying the guestbook example.
15 |
16 | As part of the provisioning process we have placed Kubernetes examples in your home directory. You should see a directory called `guestbook-example`, if it’s not there check if a `curl` process is still running.
17 |
18 | ```
19 | core@core-01 ~ $ ls
20 | guestbook-example
21 | ```
22 |
23 | Another basic thing to double-check is whether all minions have registered with the master.
24 |
25 | ```
26 | core@core-01 ~ $ kubectl get minions
27 | NAME
28 | 172.17.8.101
29 | 172.17.8.102
30 | 172.17.8.103
31 | ```
32 |
33 | If you have any problems, check the additional troubleshooting section at the end of this post.
34 |
35 | If all is well, let’s proceed!
36 |
37 | ```
38 | core@core-01 ~ $ cd guestbook-example
39 | core@core-01 ~/guestbook-example $
40 | ```
41 |
42 | Firstly we need to deploy Redis database, which consists of a single master and two slave pods.
43 |
44 | In Kubernetes terms, this consists of:
45 |
46 | - A single Redis master _Replication Controller_ and _Service_
47 |
48 | ```
49 | core@core-01 ~/guestbook-example $ kubectl create -f redis-master-controller.json
50 | I1105 17:08:24.679092 06680 restclient.go:133] Waiting for completion of operation 1
51 | redis-master-2
52 | core@core-01 ~/guestbook-example $ kubectl create -f redis-master-service.json
53 | redismaster
54 | ```
55 |
56 | - Two Redis slave _Replication Controllers_ and a _Service_
57 |
58 | ```
59 | core@core-01 ~/guestbook-example $ kubectl create -f redis-slave-controller.json
60 | redisSlaveController
61 | core@core-01 ~/guestbook-example $ kubectl create -f redis-slave-service.json
62 | I1105 17:08:44.219372 06719 restclient.go:133] Waiting for completion of operation 10
63 | redisslave
64 | ```
65 |
66 | Let’s take a look at the state of our Kubernetes cluster, we should see the three pods that we have just deployed. This number matches how many _Replication Controllers_ we had.
67 |
68 | ```
69 | core@core-01 ~/guestbook-example $ kubectl get pods
70 | NAME IMAGE(S) HOST LABELS STATUS
71 | redis-master-2 dockerfile/redis 172.17.8.102/ name=redis-master Pending
72 | 64749995-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.103/ name=redisslave Pending
73 | 6474482b-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.101/ name=redisslave Pending
74 | ```
75 |
76 | As it takes some time to pull the container images onto each of the machines, you might need to wait for a few minutes for pods to change from “Pending” state to “Running”. However, you don’t need to wait for all of them right now, unless you wish to test Redis manually.
77 |
Let’s deploy the PHP app now. It will consist of three _Replication Controllers_ and a _Service_.
79 |
80 | ```
81 | core@core-01 ~/guestbook-example $ kubectl create -f frontend-controller.json
82 | I1105 17:43:38.936889 10080 restclient.go:133] Waiting for completion of operation 12
83 | frontendController
84 | core@core-01 ~/guestbook-example $ kubectl create -f frontend-service.json
85 | I1105 17:43:46.444804 10132 restclient.go:133] Waiting for completion of operation 19
86 | frontend
87 | ```
88 |
If you run `kubectl get pods` now, you will observe that we have the new pods labeled “frontend” in a “Pending” state.
90 |
91 | ```
92 | core@core-01 ~/guestbook-example $ kubectl get pods
93 | NAME IMAGE(S) HOST LABELS STATUS
94 | 46849afa-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.103/ name=frontend Pending
95 | redis-master-2 dockerfile/redis 172.17.8.102/ name=redis-master Running
96 | 64749995-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.103/ name=redisslave Running
97 | 6474482b-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.101/ name=redisslave Running
98 | 468432e7-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.102/ name=frontend Pending
99 | 46844cba-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.101/ name=frontend Pending
100 | ```
101 |
102 | Running `kubectl get pods` after a few minutes shows us that the state has changed to “Running”.
103 |
104 | ```
105 | core@core-01 ~/guestbook-example $ kubectl get pods
106 | NAME IMAGE(S) HOST LABELS STATUS
107 | 46844cba-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.101/ name=frontend Running
108 | 46849afa-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.103/ name=frontend Running
109 | redis-master-2 dockerfile/redis 172.17.8.102/ name=redis-master Running
110 | 64749995-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.103/ name=redisslave Running
111 | 6474482b-650e-11e4-b80b-080027fb95c5 brendanburns/redis-slave 172.17.8.101/ name=redisslave Running
112 | 468432e7-6513-11e4-b80b-080027fb95c5 brendanburns/php-redis 172.17.8.102/ name=frontend Running
113 | ```
114 |
115 | We should now be able to test this. If you look at the output of `kubectl get services`, you will see that there is front-end portal and it can be accessed on `10.0.0.5:9998` locally, and you can probably call `curl 10.0.0.5:9998`, but this doesn’t quite show the app in action. It’s not quite clear why the host ports are not exposed in the console output of `kubectl get pods`, but this is something you can find by either looking at `frontend-controller.json` or calling `kubectl get pod --output=yaml --selector="name=frontend"`. So whichever you did, you will find that it binds to the host’s port _8000_. As we have three machines in the cluster with IP addresses _172.17.8.101_, _172.17.8.102_ and _172.17.8.103_, we can connect to each of them on port _8000_ and see exact same application on each:
116 |
117 | - http://172.17.8.101:8000
118 | - http://172.17.8.102:8000
119 | - http://172.17.8.103:8000
120 |
121 |
--------------------------------------------------------------------------------
/poseidon/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../coreos-vagrant/Vagrantfile
--------------------------------------------------------------------------------
/poseidon/config.rb:
--------------------------------------------------------------------------------
# Automatically set the etcd discovery token on 'vagrant up'.
#
# Reads kubernetes-cluster.yaml (skipping its '#cloud-config' first line),
# injects a fresh discovery URL, and writes the result to 'user-data', which
# the shared coreos-vagrant Vagrantfile feeds to each VM.

# Fix: File.exists? is deprecated in Ruby; File.exist? is the supported name.
if File.exist?('kubernetes-cluster.yaml') && ARGV[0].eql?('up')
  require 'open-uri'
  require 'yaml'

  # Each cluster needs its own one-time discovery URL.
  token = open('https://discovery.etcd.io/new').read

  # Drop the '#cloud-config' marker line before parsing as YAML.
  data = YAML.load(IO.readlines('kubernetes-cluster.yaml')[1..-1].join)
  data['coreos']['etcd2']['discovery'] = token

  # YAML.dump emits '---' as its first line; replace it with the
  # '#cloud-config' marker that cloud-init requires.
  lines = YAML.dump(data).split("\n")
  lines[0] = '#cloud-config'

  open('user-data', 'w') do |f|
    f.puts(lines.join("\n"))
  end
end

# Cluster sizing consumed by the coreos-vagrant Vagrantfile.
$num_instances=3
$vb_memory = 2048
$vb_cpus = 2
$update_channel = 'stable'
24 |
--------------------------------------------------------------------------------
/poseidon/network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/poseidon/network.png
--------------------------------------------------------------------------------
/poseidon/ubuntu-trusty/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Three-node Ubuntu 14.04 Kubernetes cluster on a Weave network: each host
# gets its own /24 under WEAVE_BASE_ADDR and Docker is bridged onto weave.

KUBE_RELEASE_VERSION = 'v0.8.2'
KUBE_RELEASE_TARBALL_URL = "https://github.com/GoogleCloudPlatform/kubernetes/releases/download/#{KUBE_RELEASE_VERSION}/kubernetes.tar.gz"

ETCD_RELEASE_VERSION = 'v2.0.0-rc.1'
ETCD_RELEASE_TARBALL_BASENAME = "etcd-#{ETCD_RELEASE_VERSION}-linux-amd64"
ETCD_RELEASE_TARBALL_URL = "https://github.com/coreos/etcd/releases/download/#{ETCD_RELEASE_VERSION}/#{ETCD_RELEASE_TARBALL_BASENAME}.tar.gz"

WEAVE_BASE_ADDR = '10.9'

# Shared secret for encrypted Weave peering; fresh on every `vagrant up`.
require 'securerandom'
WEAVE_PASSWORD = SecureRandom.uuid

# One etcd discovery URL shared by all three nodes.
require 'open-uri'
ETCD_DISCOVERY_TOKEN = open('https://discovery.etcd.io/new').read

Vagrant.configure(2) do |config|
  config.vm.box = "ubuntu/trusty64"

  ips = %W(
    172.30.8.101
    172.30.8.102
    172.30.8.103
  )

  (1..3).each do |i|
    config.vm.define vm_name = "ubuntu-kube-%02d" % i do |config|
      config.vm.hostname = vm_name
      config.vm.network "private_network", ip: ips[i-1]

      config.vm.provision "docker", images: [ "zettio/weave", "zettio/weavetools" ]

      # This host's address on the weave bridge, and the route covering all
      # hosts' weave subnets.
      bridge_address_cidr = "#{WEAVE_BASE_ADDR}.#{i}.1/24"
      breakout_route = "#{WEAVE_BASE_ADDR}.0.0/16"

      # Peer addresses for `weave launch`: every node except this one.
      # Bug fix: `i` is 1-based while array index `j` is 0-based, so the
      # original `j == i` excluded the wrong entry and left the node's own
      # address in its peer list.
      other_nodes = ips.collect.with_index{ |x,j| x unless j == i-1 }.compact

      config.vm.provision "shell", privileged: true, inline: <<-SHELL
        set -x
        mkdir -p /opt/bin

        # Install etcd binaries.
        curl -s -L #{ETCD_RELEASE_TARBALL_URL} | tar xz -C /opt/
        ln -s /opt/#{ETCD_RELEASE_TARBALL_BASENAME}/etcd /usr/local/sbin/
        ln -s /opt/#{ETCD_RELEASE_TARBALL_BASENAME}/etcd-migrate /usr/local/sbin/
        ln -s /opt/#{ETCD_RELEASE_TARBALL_BASENAME}/etcdctl /usr/local/bin/

        # Install Kubernetes server binaries plus upstart/init scripts.
        curl -s -L #{KUBE_RELEASE_TARBALL_URL} | tar xz -C /tmp/
        tar xzf /tmp/kubernetes/server/kubernetes-server-linux-amd64.tar.gz -C /opt
        ln -s /opt/kubernetes/server/bin/kubectl /usr/local/bin/
        # Bug fix: guestbook is a directory, so a plain `cp` would fail;
        # copy it recursively.
        cp -r /tmp/kubernetes/examples/guestbook /home/vagrant/guestbook-example
        ln -s /opt/kubernetes/server/bin/kube-apiserver /opt/bin/
        ln -s /opt/kubernetes/server/bin/kube-controller-manager /opt/bin/
        ln -s /opt/kubernetes/server/bin/kubelet /opt/bin/
        ln -s /opt/kubernetes/server/bin/kube-proxy /opt/bin/
        ln -s /opt/kubernetes/server/bin/kube-scheduler /opt/bin/
        cp /tmp/kubernetes/cluster/ubuntu/init_conf/* /etc/init/
        cp /tmp/kubernetes/cluster/ubuntu/initd_scripts/* /etc/init.d/
        cp /tmp/kubernetes/cluster/ubuntu/default_scripts/* /etc/default/

        # Create the weave bridge before Docker starts, so Docker can use it.
        curl -s -L https://raw.github.com/zettio/weave/master/weave -o /usr/local/sbin/weave
        chmod +x /usr/local/sbin/weave
        weave create-bridge
        ip addr add dev weave #{bridge_address_cidr}
        ip route add #{breakout_route} dev weave scope link
        ip route add 224.0.0.0/4 dev weave

        # Point Docker at the weave bridge (and disable --restart handling).
        echo "DOCKER_OPTS='--bridge=weave -r=false'" > /etc/default/docker
        restart docker

        env WEAVE_PASSWORD=#{WEAVE_PASSWORD} weave launch #{other_nodes.join(' ')}

        echo "ETCD_OPTS='${ETCD_OPTS} --discovery #{ETCD_DISCOVERY_TOKEN}'" >> /etc/default/etcd
        start etcd

        echo KUBELET_OPTS="--address=0.0.0.0 --port=10250 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true" >> /etc/default/kubelet
        start kubelet
      SHELL

      # The first node doubles as the Kubernetes master.
      if i == 1 then
        config.vm.provision "shell", privileged: true, inline: <<-SHELL
          # Bug fix: the original line was missing the closing single quote
          # inside the double-quoted string.
          echo "KUBE_CONTROLLER_MANAGER_OPTS='--master=127.0.0.1:8080 --machines=#{ips.join(',')} --logtostderr=true'" > /etc/default/kube-controller-manager
          start kube-apiserver
          start kube-controller-manager
          start kube-proxy
          start kube-scheduler
        SHELL
      end

    end
  end
end
94 |
--------------------------------------------------------------------------------
/poseidon/weave-helper:
--------------------------------------------------------------------------------
#!/bin/sh -x
# Watches `docker events` and, for every container that starts and has an IP
# address, disables TX checksum offload on its interface from inside the
# container's network namespace. Keep going on individual failures.
set +e

# Bug fix: with ${1:-'eth0'} the single quotes become part of the default
# value, yielding the bogus interface name 'eth0' (quotes included).
container_interface="${1:-eth0}"

docker events | while read event
do
    # Only react to container 'start' events.
    echo $event | grep -q -v '\ start$' && continue

    # Event lines look like '<timestamp>Z <container-id>: ...'; extract the id.
    with_container_id=`echo $event | sed 's/.*Z\ \(.*\):\ .*/\1/'`

    # Skip containers without an IP address (e.g. host networking); for the
    # rest, enter the container's network namespace and turn TX offload off.
    only_if="test -n '{{ .NetworkSettings.IPAddress }}'"
    in_namespace="nsenter -n -t {{ .State.Pid }} --"
    ethtool_tx_off="ethtool -K ${container_interface} tx off >/dev/null"

    # `docker inspect --format` fills in the template fields; the rendered
    # command is then executed on this host.
    command_template="${only_if} && { (${in_namespace} ${ethtool_tx_off}); }"

    eval `docker inspect --format="${command_template}" ${with_container_id}`
done
19 | done
20 |
--------------------------------------------------------------------------------
/quartet/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Multi-host Docker deployment with Swarm and Compose using Weave 0.11
3 | published: true
4 | tags: docker, docker-machine, docker-swarm, docker-compose, guide, automation, command-line
5 | ---
6 |
> ***This is an outdated guide; please find the latest version on our website!***
8 | > http://weave.works/guides/weave-and-docker-platform/index.html
9 |
10 |
11 | In this post I'd like to show how easily one can get up-and-running using [Weave](https://github.com/weaveworks/weave) with all the latest and greatest Docker tools - [Machine](https://github.com/docker/machine), [Swarm](https://github.com/docker/swarm) and [Compose](https://github.com/docker/compose). This was made especially simple with two recent release of Weave ([_v0.10_](https://github.com/weaveworks/weave/releases/tag/v0.10.0) & [_v0.11_](https://github.com/weaveworks/weave/releases/tag/v0.11.0)).
12 |
13 | > Since my last [blog post](http://blog.weave.works/2015/05/06/using-docker-machine-and-swarm-with-weave-0-10/), the Weaveworks team had been busy working on [new _v0.11_ release](https://github.com/weaveworks/weave/releases/tag/v0.11.0), that includes a number of great new features, one of which is [a proxy](http://docs.weave.works/weave/latest_release/proxy.html) that allows our users to simply call `docker run` (or the remote API) without needing to use `weave run`. This release also introduces [automatic IP address management](http://docs.weave.works/weave/latest_release/ipam.html), which Bryan has [blogged about yesterday](http://blog.weave.works/2015/05/26/let-weave-allocate-ip-addresses-for-you/).
14 |
This guide builds on what was learned from two previous posts where I showed how one can use [Machine with a single VM](http://blog.weave.works/2015/04/22/using-docker-machine-with-weave-0-10/) and [Swarm with 3 VMs](http://blog.weave.works/2015/05/06/using-docker-machine-and-swarm-with-weave-0-10/). In those two posts I used the Weave CLI against remote Docker host(s), leveraging features introduced in _v0.10_. With the [proxy](http://docs.weave.works/weave/latest_release/proxy.html) being introduced in _v0.11_, one can use the Docker CLI or API (via Compose) directly. Additionally, automatic IP allocation will also be used behind the scenes, lifting the burden of manual IP address assignment, which had been [a long awaited feature](https://github.com/weaveworks/weave/issues/22).
16 |
17 | ### What you will do?
18 |
This guide is designed to get you started with the Docker toolchain and Weave right out of the box, deploying and scaling your application across multiple hosts in a simple and transparent manner.
20 |
21 | 1. Setup a cluster of 3 VMs with Swarm and Weave configured by means of [a shell script](https://github.com/errordeveloper/weave-demos/blob/a90d959638948e796ab675e3dd0e1f98390ae3d0/quartet/scripts/setup-cluster.sh)
22 | 2. Deploy a simple 2-tier web application using Docker Compose
3. Scale the application from 1 web server to 3
24 |
25 | > I will post later on with details on how exactly this kind of setup works, for those who might like to reproduce it in a different environment, perhaps without using Docker Machine and VirtualBox, so you would see how it can work in an existing infrastructure.
26 |
27 | To follow this guide you will need to obtain the binaries for
28 |
29 | - [***`docker`***](https://docs.docker.com/installation/#installation) _(at least the client)_
30 | - [***`docker-machine`***](http://docs.docker.com/machine/#installation)
31 | - [***`docker-compose`***](http://docs.docker.com/compose/install/)
32 | - [***`docker-swarm`***](http://docs.docker.com/swarm/#install-swarm) _(optional, see below)_
33 | - [**VirtualBox**](https://www.virtualbox.org/wiki/Downloads)
34 |
35 | If you are using OS X, then you can install these tools with Homebrew, via
36 |
37 | brew install docker docker-machine docker-swarm docker-compose
38 |
39 | You will need to download and install VirtualBox manually as well, if you haven't done it yet. Please be sure to install latest version of Machine (_v0.2.0_), as there are some bugs in the previous release. You also want to use latest `boot2docker` VM image; you will get it if you haven't used Docker Machine previously on your computer, otherwise you should delete the cached ISO image located in ***`~/.docker/machine/cache/boot2docker.iso`*** before you proceed.
40 |
41 | > If you don't find a `docker-swarm` binary for your OS, and have Docker daemon available locally, you can set `DOCKER_SWARM_CREATE` like shown below before you proceed.
42 | > ```
43 | > export DOCKER_SWARM_CREATE="docker run --rm swarm create | tail -1"`
44 | > ```
45 | > Alternatively, it may be easier to just use `curl`:
46 | > ```
47 | > export DOCKER_SWARM_CREATE="curl -XPOST https://discovery-stage.hub.docker.com/v1/clusters"
48 | >```
49 |
50 | ## Let's go!
51 |
52 | First, we need a few scripts. To get them, run
53 |
54 | git clone https://github.com/errordeveloper/weave-demos
55 | cd weave-demos/quartet
56 |
57 | Now, we'll provision a cluster of 3 VMs. The following script will make sure all 3 VMs join the Swarm, and sets up the Weave network and WeaveDNS.
58 |
59 | > If you've followed one of my previous guides, you'll need to clear the VMs you've previously created. Check the output of `docker-machine ls`, and delete them with `docker-machine rm -f `.
60 |
61 | ./scripts/setup-cluster.sh
62 |
63 |
64 | Once the cluster is up, you want to do the following
65 |
66 | #### 1. go into app's directory
67 | `cd app/`
68 | #### 2. build images on each host
69 | `./build.sh` (This makes it quicker to scale)
70 | #### 3. setup the environment
71 | `eval $(docker-machine env --swarm dev-1)`
72 | #### 4. deploy
73 | `docker-compose up -d`
74 | #### 5. test, scale, test
75 |
76 | We have just deployed a standard Compose demo, which consists of a Python Flask app that uses Redis as its database. Our `docker-compose.yml` file differs slightly from the original, it simply sets `hostname: redis.weave.local` and `hostname: hello.weave.local` instead of using Docker links ([**see diff**](https://github.com/errordeveloper/weave-demos/commit/4874be9943b41d1ad5352cf580ed95c608577ecf?diff=split)). These hostnames are picked up by WeaveDNS and can be resolved from any container on the Weave network. WeaveDNS records also survive container restarts, unlike Docker's built-in links.
77 |
78 | ```
79 | > docker-compose ps
80 | Name Command State Ports
81 | -------------------------------------------------------------------------------------
82 | app_redis_1 /home/weavewait/weavewait ... Up 6379/tcp
83 | app_web_1 /home/weavewait/weavewait ... Up 192.168.99.102:32773->5000/tcp
84 | ```
85 |
From the above, you can see that the app can be accessed on `192.168.99.102:32773` (the randomly published host port shown in the `Ports` column); let's test this now.
87 |
88 | ```
89 | > curl 192.168.99.102:32773
90 | Hello World! I have been seen 1 times.
91 | > curl 192.168.99.102:32773
92 | Hello World! I have been seen 2 times.
93 | > curl 192.168.99.102:32773
94 | Hello World! I have been seen 3 times.
95 | ```
96 |
97 | Amazing, it worked!
98 |
99 | Of course, one server is not enough, if we have 3 VMs to our disposal. Let's scale this up!
100 |
101 | ```
102 | > docker-compose scale web=3
103 | Creating app_web_2...
104 | Creating app_web_3...
105 | Starting app_web_2...
106 | Starting app_web_3...
107 |
108 | > docker-compose ps
109 | Name Command State Ports
110 | -------------------------------------------------------------------------------------
111 | app_redis_1 /home/weavewait/weavewait ... Up 6379/tcp
112 | app_web_1 /home/weavewait/weavewait ... Up 192.168.99.102:32773->5000/tcp
113 | app_web_2 /home/weavewait/weavewait ... Up 192.168.99.100:32771->5000/tcp
114 | app_web_3 /home/weavewait/weavewait ... Up 192.168.99.101:32771->5000/tcp
115 | ```
116 |
117 | To verify it is working, we must test each of the new instances now.
118 | ```
119 | > curl 192.168.99.100:32771
120 | Hello World! I have been seen 4 times.
121 | > curl 192.168.99.101:32771
122 | Hello World! I have been seen 5 times.
123 | ```
124 |
All working well: 3 web server instances running on different hosts, connected with Weave, with no manual IP assignment required and no [Docker links limitations](https://github.com/docker/compose/issues/608) getting in our way.
126 |
127 | ## What's next?
128 |
129 | You can easily move the entire setup to run on a public cloud, with any of the many providers already available with Docker Machine.
130 |
131 | For example, you can follow part of the [Azure guide](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/#create-the-certificate-and-key-files-for-docker-machine-and-azure) (setup steps 1, 2, and 3), and then set `DOCKER_MACHINE_DRIVER` like this:
132 |
133 | export DOCKER_MACHINE_DRIVER=" \
134 | --driver azure \
135 | --azure-subscription-id=SubscriptionID \
136 | --azure-subscription-cert=mycert.pem \
137 | "
138 |
139 | Clean-up your local VMs, and re-run the cluster setup.
140 |
141 | docker-machine rm -f dev-1 dev-2 dev-3
142 | ./scripts/setup-cluster.sh
143 |
144 |
145 | _Now repeat steps 1-5 show earlier._
146 |
147 | You can deploy a different app, if you'd like. You don't have to reuse all of my scripts for this purpose, although you certainly might like to take a look at how [Weave proxy is launched](https://github.com/errordeveloper/weave-demos/blob/597a0d78a7d00fd6652c1df8b8562cd0023310f1/quartet/scripts/setup-cluster.sh#L30-L40) and how [Swarm agents](https://github.com/errordeveloper/weave-demos/blob/597a0d78a7d00fd6652c1df8b8562cd0023310f1/quartet/scripts/setup-cluster.sh#L48-L56) and [master](https://github.com/errordeveloper/weave-demos/blob/597a0d78a7d00fd6652c1df8b8562cd0023310f1/quartet/scripts/setup-cluster.sh#L62-L66) are setup agains it.
148 |
149 | ## Summary
150 |
151 | We have just tested out a full setup with Weave integrated into Docker toolchain. We have first setup 3 VMs locally on VirtualBox, then deployed a very simple 2-tier web application. In the upcoming guide, we will take a look into more details on how exactly this works and how you can reproduce an effectively identical setup on a different infrastructure, using only Swarm and Compose agains Docker hosts you would setup yourself.
152 |
153 | Follow [@weaveworks](https://twitter.com/weaveworks), so you don't miss any new posts. And you can always drop us a few lines at [team@weave.works](mailto:team@weave.works), to let us know what you think about Weave.
--------------------------------------------------------------------------------
/quartet/app/Dockerfile:
--------------------------------------------------------------------------------
# Image for the Flask + Redis demo app (see app.py); Python 2 syntax.
FROM python:2.7
# COPY is preferred over ADD for local files: same effect, no implicit
# URL-fetch or archive-unpack behavior.
COPY . /code
WORKDIR /code
RUN pip install -r requirements.txt
5 |
--------------------------------------------------------------------------------
/quartet/app/app.py:
--------------------------------------------------------------------------------
"""Minimal Flask + Redis hit counter used by the quartet Compose demo.

The Redis host name 'redis' is expected to resolve via WeaveDNS (the redis
service is given hostname redis.weave.local in docker-compose.yml).
"""
from flask import Flask
from redis import Redis
import os
app = Flask(__name__)
# 'redis' resolves through WeaveDNS rather than Docker links.
redis = Redis(host='redis', port=6379)

@app.route('/')
def hello():
    # Increment the shared counter on every request and report the new total.
    redis.incr('hits')
    return 'Hello World! I have been seen %s times.' % redis.get('hits')

if __name__ == "__main__":
    # Bind to all interfaces so the published container port is reachable.
    app.run(host="0.0.0.0", debug=True)
14 |
--------------------------------------------------------------------------------
/quartet/app/build.sh:
--------------------------------------------------------------------------------
#!/bin/sh -e
# Pre-build the app_web image on every Docker Machine VM, so that
# `docker-compose scale` does not have to build at scale time.

for m in $(docker-machine ls -q); do
    echo "Building on '${m}':"
    # $(docker-machine config ...) is intentionally unquoted: it expands to
    # several --tls*/-H flags that must be split into separate words. The
    # machine name itself is quoted for robustness.
    docker $(docker-machine config "${m}") build -t app_web .
done
7 |
--------------------------------------------------------------------------------
/quartet/app/docker-compose.yml:
--------------------------------------------------------------------------------
# Two-tier Compose demo: a Flask web app backed by Redis. Instead of Docker
# links, each service gets a *.weave.local hostname that WeaveDNS registers,
# so containers find each other across hosts (see app.py: Redis(host='redis')).
web:
  build: .
  command: python app.py
  ports:
    # Publish container port 5000 on a random host port.
    - "5000"
  volumes:
    - .:/code
  hostname: hello.weave.local
  environment:
    # Swarm affinity constraint: spread web containers across hosts.
    - "affinity:container!=app_web_*"
redis:
  image: redis
  hostname: redis.weave.local
14 |
--------------------------------------------------------------------------------
/quartet/app/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | redis
--------------------------------------------------------------------------------
/quartet/scripts/defaults.sh:
--------------------------------------------------------------------------------
## This script provides the ability to test different versions of
## Machine, Swarm and Docker binaries as well as the Weave script.
## It is meant to be sourced; every variable can be overridden from the
## caller's environment.

WEAVE=${WEAVE:-"$(git rev-parse --show-toplevel)/quartet/scripts/weave"}
DOCKER=${DOCKER:-"docker"}
DOCKER_MACHINE=${DOCKER_MACHINE:-"docker-machine"}
DOCKER_MACHINE_DRIVER=${DOCKER_MACHINE_DRIVER:-"--driver virtualbox"}
DOCKER_MACHINE_CREATE="${DOCKER_MACHINE} create ${DOCKER_MACHINE_DRIVER}"
DOCKER_SWARM_CREATE=${DOCKER_SWARM_CREATE:-"docker-swarm create"}

MACHINE_NAME_PREFIX=${MACHINE_NAME_PREFIX:-"dev"}

# Run a command ($2...) with the Docker environment of machine $1 exported.
# Executes in a subshell so the caller's environment is not polluted.
with_machine_env() {
  m=$1
  shift 1
  # Bug fix: quote "$@" so arguments containing whitespace survive intact;
  # the original unquoted $@ re-split them.
  (eval $($DOCKER_MACHINE env "${m}"); "$@")
}
18 |
--------------------------------------------------------------------------------
/quartet/scripts/on-each-host.sh:
--------------------------------------------------------------------------------
#!/bin/bash -e
# Run the given command once on each Docker Machine VM, with that machine's
# Docker environment exported (see with_machine_env in defaults.sh).

source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh

# Subshell per machine keeps each machine's env from leaking into the next.
for m in $(${DOCKER_MACHINE} ls -q)
do (echo "${m}:"; with_machine_env ${m} "$@")
done
8 |
--------------------------------------------------------------------------------
/quartet/scripts/setup-cluster-dev.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -xe
2 |
3 | ## DEVELOPMENT VERSION OF `setup-cluster.sh`, YOU SHOULD PROBABLY
4 | ## USE `setup-cluster.sh`, UNLESS YOU KNOW WHAT YOU ARE DOING.
5 |
6 | source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
7 |
8 | WEAVE="${WEAVE}-dev"
9 |
10 | head_node="${MACHINE_NAME_PREFIX}-1"
11 |
12 | ## Initial token to keep Machine happy
13 | temp_swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"
14 | swarm_flags="--swarm --swarm-discovery=${temp_swarm_dicovery_token}"
15 |
16 | ## Actual token to be used with proxied Docker
17 | swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"
18 |
19 | find_tls_args="cat /proc/\$(pgrep /usr/local/bin/docker)/cmdline | tr '\0' '\n' | grep ^--tls | tr '\n' ' '"
20 |
21 | for i in '1' '2' '3'; do
22 | if [ ${i} = '1' ]; then
23 | ## The first machine shall be the Swarm master
24 | $DOCKER_MACHINE_CREATE \
25 | ${swarm_flags} \
26 | --swarm-master \
27 | "${MACHINE_NAME_PREFIX}-${i}"
28 | else
29 | ## The rest of machines are Swarm slaves
30 | $DOCKER_MACHINE_CREATE \
31 | ${swarm_flags} \
32 | "${MACHINE_NAME_PREFIX}-${i}"
33 | fi
34 |
35 | ## This environment variable is respected by Weave,
36 | ## hence it needs to be exported
37 | export DOCKER_CLIENT_ARGS="$($DOCKER_MACHINE config)"
38 |
39 | for c in weave weavedns weaveexec; do
40 | docker ${DOCKER_CLIENT_ARGS} load -i ~/Code/weave/${c}.tar
41 | done
42 |
43 | tlsargs=$($DOCKER_MACHINE ssh "${MACHINE_NAME_PREFIX}-${i}" "${find_tls_args}")
44 |
45 | ## We are going to use IPAM, hence we launch it with
46 | ## the following arguments
47 | $WEAVE launch -iprange 10.2.3.0/24 -initpeercount 3
48 | ## WeaveDNS also needs to be launched
49 | $WEAVE launch-dns "10.9.1.${i}/24" -debug
50 | ## And now the proxy
51 | $WEAVE launch-proxy --with-dns --with-ipam ${tlsargs}
52 |
53 | ## Let's connect-up the Weave cluster by telling
54 | ## each of the node about the head node
55 | if [ ${i} -gt '1' ]; then
56 | $WEAVE connect $($DOCKER_MACHINE ip ${head_node})
57 | fi
58 |
59 | ## Default Weave proxy port is 12375, we shall point
60 | ## Swarm agents at it next
61 | weave_proxy_endpoint="$($DOCKER_MACHINE ip):12375"
62 |
63 | ## Now we need restart Swarm agents like this
64 | $DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent
65 | $DOCKER ${DOCKER_CLIENT_ARGS} run -d --name=swarm-agent \
66 | swarm join \
67 | --addr ${weave_proxy_endpoint} ${swarm_dicovery_token}
68 |
69 | done
70 |
71 | ## Next we will also restart the Swarm master with the new token
72 | export DOCKER_CLIENT_ARGS=$($DOCKER_MACHINE config ${head_node})
73 |
74 | swarm_master_args_fmt='-d --name={{.Name}} -p 3376:3376 {{range .HostConfig.Binds}}-v {{.}} {{end}}swarm{{range .Args}} {{.}}{{end}}'
75 |
76 | swarm_master_args=$($DOCKER ${DOCKER_CLIENT_ARGS} inspect \
77 | --format="${swarm_master_args_fmt}" \
78 | swarm-agent-master \
79 | | sed "s|${temp_swarm_dicovery_token}|${swarm_dicovery_token}|")
80 |
81 | $DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent-master
82 | $DOCKER ${DOCKER_CLIENT_ARGS} run ${swarm_master_args}
83 |
84 | ## And make sure Weave cluster setup is complete
85 | $WEAVE status
86 |
--------------------------------------------------------------------------------
/quartet/scripts/setup-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | ## If you want to reuse this script elsewhere, you probably want to
4 | ## copy all variables defined in `defaults.sh` here
5 | source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
6 |
7 | head_node="${MACHINE_NAME_PREFIX}-1"
8 |
9 | ## Initial token to keep Machine happy
10 | temp_swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"
11 | swarm_flags="--swarm --swarm-discovery=${temp_swarm_dicovery_token}"
12 |
13 | ## Actual token to be used with proxied Docker
14 | swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"
15 |
16 | for i in '1' '2' '3'; do
17 | if [ ${i} = '1' ]; then
18 | ## The first machine shall be the Swarm master
19 | $DOCKER_MACHINE_CREATE \
20 | ${swarm_flags} \
21 | --swarm-master \
22 | "${MACHINE_NAME_PREFIX}-${i}"
23 | else
24 | ## The rest of machines are Swarm slaves
25 | $DOCKER_MACHINE_CREATE \
26 | ${swarm_flags} \
27 | "${MACHINE_NAME_PREFIX}-${i}"
28 | fi
29 |
30 | ## This environment variable is respected by Weave,
31 | ## hence it needs to be exported
32 | export DOCKER_CLIENT_ARGS="$($DOCKER_MACHINE config)"
33 |
34 | ## We are going to use IPAM, hence we launch it with
35 | ## the following arguments
36 | $WEAVE launch -iprange 10.2.3.0/24 -initpeercount 3
37 | ## WeaveDNS also needs to be launched
38 | $WEAVE launch-dns "10.9.1.${i}/24" -debug
39 | ## And now the proxy
40 | $WEAVE launch-proxy --with-dns --with-ipam
41 |
42 | ## Let's connect-up the Weave cluster by telling
43 | ## each of the nodes about the head node
44 | if [ ${i} -gt '1' ]; then
45 | $WEAVE connect $($DOCKER_MACHINE ip ${head_node})
46 | fi
47 |
48 | ## Default Weave proxy port is 12375, we shall point
49 | ## Swarm agents at it next
50 | weave_proxy_endpoint="$($DOCKER_MACHINE ip):12375"
51 |
52 | ## Now we need to restart Swarm agents like this
53 | $DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent
54 | $DOCKER ${DOCKER_CLIENT_ARGS} run -d --name=swarm-agent \
55 | swarm join \
56 | --addr ${weave_proxy_endpoint} ${swarm_dicovery_token}
57 | done
58 |
59 | ## Next we will also restart the Swarm master with the new token
60 | export DOCKER_CLIENT_ARGS=$($DOCKER_MACHINE config ${head_node})
61 |
62 | $DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent-master
63 | $DOCKER ${DOCKER_CLIENT_ARGS} run -d --name=swarm-agent-master \
64 | -p 3376:3376 \
65 | swarm manage \
66 | -H "tcp://0.0.0.0:3376" ${swarm_dicovery_token}
67 |
68 | ## And make sure Weave cluster setup is complete
69 | $WEAVE status
70 |
--------------------------------------------------------------------------------
/quartet/scripts/setup-swarm-only-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
4 |
5 | head_node="${MACHINE_NAME_PREFIX}-1"
6 |
7 | swarm_dicovery_token=$(docker-swarm create)
8 |
9 | swarm_flags="--swarm --swarm-discovery=token://${swarm_dicovery_token}"
10 |
11 | for i in '1' '2' '3'; do
12 | if [ ${i} = '1' ]; then
13 | $DOCKER_MACHINE_CREATE \
14 | ${swarm_flags} \
15 | --swarm-master \
16 | "${MACHINE_NAME_PREFIX}-${i}"
17 | else
18 | $DOCKER_MACHINE_CREATE \
19 | ${swarm_flags} \
20 | "${MACHINE_NAME_PREFIX}-${i}"
21 | fi
22 |
23 | export DOCKER_CLIENT_ARGS="$(${DOCKER_MACHINE} config)"
24 |
25 | $WEAVE launch
26 | $WEAVE launch-dns "10.9.1.${i}/24" -debug
27 |
28 | if [ ${i} -gt '1' ]; then
29 | $WEAVE connect $($DOCKER_MACHINE ip ${head_node})
30 | fi
31 |
32 | unset DOCKER_CLIENT_ARGS
33 | done
34 |
--------------------------------------------------------------------------------
/quartet/scripts/weave-run-on-swarm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -xe
2 |
3 | ## NB: This is a very simple working prototype of how Weave can be used directly with Swarm,
4 | ## it is very basic and will be deprecated once native Docker extensions land.
5 |
6 | ## IT IS RECOMMENDED TO USE PROXY-BASED APPROACH (SEE README).
7 | ## PLEASE DON'T USE THIS APPROACH UNLESS YOU KNOW WHAT YOU ARE DOING.
8 |
9 | source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh
10 |
11 | weave_cidr=$1
12 | shift 1
13 |
14 | c=$(docker run -d --dns=172.17.42.1 $@)
15 |
16 | env WEAVEEXEC_DOCKER_ARGS="-e affinity:container==${c}" $WEAVE attach ${weave_cidr} ${c}
17 |
--------------------------------------------------------------------------------
/quartet/simple_machine_demo.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Using Docker Machine with Weave 0.10
3 | published: true
4 | tags: docker, docker-machine, guide, automation, command-line
5 | ---
6 |
7 | > ***This is an outdated guide, please find the latest version on our website now!***
8 | > http://weave.works/guides/weave-and-docker-platform/index.html
9 |
10 | In this post I'd like to show how quickly one can get up-and-running using [Weave](https://github.com/weaveworks/weave/) with [Docker Machine](https://github.com/docker/machine/). This was made possible with our latest [_v0.10.0_ release](https://github.com/weaveworks/weave/releases/tag/v0.10.0), which has many improvements including the ability to communicate with [remote Docker host](http://weaveblog.com/2015/04/20/remote-weaving-with-0-10/).
11 |
12 | To follow this guide you will need to obtain the binaries for
13 |
14 | - [***`docker` (at least the client)***](https://docs.docker.com/installation/#installation)
15 | - [***`docker-machine`***](http://docs.docker.com/machine/#installation)
16 | - [**VirtualBox**](https://www.virtualbox.org/wiki/Downloads)
17 |
18 | If you are using OS X, then you can install these tools with Homebrew like this:
19 |
20 | brew install docker docker-machine
21 |
22 | You will need to download and install VirtualBox manually as well, if you haven't done it yet. Please be sure to install latest version of Machine (_v0.2.0_), as there are some bugs in the previous release. You also want to use latest `boot2docker` VM image; you will get it if you haven't used Docker Machine previously on your computer, otherwise you should delete cached ISO image located in ***`~/.docker/machine/cache/boot2docker.iso`*** before you proceed.
23 |
24 |
25 | ## Let's proceed, it's only a few steps!
26 |
27 | First, we will provision a VirtualBox VM with `docker-machine create`, then run the Weave script against the new VM and set up a few test containers.
28 |
29 | docker-machine create --driver=virtualbox weave-1
30 | curl --silent --location https://git.io/weave --output ./weave
31 | chmod +x ./weave
32 |
33 | As I said, with Weave _v0.10.0_, you can run the `weave` command against a [remote Docker host](http://weaveblog.com/2015/04/20/remote-weaving-with-0-10/). You just need to make sure `DOCKER_HOST` environment variable is set, which `docker-machine env` does for you.
34 |
35 | eval `docker-machine env weave-1`
36 |
37 | Now you can launch Weave router and WeaveDNS.
38 |
39 | ./weave launch
40 | ./weave launch-dns 10.30.50.1/24
41 |
42 | Running containers is also pretty simple.
43 |
44 | First, the server:
45 |
46 | ./weave run --with-dns 10.5.2.1/24 \
47 | --hostname=hola.weave.local \
48 | errordeveloper/hello-weave
49 |
50 | Second a client:
51 |
52 | ./weave run --with-dns 10.5.2.2/24 \
53 | --hostname=test.weave.local \
54 | --name=test-client \
55 | --tty --interactive \
56 | errordeveloper/curl
57 |
58 | Now, let's test it out:
59 |
60 | ```
61 | > docker attach test-client
62 |
63 | test:/# ping -c 3 hola.weave.local
64 | PING hola.weave.local (10.5.2.1): 56 data bytes
65 | 64 bytes from 10.5.2.1: seq=0 ttl=64 time=0.130 ms
66 | 64 bytes from 10.5.2.1: seq=1 ttl=64 time=0.204 ms
67 | 64 bytes from 10.5.2.1: seq=2 ttl=64 time=0.155 ms
68 |
69 | --- hola.weave.local ping statistics ---
70 | 3 packets transmitted, 3 packets received, 0% packet loss
71 |
72 | test:/# curl hola.weave.local:5000
73 | Hello, Weave!
74 | test:/#
75 | ```
76 |
77 | ## What's next?
78 |
79 | You can easily extend this setup to more than one Docker host; here is a hint (best to use a new terminal window).
80 |
81 | docker-machine create -d virtualbox weave-2
82 | eval `docker-machine env weave-2`
83 | ./weave launch
84 | ./weave launch-dns 10.30.50.2/24
85 | ./weave connect `docker-machine ip weave-1`
86 | ./weave run ... # what do you want to run?
87 |
88 | Next I am planning to post a full guide on how to use Weave with Machine, Swarm and Compose all 4 together, but you should also checkout [Ben Firshman's talk](https://clusterhq.com/blog/adding-compose-to-the-swarm-demo/) on how to use these as well as [Flocker](https://clusterhq.com).
89 |
90 | Do make sure to follow [@weaveworks](https://twitter.com/weaveworks), so you don't miss any new posts. You can always drop us a few lines to [team@weave.works](mailto:team@weave.works), to let us know what you think about Weave.
--------------------------------------------------------------------------------
/quartet/simple_swarm_demo.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Using Docker Machine and Swarm with Weave 0.10
3 | published: true
4 | tags: docker, docker-machine, docker-swarm, guide, automation, command-line
5 | ---
6 |
7 | > ***This is an outdated guide, please find the latest version on our main website now!***
8 | > http://weave.works/guides/weave-and-docker-platform/index.html
9 |
10 |
11 | In this post I'd like to show how quickly one can get up-and-running using [Weave](https://github.com/weaveworks/weave/) with [Docker Machine](https://github.com/docker/machine/) and [Docker Swarm](https://github.com/docker/swarm). This was made possible with our latest [_v0.10.0_ release](https://github.com/weaveworks/weave/releases/tag/v0.10.0), which has many improvements, including the ability to communicate with [remote Docker hosts](http://weaveblog.com/2015/04/20/remote-weaving-with-0-10/). This guide builds on what was learned from [a previous post](http://blog.weave.works/2015/04/22/using-docker-machine-with-weave-0-10/), where I demonstrated how to use Weave with Docker Machine on a single host.
12 |
13 | To follow this guide you will need to obtain the binaries for
14 |
15 | - [***`docker` (at least the client)***](https://docs.docker.com/installation/#installation)
16 | - [***`docker-machine`***](http://docs.docker.com/machine/#installation)
17 | - [***`docker-swarm`***](http://docs.docker.com/swarm/#install-swarm)
18 | - [**VirtualBox**](https://www.virtualbox.org/wiki/Downloads)
19 |
20 | If you are using OS X, then you can install these tools with Homebrew, via
21 |
22 | brew install docker docker-machine docker-swarm
23 |
24 | You will need to download and install VirtualBox manually as well, if you haven't done it yet. Please be sure to install latest version of Machine (_v0.2.0_), as there are some bugs in the previous release. You also want to use latest `boot2docker` VM image; you will get it if you haven't used Docker Machine previously on your computer, otherwise you should delete the cached ISO image located in ***`~/.docker/machine/cache/boot2docker.iso`*** before you proceed.
25 |
26 | ## Let's go!
27 |
28 | First, we need a few scripts. To get them, run
29 |
30 | git clone https://github.com/errordeveloper/weave-demos
31 | cd weave-demos/quartet
32 |
33 | Now, we'll provision a cluster of 3 VMs. The following script will make sure all 3 VMs join the Swarm, and sets up the Weave network and WeaveDNS.
34 |
35 | > If you've followed my previous guide, you'll need to clear the VMs you've previously created. Check the output of `docker-machine ls`, and delete them with `docker-machine rm -f <machine-name>`.
36 |
37 | ./scripts/setup-swarm-only-cluster.sh
38 |
39 | Next, we need to make sure the right environment variables are set to talk to the Swarm master.
40 |
41 | eval `docker-machine env --swarm dev-1`
42 |
43 | Although Weave _0.10.0_ can interact with remote Docker hosts, it doesn't yet work out of the box. So, I created a small script that does the right thing. Our _"Hello, Weave!"_ server can be launched like this:
44 |
45 | ./scripts/weave-run-on-swarm.sh 10.5.2.1/24 \
46 | --hostname=hola.weave.local \
47 | errordeveloper/hello-weave
48 |
49 | And our client container can be launched in a very similar way:
50 |
51 | ./scripts/weave-run-on-swarm.sh 10.5.2.2/24 \
52 | --hostname=test.weave.local \
53 | --name=test-client \
54 | --tty --interactive \
55 | errordeveloper/curl
56 |
57 | Now, let's test it out:
58 |
59 | ```
60 | > docker attach test-client
61 |
62 | test:/# ping -c 3 hola.weave.local
63 | PING hola.weave.local (10.5.2.1): 56 data bytes
64 | 64 bytes from 10.5.2.1: seq=0 ttl=64 time=0.187 ms
65 | 64 bytes from 10.5.2.1: seq=1 ttl=64 time=0.119 ms
66 | 64 bytes from 10.5.2.1: seq=2 ttl=64 time=0.057 ms
67 |
68 | --- hola.weave.local ping statistics ---
69 | 3 packets transmitted, 3 packets received, 0% packet loss
70 | round-trip min/avg/max = 0.057/0.121/0.187 ms
71 | test:/# curl hola.weave.local:5000
72 | Hello, Weave!
73 | test:/#
74 | ```
75 |
76 | ## What's next?
77 |
78 | You can easily move the entire setup to run on a public cloud, with any of the many providers already available with Docker Machine.
79 |
80 | For example, you can follow part of the [Azure guide](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/#create-the-certificate-and-key-files-for-docker-machine-and-azure) (setup steps 1, 2, and 3), and then set `DOCKER_MACHINE_DRIVER` like this:
81 |
82 | export DOCKER_MACHINE_DRIVER=" \
83 | --driver azure \
84 | --azure-subscription-id=SubscriptionID \
85 | --azure-subscription-cert=mycert.pem \
86 | "
87 |
88 | Clean-up your local VMs, and re-run the cluster setup.
89 |
90 | docker-machine rm -f dev-1 dev-2 dev-3
91 | ./scripts/setup-swarm-only-cluster.sh
92 | eval `docker-machine env --swarm dev-1`
93 | ../scripts/weave-run-on-swarm.sh ... # what do you want to run?
94 |
95 | Next, I am planning to post a full guide on how to use Weave with Machine, Swarm and Compose altogether.
96 |
97 | Follow [@weaveworks](https://twitter.com/weaveworks), so you don't miss any new posts. And you can always drop us a few lines at [team@weave.works](mailto:team@weave.works), to let us know what you think about Weave.
--------------------------------------------------------------------------------
/sparkles/.gitignore:
--------------------------------------------------------------------------------
1 | cloud/kill*
2 | cloud/terraform-bin*
3 | cloud/terraform-playgound/
4 | cloud/terraform.tfstate
5 | cloud/terraform.tfstate.backup
6 | cloud/account.json
7 | cloud/client_secrets.json
8 | user-data
9 |
--------------------------------------------------------------------------------
/sparkles/.vagrant:
--------------------------------------------------------------------------------
1 | ../.vagrant/
--------------------------------------------------------------------------------
/sparkles/DEMO.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Networking Spark Cluster on Docker with Weave
3 | tags: docker, spark, vagrant, cloud, coreos, usecase, guide
4 | ---
5 |
6 | In this guide, I will show you how easy it is to deploy a Spark cluster using [Docker](https://www.docker.com/) and [Weave](https://weave.works/), running on [CoreOS](https://coreos.com/).
7 |
8 | [Apache Spark](http://spark.apache.org/) is a fast and general-purpose cluster computing system. It provides high-level APIs in Java, Scala and Python, and an optimized engine that supports general execution graphs. It also supports a rich set of higher-level tools, including [stream processing](http://spark.apache.org/docs/1.2.0/streaming-programming-guide.html). For this guide, I am going to demonstrate a basic stream processing example in Python. I will use Weave to connect 3 Spark nodes on a fully isolated and portable network with DNS, no reconfiguration would be required if you wish to run this elsewhere.
9 |
10 | To keep things simple for you, I will show how to setup the Spark cluster using Vagrant. If you would like to run a really big workload in the cloud, please refer to [my other blog post](http://weaveblog.com/2014/12/18/automated-provisioning-of-multi-cloud-weave-network-terraform/), but you probably want to try this first and make sure you understand all the relevant steps. For this guide, I made sure it's super easy to get up and running and you don't have to pay for a cloud services account.
11 |
12 | ## Let's go!
13 |
14 | Firstly, let's checkout the code and bring up 3 VMs on Vagrant:
15 | ```
16 | git clone https://github.com/errordeveloper/weave-demos
17 | cd weave-demos/sparkles
18 | vagrant up
19 | ```
20 |
21 | Vagrant will boot and provision 3 VMs, shortly after there will be a Spark cluster running with master on the head node (`core-01`) and workers on the remaining `core-02` and `core-03`. To keep this guide short, I will not explain how exactly provisioning works, as I have [done so previously](http://weaveblog.com/2014/10/28/running-a-weave-network-on-coreos/).
22 |
23 | Now, let's login to `core-01`:
24 | ```
25 | vagrant ssh core-01
26 | ```
27 |
28 | A few container images should be downloading in the background. It takes a few minutes, but you can run `watch docker images` and wait for the following to appear:
29 | ```
30 | REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
31 | errordeveloper/weave-spark-master-minimal latest 437bd4307e0e 47 hours ago 430.4 MB
32 | errordeveloper/weave-spark-worker-minimal latest bdb33ca885ae 47 hours ago 430.4 MB
33 | errordeveloper/weave-twitter-river-minimal latest af9f7dad1877 47 hours ago 193.8 MB
34 | errordeveloper/weave-spark-shell-minimal latest 8d11396e01c2 47 hours ago 574.6 MB
35 | zettio/weavetools 0.9.0 6c2dd751b59c 2 weeks ago 5.138 MB
36 | zettio/weavetools latest 6c2dd751b59c 2 weeks ago 5.138 MB
37 | zettio/weavedns 0.9.0 8f3a856eda8f 2 weeks ago 9.382 MB
38 | zettio/weavedns latest 8f3a856eda8f 2 weeks ago 9.382 MB
39 | zettio/weave 0.9.0 efb52cb2a3b8 2 weeks ago 11.35 MB
40 | zettio/weave latest efb52cb2a3b8 2 weeks ago 11.35 MB
41 | ```
42 |
43 | I have prepared a set of [lean](http://weaveblog.com/2014/12/09/running-java-applications-in-docker-containers/) Spark container images for the purpose of this demo.
44 |
45 | > Note: You can use images of your own if you'd like, just make sure to consult my [Dockerfile](https://github.com/errordeveloper/weave-demos/blob/master/java-containers/spark/base/Dockerfile#L33-L34) for the `nsswitch.conf` tweak, you will need it to make sure DNS works correctly.
46 |
47 | You may have noticed there is Elasticsearch running, I will not be using it for the purpose of this guide, but it's there for you to experiment with, if you'd like.
48 |
49 | Once all of the images are downloaded, Spark cluster will get bootstrapped shortly.
50 |
51 | You can tail the logs and see 2 workers joining the cluster, these go by DNS names `spark-worker-1.weave.local` and `spark-worker-2.weave.local`:
52 |
53 | ```
54 | core@core-01 ~ $ journalctl -f -u spark
55 | ...
56 | Feb 18 16:09:34 core-01 docker[3658]: 15/02/18 16:09:34 INFO Master: I have been elected leader! New state: ALIVE
57 | Feb 18 16:10:15 core-01 docker[3658]: 15/02/18 16:10:15 INFO Master: Registering worker spark-worker-1.weave.local:44122 with 1 cores, 982.0 MB RAM
58 | Feb 18 16:10:17 core-01 docker[3658]: 15/02/18 16:10:17 INFO Master: Registering worker spark-worker-2.weave.local:33557 with 1 cores, 982.0 MB RAM
59 | ```
60 |
61 | > Note: these are not very big compute nodes, if your machine has more resources, you can deploy bigger VMs by setting `$vb_memory` and `$vb_cpus` in `config-override.rb`.
62 |
63 | ## Ready to work!
64 |
65 | Now everything is ready to deploy a workload on the cluster. I will submit a simple job written in Python, featuring newly added stream API.
66 |
67 | Let's start pyspark container:
68 | ```
69 | sudo weave run \
70 | --with-dns \
71 | 10.10.1.88/24 \
72 | --hostname=spark-shell.weave.local \
73 | \
74 | --tty --interactive \
75 | --name=spark-shell \
76 | --entrypoint=pyspark \
77 | errordeveloper/weave-spark-shell-minimal:latest \
78 | \
79 | --master spark://spark-master.weave.local:7077
80 | ```
81 |
82 | The IP address I have picked for this container is `10.10.1.88`, it's part of the `10.10.1.0/24` subnet, which had been allocated for the cluster, you can use any other IP in that range. This container will get a DNS name `spark-shell.weave.local`, that's simply taken care of by passing `--with-dns` and `--hostname=...`. Most of the remaining arguments are not specific to Weave, these are just usual `docker run` arguments, followed by `pyspark` arguments, where the master node is addressed by its DNS name.
83 |
84 | For the demo to work, you will also need a data source of some sort. Naturally, it will run in a container as well, which in turn joins the Weave network.
85 |
86 | Here is a very simple one for you:
87 | ```
88 | sudo weave run --with-dns 10.10.1.99/24 \
89 | --hostname=spark-data-source.weave.local \
90 | busybox sh -c 'nc -ll -p 9999 -e yes Hello, Weave!'
91 | ```
92 | So we will have a netcat server on 9999, with DNS name `spark-data-source.weave.local` and IP address `10.10.1.99`. Weave will make this server reachable from any node in the cluster, and it can be moved between hosts without the need to change any of your code or config.
93 |
94 | Next, you want to attach to the Spark shell container:
95 |
96 | ```
97 | core@core-01 ~ $ docker attach spark-shell
98 | ...
99 | Welcome to
100 | ____ __
101 | / __/__ ___ _____/ /__
102 | _\ \/ _ \/ _ `/ __/ '_/
103 | /__ / .__/\_,_/_/ /_/\_\ version 1.2.1
104 | /_/
105 |
106 | Using Python version 2.7.6 (default, Nov 23 2014 14:48:23)
107 | SparkContext available as sc.
108 | >>> 15/02/18 17:10:37 INFO SparkDeploySchedulerBackend: Registered executor: Actor[akka.tcp://sparkExecutor@spark-worker-2.weave.local:34277/user/Executor#-2039127650] with ID 0
109 | 15/02/18 17:10:37 INFO SparkDeploySchedulerBackend: Registered executor: Actor[akka.tcp://sparkExecutor@spark-worker-1.weave.local:44723/user/Executor#-1272098548] with ID 1
110 | 15/02/18 17:10:38 INFO BlockManagerMasterActor: Registering block manager spark-worker-2.weave.local:44675 with 267.3 MB RAM, BlockManagerId(0, spark-worker-2.weave.local, 44675)
111 | 15/02/18 17:10:38 INFO BlockManagerMasterActor: Registering block manager spark-worker-1.weave.local:36614 with 267.3 MB RAM, BlockManagerId(1, spark-worker-1.weave.local, 36614)
112 | ```
113 |
114 | The code we are going to run is based on the [`streaming/network_wordcount.py`](https://github.com/apache/spark/blob/a8eb92dcb9ab1e6d8a34eed9a8fddeda645b5094/examples/src/main/python/streaming/network_wordcount.py) example, which counts words in a text stream received from the data source server every second.
115 | ```
116 | >>>
117 | >>> from pyspark.streaming import StreamingContext
118 | >>>
119 | >>> ssc = StreamingContext(sc, 1)
120 | >>>
121 | >>> lines = ssc.socketTextStream('spark-data-source.weave.local', 9999)
122 | >>>
123 | >>> counts = lines.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
124 | >>>
125 | >>> counts.pprint()
126 | >>>
127 | >>> ssc.start(); ssc.awaitTermination();
128 | ```
129 |
130 | Amongst much of log messages, you should see this being printed periodically:
131 | ```
132 | -------------------------------------------
133 | Time: 2015-02-18 18:10:56
134 | -------------------------------------------
135 | ('Hello,', 130962)
136 | ('Weave!', 130962)
137 | ```
138 |
139 | ## Conclusion
140 |
141 | In this guide, we have looked at deploying Apache Spark with Docker and Weave.
142 |
143 | There are several advantages that Weave brings to a containerised Spark cluster:
144 |
145 | 1. Secure and simple to setup virtual network with DNS out-of-the box
146 | 2. No need to care about what container ports are published to host's interface
147 | 3. Worker (and master) containers can be moved between hosts
148 | 4. Scaling Spark cluster and introducing ad-hoc services is very easy
149 | 5. Moving the entire setup to new infrastructure required no code changes
150 |
151 | Hope you find the information in this guide useful, and do make sure to follow [@weaveworks](https://twitter.com) on Twitter to read more guides like this. You can also contact us via [help@weave.works](mailto:help@weave.works?subject=[sparkles]), and let us know of anything interesting you built using Weave.
--------------------------------------------------------------------------------
/sparkles/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../coreos-vagrant/Vagrantfile
--------------------------------------------------------------------------------
/sparkles/cloud/cloud-config.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | coreos:
3 | update:
4 | reboot-strategy: 'off'
5 | units:
6 | - name: 10-weave.network
7 | runtime: false
8 | content: |
9 | [Match]
10 | Type=bridge
11 | Name=weave*
12 | [Network]
13 |
14 | - name: weave.service
15 | content: |
16 | [Unit]
17 | After=install-weave.service
18 | Description=Weave Network
19 | Documentation=http://zettio.github.io/weave/
20 | Requires=install-weave.service
21 | [Service]
22 | EnvironmentFile=/etc/weave.env
23 | ExecStartPre=/opt/bin/weave launch $WEAVE_PEERS
24 | ExecStartPre=/opt/bin/weave launch-dns $WEAVEDNS_ADDR
25 | ExecStart=/usr/bin/docker logs -f weave
26 | SuccessExitStatus=2
27 | ExecStop=/opt/bin/weave stop
28 | ExecStop=/opt/bin/weave stop-dns
29 |
30 | - name: elasticsearch.service
31 | content: |
32 | [Unit]
33 | After=weave.service pull-elasticsearch-image.service
34 | Description=ElasticSearch on Weave
35 | Documentation=https://registry.hub.docker.com/u/errordeveloper/weave-elasticsearch-minimal/
36 | Requires=weave.service pull-elasticsearch-image.service
37 | [Service]
38 | EnvironmentFile=/etc/weave.env
39 | ExecStartPre=/opt/bin/weave \
40 | run --with-dns $ELASTICSEARCH_NODE_ADDR \
41 | --name=elasticsearch \
42 | -h $ELASTICSEARCH_NODE_NAME \
43 | $ELASTICSEARCH_CONTAINER
44 | ExecStart=/usr/bin/docker attach elasticsearch
45 | SuccessExitStatus=255
46 | ExecStop=/usr/bin/docker kill elasticsearch ; /usr/bin/docker rm elasticsearch
47 |
48 | - name: spark.service
49 | content: |
50 | [Unit]
51 | After=weave.service pull-spark-image.service
52 | Description=Apache Spark Cluster
53 | Documentation=https://spark.apache.org/docs/latest/
54 | Requires=weave.service pull-spark-image.service
55 | [Service]
56 | EnvironmentFile=/etc/weave.env
57 | ExecStartPre=/opt/bin/weave \
58 | run --with-dns $SPARK_NODE_ADDR \
59 | --name=spark \
60 | -h $SPARK_NODE_NAME \
61 | $SPARK_CONTAINER \
62 | $SPARK_CONTAINER_ARGS
63 | ExecStart=/usr/bin/docker attach spark
64 | SuccessExitStatus=255
65 | Restart=always
66 | ExecStop=/usr/bin/docker kill spark ; /usr/bin/docker rm spark
67 |
68 | - name: pull-spark-image.service
69 | content: |
70 | [Unit]
71 | After=docker.service network-online.target
72 | Requires=docker.service
73 | [Service]
74 | EnvironmentFile=/etc/weave.env
75 | Type=oneshot
76 | RemainAfterExit=yes
77 | ExecStart=/usr/bin/docker pull $SPARK_CONTAINER
78 |
79 | - name: pull-elasticsearch-image.service
80 | content: |
81 | [Unit]
82 | After=docker.service network-online.target
83 | Requires=docker.service
84 | [Service]
85 | EnvironmentFile=/etc/weave.env
86 | Type=oneshot
87 | RemainAfterExit=yes
88 | ExecStart=/usr/bin/docker pull $ELASTICSEARCH_CONTAINER
89 |
90 | - name: install-weave.service
91 | command: start
92 | enable: true
93 | content: |
94 | [Unit]
95 | After=pre-fetch-container-images.service
96 | After=network-online.target
97 | After=docker.service
98 | Description=Install Weave
99 | Documentation=http://zettio.github.io/weave/
100 | Requires=pre-fetch-container-images.service
101 | Requires=network-online.target
102 | Requires=docker.service
103 | [Service]
104 | Type=oneshot
105 | RemainAfterExit=yes
106 | ExecStartPre=/bin/mkdir -p /opt/bin/
107 | ExecStartPre=/usr/bin/curl \
108 | --silent \
109 | --location \
110 | https://github.com/zettio/weave/releases/download/latest_release/weave \
111 | --output /opt/bin/weave
112 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
113 | ExecStart=/bin/echo Weave Installed
114 |
115 | - name: pre-fetch-container-images.service
116 | command: start
117 | enable: true
118 | content: |
119 | [Unit]
120 | After=docker.service
121 | Requires=docker.service
122 | [Service]
123 | EnvironmentFile=/etc/pre-fetch-container-images.env
124 | Type=oneshot
125 | RemainAfterExit=yes
126 | ExecStart=/bin/sh -c 'for i in $PRE_FETCH_CONTAINER_IMAGES; do /usr/bin/docker pull $i; done'
127 |
128 | write_files:
129 | - path: /etc/pre-fetch-container-images.env
130 | permissions: '0644'
131 | owner: root
132 | content: |
133 | PRE_FETCH_CONTAINER_IMAGES="\
134 | zettio/weave:latest \
135 | zettio/weavedns:latest \
136 | zettio/weavetools:latest \
137 | "
138 |
139 | - path: /opt/bin/spark-shell-gce-scala
140 | permissions: '0755'
141 | owner: root
142 | content: |
143 | #!/bin/sh -x
144 | sudo weave run --with-dns 10.10.1.88/24 \
145 | --tty --interactive \
146 | --hostname=spark-shell.weave.local \
147 | --name=spark-shell \
148 | errordeveloper/weave-spark-shell-minimal:latest \
149 | --master spark://spark-master-gce.weave.local:7077
150 | docker attach spark-shell
151 | docker rm -f spark-shell
152 |
153 | - path: /opt/bin/spark-shell-gce-python
154 | permissions: '0755'
155 | owner: root
156 | content: |
157 | #!/bin/sh -x
158 | sudo weave run --with-dns 10.10.1.88/24 \
159 | --tty --interactive \
160 | --hostname=spark-shell.weave.local \
161 | --name=spark-shell \
162 | --entrypoint=pyspark \
163 | errordeveloper/weave-spark-shell-minimal:latest \
164 | --master spark://spark-master-gce.weave.local:7077
165 | docker attach spark-shell
166 | docker rm -f spark-shell
167 |
168 | - path: /opt/bin/spark-shell-local-python
169 | permissions: '0755'
170 | owner: root
171 | content: |
172 | #!/bin/sh -x
173 | sudo weave run --with-dns 10.10.1.88/24 \
174 | --tty --interactive \
175 | --hostname=spark-shell.weave.local \
176 | --name=spark-shell \
177 | --entrypoint=pyspark \
178 | errordeveloper/weave-spark-shell-minimal:latest \
179 | --master spark://spark-master.weave.local:7077
180 | docker attach spark-shell
181 | docker rm -f spark-shell
182 |
--------------------------------------------------------------------------------
/sparkles/cloud/genenv.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ## This script takes count of the instance and the name of the cloud.
3 | ## It only generates and writes /etc/weave.env, and doesn't run anything.
4 | cloud=$1
5 | count=$2
6 | crypt=$3
7 | shift 3
8 |
9 | case "weave-${cloud}-${count}" in
10 | (weave-gce-0)
11 | ## first machine in GCE doesn't yet know of any other peers
12 | known_weave_nodes=''
13 | ## it also happens to run the Spark master JVM
14 | spark_node_role='master'
15 | spark_container_args=''
16 | spark_node_name="spark-${spark_node_role}-gce.weave.local"
17 | break
18 | ;;
19 | (weave-gce-*)
20 | ## any other node in GCE connects to the first one by native DNS name
21 | known_weave_nodes='weave-gce-0'
22 | ## these nodes run Spark worker JVM's and connect to master using weave DNS
23 | spark_node_role='worker'
24 | spark_container_args='spark://spark-master-gce.weave.local:7077'
25 | spark_node_name="spark-${spark_node_role}-${cloud}-${count}.weave.local"
26 | break
27 | ;;
28 | (weave-aws-*)
29 | ## every node in AWS connects to all GCE nodes by IP address
30 | known_weave_nodes="$@"
31 | ## same as in GCE
32 | spark_node_role='worker'
33 | spark_container_args='spark://spark-master-gce.weave.local:7077'
34 | spark_node_name="spark-${spark_node_role}-${cloud}-${count}.weave.local"
35 | break
36 | ;;
37 | esac
38 |
39 | case "${cloud}" in
40 | (gce)
41 | weavedns_addr="10.10.2.1${count}/16"
42 | spark_node_addr="10.10.1.1${count}/24"
43 | elasticsearch_node_addr="10.10.1.2${count}/24"
44 | break
45 | ;;
46 | (aws)
47 | weavedns_addr="10.10.2.2${count}/16"
48 | spark_node_addr="10.10.1.3${count}/24"
49 | elasticsearch_node_addr="10.10.1.4${count}/24"
50 | break
51 | ;;
52 | esac
53 |
54 | cat << ENVIRON | sudo tee /etc/weave.env
55 | WEAVE_PEERS="${known_weave_nodes}"
56 | WEAVE_PASSWORD="${crypt}"
57 | WEAVEDNS_ADDR="${weavedns_addr}"
58 | SPARK_NODE_ADDR="${spark_node_addr}"
59 | SPARK_NODE_NAME="${spark_node_name}"
60 | SPARK_CONTAINER="errordeveloper/weave-spark-${spark_node_role}-minimal:latest"
61 | SPARK_CONTAINER_ARGS="${spark_container_args}"
62 | ELASTICSEARCH_NODE_ADDR="${elasticsearch_node_addr}"
63 | ELASTICSEARCH_NODE_NAME="elasticsearch-${cloud}-${count}.weave.local"
64 | ELASTICSEARCH_CONTAINER="errordeveloper/weave-twitter-river-minimal:latest"
65 | ENVIRON
66 |
--------------------------------------------------------------------------------
/sparkles/cloud/infra/main.tf:
--------------------------------------------------------------------------------
// Declare and provision 3 GCE instances
resource "google_compute_instance" "weave" {
count = 3
// By default (see variables.tf), these are going to be of type 'n1-standard-1' in zone 'us-central1-a'.
machine_type = "${var.gce_machine_type}"
zone = "${var.gce_zone}"

// Ensure clear host naming scheme, which results in functional native DNS within GCE
name = "weave-gce-${count.index}" // => `weave-gce-{0,1,2}`

// Attach a CoreOS image as the primary disk (default in variables.tf is a
// stable-channel image: coreos-stable-557-2-0)
disk {
image = "${var.gce_coreos_disk_image}"
}

// Attach to a network with some custom firewall rules and static IPs (details further down);
// the static address keeps the peer list written by genenv.sh valid across reboots
network {
source = "${google_compute_network.weave.name}"
address = "${element(google_compute_address.weave.*.address, count.index)}"
}

// Provisioning

// 1. Cloud Config phase writes systemd unit definitions and only starts two host-independent units —
// `pre-fetch-container-images.service` and `install-weave.service`
metadata {
user-data = "${file("cloud-config.yaml")}"
}

// 2. Upload shell script that generates host-specific environment file to be used by `weave.service`
// NOTE(review): `key_file` inside `connection` is legacy (pre-0.7) Terraform
// syntax — later versions use `private_key` with the key *contents*; confirm
// the pinned Terraform version before upgrading.
provisioner "file" {
source = "genenv.sh"
destination = "/tmp/genenv.sh"
connection {
user = "core"
key_file = "${var.gce_key_path}"
}
}

// 3. Run the `genenv.sh` script
// 4. Start `weave.service`
// 5. Start `elasticsearch.service` and `spark.service`
provisioner "remote-exec" {
inline = [
"sudo sh /tmp/genenv.sh gce ${count.index} '${var.weave_launch_password}'",
"sudo systemctl start weave",
"sudo systemctl start elasticsearch spark",
]
connection {
user = "core"
key_file = "${var.gce_key_path}"
}
}
}
55 |
// Custom GCE network declaration, so we can set firewall rules below
resource "google_compute_network" "weave" {
name = "weave"
ipv4_range = "172.220.0.0/16"
}

// Firewall rules for the network (allow inbound ssh and weave connections)
resource "google_compute_firewall" "weave" {
name = "ports"
network = "${google_compute_network.weave.name}"

allow {
protocol = "icmp"
}

// 22 = ssh (provisioners), 6783 = weave router port
allow {
protocol = "tcp"
ports = ["22", "6783"]
}

// NOTE(review): ssh does not use udp/22 — only udp/6783 (weave data channel)
// should be needed here; confirm before tightening.
allow {
protocol = "udp"
ports = ["22", "6783"]
}

source_ranges = ["0.0.0.0/0"]
}

// Allocate static IPs for each of the instances, so if reboots occur
// the AWS nodes can rejoin the weave network
resource "google_compute_address" "weave" {
count = 3
name = "weave-gce-${count.index}-addr"
}
90 |
// Declare and provision 3 AWS instances
resource "aws_instance" "weave" {
count = 3
// By default (see variables.tf), these are going to be of type 'm3.large' in region 'eu-west-1'.
instance_type = "${var.aws_instance_type}"

// CoreOS AMI for the chosen region (see `aws_coreos_ami` in variables.tf)
ami = "${var.aws_coreos_ami}"

// Set the SSH key name to use (default: "terraform")
key_name = "${var.aws_key_name}"

// Provisioning (mostly identical to the GCE counter-part)

user_data = "${file("cloud-config.yaml")}"

// Explicit depends_on: all networking pieces must exist before the instance
// can get a public IP and accept the provisioners' ssh connections
depends_on = [ "aws_vpc.weave", "aws_subnet.weave", "aws_security_group.weave", "aws_internet_gateway.weave" ]
security_groups = [ "${aws_security_group.weave.id}" ]
subnet_id = "${aws_subnet.weave.id}"
associate_public_ip_address = true

provisioner "file" {
source = "genenv.sh"
destination = "/tmp/genenv.sh"
connection {
user = "core"
key_file = "${var.aws_key_path}"
}
}

// The only difference here is what arguments are passed to `genenv.sh`:
// AWS nodes additionally receive the external IPs of all GCE nodes
provisioner "remote-exec" {
inline = [
"sudo sh /tmp/genenv.sh aws ${count.index} '${var.weave_launch_password}' ${join(" ", google_compute_instance.weave.*.network.0.external_address)}",
"sudo systemctl start weave",
"sudo systemctl start elasticsearch spark",
]
connection {
user = "core"
key_file = "${var.aws_key_path}"
}
}
}
134 |
// Create a VPC for Terraform to manage, so it doesn't mess with the default one
// NOTE: this makes configuration a little more complex to read, but it's just that
// I have found some issues with creating and destroying resources on default VPC,
// so I decided to make it safer with dedicating a VPC to Terraform.
// TODO: refactor this as a module
resource "aws_vpc" "weave" {
cidr_block = "172.220.0.0/16"
}

// Gives the VPC a route to the public internet (needed both for inbound ssh
// provisioning and for the weave nodes to reach their GCE peers)
resource "aws_internet_gateway" "weave" {
vpc_id = "${aws_vpc.weave.id}"
}

resource "aws_route_table_association" "weave" {
subnet_id = "${aws_subnet.weave.id}"
route_table_id = "${aws_route_table.weave.id}"
}

// Default route for the subnet: everything via the internet gateway
resource "aws_route_table" "weave" {
vpc_id = "${aws_vpc.weave.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.weave.id}"
}
}

// With a non-default VPC we have to create a subnet also
resource "aws_subnet" "weave" {
vpc_id = "${aws_vpc.weave.id}"
cidr_block = "172.220.1.0/24"
map_public_ip_on_launch = true
}

// Firewall rules for our security group only need to allow inbound ssh connections
// (the AWS nodes initiate the weave connections towards GCE themselves)
// NOTE(review): no egress block is declared — on Terraform versions that manage
// egress, this removes the default allow-all outbound rule and would block the
// weave traffic towards GCE; confirm against the pinned Terraform version.
resource "aws_security_group" "weave" {
name = "weave"
description = "SSH access from anywhere"
vpc_id = "${aws_vpc.weave.id}"

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
181 |
--------------------------------------------------------------------------------
/sparkles/cloud/infra/outputs.tf:
--------------------------------------------------------------------------------
1 | #output "aws_instances" {
2 | # value = "AWS instances:\n - ${join("\n - ", aws_instance.weave.*.public_ip)}"
3 | #}
4 |
5 | #output "gce_instances" {
6 | # value = "GCE instances:\n - ${join("\n - ", google_compute_instance.weave.*.network.0.external_address)}"
7 | #}
8 |
--------------------------------------------------------------------------------
/sparkles/cloud/infra/providers.tf:
--------------------------------------------------------------------------------
// AWS credentials and region come from variables.tf (override via tfvars)
provider "aws" {
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
region = "${var.aws_region}"
}

// GCE auth uses the legacy account-file / client-secrets JSON pair
// NOTE(review): `client_secrets_file` was dropped in later google provider
// releases — confirm the pinned Terraform version before upgrading.
provider "google" {
account_file = "${var.gce_account_file}"
client_secrets_file = "${var.gce_client_secrets_file}"
project = "${var.gce_project_name}"
region = "${var.gce_region}"
}
13 |
--------------------------------------------------------------------------------
/sparkles/cloud/infra/variables.tf:
--------------------------------------------------------------------------------
1 | variable "weave_launch_password" {
2 | description = "Set salt (NaCL) passphrase to encrypt weave network"
3 | }
4 |
5 | variable "aws_access_key" {
6 | description = "Your AWS API access key"
7 | default = ""
8 | }
9 |
10 | variable "aws_secret_key" {
11 | description = "Your AWS API secret key"
12 | default = ""
13 | }
14 |
15 | variable "aws_region" {
16 | description = "Region to run AWS instances in"
17 | default = "eu-west-1"
18 | }
19 |
20 | variable "aws_key_name" {
21 | description = "Name of the SSH key pair in the chosen AWS region"
22 | default = "terraform"
23 | }
24 |
25 | variable "aws_key_path" {
26 | description = "Path to private SSH key for the chosen AWS region"
27 | default = "~/.ssh/ec2_terraform.eu-west-1.pem"
28 | }
29 |
30 | variable "aws_coreos_ami" {
31 | description = "Name of CoreOS AMI in the chosen AWS region for instances to use"
32 | default = "ami-5b911f2c"
33 | }
34 |
variable "aws_instance_type" {
// Typo fixed: "ot use" -> "to use"
description = "Type of instance to use in AWS"
default = "m3.large"
}
39 |
40 | variable "gce_account_file" {
41 | description = "Path to your GCE account credentials file"
42 | default = "account.json"
43 | }
44 |
45 | variable "gce_client_secrets_file" {
46 | description = "Path to your GCE client secrets file"
47 | default = "client_secrets.json"
48 | }
49 |
50 | variable "gce_project_name" {
51 | description = "Name of your existing GCE project"
52 | default = ""
53 | }
54 |
55 | variable "gce_region" {
56 | description = "Region to run GCE instances in"
57 | default = "us-central1"
58 | }
59 |
60 | variable "gce_zone" {
61 | description = "Zone to run GCE instances in"
62 | default = "us-central1-a"
63 | }
64 |
65 | variable "gce_key_path" {
66 | description = "Path to private SSH key for the GCE instances"
67 | default = "~/.ssh/google_compute_engine"
68 | }
69 |
70 | variable "gce_coreos_disk_image" {
71 | description = "Name of CoreOS Root disk image for the GCE instances to use"
72 | default = "coreos-stable-557-2-0-v20150210"
73 | }
74 |
variable "gce_machine_type" {
// Typo fixed: "ot use" -> "to use"
description = "Type of instance to use in GCE"
default = "n1-standard-1"
}
79 |
--------------------------------------------------------------------------------
/sparkles/config.rb:
--------------------------------------------------------------------------------
# Pick the cluster definition: join an existing remote weave cluster when
# both WEAVE_PASSWORD and WEAVE_PEERS are set, otherwise build a local one.
here = File.dirname(__FILE__)
cluster_script = if ENV['WEAVE_PASSWORD'] && ENV['WEAVE_PEERS']
  "join-remote-weave-cluster.rb"
else
  "local-cluster.rb"
end
require File.join(here, cluster_script)

# Optional per-user overrides; it's fine for the file to be absent.
begin
  require File.join(here, 'config-override.rb')
rescue LoadError
end
11 |
--------------------------------------------------------------------------------
/sparkles/elasticsearch-spark-test/.gitignore:
--------------------------------------------------------------------------------
1 | project/project/
2 | project/target/
3 | target/
4 |
--------------------------------------------------------------------------------
/sparkles/elasticsearch-spark-test/build.sbt:
--------------------------------------------------------------------------------
import AssemblyKeys._

assemblySettings

name := "sparkles"

version := "0.0.1"

scalaVersion := "2.11.4"

libraryDependencies += "org.elasticsearch" % "elasticsearch-hadoop" % "2.0.2" // "2.1.0.Beta3"

// NB: conjars.org is the Conjars repository (Cascading ecosystem) — the
// previous label "Clojars" was a mislabel, the URL never pointed at Clojars.
resolvers += "Conjars" at "http://conjars.org/repo"

// Deduplicate conflicting files when sbt-assembly builds the fat jar.
mergeStrategy in assembly <<= (mergeStrategy in assembly) { (old) =>
  {
    case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
    case m if m.startsWith("META-INF") => MergeStrategy.discard
    case PathList("javax", "servlet", xs @ _*) => MergeStrategy.first
    case PathList("org", "apache", xs @ _*) => MergeStrategy.first
    case PathList("org", "jboss", xs @ _*) => MergeStrategy.first
    case "about.html" => MergeStrategy.rename
    case "reference.conf" => MergeStrategy.concat
    case _ => MergeStrategy.first
  }
}
--------------------------------------------------------------------------------
/sparkles/elasticsearch-spark-test/build.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Build the assembly jar inside the sbt container, mounting the project
# directory at /io. $(pwd) is quoted so paths containing spaces survive,
# and $(...) replaces the legacy backtick form.
docker run -t -i -v "$(pwd)":/io errordeveloper/sbt:latest ';assembly ;exit'
--------------------------------------------------------------------------------
/sparkles/elasticsearch-spark-test/project/plugins.sbt:
--------------------------------------------------------------------------------
//resolvers += Resolver.url("artifactory", url("http://scalasbt.artifactoryonline.com/scalasbt/sbt-plugin-releases"))(Resolver.ivyStylePatterns)

// NOTE(review): recent sbt versions reject plain-http resolvers; switch these
// to https mirrors if this build is ever bumped — confirm the hosts still
// serve these paths first.
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"

resolvers += "Spray Repository" at "http://repo.spray.cc/"

addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.10.2")
--------------------------------------------------------------------------------
/sparkles/join-remote-weave-cluster.rb:
--------------------------------------------------------------------------------
$num_instances=1
$vb_memory=2048

# Cluster-join parameters come from the environment; config.rb only loads
# this file when both variables are set.
$password = ENV['WEAVE_PASSWORD']
$known_weave_nodes = ENV['WEAVE_PEERS']

$weavedns_addr = '10.10.254.1/16'

# Render the /etc/weave.env payload consumed by weave.service.
# NB: %W interpolates but does not word-split interpolated values, so a
# space-separated peer list stays on one line.
def genenv_content()
  %W(
    WEAVE_PEERS="#{$known_weave_nodes}"
    WEAVE_PASSWORD="#{$password}"
    WEAVEDNS_ADDR="#{$weavedns_addr}"
  ).join("\n")
end

# cloud-config write_files entry describing /etc/weave.env.
def genenv()
  {
    'path' => "/etc/weave.env",
    'permissions' => '0600',
    'owner' => 'root',
    'content' => genenv_content(),
  }
end

# Only regenerate user-data on `vagrant up` and when the shared template is
# present. (File.exists? was deprecated and removed in Ruby 3.2 — File.exist?
# is the supported spelling.)
if File.exist?('cloud/cloud-config.yaml') && ARGV[0].eql?('up')
  require 'yaml'

  # Drop the '#cloud-config' marker line before parsing as plain YAML.
  data = YAML.load(IO.readlines('cloud/cloud-config.yaml')[1..-1].join)

  data['write_files'] << genenv()

  data['coreos']['units'] << {
    'name' => 'provisioning-completed.target',
    'command' => 'start',
    'enable' => true,
    'content' =>
      "[Unit]\n" \
      "Requires=weave.service\n" \
      "RefuseManualStart=no\n" \
      "Wants=weave.service\n" \
      "[Install]\n" \
      "WantedBy=multi-user.target\n"
  }

  lines = YAML.dump(data).split("\n")
  # Restore the mandatory cloud-config marker (YAML.dump emits '---').
  lines[0] = '#cloud-config'

  open(File.join(File.dirname(__FILE__), "user-data"), 'w') do |f|
    f.puts(lines.join("\n"))
  end
end
53 |
--------------------------------------------------------------------------------
/sparkles/local-cluster.rb:
--------------------------------------------------------------------------------
$num_instances=3
$vb_memory=2048

# One shared weave password for the whole local cluster, freshly generated
# on every run.
require 'securerandom'
$password = SecureRandom.uuid

# Render the /etc/weave.env payload for node number `count` (0-based).
# Node 0 runs the Spark master; every other node is a worker peering with
# the first VM's fixed Vagrant address.
def genenv_content(count)
  case count
  when 0
    known_weave_nodes=''
    spark_node_role='master'
    spark_container_args=''
    spark_node_name="spark-#{spark_node_role}.weave.local"
  else
    known_weave_nodes='172.17.8.101'
    spark_node_role='worker'
    spark_container_args='spark://spark-master.weave.local:7077'
    spark_node_name="spark-#{spark_node_role}-#{count}.weave.local"
  end

  weavedns_addr="10.10.2.1#{count}/16"
  spark_node_addr="10.10.1.1#{count}/24"
  elasticsearch_node_addr="10.10.1.2#{count}/24"

  # NB: %W interpolates but does not word-split interpolated values.
  %W(
    WEAVE_PEERS="#{known_weave_nodes}"
    WEAVE_PASSWORD="#{$password}"
    WEAVEDNS_ADDR="#{weavedns_addr}"
    SPARK_NODE_ADDR="#{spark_node_addr}"
    SPARK_NODE_NAME="#{spark_node_name}"
    SPARK_CONTAINER="errordeveloper/weave-spark-#{spark_node_role}-minimal:latest"
    SPARK_CONTAINER_ARGS="#{spark_container_args}"
    ELASTICSEARCH_NODE_ADDR="#{elasticsearch_node_addr}"
    ELASTICSEARCH_NODE_NAME="elasticsearch-#{count}.weave.local"
    ELASTICSEARCH_CONTAINER="errordeveloper/weave-twitter-river-minimal:latest"
  ).join("\n")
end

# cloud-config write_files entry for node `count`; the file is name-spaced
# per host (core-01, core-02, ...) and symlinked to /etc/weave.env by the
# fix-env-file-path.service unit added below.
def genenv(count)
  {
    'path' => sprintf("/etc/weave.core-%.2d.env", count+1),
    'permissions' => '0600',
    'owner' => 'root',
    'content' => genenv_content(count),
  }
end

# Only regenerate user-data on `vagrant up` and when the shared template is
# present. (File.exists? was deprecated and removed in Ruby 3.2 — File.exist?
# is the supported spelling.)
if File.exist?('cloud/cloud-config.yaml') && ARGV[0].eql?('up')
  require 'yaml'

  # Drop the '#cloud-config' marker line before parsing as plain YAML.
  data = YAML.load(IO.readlines('cloud/cloud-config.yaml')[1..-1].join)

  $num_instances.times { |x| data['write_files'] << genenv(x) }

  data['coreos']['units'] << {
    'name' => 'fix-env-file-path.service',
    'command' => 'start',
    'enable' => true,
    'content' =>
      "[Unit]\n" \
      "Before=install-weave.service\n" \
      "[Service]\n" \
      "Type=oneshot\n" \
      "ExecStart=/bin/ln -s /etc/weave.%H.env /etc/weave.env\n"
  }

  data['coreos']['units'] << {
    'name' => 'provisioning-completed.target',
    'command' => 'start',
    'enable' => true,
    'content' =>
      "[Unit]\n" \
      "Requires=weave.service elasticsearch.service spark.service\n" \
      "RefuseManualStart=no\n" \
      "Wants=weave.service elasticsearch.service spark.service\n" \
      "[Install]\n" \
      "WantedBy=multi-user.target\n"
  }

  lines = YAML.dump(data).split("\n")
  # Restore the mandatory cloud-config marker (YAML.dump emits '---').
  lines[0] = '#cloud-config'

  open(File.join(File.dirname(__FILE__), "user-data"), 'w') do |f|
    f.puts(lines.join("\n"))
  end
end
87 |
--------------------------------------------------------------------------------
/subnet-per-host/.vagrant:
--------------------------------------------------------------------------------
1 | ../.vagrant
--------------------------------------------------------------------------------
/subnet-per-host/Vagrantfile:
--------------------------------------------------------------------------------
1 | ../coreos-vagrant/Vagrantfile
--------------------------------------------------------------------------------
/subnet-per-host/config.rb:
--------------------------------------------------------------------------------
# Vagrant settings for the subnet-per-host demo: three small "weave" nodes.
$num_instances = 3
$instance_name_prefix = "weave"
$vb_memory = 512
--------------------------------------------------------------------------------
/subnet-per-host/user-data:
--------------------------------------------------------------------------------
#cloud-config

write_files:
  # Per-host weave settings; the units below select the right file via
  # EnvironmentFile=/etc/%H.env (systemd %H expands to the hostname).
  # NB: "LAUCH" is misspelled, but weave.service consumes the variable with
  # the same spelling — rename both together if ever fixing it.
  - path: /etc/core-01.env
    permissions: 0644
    owner: root
    content: |
      WEAVE_LAUCH_ARGS=""
      BRIDGE_ADDRESS_CIDR="10.9.1.1/24"
      BREAKOUT_ROUTE="10.9.0.0/16"
      CONTAINER_INTERFACE="eth0"
      PINGER_REMOTE="10.9.2.3"
      GREATER_MESSAGE="Hello from #1"

  # Fixed copy-paste bug: each node now announces its own number in
  # GREATER_MESSAGE (previously all three said "Hello from #1").
  - path: /etc/core-02.env
    permissions: 0644
    owner: root
    content: |
      WEAVE_LAUCH_ARGS="172.17.8.101"
      BRIDGE_ADDRESS_CIDR="10.9.2.1/24"
      BREAKOUT_ROUTE="10.9.0.0/16"
      CONTAINER_INTERFACE="eth0"
      PINGER_REMOTE="10.9.3.3"
      GREATER_MESSAGE="Hello from #2"

  - path: /etc/core-03.env
    permissions: 0644
    owner: root
    content: |
      WEAVE_LAUCH_ARGS="172.17.8.101"
      BRIDGE_ADDRESS_CIDR="10.9.3.1/24"
      BREAKOUT_ROUTE="10.9.0.0/16"
      CONTAINER_INTERFACE="eth0"
      PINGER_REMOTE="10.9.1.3"
      GREATER_MESSAGE="Hello from #3"
coreos:
  etcd:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new
    # WARNING: replace each time you 'vagrant destroy'
    # NOTE(review): discovery is commented out, so etcd will not bootstrap a
    # multi-node cluster here — confirm nothing in this demo relies on it.
    #discovery: https://discovery.etcd.io/
    addr: $public_ipv4:4001
    peer-addr: $public_ipv4:7001
  fleet:
    public-ip: $public_ipv4
  units:
47 | - name: weave-network.target
48 | enable: true
49 | content: |
50 | [Unit]
51 | Description=Weave Network Setup Complete
52 | Documentation=man:systemd.special(7)
53 | RefuseManualStart=no
54 | After=network-online.target
55 |
56 | [Install]
57 | WantedBy=multi-user.target
58 |
59 | - name: 10-weave.network
60 | runtime: false
61 | content: |
62 | [Match]
63 | Type=bridge
64 | Name=weave*
65 |
66 | [Network]
67 |
68 | - name: install-weave.service
69 | enable: true
70 | content: |
71 | [Unit]
72 | After=network-online.target
73 | Before=weave.service
74 | Before=weave-helper.service
75 | Before=docker.service
76 | Description=Install Weave
77 | Documentation=http://zettio.github.io/weave/
78 | Requires=network-online.target
79 |
80 | [Service]
81 | Type=oneshot
82 | RemainAfterExit=yes
83 | ExecStartPre=/usr/bin/wget -N -P /opt/bin https://raw.github.com/errordeveloper/weave-demos/master/poseidon/weave
84 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
85 | ExecStart=/bin/echo Weave Installed
86 |
87 | [Install]
88 | WantedBy=weave-network.target
89 | WantedBy=weave.service
90 |
91 | - name: weave-helper.service
92 | enable: true
93 | content: |
94 | [Unit]
95 | After=install-weave.service
96 | After=docker.service
97 | Description=Weave Network Router
98 | Documentation=http://zettio.github.io/weave/
99 | Requires=docker.service
100 | Requires=install-weave.service
101 |
102 | [Service]
103 | EnvironmentFile=/etc/%H.env
104 | ExecStart=/opt/bin/weave helper $CONTAINER_INTERFACE $BREAKOUT_ROUTE
105 |
106 | [Install]
107 | WantedBy=weave-network.target
108 |
109 | - name: weave.service
110 | enable: true
111 | content: |
112 | [Unit]
113 | After=install-weave.service
114 | After=docker.service
115 | Description=Weave Network Router
116 | Documentation=http://zettio.github.io/weave/
117 | Requires=docker.service
118 | Requires=install-weave.service
119 |
120 | [Service]
121 | EnvironmentFile=/etc/%H.env
122 | ExecStartPre=/usr/bin/docker pull zettio/weave:latest
123 | ExecStartPre=/opt/bin/weave launch $WEAVE_LAUCH_ARGS
124 | ExecStart=/usr/bin/docker logs -f weave
125 | SuccessExitStatus=2
126 | ExecStop=/opt/bin/weave stop
127 |
128 | [Install]
129 | WantedBy=weave-network.target
130 |
131 | - name: install-busybox.service
132 | command: start
133 | enable: true
134 | content: |
135 | [Unit]
136 | After=docker.service
137 | Description=Install BusyBox
138 | Documentation=http://zettio.github.io/weave/
139 | Requires=docker.service
140 |
141 | [Service]
142 | Type=oneshot
143 | RemainAfterExit=yes
144 | ExecStart=/usr/bin/docker pull busybox:latest
145 |
146 | - name: pinger.service
147 | command: start
148 | enable: true
149 | content: |
150 | [Unit]
151 | After=weave-network.target
152 | After=install-busybox.service
153 | Description=Weave Network Test Monitor
154 | Documentation=http://zettio.github.io/weave/
155 | Requires=weave-network.target
156 | Requires=install-busybox.service
157 |
158 | [Service]
159 | EnvironmentFile=/etc/%H.env
160 | Type=oneshot
161 | RemainAfterExit=yes
162 | ExecStart=/usr/bin/docker \
163 | run -t -i \
164 | --name=pinger busybox:latest \
165 | ping $PINGER_REMOTE
166 |
167 | - name: greater.service
168 | command: start
169 | enable: true
170 | content: |
171 | [Unit]
172 | After=weave-network.target
173 | After=install-busybox.service
174 | Description=Weave Network Test Service
175 | Documentation=http://zettio.github.io/weave/
176 | Requires=weave-network.target
177 | Requires=install-busybox.service
178 |
179 | [Service]
180 | EnvironmentFile=/etc/%H.env
181 | Type=oneshot
182 | RemainAfterExit=yes
183 | ExecStart=/usr/bin/docker \
184 | run -t -i \
185 | --name=greater busybox:latest \
186 | nc -ll -p 2000 0.0.0.0 -e /bin/echo $GREATER_MESSAGE
187 |
188 | - name: tester.service
189 | command: start
190 | enable: true
191 | content: |
192 | [Unit]
193 | After=weave-network.target
194 | After=install-busybox.service
195 | Description=Weave Network Test
196 | Documentation=http://zettio.github.io/weave/
197 | Requires=weave-network.target
198 | Requires=install-busybox.service
199 |
200 | [Service]
201 | EnvironmentFile=/etc/%H.env
202 | Type=oneshot
203 | RemainAfterExit=yes
204 | ExecStart=/usr/bin/docker \
205 | run -t -i \
206 | --name=tester -d busybox:latest
207 |
208 | - name: docker.service
209 | enable: true
210 | content: |
211 | [Unit]
212 | After=network.target
213 | After=install-weave.service
214 | Before=weave.service
215 | Description=Docker Application Container Engine
216 | Documentation=http://docs.docker.io
217 | Requires=network.target
218 | Requires=install-weave.service
219 |
220 | [Service]
221 | EnvironmentFile=/etc/%H.env
222 | ExecStartPre=/bin/mount --make-rprivate /
223 | ExecStartPre=/opt/bin/weave setup
224 | ExecStartPre=/usr/bin/ip addr add dev weave $BRIDGE_ADDRESS_CIDR
225 | ExecStartPre=/usr/bin/ip route add $BREAKOUT_ROUTE dev weave scope link
226 | ExecStartPre=/usr/bin/ip route add 224.0.0.0/4 dev weave
227 | ExecStart=/usr/bin/docker --daemon --storage-driver=btrfs -H fd:// --bridge=weave
228 |
229 | [Install]
230 | WantedBy=multi-user.target
231 | WantedBy=weave-network.target
232 |
233 | - name: docker-tcp.socket
234 | command: start
235 | enable: true
236 | content: |
237 | [Unit]
238 | Description=Docker Socket for the AP
239 |
240 | [Socket]
241 | ListenStream=2375
242 | Service=docker.service
243 | BindIPv6Only=both
244 |
245 | [Install]
246 | WantedBy=sockets.target
247 |
--------------------------------------------------------------------------------
/terraform-example/.gitignore:
--------------------------------------------------------------------------------
1 | account.json
2 | client_secrets.json
3 | kill*
4 | terraform-bin*
5 | terraform.tfstate
6 | terraform.tfstate.backup
7 | tfvars
8 | ssh_*
9 |
--------------------------------------------------------------------------------
/terraform-example/cloud-config.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | coreos:
3 | update:
4 | ## NB: You should enable updates in production
5 | ## Terraform already takes care of static IPs
6 | ## in GCE, so it should be safe for weave nodes
7 | ## to rejoin after reboot
8 | reboot-strategy: 'off'
9 | units:
10 | - name: 10-weave.network
11 | runtime: false
12 | content: |
13 | [Match]
14 | Type=bridge
15 | Name=weave*
16 | [Network]
17 |
18 | - name: weave.service
19 | content: |
20 | [Unit]
21 | After=install-weave.service
22 | Description=Weave Network
23 | Documentation=http://zettio.github.io/weave/
24 | Requires=install-weave.service
25 | [Service]
26 | EnvironmentFile=/etc/weave.env
27 | ExecStartPre=/opt/bin/weave launch $WEAVE_PEERS
28 | ExecStartPre=/opt/bin/weave launch-dns $WEAVEDNS_ADDR
29 | ExecStart=/usr/bin/docker logs -f weave
30 | SuccessExitStatus=2
31 | ExecStop=/opt/bin/weave stop
32 | ExecStop=/opt/bin/weave stop-dns
33 |
34 | - name: install-weave.service
35 | command: start
36 | enable: true
37 | content: |
38 | [Unit]
39 | After=pre-fetch-container-images.service
40 | After=network-online.target
41 | After=docker.service
42 | Description=Install Weave
43 | Documentation=http://zettio.github.io/weave/
44 | Requires=pre-fetch-container-images.service
45 | Requires=network-online.target
46 | Requires=docker.service
47 | [Service]
48 | Type=oneshot
49 | RemainAfterExit=yes
50 | ExecStartPre=/bin/mkdir -p /opt/bin/
51 | ExecStartPre=/usr/bin/curl \
52 | --silent \
53 | --location \
54 | https://github.com/zettio/weave/releases/download/latest_release/weave \
55 | --output /opt/bin/weave
56 | ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
57 | ExecStart=/bin/echo Weave Installed
58 |
59 | - name: pre-fetch-container-images.service
60 | command: start
61 | enable: true
62 | content: |
63 | [Unit]
64 | After=docker.service
65 | Requires=docker.service
66 | [Service]
67 | EnvironmentFile=/etc/pre-fetch-container-images.env
68 | Type=oneshot
69 | RemainAfterExit=yes
70 | ExecStart=/bin/sh -c 'for i in $PRE_FETCH_CONTAINER_IMAGES; do /usr/bin/docker pull $i; done'
71 |
72 | write_files:
73 | - path: /etc/pre-fetch-container-images.env
74 | permissions: 0644
75 | owner: root
76 | content: |
77 | PRE_FETCH_CONTAINER_IMAGES="\
78 | zettio/weave:latest \
79 | zettio/weavedns:latest \
80 | zettio/weavetools:latest \
81 | "
82 | # - path: /etc/weave.env
83 | # permissions: 0600
84 | # owner: root
85 | # content: |
86 | # WEAVE_PEERS="${peers}"
87 | # WEAVE_PASSWORD="${crypt}"
88 | # WEAVEDNS_ADDR="10.10.2.1${count}/16"
89 |
--------------------------------------------------------------------------------
/terraform-example/connections.graffle/data.plist:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/terraform-example/connections.graffle/data.plist
--------------------------------------------------------------------------------
/terraform-example/connections.graffle/image2.tiff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/terraform-example/connections.graffle/image2.tiff
--------------------------------------------------------------------------------
/terraform-example/connections.graffle/image3.tiff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/terraform-example/connections.graffle/image3.tiff
--------------------------------------------------------------------------------
/terraform-example/connections.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/terraform-example/connections.png
--------------------------------------------------------------------------------
/terraform-example/genenv-aws-only.sh:
--------------------------------------------------------------------------------
#!/bin/sh
## This script takes the instance count, the weave password, and the list of
## peer addresses. It only generates and writes /etc/weave.env; it doesn't
## run anything.
count=$1
crypt=$2
shift 2

## "$*" joins the remaining arguments (peer addresses) with spaces; the
## behaviour of "$@"/"${@}" in this context is unspecified in POSIX sh.
cat << ENVIRON | sudo tee /etc/weave.env
WEAVE_PEERS="$*"
WEAVE_PASSWORD="${crypt}"
WEAVEDNS_ADDR="10.10.2.1${count}/16"
ENVIRON
--------------------------------------------------------------------------------
/terraform-example/genenv.sh:
--------------------------------------------------------------------------------
#!/bin/sh
## This script takes the name of the cloud, the instance count, and the
## weave password. It only generates and writes /etc/weave.env; it doesn't
## run anything.
cloud=$1
count=$2
crypt=$3
shift 3

## NB: `break` is only meaningful inside loops; a case branch is terminated
## by `;;` alone (POSIX sh), so no break statements belong here.
case "weave-${cloud}-${count}" in
  (weave-gce-0)
    ## first machine in GCE doesn't yet know of any other peers
    known_weave_nodes=''
    ;;
  (weave-gce-*)
    ## any other node in GCE connects to the first one by native DNS name
    known_weave_nodes='weave-gce-0'
    ;;
  (weave-aws-*)
    ## every node in AWS connects to all GCE nodes by IP address;
    ## "$*" joins the remaining arguments with spaces
    known_weave_nodes="$*"
    ;;
esac

## Disjoint weaveDNS ranges per cloud so addresses never collide
case "${cloud}" in
  (gce)
    weavedns_addr="10.10.2.1${count}/16"
    ;;
  (aws)
    weavedns_addr="10.10.2.2${count}/16"
    ;;
esac

cat << ENVIRON | sudo tee /etc/weave.env
WEAVE_PEERS="${known_weave_nodes}"
WEAVE_PASSWORD="${crypt}"
WEAVEDNS_ADDR="${weavedns_addr}"
ENVIRON
--------------------------------------------------------------------------------
/terraform-example/gensshwrapper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ## Generates ./ssh_node-<cloud>-<count>: an executable wrapper that ssh-es
3 | ## into the given IP with the given key and forwards any extra arguments.
4 | cloud="${1}"
5 | count="${2}"
6 | node_alias="node-${cloud}-${count}"
7 | ssh_key_path="${3}"
8 | ip_addr="${4}"
9 |
10 | ## NB: the heredoc delimiter is unquoted, so `\\` and `\$` are required to put
11 | ## literal `\` (line continuations) and `$` into the generated script; a bare
12 | ## `\` at end-of-line would be consumed as a continuation of *this* script.
13 | cat << WRAPPER > ./ssh_${node_alias}
14 | #!/bin/sh
15 | ssh core@${ip_addr} \\
16 | -o Compression=yes \\
17 | -o LogLevel=FATAL \\
18 | -o StrictHostKeyChecking=no \\
19 | -o UserKnownHostsFile=/dev/null \\
20 | -o IdentitiesOnly=yes \\
21 | -o IdentityFile=${ssh_key_path} \\
22 | "\$@"
23 | WRAPPER
24 | chmod +x ./ssh_${node_alias}
25 |
--------------------------------------------------------------------------------
/terraform-example/infra-aws-only/main.tf:
--------------------------------------------------------------------------------
1 | ## TODO: Refactor into a module
2 |
3 | resource "aws_instance" "weave_head_node" { # first AWS node; the other AWS nodes bootstrap off its private IP
4 | count = 1
5 | instance_type = "${var.aws_instance_type}"
6 |
7 | ami = "${var.aws_coreos_ami}"
8 |
9 | key_name = "${var.aws_key_name}"
10 |
11 | user_data = "${file("cloud-config.yaml")}" # cloud-config installs the weave systemd units
12 |
13 | security_groups = [ "${aws_security_group.weave.id}" ]
14 | subnet_id = "${aws_subnet.weave.id}"
15 | associate_public_ip_address = true
16 |
17 | provisioner "file" { # upload the env-file generator script
18 | source = "genenv-aws-only.sh"
19 | destination = "/tmp/genenv.sh"
20 | connection {
21 | user = "core"
22 | key_file = "${var.aws_key_path}" # Terraform 0.x connection attribute
23 | agent = false
24 | }
25 | }
26 |
27 | provisioner "remote-exec" { # generate /etc/weave.env (no peers for the head node), then start weave
28 | inline = [
29 | "sudo sh /tmp/genenv.sh ${count.index} '${var.weave_launch_password}'", # NOTE(review): index 0 here collides with aws_instance.weave[0] — both yield WEAVEDNS_ADDR=10.10.2.10/16 in genenv-aws-only.sh; confirm intended
30 | "sudo systemctl start weave",
31 | ]
32 | connection {
33 | user = "core"
34 | key_file = "${var.aws_key_path}"
35 | agent = false
36 | }
37 | }
38 |
39 | provisioner "local-exec" { # write a local ./ssh_node-aws_head-0 login helper
40 | command = "sh gensshwrapper.sh aws_head '${count.index}' '${var.aws_key_path}' '${self.public_ip}'"
41 | }
42 | }
43 |
44 | resource "aws_instance" "weave" {
45 | count = "${var.aws_instance_count}"
46 | instance_type = "${var.aws_instance_type}"
47 |
48 | ami = "${var.aws_coreos_ami}"
49 |
50 | key_name = "${var.aws_key_name}"
51 |
52 | user_data = "${file("cloud-config.yaml")}"
53 |
54 | security_groups = [ "${aws_security_group.weave.id}" ]
55 | subnet_id = "${aws_subnet.weave.id}"
56 | associate_public_ip_address = true
57 |
58 | provisioner "file" {
59 | source = "genenv-aws-only.sh"
60 | destination = "/tmp/genenv.sh"
61 | connection {
62 | user = "core"
63 | key_file = "${var.aws_key_path}"
64 | agent = false
65 | }
66 | }
67 |
68 | provisioner "remote-exec" {
69 | inline = [
70 | "sudo sh /tmp/genenv.sh ${count.index} '${var.weave_launch_password}' ${aws_instance.weave_head_node.private_ip}",
71 | "sudo systemctl start weave",
72 | ]
73 | connection {
74 | user = "core"
75 | key_file = "${var.aws_key_path}"
76 | agent = false
77 | }
78 | }
79 |
80 | provisioner "local-exec" {
81 | command = "sh gensshwrapper.sh aws '${count.index}' '${var.aws_key_path}' '${self.public_ip}'"
82 | }
83 | }
84 |
85 | resource "aws_vpc" "weave" {
86 | cidr_block = "172.220.0.0/16"
87 | enable_dns_support = true
88 | enable_dns_hostnames = true
89 | }
90 |
91 | resource "aws_internet_gateway" "weave" {
92 | vpc_id = "${aws_vpc.weave.id}"
93 | }
94 |
95 | resource "aws_route_table_association" "weave" {
96 | subnet_id = "${aws_subnet.weave.id}"
97 | route_table_id = "${aws_route_table.weave.id}"
98 | }
99 |
100 | resource "aws_route_table" "weave" {
101 | vpc_id = "${aws_vpc.weave.id}"
102 | route {
103 | cidr_block = "0.0.0.0/0"
104 | gateway_id = "${aws_internet_gateway.weave.id}"
105 | }
106 | }
107 |
108 | resource "aws_subnet" "weave" {
109 | vpc_id = "${aws_vpc.weave.id}"
110 | cidr_block = "172.220.1.0/24"
111 | map_public_ip_on_launch = true
112 | }
113 |
114 | resource "aws_security_group" "weave" {
115 | name = "weave"
116 | description = "SSH access from anywhere"
117 | vpc_id = "${aws_vpc.weave.id}"
118 |
119 | ingress {
120 | from_port = 22
121 | to_port = 22
122 | protocol = "tcp"
123 | cidr_blocks = ["0.0.0.0/0"]
124 | }
125 |
126 | ingress {
127 | from_port = 6783
128 | to_port = 6783
129 | protocol = "tcp"
130 | self = true
131 | }
132 |
133 | ingress {
134 | from_port = 6783
135 | to_port = 6783
136 | protocol = "udp"
137 | self = true
138 | }
139 |
140 | egress {
141 | from_port = 0
142 | to_port = 0
143 | protocol = "-1"
144 | cidr_blocks = ["0.0.0.0/0"]
145 | }
146 | }
147 |
--------------------------------------------------------------------------------
/terraform-example/infra-aws-only/outputs.tf:
--------------------------------------------------------------------------------
1 | output "aws_instances_external" {
2 | value = "[ ${aws_instance.weave_head_node.public_ip}, ${join(", ", aws_instance.weave.*.public_ip)} ]"
3 | }
4 |
5 | output "aws_instances_internal" {
6 | value = "[ ${aws_instance.weave_head_node.private_ip}, ${join(", ", aws_instance.weave.*.private_ip)} ]"
7 | }
8 |
--------------------------------------------------------------------------------
/terraform-example/infra-aws-only/providers.tf:
--------------------------------------------------------------------------------
1 | ../infra/providers.tf
--------------------------------------------------------------------------------
/terraform-example/infra-aws-only/variables.tf:
--------------------------------------------------------------------------------
1 | ../infra/variables.tf
--------------------------------------------------------------------------------
/terraform-example/infra/main.tf:
--------------------------------------------------------------------------------
1 | // Declare and provision 3 GCE instances
2 | resource "google_compute_instance" "weave" {
3 | count = "${var.gce_instance_count}"
4 | // By default (see variables.tf), these are going to be of type 'n1-standard-1' in zone 'us-central1-a'.
5 | machine_type = "${var.gce_machine_type}"
6 | zone = "${var.gce_zone}"
7 |
8 | // Ensure clear host naming scheme, which results in functional native DNS within GCE
9 | name = "weave-gce-${count.index}" // => `weave-gce-{0,1,2}`
10 |
11 | // Attach an alpha image of CoreOS as the primary disk
12 | disk {
13 | image = "${var.gce_coreos_disk_image}"
14 | }
15 |
16 | // Attach to a network with some custom firewall rules and static IPs (details further down)
17 | network {
18 | source = "${google_compute_network.weave.name}"
19 | address = "${element(google_compute_address.weave.*.address, count.index)}"
20 | }
21 |
22 | // Provisioning
23 |
24 | // 1. Cloud Config phase writes systemd unit definitions and only starts two host-independent units —
25 | // `pre-fetch-container-images.service` and `install-weave.service`
26 | metadata {
27 | user-data = "${file("cloud-config.yaml")}"
28 | }
29 |
30 | // 2. Upload shell script that generates host-specific environment file to be used by `weave.service`
31 | provisioner "file" {
32 | source = "genenv.sh"
33 | destination = "/tmp/genenv.sh"
34 | connection {
35 | user = "core"
36 | key_file = "${var.gce_key_path}"
37 | agent = false
38 | }
39 | }
40 |
41 | // 3. Run the `genenv.sh` script
42 | // 4. Start `weave.service`
43 | provisioner "remote-exec" {
44 | inline = [
45 | "sudo sh /tmp/genenv.sh gce ${count.index} '${var.weave_launch_password}'",
46 | "sudo systemctl start weave",
47 | ]
48 | connection {
49 | user = "core"
50 | key_file = "${var.gce_key_path}"
51 | agent = false
52 | }
53 | }
54 |
55 | provisioner "local-exec" {
56 | command = "sh gensshwrapper.sh gce '${count.index}' '${var.gce_key_path}' '${self.network.0.external_address}'"
57 | }
58 | }
59 |
60 | // Custom GCE network declaration, so we can set firewall rules below
61 | resource "google_compute_network" "weave" {
62 | name = "weave"
63 | ipv4_range = "172.220.0.0/16"
64 | }
65 |
66 | // Firewall rules for the network (allow inbound ssh and weave connections)
67 | resource "google_compute_firewall" "weave" {
68 | name = "ports"
69 | network = "${google_compute_network.weave.name}"
70 |
71 | allow {
72 | protocol = "icmp"
73 | }
74 |
75 | allow {
76 | protocol = "tcp"
77 | ports = ["22", "6783"]
78 | }
79 |
80 | allow {
81 | protocol = "udp"
82 | ports = ["6783"] // ssh is TCP-only, so udp/22 served no purpose; weave's data path uses udp/6783
83 | }
84 |
85 | source_ranges = ["0.0.0.0/0"]
86 | }
87 |
88 | // Allocate static IPs for each of the instances, so if reboots occur
89 | // the AWS nodes can rejoin the weave network
90 | resource "google_compute_address" "weave" {
91 | count = "${var.gce_instance_count}"
92 | name = "weave-gce-${count.index}-addr"
93 | }
94 |
95 | // Declare and provision 3 AWS instances
96 | resource "aws_instance" "weave" {
97 | count = "${var.aws_instance_count}"
98 | // By default (see variables.tf), these are going to be of type 'm3.large' in region 'eu-west-1'.
99 | instance_type = "${var.aws_instance_type}"
100 |
101 | // Use an alpha image of CoreOS
102 | ami = "${var.aws_coreos_ami}"
103 |
104 | // Set the SSH key name to use (default: "terraform")
105 | key_name = "${var.aws_key_name}"
106 |
107 | // Provisioning (mostly identical to the GCE counter-part)
108 |
109 | user_data = "${file("cloud-config.yaml")}"
110 |
111 | #depends_on = [ "aws_vpc.weave", "aws_subnet.weave", "aws_security_group.weave", "aws_internet_gateway.weave" ]
112 | security_groups = [ "${aws_security_group.weave.id}" ]
113 | subnet_id = "${aws_subnet.weave.id}"
114 | associate_public_ip_address = true
115 |
116 | provisioner "file" {
117 | source = "genenv.sh"
118 | destination = "/tmp/genenv.sh"
119 | connection {
120 | user = "core"
121 | key_file = "${var.aws_key_path}"
122 | agent = false
123 | }
124 | }
125 |
126 | // The only difference here is what arguments are passed to `genenv.sh`
127 | provisioner "remote-exec" {
128 | inline = [
129 | "sudo sh /tmp/genenv.sh aws ${count.index} '${var.weave_launch_password}' ${join(" ", google_compute_instance.weave.*.network.0.external_address)}",
130 | "sudo systemctl start weave",
131 | ]
132 | connection {
133 | user = "core"
134 | key_file = "${var.aws_key_path}"
135 | agent = false
136 | }
137 | }
138 |
139 | provisioner "local-exec" {
140 | command = "sh gensshwrapper.sh aws '${count.index}' '${var.aws_key_path}' '${self.public_ip}'"
141 | }
142 | }
143 |
144 | // Create a VPC for Terraform to manage, so it doesn't mess with the default one
145 | // NOTE: this is not meant to make the configuration more complex, it's just that
146 | // I have found some issues with creating and destroying resources on default VPC,
147 | // so I decided to make it safer with dedicating a VPC to Terraform.
148 | // TODO: refactor this as a module
149 | resource "aws_vpc" "weave" {
150 | cidr_block = "172.220.0.0/16"
151 | }
152 |
153 | resource "aws_internet_gateway" "weave" {
154 | vpc_id = "${aws_vpc.weave.id}"
155 | }
156 |
157 | resource "aws_route_table_association" "weave" {
158 | subnet_id = "${aws_subnet.weave.id}"
159 | route_table_id = "${aws_route_table.weave.id}"
160 | }
161 |
162 | resource "aws_route_table" "weave" {
163 | vpc_id = "${aws_vpc.weave.id}"
164 | route {
165 | cidr_block = "0.0.0.0/0"
166 | gateway_id = "${aws_internet_gateway.weave.id}"
167 | }
168 | }
169 |
170 | // With a non-default VPC we have to create a subnet also
171 | resource "aws_subnet" "weave" {
172 | vpc_id = "${aws_vpc.weave.id}"
173 | cidr_block = "172.220.1.0/24"
174 | map_public_ip_on_launch = true
175 | }
176 |
177 | // Firewall rules for our security group only need to allow inbound ssh connections
178 | resource "aws_security_group" "weave" {
179 | name = "weave"
180 | description = "SSH access from anywhere"
181 | vpc_id = "${aws_vpc.weave.id}"
182 |
183 | ingress {
184 | from_port = 22
185 | to_port = 22
186 | protocol = "tcp"
187 | cidr_blocks = ["0.0.0.0/0"]
188 | }
189 |
190 | ingress {
191 | from_port = 6783
192 | to_port = 6783
193 | protocol = "tcp"
194 | self = true
195 | }
196 |
197 | ingress {
198 | from_port = 6783
199 | to_port = 6783
200 | protocol = "udp"
201 | self = true
202 | }
203 |
204 | egress {
205 | from_port = 0
206 | to_port = 0
207 | protocol = "-1"
208 | cidr_blocks = ["0.0.0.0/0"]
209 | }
210 | }
211 |
--------------------------------------------------------------------------------
/terraform-example/infra/outputs.tf:
--------------------------------------------------------------------------------
1 | output "aws_instances_external" {
2 | value = "[ ${join(", ", aws_instance.weave.*.public_ip)} ]"
3 | }
4 |
5 | output "gce_instances_external" {
6 | value = "[ ${join(", ", google_compute_instance.weave.*.network.0.external_address)} ]"
7 | }
8 |
9 | output "aws_instances_internal" {
10 | value = "[ ${join(", ", aws_instance.weave.*.private_ip)} ]"
11 | }
12 |
13 | output "gce_instances_internal" {
14 | value = "[ ${join(", ", google_compute_instance.weave.*.network.0.internal_address)} ]"
15 | }
16 |
--------------------------------------------------------------------------------
/terraform-example/infra/providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | access_key = "${var.aws_access_key}"
3 | secret_key = "${var.aws_secret_key}"
4 | region = "${var.aws_region}"
5 | }
6 |
7 | provider "google" {
8 | account_file = "${var.gce_account_file}"
9 | # client_secrets_file = "${var.gce_client_secrets_file}" # not needed since 0.4.0
10 | project = "${var.gce_project_name}"
11 | region = "${var.gce_region}"
12 | }
13 |
--------------------------------------------------------------------------------
/terraform-example/infra/variables.tf:
--------------------------------------------------------------------------------
1 | variable "weave_launch_password" {
2 | description = "Set NaCl passphrase used to encrypt the weave network"
3 | }
4 |
5 | variable "aws_access_key" {
6 | description = "Your AWS API access key"
7 | default = ""
8 | }
9 |
10 | variable "aws_secret_key" {
11 | description = "Your AWS API secret key"
12 | default = ""
13 | }
14 |
15 | variable "aws_region" {
16 | description = "Region to run AWS instances in"
17 | default = "eu-west-1"
18 | }
19 |
20 | variable "aws_key_name" {
21 | description = "Name of the SSH key pair in the chosen AWS region"
22 | default = "terraform"
23 | }
24 |
25 | variable "aws_key_path" {
26 | description = "Path to private SSH key for the chosen AWS region"
27 | default = "~/.ssh/ec2_terraform.eu-west-1.pem"
28 | }
29 |
30 | variable "aws_coreos_ami" {
31 | description = "Name of CoreOS AMI in the chosen AWS region for instances to use"
32 | default = "ami-5b911f2c"
33 | }
34 |
35 | variable "aws_instance_type" {
36 | description = "Type of instance to use in AWS"
37 | default = "m3.large"
38 | }
39 |
40 | variable "aws_instance_count" {
41 | description = "Number of instances in AWS"
42 | default = 3
43 | }
44 |
45 | variable "gce_account_file" {
46 | description = "Path to your GCE account credentials file"
47 | default = "account.json"
48 | }
49 |
50 | variable "gce_client_secrets_file" {
51 | description = "Path to your GCE client secrets file"
52 | default = "client_secrets.json"
53 | }
54 |
55 | variable "gce_project_name" {
56 | description = "Name of your existing GCE project"
57 | default = ""
58 | }
59 |
60 | variable "gce_region" {
61 | description = "Region to run GCE instances in"
62 | default = "us-central1"
63 | }
64 |
65 | variable "gce_zone" {
66 | description = "Zone to run GCE instances in"
67 | default = "us-central1-a"
68 | }
69 |
70 | variable "gce_key_path" {
71 | description = "Path to private SSH key for the GCE instances"
72 | default = "~/.ssh/google_compute_engine"
73 | }
74 |
75 | variable "gce_coreos_disk_image" {
76 | description = "Name of CoreOS Root disk image for the GCE instances to use"
77 | default = "coreos-stable-557-2-0-v20150210"
78 | }
79 |
80 | variable "gce_machine_type" {
81 | description = "Type of instance to use in GCE"
82 | default = "n1-standard-1"
83 | }
84 |
85 | variable "gce_instance_count" {
86 | description = "Number of instances in GCE"
87 | default = 3
88 | }
89 |
--------------------------------------------------------------------------------
/terraform-example/terrainfra-gce-aws.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/errordeveloper/weave-demos/cd24bddd6842354b873d08f5deac48a4ac4fbb24/terraform-example/terrainfra-gce-aws.png
--------------------------------------------------------------------------------
/ubuntu-snappy/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant/
2 |
--------------------------------------------------------------------------------
/ubuntu-snappy/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure(2) do |config|
9 | # The most common configuration options are documented and commented below.
10 | # For a complete reference, please see the online documentation at
11 | # https://docs.vagrantup.com.
12 |
13 | # Every Vagrant development environment requires a box. You can search for
14 | # boxes at https://atlas.hashicorp.com/search.
15 | config.vm.box = "ubuntu/ubuntu-core-devel-amd64"
16 |
17 | # Disable automatic box update checking. If you disable this, then
18 | # boxes will only be checked for updates when the user runs
19 | # `vagrant box outdated`. This is not recommended.
20 | # config.vm.box_check_update = false
21 |
22 | # Create a forwarded port mapping which allows access to a specific port
23 | # within the machine from a port on the host machine. In the example below,
24 | # accessing "localhost:8080" will access port 80 on the guest machine.
25 | # config.vm.network "forwarded_port", guest: 80, host: 8080
26 |
27 | # Create a private network, which allows host-only access to the machine
28 | # using a specific IP.
29 | # config.vm.network "private_network", ip: "192.168.33.10"
30 |
31 | # Create a public network, which is generally matched to a bridged network.
32 | # Bridged networks make the machine appear as another physical device on
33 | # your network.
34 | # config.vm.network "public_network"
35 |
36 | # Share an additional folder to the guest VM. The first argument is
37 | # the path on the host to the actual folder. The second argument is
38 | # the path on the guest to mount the folder. And the optional third
39 | # argument is a set of non-required options.
40 | # config.vm.synced_folder "../data", "/vagrant_data"
41 |
42 | # Provider-specific configuration so you can fine-tune various
43 | # backing providers for Vagrant. These expose provider-specific options.
44 | # Example for VirtualBox:
45 | #
46 | # config.vm.provider "virtualbox" do |vb|
47 | # # Display the VirtualBox GUI when booting the machine
48 | # vb.gui = true
49 | #
50 | # # Customize the amount of memory on the VM:
51 | # vb.memory = "1024"
52 | # end
53 | #
54 | # View the documentation for the provider you are using for more
55 | # information on available options.
56 |
57 | # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
58 | # such as FTP and Heroku are also available. See the documentation at
59 | # https://docs.vagrantup.com/v2/push/atlas.html for more information.
60 | # config.push.define "atlas" do |push|
61 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
62 | # end
63 |
64 | # Enable provisioning with a shell script. Additional provisioners such as
65 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
66 | # documentation for more information about their specific syntax and use.
67 | # config.vm.provision "shell", inline: <<-SHELL
68 | # sudo apt-get update
69 | # sudo apt-get install -y apache2
70 | # SHELL
71 | config.vm.provision "file",
72 | source: "docker_1.4.1.001_amd64.snap",
73 | destination: "/tmp/docker_1.4.1.001_amd64.snap"
74 | config.vm.provision 'shell', privileged: true,
75 | inline: 'snappy install /tmp/docker_1.4.1.001_amd64.snap'
76 |
77 | config.vm.provision "shell", privileged: false, inline: <<-SHELL
78 | docker pull errordeveloper/curl:latest
79 | docker run errordeveloper/curl:latest curl \
80 | --location \
81 | --silent \
82 | --insecure \
83 | https://github.com/zettio/weave/releases/download/latest_release/weave \
84 | > ~/weave
85 | chmod +x ~/weave
86 | docker-privilege on
87 | sudo env PATH=$PATH sh -x ./weave launch
88 | #sudo env PATH=$PATH ./weave launch-dns 10.20.1.1/16
89 | SHELL
90 | end
91 |
--------------------------------------------------------------------------------
/ubuntu-snappy/docker.profile:
--------------------------------------------------------------------------------
1 | #
2 | # AppArmor confinement for docker daemon
3 | #
4 | # This confinement is intentionally not restrictive and is here to help guard
5 | # against programming errors and not for security confinement. docker daemon
6 | # requires far too much access to effectively confine and by its very nature it
7 | # must be considered a trusted service.
8 | #
9 |
10 | #include
11 |
12 | # Specified profile variables
13 | @{CLICK_DIR}="{/apps,/custom/click,/usr/share/click/preinstalled}"
14 | @{APP_PKGNAME}="docker"
15 | @{APP_APPNAME}="docker"
16 | @{APP_VERSION}="1.4.1.001"
17 |
18 | profile "docker_docker_1.4.1.001" (attach_disconnected) {
19 | #include
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 |
26 | # FIXME: app upgrades don't perform migration yet. When they do, remove
27 | # these two rules and update package-dir/bin/docker.wrap as per its comments.
28 | # See: https://app.asana.com/0/21120773903349/21160815722783
29 | /var/lib/apps/@{APP_PKGNAME}/ w,
30 | /var/lib/apps/@{APP_PKGNAME}/** wl,
31 |
32 | # Read-only for the install directory
33 | @{CLICK_DIR}/@{APP_PKGNAME}/ r,
34 | @{CLICK_DIR}/@{APP_PKGNAME}/@{APP_VERSION}/ r,
35 | @{CLICK_DIR}/@{APP_PKGNAME}/@{APP_VERSION}/** mrklix,
36 |
37 | # Writable path
38 | @{CLICK_DIR}/@{APP_PKGNAME}/@{APP_VERSION}/.docker/** mrwklix,
39 |
40 | # Need to be able to read access /home to read Dockerfile:
41 | owner /home/** mrwklix,
42 |
43 | # Writable home area
44 | owner @{HOMEDIRS}/apps/@{APP_PKGNAME}/ rw,
45 | owner @{HOMEDIRS}/apps/@{APP_PKGNAME}/** mrwklix,
46 |
47 | # Read-only system area for other versions
48 | /var/lib/apps/@{APP_PKGNAME}/ r,
49 | /var/lib/apps/@{APP_PKGNAME}/** mrkix,
50 |
51 | # TODO: the write on /var/lib/apps/@{APP_PKGNAME}/ is needed in case it
52 | # doesn't exist, but means an app could adjust inode data and affect
53 | # rollbacks.
54 | /var/lib/apps/@{APP_PKGNAME}/ w,
55 |
56 | # Writable system area only for this version.
57 | /var/lib/apps/@{APP_PKGNAME}/@{APP_VERSION}/ w,
58 | /var/lib/apps/@{APP_PKGNAME}/@{APP_VERSION}/** wl,
59 |
60 | /writable/cache/docker/ rw,
61 | /writable/cache/docker/** mrwklix,
62 |
63 | # Allow our pid file and socket
64 | /run/@{APP_PKGNAME}.pid rw,
65 | /run/@{APP_PKGNAME}.sock rw,
66 |
67 | # Wide read access to /proc, but somewhat limited writes for now
68 | @{PROC}/** r,
69 | @{PROC}/[0-9]*/attr/exec w,
70 | @{PROC}/sys/net/** w,
71 |
72 | # Wide read access to /sys
73 | /sys/** r,
74 | # Limit cgroup writes a bit
75 | /sys/fs/cgroup/*/docker/ rw,
76 | /sys/fs/cgroup/*/docker/** rw,
77 | /sys/fs/cgroup/*/system.slice/ rw,
78 | /sys/fs/cgroup/*/system.slice/** rw,
79 |
80 | # We can trace ourselves
81 | ptrace (trace) peer=@{profile_name},
82 |
83 | # Docker needs a lot of caps, but limits them in the app container
84 | capability,
85 |
86 | # Allow talking to systemd
87 | dbus (send)
88 | bus=system
89 | peer=(name=org.freedesktop.systemd*,label=unconfined),
90 | # Allow receiving from unconfined
91 | dbus (receive)
92 | bus=system
93 | peer=(label=unconfined),
94 |
95 | # Allow execute of anything we need
96 | /{,usr/}bin/* pux,
97 | /{,usr/}sbin/* pux,
98 |
99 | # Docker does all kinds of mounts all over the filesystem
100 | /dev/mapper/control rw,
101 | /dev/mapper/docker* rw,
102 | /dev/loop* r,
103 | /dev/loop[0-9]* w,
104 | mount,
105 | umount,
106 | pivot_root,
107 | /.pivot_root*/ rw,
108 |
109 | # for console access
110 | /dev/ptmx rw,
111 |
112 | # For loading the docker-default policy. We might be able to get rid of this
113 | # if we load docker-default ourselves and make docker not do it.
114 | /sbin/apparmor_parser ixr,
115 | /etc/apparmor*/** r,
116 | /var/lib/apparmor/profiles/docker rw,
117 | /etc/apparmor.d/cache/docker* w,
118 | /sys/kernel/security/apparmor/** rw,
119 |
120 | # We'll want to adjust this to support --security-opts...
121 | change_profile -> docker-default,
122 | signal (send) peer=docker-default,
123 | ptrace (read, trace) peer=docker-default,
124 |
125 | #cf bug 1411639
126 | /dev/dm-* rw,
127 | /dev/net/ r,
128 | /dev/snd/ r,
129 | /dev/ r,
130 | /dev/block/ r,
131 | /dev/bsg/ r,
132 | /dev/char/ r,
133 | /dev/cpu/ r,
134 | /dev/disk/ r,
135 | /dev/disk/by-id/ r,
136 | /dev/disk/by-label/ r,
137 | /dev/disk/by-partlabel/ r,
138 | /dev/disk/by-partuuid/ r,
139 | /dev/disk/by-path/ r,
140 | /dev/disk/by-uuid/ r,
141 | /dev/hugepages/ r,
142 | /dev/input/ r,
143 | /dev/input/by-path/ r,
144 |
145 | /dev/mapper/ r,
146 | /dev/mqueue/ r,
147 |
148 | /proc r, # for some reason only this works and not @{PROC} or anything
149 |
150 | change_profile -> unconfined,
151 | ptrace (read) peer=unconfined,
152 | signal (send) peer=unconfined,
153 | }
154 |
--------------------------------------------------------------------------------
/ubuntu-snappy/weave.profile:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | profile weave (attach_disconnected) {
4 | capability,
5 | network,
6 | / rwkl,
7 | /** rwlkm,
8 | /** pix,
9 |
10 | mount,
11 | remount,
12 | umount,
13 | dbus,
14 | signal,
15 | ptrace,
16 | unix,
17 | change_profile -> docker_docker_*,
18 | }
19 |
--------------------------------------------------------------------------------