├── .gitignore
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── README.md
├── default-backend
├── backend.html
└── server.rb
├── init.sh
├── lib
├── connectable.rb
├── digital_ocean.rb
├── digital_ocean
│ ├── api.rb
│ ├── droplet.rb
│ ├── model.rb
│ └── resource.rb
├── document.rb
├── environment.rb
├── github.rb
├── github
│ └── api.rb
├── google.rb
├── google
│ └── api.rb
├── kubernetes.rb
├── kubernetes
│ ├── api.rb
│ ├── resource.rb
│ └── resource
│ │ ├── node.rb
│ │ └── pod.rb
├── minecraft
│ ├── dns
│ │ ├── manifest.rb
│ │ ├── parser.rb
│ │ └── server.rb
│ ├── protocol.rb
│ ├── protocol
│ │ ├── data.rb
│ │ └── packet.rb
│ ├── server.rb
│ └── server
│ │ └── plugin.rb
├── stratus.rb
├── worker.rb
└── worker
│ ├── discord.rb
│ ├── droplet.rb
│ ├── firewall.rb
│ ├── label.rb
│ ├── node.rb
│ ├── org.rb
│ ├── pod.rb
│ ├── repo.rb
│ ├── server.rb
│ └── server
│ ├── generic.rb
│ └── private.rb
├── manifests
├── backend
│ ├── couch.yml
│ ├── mongo.yml
│ ├── rabbit.yml
│ └── redis.yml
├── minecraft
│ ├── bungee.yml
│ ├── event.yml
│ ├── lobby.yml
│ ├── mapdev.yml
│ ├── mixed.yml
│ ├── tournament
│ │ ├── bungee
│ │ │ ├── daemon-set.yml
│ │ │ ├── service.yml
│ │ │ └── stateful-set.yml
│ │ ├── lobby
│ │ │ ├── service.yml
│ │ │ └── stateful-set.yml
│ │ ├── namespace.yml
│ │ └── official
│ │ │ ├── service.yml
│ │ │ └── stateful-set.yml
│ └── uhc.yml
├── storage
│ ├── git.yml
│ └── spaces.yml
├── system
│ ├── nginx.yml
│ ├── restart.yml
│ └── system.yml
└── web
│ ├── api-public.yml
│ ├── api.yml
│ ├── dns.yml
│ ├── ingress.yml
│ ├── site.yml
│ ├── stats.yml
│ └── worker.yml
├── models
├── default-backend
│ ├── Dockerfile
│ ├── build.yml
│ └── cloudbuild.yaml
├── minecraft
│ ├── Dockerfile
│ ├── Dockerfile-bukkit
│ ├── Dockerfile-bungee
│ ├── build.yml
│ └── run.rb
├── util
│ ├── build.yml
│ ├── git
│ │ ├── Dockerfile
│ │ └── git.sh
│ ├── proxy
│ │ ├── Dockerfile
│ │ └── haproxy.cfg
│ └── spaces
│ │ ├── Dockerfile
│ │ ├── acl.sh
│ │ ├── s3cfg
│ │ └── upload.sh
├── web
│ ├── Dockerfile
│ └── mongoid.yml
└── worker
│ ├── Dockerfile
│ ├── build.yml
│ └── cloudbuild.yaml
└── spec
├── dns_spec.rb
├── document_spec.rb
└── environment_spec.rb
/.gitignore:
--------------------------------------------------------------------------------
1 | # Secret files
2 | *secret*.yml
3 | *.secret
4 | /manifests/secret/*
5 | google.json
6 |
7 | # Maven generated files
8 | target
9 | *.jar
10 | dependency-reduced-pom.xml
11 |
12 | # Mac OSX generated files
13 | .DS_Store
14 |
15 | # Eclipse generated files
16 | .classpath
17 | .project
18 | .settings
19 |
20 | # IntelliJ IDEA
21 | .idea
22 | *.iml
23 |
24 | # Vim generated files
25 | *~
26 | *.swp
27 |
28 | # XCode 3 and 4 generated files
29 | *.mode1
30 | *.mode1v3
31 | *.mode2v3
32 | *.perspective
33 | *.perspectivev3
34 | *.pbxuser
35 | *.xcworkspace
36 | xcuserdata
37 | *class
38 |
39 | # Ruby ignores
40 | *.gem
41 | *.rbc
42 | /.config
43 | /coverage/
44 | /InstalledFiles
45 | /pkg/
46 | /spec/reports/
47 | /spec/examples.txt
48 | /test/tmp/
49 | /test/version_tmp/
50 | /tmp/
51 |
52 | # Used by dotenv library to load environment variables.
53 | # .env
54 |
55 | ## Specific to RubyMotion:
56 | .dat*
57 | .repl_history
58 | build/
59 | *.bridgesupport
60 | build-iPhoneOS/
61 | build-iPhoneSimulator/
62 |
63 | ## Specific to RubyMotion (use of CocoaPods):
64 | #
65 | # We recommend against adding the Pods directory to your .gitignore. However
66 | # you should judge for yourself, the pros and cons are mentioned at:
67 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
68 | #
69 | # vendor/Pods/
70 |
71 | ## Documentation cache and generated files:
72 | /.yardoc/
73 | /_yardoc/
74 | /doc/
75 | /rdoc/
76 |
77 | ## Environment normalization:
78 | /.bundle/
79 | /vendor/bundle
80 | /lib/bundler/man/
81 |
82 | # for a library or gem, you might want to ignore these files since the code is
83 | # intended to run in multiple environments; otherwise, check them in:
84 | # Gemfile.lock
85 | # .ruby-version
86 | # .ruby-gemset
87 |
88 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
89 | .rvmrc
90 | .kubeconfig
91 | manifests/auth.yml
92 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | group :base do
4 | gem 'activesupport'
5 | gem 'rest-client'
6 | gem 'json'
7 | gem 'recursive-open-struct'
8 | gem 'mongoid'
9 | gem 'lru_redux'
10 | gem 'git'
11 | gem 'octokit'
12 | end
13 |
14 | group :worker do
15 | gem 'workers'
16 | gem 'kubeclient'
17 | gem 'droplet_kit'
18 | gem 'net-ssh-simple'
19 | gem 'celluloid-io'
20 | gem 'levenshtein-ffi'
21 | gem 'google-api-client'
22 | end
23 |
24 | group :test do
25 | gem 'timecop'
26 | gem 'test_helper'
27 | gem 'rspec'
28 | end
29 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | activemodel (4.2.10)
5 | activesupport (= 4.2.10)
6 | builder (~> 3.1)
7 | activesupport (4.2.10)
8 | i18n (~> 0.7)
9 | minitest (~> 5.1)
10 | thread_safe (~> 0.3, >= 0.3.4)
11 | tzinfo (~> 1.1)
12 | addressable (2.5.2)
13 | public_suffix (>= 2.0.2, < 4.0)
14 | axiom-types (0.1.1)
15 | descendants_tracker (~> 0.0.4)
16 | ice_nine (~> 0.11.0)
17 | thread_safe (~> 0.3, >= 0.3.1)
18 | blockenspiel (0.5.0)
19 | bson (4.3.0)
20 | builder (3.2.3)
21 | celluloid (0.17.3)
22 | celluloid-essentials
23 | celluloid-extras
24 | celluloid-fsm
25 | celluloid-pool
26 | celluloid-supervision
27 | timers (>= 4.1.1)
28 | celluloid-essentials (0.20.5)
29 | timers (>= 4.1.1)
30 | celluloid-extras (0.20.5)
31 | timers (>= 4.1.1)
32 | celluloid-fsm (0.20.5)
33 | timers (>= 4.1.1)
34 | celluloid-io (0.17.3)
35 | celluloid (>= 0.17.2)
36 | nio4r (>= 1.1)
37 | timers (>= 4.1.1)
38 | celluloid-pool (0.20.5)
39 | timers (>= 4.1.1)
40 | celluloid-supervision (0.20.6)
41 | timers (>= 4.1.1)
42 | coercible (1.0.0)
43 | descendants_tracker (~> 0.0.1)
44 | concurrent-ruby (1.0.5)
45 | declarative (0.0.10)
46 | declarative-option (0.1.0)
47 | descendants_tracker (0.0.4)
48 | thread_safe (~> 0.3, >= 0.3.1)
49 | diff-lcs (1.3)
50 | domain_name (0.5.20180417)
51 | unf (>= 0.0.5, < 1.0.0)
52 | droplet_kit (2.3.0)
53 | activesupport (> 3.0, < 6)
54 | faraday (~> 0.9)
55 | kartograph (~> 0.2.3)
56 | resource_kit (~> 0.1.5)
57 | virtus (~> 1.0.3)
58 | equalizer (0.0.11)
59 | faraday (0.15.1)
60 | multipart-post (>= 1.2, < 3)
61 | ffi (1.9.23)
62 | git (1.4.0)
63 | google-api-client (0.22.0)
64 | addressable (~> 2.5, >= 2.5.1)
65 | googleauth (>= 0.5, < 0.7.0)
66 | httpclient (>= 2.8.1, < 3.0)
67 | mime-types (~> 3.0)
68 | representable (~> 3.0)
69 | retriable (>= 2.0, < 4.0)
70 | googleauth (0.6.2)
71 | faraday (~> 0.12)
72 | jwt (>= 1.4, < 3.0)
73 | logging (~> 2.0)
74 | memoist (~> 0.12)
75 | multi_json (~> 1.11)
76 | os (~> 0.9)
77 | signet (~> 0.7)
78 | hashie (3.5.7)
79 | hitimes (1.2.6)
80 | http (2.2.2)
81 | addressable (~> 2.3)
82 | http-cookie (~> 1.0)
83 | http-form_data (~> 1.0.1)
84 | http_parser.rb (~> 0.6.0)
85 | http-cookie (1.0.3)
86 | domain_name (~> 0.5)
87 | http-form_data (1.0.3)
88 | http_parser.rb (0.6.0)
89 | httpclient (2.8.3)
90 | i18n (0.9.5)
91 | concurrent-ruby (~> 1.0)
92 | ice_nine (0.11.2)
93 | json (2.1.0)
94 | jwt (2.1.0)
95 | kartograph (0.2.7)
96 | kubeclient (2.5.2)
97 | http (>= 0.98, < 3)
98 | recursive-open-struct (~> 1.0.0)
99 | rest-client
100 | levenshtein-ffi (1.1.0)
101 | ffi (~> 1.9)
102 | little-plugger (1.1.4)
103 | logging (2.2.2)
104 | little-plugger (~> 1.1)
105 | multi_json (~> 1.10)
106 | lru_redux (1.1.0)
107 | memoist (0.16.0)
108 | mime-types (3.1)
109 | mime-types-data (~> 3.2015)
110 | mime-types-data (3.2016.0521)
111 | minitest (5.11.3)
112 | mongo (2.5.3)
113 | bson (>= 4.3.0, < 5.0.0)
114 | mongoid (5.4.0)
115 | activemodel (~> 4.0)
116 | mongo (>= 2.5.1, < 3.0.0)
117 | origin (~> 2.3)
118 | tzinfo (>= 0.3.37)
119 | multi_json (1.13.1)
120 | multipart-post (2.0.0)
121 | net-scp (1.2.1)
122 | net-ssh (>= 2.6.5)
123 | net-ssh (4.2.0)
124 | net-ssh-simple (1.7.3)
125 | blockenspiel (= 0.5.0)
126 | hashie (~> 3.5.5)
127 | net-scp (= 1.2.1)
128 | net-ssh (= 4.2.0)
129 | netrc (0.11.0)
130 | nio4r (1.2.1)
131 | octokit (4.9.0)
132 | sawyer (~> 0.8.0, >= 0.5.3)
133 | origin (2.3.1)
134 | os (0.9.6)
135 | public_suffix (3.0.2)
136 | recursive-open-struct (1.0.5)
137 | representable (3.0.4)
138 | declarative (< 0.1.0)
139 | declarative-option (< 0.2.0)
140 | uber (< 0.2.0)
141 | resource_kit (0.1.7)
142 | addressable (>= 2.3.6, < 3.0.0)
143 | rest-client (2.0.2)
144 | http-cookie (>= 1.0.2, < 2.0)
145 | mime-types (>= 1.16, < 4.0)
146 | netrc (~> 0.8)
147 | retriable (3.1.1)
148 | rspec (3.7.0)
149 | rspec-core (~> 3.7.0)
150 | rspec-expectations (~> 3.7.0)
151 | rspec-mocks (~> 3.7.0)
152 | rspec-core (3.7.1)
153 | rspec-support (~> 3.7.0)
154 | rspec-expectations (3.7.0)
155 | diff-lcs (>= 1.2.0, < 2.0)
156 | rspec-support (~> 3.7.0)
157 | rspec-mocks (3.7.0)
158 | diff-lcs (>= 1.2.0, < 2.0)
159 | rspec-support (~> 3.7.0)
160 | rspec-support (3.7.1)
161 | sawyer (0.8.1)
162 | addressable (>= 2.3.5, < 2.6)
163 | faraday (~> 0.8, < 1.0)
164 | signet (0.8.1)
165 | addressable (~> 2.3)
166 | faraday (~> 0.9)
167 | jwt (>= 1.5, < 3.0)
168 | multi_json (~> 1.10)
169 | test_helper (0.0.1)
170 | thread_safe (0.3.6)
171 | timecop (0.9.1)
172 | timers (4.1.2)
173 | hitimes
174 | tzinfo (1.2.5)
175 | thread_safe (~> 0.1)
176 | uber (0.1.0)
177 | unf (0.1.4)
178 | unf_ext
179 | unf_ext (0.0.7.5)
180 | virtus (1.0.5)
181 | axiom-types (~> 0.1)
182 | coercible (~> 1.0)
183 | descendants_tracker (~> 0.0, >= 0.0.3)
184 | equalizer (~> 0.0, >= 0.0.9)
185 | workers (0.6.1)
186 |
187 | PLATFORMS
188 | ruby
189 |
190 | DEPENDENCIES
191 | activesupport
192 | celluloid-io
193 | droplet_kit
194 | git
195 | google-api-client
196 | json
197 | kubeclient
198 | levenshtein-ffi
199 | lru_redux
200 | mongoid
201 | net-ssh-simple
202 | octokit
203 | recursive-open-struct
204 | rest-client
205 | rspec
206 | test_helper
207 | timecop
208 | workers
209 |
210 | BUNDLED WITH
211 | 1.16.1
212 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [2017] [Ashcon Partovi]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Infrastructure
2 | Microservices and configurations to maintain and deploy [stratus.network](https://stratus.network)
3 |
4 | - `models` - Dockerfiles and scripts for building images
5 | - `manifests` - Kubernetes configurations for orchestrating containers
6 | - `lib` - Ruby microservices that auto-manage the cluster
7 | - `spec` - Unit testing of utilities
8 |
--------------------------------------------------------------------------------
/default-backend/backend.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Maintenance Mode
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
105 |
106 |
107 |
114 |
115 |
Maintenance Mode
116 |
117 | We're currently experiencing some issues with our website.
118 | Follow us on Twitter @StratusMC for updates.
119 |
120 |
121 |
122 |
123 |
124 |
125 | Stratus Network
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
--------------------------------------------------------------------------------
/default-backend/server.rb:
--------------------------------------------------------------------------------
require 'socket'
require 'uri'

server = TCPServer.new(Socket.gethostname, 8080)

STDERR.puts "Listening for requests on #{Socket.gethostname}:8080"

loop do
  socket = server.accept

  begin
    # Extract the first path segment of the request line, e.g.
    # "GET /healthz HTTP/1.1" => "healthz". The to_s guards keep a
    # client that disconnects early (gets => nil) or sends a request
    # line without a "/" from crashing the whole server loop.
    request_line = socket.gets.to_s.split("/")[1].to_s.split(" ")[0].to_s

    STDERR.puts request_line

    case request_line.downcase
    when "healthz"
      # Liveness probe endpoint: plain-text 200.
      message = "ok\n"
      socket.print "HTTP/1.1 200 OK\r\n" +
                   "Content-Type: text/plain\r\n" +
                   "Content-Length: #{message.bytesize}\r\n" +
                   "Connection: close\r\n"

      socket.print "\r\n"

      socket.print message
    else
      # Every other path serves the static maintenance page as a 404.
      File.open("backend.html", "rb") do |file|
        socket.print "HTTP/1.1 404 Not Found\r\n" +
                     "Content-Type: text/html\r\n" +
                     "Content-Length: #{file.size}\r\n" +
                     "Connection: close\r\n"

        socket.print "\r\n"
        IO.copy_stream(file, socket)
      end
    end
  ensure
    # Always release the client socket, even if the handler raised.
    socket.close
  end
end
39 |
--------------------------------------------------------------------------------
/init.sh:
--------------------------------------------------------------------------------
#! /bin/bash

# Enable swap to prevent crashing of leaking applications.
# The swap file is sized to half of physical memory
# (MemTotal from /proc/meminfo is in KB, hence the *1024/2).
fallocate -l $(awk '/MemTotal/ { printf "%.0f \n", $2*1024/2 }' /proc/meminfo) /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile

# Tweak swap variables for maximum performance:
# low swappiness prefers RAM; lower vfs_cache_pressure retains the
# dentry/inode caches longer.
sudo sysctl vm.swappiness=15
sudo sysctl vm.vfs_cache_pressure=50

# Ensure swap is persistent on server shutdown
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab

# Reset the kubernetes cluster
kubeadm reset

# Ensure the kubelet is operating in the private networking cluster.
# The node IP is read from the DigitalOcean droplet metadata service.
echo "Environment=\"KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=$(curl http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address)\"" >> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl restart kubelet

# Join the private cluster, provided by a secret environment variable
$KUBEADM_JOIN_COMMAND
26 |
--------------------------------------------------------------------------------
/lib/connectable.rb:
--------------------------------------------------------------------------------
1 | require "net/ssh/simple"
2 |
# Represents a SSH-able physical or virtual server.
module Connectable

  # Get the IPv4 address of this server.
  # Must be set by the implementer using @ip = "value".
  def ip
    @ip
  end

  # Get whether the connection is alive and the server is online.
  def alive?
    ping > 0
  end

  # Get the latency of the connection in number of seconds.
  # If the connection is not alive, this will return -1.
  def ping
    start = Time.now
    execute("uptime")
    Time.now - start
  rescue
    -1
  end

  # Block the current thread until the connection is alive.
  # An exception will be thrown if not connected after 60 seconds.
  def ensure!
    # 12 attempts x 5 seconds = 60 second budget. Plain integer sleep
    # avoids depending on ActiveSupport's Numeric#seconds, which this
    # file does not require.
    12.times do
      return if alive?
      sleep(5)
    end
    raise "Unable to wait for machine to accept connections"
  end

  # Get a hash of the cpu, memory, and swap memory utilization from [0,1].
  # This will only work for specific versions of Linux, and is
  # not tested on other platforms.
  def utilization
    {
      cpu: execute("grep 'cpu ' /proc/stat | awk '{print ($2+$4)/($2+$4+$5)}'"),
      memory: execute("free | grep Mem | awk '{print $3/$2}'"),
      swap: execute("free | grep Swap | awk '{print $3/$2}'")
    }
  end

  # Remotely execute a shell-based command and return the output.
  def execute(command)
    session.ssh(ip, command).stdout
  end

  # Copy a remote file to a local file.
  def copy(remote_path, local_path)
    session.scp_get(ip, remote_path, local_path)
  end

  # Upload a local file and paste to a remote file.
  def paste(local_path, remote_path)
    session.scp_put(ip, local_path, remote_path)
  end

  protected

  # Get the current, non-thread-safe SSH session, memoized per-thread
  # and per-host address so threads never share a session.
  def session
    Thread.current[:"#{ip}_ssh"] ||= Net::SSH::Simple.new({:user => "root"})
  end
end
70 |
--------------------------------------------------------------------------------
/lib/digital_ocean.rb:
--------------------------------------------------------------------------------
1 | require "digital_ocean/api"
2 | require "digital_ocean/droplet"
3 | require "digital_ocean/model"
4 | require "digital_ocean/resource"
5 |
--------------------------------------------------------------------------------
/lib/digital_ocean/api.rb:
--------------------------------------------------------------------------------
1 | require "droplet_kit"
2 | require "environment"
3 |
# Extension for objects from the DigitalOcean API.
module DigitalOcean
  # Lazily build, and cache for reuse, a DropletKit client that
  # authenticates with the required "digital_ocean_key" secret.
  def digital_ocean
    return @digital_ocean unless @digital_ocean.nil?
    @digital_ocean = DropletKit::Client.new(access_token: Env.need("digital_ocean_key"))
  end
end
10 |
--------------------------------------------------------------------------------
/lib/digital_ocean/droplet.rb:
--------------------------------------------------------------------------------
1 | require "connectable"
2 | require "digital_ocean/model"
3 |
# Extension for Droplet actions.
module DropletKit
  class Droplet
    include Connectable

    # True when the droplet is powered on and reachable.
    def online?
      "active" == status
    end

    # True when the droplet is powered down or has been archived.
    def offline?
      ["off", "archive"].include?(status)
    end

    # True when an action currently holds a lock on the droplet,
    # or when it is still being provisioned.
    def locked?
      locked || "new" == status
    end

    # Ask the droplet to shut down cleanly; fall back to a hard
    # power-off when the graceful path fails.
    def shutdown
      wait do
        actions.shutdown(droplet_id: id)
      end
    rescue
      power_off
    end

    # Ask the droplet to reboot cleanly; fall back to a hard
    # power-on when the graceful path fails.
    def reboot
      wait do
        actions.reboot(droplet_id: id)
      end
    rescue
      power_on
    end

    # Power the droplet on.
    def power_on
      wait do
        actions.power_on(droplet_id: id)
      end
    end

    # Hard power-off, without a clean shutdown.
    def power_off
      wait do
        actions.power_off(droplet_id: id)
      end
    end

    # Connectable hook: connect over the public address.
    def ip
      public_ip
    end

    # Shut the droplet down before deleting it for good.
    def destroy!
      shutdown
      super
    end

    # Resource type used by the shared model helpers.
    def type
      "droplet"
    end
  end
end
65 |
--------------------------------------------------------------------------------
/lib/digital_ocean/model.rb:
--------------------------------------------------------------------------------
1 | require "digital_ocean/api"
2 |
# Extension for objects from the DigitalOcean API.
module DropletKit
  class BaseModel
    include ::DigitalOcean

    # Get the DigitalOcean client that sends requests on behalf of this model.
    def client
      digital_ocean
    end

    # Get the type of resource this model represents.
    # Subclasses must override this (e.g. Droplet#type returns "droplet").
    def type
      raise NotImplementedError, "Undefined resource type (ie. droplet, floating_ip)"
    end

    # Get the object provider that manages the given type,
    # e.g. client.droplets for type "droplet".
    def collection
      client.send("#{type}s")
    end

    # Get the action provider that manages the given type,
    # e.g. client.droplet_actions for type "droplet".
    def actions
      client.send("#{type}_actions")
    end

    # Fetch the newest version of this model and mass-assign all the new attributes.
    def refresh!
      self.attributes = collection.find(id: id).attributes
    end

    # Block the current thread until an action is completed.
    # An error will be raised if there is an exception with the action.
    #
    # First spins while the resource is locked, sleeping with exponential
    # backoff (factor 1.5 per attempt). Then the action under inspection
    # is either the block's result (first call) or a fresh lookup of the
    # in-flight action (recursive calls while "in-progress").
    # NOTE(review): the backoff default relies on ActiveSupport's
    # Numeric#second being loaded -- confirm callers require it.
    def wait(backoff=1.second, action=nil)
      1.step do |i|
        break unless locked?
        sleep(backoff * 1.5 ** i)
      end
      case (action = block_given? ? yield : actions.find(id: action.id)).status
      when "in-progress"
        wait(backoff, action)
      when "errored"
        raise "Action #{action.type} for #{action.resource_type} errored out"
      else
        action
      end
    end

    # Check if any actions are currently in-progress for the given type.
    # NOTE(review): only the first action returned is inspected -- an
    # older still-running action would not be detected; verify ordering.
    def locked?
      collection.actions(id: id).first.status == "in-progress"
    end

    # Tag the resource with the given name.
    # If the tag does not exist or the resource
    # already has the tag, it will be quietly handled.
    def tag(name)
      tag!(name, true)
    end

    # Untag the resource with the given name.
    def untag(name)
      tag!(name, false)
    end

    # Destroy the resource immediately, this is irreversible.
    def destroy!
      collection.delete(id: id)
    end

    protected

    # Shared (un)tagging helper: when tagging, the tag is created first
    # so the subsequent tag_resources call cannot fail on a missing tag.
    def tag!(name, action)
      client.tags.create(DropletKit::Tag.new(name: name)) if action
      client.tags.send(
        "#{action ? '' : 'un'}tag_resources",
        {name: name, resources: [{resource_id: id, resource_type: type}]}
      )
    end
  end
end
82 | end
83 |
--------------------------------------------------------------------------------
/lib/digital_ocean/resource.rb:
--------------------------------------------------------------------------------
1 | require "droplet_kit"
2 | require "document"
3 | require "levenshtein"
4 |
# Extension for objects from the DigitalOcean API.
# Adds cache-backed, fuzzy lookup helpers to every ResourceKit resource
# list; the cache (via Document) maps index-field values to resources.
module ResourceKit
  class Resource
    include Document

    # Get the field of the resource to index this list by.
    # When calling #{get} or #{find}, this field will be
    # point of comparison for the query.
    def index
      raise NotImplementedError, "Must specify a field to index"
    end

    # Get a resource from the list given a query to the specific field #{index}.
    # Will throw an exception if not found, use #{find} for a safer search.
    def find_exact(key)
      cache[key] or raise "Unable to find resource by #{index} with #{key}"
    end

    # Find a list of resources given a query to the specific field #{index}.
    # Results are ordered by closeness to the query: smallest Levenshtein
    # distance first, then highest numeric suffix, then lexicographically.
    # An empty query matches everything.
    def find_any(key="")
      cache.select{|k,v| key.empty? || k.to_s.include?(key.to_s)}
        .sort_by{|k,v| [Levenshtein.distance(k, key.to_s), k[/\d+/].to_i * -1, k]}
        .map{|k,v| find_exact(k)}
    end

    # Find the first resource that matches the query to the specific field #{index}.
    # NOTE(review): the inline rescue also swallows fetch/network errors from
    # the cache, not just "no match" — confirm that is intended.
    def find_one(key)
      find_any(key).first rescue nil
    end

    # Find the first resource or throw an exception if nothing is found.
    def find_one_or_throw(key)
      find_one(key) or raise "Unable to find resource by #{index} with #{key}"
    end

    protected

    # Make the cache from the Document module a thread current variable.
    # Each thread keeps its own index hash, keyed by the (downcased) class name.
    def cache
      Thread.current[:"#{self.class.name.downcase}_cache"] ||= super
    end

    # Build the index hash: maps each resource's #{index} value to the
    # resource. (self.all is presumably the ResourceKit listing call —
    # defined outside this view.)
    def fetch!
      self.all.map{|resource| [resource.send("#{index}"), resource]}.to_h
    end

    # Resource lists change rarely; cache them longer than the Document default.
    def cache_duration
      1.hour
    end
  end
end
56 |
# Specific extensions for resources that need to be cachable and indexable.
# Each DropletKit resource list declares which attribute its cache is keyed by.
module DropletKit
  {
    "DropletResource" => "name",
    "ImageResource"   => "name",
    "SSHKeyResource"  => "name",
    "RegionResource"  => "slug",
    "SizeResource"    => "slug"
  }.each do |resource, field|
    klass = const_defined?(resource, false) ? const_get(resource) : const_set(resource, Class.new)
    klass.class_eval do
      define_method(:index) { field.dup }
    end
  end
end
85 |
--------------------------------------------------------------------------------
/lib/document.rb:
--------------------------------------------------------------------------------
1 | require "lru_redux"
2 | require "active_support/time"
3 |
# Represents an object that has an unique ID and a cachable document.
# Implementers must define #fetch! (and set @id); all other methods are
# served from a small TTL cache wrapped around the fetched document.
module Document

  # Get the ID of this document.
  # Must be set by the implementer using @id = "value".
  def id
    @id
  end

  # Gets a new version of the document and updates the cache.
  def document
    cache_internal[:document] = fetch!
  end

  # Get a cached version of the document or call #{fetch!}
  # if the cache has expired beyond the #{cache_duration}.
  def cache
    cache_internal.getset(:document){fetch!}
  end

  # Fetch the newest version of the document.
  # This does not update the cache, use #{document} instead.
  def fetch!
    raise NotImplementedError, "Unable to fetch document"
  end

  # Clear the document cache and fetch the newest document.
  def refresh!
    cache_internal.clear
    cache
  end

  protected

  # Duration that document caches should be stored before expiring.
  def cache_duration
    5.minutes
  end

  # Cache provider that allows thread-safe ttl operations.
  def cache_internal
    @cache ||= LruRedux::TTL::ThreadSafeCache.new(1, cache_duration.to_i)
  end

  # Any missing methods are assumed to be directed at the document.
  # If the method ends with "_cache", then the method is forwarded
  # to the cache, if not it forwards to a newly requested document.
  def method_missing(m, *args, &block)
    if m.to_s.end_with?("_cache")
      m = m.to_s.gsub("_cache", "")
    else
      refresh!
    end
    cache.send(m, *args, &block)
  end

  # Standard companion to method_missing (previously missing): advertise
  # that every message is forwarded to the document, so respond_to? and
  # Method lookups behave consistently with the dynamic dispatch above.
  def respond_to_missing?(m, include_private = false)
    true
  end
end
60 |
--------------------------------------------------------------------------------
/lib/environment.rb:
--------------------------------------------------------------------------------
# Utility class to get access to environment variables.
# Lookups are case-insensitive: keys are upcased before consulting the
# in-process override table and then the real ENV.
class Env
  # Get the value of a variable with a given key.
  # Unset, blank, or literal "null" values yield nil. The raw value is
  # split on +splitter+; a valid +index+ selects one part, while any
  # out-of-range index (such as -1) returns the whole array of parts.
  def self.get(key, index=0, splitter=",")
    raw = override[key.to_s.upcase] || ENV[key.to_s.upcase]
    return if raw.nil? || raw.empty? || raw == "null"
    values = raw.split(splitter) rescue [raw]
    (0...values.size).cover?(index) ? values[index] : values
  end

  # Get an array of the values for the given key.
  def self.get_multi(key, splitter=",")
    get(key, -1, splitter)
  end

  # Get a key or throw an exception if it does not exist.
  def self.need(key, context=nil)
    get(key) or raise "Unable to find required variable '#{key}' #{context ? "in '#{context}'" : ""}"
  end

  # Determine whether the key has a value.
  def self.has?(key)
    !get(key).nil?
  end

  # Override the value of an environment variable.
  # Without +force+, an existing value (from ENV or a prior override) wins.
  def self.set(key, value, force=false)
    override[key.to_s.upcase] = value if force || !has?(key)
  end

  # Get the name of the host, using environment variables.
  def self.host
    get("hostname")
  end

  # Determine if the host is replicated, -1 if unique.
  def self.replica
    Integer(host.split("-").last) rescue -1
  end

  # Get the hash of variables that override the system variables.
  def self.override
    @override ||= {}
  end

  # Get a hash of all variables including override.
  def self.all
    ENV.to_h.merge(override)
  end

  # Replace ${VAR} and $VAR references with their actual values,
  # raising (via #need) when a referenced variable is missing.
  def self.substitute(string)
    string.gsub(/\$\{([ a-zA-Z0-9_-]{1,})\b\}|\$([a-zA-Z0-9_-]{1,})\b/) do |var|
      need(var.gsub(/[\{\}\$]/, ""), string)
    end
  end
end
69 |
--------------------------------------------------------------------------------
/lib/github.rb:
--------------------------------------------------------------------------------
1 | require "github/api"
2 |
--------------------------------------------------------------------------------
/lib/github/api.rb:
--------------------------------------------------------------------------------
1 | require "git"
2 | require "octokit"
3 | require "environment"
4 |
# Extension for objects from the Github API.
module Github
  # Module-load side effect: every Octokit client (including the memoized
  # one below) follows pagination links automatically.
  Octokit.auto_paginate = true

  # Memoized Octokit client authenticated with the GITHUB_KEY variable.
  def github
    @github ||= Octokit::Client.new(access_token: github_key)
  end

  # Personal access token from the environment; raises when unset.
  def github_key
    @github_key ||= Env.need("github_key")
  end
end
17 |
--------------------------------------------------------------------------------
/lib/google.rb:
--------------------------------------------------------------------------------
1 | require "google/api"
2 |
--------------------------------------------------------------------------------
/lib/google/api.rb:
--------------------------------------------------------------------------------
1 | require "googleauth"
2 | require "google/apis/compute_beta"
3 | require "environment"
4 |
# Represents an object that interacts with any Google API.
module Google

  # The path to the JSON credentials file.
  # NOTE(review): not referenced by #auth below — confirm it is consumed
  # elsewhere (e.g. via GOOGLE_APPLICATION_CREDENTIALS).
  def credentials
    "google.json"
  end

  # Get the project ID. (Read from the GOOGLE_PROJECT_ID environment
  # variable, raising when unset — not parsed from the credentials file.)
  def project_id
    @project_id ||= Env.need("google_project_id")
  end

  # Authenticate a Google service object with a path to a JSON credentials file.
  # The environment variables 'GOOGLE_PRIVATE_KEY' and 'CLIENT_EMAIL_VAR' must
  # be defined for authentication to work properly.
  # Returns the same service with authorization scoped to Compute.
  def auth(service)
    service.authorization = Google::Auth::ServiceAccountCredentials.make_creds({
      :scope => "https://www.googleapis.com/auth/compute"
    })
    service
  end
end
28 |
29 |
--------------------------------------------------------------------------------
/lib/kubernetes.rb:
--------------------------------------------------------------------------------
1 | require "kubernetes/api"
2 | require "kubernetes/resource"
3 | require "kubernetes/resource/node"
4 | require "kubernetes/resource/pod"
5 |
--------------------------------------------------------------------------------
/lib/kubernetes/api.rb:
--------------------------------------------------------------------------------
1 | require "kubeclient"
2 | require "celluloid/current"
3 | require "celluloid/io"
4 |
# Represents an object that interacts with the Kubernetes cluster.
module Kubernetes
  # Memoized cluster client: prefer in-cluster service-account auth and
  # fall back to the local kubeconfig when that is unavailable.
  def cluster
    @cluster ||= begin
      cluster_internal
    rescue
      cluster_external
    end
  end

  # List every node in the cluster as a Node resource.
  def nodes
    cluster.get_nodes.map{|node| Node.new(node.metadata.name)}
  end

  protected

  # Access the cluster from inside a pod that has a service account.
  # Uses the mounted service-account CA and bearer token, with Celluloid
  # socket classes so requests cooperate with the actor system.
  def cluster_internal
    Kubeclient::Client.new(
      "https://kubernetes.default.svc",
      "v1",
      {
        ssl_options: {
          ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
        },
        auth_options: {
          bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token"
        },
        socket_options: {
          socket_class: Celluloid::IO::TCPSocket,
          ssl_socket_class: Celluloid::IO::SSLSocket
        }
      }
    )
  end

  # Access the cluster from an external machine.
  # Reads ~/.kube/config and reuses its context/credentials.
  # NOTE(review): SSL verification is disabled (verify_ssl = 0) here.
  def cluster_external
    config = Kubeclient::Config.read(File.expand_path("~/.kube/config"))
    context = config.context
    ssl_options = context.ssl_options
    ssl_options[:verify_ssl] = 0
    Kubeclient::Client.new(
      context.api_endpoint,
      context.api_version,
      {
        ssl_options: ssl_options,
        auth_options: context.auth_options,
        socket_options: {
          socket_class: Celluloid::IO::TCPSocket,
          ssl_socket_class: Celluloid::IO::SSLSocket
        }
      }
    )
  end
end
61 |
--------------------------------------------------------------------------------
/lib/kubernetes/resource.rb:
--------------------------------------------------------------------------------
1 | require "kubernetes/api"
2 | require "document"
3 |
# Represents a resource in a Kubernetes cluster.
class Resource
  include Kubernetes
  include Document

  # name - the resource's metadata name, used as its Document ID.
  def initialize(name)
    @id = name
  end

  # Get the name of the resource.
  def name
    id
  end

  # Get the cached namespace of the resource.
  # (metadata_cache is served by Document#method_missing from the cached document.)
  def namespace
    metadata_cache.namespace
  end

  # Block the current thread and process a block of code for every resource update.
  # Fixed: the block must be forwarded with &block — `each(block)` passed the
  # Proc as a positional argument instead of as the iteration block.
  # NOTE(review): `client` is not defined by the visible Kubernetes module
  # (which provides `cluster`) — confirm where it comes from.
  def watch!(&block)
    client.watch_events(namespace: namespace, field_selector: "involvedObject.name=#{name}").each(&block)
  end

  # Subclasses must implement deletion for their concrete resource type.
  def destroy!
    raise NotImplementedError, "Unable to delete a #{self.class.name} resource"
  end
end
32 |
--------------------------------------------------------------------------------
/lib/kubernetes/resource/node.rb:
--------------------------------------------------------------------------------
1 | require "kubernetes/resource"
2 |
# Represents a virtual machine in a Kubernetes cluster.
class Node < Resource

  # Determine if the node is ready to accept pods and connections,
  # by reading the "Ready" condition from the fetched node document.
  def ready?
    status.conditions.select{|c| c.type == "Ready"}.map{|c| c.status == "True"}.first
  end

  # Get the last heartbeat time from the node (newest across all conditions).
  def last_heartbeat
    status.conditions.map{|c| Time.parse(c.lastHeartbeatTime)}.sort.last
  end

  # Get the list of labels for this node as a plain String => String hash.
  def labels
    metadata.labels.to_h.map{|k,v| [k.to_s, v.to_s]}.to_h
  end

  # Patch new labels to the node.
  def label(values)
    cluster.patch_node(id, {metadata: {labels: values}})
  end

  # Get a list of pods that are on this node.
  # NOTE(review): `client` is not defined by the visible Kubernetes module
  # (which provides `cluster`) — confirm this resolves at runtime.
  def pods
    client.get_pods.select{|pod| pod.spec.nodeName == name}.map{|pod| Pod.new(pod.metadata.name)}
  end

  # Fetch the newest node document (backs the Document cache).
  def fetch!
    cluster.get_node(id)
  end

  # Delete the node from the cluster.
  def destroy!
    cluster.delete_node(id)
  end
end
39 |
--------------------------------------------------------------------------------
/lib/kubernetes/resource/pod.rb:
--------------------------------------------------------------------------------
1 | require "kubernetes/resource"
2 |
# Represents a set of Docker containers in a Kubernetes cluster.
class Pod < Resource

  # Get a list of containers for this pod (from the cached pod spec).
  def containers
    spec_cache.containers
  end

  # Run a block of code every time a message is logged in a container.
  # Fixed: the block must be forwarded with &block — `each(block)` passed
  # the Proc as a positional argument instead of as the iteration block.
  # NOTE(review): `client` is not defined by the visible Kubernetes module
  # (which provides `cluster`) — confirm where it comes from.
  def watch_container!(container_name=containers.first.name, &block)
    client.watch_pod_log(name, namespace, container: container_name, previous: true).each(&block)
  end

  # Fetch the newest pod document (backs the Document cache).
  def fetch!
    cluster.get_pod(id)
  end

  # Delete the pod from the cluster.
  def destroy!
    cluster.delete_pod(id)
  end
end
24 |
--------------------------------------------------------------------------------
/lib/minecraft/dns/manifest.rb:
--------------------------------------------------------------------------------
1 | require "worker/base"
2 | require "stratus"
3 |
# Routinely update the list of Minecraft servers sorted by fullness.
module Minecraft
  module DNS
    # Worker that snapshots Stratus servers ordered from emptiest to
    # fullest (online/max ratio, ascending).
    class Manifest < Worker
      def run
        # NOTE(review): sort_by always returns an array, so this `if` only
        # serves as an assignment — an error from Stratus would raise out
        # of #run rather than be skipped.
        if servers = Stratus::Server.all
          .sort_by{|s| (s.num_online || 0) / ([1, s.max_players || 0].max).to_f}
          @servers = servers
        end
      end

      # Most recently fetched snapshot (nil before the first run).
      def servers
        @servers
      end
    end
  end
end
--------------------------------------------------------------------------------
/lib/minecraft/dns/parser.rb:
--------------------------------------------------------------------------------
1 | require "stratus"
2 |
# Parse custom DNS queries into Kubernetes IP addresses.
module Minecraft
  module DNS
    class Parser
      # Resolve a DNS query against the known server list and return the
      # fully-qualified in-cluster pod DNS name, or nil when the query does
      # not use the custom "<name>[.<selector>][.<datacenter>].mc" format.
      def parse(query, servers)
        if components = parse_server_components(query, servers)
          pod = components[:index] ? "#{components[:name]}-#{components[:index]}"
                                   : components[:name]
          service = components[:name]
          namespace = components[:datacenter] == "TM" ? "tm" : "default"
          "#{pod}.#{service}.#{namespace}.svc.cluster.local"
        end
      end

      # Get the suffix that allows DNS queries to be processed.
      def suffix
        "mc"
      end

      # Parse all of the sub-components of the DNS query
      # and return the best server that matches the query.
      # Raises ParseException when no server matches, or when several match
      # and no selector was given to disambiguate them.
      def parse_server_components(query, servers)
        if components = parse_components(query)
          servers = servers.select do |server|
            server_name = server.bungee_name.downcase
            server_name.include?(components[:name]) &&
              (!components[:index] || server_name.include?((components[:index] + 1).to_s)) &&
              (!components[:datacenter] || server.datacenter == components[:datacenter])
          end
          size = servers.size
          if size == 0 || (!components[:selector] && size > 1)
            raise ParseException, "#{servers.size} servers matched query\n(#{components})"
          elsif size == 1
            components[:server] = servers.first
          else
            # The incoming list is sorted emptiest-first (see Manifest),
            # so first/last map to the "empty"/"full" selectors.
            components[:server] = case components[:selector]
            when "rand"
              # Fixed: servers[rand(0..size)] used an INCLUSIVE range, so it
              # could index one past the end and select nil.
              servers.sample
            when "empty"
              servers.first
            when "full"
              servers.last
            end
          end
          components[:datacenter] = components[:server].datacenter
          unless components[:index]
            # Derive the replica index from the digits in the bungee name
            # (e.g. "Lobby3" -> index 2), except for private servers.
            unless components[:server].settings_profile == "private"
              index = components[:server].bungee_name.gsub(/[^0-9]/, "")
              unless index.empty?
                components[:index] = [0, index.to_i - 1].max
              end
            end
          end
          components
        end
      end

      # Parse all of the sub-components of the DNS query.
      # Returns a hash containing the name, index, datacenter,
      # and selector of the query. Only name is guaranteed to not be nil.
      def parse_components(query)
        parts = query.split(".").map{|part| part.downcase}
        size = parts.size
        if size > 1 && parts.last == suffix
          name = parts[0]
          index = nil
          datacenter = nil
          selector = nil
          if size > 2
            begin
              parsed = parse_selector(parts[1])
              if parsed.is_a?(Integer)
                index = parsed
              else
                selector = parsed
              end
            rescue ParseException => e
              # With three parts the middle may be a datacenter instead of
              # a selector; with four parts it must be a selector.
              raise e if size > 3
              datacenter = parse_datacenter(parts[1])
            end
          end
          if size > 3
            datacenter = parse_datacenter(parts[2])
          end
          {
            name: name,
            index: index,
            datacenter: datacenter,
            selector: selector
          }
        end
      end

      # Parse the server datacenter of the DNS query.
      # Can either be 'US', 'EU', or 'TM'. If a new
      # datacenter is added, it must be put on this list.
      def parse_datacenter(query)
        if query && ["us", "eu", "tm"].include?(query.downcase)
          query.upcase
        else
          raise ParseException, "Unable to parse datacenter from #{query}"
        end
      end

      # Parse the server selector of the DNS query.
      # Can be either an index, 'rand' for a random index,
      # 'empty' for the emptiest server, or 'full' for the
      # fullest server.
      def parse_selector(query)
        if query =~ /\d/
          [0, query.to_i].max
        elsif query && ["rand", "empty", "full"].include?(query.downcase)
          query.downcase
        else
          raise ParseException, "Unable to parse #{query} into a selector"
        end
      end
    end
  end
end
123 |
# Represents an exception while parsing the
# special DNS query into a server ip address.
ParseException = Class.new(StandardError)
128 |
--------------------------------------------------------------------------------
/lib/minecraft/dns/server.rb:
--------------------------------------------------------------------------------
1 | require "minecraft/dns/manifest"
2 | require "minecraft/dns/parser"
3 | require "async/dns"
4 |
# Represents the custom DNS server that handles requests.
module Minecraft
  module DNS
    # UDP DNS server on port 2346. Custom "*.mc" queries are resolved to
    # in-cluster pod addresses; everything else is proxied upstream.
    class Server < Async::DNS::Server
      def initialize
        super([[:udp, "0.0.0.0", 2346]])
        @manifest = Manifest.new
        @parser = Parser.new
        # Upstream resolver (Google DNS) for queries the parser declines.
        @resolver = Async::DNS::Resolver.new([
          [:udp, "8.8.8.8", 53],
          [:tcp, "8.8.8.8", 53]
        ])
      end

      # Answer with the parsed in-cluster name, or pass the query upstream.
      # NOTE(review): @manifest.servers is nil before the first poll and a
      # ParseException propagates out of here — confirm Async::DNS handles
      # both as intended.
      def process(name, resource_class, transaction)
        if response = @parser.parse(name, @manifest.servers)
          transaction.respond!(response)
        else
          transaction.passthrough!(@resolver)
        end
      end

      # Start the manifest's polling loop, then the DNS server itself.
      # (run! is presumably supplied by the Worker base class — confirm.)
      def run
        @manifest.run!
        super
      end
    end
  end
end
34 |
--------------------------------------------------------------------------------
/lib/minecraft/protocol.rb:
--------------------------------------------------------------------------------
1 | require "minecraft/protocol/packet"
2 |
3 | module Minecraft
4 | module Protocol
5 |
6 | VERSION = 340 # 1.12.2
7 | PROTOCOLS = [:handshaking, :status, :login, :play]
8 |
9 | class ServerInfo
10 | attr :json,
11 | :version, :protocol,
12 | :max_players, :online_players,
13 | :description, :icon,
14 | :map_name, :map_icon,
15 | :participants, :observers
16 |
17 | def decode_icon(uri)
18 | if uri && uri =~ %r{^data:image/png;base64,(.*)$}m
19 | Base64.decode64($1)
20 | end
21 | end
22 |
23 | def initialize(json)
24 | @json = json
25 | if version = json['version']
26 | @version = version['name']
27 | @protocol = version['protocol']
28 | end
29 | if players = json['players']
30 | @max_players = players['max']
31 | @online_players = players['online']
32 | end
33 | @description = json['description']
34 | if icon = json['favicon']
35 | @icon = decode_icon(icon)
36 | end
37 | if pgm = json['pgm']
38 | @participants = pgm['participants'].to_i
39 | @observers = pgm['observers'].to_i
40 |
41 | if map = pgm['map']
42 | @map_name = map['name']
43 | @map_icon = decode_icon(map['icon'])
44 | end
45 | end
46 | end
47 |
48 | def pgm?
49 | json.key?('pgm')
50 | end
51 | end
52 |
    # Minimal client for the Minecraft server-list protocol
    # (handshake + status/ping) over a plain TCP socket.
    class Client
      # Opens the TCP connection immediately; starts in :handshaking state.
      def initialize(host:, port: 25565)
        @host = host
        @port = port
        @io = TCPSocket.new(host, port)
        @protocol = :handshaking
      end

      # Read the next clientbound packet for the current protocol state.
      def read
        Packet.read(@io, @protocol, :clientbound)
      end

      # Write a packet; a handshake advances the tracked protocol state.
      def write(packet)
        packet.write(@io)
        if packet.is_a?(Packet::Handshaking::In::SetProtocol)
          @protocol = PROTOCOLS[packet.next_state]
        end
      end

      # Send the initial handshake, switching the connection to +protocol+.
      def handshake(protocol)
        write Packet::Handshaking::In::SetProtocol.new(
          protocol_version: VERSION,
          server_address: @host,
          server_port: @port,
          next_state: PROTOCOLS.index(protocol)
        )
      end

      # Round-trip a ping payload (defaults to the current epoch seconds)
      # and return the payload echoed by the server. Closes the socket.
      def ping(payload = nil)
        handshake(:status)
        write(Packet::Status::In::Ping.new(payload: payload || Time.now.to_i))
        response = read.payload
        @io.close
        response
      end

      # Request the status JSON and wrap it in a ServerInfo. Closes the socket.
      def status
        handshake(:status)
        write(Packet::Status::In::Start.new)
        json = JSON.parse(read.json)
        @io.close
        ServerInfo.new(json)
      end
    end
97 |
    class << self
      # One-shot ping: connect, ping, and return the echoed payload.
      def ping(host: "localhost", port: 25565, payload: nil)
        Client.new(host: host, port: port).ping(payload)
      end

      # One-shot status query: connect and return a ServerInfo.
      def status(host: "localhost", port: 25565)
        Client.new(host: host, port: port).status
      end

      # Like .status but returns nil on any error (connection refused, etc.).
      def safe_status(host: "localhost", port: 25565)
        status(host: host, port: port) rescue nil
      end
    end
111 | end
112 | end
113 |
--------------------------------------------------------------------------------
/lib/minecraft/protocol/data.rb:
--------------------------------------------------------------------------------
module Minecraft
  module Protocol
    # Base class for byte-level transcoding of protocol primitives.
    # Subclasses implement #pack to move one fixed-width value between
    # the value queue and the underlying IO.
    class Transcoder
      # Queue of decoded values (Decoder) or values awaiting encoding (Encoder).
      def values
        @values ||= []
      end

      def initialize(io)
        @io = io
      end

      # Transfer one fixed-width value; must be implemented by subclasses.
      def pack(length, format)
        raise NoMethodError
      end

      # Fixed-width primitives: each maps to a byte length and a
      # String#unpack / Array#pack format (big-endian where applicable).
      {
        byte:    [1, 'c'],
        ubyte:   [1, 'C'],
        short:   [2, 's>'],
        ushort:  [2, 'S>'],
        integer: [4, 'i>'],
        long:    [8, 'q>'],
        float:   [4, 'g'],
        double:  [8, 'G']
      }.each do |name, (length, format)|
        define_method(name) { pack(length, format) }
      end
    end

    # Reads primitives from the IO onto the value queue.
    class Decoder < Transcoder
      def pack(length, format)
        values << @io.read(length).unpack(format)[0]
      end

      # Read a base-128 varint of at most +len+ groups onto the queue.
      def varnum(len)
        shift = 0
        result = 0
        loop do
          group = @io.read(1).ord
          result |= (group & 0x7f) << (7 * shift)
          break if (group & 0x80).zero?
          shift += 1
          raise "VarInt too long" if shift > len
        end
        values << result
      end

      def varint
        varnum(5)
      end

      def varlong
        varnum(10)
      end

      # Read a varint length prefix followed by that many raw bytes.
      def string
        varint
        length = values.pop
        values << @io.read(length)
      end
    end

    # Writes queued values to the IO as protocol primitives.
    class Encoder < Transcoder
      def pack(length, format)
        @io.write([values.shift].pack(format))
      end

      # Write the next queued value as a base-128 varint (wrapped to 32 bits).
      def varint
        remaining = values.shift % 0x1_0000_0000
        loop do
          group = remaining & 0x7f
          remaining >>= 7
          group |= 0x80 unless remaining.zero?
          @io.putc(group)
          break if remaining.zero?
        end
      end

      def varlong
        varint
      end

      # Write a varint length prefix followed by the raw string bytes.
      def string
        text = values.shift
        values.unshift(text.size)
        varint
        @io.write(text)
      end
    end
  end
end
109 |
--------------------------------------------------------------------------------
/lib/minecraft/protocol/packet.rb:
--------------------------------------------------------------------------------
1 | require "minecraft/protocol/data"
2 |
module Minecraft
  module Protocol
    # Registry and base classes for protocol packets. Packet classes declare
    # an id and typed fields; encoding/decoding is driven by the field table
    # using the Encoder/Decoder transcoders.
    module Packet
      # Mixed (extended) into serverbound (client -> server) packet classes.
      module Serverbound
        def direction
          :serverbound
        end
      end

      # Mixed (extended) into clientbound (server -> client) packet classes.
      module Clientbound
        def direction
          :clientbound
        end
      end

      class << self
        # Build a Hash with either a per-key computed-and-stored default
        # (block form) or a shared default value.
        def hash(value = nil, &block)
          if block_given?
            Hash.new{|h, k| h[k] = block[k] }
          else
            Hash.new(value)
          end
        end

        # Global packet lookup table, lazily populated by Base.id.
        def packets
          # protocol -> direction -> packet_id -> class
          @packets ||= hash{ hash{ {} } }
        end

        # Read one packet from io: length varint, id varint, then the
        # registered class's fields. Raises on an unregistered packet id.
        def read(io, protocol, direction)
          decoder = Decoder.new(io)
          decoder.varint # length
          decoder.varint # ID
          packet_id = decoder.values[1]

          unless cls = Packet.packets[protocol.to_sym][direction.to_sym][packet_id]
            raise "Unknown packet #{protocol}:#{direction}:#{packet_id}"
          end

          # Reuse the decoder: its value queue now fills with field values
          # in declaration order, which become positional ctor arguments.
          decoder.values.clear
          cls.transcode_fields(decoder)
          cls.new(*decoder.values)
        end
      end

      # Common behaviour for all packets: field-declaration DSL, value
      # storage, and length-prefixed encoding.
      class Base
        class << self
          attr :packet_id

          # Declare this packet's id and register the class in the
          # protocol/direction/id lookup table.
          def id(packet_id)
            @packet_id = packet_id
            Packet.packets[protocol][direction][packet_id] = self
          end

          # Ordered name => wire-type table of declared fields.
          def fields
            @fields ||= {}
          end

          # Declare a field: records its wire type and defines positional
          # reader/writer accessors backed by @values.
          def field(name, type)
            index = fields.size
            fields[name.to_sym] = type.to_sym

            define_method name do
              @values[index]
            end

            define_method "#{name}=" do |value|
              @values[index] = value
            end
          end

          # Drive the transcoder through every field in declaration order.
          def transcode_fields(stream)
            fields.values.each do |type|
              stream.__send__(type)
            end
          end
        end

        # Accepts positional values (decoder path) or keyword field values
        # (construction path).
        # NOTE(review): when positional values are given, the keyword loop
        # still appends one nil per field; harmless for the current read and
        # encode paths, but mixing both styles would misalign fields.
        def initialize(*values, **fields)
          @values = values
          self.class.fields.each do |name, _|
            @values << fields[name]
          end
        end

        def write(io)
          io.write(encode)
        end

        # Serialize as: varint total length, then varint packet id + fields.
        def encode
          encoded = ""
          encoder = Encoder.new(StringIO.new(encoded))
          encoder.values << self.class.packet_id
          encoder.values.concat(@values)

          encoder.varint # packet_id
          self.class.transcode_fields(encoder)

          prefix = ""
          encoder = Encoder.new(StringIO.new(prefix))
          encoder.values << encoded.size
          encoder.varint # length

          prefix + encoded
        end
      end

      # -- handshaking state --
      module Handshaking
        class Base < Packet::Base
          def self.protocol
            :handshaking
          end
        end

        module In
          class Base < Handshaking::Base
            extend Serverbound
          end

          # Initial handshake: announces version, target, and next state.
          class SetProtocol < Base
            id 0
            field :protocol_version, :varint
            field :server_address, :string
            field :server_port, :ushort
            field :next_state, :varint
          end
        end

        module Out
          class Base < Handshaking::Base
            extend Clientbound
          end
        end
      end

      # -- status (server list ping) state --
      module Status
        class Base < Packet::Base
          def self.protocol
            :status
          end
        end

        module In
          class Base < Status::Base
            extend Serverbound
          end

          # Request the status JSON.
          class Start < Base
            id 0
          end

          # Ping with an arbitrary payload, echoed back as Pong.
          class Ping < Base
            id 1
            field :payload, :long
          end
        end

        module Out
          class Base < Status::Base
            extend Clientbound
          end

          # Status response carrying the JSON document.
          class ServerInfo < Base
            id 0
            field :json, :string
          end

          class Pong < Base
            id 1
            field :payload, :long
          end
        end
      end

    end
  end
end
180 |
--------------------------------------------------------------------------------
/lib/minecraft/server.rb:
--------------------------------------------------------------------------------
1 | require "stratus"
2 | require "document"
3 | require "minecraft/protocol"
4 | require "minecraft/server/plugin"
5 |
# Represents a local Minecraft server in a Docker container.
class LocalServer
  include Document

  # path - working directory root; defaults to the home directory.
  def initialize(path=nil)
    @path = File.expand_path(path || "~")
  end

  # Get the absolute path that the server operates inside.
  def path
    @path
  end

  # Get the hostname of the container or unique ID of the server.
  # Replicated hosts keep the full hostname; unique ones drop the suffix.
  def id
    @id ||= Env.has?("replica") ? Env.host : Env.host.split("-").first
  end

  # Find the server ID from the container hostname.
  # Caches the canonical _id; raises when no server document matches.
  def fetch!
    if server = Stratus::Server.by_id_or_name(id)
      unless @id == server._id
        @id = server._id
      end
      server
    else
      raise "Unable to find server using #{id}"
    end
  end

  # Determine if the server is part of a tournament.
  # As a side effect, force-sets TOURNAMENT_ID when it is.
  def tournament?
    tm = role_cache == "PGM" && (network_cache == "TOURNAMENT" || Env.has?("tournament"))
    if tm
      Env.set("tournament_id", Stratus::Tournament.current._id, true)
    end
    tm
  end

  # Move over files from the data folder, format plugin configuration files,
  # ensure at least one map available, and inject server variables into text-based files.
  def load!
    ["base", server_path_cache].each do |folder|
      FileUtils.copy_entry("#{path}/data/servers/#{folder}", "#{path}/server")
    end
    [
      Plugin.new("API", true),
      Plugin.new("Commons", true),
      Plugin.new("PGM", role_cache == "PGM"),
      Plugin.new("Lobby", role_cache == "LOBBY"),
      Plugin.new("WorldEdit", role_cache != "BUNGEE"),
      Plugin.new("CommandBook", role_cache != "BUNGEE"),
      Plugin.new("Tourney", tournament?),
      Plugin.new("Raven", Env.has?("sentry_dsn")),
      Plugin.new("BuycraftX", Env.has?("buycraft_secret"))
    ].each do |plugin|
      plugin.load_and_save!(
        "#{path}/data/plugins/#{plugins_path_cache}",
        "#{path}/server/plugins"
      )
    end
    FileUtils.mkdir_p("#{path}/maps")
    # Dir.entries always includes "." and "..", so the previous
    # Dir.entries(...).empty? check could never be true and the default
    # world was never moved into the maps folder; exclude the dot entries.
    if role_cache == "PGM" && (Dir.entries("#{path}/maps") - %w[. ..]).empty?
      FileUtils.mv("world", "#{path}/maps/map")
    elsif role_cache == "LOBBY" && Dir.exist?("#{path}/maps/lobby")
      # Dir.exist? replaces Dir.exists?, which was removed in Ruby 3.2.
      FileUtils.rm_rf("world")
      FileUtils.copy_entry("#{path}/maps/lobby", "world")
    end
    # Expose every cached server attribute as an environment variable so
    # config files can reference them via ${VAR} substitution below.
    cache.to_h.each{|k,v| Env.set(k, v.to_s, true)}
    ["yml", "yaml", "json", "properties"].flat_map{|ext| Dir.glob("#{path}/server/**/*.#{ext}")}.each do |file|
      data = Env.substitute(File.read(file))
      File.open(file, "w") do |f|
        f.write(data)
      end
    end
    FileUtils.copy_entry("#{path}/server/plugins/API", "#{path}/server/plugins/API-OCN")
  end

  # Check if the server is responding to pings.
  def alive?
    Minecraft::Protocol.safe_status != nil
  end
end
88 |
--------------------------------------------------------------------------------
/lib/minecraft/server/plugin.rb:
--------------------------------------------------------------------------------
1 | require "environment"
2 | require "yaml"
3 | require "fileutils"
4 |
# Represents a Java plugin and its config file.
class Plugin

  # name - plugin name, matched case-insensitively against config basenames.
  # load - whether this plugin should be loaded at all.
  def initialize(name, load)
    @name = name
    @load = load
  end

  def name
    @name
  end

  # Whether this plugin is enabled for the current server.
  def load?
    @load
  end

  # Load the plugin's config from the data folder and write it into the
  # server folder; when the plugin is disabled, remove its jar instead
  # (best-effort — a missing jar is ignored).
  def load_and_save!(data_folder, server_folder)
    if data = load!(data_folder)
      save!(server_folder, data)
    else
      File.delete("#{server_folder}/#{name.downcase}.jar") rescue nil
    end
  end

  # Load the config hash for this plugin from the given folder, falling
  # back to the sibling "base" folder, and recursively deep-merging any
  # "parent" config it names. Returns nil when the plugin is disabled.
  def load!(folder)
    return unless load?
    file = Dir.glob("#{folder}/*").find do |f|
      name.downcase == File.basename(f, ".*").downcase
    end
    unless file
      # Fall back to the sibling "base" folder exactly once. The previous
      # check (`folder != "base"`) compared the full path to "base", which
      # never matched, so a missing config recursed into the same folder
      # forever (SystemStackError) instead of raising.
      return load!("#{File.dirname(folder)}/base") if File.basename(folder) != "base"
      raise "Unable to find config #{name.downcase} in folder #{folder}"
    end
    config = if (ext = File.extname(file)) == ".yml"
      YAML.load_file(file)
    else
      # Non-YAML configs are carried verbatim under "lines".
      {"lines" => File.read(file).to_s}
    end
    if parent = config["parent"]
      config = load!("#{File.dirname(folder)}/#{parent}").deep_merge(config)
    end
    config.merge({"ext" => ext})
  end

  # Write the config hash to <folder>/<name>/config<ext>; raw "lines"
  # content is written as-is, otherwise the hash is serialized as YAML.
  # The bookkeeping keys ("ext", "lines", "parent") are stripped first.
  def save!(folder, data)
    ext = data.delete("ext") or raise "Unable to get file extention for #{data.inspect}"
    lines = data.delete("lines")
    data.delete("parent")
    FileUtils.mkdir_p(directory = "#{folder}/#{name}")
    File.open("#{directory}/config#{ext}", "w") do |file|
      file.write(lines ? lines : data.to_yaml)
    end
  end
end
59 |
--------------------------------------------------------------------------------
/lib/stratus.rb:
--------------------------------------------------------------------------------
1 | require "recursive-open-struct"
2 | require "environment"
3 | require "rest-client"
4 | require "mongoid"
5 | require "json"
6 |
# Represents the RESTful API that interacts with the Stratus backend.
class Stratus

  # Generic base for implementing Stratus models.
  class Base < Stratus
    # @param route [String] path segment of the model ("servers", "users", ...)
    def initialize(route)
      @route = "/#{route}"
    end

    def route
      @route
    end

    # Update a model with the given partial document.
    def update(id, document)
      put("#{route}/#{id}", {document: document})
    end

    # Get a document based on ID or by the document's name.
    def by_id_or_name(idOrName)
      id?(idOrName) ? by_id(idOrName) : by_name(idOrName)
    end

    # Get a document by indexing the given name.
    # Subclasses override when the backend supports name lookup.
    def by_name(name)
      raise NotImplementedError, "Unable to index document by name"
    end

    # Get a document from its ID.
    def by_id(id)
      get("#{route}/#{id}")
    end

    # Get a list of documents given a hashed filter and other options.
    def search(filters={}, limit: nil, skip: nil)
      post("#{route}/search", {limit: limit, skip: skip}.merge(filters)).documents
    end
  end

  # Represents the API to interact with fetching and updating servers.
  class Servers < Base
    def initialize
      super("servers")
    end

    def by_name(name)
      get("#{route}/by_name/#{name}?bungee_name=true").documents.first
    end

    # Queue a restart on the server document.
    def restart(id, priority=0, reason="Automated restart queued")
      update(id, {restart_queued_at: Time.now, restart_reason: reason, restart_priority: priority})
    end

    # All servers, including offline and unlisted ones (up to 100).
    def all
      search({offline: true, unlisted: true}, limit: 100)
    end
  end

  Server = Stratus::Servers.new

  # Represents the API to interact with user information.
  class Users < Base
    def initialize
      super("users")
    end

    # Look up a user by username; nil when the lookup fails.
    def by_name(name)
      get("#{route}/by_username/#{name}") rescue nil
    end
  end

  User = Stratus::Users.new

  # Represents the API to interact with the current tournament data.
  class Tournaments < Base
    def initialize
      super("tournaments")
    end

    def search
      get("#{route}").documents
    end

    # The tournament with the latest end time (ongoing ones sort as "now").
    def current
      search.sort_by{|tm| tm.end ? Time.parse(tm.end) : Time.now}.last
    end
  end

  Tournament = Stratus::Tournaments.new

  protected

  # Check if a string is a legal ID.
  def id?(identifier)
    BSON::ObjectId.legal?(identifier)
  end

  # Perform an HTTP request, retrying up to +max_attempts+ times with a
  # one-second pause between attempts before raising.
  def request(rest, url, payload, max_attempts=3, attempts=0, exception=nil)
    if attempts >= max_attempts
      raise "Unrecoverable HTTP #{rest} request exception: #{exception}\n" +
            "(#{url}#{payload ? " with payload #{payload}" : ""})"
    end
    begin
      case rest
      when "GET"
        response = RestClient.get(url, options)
      when "PUT"
        response = RestClient.put(url, payload.to_json, options)
      when "POST"
        response = RestClient.post(url, payload.to_json, options)
      end
      json(response)
    rescue StandardError => error
      # Rescue StandardError, not Exception: rescuing Exception also
      # swallowed Interrupt/SystemExit/NoMemoryError, making the worker
      # impossible to kill cleanly during retries.
      sleep 1
      request(rest, url, payload, max_attempts, attempts + 1, error)
    end
  end

  # Send a POST request with an optional payload.
  def post(route, payload={})
    request("POST", url + route, payload)
  end

  # Send a PUT request with an optional payload.
  def put(route, payload={})
    request("PUT", url + route, payload)
  end

  # Send a simple GET request.
  def get(route)
    request("GET", url + route, nil)
  end

  Response = Class.new(RecursiveOpenStruct)

  # Parse a string response into a dynamic JSON object.
  def json(response)
    Response.new(JSON.parse(response), recurse_over_arrays: true)
  end

  # Get the default HTTP options for the REST client.
  def options
    @options ||= {
      accept: :json,
      content_type: :json,
      timeout: 3
    }
  end

  # Get the base URL of the Stratus API.
  # NOTE(review): the default carries no http:// scheme -- confirm
  # RestClient accepts "localhost:3010" in this deployment.
  def url
    @url ||= Env.get("api") || "localhost:3010"
  end

end
162 |
--------------------------------------------------------------------------------
/lib/worker.rb:
--------------------------------------------------------------------------------
1 | require "workers"
2 | require "environment"
3 | require "active_support/time"
4 |
# Represents a task that is run periodically in a background thread.
class Worker

  # Ensure exceptions pass to main thread
  Thread.abort_on_exception = true

  # Create a new background thread that calls #{run} every period of time.
  # This does not block the current thread, but exceptions will
  # pass all the way to the main thread.
  def run!(every=1.minute)
    raise "Worker is already running" if @timer
    log("Running every #{every.to_i} seconds")
    run # Run the code for the first time.
    @timer = Workers::PeriodicTimer.new(every.to_i) do
      run
    end
  end

  # Stop the background thread of this worker.
  def stop!
    raise "Worker is not currently running" unless @timer
    @timer.cancel
    @timer = nil
    log("Has been stopped")
  end

  protected

  # Print a timestamped, class-tagged log line to stdout.
  def log(message)
    print "[#{Time.now.to_formatted_s(:long_ordinal)}] [#{self.class.name}] #{message}\n"
  end

  private

  # Subclasses override with the work to perform on each tick.
  def run
    raise NotImplementedError, "Worker has no code to run"
  end

  class << self
    # Registry of [worker_class, interval, *constructor_args].
    # NOTE(review): @@worker is a class variable shared across the whole
    # inheritance tree -- intentional here, since subclasses register via
    # template/static_template and the END hook below (running in Worker's
    # lexical scope) must see that registration. Do not convert to a
    # class-instance variable without reworking the END hook.
    def worker(value=[])
      @@worker ||= value
    end

    # Whether this process was launched as a worker (per the "worker" env var).
    def worker?
      Env.has?("worker")
    end

    # Register a worker with fixed (hard-coded) constructor arguments.
    def static_template(*args, every: 1.minute)
      worker([self, every, *args])
    end

    # Register a worker whose arguments come from "key=value" pairs in
    # ARGV, falling back to the defaults given in +expected+.
    # Raises when a key has neither an ARGV override nor a default.
    def template(expected, every: 1.minute, i: 0)
      received = ARGV.map{|arg| arg.split("=")}.to_h
      args = Array.new(expected.size)
      expected.each do |key, val|
        # `or` binds looser than `=`: raise fires only when the whole
        # assignment evaluated to nil/false (no override and no default).
        args[i] = received[key.to_s] || val or \
          raise "Missing worker argument '#{key}'"
        i += 1
      end
      args.compact!
      worker([self, every, *args])
    end
  end

  # If the script is a worker, block the program from exiting naturally.
  END {
    if worker?
      clazz, every, *args = worker
      (args.empty? ? clazz.new
        : clazz.new(*args)).run!(every)
      while true
        sleep(1.day)
      end
    else
      worker(nil)
    end
  }
end
83 |
--------------------------------------------------------------------------------
/lib/worker/discord.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "environment"
3 | require "discordrb"
4 |
# Represents a Worker that includes a Discord bot integration.
class DiscordWorker < Worker

  # Connect the bot asynchronously and advertise it as ready.
  # @param command [String] optional command namespace (prefix becomes "/<command> ")
  def initialize(command="")
    bot(command)
    bot.run(:async)
    bot.online
    bot.update_status("Ready to go!", "Minecraft", nil)
  end

  # Lazily build the underlying Discordrb command bot.
  # NOTE(review): "supress_ready" looks misspelled -- verify against the
  # option names discordrb actually accepts before renaming.
  def bot(command="")
    @bot ||= Discordrb::Commands::CommandBot.new(
      prefix: command.empty? ? "/" : "/#{command} ",
      client_id: Env.need("discord_client_id"),
      token: Env.need("discord_key"),
      log_mode: :quiet,
      supress_ready: true,
      parse_self: false,
      redact_token: true,
      ignore_bots: true
    )
  end

  protected

  # Collect the IDs of every role (across all servers the bot can see)
  # whose name contains +name+, case-insensitively.
  # Uses flat_map instead of map + flatten!: flatten! returns nil when
  # there is nothing to flatten (e.g. the bot sees no servers), which
  # handed callers nil instead of an empty list.
  # @return [Array<Integer>] possibly empty list of role IDs
  def roles(name)
    bot.servers
      .values
      .flat_map{|server| server.roles
        .select{|role| role.name.downcase.include?(name.downcase)}
        .map{|role| role.id}}
  end

  # Delegate any unknown method straight to the bot.
  def method_missing(m, *args, &block)
    bot.send(m, *args, &block)
  end

  # Keep respond_to? consistent with the delegation above.
  def respond_to_missing?(m, include_private=false)
    bot.respond_to?(m, include_private) || super
  end
end
43 |
--------------------------------------------------------------------------------
/lib/worker/droplet.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "document"
3 | require "stratus"
4 | require "digital_ocean"
5 |
# Listens to changes in server documents and allocates droplets.
class DropletWorker < Worker
  include DigitalOcean
  include Document

  template(
    region: "nyc3", # Region slug
    size: "c-2", # Custom machine size
    image: "kubernetes", # Private snapshot image
    ssh_key: "stratus", # SSH-key for authentication
    script_path: "init.sh" # Bash script to execute on startup
  )

  # Template keys above, in the positional order the worker bootstrap
  # passes them to #initialize.
  ATTRIBUTE_KEYS = [:region, :size, :image, :ssh_key, :script_path]

  # Accept either a single attribute Hash or the positional template
  # values, normalizing to a Hash. (The old code kept the raw splat
  # Array, but #scale_up calls @attributes.merge -- Array has no #merge,
  # so every scale-up crashed.)
  def initialize(*attributes)
    @attributes = if attributes.size == 1 && attributes.first.is_a?(Hash)
      attributes.first
    else
      ATTRIBUTE_KEYS.take(attributes.size).zip(attributes).to_h
    end
  end

  # Cache a manifest of droplets and servers for the worker to query.
  def fetch!
    servers = servers_fetch
    droplets = droplets_fetch
    servers_to_droplet = servers.map{|server| [
      server,
      (droplets.select{|droplet| droplet.id.to_s == server.machine_id}.first rescue nil)
    ]}.to_h
    {
      droplets: droplets,
      droplets_to_server: servers_to_droplet.invert,
      servers: servers,
      servers_to_droplet: servers_to_droplet
    }
  end

  # Periodically check servers to determine if any droplets need to be scaled.
  def run
    servers.each do |server|
      droplet = server_to_droplet(server)
      if !droplet
        if server.machine_id
          scale_fix(server)
        elsif server.ensure == "running"
          scale_up(server)
        end
      elsif server.ensure == "stopping"
        scale_down(server)
      end
    end
    droplets.each do |droplet|
      # Only destroy tagged droplets near the end of their billable hour.
      if droplet_can_delete?(droplet) && droplet.tags.include?("delete")
        log("Destroying the queued Droplet #{droplet.name}")
        droplet.destroy!
      end
    end
  end

  # Create a new droplet for the given server.
  def scale_up(server)
    raise "Droplet already exists for #{server.name}" if server_to_droplet(server)
    log("Creating a Droplet for #{server.name}")
    # @attributes is a Hash (see #initialize): merge in the server-specific
    # name and splat into the keyword parameters of #droplet_create.
    if droplet = droplet_create(**@attributes.merge(name: server.bungee_name))
      server_set_droplet(server, droplet)
      droplet.untag("delete")
    end
  end

  # Shutdown and destroy the droplet for the given server.
  # Droplets mid billing-hour are tagged for deferred deletion instead.
  def scale_down(server)
    raise "Droplet does not exist for #{server.name}" unless droplet = server_to_droplet(server)
    if droplet_can_delete?(droplet)
      log("Deleting the Droplet for #{server.name}")
      droplet.destroy!
    else
      log("Queuing the Droplet for #{server.name} to be deleted before its next billable hour")
      droplet.tag("delete")
    end
    server_set_droplet(server, nil)
  end

  # Remove any ensure commitments for the given server.
  # This can occur when the droplet is deleted outside the worker.
  def scale_fix(server)
    raise "Server #{server.name} is not assigned a Droplet to fix" unless server.machine_id
    log("Removing #{server.ensure} ensure from #{server.name} because Droplet was externally deleted")
    server_set_ensure(server, nil)
    server_set_droplet(server, nil)
  end

  protected

  # How long the fetch! manifest stays valid.
  def cache_duration
    10.seconds
  end

  def servers
    cache[:servers]
  end

  def servers_fetch
    Stratus::Server.all
  end

  def server_to_droplet(server)
    cache[:servers_to_droplet][server]
  end

  # Persist (or clear) the droplet assignment on the server document.
  def server_set_droplet(server, droplet)
    Stratus::Server.update(server._id, {machine_id: droplet ? droplet.id.to_s : nil})
    refresh!
  end

  def server_set_ensure(server, status)
    Stratus::Server.update(server._id, {ensure: status})
    refresh!
  end

  def droplets
    cache[:droplets]
  end

  def droplets_fetch
    digital_ocean.droplets.all.to_a
  end

  def droplet_to_server(droplet)
    cache[:droplets_to_server][droplet]
  end

  # A droplet is deletable during the last 5 minutes of its billable hour.
  def droplet_can_delete?(droplet)
    (Time.now - Time.parse(droplet.created_at)) % 1.hour >= (1.hour - 5.minutes)
  end

  # Create a droplet after validating image/region/size compatibility.
  # Returns the existing droplet if one with the same name already exists.
  def droplet_create(name:, image:, region:, size:, ssh_key:, tags: [], script_path: nil)
    if exists = digital_ocean.droplets.find_one(name)
      return exists
    end
    image = digital_ocean.images.find_one_or_throw(image)
    region = digital_ocean.regions.find_one_or_throw(region)
    unless image.regions.include?(region.slug)
      raise "Image #{image.name} is not avaiable in the #{region.name} region"
    end
    size = digital_ocean.sizes.find_one_or_throw(size)
    unless region.sizes.include?(size.slug)
      raise "Size #{size.slug} is not avaiable in the #{region.name} region"
    end
    if size.disk < image.min_disk_size
      raise "Size #{size.slug} does not have enough disk space for the #{image.name} image"
    end
    ssh_key = digital_ocean.ssh_keys.find_one_or_throw(ssh_key)
    script = script_path ? File.read(File.expand_path("#{script_path}")).to_s : ""
    droplet = digital_ocean.droplets.create(DropletKit::Droplet.new(
      name: name,
      region: region.slug,
      size: size.slug,
      image: image.id,
      ssh_keys: [ssh_key.id],
      tags: tags,
      user_data: script,
      ipv6: true,
      private_networking: true,
      monitoring: true
    ))
    refresh!
    droplet
  end
end
172 |
--------------------------------------------------------------------------------
/lib/worker/firewall.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "google"
3 | require "digital_ocean"
4 |
# Ensures that droplets are whitelisted on the API firewall on Google.
class FirewallWorker < Worker
  include Google
  include DigitalOcean

  template(
    name: nil # Name of the Google firewall resource
  )

  # Build an authenticated Compute client for the named firewall resource.
  def initialize(name)
    @name = name
    @service = auth(Google::Apis::ComputeBeta::ComputeService.new)
  end

  # Sync the firewall's source ranges with the current droplet IPs,
  # patching only when they differ and logging what changed.
  def run
    firewall = @service.get_firewall(project_id, @name) \
      or raise "Unable to find firewall #{@name}"
    current = firewall.source_ranges.sort
    desired = digital_ocean.droplets.all.map(&:ip).sort
    return if current == desired
    patch = Google::Apis::ComputeBeta::Firewall.new
    patch.source_ranges = desired
    @service.patch_firewall(project_id, @name, patch)
    added = desired - current
    removed = current - desired
    log("Added #{added} to firewall #{@name}") unless added.empty?
    log("Removed #{removed} from firewall #{@name}") unless removed.empty?
  end
end
37 |
--------------------------------------------------------------------------------
/lib/worker/label.rb:
--------------------------------------------------------------------------------
1 | require "set"
2 | require "worker"
3 | require "kubernetes"
4 | require "digital_ocean"
5 |
# Ensures that droplet tags are properly converted into node labels.
class LabelWorker < Worker
  include DigitalOcean
  include Kubernetes

  template(
    prefix: "stratus.network" # Name of the parent label
  )

  # @param parent_label [String] namespace prefix applied to synced labels
  def initialize(parent_label)
    @parent_label = parent_label
  end

  # Sync tags -> labels for every node backed by a same-named droplet.
  def run
    droplets = digital_ocean.droplets.all
    nodes.each do |node|
      if droplet = (droplets.select{|droplet| droplet.name == node.name}.first rescue nil)
        tags_to_labels(droplet, node)
      end
    end
  end

  # Compute and apply the label patch that makes the node's prefixed
  # labels mirror the droplet's tags (nil values delete a label).
  def tags_to_labels(droplet, node)
    return unless droplet && node
    expected = tags(droplet)
    actual = labels(node)
    patch = {}
    # Add keys that are defined as tags on the droplet,
    # but are not labels on the node.
    for key in expected
      unless actual.include?(key)
        patch[key] = "true"
        # Delete keys of already synced labels
        # to prevent double-checking.
        actual.delete(key)
      end
    end
    # Check for labels that must be removed,
    # because there is no corresponding tag.
    for key in actual
      unless expected.include?(key)
        patch[key] = nil
      end
    end
    # Only send the patch if there are any changes.
    unless patch.empty?
      log("Node #{node.name} patched with labels #{patch.to_s}")
      node.label(patch)
    end
  end

  # Droplet tags namespaced under the parent label, as a Set.
  def tags(droplet)
    droplet.tags
      .map{|tag| "#{@parent_label}/#{tag}"}
      .to_set
  end

  # Node labels under the parent label, as a Set.
  # NOTE(review): #starts_with? and #first on strings are ActiveSupport
  # extensions (core Ruby is #start_with?), and if node.labels yields
  # plain label-key strings then .first returns only the first character.
  # Verify the shape returned by node.labels and that ActiveSupport's
  # string core_ext is loaded before trusting this method.
  def labels(node)
    node.labels
      .select{|label| label.starts_with?(@parent_label)}
      .map{|label| label.first}
      .to_set
  end
end
70 |
--------------------------------------------------------------------------------
/lib/worker/node.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "kubernetes"
3 | require "digital_ocean"
4 |
# Ensures that Nodes are healthy and synced with Droplets.
class NodeWorker < Worker
  include DigitalOcean
  include Kubernetes

  template({}) # No arguments required

  # Reconcile each Kubernetes node against its backing droplet:
  # destroy orphaned nodes, delete long-dead droplets, reboot silent ones.
  def run
    by_name = {}
    digital_ocean.droplets.all.each{|droplet| by_name[droplet.name] = droplet}
    nodes.each do |node|
      droplet = by_name[node.name]
      unless droplet
        log("Destroying #{node.name} node since its Droplet is missing")
        node.destroy!
        next
      end
      silence = Time.now - node.last_heartbeat
      if silence >= 15.minutes
        log("Deleting #{droplet.name} droplet because its been offline for too long")
        droplet.destroy!
      elsif silence >= 5.minutes
        log("Rebooting #{droplet.name} droplet because its heartbeat is not responding")
        droplet.reboot
      end
    end
  end
end
29 |
--------------------------------------------------------------------------------
/lib/worker/org.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "git"
3 |
# Add members of a Github orginization as collaborators to an external repository.
class OrgWorker < Worker
  include Github

  ADMIN = "Administrators"
  DEV = "Developers"
  JR_DEV = "Junior Developers"
  MAP_DEV = "Map Developers"
  ALL = [ADMIN, DEV, JR_DEV, MAP_DEV]

  static_template(
    "StratusNetwork",
    [
      {repo: "Electroid/maps", teams: ALL},
      {repo: "Electroid/plugins", teams: [ADMIN, DEV, JR_DEV]},
      {repo: "Electroid/infrastructure", teams: [ADMIN, DEV]}
    ],
    every: 1.day
  )

  # Resolve the org, its teams, and each configured repo/team pairing.
  def initialize(org, data)
    @org = github.org(org)
    @teams = github.org_teams(@org[:id]).map{|team| [team[:name], team]}.to_h
    @data = data.map{|entry| resolve(entry)}
  end

  # Sync collaborators on every configured repo with the current
  # membership of its associated teams.
  def run
    @members = @teams.map{|name, team| [name, github.team_members(team[:id]).map(&:login)]}.to_h
    @data.each do |entry|
      repo = entry[:repo]
      collaborators = github.collaborators(repo[:id]).map(&:login)
      members = entry[:teams].flat_map{|team| @members[team[:name]]}.uniq
      (members - collaborators).each do |add|
        log("Adding #{add} to #{repo[:name]}")
        github.add_collaborator(repo[:id], add)
      end
      (collaborators - members).each do |remove|
        log("Removing #{remove} from #{repo[:name]}")
        github.remove_collaborator(repo[:id], remove)
      end
    end
  end

  private

  # Validate one {repo:, teams:} entry and swap names for API objects.
  def resolve(data)
    unless data.key?(:teams) && data.key?(:repo)
      raise "Unable to parse #{data}, must have 'teams' and 'repo' keys"
    end
    {
      repo: (github.repo(data[:repo]) or raise "Unable to find repo '#{data[:repo]}'"),
      teams: data[:teams].map{|team| @teams[team] or raise "Unable to find team '#{team}'"}
    }
  end
end
56 |
--------------------------------------------------------------------------------
/lib/worker/pod.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "kubernetes"
3 | require "stratus"
4 | require "git"
5 |
# Ensures that Pods are healthy and properly deployed.
# TODO: Still in progress, yet to change
class PodWorker < Worker
  include Kubernetes

  # Clone (or open) the deploy repo and remember the manifest root.
  # NOTE(review): @path is "<git_repo>/<deploy_path>" while the checkout
  # is cloned into git_path -- confirm these line up when the TODO above
  # is resolved.
  def initialize(git_user, git_repo, git_path, deploy_path)
    @path = "#{git_repo}/#{deploy_path}"
    git("https://github.com/#{git_user}/#{git_repo}.git", git_repo, git_path)
  end

  # Pull the latest manifests and kubectl apply/delete each server's
  # manifest according to its "ensure" state.
  def run
    git.pull
    servers.each do |server|
      if path = server.update_server_path
        if server.ensure == "running"
          system("kubectl apply -f #{@path}/#{path}")
        elsif server.ensure == "stopping"
          system("kubectl delete -f #{@path}/#{path}")
        end
      end
    end
  end

  protected

  # Memoized repo handle: clone on first call, falling back to opening
  # an existing checkout when the clone fails.
  def git(uri="", name="", path="")
    @git ||= begin
      Git.clone(uri, name, :path => path)
    rescue Git::GitExecuteError => e
      Git.open(name)
    end
  end

  # All known server documents from the Stratus API.
  def servers
    Stratus::Server.all
  end
end
43 |
--------------------------------------------------------------------------------
/lib/worker/repo.rb:
--------------------------------------------------------------------------------
1 | require "worker"
2 | require "github"
3 | require "fileutils"
4 |
# Keeps a Github repo in sync with a local directory.
class RepoWorker < Worker
  include Github

  template(
    repo: nil, # Git repo (ie. Electroid/infrastructure)
    branch: "master", # Git branch
    dir: "data", # Git directory
    hook: "pwd" # Hook command (ie. curl http://example.com/webhook)
  )

  # Prepare the local checkout of +repo+ on +branch+ at +path+,
  # optionally running +hook+ after each successful update.
  def initialize(repo, branch, path, hook=nil)
    @repo = repo
    @path = File.expand_path(path)
    @parent_path = File.dirname(@path)
    @name = File.basename(@path)
    @uri = "https://#{github_key}:x-oauth-basic@github.com/#{repo}.git"
    @branch = branch
    @hook = hook
    clone
  end

  # Pull and run the hook whenever the remote has new commits.
  def run
    hook if update? && pull
  end

  # Called after successful update of repository.
  def hook
    log("Updated #{@repo} to '#{%x(git log --oneline -1).strip}'")
    execute(@hook, false)
  end

  protected

  # Has the remote branch been updated?
  # Compares the repo's pushed_at timestamp against the last one seen.
  def update?
    previous = @pulled_at
    @pulled_at = github.repo(@repo)[:pushed_at]
    previous != @pulled_at
  end

  # Pull the latest changes from the remote branch.
  def pull
    execute("git fetch --depth 1") &&
      execute("git reset --hard origin/#{@branch}") &&
      execute("git clean -dfx")
  end

  # Initially clone or fix the repository before pull.
  def clone
    FileUtils.mkdir_p(@parent_path)
    if Dir.exist?(@path)
      if !Dir.exist?(File.join(@path, ".git"))
        log("Removing empty repository")
      elsif @uri != (uri = %x(cd #{@path} && git config --get remote.origin.url).strip)
        log("Removing another repository: #{uri.split("/").last.split(".").first}")
      # %x(...) executes the command; the previous %(...) built a literal
      # string, so this branch check could never match and every valid
      # checkout was wiped on startup.
      elsif @branch != (branch = %x(cd #{@path} && git symbolic-ref --short -q HEAD).strip)
        log("Removing another branch: #{branch}")
      else
        valid = true
        log("Found valid repository: #{@name}")
      end
      FileUtils.rm_rf(@path) unless valid
    elsif execute("git clone --single-branch --depth 1 -b #{@branch} #{@uri} #{@path}")
      log("Cloned initial repository: #{@name}")
    else
      log("Failed to clone initial repository: #{@uri}")
      exit(1)
    end
    Dir.chdir(@path)
  end

  # Execute a shell command, returning whether it succeeded.
  # Exits the program on failure unless +fails+ is false.
  def execute(cmd, fails=true)
    success = system("#{cmd}", :out => File::NULL)
    unless success
      log("Error executing shell command: '#{cmd}'")
      exit(1) if fails
    end
    success
  end
end
86 |
--------------------------------------------------------------------------------
/lib/worker/server.rb:
--------------------------------------------------------------------------------
1 | require "worker/discord"
2 | require "stratus"
3 |
# Responds to Discord commands to spin-up Minecraft servers.
class ServerWorker < DiscordWorker

  # Deallocate empty servers and ensure unallocated ones are stopped.
  def run
    servers_empty.each do |server|
      deallocate(server)
    end
    servers_unallocated.each do |server|
      server_down(server)
    end
  end

  # Mark the server document as running; subclasses may repurpose it.
  def server_up(server, name)
    Stratus::Server.update(server._id, {ensure: "running"})
  end

  # Mark the server document as stopping.
  def server_down(server)
    Stratus::Server.update(server._id, {ensure: "stopping"})
  end

  # Whether this worker is responsible for the given server.
  def server_manage?(server)
    raise NotImplementedError
  end

  protected

  # Run the given command block, reacting with a checkmark on success or
  # a cross on failure, then replying with the block's result or error.
  def respond(event)
    reply = begin
      # \u2705 (white check mark) signals the command body succeeded --
      # the previous code had the success/failure reactions swapped.
      ["\u2705", "#{yield}"]
    rescue StandardError => e
      # \u274c (cross mark) + surfaced error. StandardError, not
      # Exception, so interrupts/exits still propagate.
      ["\u274c", "An exception occurred while running your command!\n```#{e}```"]
    end
    event.react(reply.first)
    # Reply via the event; `message` was an undefined name here. `last`
    # avoids depending on ActiveSupport's Array#second.
    event.respond(reply.last)
  end

  # Repurpose the oldest unallocated server for the given name.
  def allocate(name)
    server = servers_next or raise "No servers left to allocate #{name}"
    log("Allocating old server #{server.name} for new server #{name}")
    server_up(server, name)
  end

  # Restart the server, wait (up to 30s) for it to go offline, then stop it.
  def deallocate(server)
    log("Deallocating server #{server}")
    server = server_restart(server)
    start = Time.now
    while Stratus::Server.by_id(server._id).online
      sleep(1)
      break if Time.now - start >= 30.seconds
    end
    server_down(server)
  end

  # Managed servers, oldest documents first.
  def servers
    Stratus::Server.all
      .select{|server| server_manage?(server)}
      .sort_by{|server| BSON::ObjectId.from_string(server._id).generation_time}
  end

  def servers_allocated
    servers.select{|server| server_is_allocated?(server)}
  end

  def servers_unallocated
    servers - servers_allocated
  end

  # Allocated servers that are online with nobody on them.
  def servers_empty
    servers_allocated.select{|server| server.online && server.num_online <= 0}
  end

  # Next candidate for allocation, or nil when none remain.
  # (Array#first never raises, so the old `rescue nil` was dead code.)
  def servers_next
    servers_unallocated.first
  end

  def server_is_allocated?(server)
    server.ensure == "running"
  end

  def server_restart(server)
    Stratus::Server.restart(server._id, 100, "Server has been automatically deallocated")
  end

  # Rewrite a server document for a new role (lobby, private, tournament...).
  # Naming, bungee routing, DNS, and visibility are derived from the flags.
  def server_reset(server, name: nil, user: nil, lobby: false, priv: false, tm: false, index: 0, priority: nil)
    name = if user
      user.username
    elsif name == nil
      "Unknown-#{priority ? priority % 10 : Random.new.rand(99)}"
    else
      name
    end
    # Indexed servers (replica index > 0) get a numbered variant of the name.
    name_indexed = if indexed = index > 0
      if tm && !lobby
        "#{index < 10 ? "0" : ""}#{index}#{name.downcase}"
      else
        "#{name}-#{index}"
      end
    end
    bungee_name = (indexed ? name_indexed : name).downcase
    # Stateful-set pod DNS for indexed servers, otherwise user id or name.
    ip = if indexed
      "#{name.downcase}-#{index-1}.#{name.downcase}.#{tm ? "tm" : "default"}.svc.cluster.local"
    elsif user
      user._id
    else
      name.downcase
    end
    name = indexed ? name_indexed : name
    Stratus::Server.update(server._id, {
      name: name,
      bungee_name: bungee_name,
      ip: ip,
      priority: (priority || 0) + index,
      online: false,
      whitelist_enabled: !lobby && (priv || tm) ? true : false,
      settings_profile: priv || tm ? "private" : "",
      datacenter: tm ? "TM" : "US",
      box: tm ? "tournament" : "production",
      family: lobby ? "lobby" : "pgm",
      role: lobby ? "LOBBY" : "PGM",
      network: tm ? "TOURNAMENT" : "PUBLIC",
      visibility: "UNLISTED",
      startup_visibility: priv ? "UNLISTED" : "PUBLIC",
      realms: ["global", tm ? "tournament" : "normal"],
      operator_ids: user ? [user._id] : []
    })
  end
end
131 |
--------------------------------------------------------------------------------
/lib/worker/server/generic.rb:
--------------------------------------------------------------------------------
1 | require "worker/server"
2 |
# Discord worker exposing /server enable|disable for admins.
class GenericWorker < ServerWorker

  # Register this worker with its fixed command prefix and required role.
  # NOTE(review): was `instance(...)`, which Worker does not define;
  # static_template is Worker's registration hook and its arguments
  # match #initialize(command, role).
  static_template("/server", "admin")

  def initialize(command, role)
    super(command)
    bot.command(:enable,
      required_roles: roles(role),
      min_args: 1,
      max_args: 1,
      usage: "#{command} enable [server]") do |event, name|
      server_up_or_down(true, event, name)
    end
    bot.command(:disable,
      required_roles: roles(role),
      min_args: 1,
      max_args: 1,
      usage: "#{command} disable [server]") do |event, name|
      server_up_or_down(false, event, name)
    end
  end

  # Handle an enable/disable command for the named server, replying
  # through the event with the outcome.
  def server_up_or_down(up, event, name)
    respond(event) do
      if server = Stratus::Server.by_name(name)
        if server_manage?(server)
          if up
            allocate(name)
            "Server #{name} will be online in about two minutes!"
          else
            # deallocate operates on the server document, not its name.
            deallocate(server)
            "Server #{name} will be offline momentarily!"
          end
        else
          "Unable to manage server '#{name}'"
        end
      else
        "Unable to find server '#{name}'"
      end
    end
  end

  # A server is manageable unless it belongs to the bungee or tm families.
  # (&&, not ||: the old disjunction was true for every server.)
  def server_manage?(server)
    server.family != "bungee" && server.family != "tm"
  end
end
49 |
--------------------------------------------------------------------------------
/lib/worker/server/private.rb:
--------------------------------------------------------------------------------
1 | require "worker/server"
2 |
# NOTE(review): this class shares its name with the one in
# lib/worker/server/generic.rb; loading both files re-opens the same
# class. Given it manages private "pgm" servers, it was likely meant to
# be named PrivateWorker -- confirm before renaming, since the name is
# referenced by whatever loads this file.
class GenericWorker < ServerWorker

  def initialize
    super
    # /server <name>: allocate a private server for the named user/server.
    bot.command(:server,
      required_roles: roles("admin"),
      min_args: 1,
      max_args: 1,
      usage: "/enable [server]") do |event, name|
      respond(event) do
        allocate(name)
        "Server #{name} has been allocated and will be online in a few minutes!"
      end
    end
  end

  # Only manage servers in the "pgm" family.
  def server_manage?(server)
    server.family == "pgm"
  end

  # Reset the server document as a private game, named after the user
  # when one matches the given name.
  # NOTE(review): ServerWorker#server_reset does not accept a :status
  # keyword, so this call raises ArgumentError -- decide whether :status
  # should be dropped here or server_reset extended to set "ensure".
  def server_up(server, name)
    user = Stratus::User.by_name(name)
    server_reset(server,
      name: user ? nil : name,
      user: user,
      priority: 200 + (server.priority % 10),
      priv: true,
      status: "running"
    )
  end

  # Reset the server document back to an unallocated private slot.
  # NOTE(review): same :status keyword concern as #server_up.
  def server_down(server)
    server_reset(server,
      name: "Private",
      priority: 200 + (server.priority % 10),
      priv: true,
      status: "stopping"
    )
  end
end
43 |
--------------------------------------------------------------------------------
/manifests/backend/couch.yml:
--------------------------------------------------------------------------------
# Headless service giving the CouchDB StatefulSet pod a stable DNS name.
apiVersion: v1
kind: Service
metadata:
  name: couch
  labels:
    role: couch
spec:
  clusterIP: None
  ports:
  - port: 5984
    name: couch
  selector:
    role: couch
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: couch
spec:
  storageClassName: do-block-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: couch
spec:
  serviceName: couch
  selector:
    matchLabels:
      role: couch
  template:
    metadata:
      labels:
        role: couch
    spec:
      containers:
      - name: couch
        image: couchdb:1.7
        ports:
        - containerPort: 5984
          name: couch
        volumeMounts:
        # mountPath must not contain ":" (the former ":rw" suffix is
        # docker-run syntax, rejected by the Kubernetes API); volume
        # mounts are read-write by default.
        - name: couch
          subPath: data
          mountPath: /opt/couchdb/data
        - name: couch
          subPath: config
          mountPath: /opt/couchdb/etc
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: couch
        persistentVolumeClaim:
          claimName: couch
62 |
--------------------------------------------------------------------------------
/manifests/backend/mongo.yml:
--------------------------------------------------------------------------------
# Headless service giving the MongoDB StatefulSet pod a stable DNS name.
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    role: mongo
spec:
  clusterIP: None
  ports:
  - port: 27017
    name: mongo
  selector:
    role: mongo
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongo
spec:
  storageClassName: do-block-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 25Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
spec:
  serviceName: mongo
  selector:
    matchLabels:
      role: mongo
  template:
    metadata:
      labels:
        role: mongo
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: 'true'
    spec:
      affinity:
        # Never co-schedule two mongo pods on the same node.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                role: mongo
      containers:
      - name: mongo
        image: mongo:4.2.0
        imagePullPolicy: IfNotPresent
        command:
        - mongod
        - --storageEngine=wiredTiger
        - --wiredTigerCacheSizeGB=2.0
        - --wiredTigerJournalCompressor=zstd
        - --wiredTigerCollectionBlockCompressor=zstd
        - --wiredTigerIndexPrefixCompression=true
        - --bind_ip_all
        - --quiet
        - --serviceExecutor=adaptive
        - --enableFreeMonitoring=on
        - --setParameter=ttlMonitorEnabled=false
        ports:
        - containerPort: 27017
        volumeMounts:
        # mountPath must not contain ":" (the former ":rw" suffix is
        # docker-run syntax, rejected by the Kubernetes API); mounts are
        # read-write by default.
        - name: mongo
          mountPath: /data/db
        livenessProbe:
          exec:
            command:
            - /bin/sh
            - -c
            - echo 'db.stats().ok' | mongo --quiet || exit 1
        resources:
          requests:
            cpu: 200m
            memory: 2Gi
          # limits intentionally left unset; the bare "limits:" key with
          # only commented children parsed as an explicit null value.
          # limits:
          #   cpu: 1000m
          #   memory: 2Gi
      volumes:
      - name: mongo
        persistentVolumeClaim:
          claimName: mongo
88 |
--------------------------------------------------------------------------------
/manifests/backend/rabbit.yml:
--------------------------------------------------------------------------------
# RabbitMQ 3.6: credentials Secret, headless Service, single-node StatefulSet.
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: rabbit-secret
stringData:
  # NOTE(review): default admin/admin credentials — presumably reachable
  # only inside the cluster; confirm before exposing this service.
  RABBITMQ_DEFAULT_USER: admin
  RABBITMQ_DEFAULT_PASS: admin
  RABBITMQ_ERLANG_COOKIE: stratus
  RABBITMQ_NODENAME: rabbit@rabbit-0
---
apiVersion: v1
kind: Service
metadata:
  name: rabbit
  labels:
    role: rabbit
spec:
  clusterIP: None
  ports:
  - port: 5672
    name: amqp
  - port: 4369
    name: epmd
  - port: 25672
    name: dist
  selector:
    role: rabbit
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: rabbit
spec:
  serviceName: rabbit
  selector:
    matchLabels:
      role: rabbit
  template:
    metadata:
      labels:
        role: rabbit
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: 'true'
    spec:
      containers:
      - name: rabbit
        image: rabbitmq:3.6-management-alpine
        envFrom:
        - secretRef:
            name: rabbit-secret
        ports:
        - name: queue
          containerPort: 5672
        - name: management
          containerPort: 15672
        - name: cluster
          containerPort: 4369
        - name: dist
          containerPort: 25672
        livenessProbe:
          initialDelaySeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - rabbitmqctl
            - status
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        # mountPath must not contain ':' — ":rw" suffix removed
        # (read-write is the Kubernetes default).
        - name: volume
          mountPath: /var/lib/rabbitmq
      volumes:
      # emptyDir: queue state does not survive pod rescheduling.
      - name: volume
        emptyDir:
78 |
--------------------------------------------------------------------------------
/manifests/backend/redis.yml:
--------------------------------------------------------------------------------
# Redis backend: headless Service + single-replica StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    role: redis
spec:
  clusterIP: None
  ports:
  - port: 6379
    name: redis
  selector:
    role: redis
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis
spec:
  serviceName: redis
  selector:
    matchLabels:
      role: redis
  template:
    metadata:
      labels:
        role: redis
    spec:
      containers:
      - name: redis
        # NOTE(review): no volume is mounted, so data is lost when the pod
        # restarts — presumably a cache/session store; confirm.
        image: redis:4.0.1-alpine
        ports:
        - containerPort: 6379
          name: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
39 |
--------------------------------------------------------------------------------
/manifests/minecraft/bungee.yml:
--------------------------------------------------------------------------------
# Public Minecraft entry point: DigitalOcean LoadBalancer Service +
# two-replica BungeeCord proxy StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: bungee
  labels:
    role: bungee
  annotations:
    service.beta.kubernetes.io/do-loadbalancer-algorithm: least_connections
    service.beta.kubernetes.io/do-loadbalancer-protocol: tcp
    # Annotation values must be strings; a bare YAML boolean is rejected
    # by the API server, so 'true' is quoted.
    service.beta.kubernetes.io/do-loadbalancer-enable-proxy-protocol: 'true'
spec:
  type: LoadBalancer
  clusterIP: 10.245.0.65
  ports:
  - targetPort: 25565
    port: 25565
    name: minecraft
  selector:
    role: bungee
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: bungee
spec:
  serviceName: bungee
  replicas: 2
  selector:
    matchLabels:
      role: bungee
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        role: bungee
    spec:
      # Prefer spreading the two proxies across different nodes.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  role: bungee
      containers:
      - name: bungee
        image: gcr.io/stratus-197318/minecraft:bungee-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        # A second, duplicate "livenessProbe" key (tcpSocket on 25565) used
        # to follow this one. Duplicate mapping keys are invalid YAML, and
        # in parsers that take the last key the tcpSocket probe silently
        # shadowed this exec probe. The exec probe is kept to match the
        # other minecraft manifests.
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        ports:
        - containerPort: 25565
          name: minecraft
        resources:
          requests:
            cpu: 300m
            memory: 300Mi
        envFrom:
        - secretRef:
            name: minecraft-secret
        env:
        - name: REPLICA
          value: enabled
89 |
--------------------------------------------------------------------------------
/manifests/minecraft/event.yml:
--------------------------------------------------------------------------------
# Event server: headless Service + StatefulSet on the bukkit-master image,
# serving maps from the shared "git" PVC.
apiVersion: v1
kind: Service
metadata:
  name: event
  labels:
    role: event
spec:
  clusterIP: None
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: event
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: event
spec:
  serviceName: event
  selector:
    matchLabels:
      role: event
  template:
    metadata:
      labels:
        role: event
    spec:
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
        envFrom:
        - secretRef:
            name: minecraft-secret
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: git
          subPath: maps-master
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
71 |
72 |
--------------------------------------------------------------------------------
/manifests/minecraft/lobby.yml:
--------------------------------------------------------------------------------
# Main lobby: Service with a pinned ClusterIP + StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: lobby
  labels:
    role: lobby
spec:
  # Give Lobby dedicated IP so restarts don't cause
  # DNS issues where the cached IP is served.
  clusterIP: 10.245.0.69
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: lobby
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: lobby
spec:
  serviceName: lobby
  selector:
    matchLabels:
      role: lobby
  template:
    metadata:
      labels:
        role: lobby
    spec:
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
        envFrom:
        - secretRef:
            name: minecraft-secret
        env:
        - name: REPLICA
          value: enabled
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: git
          subPath: maps-master
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
76 |
--------------------------------------------------------------------------------
/manifests/minecraft/mapdev.yml:
--------------------------------------------------------------------------------
# Map-development server: headless Service, dropbox PVC, and a StatefulSet
# pairing the bukkit server with a Dropbox sync sidecar on a shared volume.
apiVersion: v1
kind: Service
metadata:
  name: mapdev
  labels:
    role: mapdev
spec:
  clusterIP: None
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: mapdev
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dropbox
spec:
  storageClassName: do-block-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mapdev
spec:
  serviceName: mapdev
  selector:
    matchLabels:
      role: mapdev
  template:
    metadata:
      labels:
        role: mapdev
    spec:
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
        envFrom:
        - secretRef:
            name: minecraft-secret
        volumeMounts:
        # mountPath must not contain ':' — ":rw" suffix removed
        # (read-write is the Kubernetes default).
        - name: dropbox
          mountPath: /minecraft/maps
      # Sidecar syncing the shared maps volume with a Dropbox folder.
      - name: dropbox
        image: gcr.io/stratus-197318/dropbox:master
        imagePullPolicy: Always
        envFrom:
        - secretRef:
            name: dropbox-secret
        env:
        - name: DROPBOX_FOLDER_REMOTE
          value: /mapdev
        - name: DROPBOX_FOLDER_LOCAL
          value: /minecraft/maps
        resources:
          requests:
            cpu: 100m
            memory: 500Mi
          limits:
            cpu: 200m
            memory: 500Mi
        volumeMounts:
        - name: dropbox
          mountPath: /minecraft/maps
      volumes:
      - name: dropbox
        persistentVolumeClaim:
          claimName: dropbox
103 |
--------------------------------------------------------------------------------
/manifests/minecraft/mixed.yml:
--------------------------------------------------------------------------------
# Mixed game server: headless Service + StatefulSet on the bukkit image.
apiVersion: v1
kind: Service
metadata:
  name: mixed
  labels:
    role: mixed
spec:
  clusterIP: None
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: mixed
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mixed
spec:
  serviceName: mixed
  selector:
    matchLabels:
      role: mixed
  template:
    metadata:
      labels:
        role: mixed
    spec:
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 1000m
            memory: 1Gi
        envFrom:
        - secretRef:
            name: minecraft-secret
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: git
          subPath: maps-master
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
71 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/bungee/daemon-set.yml:
--------------------------------------------------------------------------------
# Tournament proxy: one instance per tm-labeled node, published directly on
# the node network via hostNetwork + hostPort 25575.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: proxy
  namespace: tm
spec:
  selector:
    matchLabels:
      role: proxy
  template:
    metadata:
      labels:
        role: proxy
        type: minecraft
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      # Only schedule on nodes labeled tm=true.
      nodeSelector:
        tm: 'true'
      containers:
      - name: proxy
        # NOTE(review): different GCP project id (stratus-181519) than the
        # other images in this repo (stratus-197318) — confirm intentional.
        image: gcr.io/stratus-181519/proxy:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 25575
          hostPort: 25575
          name: minecraft
        env:
        - name: PROXY_HOST
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: PROXY_EXTERNAL_PORT
          value: '25575'
        - name: PROXY_INTERNAL_PORT
          value: '25576'
        # NOTE(review): same value as PROXY_INTERNAL_PORT — verify the
        # secondary port was not meant to differ.
        - name: PROXY_INTERNAL_PORT_SECONDARY
          value: '25576'
        resources:
          requests:
            cpu: 10m
            memory: 10Mi
43 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/bungee/service.yml:
--------------------------------------------------------------------------------
# NodePort Service exposing the tournament bungee on every node at 25576.
apiVersion: v1
kind: Service
metadata:
  name: bungee
  namespace: tm
  labels:
    role: bungee
spec:
  type: NodePort
  ports:
  # NOTE(review): nodePort 25576 is outside the default 30000-32767 range;
  # this only applies cleanly if the apiserver's --service-node-port-range
  # was widened — confirm cluster configuration.
  - targetPort: 25565
    nodePort: 25576
    port: 25576
    name: minecraft
  selector:
    role: bungee
17 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/bungee/stateful-set.yml:
--------------------------------------------------------------------------------
# Tournament BungeeCord proxy (single replica), pinned to tm nodes.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: bungee-tm
  namespace: tm
spec:
  serviceName: bungee
  replicas: 1
  selector:
    matchLabels:
      role: bungee
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        role: bungee
        type: minecraft
    spec:
      nodeSelector:
        tm: 'true'
      # Prefer spreading replicas across nodes (relevant if scaled above 1).
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  role: bungee
      containers:
      - name: bungee
        image: gcr.io/stratus-197318/minecraft:bungee-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        # Requests equal limits — Guaranteed QoS for the tournament proxy.
        resources:
          requests:
            cpu: 2000m
            memory: 4Gi
          limits:
            cpu: 2000m
            memory: 4Gi
        envFrom:
        - secretRef:
            name: minecraft-secret
        env:
        - name: REPLICA
          value: enabled
54 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/lobby/service.yml:
--------------------------------------------------------------------------------
# Tournament lobby Service with a fixed ClusterIP.
apiVersion: v1
kind: Service
metadata:
  name: lobby
  namespace: tm
  labels:
    role: lobby
spec:
  # NOTE(review): 10.96.0.59 must lie inside this cluster's service CIDR
  # (it differs from the 10.245.x.x IPs used elsewhere) — confirm.
  clusterIP: 10.96.0.59
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: lobby
15 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/lobby/stateful-set.yml:
--------------------------------------------------------------------------------
# Tournament lobby: bukkit server on tm nodes, maps served read-only from
# the node's local disk.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: lobby-tm
  namespace: tm
spec:
  serviceName: lobby
  selector:
    matchLabels:
      role: lobby
  template:
    metadata:
      labels:
        role: lobby
        type: minecraft
    spec:
      nodeSelector:
        tm: 'true'
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 1000m
            memory: 4Gi
          limits:
            cpu: 2000m
            memory: 4Gi
        envFrom:
        - secretRef:
            name: minecraft-secret
        env:
        - name: REPLICA
          value: enabled
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: maps
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: maps
        hostPath:
          path: /storage/maps-tournament
66 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/namespace.yml:
--------------------------------------------------------------------------------
# Namespace isolating tournament ("tm") workloads from the main cluster.
apiVersion: v1
kind: Namespace
metadata:
  name: tm
5 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/official/service.yml:
--------------------------------------------------------------------------------
# Headless Service for the tournament "official" match server.
apiVersion: v1
kind: Service
metadata:
  name: official
  namespace: tm
  labels:
    role: official
spec:
  clusterIP: None
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: official
15 |
--------------------------------------------------------------------------------
/manifests/minecraft/tournament/official/stateful-set.yml:
--------------------------------------------------------------------------------
# Tournament "official" match server, pinned to tm nodes, maps read-only
# from the node's local disk.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: official
  namespace: tm
spec:
  replicas: 1
  serviceName: official
  selector:
    matchLabels:
      role: official
  template:
    metadata:
      labels:
        role: official
        type: minecraft
    spec:
      nodeSelector:
        tm: 'true'
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-master
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        livenessProbe:
          initialDelaySeconds: 60
          periodSeconds: 30
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - alive?
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
          limits:
            cpu: 4000m
            memory: 4Gi
        envFrom:
        - secretRef:
            name: minecraft-secret
        env:
        - name: REPLICA
          value: enabled
        - name: TOURNAMENT
          value: enabled
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: maps
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: maps
        hostPath:
          path: /storage/maps-tournament
69 |
--------------------------------------------------------------------------------
/manifests/minecraft/uhc.yml:
--------------------------------------------------------------------------------
# UHC server running the staging image and the maps-staging branch.
apiVersion: v1
kind: Service
metadata:
  name: uhc
  labels:
    role: uhc
spec:
  clusterIP: None
  ports:
  - port: 25565
    name: minecraft
  selector:
    role: uhc
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: uhc
spec:
  serviceName: uhc
  selector:
    matchLabels:
      role: uhc
  template:
    metadata:
      labels:
        role: uhc
    spec:
      containers:
      - name: minecraft
        image: gcr.io/stratus-197318/minecraft:bukkit-staging
        imagePullPolicy: Always
        stdin: true
        tty: true
        ports:
        - containerPort: 25565
          name: minecraft
        # NOTE(review): only a readiness probe here — no livenessProbe,
        # unlike the other minecraft manifests; confirm that is deliberate.
        readinessProbe:
          initialDelaySeconds: 15
          periodSeconds: 15
          timeoutSeconds: 5
          exec:
            command:
            - ruby
            - run.rb
            - ready?
        resources:
          requests:
            cpu: 100m
            memory: 2Gi
        envFrom:
        - secretRef:
            name: minecraft-secret
        volumeMounts:
        # mountPath must not contain ':' — read-only access is expressed
        # with the readOnly field, not a ":ro" suffix.
        - name: git
          subPath: maps-staging
          mountPath: /minecraft/maps
          readOnly: true
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
62 |
--------------------------------------------------------------------------------
/manifests/storage/git.yml:
--------------------------------------------------------------------------------
# Shared "git" PVC plus a DaemonSet of git-sync containers that keep the
# data repo and each maps branch checked out under per-branch subPaths.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: git
spec:
  storageClassName: do-block-storage
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 25Gi
---
# NOTE(review): a DaemonSet mounting a ReadWriteOnce block-storage PVC can
# only attach the volume on one node — on a multi-node cluster the other
# replicas would fail to start. Confirm this is intended.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: git
spec:
  selector:
    matchLabels:
      role: git
  template:
    metadata:
      labels:
        role: git
    spec:
      containers:
      - name: data
        image: stratusnetwork/git
        env:
        # Hook run by the git image after a sync; tells the web service
        # to reload its models.
        - name: GIT_CMD
          value: curl http://web/load_models
        - name: GIT_URL
          value: https://github.com/StratusNetwork/data.git
        resources:
          requests:
            cpu: 5m
            memory: 10Mi
        volumeMounts:
        - name: git
          subPath: data
          mountPath: /data
      - name: maps-master
        image: stratusnetwork/git
        envFrom:
        - secretRef:
            name: maps-secret
        env:
        - name: GIT_BRANCH
          value: master
        resources:
          requests:
            cpu: 5m
            memory: 10Mi
        volumeMounts:
        - name: git
          subPath: maps-master
          mountPath: /data
      - name: maps-staging
        image: stratusnetwork/git
        envFrom:
        - secretRef:
            name: maps-secret
        env:
        - name: GIT_BRANCH
          value: staging
        resources:
          requests:
            cpu: 5m
            memory: 10Mi
        volumeMounts:
        - name: git
          subPath: maps-staging
          mountPath: /data
      - name: maps-tournament
        image: stratusnetwork/git
        envFrom:
        - secretRef:
            name: maps-secret
        env:
        - name: GIT_BRANCH
          value: tournament
        resources:
          requests:
            cpu: 5m
            memory: 10Mi
        volumeMounts:
        - name: git
          subPath: maps-tournament
          mountPath: /data
      - name: maps-private
        image: stratusnetwork/git
        envFrom:
        - secretRef:
            name: maps-secret
        env:
        - name: GIT_BRANCH
          value: private-server
        resources:
          requests:
            cpu: 5m
            memory: 10Mi
        volumeMounts:
        - name: git
          subPath: maps-private
          mountPath: /data
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
110 |
--------------------------------------------------------------------------------
/manifests/storage/spaces.yml:
--------------------------------------------------------------------------------
# DigitalOcean Spaces uploader: bucket ConfigMaps + ReplicaSet syncing the
# maps-master checkout to the bucket.
apiVersion: v1
kind: ConfigMap
metadata:
  name: spaces-config
data:
  BUCKET_NAME: stratus
  BUCKET_LOCATION: US
  BUCKET_REGION: nyc3
  BUCKET_ACL: private
  BUCKET_ACL_SELECTOR: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: spaces-maps-config
data:
  BUCKET_FOLDER: maps
  BUCKET_ACL: public
  BUCKET_ACL_SELECTOR: xml,png
---
# extensions/v1beta1 has been removed from Kubernetes; apps/v1 is the GA
# replacement and requires an explicit selector matching the pod labels.
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: spaces
spec:
  selector:
    matchLabels:
      role: spaces
  template:
    metadata:
      labels:
        role: spaces
    spec:
      containers:
      - name: maps
        image: stratusnetwork/spaces
        envFrom:
        - secretRef:
            name: spaces-secret
        - secretRef:
            name: maps-secret
        - configMapRef:
            name: spaces-config
        - configMapRef:
            name: spaces-maps-config
        volumeMounts:
        - name: git
          subPath: maps-master
          mountPath: /data
      volumes:
      - name: git
        persistentVolumeClaim:
          claimName: git
51 |
--------------------------------------------------------------------------------
/manifests/system/nginx.yml:
--------------------------------------------------------------------------------
# nginx ingress controller (0.9.0-beta.13) with its RBAC, LoadBalancer
# Service, and the default 404 backend.
# API groups bumped from the removed extensions/v1beta1 (DaemonSet) and
# rbac.authorization.k8s.io/v1beta1 to their GA equivalents; the specs are
# drop-in compatible (the DaemonSet already declares the required selector).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: nginx-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-lb
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
      - name: nginx-ingress-controller
        image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --default-ssl-certificate=$(POD_NAMESPACE)/tls-certificate
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: http
          containerPort: 80
        - name: https
          containerPort: 443
        resources:
          requests:
            cpu: 10m
            memory: 25Mi
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress
  namespace: kube-system
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  #- name: https
  #  port: 443
  #  targetPort: 443
  #  protocol: TCP
  selector:
    k8s-app: nginx-ingress-lb
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - "extensions"
  resources:
  - ingresses/status
  verbs:
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: kube-system
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  # Defaults to "<election-id>-<ingress-class>".
  # Here: "ingress-controller-leader-nginx".
  # This has to be adapted if you change either parameter
  # when launching the nginx-ingress-controller.
  - "ingress-controller-leader-nginx"
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - get
  - create
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
- kind: ServiceAccount
  name: nginx-ingress-serviceaccount
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
  name: nginx-ingress-serviceaccount
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: default-http-backend
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissable as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/stratus-197318/default-backend:latest
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          requests:
            memory: 10Mi
            cpu: 5m
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  clusterIP: None
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend
242 |
--------------------------------------------------------------------------------
/manifests/system/restart.yml:
--------------------------------------------------------------------------------
# Daily restart of the api-public deployment by patching a pod-template
# annotation, which forces a rolling update.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: api-public
spec:
  schedule: '0 10 * * *' # 2am pst
  startingDeadlineSeconds: 60
  failedJobsHistoryLimit: 0
  successfulJobsHistoryLimit: 0
  concurrencyPolicy: Forbid
  jobTemplate:
    metadata:
      labels:
        role: api-public
    spec:
      template:
        metadata:
          labels:
            role: api-public
        spec:
          hostNetwork: true
          dnsPolicy: ClusterFirstWithHostNet
          restartPolicy: Never
          serviceAccountName: system
          containers:
          - name: kubectl
            image: lachlanevenson/k8s-kubectl:latest
            # $(HOSTNAME) in args is only expanded from env vars declared
            # on the container spec; without the declaration below the
            # literal string "$(HOSTNAME)" was patched every run, making
            # the patch a no-op after the first day. The pod name is
            # unique per job run, so each run changes the annotation.
            env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            args:
            - patch
            - deployment
            - api-public
            - -p
            - "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"restart\":\"$(HOSTNAME)\"}}}}}"
--------------------------------------------------------------------------------
/manifests/system/system.yml:
--------------------------------------------------------------------------------
# "system" ServiceAccount with cluster-admin, used by system jobs
# (e.g. the restart CronJob).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: system
  namespace: default
  labels:
    role: system
---
# rbac v1 is GA and drop-in compatible with the removed v1beta1 group.
# This file previously declared this identical ClusterRoleBinding twice
# under the same name; the duplicate (which only overwrote itself on
# apply) has been removed.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system
  labels:
    role: system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: system
  namespace: default
38 |
--------------------------------------------------------------------------------
/manifests/web/api-public.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: api-public
5 | labels:
6 | role: api-public
7 | spec:
8 | clusterIP: None
9 | ports:
10 | - port: 80
11 | name: http
12 | selector:
13 | role: api-public
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: api-public
19 | spec:
20 | minReadySeconds: 15
21 | replicas: 1
22 | strategy:
23 | type: RollingUpdate
24 | rollingUpdate:
25 | maxSurge: 100%
26 | maxUnavailable: 0%
27 | selector:
28 | matchLabels:
29 | role: api-public
30 | template:
31 | metadata:
32 | labels:
33 | role: api-public
34 | annotations:
35 | scheduler.alpha.kubernetes.io/critical-pod: 'true'
36 | spec:
37 | affinity:
38 | podAffinity:
39 | requiredDuringSchedulingIgnoredDuringExecution:
40 | - topologyKey: kubernetes.io/hostname
41 | labelSelector:
42 | matchLabels:
43 | role: mongo
44 | containers:
45 | - name: api-public
46 | image: gcr.io/stratus-197318/web:master
47 | imagePullPolicy: Always
48 | ports:
49 | - containerPort: 80
50 | name: http
51 | readinessProbe:
52 | initialDelaySeconds: 15
53 | timeoutSeconds: 15
54 | periodSeconds: 15
55 | httpGet:
56 | path: /users/by_username/Notch
57 | port: 80
58 | livenessProbe:
59 | initialDelaySeconds: 60
60 | timeoutSeconds: 60
61 | periodSeconds: 60
62 | httpGet:
63 | path: /users/by_username/Notch
64 | port: 80
65 | envFrom:
66 | - secretRef:
67 | name: web-secret
68 | env:
69 | - name: WEB_ROLE
70 | value: api_public
71 | volumeMounts:
72 | - name: data
73 | mountPath: /minecraft/repo/data
   | readOnly: true
74 | resources:
75 | requests:
76 | cpu: 100m
77 | memory: 300Mi
78 | volumes:
79 | - name: data
80 | emptyDir:
81 |
--------------------------------------------------------------------------------
/manifests/web/api.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: api
5 | labels:
6 | role: api
7 | spec:
8 | clusterIP: None
9 | ports:
10 | - port: 80
11 | name: http
12 | selector:
13 | role: api
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: api
19 | spec:
20 | minReadySeconds: 15
21 | replicas: 1
22 | strategy:
23 | type: RollingUpdate
24 | rollingUpdate:
25 | maxSurge: 100%
26 | maxUnavailable: 0%
27 | selector:
28 | matchLabels:
29 | role: api
30 | template:
31 | metadata:
32 | labels:
33 | role: api
34 | annotations:
35 | scheduler.alpha.kubernetes.io/critical-pod: 'true'
36 | spec:
37 | affinity:
38 | podAffinity:
39 | requiredDuringSchedulingIgnoredDuringExecution:
40 | - topologyKey: kubernetes.io/hostname
41 | labelSelector:
42 | matchLabels:
43 | role: mongo
44 | containers:
45 | - name: api
46 | image: gcr.io/stratus-197318/web:master
47 | imagePullPolicy: Always
48 | ports:
49 | - containerPort: 80
50 | name: http
51 | readinessProbe:
52 | initialDelaySeconds: 15
53 | timeoutSeconds: 15
54 | periodSeconds: 15
55 | httpGet:
56 | path: /users/by_username/Notch
57 | port: 80
58 | livenessProbe:
59 | initialDelaySeconds: 60
60 | timeoutSeconds: 60
61 | periodSeconds: 60
62 | httpGet:
63 | path: /users/by_username/Notch
64 | port: 80
65 | envFrom:
66 | - secretRef:
67 | name: web-secret
68 | env:
69 | - name: WEB_ROLE
70 | value: api
71 | volumeMounts:
72 | - name: data
73 | mountPath: /minecraft/repo/data
   | readOnly: true
74 | resources:
75 | requests:
76 | cpu: 100m
77 | memory: 300Mi
78 | volumes:
79 | - name: data
80 | emptyDir:
81 |
--------------------------------------------------------------------------------
/manifests/web/dns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1beta1
2 | kind: CronJob
3 | metadata:
4 | name: dns
5 | spec:
6 | schedule: '*/5 * * * *'
7 | startingDeadlineSeconds: 120
8 | failedJobsHistoryLimit: 0
9 | successfulJobsHistoryLimit: 0
10 | concurrencyPolicy: Forbid
11 | jobTemplate:
12 | metadata:
13 | labels:
14 | role: dns
15 | spec:
16 | template:
17 | metadata:
18 | labels:
19 | role: dns
20 | spec:
21 | restartPolicy: Never
22 | containers:
23 | - name: dns
24 | image: gcr.io/stratus-197318/web:master
25 | imagePullPolicy: Always
26 | command:
27 | - rails
28 | - runner
29 | - script/dns_rotate.rb
30 | envFrom:
31 | - secretRef:
32 | name: web-secret
33 |
--------------------------------------------------------------------------------
/manifests/web/ingress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: ingress
5 | spec:
6 | rules:
7 | - host: api.stratus.network
8 | http:
9 | paths:
10 | - backend:
11 | serviceName: api-public
12 | servicePort: 80
13 | - host: stratus.network
14 | http:
15 | paths:
16 | - backend:
17 | serviceName: web
18 | servicePort: 80
19 | - host: confirm.stratus.network
20 | http:
21 | paths:
22 | - backend:
23 | serviceName: web
24 | servicePort: 80
25 |
26 |
--------------------------------------------------------------------------------
/manifests/web/site.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: web
5 | labels:
6 | role: web
7 | spec:
8 | clusterIP: None
9 | ports:
10 | - port: 80
11 | name: http
12 | selector:
13 | role: web
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: web
19 | spec:
20 | minReadySeconds: 15
21 | replicas: 1
22 | strategy:
23 | type: RollingUpdate
24 | rollingUpdate:
25 | maxSurge: 100%
26 | maxUnavailable: 0%
27 | selector:
28 | matchLabels:
29 | role: web
30 | template:
31 | metadata:
32 | labels:
33 | role: web
34 | annotations:
35 | scheduler.alpha.kubernetes.io/critical-pod: 'true'
36 | spec:
37 | affinity:
38 | podAffinity:
39 | requiredDuringSchedulingIgnoredDuringExecution:
40 | - topologyKey: kubernetes.io/hostname
41 | labelSelector:
42 | matchLabels:
43 | role: mongo
44 | containers:
45 | - name: site
46 | image: gcr.io/stratus-197318/web:master
47 | imagePullPolicy: Always
48 | ports:
49 | - containerPort: 80
50 | name: http
51 | readinessProbe:
52 | initialDelaySeconds: 15
53 | timeoutSeconds: 15
54 | periodSeconds: 15
55 | httpGet:
56 | path: /
57 | port: 80
58 | livenessProbe:
59 | initialDelaySeconds: 60
60 | timeoutSeconds: 60
61 | periodSeconds: 60
62 | httpGet:
63 | path: /
64 | port: 80
65 | envFrom:
66 | - secretRef:
67 | name: web-secret
68 | env:
69 | - name: WEB_ROLE
70 | value: octc
71 | volumeMounts:
72 | - name: git
73 | subPath: data
74 | mountPath: /minecraft/repo/data
   | readOnly: true
75 | resources:
76 | requests:
77 | cpu: 100m
78 | memory: 300Mi
79 | - name: smtp
80 | image: gcr.io/stratus-197318/smtp:latest
81 | imagePullPolicy: IfNotPresent
82 | ports:
83 | - containerPort: 25
84 | name: smtp
85 | envFrom:
86 | - secretRef:
87 | name: web-secret
88 | resources:
89 | requests:
90 | cpu: 5m
91 | memory: 10Mi
92 | volumes:
93 | - name: git
94 | persistentVolumeClaim:
95 | claimName: git
96 |
--------------------------------------------------------------------------------
/manifests/web/stats.yml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1beta1
2 | kind: CronJob
3 | metadata:
4 | name: stats
5 | spec:
6 | schedule: '*/5 * * * *'
7 | startingDeadlineSeconds: 60
8 | failedJobsHistoryLimit: 0
9 | successfulJobsHistoryLimit: 0
10 | concurrencyPolicy: Forbid
11 | jobTemplate:
12 | metadata:
13 | labels:
14 | role: stats
15 | spec:
16 | template:
17 | metadata:
18 | labels:
19 | role: stats
20 | spec:
21 | restartPolicy: Never
22 | containers:
23 | - name: stats
24 | image: mongo:4.2.0
25 | command:
26 | - mongo
27 | - --host=mongo:27017
28 | - /web/stats.js
29 | volumeMounts:
30 | - name: script
31 | mountPath: /web
32 | volumes:
33 | - name: script
34 | configMap:
35 | name: stats-script
36 | ---
37 | apiVersion: v1
38 | kind: ConfigMap
39 | metadata:
40 | name: stats-script
41 | data:
42 | stats.js: "// -----------------------\n// ---- Configuration ----\n// -----------------------\n\nvar
43 | second_duration = 1000;\nvar minute_duration = 60 * second_duration;\nvar
44 | hour_duration = 60 * minute_duration;\nvar day_duration = 24 * hour_duration;\nvar
45 | week_duration = 7 * day_duration;\nvar eternity_duration = new Date().getTime();\n\nvar
46 | profiles = {\n\t\"day\": day_duration,\n\t\"week\": week_duration,\n\t\"eternity\":
47 | eternity_duration,\n};\n\n// ---------------------------\n// ---- Utility Functions
48 | ----\n// ---------------------------\n\n_ = {\n// merge the given arguments into
49 | an array\nmerge: function(vargs) {\n\tvar result = {};\n\tfor (var i = 0; i <
50 | arguments.length; i++) {\n\t\tvar obj = arguments[i];\n\t\tfor (var key in obj)
51 | {\n\t\t\tresult[key] = obj[key];\n\t\t}\n\t}\n\treturn result;\n},\n// identity
52 | function - used in identity transformation example\nid: function(obj) {\n\treturn
53 | obj;\n},\n// invert numbers, arrays, or objects\ninverse: function(obj) {\n\tswitch
54 | (Object.prototype.toString.call(obj)) {\n\tcase \"[object Number]\":\n\t\treturn
55 | -obj;\n\n\tcase \"[object Array]\":\n\t\treturn obj.map(_.inverse);\n\n\tcase
56 | \"[object Object]\":\n\t\tvar result = {};\n\t\tfor (var key in obj) {\n\t\t\tresult[key]
57 | = _.inverse(obj[key]);\n\t\t}\n\t\treturn result;\n\n\tdefault:\n\t\treturn obj;\n\t}\n},\nsum:
58 | function(field, base, vargs) {\n\tvar result = base[field] || 0;\n\tfor(var i
59 | = 2; i < arguments.length; i++) {\n\t\tresult += arguments[i][field] || 0;\n\t}\n\tbase[field]
60 | = result;\n},\ngreatest: function(field, def, base, vargs) {\n\tvar result = base[field]
61 | || def;\n\tfor(var i = 3; i < arguments.length; i++) {\n\t\tvar x = arguments[i][field];\n\t\tif(x
62 | && x > result) {\n\t\t\tresult = x;\n\t\t}\n\t}\n\tbase[field] = result;\n},\n//
63 | determine the unique elements of an array\nuniq: function(array) {\n\tvar hash
64 | = {};\n\tvar result = [];\n\tvar array = array || result;\n\tfor (var i = 0, l
65 | = array.length; i < l; ++i){\n\t\tif(hash.hasOwnProperty(array[i])) continue;\n\t\tresult.push(array[i]);\n\t\thash[array[i]]
66 | = 1;\n\t}\n\treturn result;\n},\n// concat several objects\nconcat: function(field,
67 | vargs) {\n\tvar result = [];\n\tfor (var i = 1; i < arguments.length; i++) {\n\t\tvar
68 | x = arguments[i][field];\n\t\tresult.concat(x);\n\t}\n\treturn result;\n},\n//
69 | determine if a value is meaningless (empty, null, 0, infinite date)\nmeaningless:
70 | function(obj) {\n\tif (obj == null) return true;\n\tif (obj == 0) return true;\n\tif
71 | (isNaN(obj)) return true;\n\tif (obj instanceof Date) return obj.getTime() ==
72 | 0;\n\tif (obj instanceof Object && Object.keys(obj).length == 0) return true;\n\n\treturn
73 | false;\n},\n// calculate kd, kk, and tk ratios given a stats object\ncalculate_kd:
74 | function(obj) {\n\tobj.kd = (obj.kills || 0) / (obj.deaths ||
75 | 1);\n\tobj.kk = (obj.kills || 0) / (obj.deaths_player || 1);\n\tobj.tkrate
76 | = (obj.kills_team || 0) / (obj.kills || 1);\n},\n};\n\n// --------------------------------\n//
77 | ---- Generic Implementation ----\n// --------------------------------\n//\n\n/*\n
78 | * Map, Reduce, and Finalize can all use variables defined in the map reduce scope.\n
79 | *\n * The variables included in the scope and their descriptions are listed below.\n
80 | *\n * _ - collection of utility functions\n * profile - time period being
81 | map reduced\n * map - map function for the specific collection\n * reduce
82 | \ - reduce function for the specific collection\n * finalize - finalize function
83 | for the specific collection, can be null\n * key - string that specifies
84 | which field has the date object\n *\n */\n\nstats_map = function() {\n\t// get
85 | the implementation object\n\tvar map_result = map.call(this);\n\n\t// store the
86 | date and family\n\tvar date = map_result.date;\n\tvar family = map_result.family;\n\n\tfor
87 | (var emit_key in map_result.emit) {\n\t\tvar emit_obj = map_result.emit[emit_key];\n\n\t\ttransformations.forEach(function(transform)
88 | {\n\t\t\tif (transform.start <= date && date < transform.end) {\n\t\t\t\temit_obj
89 | = transform.fn(emit_obj);\n\t\t\t}\n\t\t});\n\n\t\tvar emit_result = {};\n\t\temit_result[profile]
90 | = {};\n\t\temit_result[profile][\"global\"] = {};\n\t\temit_result[profile][family]
91 | = emit_obj;\n\n\t\temit(emit_key, emit_result);\n\t}\n}\n\nstats_reduce = function(key,
92 | values) {\n\tvar result = {};\n\n\tvalues.forEach(function(value) {\n\t\tfor(var
93 | profile in value) {\n\t\t\tresult[profile] = result[profile] || {};\n\t\t\tresult[profile][\"global\"]
94 | = {};\n\n\t\t\tfor(var family in value[profile]) {\n\t\t\t\tvar family_result
95 | = result[profile][family] || {};\n\t\t\t\tvar obj = value[profile][family];\n\n\t\t\t\treduce(key,
96 | family_result, obj);\n\n\t\t\t\tfor (var key in obj) {\n\t\t\t\t\tif (family_result[key]
97 | === undefined && obj[key] !== undefined) {\n\t\t\t\t\t\tfamily_result[key] = obj[key];\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tresult[profile][family]
98 | = family_result;\n\t\t\t}\n\t\t}\n\t});\n\n\treturn result;\n}\n\nstats_finalize
99 | = function(key, value) {\n\tvar totals = {\n\t\tplaying_time : {result:
100 | 0, type: \"total\"},\n\t\tdeaths : {result: 0, type: \"total\"},\n\t\tdeaths_player
101 | \ : {result: 0, type: \"total\"},\n\t\tdeaths_team :
102 | {result: 0, type: \"total\"},\n\t\tkills : {result: 0, type:
103 | \"total\"},\n\t\tkills_team : {result: 0, type: \"total\"},\n\t\twool_placed
104 | \ : {result: 0, type: \"total\"},\n\t\tcores_leaked :
105 | {result: 0, type: \"total\"},\n\t\tdestroyables_destroyed : {result: 0, type:
106 | \"total\"},\n\t\tlast_death : {result: new Date(0), type: \"recent\"},\n\t\tlast_kill
107 | \ : {result: new Date(0), type: \"recent\"},\n\t\tlast_wool_placed
108 | \ : {result: new Date(0), type: \"recent\"},\n\t\tlast_core_leaked :
109 | {result: new Date(0), type: \"recent\"},\n\t\tlast_destroyable_destroyed : {result:
110 | new Date(0), type: \"recent\"},\n\t};\n\n\t// call finalize function (can be null)\n\tif
111 | (finalize) {\n\t\tfor (var profile in value) {\n\t\t\tfor (var family in value[profile])
112 | {\n\t\t\t\tfinalize(key, value[profile][family]);\n\t\t\t}\n\t\t}\n\t}\n\n\tfor
113 | (var stat in totals) {\n\t\tfor (var profile in value) {\n\t\t\tfor (var family
114 | in value[profile]) {\n\t\t\t\tif(family == \"global\") continue;\n\n\t\t\t\tif(_.meaningless(value[profile][family][stat]))
115 | {\n\t\t\t\t\tdelete value[profile][family][stat];\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tswitch
116 | (totals[stat].type) {\n\t\t\t\tcase \"total\":\n\t\t\t\t\ttotals[stat].result
117 | += (value[profile][family][stat] || 0);\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"recent\":\n\t\t\t\t\tif(value[profile][family][stat]
118 | > (totals[stat].result || new Date(0))) {\n\t\t\t\t\t\ttotals[stat].result = value[profile][family][stat];\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttotals[stat]
119 | = totals[stat].result;\n\n\t\tif(_.meaningless(totals[stat])) delete totals[stat];\n\t}\n\n\t_.calculate_kd(totals);\n\n\tvalue[profile][\"global\"]
120 | = totals;\n\n\treturn value;\n}\n\nvar stats = {}; // records how to mapreduce
121 | on certain collections\n\n// -------------------------------\n// ---- Deaths Implementation
122 | ----\n// -------------------------------\n\ndeaths_map = function() {\n\tvar family
123 | = this.family || \"default\";\n\n\tvar victim = {last_death: this.date};\n\tvar
124 | killer = {};\n\n\tif (this.teamkill) {\n\t\tvictim.deaths_team = 1;\n\t\tif (this.killer)
125 | killer.kills_team = 1;\n\t} else {\n\t\tvictim.deaths = 1;\n\t\tif (this.killer)
126 | {\n\t\t\tvictim.deaths_player = 1;\n\t\t\tkiller.kills = 1;\n\t\t\tkiller.last_kill
127 | = this.date;\n\t\t}\n\t}\n\n\tvar emit = {};\n\temit[this.victim] = victim;\n\tif
128 | (this.killer) emit[this.killer] = killer;\n\n\treturn { \"date\": this.date, \"family\":
129 | family, \"emit\": emit };\n}\n\ndeaths_reduce = function(key, result, obj) {\n\t[\"deaths\",
130 | \"deaths_player\", \"deaths_team\", \"kills\", \"kills_team\"].forEach(function(field)
131 | {\n\t\t_.sum(field, result, obj);\n\t});\n\n\t[\"last_death\", \"last_kill\"].forEach(function(field)
132 | {\n\t\t_.greatest(field, new Date(0), result, obj);\n\t});\n}\n\ndeaths_finalize
133 | = function(key, value) {\n\t_.calculate_kd(value);\n\treturn value;\n}\n\nstats[\"deaths\"]
134 | = {\n\tmap: deaths_map,\n\treduce: deaths_reduce,\n\tfinalize: deaths_finalize,\n\tkey:
135 | \"date\",\n\tquery: {\n\t\tdate: {$exists: 1},\n\t\tvictim: {$exists: 1}\n\t},\n\tdb:
136 | \"oc_deaths\",\n};\n\n// ---------------------------------------\n// ---- Participations
137 | Implementation ----\n// ---------------------------------------\n\nparticipations_map
138 | = function() {\n\tvar emit = {};\n\n\tvar family = this.family || \"default\";\n\tvar
139 | duration = this.end.getTime() - this.start.getTime();\n\n\t// 'team_id' is the
140 | current field, 'team' is only on legacy documents\n\tif (this.team_id || (this.team
141 | && this.team != \"Observers\" && this.team != \"Spectators\")) {\n\t\temit[\"playing_time\"]
142 | = duration;\n\t}\n\n\tvar emit_result = {};\n\temit_result[this.player] = emit;\n\n\treturn
143 | { date: this.end, family: family, emit: emit_result };\n}\n\nparticipations_reduce
144 | = function(key, result, obj) {\n\t[\"playing_time\"].forEach(function(field) {\n\t\t_.sum(field,
145 | result, obj);\n\t});\n}\n\nstats[\"participations\"] = {\n\tmap: participations_map,\n\treduce:
146 | participations_reduce,\n\tkey: \"end\",\n\tquery: {\n\t\tstart: {$exists: 1},\n\t\tend:
147 | {$exists: 1},\n\t\tplayer: {$exists: 1},\n\t},\n\tdb: \"oc_participations\",\n};\n\n//
148 | -----------------------------------\n// ---- Objectives Implementation ----\n//
149 | -----------------------------------\n\nobjectives_map = function() {\n\tvar emit
150 | = {};\n\n\tvar family = this.family || \"default\";\n\n\tswitch (this.type) {\n\tcase
151 | \"wool_place\":\n\t\temit[\"wool_placed\"] = 1;\n\t\temit[\"last_wool_placed\"]
152 | = this.date;\n\t\tbreak;\n\tcase \"destroyable_destroy\":\n\t\temit[\"destroyables_destroyed\"]
153 | = 1;\n\t\temit[\"last_destroyable_destroyed\"] = this.date;\n\t\tbreak;\n\tcase
154 | \"core_break\":\n\t\temit[\"cores_leaked\"] = 1;\n\t\temit[\"last_core_leaked\"]
155 | = this.date;\n\t\tbreak;\n\t}\n\n\tvar emit_result = {};\n\temit_result[this.player]
156 | = emit;\n\n\treturn { date: this.date, family: family, emit: emit_result };\n}\n\nobjectives_reduce
157 | = function(key, result, obj) {\n\t[\"wool_placed\", \"destroyables_destroyed\",
158 | \"cores_leaked\"].forEach(function(field) {\n\t\t_.sum(field, result, obj);\n\t});\n\n\t[\"last_wool_placed\",
159 | \"last_destroyable_destroyed\", \"last_core_leaked\"].forEach(function(field)
160 | {\n\t\t_.greatest(field, new Date(0), result, obj);\n\t});\n}\n\nstats[\"objectives\"]
161 | = {\n\tmap: objectives_map,\n\treduce: objectives_reduce,\n\tkey: \"date\",\n\tquery:
162 | {\n\t\tdate: {$exists: 1},\n\t\tplayer: {$exists: 1},\n\t},\n\tdb: \"oc_objectives\",\n};\n\n//
163 | ----------------------------------\n// ---- Execution Implementation ----\n//
164 | ----------------------------------\n\n/*\n * We subtract 1 minute from the current
165 | time to help deal with improper statistics.\n *\n * Improper statistics happen
166 | because the timestamp is generated on the client side\n * and there can be anywhere
167 | from a millisecond to a multiple second delay on insertion.\n *\n * This bug originally
168 | caused negative statistics because the death/objective/playing time\n * statistic
169 | wasn't credited to the player but was later subtracted.\n *\n * Sliding our time
170 | frame window lets us catch some of these delayed statistics and\n * massively
171 | decrease the number of improper statistics. Using server-side timestamps\n * would
172 | fix the problem, but, we would rather have timestamps match the game.\n */\nvar
173 | now = new Date(new Date().getTime() - (1 * minute_duration));\n\nvar upsert =
174 | {};\n\nfor (var profile in profiles) {\n\tupsert[\"last_run.\" + profile] = now;\n}\n\nvar
175 | jobsDB = db.getSiblingDB(\"oc_jobs\");\nvar j = jobsDB.jobs.findAndModify({\n\tquery:
176 | {name: \"player_stats\"},\n\tupdate: {$set: upsert},\n\tupsert: true\n});\n\nvar
177 | scope_base = { \"_\": _ };\n\nfor (var profile in profiles) {\n\t// calculate
178 | when the profile was last run\n\tvar last_run = (j && j.last_run && j.last_run[profile])
179 | || new Date(0);\n\n\tprint(\"Profile '\" + profile + \"' last run at \" + last_run);\n\n\tvar
180 | duration = profiles[profile];\n\n\t// calculate the add / re\n\tvar add_start
181 | = last_run;\n\tvar add_end = now;\n\tvar sub_start = new Date(Math.max(0, add_start.getTime()
182 | - duration));\n\tvar sub_end = new Date(Math.max(0, add_end.getTime() - duration));\n\n\t//
183 | sub: |-----------|\n\t// add:\t\t|--------------|\n\t//\n\t// sub: |------|\n\t//
184 | add:\t\t\t |---------|\n\tif (add_start < sub_end) {\n\t\tvar old_end = sub_end;\n\t\tsub_end
185 | = add_start;\n\t\tadd_start = old_end;\n\t}\n\n\t// describes what function needs
186 | to apply to a selected range\n\t// the commented out identity transformation exists
187 | for educational purposes\n\tvar transformations = [\n\t\t/*\n\t\t{\n\t\t\tstart:
188 | add_start,\n\t\t\tend: add_end,\n\t\t\tfn: _.id,\n\t\t},\n\t\t*/\n\t\t{\n\t\t\tstart:
189 | sub_start,\n\t\t\tend: sub_end,\n\t\t\tfn: _.inverse,\n\t\t},\n\t];\n\n\tvar scope_profile
190 | = _.merge(scope_base, {\n\t\tprofile: profile,\n\t\ttransformations: transformations,\n\t});\n\n\tvar
191 | total_result = {\n\t\tresult: \"oc_player_stats_\" + profile,\n\t\ttimeMillis:
192 | 0,\n\t\tcounts: {\n\t\t\tinput: 0,\n\t\t\temit: 0,\n\t\t\treduce: 0,\n\t\t\toutput:
193 | 0,\n\t\t},\n\t};\n\n\tfor (var collection in stats) {\n\t\tprint(\"Processing
194 | collection: \" + collection);\n\n\t\tvar info = stats[collection];\n\n\t\t// local
195 | variables accessible by the map, reduce, and finalize functions\n\t\tvar scope
196 | = _.merge(scope_profile, {\n\t\t\tkey: info.key,\n\t\t\tmap: info.map,\n\t\t\treduce:
197 | info.reduce,\n\t\t\tfinalize: info.finalize,\n\t\t});\n\n\t\tvar add_range = {};\n\t\tadd_range[info.key]
198 | = {$gte: add_start, $lt: add_end};\n\n\t\tvar sub_range = {};\n\t\tsub_range[info.key]
199 | = {$gte: sub_start, $lt: sub_end};\n\n\t\tvar query = _.merge(info.query || {},
200 | {\n\t\t\t$or: [ add_range, sub_range ]\n\t\t});\n\n\t\t// do mapreduce\n\t\tvar
201 | options = {\n\t\t\tout: {reduce: \"player_stats_\" + profile, db: \"oc_playerstats\"},\n\t\t\tscope:
202 | scope,\n\t\t\tquery: query,\n\t\t\tfinalize: stats_finalize,\n\t\t};\n\n\t\tvar
203 | database = db.getSiblingDB(info.db);\n\t\tvar result = database[collection].mapReduce(stats_map,
204 | stats_reduce, options);\n\n\t\tprintjson(result);\n\n\t\tif (result.ok) {\n\t\t\ttotal_result.timeMillis
205 | += result.timeMillis;\n\t\t\ttotal_result.counts.input += result.counts.input;\n\t\t\ttotal_result.counts.emit
206 | += result.counts.emit;\n\t\t\ttotal_result.counts.reduce += result.counts.reduce;\n\t\t\ttotal_result.counts.output
207 | += result.counts.output;\n\t\t}\n\t}\n\n\tprint(\"Results for '\" + profile +
208 | \"' profile\")\n\tprintjson(total_result);\n}\n\n"
209 |
--------------------------------------------------------------------------------
/manifests/web/worker.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: worker
5 | spec:
6 | minReadySeconds: 60
7 | replicas: 1
8 | strategy:
9 | type: Recreate
10 | selector:
11 | matchLabels:
12 | role: worker
13 | template:
14 | metadata:
15 | labels:
16 | role: worker
17 | spec:
18 | serviceAccountName: system
19 | containers:
20 | - name: worker
21 | image: gcr.io/stratus-197318/web:master
22 | imagePullPolicy: Always
23 | command:
24 | - config/worker.rb
25 | livenessProbe:
26 | initialDelaySeconds: 60
27 | timeoutSeconds: 30
28 | periodSeconds: 30
29 | exec:
30 | command:
31 | - curl
32 | - api/users/by_username/Notch
33 | resources:
34 | requests:
35 | cpu: 100m
36 | memory: 1Gi
37 | envFrom:
38 | - secretRef:
39 | name: web-secret
40 | volumeMounts:
41 | - name: google
42 | mountPath: /web/config/google.json
43 | subPath: google.json
44 | volumes:
45 | - name: google
46 | secret:
47 | secretName: web-secret
48 | items:
49 | - key: GOOGLE_JSON
50 | path: google.json
51 |
--------------------------------------------------------------------------------
/models/default-backend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ruby:2.3-alpine3.7
2 |
3 | COPY default-backend default-backend
4 |
5 | RUN mv default-backend/* .
6 |
7 | CMD ruby server.rb
8 |
--------------------------------------------------------------------------------
/models/default-backend/build.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: default-backend
3 | name: gcr.io/cloud-builders/docker
4 | args:
5 | - build
6 | - --tag=gcr.io/$PROJECT_ID/default-backend:latest
7 | - --file=models/default-backend/Dockerfile
8 | - .
9 | images:
10 | - gcr.io/$PROJECT_ID/default-backend:latest
11 |
--------------------------------------------------------------------------------
/models/default-backend/cloudbuild.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: default-backend
3 | name: gcr.io/cloud-builders/docker
4 | args:
5 | - build
6 | - --tag=gcr.io/$PROJECT_ID/default-backend:latest
7 | - --file=models/default-backend/Dockerfile
8 | - .
9 | images:
10 | - gcr.io/$PROJECT_ID/default-backend:latest
11 |
--------------------------------------------------------------------------------
/models/minecraft/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:10-jre-slim
2 |
3 | USER root
4 |
5 | RUN apt-get update && \
6 | apt-get install -y --no-install-recommends curl git build-essential ruby ruby-dev rubygems && \
7 | echo "gem: --no-rdoc --no-ri" > ~/.gemrc && \
8 | gem install bundler
9 |
10 | WORKDIR minecraft
11 |
   | # NOTE(review): ${AUTH} and ${BRANCH} have no ARG declaration above this
   | # line (ARG BRANCH only appears later in this Dockerfile, and ARGs only
   | # apply after their declaration), so both expand to empty strings here —
   | # the ADD always fetches refs/heads/ with no auth. Declare the ARGs before
   | # this instruction if they are meant to take effect.
12 | ADD https://${AUTH}api.github.com/repos/StratusNetwork/Data/git/refs/heads/${BRANCH} data.json
13 | RUN git clone -b master --depth 1 https://github.com/StratusNetwork/Data.git data
14 |
15 | WORKDIR repo
16 | RUN git clone -b master --depth 1 https://gitlab.com/stratus/config.git Config
17 |
18 | WORKDIR /minecraft/server
19 |
20 | COPY lib lib
21 | COPY Gemfile Gemfile
22 | COPY models/minecraft .
23 | RUN rm build.yml
24 |
25 | RUN bundle install --without test worker
26 |
27 | RUN apt-get remove -y build-essential ruby-dev rubygems && \
28 | apt-get -y autoremove
29 |
30 | ARG PROJECT_ID=stratus-197318
31 | ARG BRANCH=master
32 | ENV URL=https://storage.googleapis.com/artifacts.$PROJECT_ID.appspot.com/artifacts/$BRANCH/.m2
33 | ENV MASTER_URL=https://storage.googleapis.com/artifacts.$PROJECT_ID.appspot.com/artifacts/master/.m2
34 | ARG VERSION=1.12.2-SNAPSHOT
35 | ENV VERSION=$VERSION
36 |
37 | ENV STAGE=DEVELOPMENT
38 | ENV API=http://api
39 | ENV RABBIT=rabbit
40 |
41 | ENV ESC=$
42 |
43 | ENV JAVA_OPTS="-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -XshowSettings:vm -XX:MaxRAMFraction=1 -XX:+AggressiveOpts -XX:+AlwaysPreTouch -XX:LargePageSizeInBytes=2M -XX:+UseLargePages -XX:+UseLargePagesInMetaspace -XX:+AggressiveHeap -XX:+OptimizeStringConcat -XX:+UseStringDeduplication -XX:+UseCompressedOops -XX:TargetSurvivorRatio=90 -XX:InitiatingHeapOccupancyPercent=10 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSIncrementalPacing -XX:ParallelGCThreads=2 -XX:+DisableExplicitGC -XX:+UseAdaptiveGCBoundary -Xnoclassgc"
44 |
45 | ENV JAVA_EXPERIMENTAL_OPTS="-XX:+UseG1GC -XX:G1NewSizePercent=50 -XX:G1MaxNewSizePercent=80 -XX:G1MixedGCLiveThresholdPercent=50 -XX:MaxGCPauseMillis=100 -XX:+DisableExplicitGC -XX:TargetSurvivorRatio=90 -XX:InitiatingHeapOccupancyPercent=10"
46 |
47 | CMD cd ../data && git pull && cd ../repo/Config && git pull && \
48 | cd ../../server && ruby run.rb "load!" && \
49 | exec java -jar server.jar -stage $STAGE
50 |
--------------------------------------------------------------------------------
/models/minecraft/Dockerfile-bukkit:
--------------------------------------------------------------------------------
1 | FROM minecraft:base
2 |
3 | ARG BUKKIT_VERSION=1.12.2-R0.1-SNAPSHOT
4 | ADD $URL/tc/oc/sportbukkit/$BUKKIT_VERSION/sportbukkit-$BUKKIT_VERSION.jar server.jar
5 |
6 | ADD $URL/tc/oc/api-ocn/$VERSION/api-ocn-$VERSION.jar plugins/api-ocn.jar
7 | ADD $URL/tc/oc/api-bukkit/$VERSION/api-bukkit-$VERSION.jar plugins/api.jar
8 | ADD $URL/tc/oc/commons-bukkit/$VERSION/commons-bukkit-$VERSION.jar plugins/commons.jar
9 | ADD $URL/tc/oc/Lobby/$VERSION/Lobby-$VERSION.jar plugins/lobby.jar
10 | ADD $URL/tc/oc/PGM/$VERSION/PGM-$VERSION.jar plugins/pgm.jar
11 | ADD $URL/net/anxuiz/Tourney/$VERSION/Tourney-$VERSION.jar plugins/tourney.jar
12 | ADD $URL/me/anxuiz/bukkit-settings/1.9-SNAPSHOT/bukkit-settings-1.9-SNAPSHOT.jar plugins/settings.jar
13 | ADD $URL/tc/oc/raven-bukkit/1.11-SNAPSHOT/raven-bukkit-1.11-SNAPSHOT.jar plugins/raven.jar
14 | ADD $MASTER_URL/other/protocolsupport.jar plugins/protocol.jar
15 | ADD $MASTER_URL/other/commandbook.jar plugins/commandbook.jar
16 | ADD $MASTER_URL/other/buycraft.jar plugins/buycraftx.jar
17 | ADD $MASTER_URL/other/worldedit.jar plugins/worldedit.jar
18 |
--------------------------------------------------------------------------------
/models/minecraft/Dockerfile-bungee:
--------------------------------------------------------------------------------
1 | FROM minecraft:base
2 |
3 | ARG BUNGEE_VERSION=1.12-SNAPSHOT
4 | ADD $URL/tc/oc/bungeecord-bootstrap/$BUNGEE_VERSION/bungeecord-bootstrap-$BUNGEE_VERSION.jar server.jar
5 |
6 | ARG VIA_VERSION=1.6.1-SNAPSHOT
7 | ADD $MASTER_URL/us/myles/viaversion/$VIA_VERSION/viaversion-$VIA_VERSION.jar plugins/viaversion.jar
8 |
9 | ADD $URL/tc/oc/api-ocn/$VERSION/api-ocn-$VERSION.jar plugins/api-ocn.jar
10 | ADD $URL/tc/oc/api-bungee/$VERSION/api-bungee-$VERSION.jar plugins/api.jar
11 | ADD $URL/tc/oc/commons-bungee/$VERSION/commons-bungee-$VERSION.jar plugins/commons.jar
12 | ADD $URL/tc/oc/raven-bungee/1.11-SNAPSHOT/raven-bungee-1.11-SNAPSHOT.jar plugins/raven.jar
13 |
--------------------------------------------------------------------------------
/models/minecraft/build.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: git
3 | name: gcr.io/cloud-builders/git
4 | args:
5 | - clone
6 | - --depth=1
7 | - https://github.com/Electroid/infrastructure.git
8 | - id: base
9 | name: gcr.io/cloud-builders/docker
10 | args:
11 | - build
12 | - --tag=minecraft:base
13 | - --file=infrastructure/models/minecraft/Dockerfile
14 | - --build-arg=PROJECT_ID=$PROJECT_ID
15 | - --build-arg=BRANCH=$_BRANCH
16 | - --build-arg=VERSION=$_VERSION
17 | - infrastructure
18 | wait_for:
19 | - git
20 | - id: bukkit
21 | name: gcr.io/cloud-builders/docker
22 | args:
23 | - build
24 | - --tag=gcr.io/$PROJECT_ID/minecraft:bukkit-$_BRANCH
25 | - --file=infrastructure/models/minecraft/Dockerfile-bukkit
26 | - --build-arg=BUKKIT_VERSION=$_BUKKIT_VERSION
27 | - infrastructure
28 | wait_for:
29 | - base
30 | - id: bungee
31 | name: gcr.io/cloud-builders/docker
32 | args:
33 | - build
34 | - --tag=gcr.io/$PROJECT_ID/minecraft:bungee-$_BRANCH
35 | - --file=infrastructure/models/minecraft/Dockerfile-bungee
36 | - --build-arg=BUNGEE_VERSION=$_BUNGEE_VERSION
37 | - infrastructure
38 | wait_for:
39 | - base
40 | images:
41 | - gcr.io/$PROJECT_ID/minecraft:bukkit-$_BRANCH
42 | - gcr.io/$PROJECT_ID/minecraft:bungee-$_BRANCH
43 |
--------------------------------------------------------------------------------
/models/minecraft/run.rb:
--------------------------------------------------------------------------------
1 | # Shift load path to controller files
2 | $: << File.expand_path("../lib", __FILE__)
3 |
4 | # Require necessary libraries for the server
5 | require "minecraft/server"
6 |
7 | # Load the local server
8 | @server = LocalServer.new("/minecraft")
9 |
10 | # Respond with a health check for the given method on the server.
11 | def check(method)
12 | value = @server.send(method.to_s)
13 | print "#{method.to_s}: #{value}"
14 | exit(value ? 0 : 1)
15 | end
16 |
17 | # Run different commands depending on argument
18 | case arg = ARGV[0]
19 | when "load!"
20 | @server.load!
21 | when "ready?"
22 | # HACK: DNS script broke, so now we alternate between
23 | # server ordinals based on the day of the week.
24 | if @server.role_cache == "BUNGEE"
25 | # check(:dns_enabled)
26 | if @server.ensure_cache == "running"
27 | exit(0)
28 | elsif @server.ensure_cache == "stopping"
29 | exit(1)
30 | elsif Time.now.wday % 2 == @server.name_cache.split("-").last.to_i
31 | exit(0)
32 | else
33 | exit(1)
34 | end
35 | else
36 | check(:online)
37 | end
38 | when "alive?"
39 | check(:alive?)
40 | else
41 | raise "Unknown argument: #{arg}"
42 | end
43 |
--------------------------------------------------------------------------------
/models/util/build.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: git
3 | name: gcr.io/cloud-builders/docker
4 | args:
5 | - build
6 | - --tag=git
7 | - --tag=gcr.io/$PROJECT_ID/git:latest
8 | - models/util/git
9 | - id: spaces
10 | name: gcr.io/cloud-builders/docker
11 | args:
12 | - build
13 | - --tag=gcr.io/$PROJECT_ID/spaces:latest
14 | - models/util/spaces
15 | wait_for:
16 | - git
17 | - id: proxy
18 | name: gcr.io/cloud-builders/docker
19 | args:
20 | - build
21 | - --tag=gcr.io/$PROJECT_ID/proxy:latest
22 | - models/util/proxy
23 | wait_for:
24 | - '-'
25 | images:
26 | - gcr.io/$PROJECT_ID/git:latest
27 | - gcr.io/$PROJECT_ID/spaces:latest
28 | - gcr.io/$PROJECT_ID/proxy:latest
29 |
--------------------------------------------------------------------------------
/models/util/git/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker
2 |
3 | # Install git packages
4 | RUN apk update && apk upgrade && apk add bash git openssh curl
5 |
6 | # Setup git identity
7 | RUN git config --global user.email null@stratus.network
8 | RUN git config --global user.name stratus
9 |
10 | # Setup git environment variables
11 | ENV GIT_URL=null
12 | ENV GIT_CMD=uptime
13 | ENV GIT_BRANCH=master
14 | ENV GIT_TIME=15
15 |
16 | # Copy and setup the git cron script
17 | COPY . .
18 | RUN chmod +x git.sh
19 | CMD ./git.sh
20 |
--------------------------------------------------------------------------------
/models/util/git/git.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | # Clone the repository
4 | git clone -b $GIT_BRANCH $GIT_URL data
5 | cd data
6 |
7 | # Ensure repository is in correct state (variables may have changed)
8 | git pull && git reset --hard origin/$GIT_BRANCH && git pull
9 |
10 | # Wait a bit and always run the command on start
11 | sleep $GIT_TIME && $GIT_CMD && echo "Listening..."
12 |
13 | # If any changes were made to the repository, pull and run the command
14 | while true; do
15 | git fetch origin
16 | log=$(git log HEAD..origin/$GIT_BRANCH --oneline)
17 | if [[ "${log}" != "" ]] ; then
18 | git merge origin/$GIT_BRANCH && $GIT_CMD
19 | echo "Listening..."
20 | fi
21 | sleep $GIT_TIME
22 | done
--------------------------------------------------------------------------------
/models/util/proxy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM haproxy:1.5-alpine
2 |
3 | RUN apk add gettext --no-cache
4 |
5 | # Copy files over to the container
6 | COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
7 |
8 | # Port that players will connect to (client IP will be passed through)
9 | ENV PROXY_PORT=null
10 |
11 | # Port that acts as a passthrough (client ip will be masked)
12 | ENV PASSTHROUGH_PORT=null
13 |
14 | # Port that the server is running on (proxy protocol should be enabled)
15 | ENV INTERNAL_PORT=null
16 | ENV INTERNAL_HOST=null
17 |
18 | # Inject environment variables and start haproxy
19 | CMD find /usr/local/etc/haproxy -name "haproxy.cfg" -type f -exec sh -c "envsubst < {} > env && rm {} && mv env {}" \; && haproxy -f /usr/local/etc/haproxy/haproxy.cfg
20 |
--------------------------------------------------------------------------------
/models/util/proxy/haproxy.cfg:
--------------------------------------------------------------------------------
1 | listen proxy
2 | bind *:$PROXY_PORT
3 | mode tcp
4 | balance leastconn
5 | option tcp-check
6 | server primary $INTERNAL_HOST:$INTERNAL_PORT check-send-proxy check send-proxy-v2
7 | server fallback $INTERNAL_HOST:$INTERNAL_PORT check-send-proxy check send-proxy-v2
8 |
9 | listen passthrough
10 | bind *:$PASSTHROUGH_PORT
11 | mode tcp
12 | balance leastconn
13 | option tcp-check
14 | server primary $INTERNAL_HOST:$INTERNAL_PORT check-send-proxy check
15 | server fallback $INTERNAL_HOST:$INTERNAL_PORT check-send-proxy check
--------------------------------------------------------------------------------
/models/util/spaces/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM git
2 |
3 | # Install dependencies
4 | RUN apk update && apk add python py-pip py-setuptools ca-certificates gettext
5 | RUN pip install python-dateutil
6 |
7 | # Download the s3cmd command line
8 | RUN git clone https://github.com/s3tools/s3cmd.git s3cmd
9 | RUN ln -s /s3cmd/s3cmd /usr/bin/s3cmd
10 |
11 | # Add our custom configuration
12 | ADD ./s3cfg /root/.s3cfg
13 | ADD . /root
14 |
15 | # Git related environment variables
16 | ENV GIT_TIME=0
17 | ENV GIT_CMD=/root/upload.sh
18 | ENV GIT_URL=$BUCKET_GIT
19 |
20 | # Inject custom secret variables
21 | CMD find /root -name ".s3cfg" -type f -exec sh -c "envsubst < {} > env && rm {} && mv env {}" \; && exec ./git.sh
22 |
--------------------------------------------------------------------------------
/models/util/spaces/acl.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | selected=$1
4 | s3cmd setacl "s3://${BUCKET_NAME}/${BUCKET_FOLDER}/${selected:2}" --acl-public
5 |
--------------------------------------------------------------------------------
/models/util/spaces/s3cfg:
--------------------------------------------------------------------------------
1 | [default]
2 | access_key = $BUCKET_ACCESS_KEY
3 | secret_key = $BUCKET_SECRET_KEY
4 | host_base = $BUCKET_REGION.digitaloceanspaces.com
5 | host_bucket = %(bucket)s.$BUCKET_REGION.digitaloceanspaces.com
6 | bucket_location = $BUCKET_LOCATION
7 | access_token =
8 | add_encoding_exts =
9 | add_headers =
10 | ca_certs_file =
11 | cache_file =
12 | check_ssl_certificate = True
13 | check_ssl_hostname = True
14 | cloudfront_host = cloudfront.amazonaws.com
15 | default_mime_type = binary/octet-stream
16 | delay_updates = False
17 | delete_after = False
18 | delete_after_fetch = False
19 | delete_removed = False
20 | dry_run = False
21 | enable_multipart = True
22 | encoding = UTF-8
23 | encrypt = False
24 | expiry_date =
25 | expiry_days =
26 | expiry_prefix =
27 | follow_symlinks = False
28 | force = False
29 | get_continue = False
30 | gpg_command = None
31 | gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
32 | gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
33 | gpg_passphrase =
34 | guess_mime_type = True
35 | human_readable_sizes = False
36 | invalidate_default_index_on_cf = False
37 | invalidate_default_index_root_on_cf = True
38 | invalidate_on_cf = False
39 | kms_key =
40 | limit = -1
41 | limitrate = 0
42 | list_md5 = False
43 | log_target_prefix =
44 | long_listing = False
45 | max_delete = -1
46 | mime_type =
47 | multipart_chunk_size_mb = 15
48 | multipart_max_chunks = 10000
49 | preserve_attrs = True
50 | progress_meter = True
51 | proxy_host =
52 | proxy_port = 0
53 | put_continue = False
54 | recursive = False
55 | recv_chunk = 65536
56 | reduced_redundancy = False
57 | requester_pays = False
58 | restore_days = 1
59 | restore_priority = Standard
60 | send_chunk = 65536
61 | server_side_encryption = False
62 | signature_v2 = False
63 | simpledb_host = sdb.amazonaws.com
64 | skip_existing = False
65 | socket_timeout = 300
66 | stats = False
67 | stop_on_error = False
68 | storage_class =
69 | urlencoding_mode = normal
70 | use_http_expect = False
71 | use_https = True
72 | use_mime_magic = True
73 | verbosity = WARNING
74 | website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
75 | website_error =
76 | website_index = index.html
77 |
--------------------------------------------------------------------------------
/models/util/spaces/upload.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | cd /data
4 |
5 | # Upload files recursively to the bucket
6 | s3cmd sync * s3://$BUCKET_NAME/$BUCKET_FOLDER/ --recursive
7 |
8 | # Forcefully set the ACL to either 'public' or 'private'
9 | if [ "$BUCKET_ACL" == "public" ]; then
10 | if [ -n "$BUCKET_ACL_SELECTOR" ]; then
11 | IFS="," read -r -a BUCKET_ACL_SELECTOR_ARRAY <<< "$BUCKET_ACL_SELECTOR"
12 | for ext in "${BUCKET_ACL_SELECTOR_ARRAY[@]}"; do
13 | find . -name "*.${ext}" -type f -exec /root/acl.sh {} \;
14 | done
15 | else
16 | s3cmd setacl s3://$BUCKET_NAME/$BUCKET_FOLDER/ --acl-public --recursive
17 | fi
18 | else
19 | s3cmd setacl s3://$BUCKET_NAME/$BUCKET_FOLDER/ --acl-private --recursive
20 | fi
21 |
--------------------------------------------------------------------------------
/models/web/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ruby:2.3.8
2 | RUN gem install bundler
3 |
4 | # Clone the repository
5 | RUN git clone https://github.com/StratusNetwork/web.git
6 | WORKDIR web
7 |
8 | RUN gem install therubyracer -v '0.12.1'
9 | RUN gem install libv8 -v '3.16.14.5' -- --with-system-v8
10 |
11 | # Build the cached version of the repo
12 | ARG CACHE=ccd5ccbcdd7dd84a654abf9d3bfde0b8e638855d
13 | RUN git reset --hard $CACHE
14 | RUN bundle install
15 |
16 | # Break the cache and get the latest version of the repository
17 | ARG BRANCH=master
18 | ADD https://${AUTH}api.github.com/repos/StratusNetwork/web/git/refs/heads/${BRANCH} web.json
19 |
20 | RUN git reset --hard && git pull && git reset --hard origin/$BRANCH && git pull
21 | RUN bundle install
22 |
23 | # Website role and port variables
24 | ENV RAILS_ENV=production
25 | ENV OCN_BOX=production
26 | ENV WEB_ROLE=octc
27 | ENV WEB_PORT=3000
28 |
29 | # Copy config files (override existing repository)
30 | COPY mongoid.yml ./config/mongoid.yml
31 |
32 | # Default to running rails with role on build
33 | CMD exec rails $WEB_ROLE -b 0.0.0.0 -p $WEB_PORT
34 |
35 | # Load ocn data repository (needs to be in this directory)
36 | VOLUME /minecraft/repo/data
37 |
--------------------------------------------------------------------------------
/models/web/mongoid.yml:
--------------------------------------------------------------------------------
1 | production:
2 | clients:
3 | default:
4 | database: pgm-prod
5 | hosts:
6 | - mongo:27017
7 | options:
8 | raise_not_found_error: false
9 |
10 | staging:
11 | clients:
12 | default:
13 | database: pgm-prod
14 | hosts:
15 | - mongo:27017
16 | options:
17 | raise_not_found_error: false
18 |
19 | development:
20 | clients:
21 | default:
22 | database: pgm-prod
23 | hosts:
24 | - localhost:27017
25 | options:
26 | raise_not_found_error: false
27 |
--------------------------------------------------------------------------------
/models/worker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ruby:2.3-alpine3.7
2 |
3 | RUN apk update && \
4 | apk --no-cache add git curl bash ruby-dev build-base
5 |
6 | WORKDIR worker
7 |
8 | COPY Gemfile Gemfile
9 | COPY lib lib
10 |
11 | RUN bundle install --without test
12 |
13 | RUN apk del ruby-dev build-base && \
14 | rm -rf /var/cache/apk/*
15 |
16 | RUN mv lib/worker/* .
17 |
18 | ENTRYPOINT ["ruby", "-I", "lib"]
19 | CMD ["worker.rb"]
20 |
--------------------------------------------------------------------------------
/models/worker/build.yml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: worker
3 | name: gcr.io/cloud-builders/docker
4 | args:
5 | - build
6 | - --tag=gcr.io/$PROJECT_ID/worker:latest
7 | - --file=models/worker/Dockerfile
8 | - .
9 | images:
10 | - gcr.io/$PROJECT_ID/worker:latest
11 |
--------------------------------------------------------------------------------
/models/worker/cloudbuild.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - id: worker
3 | name: gcr.io/cloud-builders/docker
4 | args:
5 | - build
6 | - --tag=gcr.io/$PROJECT_ID/worker:latest
7 | - --file=models/worker/Dockerfile
8 | - .
9 | images:
10 | - gcr.io/$PROJECT_ID/worker:latest
11 |
--------------------------------------------------------------------------------
/spec/dns_spec.rb:
--------------------------------------------------------------------------------
1 | require "minecraft/dns/parser"
2 |
3 | describe Minecraft::DNS::Parser do
4 |
5 | parser = Minecraft::DNS::Parser.new
6 | servers = [
7 | ["Lobby", "us", nil, nil, nil],
8 | ["Lobby-1", "tm", nil, 1, 25],
9 | ["Apple", "us", "private", 2, 25],
10 | ["Willy123", "us", "private", 3, 25],
11 | ["Lobby-2", "us", nil, 5, 25],
12 | ["01official", "tm", nil, 10, 25],
13 | ["Lobby-3", "us", nil, 15, 25],
14 | ["Beta", "us", nil, 20, 25],
15 | ["02official", "tm", nil, 25, 25],
16 | ["03official", "tm", nil, 30, 25],
17 | ].map{|values| RecursiveOpenStruct.new({
18 | name: values[0],
19 | bungee_name: values[0].downcase,
20 | datacenter: values[1].upcase,
21 | settings_profile: values[2],
22 | num_online: values[3],
23 | max_players: values[4]
24 | })}.sort_by{|s| (s.num_online || 0) / ([1, s.max_players || 0].max).to_f}
25 |
26 | it "validates mock server data" do
27 | expect(servers.size).to eql 10
28 | expect(servers[0].name).to eql "Lobby"
29 | expect(servers[1].name).to eql "Lobby-1"
30 | expect(servers[9].name).to eql "03official"
31 | end
32 |
33 | context "ip address" do
34 | it "parses a single name" do
35 | expect(parser.parse("beta.mc", servers)).to eql "beta.beta.default.svc.cluster.local"
36 | expect(parser.parse("willy123.mc", servers)).to eql "willy123.willy123.default.svc.cluster.local"
37 | end
38 |
39 | it "parses a name with datacenter" do
40 | expect(parser.parse("beta.us.mc", servers)).to eql "beta.beta.default.svc.cluster.local"
41 | expect(parser.parse("lobby.tm.mc", servers)).to eql "lobby-0.lobby.tm.svc.cluster.local"
42 | end
43 |
44 | it "parses a name with a selector" do
45 | expect(parser.parse("official.1.mc", servers)).to eql "official-1.official.tm.svc.cluster.local"
46 | expect(parser.parse("lobby.full.mc", servers)).to eql "lobby-2.lobby.default.svc.cluster.local"
47 | end
48 |
49 | it "parses a name with a selector and datacenter" do
50 | expect(parser.parse("lobby.2.us.mc", servers)).to eql "lobby-2.lobby.default.svc.cluster.local"
51 | expect(parser.parse("official.empty.tm.mc", servers)).to eql "official-0.official.tm.svc.cluster.local"
52 | end
53 | end
54 |
55 | context "components" do
56 | it "does not parse without proper suffix" do
57 | expect(parser.parse_components("alpha")).to be_nil
58 | expect(parser.parse_components("beta.lan")).to be_nil
59 | expect(parser.parse_components("beta.mc.cluster")).to be_nil
60 | end
61 |
62 | it "parses a single name" do
63 | expect(parser.parse_server_components("beta.mc", servers)).to eql({
64 | name: "beta", index: nil, datacenter: "US", selector: nil, server: servers[7]
65 | })
66 | expect(parser.parse_server_components("willy123.mc", servers)).to eql({
67 | name: "willy123", index: nil, datacenter: "US", selector: nil, server: servers[3]
68 | })
69 | end
70 |
71 | it "parses a name with datacenter" do
72 | expect(parser.parse_server_components("beta.us.mc", servers)).to eql({
73 | name: "beta", index: nil, datacenter: "US", selector: nil, server: servers[7]
74 | })
75 | expect(parser.parse_server_components("lobby.tm.mc", servers)).to eql({
76 | name: "lobby", index: 0, datacenter: "TM", selector: nil, server: servers[1]
77 | })
78 | end
79 |
80 | it "parses a name with a selector" do
81 | expect(parser.parse_server_components("official.1.mc", servers)).to eql({
82 | name: "official", index: 1, datacenter: "TM", selector: nil, server: servers[8]
83 | })
84 | expect(parser.parse_components("lobby.full.mc")).to eql({
85 | name: "lobby", index: nil, datacenter: nil, selector: "full"
86 | })
87 | expect(parser.parse_server_components("lobby.full.mc", servers)).to eql({
88 | name: "lobby", index: 2, datacenter: "US", selector: "full", server: servers[6]
89 | })
90 | end
91 |
92 | it "parses a name with a selector and datacenter" do
93 | expect(parser.parse_server_components("lobby.2.us.mc", servers)).to eql({
94 | name: "lobby", index: 2, datacenter: "US", selector: nil, server: servers[6]
95 | })
96 | expect(parser.parse_server_components("official.empty.tm.mc", servers)).to eql({
97 | name: "official", index: 0, datacenter: "TM", selector: "empty", server: servers[5]
98 | })
99 | end
100 | end
101 |
102 | context "datacenter" do
103 | it "parses valid datacenters" do
104 | expect(parser.parse_datacenter("us")).to eql "US"
105 | expect(parser.parse_datacenter("eU")).to eql "EU"
106 | expect(parser.parse_datacenter("Tm")).to eql "TM"
107 | end
108 |
109 | it "raises an exception when invalid" do
110 | expect{parser.parse_datacenter("blah")}.to raise_error(ParseException)
111 | expect{parser.parse_datacenter("")}.to raise_error(ParseException)
112 | expect{parser.parse_datacenter(nil)}.to raise_error(ParseException)
113 | end
114 | end
115 |
116 | context "selectors" do
117 | it "parses indexes as integers" do
118 | expect(parser.parse_selector("1")).to eql 1
119 | expect(parser.parse_selector("09")).to eql 9
120 | expect(parser.parse_selector("11")).to eql 11
121 | expect(parser.parse_selector("-1")).to eql 0
122 | end
123 |
124 | it "parses the special selector keywords" do
125 | expect(parser.parse_selector("rand")).to eql "rand"
126 | expect(parser.parse_selector("Empty")).to eql "empty"
127 | expect(parser.parse_selector("FULL")).to eql "full"
128 | end
129 |
130 | it "raises an exception when invalid" do
131 | expect{parser.parse_selector("blah")}.to raise_error(ParseException)
132 | expect{parser.parse_selector("")}.to raise_error(ParseException)
133 | expect{parser.parse_selector(nil)}.to raise_error(ParseException)
134 | end
135 | end
136 |
137 | end
138 |
--------------------------------------------------------------------------------
/spec/document_spec.rb:
--------------------------------------------------------------------------------
1 | require "document"
2 | require "timecop"
3 |
4 | describe Document do
5 |
6 | context "with incrementally updating document" do
7 | class IncrementalDoc
8 | include Document
9 |
10 | def initialize
11 | @revision = 0
12 | @cached = nil
13 | end
14 |
15 | def fetch!
16 | @cached = Time.now
17 | return @revision += 1
18 | end
19 |
20 | def cached
21 | @cached
22 | end
23 | end
24 |
25 | it "updates cache after document is called" do
26 | doc = IncrementalDoc.new
27 | expect(doc.cache).to eql 1
28 | expect(doc.document).to eql 2
29 | expect(doc.cache).to eql 2
30 | end
31 |
32 | it "does not update cache after fetch is called" do
33 | doc = IncrementalDoc.new
34 | expect(doc.cache).to eql 1
35 | expect(doc.fetch!).to eql 2
36 | expect(doc.cache).to eql 1
37 | end
38 |
39 | it "refreshes the cache" do
40 | doc = IncrementalDoc.new
41 | expect(doc.cached).to be_nil
42 | expect(doc.cache).to eql 1
43 | expect(doc.cached).to_not be_nil
44 | cached = doc.cached
45 | expect(doc.refresh!).to eql 2
46 | expect(doc.cached).to be > cached
47 | end
48 |
49 | it "expires the cache as time passes" do
50 | doc = IncrementalDoc.new
51 | expect(doc.cache).to eql 1
52 | Timecop.freeze(Time.now + 5.minutes) do
53 | expect(doc.cache).to eql 2
54 | end
55 | end
56 | end
57 |
58 | context "with nested data document" do
59 | class NestedDoc
60 | include Document
61 |
62 | def fetch!
63 | Array.new(5){rand(1...9999)}
64 | end
65 | end
66 |
67 | it "forwards missing method to document" do
68 | doc = NestedDoc.new
69 | expect(doc.size).to eql 5
70 | expect(doc.last).to be_an Integer
71 | expect(doc[0]).to be_an Integer
72 | end
73 |
74 | it "forwards missing method to cache" do
75 | doc = NestedDoc.new
76 | expect(doc.size_cache).to eql 5
77 | expect(doc.first_cache).to be_an Integer
78 | last = doc.last_cache
79 | expect(doc.last).not_to eql last
80 | end
81 | end
82 |
83 | end
84 |
--------------------------------------------------------------------------------
/spec/environment_spec.rb:
--------------------------------------------------------------------------------
1 | require "environment"
2 |
3 | describe Env do
4 | it "gets the environment variable" do
5 | expect(Env.get("HOME")).to eql ENV["HOME"]
6 | expect(Env.get("PWD")).to eql ENV["PWD"]
7 | end
8 |
9 | it "checks if environment variable exists" do
10 | expect(Env.has?("HOME")).to be true
11 | expect(Env.has?("FAKE-123-456")).to be false
12 | end
13 |
14 | it "gets multiple environment variables" do
15 | expect(Env.get_multi("PATH", File::SEPARATOR)).to eql ENV["PATH"].split(File::SEPARATOR)
16 | expect(Env.get_multi("HOME")).to eql [ENV["HOME"]]
17 | expect(Env.get("HOME", 1)).to eql [ENV["HOME"]]
18 | end
19 |
20 | it "ignores key case sensitivity" do
21 | expect(Env.get("home")).to_not be_nil
22 | expect(Env.get("Home")).to_not be_nil
23 | expect(Env.get("HOME")).to_not be_nil
24 | end
25 |
26 | it "sets an environment variable as override" do
27 | expect(Env.set("HOME", "not-force", false)).to be_nil
28 | expect(Env.get("HOME")).to eql ENV["HOME"]
29 | expect(Env.set("HOME", "force", true)).to eql "force"
30 | end
31 |
32 | it "extracts hostname and replica information" do
33 | expect(Env.replica).to eql -1
34 | expect(Env.set("HOSTNAME", "server-3", true)).to eql "server-3"
35 | expect(Env.host).to eql "server-3"
36 | expect(Env.replica).to eql 3
37 | end
38 | end
39 |
--------------------------------------------------------------------------------