├── .gitignore
├── LICENSE
├── README.md
├── _stack
│   ├── crm
│   │   ├── app
│   │   │   └── conf
│   │   │       └── supervisor.ini
│   │   └── nginx
│   │       └── conf
│   │           └── app.conf
│   ├── ctm
│   │   ├── app
│   │   │   └── conf
│   │   │       └── supervisor.ini
│   │   └── nginx
│   │       └── conf
│   │           └── app.conf
│   ├── data-lake
│   │   └── .gitignore
│   ├── ntm
│   │   ├── app
│   │   │   └── conf
│   │   │       └── supervisor.ini
│   │   └── nginx
│   │       └── conf
│   │           └── app.conf
│   └── observability
│       └── .gitignore
├── compose
│   ├── DEV.env
│   ├── QAA.env
│   ├── QAB.env
│   ├── crm.yml
│   ├── ctm.yml
│   ├── data-lake
│   │   ├── hadoop
│   │   │   └── main.yml
│   │   ├── kafka
│   │   │   └── main.yml
│   │   ├── mongo-db
│   │   │   └── main.yml
│   │   └── redis
│   │       └── main.yml
│   ├── ntm.yml
│   └── observability
│       └── elk
│           └── main.yml
├── conf
│   └── server.json
├── image
│   ├── ansible
│   │   └── 9.1.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── apm
│   │   └── 8.17.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── base
│   │   ├── 1.1.1
│   │   │   ├── alma-linux-8.dockerfile
│   │   │   ├── alma-linux-9.dockerfile
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   ├── context
│   │   │   │   └── .gitignore
│   │   │   ├── rocky-linux-8.dockerfile
│   │   │   ├── rocky-linux-9.dockerfile
│   │   │   └── ubuntu-linux-22-04.dockerfile
│   │   └── 1.2.1
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── chef
│   │   └── 18.3.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── elasticsearch
│   │   ├── 8.10.3
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── .gitignore
│   │   └── 8.17.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── hadoop
│   │   └── 2.10.1
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── kafka
│   │   └── 3.2.1
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── kibana
│   │   └── 8.17.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── logstash
│   │   └── 8.17.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── mongo-db
│   │   ├── 4.4.4
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── .gitignore
│   │   └── 7.0.2
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── nginx
│   │   ├── 1.14.0
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── .gitignore
│   │   └── 1.24.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── node-js
│   │   ├── 14.16.0
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── package.json
│   │   └── 17.7.1
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── package.json
│   ├── php
│   │   └── 8.3.3
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── postgresql
│   │   └── 16.4
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── python
│   │   ├── 2.7
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── .gitignore
│   │   └── 3.12.0
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── redis
│   │   ├── 4.0.9
│   │   │   ├── amazon-linux-2.dockerfile
│   │   │   └── context
│   │   │       └── .gitignore
│   │   └── 7.2.1
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── .gitignore
│   ├── ruby
│   │   └── 3.3.5
│   │       ├── amazon-linux-2.dockerfile
│   │       └── context
│   │           └── Gemfile
│   └── spark
│       └── 3.5.3
│           ├── amazon-linux-2.dockerfile
│           └── context
│               └── .gitignore
├── vagrant
│   └── alma-linux-9
│       ├── virtualbox
│       │   └── amd64
│       │       └── server
│       │           └── Vagrantfile
│       └── vmware
│           └── arm64
│               └── server
│                   └── Vagrantfile
└── workload
    ├── elasticsearch
    │   └── 8.17.0
    │       └── conf
    │           └── server.yml
    ├── hadoop
    │   └── 2.10.1
    │       ├── data
    │       │   ├── conf
    │       │   │   ├── core-site.xml
    │       │   │   ├── env.sh
    │       │   │   ├── hdfs-site.xml
    │       │   │   └── supervisor.ini
    │       │   └── script
    │       │       ├── start.sh
    │       │       └── stop.sh
    │       └── name
    │           ├── conf
    │           │   ├── core-site.xml
    │           │   ├── env.sh
    │           │   ├── hdfs-site.xml
    │           │   └── supervisor.ini
    │           └── script
    │               ├── start.sh
    │               └── stop.sh
    ├── kafka
    │   └── 3.2.1
    │       ├── broker
    │       │   ├── conf
    │       │   │   ├── server.conf
    │       │   │   └── supervisor.ini
    │       │   └── script
    │       │       ├── start.sh
    │       │       └── stop.sh
    │       └── controller
    │           ├── conf
    │           │   ├── server.conf
    │           │   └── supervisor.ini
    │           └── script
    │               ├── start.sh
    │               └── stop.sh
    ├── kibana
    │   └── 8.17.0
    │       └── conf
    │           └── server.yml
    ├── logstash
    │   └── 8.17.0
    │       └── conf
    │           ├── beats.conf
    │           ├── jvm.options
    │           ├── pipelines.yml
    │           └── server.yml
    ├── mongo-db
    │   └── 4.4.4
    │       ├── conf
    │       │   ├── server.conf
    │       │   └── supervisor.ini
    │       └── replica-cluster
    │           ├── conf
    │           │   ├── server.conf
    │           │   └── supervisor.ini
    │           └── script
    │               └── init.js
    ├── nginx
    │   ├── 1.14.0
    │   │   └── conf
    │   │       ├── server.conf
    │   │       └── supervisor.ini
    │   └── 1.24.0
    │       └── conf
    │           ├── server.conf
    │           └── supervisor.ini
    ├── postgresql
    │   └── 16.4
    │       └── conf
    │           ├── hba.conf
    │           ├── ident.conf
    │           ├── server.conf
    │           └── supervisor.ini
    ├── redis
    │   ├── 4.0.9
    │   │   └── conf
    │   │       ├── server.conf
    │   │       └── supervisor.ini
    │   └── 7.2.1
    │       ├── cluster
    │       │   ├── conf
    │       │   │   ├── server.conf
    │       │   │   └── supervisor.ini
    │       │   └── script
    │       │       └── init.sh
    │       └── conf
    │           ├── server.conf
    │           └── supervisor.ini
    └── supervisor
        └── conf
            └── server.conf
/.gitignore:
--------------------------------------------------------------------------------
1 | *.log
2 | tmp/*
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kickstart Docker
2 | Containerize and deploy popular full-stack applications, microservices, and Big Data workloads using containerd, Docker, Docker Compose, and Docker Swarm.
3 |
4 |
5 | ## Getting started
6 | Please refer to the [wiki](https://github.com/sloopstash/kickstart-docker/wiki) for detailed instructions on how to get started with our Docker starter-kit.
7 |
8 |
9 | ## Get support
10 | - [Issues](https://github.com/sloopstash/kickstart-docker/issues)
11 | - [Contact us](https://sloopstash.com/contact.html)
12 |
13 |
--------------------------------------------------------------------------------
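The wiki linked above covers setup in detail. As a minimal sketch of the workflow the rest of this repository assumes, clone it to /opt/kickstart-docker (the HOME_DIR used by the Compose environment files under compose/) and start one of the stacks with Docker Compose; the host directories referenced by the *_APP_SOURCE variables must also exist before the bind mounts can work:

  git clone https://github.com/sloopstash/kickstart-docker.git /opt/kickstart-docker
  cd /opt/kickstart-docker
  # DEV.env supplies HOME_DIR, image versions, subnets, and published ports.
  docker compose --env-file compose/DEV.env -f compose/crm.yml up -d
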
/_stack/crm/app/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:app]
2 | command=bash -c "python init.py --port=2000"
3 | process_name=%(program_name)s
4 | directory=/opt/app/source
5 | autorestart=false
6 | stdout_logfile=/opt/app/log/stdout.log
7 | stderr_logfile=/opt/app/log/stderr.log
8 |
--------------------------------------------------------------------------------
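Every long-running service in the starter-kit follows this pattern: the container's entrypoint is /usr/bin/supervisord, and a per-service program file like this one is bind-mounted into the container (here at /opt/app/system/supervisor.ini) for the shared Supervisor server configuration to pick up. A sketch of checking the managed process from the host, assuming the CRM stack is already running under the DEV environment:

  docker compose --env-file compose/DEV.env -f compose/crm.yml exec app \
    supervisorctl -c /etc/supervisord.conf status app
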
/_stack/crm/nginx/conf/app.conf:
--------------------------------------------------------------------------------
1 | proxy_next_upstream error;
2 |
3 | upstream app-servers {
4 | server app:2000;
5 | }
6 |
7 | server {
8 | server_name app.crm.sloopstash.*;
9 | root /opt/app/source;
10 |
11 | server_tokens off;
12 | error_log /opt/nginx/log/error.log warn;
13 | access_log /opt/nginx/log/access.log;
14 |
15 | large_client_header_buffers 4 16k;
16 |
17 | location / {
18 | #proxy_pass_header Server;
19 | proxy_set_header Host $http_host;
20 | proxy_redirect off;
21 | proxy_set_header X-Real-IP $remote_addr;
22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
23 | proxy_set_header X-Scheme $scheme;
24 | proxy_pass http://app-servers;
25 | proxy_http_version 1.1;
26 | proxy_intercept_errors on;
27 | }
28 | location = /favicon.ico {
29 | return 204;
30 | access_log off;
31 | log_not_found off;
32 | }
33 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
34 | return 403;
35 | }
36 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
37 | return 403;
38 | }
39 | location ~ /(.git|.gitignore) {
40 | return 403;
41 | }
42 | }
43 |
44 | server {
45 | server_name app-static.crm.sloopstash.*;
46 | root /opt/app/source;
47 |
48 | server_tokens off;
49 | error_log /opt/nginx/log/error.log warn;
50 | access_log /opt/nginx/log/access.log;
51 |
52 | large_client_header_buffers 4 16k;
53 |
54 | location ^~ /library/ {
55 | add_header Access-Control-Allow-Origin "*";
56 | add_header Access-Control-Allow-Credentials "true";
57 | }
58 | location ^~ /theme/ {
59 | add_header Access-Control-Allow-Origin "*";
60 | add_header Access-Control-Allow-Credentials "true";
61 | }
62 | location ^~ /asset/ {
63 | add_header Access-Control-Allow-Origin "*";
64 | add_header Access-Control-Allow-Credentials "true";
65 | }
66 | location = /favicon.ico {
67 | return 204;
68 | access_log off;
69 | log_not_found off;
70 | }
71 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
72 | return 403;
73 | }
74 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
75 | return 403;
76 | }
77 | location ~ /(.git|.gitignore) {
78 | return 403;
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
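The two server blocks split traffic by Host header: app.crm.* is reverse-proxied to the app upstream on port 2000 with the usual forwarding headers, while app-static.crm.* serves /library/, /theme/, and /asset/ directly with permissive CORS headers; requests for source files, configuration, and Git metadata are rejected with 403. With the CRM stack running and NGINX published on port 8001 (the DEV value of CRM_NGINX_PORT), the routing can be exercised from the host, as a sketch:

  # Hits the dynamic server block and is proxied to the Python app.
  curl -H 'Host: app.crm.sloopstash.dv' http://127.0.0.1:8001/
  # Hits the static server block; expect CORS headers on files under /asset/.
  curl -I -H 'Host: app-static.crm.sloopstash.dv' http://127.0.0.1:8001/asset/logo.png   # hypothetical asset path
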
/_stack/ctm/app/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:app]
2 | command=bash -c "node init.js --port=2000"
3 | process_name=%(program_name)s
4 | directory=/opt/app/source
5 | autorestart=false
6 | stdout_logfile=/opt/app/log/stdout.log
7 | stderr_logfile=/opt/app/log/stderr.log
8 |
--------------------------------------------------------------------------------
/_stack/ctm/nginx/conf/app.conf:
--------------------------------------------------------------------------------
1 | proxy_next_upstream error;
2 |
3 | upstream app-servers {
4 | server app:2000;
5 | }
6 |
7 | server {
8 | server_name app.ctm.sloopstash.*;
9 | root /opt/app/source;
10 |
11 | server_tokens off;
12 | error_log /opt/nginx/log/error.log warn;
13 | access_log /opt/nginx/log/access.log;
14 |
15 | large_client_header_buffers 4 16k;
16 |
17 | location / {
18 | #proxy_pass_header Server;
19 | proxy_set_header Host $http_host;
20 | proxy_redirect off;
21 | proxy_set_header X-Real-IP $remote_addr;
22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
23 | proxy_set_header X-Scheme $scheme;
24 | proxy_pass http://app-servers;
25 | proxy_http_version 1.1;
26 | proxy_intercept_errors on;
27 | }
28 | location = /favicon.ico {
29 | return 204;
30 | access_log off;
31 | log_not_found off;
32 | }
33 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
34 | return 403;
35 | }
36 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
37 | return 403;
38 | }
39 | location ~ /(.git|.gitignore) {
40 | return 403;
41 | }
42 | }
43 |
44 | server {
45 | server_name app-static.ctm.sloopstash.*;
46 | root /opt/app/source;
47 |
48 | server_tokens off;
49 | error_log /opt/nginx/log/error.log warn;
50 | access_log /opt/nginx/log/access.log;
51 |
52 | large_client_header_buffers 4 16k;
53 |
54 | location ^~ /library/ {
55 | add_header Access-Control-Allow-Origin "*";
56 | add_header Access-Control-Allow-Credentials "true";
57 | }
58 | location ^~ /theme/ {
59 | add_header Access-Control-Allow-Origin "*";
60 | add_header Access-Control-Allow-Credentials "true";
61 | }
62 | location ^~ /asset/ {
63 | add_header Access-Control-Allow-Origin "*";
64 | add_header Access-Control-Allow-Credentials "true";
65 | }
66 | location = /favicon.ico {
67 | return 204;
68 | access_log off;
69 | log_not_found off;
70 | }
71 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
72 | return 403;
73 | }
74 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
75 | return 403;
76 | }
77 | location ~ /(.git|.gitignore) {
78 | return 403;
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/_stack/data-lake/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/_stack/ntm/app/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:app]
2 | command=bash -c "rails server -b 0.0.0.0 -p 2000"
3 | process_name=%(program_name)s
4 | directory=/opt/app/source
5 | autorestart=false
6 | stdout_logfile=/opt/app/log/stdout.log
7 | stderr_logfile=/opt/app/log/stderr.log
8 |
--------------------------------------------------------------------------------
/_stack/ntm/nginx/conf/app.conf:
--------------------------------------------------------------------------------
1 | proxy_next_upstream error;
2 |
3 | upstream app-servers {
4 | server app:2000;
5 | }
6 |
7 | server {
8 | server_name app.ntm.sloopstash.*;
9 | root /opt/app/source;
10 |
11 | server_tokens off;
12 | error_log /opt/nginx/log/error.log warn;
13 | access_log /opt/nginx/log/access.log;
14 |
15 | large_client_header_buffers 4 16k;
16 |
17 | location / {
18 | #proxy_pass_header Server;
19 | proxy_set_header Host $http_host;
20 | proxy_redirect off;
21 | proxy_set_header X-Real-IP $remote_addr;
22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
23 | proxy_set_header X-Scheme $scheme;
24 | proxy_pass http://app-servers;
25 | proxy_http_version 1.1;
26 | proxy_intercept_errors on;
27 | }
28 | location = /favicon.ico {
29 | return 204;
30 | access_log off;
31 | log_not_found off;
32 | }
33 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
34 | return 403;
35 | }
36 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
37 | return 403;
38 | }
39 | location ~ /(.git|.gitignore) {
40 | return 403;
41 | }
42 | }
43 |
44 | server {
45 | server_name app-static.ntm.sloopstash.*;
46 | root /opt/app/source;
47 |
48 | server_tokens off;
49 | error_log /opt/nginx/log/error.log warn;
50 | access_log /opt/nginx/log/access.log;
51 |
52 | large_client_header_buffers 4 16k;
53 |
54 | location ^~ /library/ {
55 | add_header Access-Control-Allow-Origin "*";
56 | add_header Access-Control-Allow-Credentials "true";
57 | }
58 | location ^~ /theme/ {
59 | add_header Access-Control-Allow-Origin "*";
60 | add_header Access-Control-Allow-Credentials "true";
61 | }
62 | location ^~ /asset/ {
63 | add_header Access-Control-Allow-Origin "*";
64 | add_header Access-Control-Allow-Credentials "true";
65 | }
66 | location = /favicon.ico {
67 | return 204;
68 | access_log off;
69 | log_not_found off;
70 | }
71 | location ~ /(asset|conf|controller|helper|library|model|script|theme|view) {
72 | return 403;
73 | }
74 | location ~* \.(py|pyc|js|css|txt|conf|ini|json|yml|yaml|xml|log)$ {
75 | return 403;
76 | }
77 | location ~ /(.git|.gitignore) {
78 | return 403;
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/_stack/observability/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/compose/DEV.env:
--------------------------------------------------------------------------------
1 | ENVIRONMENT=DEV
2 | EXTERNAL_DOMAIN=sloopstash.dv
3 | INTERNAL_DOMAIN=sloopstash-dev.internal
4 | HOME_DIR=/opt/kickstart-docker
5 |
6 | CRM_REDIS_VERSION=7.2.1
7 | CRM_PYTHON_VERSION=2.7
8 | CRM_NGINX_VERSION=1.24.0
9 | CRM_APP_SOURCE=/opt/sloopstash-crm-app
10 | CRM_NETWORK=14.1.1.0/24
11 | CRM_REDIS_IP=14.1.1.10
12 | CRM_APP_IP=14.1.1.20
13 | CRM_NGINX_IP=14.1.1.30
14 | CRM_NGINX_PORT=8001
15 |
16 | CTM_MONGO_DB_VERSION=4.4.4
17 | CTM_NODE_JS_VERSION=17.7.1
18 | CTM_NGINX_VERSION=1.24.0
19 | CTM_APP_SOURCE=/opt/sloopstash-ctm-app
20 | CTM_NETWORK=14.1.2.0/24
21 | CTM_MONGO_DB_IP=14.1.2.10
22 | CTM_APP_IP=14.1.2.20
23 | CTM_NGINX_IP=14.1.2.30
24 | CTM_NGINX_PORT=8001
25 |
26 | NTM_POSTGRESQL_VERSION=16.4
27 | NTM_RUBY_VERSION=3.3.5
28 | NTM_NGINX_VERSION=1.24.0
29 | NTM_APP_SOURCE=/opt/sloopstash-ntm-app
30 | NTM_NETWORK=14.1.7.0/24
31 | NTM_POSTGRESQL_IP=14.1.7.10
32 | NTM_APP_IP=14.1.7.20
33 | NTM_NGINX_IP=14.1.7.30
34 | NTM_NGINX_PORT=8001
35 |
36 | DATA_LAKE_HADOOP_VERSION=2.10.1
37 | DATA_LAKE_KAFKA_VERSION=3.2.1
38 | DATA_LAKE_REDIS_VERSION=7.2.1
39 | DATA_LAKE_MONGO_DB_VERSION=4.4.4
40 | DATA_LAKE_HADOOP_NETWORK=14.1.3.0/24
41 | DATA_LAKE_KAFKA_NETWORK=14.1.4.0/24
42 | DATA_LAKE_REDIS_NETWORK=14.1.5.0/24
43 | DATA_LAKE_MONGO_DB_NETWORK=14.1.6.0/24
44 |
--------------------------------------------------------------------------------
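DEV.env, QAA.env, and QAB.env carry the same variable set with environment-specific domains, subnets, and published ports (8001, 8002, and 8003 respectively); the environment is chosen at run time with Compose's --env-file flag rather than baked into the stack files. To see how a stack definition renders after variable substitution, without starting anything:

  docker compose --env-file compose/DEV.env -f compose/crm.yml config
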
/compose/QAA.env:
--------------------------------------------------------------------------------
1 | ENVIRONMENT=QAA
2 | EXTERNAL_DOMAIN=sloopstash.qaa
3 | INTERNAL_DOMAIN=sloopstash-qaa.internal
4 | HOME_DIR=/opt/kickstart-docker
5 |
6 | CRM_REDIS_VERSION=7.2.1
7 | CRM_PYTHON_VERSION=2.7
8 | CRM_NGINX_VERSION=1.24.0
9 | CRM_APP_SOURCE=/opt/sloopstash-crm-app
10 | CRM_NETWORK=15.1.1.0/24
11 | CRM_REDIS_IP=15.1.1.10
12 | CRM_APP_IP=15.1.1.20
13 | CRM_NGINX_IP=15.1.1.30
14 | CRM_NGINX_PORT=8002
15 |
16 | CTM_MONGO_DB_VERSION=4.4.4
17 | CTM_NODE_JS_VERSION=17.7.1
18 | CTM_NGINX_VERSION=1.24.0
19 | CTM_APP_SOURCE=/opt/sloopstash-ctm-app
20 | CTM_NETWORK=15.1.2.0/24
21 | CTM_MONGO_DB_IP=15.1.2.10
22 | CTM_APP_IP=15.1.2.20
23 | CTM_NGINX_IP=15.1.2.30
24 | CTM_NGINX_PORT=8002
25 |
26 | NTM_POSTGRESQL_VERSION=16.4
27 | NTM_RUBY_VERSION=3.3.5
28 | NTM_NGINX_VERSION=1.24.0
29 | NTM_APP_SOURCE=/opt/sloopstash-ntm-app
30 | NTM_NETWORK=15.1.7.0/24
31 | NTM_POSTGRESQL_IP=15.1.7.10
32 | NTM_APP_IP=15.1.7.20
33 | NTM_NGINX_IP=15.1.7.30
34 | NTM_NGINX_PORT=8002
35 |
36 | DATA_LAKE_HADOOP_VERSION=2.10.1
37 | DATA_LAKE_KAFKA_VERSION=3.2.1
38 | DATA_LAKE_REDIS_VERSION=7.2.1
39 | DATA_LAKE_MONGO_DB_VERSION=4.4.4
40 | DATA_LAKE_HADOOP_NETWORK=15.1.3.0/24
41 | DATA_LAKE_KAFKA_NETWORK=15.1.4.0/24
42 | DATA_LAKE_REDIS_NETWORK=15.1.5.0/24
43 | DATA_LAKE_MONGO_DB_NETWORK=15.1.6.0/24
44 |
--------------------------------------------------------------------------------
/compose/QAB.env:
--------------------------------------------------------------------------------
1 | ENVIRONMENT=QAB
2 | EXTERNAL_DOMAIN=sloopstash.qab
3 | INTERNAL_DOMAIN=sloopstash-qab.internal
4 | HOME_DIR=/opt/kickstart-docker
5 |
6 | CRM_REDIS_VERSION=7.2.1
7 | CRM_PYTHON_VERSION=2.7
8 | CRM_NGINX_VERSION=1.24.0
9 | CRM_APP_SOURCE=/opt/sloopstash-crm-app
10 | CRM_NETWORK=16.1.1.0/24
11 | CRM_REDIS_IP=16.1.1.10
12 | CRM_APP_IP=16.1.1.20
13 | CRM_NGINX_IP=16.1.1.30
14 | CRM_NGINX_PORT=8003
15 |
16 | CTM_MONGO_DB_VERSION=4.4.4
17 | CTM_NODE_JS_VERSION=17.7.1
18 | CTM_NGINX_VERSION=1.24.0
19 | CTM_APP_SOURCE=/opt/sloopstash-ctm-app
20 | CTM_NETWORK=16.1.2.0/24
21 | CTM_MONGO_DB_IP=16.1.2.10
22 | CTM_APP_IP=16.1.2.20
23 | CTM_NGINX_IP=16.1.2.30
24 | CTM_NGINX_PORT=8003
25 |
26 | NTM_POSTGRESQL_VERSION=16.4
27 | NTM_RUBY_VERSION=3.3.5
28 | NTM_NGINX_VERSION=1.24.0
29 | NTM_APP_SOURCE=/opt/sloopstash-ntm-app
30 | NTM_NETWORK=16.1.7.0/24
31 | NTM_POSTGRESQL_IP=16.1.7.10
32 | NTM_APP_IP=16.1.7.20
33 | NTM_NGINX_IP=16.1.7.30
34 | NTM_NGINX_PORT=8003
35 |
36 | DATA_LAKE_HADOOP_VERSION=2.10.1
37 | DATA_LAKE_KAFKA_VERSION=3.2.1
38 | DATA_LAKE_REDIS_VERSION=7.2.1
39 | DATA_LAKE_MONGO_DB_VERSION=4.4.4
40 | DATA_LAKE_HADOOP_NETWORK=16.1.3.0/24
41 | DATA_LAKE_KAFKA_NETWORK=16.1.4.0/24
42 | DATA_LAKE_REDIS_NETWORK=16.1.5.0/24
43 | DATA_LAKE_MONGO_DB_NETWORK=16.1.6.0/24
44 |
--------------------------------------------------------------------------------
/compose/crm.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | redis:
4 | image: sloopstash/redis:v${CRM_REDIS_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | volumes:
8 | - redis-data:/opt/redis/data
9 | - redis-log:/opt/redis/log
10 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
11 | - ${HOME_DIR}/workload/redis/${CRM_REDIS_VERSION}/conf/supervisor.ini:/opt/redis/system/supervisor.ini
12 | - ${HOME_DIR}/workload/redis/${CRM_REDIS_VERSION}/conf/server.conf:/opt/redis/conf/server.conf
13 | networks:
14 | common:
15 | ipv4_address: ${CRM_REDIS_IP}
16 | app:
17 | image: sloopstash/python:v${CRM_PYTHON_VERSION}
18 | entrypoint: /usr/bin/supervisord
19 | command: "-c /etc/supervisord.conf"
20 | environment:
21 | - STATIC_ENDPOINT=http://app-static.crm.${EXTERNAL_DOMAIN}:${CRM_NGINX_PORT}
22 | volumes:
23 | - ${CRM_APP_SOURCE}:/opt/app/source
24 | - app-log:/opt/app/log
25 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
26 | - ${HOME_DIR}/_stack/crm/app/conf/supervisor.ini:/opt/app/system/supervisor.ini
27 | depends_on:
28 | - redis
29 | networks:
30 | common:
31 | ipv4_address: ${CRM_APP_IP}
32 | nginx:
33 | image: sloopstash/nginx:v${CRM_NGINX_VERSION}
34 | entrypoint: /usr/bin/supervisord
35 | command: "-c /etc/supervisord.conf"
36 | ports:
37 | - "${CRM_NGINX_PORT}:80"
38 | volumes:
39 | - ${CRM_APP_SOURCE}:/opt/app/source
40 | - nginx-log:/opt/nginx/log
41 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
42 | - ${HOME_DIR}/workload/nginx/${CRM_NGINX_VERSION}/conf/supervisor.ini:/opt/nginx/system/supervisor.ini
43 | - ${HOME_DIR}/workload/nginx/${CRM_NGINX_VERSION}/conf/server.conf:/opt/nginx/conf/server.conf
44 | - ${HOME_DIR}/_stack/crm/nginx/conf/app.conf:/opt/nginx/conf/app.conf
45 | depends_on:
46 | - app
47 | networks:
48 | common:
49 | ipv4_address: ${CRM_NGINX_IP}
50 | volumes:
51 | redis-data:
52 | driver: local
53 | redis-log:
54 | driver: local
55 | app-log:
56 | driver: local
57 | nginx-log:
58 | driver: local
59 | networks:
60 | common:
61 | driver: bridge
62 | ipam:
63 | driver: default
64 | config:
65 | - subnet: ${CRM_NETWORK}
66 |
--------------------------------------------------------------------------------
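The CRM stack wires Redis, the Python application, and NGINX onto one bridge network with fixed IPv4 addresses from CRM_NETWORK, and every container runs under supervisord with its configuration bind-mounted from the workload/ and _stack/ trees. Data and logs live in named volumes, so they survive container recreation; as a sketch, under the DEV environment:

  docker compose --env-file compose/DEV.env -f compose/crm.yml ps
  docker compose --env-file compose/DEV.env -f compose/crm.yml down      # containers removed, named volumes kept
  docker compose --env-file compose/DEV.env -f compose/crm.yml down -v   # also removes redis-data and the log volumes
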
/compose/ctm.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | mongo-db:
4 | image: sloopstash/mongo-db:v${CTM_MONGO_DB_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | volumes:
8 | - mongo-db-data:/opt/mongo-db/data
9 | - mongo-db-log:/opt/mongo-db/log
10 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
11 | - ${HOME_DIR}/workload/mongo-db/${CTM_MONGO_DB_VERSION}/conf/supervisor.ini:/opt/mongo-db/system/supervisor.ini
12 | - ${HOME_DIR}/workload/mongo-db/${CTM_MONGO_DB_VERSION}/conf/server.conf:/opt/mongo-db/conf/server.conf
13 | networks:
14 | common:
15 | ipv4_address: ${CTM_MONGO_DB_IP}
16 | app:
17 | image: sloopstash/node-js:v${CTM_NODE_JS_VERSION}
18 | entrypoint: /usr/bin/supervisord
19 | command: "-c /etc/supervisord.conf"
20 | environment:
21 | - STATIC_ENDPOINT=http://app-static.ctm.${EXTERNAL_DOMAIN}:${CTM_NGINX_PORT}
22 | volumes:
23 | - ${CTM_APP_SOURCE}:/opt/app/source
24 | - app-log:/opt/app/log
25 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
26 | - ${HOME_DIR}/_stack/ctm/app/conf/supervisor.ini:/opt/app/system/supervisor.ini
27 | depends_on:
28 | - mongo-db
29 | networks:
30 | common:
31 | ipv4_address: ${CTM_APP_IP}
32 | nginx:
33 | image: sloopstash/nginx:v${CTM_NGINX_VERSION}
34 | entrypoint: /usr/bin/supervisord
35 | command: "-c /etc/supervisord.conf"
36 | ports:
37 | - "${CTM_NGINX_PORT}:80"
38 | volumes:
39 | - ${CTM_APP_SOURCE}:/opt/app/source
40 | - nginx-log:/opt/nginx/log
41 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
42 | - ${HOME_DIR}/workload/nginx/${CTM_NGINX_VERSION}/conf/supervisor.ini:/opt/nginx/system/supervisor.ini
43 | - ${HOME_DIR}/workload/nginx/${CTM_NGINX_VERSION}/conf/server.conf:/opt/nginx/conf/server.conf
44 | - ${HOME_DIR}/_stack/ctm/nginx/conf/app.conf:/opt/nginx/conf/app.conf
45 | depends_on:
46 | - app
47 | networks:
48 | common:
49 | ipv4_address: ${CTM_NGINX_IP}
50 | volumes:
51 | mongo-db-data:
52 | driver: local
53 | mongo-db-log:
54 | driver: local
55 | app-log:
56 | driver: local
57 | nginx-log:
58 | driver: local
59 | networks:
60 | common:
61 | driver: bridge
62 | ipam:
63 | driver: default
64 | config:
65 | - subnet: ${CTM_NETWORK}
66 |
--------------------------------------------------------------------------------
/compose/data-lake/hadoop/main.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | hadoop-name-1:
4 | image: sloopstash/hadoop:v${DATA_LAKE_HADOOP_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | environment:
8 | - JAVA_HOME=/usr/java/jdk1.8.0_131/jre
9 | - HADOOP_HOME=/usr/local/lib/hadoop
10 | - HADOOP_CONF_DIR=/usr/local/lib/hadoop/etc/hadoop
11 | - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_131/jre/bin:/usr/local/lib/hadoop/bin
12 | volumes:
13 | - hadoop-name-1-data:/opt/hadoop/data
14 | - hadoop-name-1-log:/opt/hadoop/log
15 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/name/script:/opt/hadoop/script
16 | - hadoop-name-1-tmp:/opt/hadoop/tmp
17 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
18 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/name/conf/supervisor.ini:/opt/hadoop/system/supervisor.ini
19 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/name/conf/env.sh:/opt/hadoop/conf/env.sh
20 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/name/conf/core-site.xml:/opt/hadoop/conf/core-site.xml
21 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/name/conf/hdfs-site.xml:/opt/hadoop/conf/hdfs-site.xml
22 | networks:
23 | - common
24 | hadoop-data-1:
25 | image: sloopstash/hadoop:v${DATA_LAKE_HADOOP_VERSION}
26 | entrypoint: /usr/bin/supervisord
27 | command: "-c /etc/supervisord.conf"
28 | environment:
29 | - JAVA_HOME=/usr/java/jdk1.8.0_131/jre
30 | - HADOOP_HOME=/usr/local/lib/hadoop
31 | - HADOOP_CONF_DIR=/usr/local/lib/hadoop/etc/hadoop
32 | - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_131/jre/bin:/usr/local/lib/hadoop/bin
33 | volumes:
34 | - hadoop-data-1-data:/opt/hadoop/data
35 | - hadoop-data-1-log:/opt/hadoop/log
36 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/script:/opt/hadoop/script
37 | - hadoop-data-1-tmp:/opt/hadoop/tmp
38 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
39 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/supervisor.ini:/opt/hadoop/system/supervisor.ini
40 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/env.sh:/opt/hadoop/conf/env.sh
41 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/core-site.xml:/opt/hadoop/conf/core-site.xml
42 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/hdfs-site.xml:/opt/hadoop/conf/hdfs-site.xml
43 | depends_on:
44 | - hadoop-name-1
45 | networks:
46 | - common
47 | hadoop-data-2:
48 | image: sloopstash/hadoop:v${DATA_LAKE_HADOOP_VERSION}
49 | entrypoint: /usr/bin/supervisord
50 | command: "-c /etc/supervisord.conf"
51 | environment:
52 | - JAVA_HOME=/usr/java/jdk1.8.0_131/jre
53 | - HADOOP_HOME=/usr/local/lib/hadoop
54 | - HADOOP_CONF_DIR=/usr/local/lib/hadoop/etc/hadoop
55 | - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_131/jre/bin:/usr/local/lib/hadoop/bin
56 | volumes:
57 | - hadoop-data-2-data:/opt/hadoop/data
58 | - hadoop-data-2-log:/opt/hadoop/log
59 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/script:/opt/hadoop/script
60 | - hadoop-data-2-tmp:/opt/hadoop/tmp
61 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
62 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/supervisor.ini:/opt/hadoop/system/supervisor.ini
63 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/env.sh:/opt/hadoop/conf/env.sh
64 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/core-site.xml:/opt/hadoop/conf/core-site.xml
65 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/hdfs-site.xml:/opt/hadoop/conf/hdfs-site.xml
66 | depends_on:
67 | - hadoop-name-1
68 | networks:
69 | - common
70 | hadoop-data-3:
71 | image: sloopstash/hadoop:v${DATA_LAKE_HADOOP_VERSION}
72 | entrypoint: /usr/bin/supervisord
73 | command: "-c /etc/supervisord.conf"
74 | environment:
75 | - JAVA_HOME=/usr/java/jdk1.8.0_131/jre
76 | - HADOOP_HOME=/usr/local/lib/hadoop
77 | - HADOOP_CONF_DIR=/usr/local/lib/hadoop/etc/hadoop
78 | - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_131/jre/bin:/usr/local/lib/hadoop/bin
79 | volumes:
80 | - hadoop-data-3-data:/opt/hadoop/data
81 | - hadoop-data-3-log:/opt/hadoop/log
82 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/script:/opt/hadoop/script
83 | - hadoop-data-3-tmp:/opt/hadoop/tmp
84 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
85 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/supervisor.ini:/opt/hadoop/system/supervisor.ini
86 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/env.sh:/opt/hadoop/conf/env.sh
87 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/core-site.xml:/opt/hadoop/conf/core-site.xml
88 | - ${HOME_DIR}/workload/hadoop/${DATA_LAKE_HADOOP_VERSION}/data/conf/hdfs-site.xml:/opt/hadoop/conf/hdfs-site.xml
89 | depends_on:
90 | - hadoop-name-1
91 | networks:
92 | - common
93 | volumes:
94 | hadoop-name-1-data:
95 | driver: local
96 | hadoop-name-1-log:
97 | driver: local
98 | hadoop-name-1-tmp:
99 | driver: local
100 | hadoop-data-1-data:
101 | driver: local
102 | hadoop-data-1-log:
103 | driver: local
104 | hadoop-data-1-tmp:
105 | driver: local
106 | hadoop-data-2-data:
107 | driver: local
108 | hadoop-data-2-log:
109 | driver: local
110 | hadoop-data-2-tmp:
111 | driver: local
112 | hadoop-data-3-data:
113 | driver: local
114 | hadoop-data-3-log:
115 | driver: local
116 | hadoop-data-3-tmp:
117 | driver: local
118 | networks:
119 | common:
120 | driver: bridge
121 | ipam:
122 | driver: default
123 | config:
124 | - subnet: ${DATA_LAKE_HADOOP_NETWORK}
125 |
--------------------------------------------------------------------------------
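This file starts one NameNode and three DataNodes from the same Hadoop image; the services differ only in whether the workload/hadoop/2.10.1/name or .../data configuration and scripts are mounted. A hedged way to confirm that the DataNodes have registered with the NameNode, assuming the image keeps the Hadoop binaries on the PATH set above:

  docker compose --env-file compose/DEV.env -f compose/data-lake/hadoop/main.yml up -d
  docker compose --env-file compose/DEV.env -f compose/data-lake/hadoop/main.yml \
    exec hadoop-name-1 hdfs dfsadmin -report   # should list three live datanodes
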
/compose/data-lake/kafka/main.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | kafka-controller-1:
4 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | environment:
8 | - NODE_ID=1
9 | volumes:
10 | - kafka-controller-1-data:/opt/kafka/data
11 | - kafka-controller-1-log:/opt/kafka/log
12 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/script:/opt/kafka/script
13 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
14 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
15 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/server.conf:/opt/kafka/conf/server-reference.conf
16 | networks:
17 | - common
18 | kafka-controller-2:
19 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
20 | entrypoint: /usr/bin/supervisord
21 | command: "-c /etc/supervisord.conf"
22 | environment:
23 | - NODE_ID=2
24 | volumes:
25 | - kafka-controller-2-data:/opt/kafka/data
26 | - kafka-controller-2-log:/opt/kafka/log
27 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/script:/opt/kafka/script
28 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
29 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
30 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/server.conf:/opt/kafka/conf/server-reference.conf
31 | networks:
32 | - common
33 | kafka-controller-3:
34 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
35 | entrypoint: /usr/bin/supervisord
36 | command: "-c /etc/supervisord.conf"
37 | environment:
38 | - NODE_ID=3
39 | volumes:
40 | - kafka-controller-3-data:/opt/kafka/data
41 | - kafka-controller-3-log:/opt/kafka/log
42 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/script:/opt/kafka/script
43 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
44 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
45 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/server.conf:/opt/kafka/conf/server-reference.conf
46 | networks:
47 | - common
48 | kafka-broker-1:
49 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
50 | entrypoint: /usr/bin/supervisord
51 | command: "-c /etc/supervisord.conf"
52 | environment:
53 | - NODE_ID=4
54 | volumes:
55 | - kafka-broker-1-data:/opt/kafka/data
56 | - kafka-broker-1-log:/opt/kafka/log
57 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/script:/opt/kafka/script
58 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
59 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
60 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/server.conf:/opt/kafka/conf/server-reference.conf
61 | depends_on:
62 | - kafka-controller-1
63 | - kafka-controller-2
64 | - kafka-controller-3
65 | networks:
66 | - common
67 | kafka-broker-2:
68 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
69 | entrypoint: /usr/bin/supervisord
70 | command: "-c /etc/supervisord.conf"
71 | environment:
72 | - NODE_ID=5
73 | volumes:
74 | - kafka-broker-2-data:/opt/kafka/data
75 | - kafka-broker-2-log:/opt/kafka/log
76 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/script:/opt/kafka/script
77 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
78 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
79 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/server.conf:/opt/kafka/conf/server-reference.conf
80 | depends_on:
81 | - kafka-controller-1
82 | - kafka-controller-2
83 | - kafka-controller-3
84 | networks:
85 | - common
86 | kafka-broker-3:
87 | image: sloopstash/kafka:v${DATA_LAKE_KAFKA_VERSION}
88 | entrypoint: /usr/bin/supervisord
89 | command: "-c /etc/supervisord.conf"
90 | environment:
91 | - NODE_ID=6
92 | volumes:
93 | - kafka-broker-3-data:/opt/kafka/data
94 | - kafka-broker-3-log:/opt/kafka/log
95 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/script:/opt/kafka/script
96 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
97 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/supervisor.ini:/opt/kafka/system/supervisor.ini
98 | - ${HOME_DIR}/workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/server.conf:/opt/kafka/conf/server-reference.conf
99 | depends_on:
100 | - kafka-controller-1
101 | - kafka-controller-2
102 | - kafka-controller-3
103 | networks:
104 | - common
105 | volumes:
106 | kafka-controller-1-data:
107 | driver: local
108 | kafka-controller-1-log:
109 | driver: local
110 | kafka-controller-2-data:
111 | driver: local
112 | kafka-controller-2-log:
113 | driver: local
114 | kafka-controller-3-data:
115 | driver: local
116 | kafka-controller-3-log:
117 | driver: local
118 | kafka-broker-1-data:
119 | driver: local
120 | kafka-broker-1-log:
121 | driver: local
122 | kafka-broker-2-data:
123 | driver: local
124 | kafka-broker-2-log:
125 | driver: local
126 | kafka-broker-3-data:
127 | driver: local
128 | kafka-broker-3-log:
129 | driver: local
130 | networks:
131 | common:
132 | driver: bridge
133 | ipam:
134 | driver: default
135 | config:
136 | - subnet: ${DATA_LAKE_KAFKA_NETWORK}
137 |
--------------------------------------------------------------------------------
/compose/data-lake/mongo-db/main.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | mongo-db-1:
4 | image: sloopstash/mongo-db:v${DATA_LAKE_MONGO_DB_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | volumes:
8 | - mongo-db-1-data:/opt/mongo-db/data
9 | - mongo-db-1-log:/opt/mongo-db/log
10 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/script:/opt/mongo-db/script
11 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
12 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/supervisor.ini:/opt/mongo-db/system/supervisor.ini
13 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/server.conf:/opt/mongo-db/conf/server.conf
14 | networks:
15 | - common
16 | mongo-db-2:
17 | image: sloopstash/mongo-db:v${DATA_LAKE_MONGO_DB_VERSION}
18 | entrypoint: /usr/bin/supervisord
19 | command: "-c /etc/supervisord.conf"
20 | volumes:
21 | - mongo-db-2-data:/opt/mongo-db/data
22 | - mongo-db-2-log:/opt/mongo-db/log
23 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/script:/opt/mongo-db/script
24 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
25 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/supervisor.ini:/opt/mongo-db/system/supervisor.ini
26 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/server.conf:/opt/mongo-db/conf/server.conf
27 | depends_on:
28 | - mongo-db-1
29 | networks:
30 | - common
31 | mongo-db-3:
32 | image: sloopstash/mongo-db:v${DATA_LAKE_MONGO_DB_VERSION}
33 | entrypoint: /usr/bin/supervisord
34 | command: "-c /etc/supervisord.conf"
35 | volumes:
36 | - mongo-db-3-data:/opt/mongo-db/data
37 | - mongo-db-3-log:/opt/mongo-db/log
38 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/script:/opt/mongo-db/script
39 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
40 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/supervisor.ini:/opt/mongo-db/system/supervisor.ini
41 | - ${HOME_DIR}/workload/mongo-db/${DATA_LAKE_MONGO_DB_VERSION}/replica-cluster/conf/server.conf:/opt/mongo-db/conf/server.conf
42 | depends_on:
43 | - mongo-db-1
44 | networks:
45 | - common
46 | volumes:
47 | mongo-db-1-data:
48 | driver: local
49 | mongo-db-1-log:
50 | driver: local
51 | mongo-db-2-data:
52 | driver: local
53 | mongo-db-2-log:
54 | driver: local
55 | mongo-db-3-data:
56 | driver: local
57 | mongo-db-3-log:
58 | driver: local
59 | networks:
60 | common:
61 | driver: bridge
62 | ipam:
63 | driver: default
64 | config:
65 | - subnet: ${DATA_LAKE_MONGO_DB_NETWORK}
66 |
--------------------------------------------------------------------------------
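The three mongo-db services share the replica-cluster configuration, and the init.js script from workload/mongo-db/4.4.4/replica-cluster/script is mounted at /opt/mongo-db/script. As a sketch only, assuming init.js initiates the replica set and that the 4.4.4 image ships the mongo shell listening on its default port:

  docker compose --env-file compose/DEV.env -f compose/data-lake/mongo-db/main.yml \
    exec mongo-db-1 mongo /opt/mongo-db/script/init.js    # assumed to call rs.initiate()
  docker compose --env-file compose/DEV.env -f compose/data-lake/mongo-db/main.yml \
    exec mongo-db-1 mongo --eval 'rs.status()'
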
/compose/data-lake/redis/main.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | redis-1:
4 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | volumes:
8 | - redis-1-data:/opt/redis/data
9 | - redis-1-log:/opt/redis/log
10 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
11 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
12 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
13 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
14 | networks:
15 | - common
16 | redis-2:
17 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
18 | entrypoint: /usr/bin/supervisord
19 | command: "-c /etc/supervisord.conf"
20 | volumes:
21 | - redis-2-data:/opt/redis/data
22 | - redis-2-log:/opt/redis/log
23 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
24 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
25 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
26 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
27 | depends_on:
28 | - redis-1
29 | networks:
30 | - common
31 | redis-3:
32 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
33 | entrypoint: /usr/bin/supervisord
34 | command: "-c /etc/supervisord.conf"
35 | volumes:
36 | - redis-3-data:/opt/redis/data
37 | - redis-3-log:/opt/redis/log
38 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
39 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
40 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
41 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
42 | depends_on:
43 | - redis-1
44 | networks:
45 | - common
46 | redis-4:
47 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
48 | entrypoint: /usr/bin/supervisord
49 | command: "-c /etc/supervisord.conf"
50 | volumes:
51 | - redis-4-data:/opt/redis/data
52 | - redis-4-log:/opt/redis/log
53 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
54 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
55 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
56 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
57 | depends_on:
58 | - redis-1
59 | networks:
60 | - common
61 | redis-5:
62 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
63 | entrypoint: /usr/bin/supervisord
64 | command: "-c /etc/supervisord.conf"
65 | volumes:
66 | - redis-5-data:/opt/redis/data
67 | - redis-5-log:/opt/redis/log
68 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
69 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
70 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
71 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
72 | depends_on:
73 | - redis-1
74 | networks:
75 | - common
76 | redis-6:
77 | image: sloopstash/redis:v${DATA_LAKE_REDIS_VERSION}
78 | entrypoint: /usr/bin/supervisord
79 | command: "-c /etc/supervisord.conf"
80 | volumes:
81 | - redis-6-data:/opt/redis/data
82 | - redis-6-log:/opt/redis/log
83 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/script:/opt/redis/script
84 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
85 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/supervisor.ini:/opt/redis/system/supervisor.ini
86 | - ${HOME_DIR}/workload/redis/${DATA_LAKE_REDIS_VERSION}/cluster/conf/server.conf:/opt/redis/conf/server.conf
87 | depends_on:
88 | - redis-1
89 | networks:
90 | - common
91 | volumes:
92 | redis-1-data:
93 | driver: local
94 | redis-1-log:
95 | driver: local
96 | redis-2-data:
97 | driver: local
98 | redis-2-log:
99 | driver: local
100 | redis-3-data:
101 | driver: local
102 | redis-3-log:
103 | driver: local
104 | redis-4-data:
105 | driver: local
106 | redis-4-log:
107 | driver: local
108 | redis-5-data:
109 | driver: local
110 | redis-5-log:
111 | driver: local
112 | redis-6-data:
113 | driver: local
114 | redis-6-log:
115 | driver: local
116 | networks:
117 | common:
118 | driver: bridge
119 | ipam:
120 | driver: default
121 | config:
122 | - subnet: ${DATA_LAKE_REDIS_NETWORK}
123 |
--------------------------------------------------------------------------------
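Six identical Redis nodes are started with the cluster configuration, and workload/redis/7.2.1/cluster/script is mounted at /opt/redis/script. A sketch of forming and inspecting the cluster, assuming init.sh wraps redis-cli --cluster create across the six nodes (which is what the mounted script tree suggests) and that the server listens on the default port:

  docker compose --env-file compose/DEV.env -f compose/data-lake/redis/main.yml up -d
  docker compose --env-file compose/DEV.env -f compose/data-lake/redis/main.yml \
    exec redis-1 sh /opt/redis/script/init.sh             # assumed cluster-create helper
  docker compose --env-file compose/DEV.env -f compose/data-lake/redis/main.yml \
    exec redis-1 redis-cli cluster info
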
/compose/ntm.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | postgresql:
4 | image: sloopstash/postgresql:v${NTM_POSTGRESQL_VERSION}
5 | entrypoint: /usr/bin/supervisord
6 | command: "-c /etc/supervisord.conf"
7 | volumes:
8 | - postgresql-data:/opt/postgresql/data
9 | - postgresql-log:/opt/postgresql/log
10 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
11 | - ${HOME_DIR}/workload/postgresql/${NTM_POSTGRESQL_VERSION}/conf/supervisor.ini:/opt/postgresql/system/supervisor.ini
12 | - ${HOME_DIR}/workload/postgresql/${NTM_POSTGRESQL_VERSION}/conf/hba.conf:/opt/postgresql/conf/hba.conf
13 | - ${HOME_DIR}/workload/postgresql/${NTM_POSTGRESQL_VERSION}/conf/ident.conf:/opt/postgresql/conf/ident.conf
14 | - ${HOME_DIR}/workload/postgresql/${NTM_POSTGRESQL_VERSION}/conf/server.conf:/opt/postgresql/conf/server.conf
15 | networks:
16 | common:
17 | ipv4_address: ${NTM_POSTGRESQL_IP}
18 | app:
19 | image: sloopstash/ruby:v${NTM_RUBY_VERSION}
20 | entrypoint: /usr/bin/supervisord
21 | command: "-c /etc/supervisord.conf"
22 | environment:
23 | - STATIC_ENDPOINT=http://app-static.ntm.${EXTERNAL_DOMAIN}:${NTM_NGINX_PORT}
24 | volumes:
25 | - ${NTM_APP_SOURCE}:/opt/app/source
26 | - app-log:/opt/app/log
27 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
28 | - ${HOME_DIR}/_stack/ntm/app/conf/supervisor.ini:/opt/app/system/supervisor.ini
29 | depends_on:
30 | - postgresql
31 | networks:
32 | common:
33 | ipv4_address: ${NTM_APP_IP}
34 | nginx:
35 | image: sloopstash/nginx:v${NTM_NGINX_VERSION}
36 | entrypoint: /usr/bin/supervisord
37 | command: "-c /etc/supervisord.conf"
38 | ports:
39 | - "${NTM_NGINX_PORT}:80"
40 | volumes:
41 | - ${NTM_APP_SOURCE}:/opt/app/source
42 | - nginx-log:/opt/nginx/log
43 | - ${HOME_DIR}/workload/supervisor/conf/server.conf:/etc/supervisord.conf
44 | - ${HOME_DIR}/workload/nginx/${NTM_NGINX_VERSION}/conf/supervisor.ini:/opt/nginx/system/supervisor.ini
45 | - ${HOME_DIR}/workload/nginx/${NTM_NGINX_VERSION}/conf/server.conf:/opt/nginx/conf/server.conf
46 | - ${HOME_DIR}/_stack/ntm/nginx/conf/app.conf:/opt/nginx/conf/app.conf
47 | depends_on:
48 | - app
49 | networks:
50 | common:
51 | ipv4_address: ${NTM_NGINX_IP}
52 | volumes:
53 | postgresql-data:
54 | driver: local
55 | postgresql-log:
56 | driver: local
57 | app-log:
58 | driver: local
59 | nginx-log:
60 | driver: local
61 | networks:
62 | common:
63 | driver: bridge
64 | ipam:
65 | driver: default
66 | config:
67 | - subnet: ${NTM_NETWORK}
68 |
--------------------------------------------------------------------------------
/compose/observability/elk/main.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sloopstash/kickstart-docker/8e24e0124e5b6553537b40178ace05ab23760bd5/compose/observability/elk/main.yml
--------------------------------------------------------------------------------
/conf/server.json:
--------------------------------------------------------------------------------
1 | {
2 | "hosts": [
3 | "tcp://0.0.0.0:2375",
4 | "unix:///var/run/docker.sock"
5 | ],
6 | "containerd": "/run/containerd/containerd.sock",
7 | "storage-driver": "overlay2",
8 | "log-driver": "json-file",
9 | "log-opts": {
10 | "max-size": "100m"
11 | }
12 | }
13 |
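This file follows the Docker daemon (dockerd) configuration format. A sketch of applying it on a systemd-managed host, assuming the standard /etc/docker/daemon.json location:

# Install the configuration and restart the daemon.
sudo cp conf/server.json /etc/docker/daemon.json
sudo systemctl restart docker
# Confirm the storage and logging drivers took effect.
docker info --format '{{.Driver}} {{.LoggingDriver}}'

Note that tcp://0.0.0.0:2375 exposes the Docker API without TLS, so that listener should only be enabled on trusted networks.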
--------------------------------------------------------------------------------
/image/ansible/9.1.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install OpenSSH server.
5 | RUN set -x \
6 | && yum install -y openssh-server openssh-clients passwd
7 |
8 | # Configure OpenSSH server.
9 | RUN set -x \
10 | && mkdir /var/run/sshd \
11 | && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N '' \
12 | && sed -i 's/PasswordAuthentication yes/PasswordAuthentication no/' /etc/ssh/sshd_config
13 |
14 | # Configure OpenSSH user.
15 | RUN set -x \
16 | && mkdir /root/.ssh \
17 | && touch /root/.ssh/authorized_keys \
18 | && touch /root/.ssh/config \
19 | && echo -e "Host *\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null" >> /root/.ssh/config \
20 | && chmod 400 /root/.ssh/config
21 | ADD node.pub /root/.ssh/authorized_keys
22 |
23 | # Set default work directory.
24 | WORKDIR /
25 |
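One way this image might be built, assuming an SSH public key named node.pub is first placed in the (git-ignored) context directory, since the ADD instruction above expects it in the build context; the tag follows the sloopstash/<component>:v<version> convention used elsewhere in the repository:

# Generate a key pair and stage the public key in the build context.
ssh-keygen -t rsa -b 4096 -N '' -f node
cp node.pub image/ansible/9.1.0/context/
# Build the image from the repository root.
docker build \
  --tag sloopstash/ansible:v9.1.0 \
  --file image/ansible/9.1.0/amazon-linux-2.dockerfile \
  image/ansible/9.1.0/context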
--------------------------------------------------------------------------------
/image/ansible/9.1.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/apm/8.17.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y perl-Digest-SHA
6 |
7 | # Install APM.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://artifacts.elastic.co/downloads/apm-server/apm-server-8.17.0-linux-x86_64.tar.gz --quiet \
11 | && wget https://artifacts.elastic.co/downloads/apm-server/apm-server-8.17.0-linux-x86_64.tar.gz.sha512 --quiet \
12 | && shasum -a 512 -c apm-server-8.17.0-linux-x86_64.tar.gz.sha512 \
13 | && tar xvzf apm-server-8.17.0-linux-x86_64.tar.gz > /dev/null \
14 | && mkdir /usr/local/lib/apm \
15 | && cp -r apm-server-8.17.0/* /usr/local/lib/apm/ \
16 | && rm -rf apm-server-8.17.0*
17 |
18 | # Create APM directories.
19 | RUN set -x \
20 | && mkdir /opt/apm \
21 | && mkdir /opt/apm/data \
22 | && mkdir /opt/apm/log \
23 | && mkdir /opt/apm/conf \
24 | && mkdir /opt/apm/script \
25 | && mkdir /opt/apm/system \
26 | && touch /opt/apm/system/server.pid \
27 | && touch /opt/apm/system/supervisor.ini \
28 | && ln -s /opt/apm/system/supervisor.ini /etc/supervisord.d/apm.ini \
29 | && history -c
30 |
31 | # Set default work directory.
32 | WORKDIR /opt/apm
33 |
--------------------------------------------------------------------------------
/image/apm/8.17.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/base/1.1.1/alma-linux-8.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM almalinux:8
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && dnf update -y \
7 | && dnf install -y epel-release \
8 | && dnf install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && dnf install -y supervisor \
13 | && history -c
14 |
15 | # Set default work directory.
16 | WORKDIR /
17 |
--------------------------------------------------------------------------------
/image/base/1.1.1/alma-linux-9.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM almalinux:9
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && dnf update -y \
7 | && dnf install -y epel-release \
8 | && dnf install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && dnf install -y supervisor \
13 | && history -c
14 |
15 | # Set default work directory.
16 | WORKDIR /
17 |
--------------------------------------------------------------------------------
/image/base/1.1.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM amazonlinux:2
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum update -y \
7 | && yum install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs \
8 | && yum install -y python-devel python-pip python-setuptools
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && python -m pip install supervisor \
13 | && mkdir /etc/supervisord.d \
14 | && history -c
15 |
16 | # Set default work directory.
17 | WORKDIR /
18 |
--------------------------------------------------------------------------------
/image/base/1.1.1/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/base/1.1.1/rocky-linux-8.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM rockylinux:8
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && dnf update -y \
7 | && dnf install -y epel-release \
8 | && dnf install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && dnf install -y supervisor \
13 | && history -c
14 |
15 | # Set default work directory.
16 | WORKDIR /
17 |
--------------------------------------------------------------------------------
/image/base/1.1.1/rocky-linux-9.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM rockylinux:9
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && dnf update -y \
7 | && dnf install -y epel-release \
8 | && dnf install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && dnf install -y supervisor \
13 | && history -c
14 |
15 | # Set default work directory.
16 | WORKDIR /
17 |
--------------------------------------------------------------------------------
/image/base/1.1.1/ubuntu-linux-22-04.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM ubuntu:22.04
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && apt update \
7 | && apt upgrade -y \
8 | && apt install -y wget vim net-tools gcc make tar git unzip sysstat tree netcat nmap logrotate cron
9 |
10 | # Install Supervisor.
11 | RUN set -x \
12 | && apt install -y supervisor \
13 | && history -c
14 |
15 | # Set default work directory.
16 | WORKDIR /
17 |
--------------------------------------------------------------------------------
/image/base/1.2.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM amazonlinux:2
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum update -y \
7 | && yum install -y wget vim net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs \
8 | && yum install -y python-devel python-pip python-setuptools \
9 | && yum clean all \
10 | && rm -rf /var/cache/yum
11 |
12 | # Install Supervisor.
13 | RUN set -x \
14 | && python -m pip install supervisor \
15 | && mkdir /etc/supervisord.d \
16 | && history -c
17 |
18 | # Set default work directory.
19 | WORKDIR /
20 |
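Since the service images in this repository build FROM sloopstash/base, a base image is normally built before anything else; a sketch for v1.2.1 on Amazon Linux 2, from the repository root:

docker build \
  --tag sloopstash/base:v1.2.1 \
  --file image/base/1.2.1/amazon-linux-2.dockerfile \
  image/base/1.2.1/context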
--------------------------------------------------------------------------------
/image/base/1.2.1/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/chef/18.3.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install OpenSSH server.
5 | RUN set -x \
6 | && yum install -y openssh-server openssh-clients passwd
7 |
8 | # Configure OpenSSH server.
9 | RUN set -x \
10 | && mkdir /var/run/sshd \
11 | && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N '' \
12 | && sed -i 's/PasswordAuthentication yes/PasswordAuthentication no/' /etc/ssh/sshd_config
13 |
14 | # Configure OpenSSH user.
15 | RUN set -x \
16 | && mkdir /root/.ssh \
17 | && touch /root/.ssh/authorized_keys \
18 | && touch /root/.ssh/config \
19 | && echo -e "Host *\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null" >> /root/.ssh/config \
20 | && chmod 400 /root/.ssh/config
21 | ADD node.pub /root/.ssh/authorized_keys
22 |
23 | # Install Chef infra client.
24 | WORKDIR /tmp
25 | COPY chef-18.3.0-1.el7.x86_64.rpm ./
26 | RUN set -x \
27 | && mkdir /var/log/chef \
28 | && yum install -y chef-18.3.0-1.el7.x86_64.rpm \
29 | && rm -f chef-18.3.0-1.el7.x86_64.rpm \
30 | && history -c
31 |
32 | # Set default work directory.
33 | WORKDIR /
34 |
--------------------------------------------------------------------------------
/image/chef/18.3.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/elasticsearch/8.10.3/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y perl-Digest-SHA
6 |
7 | # Install Elasticsearch.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.10.3-linux-x86_64.tar.gz --quiet \
11 | && wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.10.3-linux-x86_64.tar.gz.sha512 --quiet \
12 | && shasum -a 512 -c elasticsearch-8.10.3-linux-x86_64.tar.gz.sha512 \
13 | && tar xvzf elasticsearch-8.10.3-linux-x86_64.tar.gz > /dev/null \
14 | && mkdir /usr/local/lib/elasticsearch \
15 | && cp -r elasticsearch-8.10.3/* /usr/local/lib/elasticsearch/ \
16 | && rm -rf elasticsearch-8.10.3*
17 |
18 | # Create Elasticsearch directories.
19 | RUN set -x \
20 | && mkdir /opt/elasticsearch \
21 | && mkdir /opt/elasticsearch/data \
22 | && mkdir /opt/elasticsearch/log \
23 | && mkdir /opt/elasticsearch/conf \
24 | && mkdir /opt/elasticsearch/script \
25 | && mkdir /opt/elasticsearch/system \
26 | && touch /opt/elasticsearch/system/node.pid \
27 | && touch /opt/elasticsearch/system/supervisor.ini \
28 | && ln -s /opt/elasticsearch/system/supervisor.ini /etc/supervisord.d/elasticsearch.ini \
29 | && history -c
30 |
31 | # Set default work directory.
32 | WORKDIR /opt/elasticsearch
33 |
--------------------------------------------------------------------------------
/image/elasticsearch/8.10.3/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/elasticsearch/8.17.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y perl-Digest-SHA
6 |
7 | # Install Elasticsearch.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.17.0-linux-x86_64.tar.gz --quiet \
11 | && wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.17.0-linux-x86_64.tar.gz.sha512 --quiet \
12 | && shasum -a 512 -c elasticsearch-8.17.0-linux-x86_64.tar.gz.sha512 \
13 | && tar xvzf elasticsearch-8.17.0-linux-x86_64.tar.gz > /dev/null \
14 | && mkdir /usr/local/lib/elasticsearch \
15 | && cp -r elasticsearch-8.17.0/* /usr/local/lib/elasticsearch/ \
16 | && rm -rf elasticsearch-8.17.0*
17 |
18 | # Create Elasticsearch directories.
19 | RUN set -x \
20 | && mkdir /opt/elasticsearch \
21 | && mkdir /opt/elasticsearch/data \
22 | && mkdir /opt/elasticsearch/log \
23 | && mkdir /opt/elasticsearch/conf \
24 | && mkdir /opt/elasticsearch/script \
25 | && mkdir /opt/elasticsearch/system \
26 | && touch /opt/elasticsearch/system/node.pid \
27 | && touch /opt/elasticsearch/system/supervisor.ini \
28 | && ln -s /opt/elasticsearch/system/supervisor.ini /etc/supervisord.d/elasticsearch.ini \
29 | && history -c
30 |
31 | # Set default work directory.
32 | WORKDIR /opt/elasticsearch
33 |
--------------------------------------------------------------------------------
/image/elasticsearch/8.17.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/hadoop/2.10.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Oracle JDK.
5 | WORKDIR /tmp
6 | COPY jdk-8u131-linux-x64.rpm ./
7 | RUN set -x \
8 | && yum install -y jdk-8u131-linux-x64.rpm \
9 | && rm -f jdk-8u131-linux-x64.rpm
10 |
11 | # Install Hadoop.
12 | RUN set -x \
13 | && wget https://archive.apache.org/dist/hadoop/common/hadoop-2.10.1/hadoop-2.10.1.tar.gz --quiet \
14 | && tar xvzf hadoop-2.10.1.tar.gz > /dev/null \
15 | && mkdir /usr/local/lib/hadoop \
16 | && cp -r hadoop-2.10.1/* /usr/local/lib/hadoop/ \
17 | && rm -rf hadoop-2.10.1*
18 |
19 | # Create Hadoop directories.
20 | RUN set -x \
21 | && mkdir /opt/hadoop \
22 | && mkdir /opt/hadoop/data \
23 | && mkdir /opt/hadoop/log \
24 | && mkdir /opt/hadoop/conf \
25 | && mkdir /opt/hadoop/script \
26 | && mkdir /opt/hadoop/system \
27 | && mkdir /opt/hadoop/tmp \
28 | && touch /opt/hadoop/conf/env.sh \
29 | && touch /opt/hadoop/conf/core-site.xml \
30 | && touch /opt/hadoop/conf/hdfs-site.xml \
31 | && touch /opt/hadoop/system/node.pid \
32 | && touch /opt/hadoop/system/supervisor.ini \
33 | && ln -sf /opt/hadoop/conf/env.sh /usr/local/lib/hadoop/etc/hadoop/hadoop-env.sh \
34 | && ln -sf /opt/hadoop/conf/core-site.xml /usr/local/lib/hadoop/etc/hadoop/core-site.xml \
35 | && ln -sf /opt/hadoop/conf/hdfs-site.xml /usr/local/lib/hadoop/etc/hadoop/hdfs-site.xml \
36 | && ln -s /opt/hadoop/system/supervisor.ini /etc/supervisord.d/hadoop.ini \
37 | && history -c
38 |
39 | # Set default work directory.
40 | WORKDIR /opt/hadoop
41 |
--------------------------------------------------------------------------------
/image/hadoop/2.10.1/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/kafka/3.2.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Oracle JDK.
5 | WORKDIR /tmp
6 | COPY jdk-8u131-linux-x64.rpm ./
7 | RUN set -x \
8 | && yum install -y jdk-8u131-linux-x64.rpm \
9 | && rm -f jdk-8u131-linux-x64.rpm
10 |
11 | # Install Kafka.
12 | RUN set -x \
13 | && wget https://archive.apache.org/dist/kafka/3.2.1/kafka_2.13-3.2.1.tgz --quiet \
14 | && tar xvzf kafka_2.13-3.2.1.tgz > /dev/null \
15 | && mkdir /usr/local/lib/kafka \
16 | && cp -r kafka_2.13-3.2.1/* /usr/local/lib/kafka/ \
17 | && rm -rf kafka_2.13-3.2.1*
18 |
19 | # Create Kafka directories.
20 | RUN set -x \
21 | && mkdir /opt/kafka \
22 | && mkdir /opt/kafka/data \
23 | && mkdir /opt/kafka/log \
24 | && mkdir /opt/kafka/conf \
25 | && mkdir /opt/kafka/script \
26 | && mkdir /opt/kafka/system \
27 | && touch /opt/kafka/system/node.pid \
28 | && touch /opt/kafka/system/supervisor.ini \
29 | && ln -s /opt/kafka/system/supervisor.ini /etc/supervisord.d/kafka.ini \
30 | && history -c
31 |
32 | # Set default work directory.
33 | WORKDIR /opt/kafka
34 |
--------------------------------------------------------------------------------
/image/kafka/3.2.1/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/kibana/8.17.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y perl-Digest-SHA
6 |
7 | # Install Kibana.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://artifacts.elastic.co/downloads/kibana/kibana-8.17.0-linux-x86_64.tar.gz --quiet \
11 | && wget https://artifacts.elastic.co/downloads/kibana/kibana-8.17.0-linux-x86_64.tar.gz.sha512 --quiet \
12 | && shasum -a 512 -c kibana-8.17.0-linux-x86_64.tar.gz.sha512 \
13 | && tar xvzf kibana-8.17.0-linux-x86_64.tar.gz > /dev/null \
14 | && mkdir /usr/local/lib/kibana \
15 | && cp -r kibana-8.17.0/* /usr/local/lib/kibana/ \
16 | && rm -rf kibana-8.17.0*
17 |
18 | # Create Kibana directories.
19 | RUN set -x \
20 | && mkdir /opt/kibana \
21 | && mkdir /opt/kibana/data \
22 | && mkdir /opt/kibana/log \
23 | && mkdir /opt/kibana/conf \
24 | && mkdir /opt/kibana/script \
25 | && mkdir /opt/kibana/system \
26 | && touch /opt/kibana/system/server.pid \
27 | && touch /opt/kibana/system/supervisor.ini \
28 | && ln -s /opt/kibana/system/supervisor.ini /etc/supervisord.d/kibana.ini \
29 | && history -c
30 |
31 | # Set default work directory.
32 | WORKDIR /opt/kibana
33 |
--------------------------------------------------------------------------------
/image/kibana/8.17.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/logstash/8.17.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y perl-Digest-SHA
6 |
7 | # Install Logstash.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://artifacts.elastic.co/downloads/logstash/logstash-8.17.0-linux-x86_64.tar.gz --quiet \
11 | && wget https://artifacts.elastic.co/downloads/logstash/logstash-8.17.0-linux-x86_64.tar.gz.sha512 --quiet \
12 | && shasum -a 512 -c logstash-8.17.0-linux-x86_64.tar.gz.sha512 \
13 | && tar xvzf logstash-8.17.0-linux-x86_64.tar.gz > /dev/null \
14 | && mkdir /usr/local/lib/logstash \
15 | && cp -r logstash-8.17.0/* /usr/local/lib/logstash/ \
16 | && rm -rf logstash-8.17.0*
17 |
18 | # Create Logstash directories.
19 | RUN set -x \
20 | && mkdir /opt/logstash \
21 | && mkdir /opt/logstash/data \
22 | && mkdir /opt/logstash/log \
23 | && mkdir /opt/logstash/conf \
24 | && mkdir /opt/logstash/script \
25 | && mkdir /opt/logstash/system \
26 | && touch /opt/logstash/system/server.pid \
27 | && touch /opt/logstash/system/supervisor.ini \
28 | && ln -s /opt/logstash/system/supervisor.ini /etc/supervisord.d/logstash.ini \
29 | && history -c
30 |
31 | # Set default work directory.
32 | WORKDIR /opt/logstash
33 |
--------------------------------------------------------------------------------
/image/logstash/8.17.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/mongo-db/4.4.4/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y libcurl openssl xz-libs
6 |
7 | # Install MongoDB.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon2-4.4.4.tgz --quiet \
11 | && tar xvzf mongodb-linux-x86_64-amazon2-4.4.4.tgz > /dev/null \
12 | && mv mongodb-linux-x86_64-amazon2-4.4.4/bin/* /usr/local/bin/ \
13 | && rm -rf mongodb-linux-x86_64-amazon2-4.4.4*
14 |
15 | # Install MongoDB shell.
16 | RUN set -x \
17 | && wget https://downloads.mongodb.com/compass/mongosh-2.1.5-linux-x64.tgz --quiet \
18 | && tar xvzf mongosh-2.1.5-linux-x64.tgz > /dev/null \
19 | && mv mongosh-2.1.5-linux-x64/bin/* /usr/local/bin/ \
20 | && rm -rf mongosh-2.1.5-linux-x64*
21 |
22 | # Create MongoDB directories.
23 | RUN set -x \
24 | && mkdir /opt/mongo-db \
25 | && mkdir /opt/mongo-db/data \
26 | && mkdir /opt/mongo-db/log \
27 | && mkdir /opt/mongo-db/conf \
28 | && mkdir /opt/mongo-db/script \
29 | && mkdir /opt/mongo-db/system \
30 | && touch /opt/mongo-db/system/server.pid \
31 | && touch /opt/mongo-db/system/supervisor.ini \
32 | && ln -s /opt/mongo-db/system/supervisor.ini /etc/supervisord.d/mongo-db.ini \
33 | && history -c
34 |
35 | # Set default work directory.
36 | WORKDIR /opt/mongo-db
37 |
--------------------------------------------------------------------------------
/image/mongo-db/4.4.4/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/mongo-db/7.0.2/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y libcurl openssl xz-libs
6 |
7 | # Install MongoDB.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon2-7.0.2.tgz --quiet \
11 | && tar xvzf mongodb-linux-x86_64-amazon2-7.0.2.tgz > /dev/null \
12 | && mv mongodb-linux-x86_64-amazon2-7.0.2/bin/* /usr/local/bin/ \
13 | && rm -rf mongodb-linux-x86_64-amazon2-7.0.2*
14 |
15 | # Install MongoDB shell.
16 | RUN set -x \
17 | && wget https://downloads.mongodb.com/compass/mongosh-2.1.5-linux-x64.tgz --quiet \
18 | && tar xvzf mongosh-2.1.5-linux-x64.tgz > /dev/null \
19 | && mv mongosh-2.1.5-linux-x64/bin/* /usr/local/bin/ \
20 | && rm -rf mongosh-2.1.5-linux-x64*
21 |
22 | # Create MongoDB directories.
23 | RUN set -x \
24 | && mkdir /opt/mongo-db \
25 | && mkdir /opt/mongo-db/data \
26 | && mkdir /opt/mongo-db/log \
27 | && mkdir /opt/mongo-db/conf \
28 | && mkdir /opt/mongo-db/script \
29 | && mkdir /opt/mongo-db/system \
30 | && touch /opt/mongo-db/system/server.pid \
31 | && touch /opt/mongo-db/system/supervisor.ini \
32 | && ln -s /opt/mongo-db/system/supervisor.ini /etc/supervisord.d/mongo-db.ini \
33 | && history -c
34 |
35 | # Set default work directory.
36 | WORKDIR /opt/mongo-db
37 |
--------------------------------------------------------------------------------
/image/mongo-db/7.0.2/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/nginx/1.14.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Nginx.
5 | WORKDIR /tmp
6 | RUN set -x \
7 | && wget https://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.14.0-1.el7_4.ngx.x86_64.rpm --quiet \
8 | && yum install -y nginx-1.14.0-1.el7_4.ngx.x86_64.rpm \
9 | && rm -f nginx-1.14.0-1.el7_4.ngx.x86_64.rpm
10 |
11 | # Create App and Nginx directories.
12 | RUN set -x \
13 | && mkdir /opt/app \
14 | && mkdir /opt/app/source \
15 | && mkdir /opt/nginx \
16 | && mkdir /opt/nginx/log \
17 | && mkdir /opt/nginx/conf \
18 | && mkdir /opt/nginx/script \
19 | && mkdir /opt/nginx/system \
20 | && touch /opt/nginx/log/access.log \
21 | && touch /opt/nginx/log/error.log \
22 | && touch /opt/nginx/conf/server.conf \
23 | && touch /opt/nginx/conf/app.conf \
24 | && touch /opt/nginx/system/server.pid \
25 | && touch /opt/nginx/system/supervisor.ini \
26 | && ln -s /opt/nginx/conf/app.conf /etc/nginx/conf.d/app.conf \
27 | && ln -s /opt/nginx/system/supervisor.ini /etc/supervisord.d/nginx.ini \
28 | && history -c
29 |
30 | # Set default work directory.
31 | WORKDIR /opt/nginx
32 |
--------------------------------------------------------------------------------
/image/nginx/1.14.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/nginx/1.24.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Nginx.
5 | WORKDIR /tmp
6 | RUN set -x \
7 | && wget https://nginx.org/packages/rhel/7/x86_64/RPMS/nginx-1.24.0-1.el7.ngx.x86_64.rpm --quiet \
8 | && yum install -y nginx-1.24.0-1.el7.ngx.x86_64.rpm \
9 | && rm -f nginx-1.24.0-1.el7.ngx.x86_64.rpm
10 |
11 | # Create App and Nginx directories.
12 | RUN set -x \
13 | && mkdir /opt/app \
14 | && mkdir /opt/app/source \
15 | && mkdir /opt/nginx \
16 | && mkdir /opt/nginx/log \
17 | && mkdir /opt/nginx/conf \
18 | && mkdir /opt/nginx/script \
19 | && mkdir /opt/nginx/system \
20 | && touch /opt/nginx/log/access.log \
21 | && touch /opt/nginx/log/error.log \
22 | && touch /opt/nginx/conf/server.conf \
23 | && touch /opt/nginx/conf/app.conf \
24 | && touch /opt/nginx/system/server.pid \
25 | && touch /opt/nginx/system/supervisor.ini \
26 | && ln -s /opt/nginx/conf/app.conf /etc/nginx/conf.d/app.conf \
27 | && ln -s /opt/nginx/system/supervisor.ini /etc/supervisord.d/nginx.ini \
28 | && history -c
29 |
30 | # Set default work directory.
31 | WORKDIR /opt/nginx
32 |
--------------------------------------------------------------------------------
/image/nginx/1.24.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/node-js/14.16.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y xz
6 |
7 | # Install JQ.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux-amd64 --quiet \
11 | && mv jq-linux-amd64 /usr/local/bin/jq \
12 | && chmod +x /usr/local/bin/jq
13 |
14 | # Install NodeJS.
15 | RUN set -x \
16 | && wget https://nodejs.org/dist/v14.16.0/node-v14.16.0-linux-x64.tar.xz --quiet \
17 | && tar xvJf node-v14.16.0-linux-x64.tar.xz > /dev/null \
18 | && mkdir /usr/local/lib/node-js \
19 | && cp -r node-v14.16.0-linux-x64/* /usr/local/lib/node-js/ \
20 | && rm -rf node-v14.16.0-linux-x64*
21 | ENV PATH=/usr/local/lib/node-js/bin:$PATH
22 | ENV NODE_PATH=/usr/local/lib/node-js/lib/node_modules
23 |
24 | # Install NodeJS packages.
25 | COPY package.json ./
26 | RUN set -x \
27 | && npm install -g $(jq -r '.dependencies | to_entries | map("\(.key)@\(.value)") | join(" ")' package.json) \
28 | && rm -f package.json
29 |
30 | # Create App directories.
31 | RUN set -x \
32 | && mkdir /opt/app \
33 | && mkdir /opt/app/source \
34 | && mkdir /opt/app/log \
35 | && mkdir /opt/app/system \
36 | && touch /opt/app/system/supervisor.ini \
37 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
38 | && history -c
39 |
40 | # Set default work directory.
41 | WORKDIR /opt/app
42 |
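The npm step above flattens the dependencies map of package.json into name@version arguments for a single global install. Run against the context file shown next, the jq filter expands roughly as follows (output abbreviated):

jq -r '.dependencies | to_entries | map("\(.key)@\(.value)") | join(" ")' \
  image/node-js/14.16.0/context/package.json
# => express@^4.18.2 mongodb@^4.8.1 mongoose@^8.8.2 ... sass@^1.81.0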
--------------------------------------------------------------------------------
/image/node-js/14.16.0/context/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name":"sloopstash",
3 | "version":"1.1.1",
4 | "dependencies":{
5 | "express":"^4.18.2",
6 | "mongodb":"^4.8.1",
7 | "mongoose":"^8.8.2",
8 | "jsonwebtoken":"^9.0.2",
9 | "bcryptjs":"^2.4.3",
10 | "cors":"^2.8.5",
11 | "dotenv":"^16.4.5",
12 | "bootstrap":"^5.3.0",
13 | "react":"^18.2.0",
14 | "react-dom":"^18.2.0",
15 | "react-router-dom":"^6.4.3",
16 | "react-bootstrap":"^2.8.0",
17 | "react-scripts":"^5.0.0",
18 | "@fortawesome/free-solid-svg-icons":"^6.7.1",
19 | "@fortawesome/react-fontawesome":"^0.2.2",
20 | "axios":"^1.0.0",
21 | "sass":"^1.81.0"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/image/node-js/17.7.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y xz gcc-c++
6 |
7 | # Install JQ.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux-amd64 --quiet \
11 | && mv jq-linux-amd64 /usr/local/bin/jq \
12 | && chmod +x /usr/local/bin/jq
13 |
14 | # Install NodeJS.
15 | RUN set -x \
16 | && wget https://nodejs.org/dist/v17.7.1/node-v17.7.1-linux-x64.tar.xz --quiet \
17 | && tar xvJf node-v17.7.1-linux-x64.tar.xz > /dev/null \
18 | && mkdir /usr/local/lib/node-js \
19 | && cp -r node-v17.7.1-linux-x64/* /usr/local/lib/node-js/ \
20 | && rm -rf node-v17.7.1-linux-x64*
21 | ENV PATH=/usr/local/lib/node-js/bin:$PATH
22 | ENV NODE_PATH=/usr/local/lib/node-js/lib/node_modules
23 |
24 | # Install NodeJS packages.
25 | COPY package.json ./
26 | RUN set -x \
27 | && npm install -g $(jq -r '.dependencies | to_entries | map("\(.key)@\(.value)") | join(" ")' package.json) \
28 | && rm -f package.json
29 |
30 | # Create App directories.
31 | RUN set -x \
32 | && mkdir /opt/app \
33 | && mkdir /opt/app/source \
34 | && mkdir /opt/app/log \
35 | && mkdir /opt/app/system \
36 | && touch /opt/app/system/supervisor.ini \
37 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
38 | && history -c
39 |
40 | # Set default work directory.
41 | WORKDIR /opt/app
42 |
--------------------------------------------------------------------------------
/image/node-js/17.7.1/context/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name":"sloopstash",
3 | "version":"1.1.1",
4 | "dependencies":{
5 | "express":"^4.18.2",
6 | "mongodb":"^4.8.1",
7 | "mongoose":"^8.8.2",
8 | "jsonwebtoken":"^9.0.2",
9 | "bcryptjs":"^2.4.3",
10 | "cors":"^2.8.5",
11 | "dotenv":"^16.4.5",
12 | "bootstrap":"^5.3.0",
13 | "react":"^18.2.0",
14 | "react-dom":"^18.2.0",
15 | "react-router-dom":"^6.4.3",
16 | "react-bootstrap":"^2.8.0",
17 | "react-scripts":"^5.0.0",
18 | "@fortawesome/free-solid-svg-icons":"^6.7.1",
19 | "@fortawesome/react-fontawesome":"^0.2.2",
20 | "axios":"^1.0.0",
21 | "sass":"^1.81.0"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/image/php/8.3.3/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum install -y amazon-linux-extras libxml2-devel sqlite-devel
7 |
8 | # Download and extract PHP.
9 | WORKDIR /tmp
10 | RUN set -x \
11 | && wget https://www.php.net/distributions/php-8.3.3.tar.gz --quiet \
12 | && tar xvzf php-8.3.3.tar.gz > /dev/null
13 |
14 | # Compile and install PHP.
15 | WORKDIR php-8.3.3
16 | RUN set -x \
17 | && ./configure \
18 | && make \
19 | && make install
20 |
21 | # Create App directories.
22 | WORKDIR ../
23 | RUN set -x \
24 | && rm -rf php-8.3.3* \
25 | && mkdir /opt/app \
26 | && mkdir /opt/app/source \
27 | && mkdir /opt/app/log \
28 | && mkdir /opt/app/system \
29 | && touch /opt/app/system/supervisor.ini \
30 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
31 | && history -c
32 |
33 | # Set default work directory.
34 | WORKDIR /opt/app
35 |
--------------------------------------------------------------------------------
/image/php/8.3.3/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/postgresql/16.4/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.2.1 AS install_system_packages
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum install -y zlib-devel libicu-devel readline-devel \
7 | && yum clean all \
8 | && rm -rf /var/cache/yum
9 |
10 | # Intermediate Docker image to use.
11 | FROM install_system_packages AS install_postgresql
12 |
13 | # Download and extract PostgreSQL.
14 | WORKDIR /tmp
15 | RUN set -x \
16 | && wget https://ftp.postgresql.org/pub/source/v16.4/postgresql-16.4.tar.gz --quiet \
17 | && tar xvzf postgresql-16.4.tar.gz > /dev/null
18 |
19 | # Compile and install PostgreSQL.
20 | WORKDIR postgresql-16.4
21 | RUN set -x \
22 | && ./configure \
23 | && make \
24 | && make install
25 |
26 | # Docker image to use.
27 | FROM sloopstash/base:v1.2.1 AS create_postgresql_directories
28 |
29 | # Create PostgreSQL directories.
30 | RUN set -x \
31 | && mkdir /opt/postgresql \
32 | && mkdir /opt/postgresql/data \
33 | && mkdir /opt/postgresql/log \
34 | && mkdir /opt/postgresql/conf \
35 | && mkdir /opt/postgresql/script \
36 | && mkdir /opt/postgresql/system \
37 | && touch /opt/postgresql/system/server.pid \
38 | && touch /opt/postgresql/system/supervisor.ini
39 |
40 | # Intermediate Docker image to use.
41 | FROM install_system_packages AS finalize_postgresql_oci_image
42 |
43 | # Create system user for PostgreSQL.
44 | RUN useradd -m postgresql
45 |
46 | # Copy PostgreSQL installation.
47 | COPY --from=install_postgresql /usr/local/pgsql /usr/local/pgsql
48 | ENV PATH=/usr/local/pgsql/bin:$PATH
49 |
50 | # Copy PostgreSQL directories.
51 | COPY --from=create_postgresql_directories /opt/postgresql /opt/postgresql
52 |
53 | # Symlink Supervisor configuration.
54 | RUN set -x \
55 | && ln -s /opt/postgresql/system/supervisor.ini /etc/supervisord.d/postgresql.ini \
56 | && chown -R postgresql:postgresql /opt/postgresql \
57 | && history -c
58 |
59 | # Set default work directory.
60 | WORKDIR /opt/postgresql
61 |
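Because this Dockerfile is multi-stage, an individual stage can be built on its own for debugging with the standard --target flag; a sketch, assuming the final image is tagged to match NTM_POSTGRESQL_VERSION=16.4 as used by the compose files:

# Build only the compile stage.
docker build \
  --target install_postgresql \
  --file image/postgresql/16.4/amazon-linux-2.dockerfile \
  image/postgresql/16.4/context
# Build the final image.
docker build \
  --tag sloopstash/postgresql:v16.4 \
  --file image/postgresql/16.4/amazon-linux-2.dockerfile \
  image/postgresql/16.4/context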
--------------------------------------------------------------------------------
/image/postgresql/16.4/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/python/2.7/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Python.
5 | RUN set -x \
6 | && yum install -y python-2.7.18 python-devel python-pip python-setuptools
7 |
8 | # Install Python packages.
9 | RUN set -x \
10 | && pip install flask==0.12.4 \
11 | && pip install redis==2.10.6 \
12 | && pip install elastic-apm[flask]==3.0.5
13 |
14 | # Create App directories.
15 | RUN set -x \
16 | && mkdir /opt/app \
17 | && mkdir /opt/app/source \
18 | && mkdir /opt/app/log \
19 | && mkdir /opt/app/system \
20 | && touch /opt/app/system/supervisor.ini \
21 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
22 | && history -c
23 |
24 | # Set default work directory.
25 | WORKDIR /opt/app
26 |
--------------------------------------------------------------------------------
/image/python/2.7/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/python/3.12.0/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum install -y bzip2-devel zlib-devel xz-devel libffi-devel gdbm-devel libuuid-devel sqlite-devel readline-devel
7 |
8 | # Download and extract Python.
9 | WORKDIR /tmp
10 | RUN set -x \
11 | && wget https://www.python.org/ftp/python/3.12.0/Python-3.12.0.tgz --quiet \
12 | && tar xvzf Python-3.12.0.tgz > /dev/null
13 |
14 | # Compile and install Python.
15 | WORKDIR Python-3.12.0
16 | RUN set -x \
17 | && ./configure --enable-optimizations \
18 | && make \
19 | && make install \
20 | && update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1 \
21 | && update-alternatives --install /usr/bin/python python /usr/local/bin/python3.12 2
22 |
23 | # Create App directories.
24 | WORKDIR ../
25 | RUN set -x \
26 | && rm -rf Python-3.12.0* \
27 | && mkdir /opt/app \
28 | && mkdir /opt/app/source \
29 | && mkdir /opt/app/log \
30 | && mkdir /opt/app/system \
31 | && touch /opt/app/system/supervisor.ini \
32 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
33 | && history -c
34 |
35 | # Set default work directory.
36 | WORKDIR /opt/app
37 |
--------------------------------------------------------------------------------
/image/python/3.12.0/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/redis/4.0.9/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN yum install -y tcl
6 |
7 | # Download and extract Redis.
8 | WORKDIR /tmp
9 | RUN set -x \
10 | && wget http://download.redis.io/releases/redis-4.0.9.tar.gz --quiet \
11 | && tar xvzf redis-4.0.9.tar.gz > /dev/null
12 |
13 | # Compile and install Redis.
14 | WORKDIR redis-4.0.9
15 | RUN set -x \
16 | && make distclean \
17 | && make \
18 | && make install
19 |
20 | # Create Redis directories.
21 | WORKDIR ../
22 | RUN set -x \
23 | && rm -rf redis-4.0.9* \
24 | && mkdir /opt/redis \
25 | && mkdir /opt/redis/data \
26 | && mkdir /opt/redis/log \
27 | && mkdir /opt/redis/conf \
28 | && mkdir /opt/redis/script \
29 | && mkdir /opt/redis/system \
30 | && touch /opt/redis/system/server.pid \
31 | && touch /opt/redis/system/supervisor.ini \
32 | && ln -s /opt/redis/system/supervisor.ini /etc/supervisord.d/redis.ini \
33 | && history -c
34 |
35 | # Set default work directory.
36 | WORKDIR /opt/redis
37 |
--------------------------------------------------------------------------------
/image/redis/4.0.9/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/redis/7.2.1/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.2.1 AS install_system_packages
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && yum install -y tcl \
7 | && yum clean all \
8 | && rm -rf /var/cache/yum
9 |
10 | # Intermediate Docker image to use.
11 | FROM install_system_packages AS install_redis
12 |
13 | # Download and extract Redis.
14 | WORKDIR /tmp
15 | RUN set -x \
16 | && wget http://download.redis.io/releases/redis-7.2.1.tar.gz --quiet \
17 | && tar xvzf redis-7.2.1.tar.gz > /dev/null
18 |
19 | # Compile and install Redis.
20 | WORKDIR redis-7.2.1
21 | RUN set -x \
22 | && make distclean \
23 | && make \
24 | && make install
25 |
26 | # Docker image to use.
27 | FROM sloopstash/base:v1.2.1 AS create_redis_directories
28 |
29 | # Create Redis directories.
30 | RUN set -x \
31 | && mkdir /opt/redis \
32 | && mkdir /opt/redis/data \
33 | && mkdir /opt/redis/log \
34 | && mkdir /opt/redis/conf \
35 | && mkdir /opt/redis/script \
36 | && mkdir /opt/redis/system \
37 | && touch /opt/redis/system/server.pid \
38 | && touch /opt/redis/system/supervisor.ini
39 |
40 | # Docker image to use.
41 | FROM sloopstash/base:v1.2.1 AS finalize_redis_oci_image
42 |
43 | # Copy Redis binary executable programs.
44 | COPY --from=install_redis /usr/local/bin/redis-server /usr/local/bin/redis-server
45 | COPY --from=install_redis /usr/local/bin/redis-cli /usr/local/bin/redis-cli
46 |
47 | # Copy Redis directories.
48 | COPY --from=create_redis_directories /opt/redis /opt/redis
49 |
50 | # Symlink Supervisor configuration.
51 | RUN set -x \
52 | && ln -s /opt/redis/system/supervisor.ini /etc/supervisord.d/redis.ini \
53 | && history -c
54 |
55 | # Set default work directory.
56 | WORKDIR /opt/redis
57 |
--------------------------------------------------------------------------------
/image/redis/7.2.1/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/image/ruby/3.3.5/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install system packages.
5 | RUN set -x \
6 | && amazon-linux-extras enable postgresql14 \
7 | && yum install -y openssl-devel zlib-devel libyaml-devel libffi-devel postgresql-devel
8 |
9 | # Download and extract Ruby.
10 | WORKDIR /tmp
11 | RUN set -x \
12 | && wget https://cache.ruby-lang.org/pub/ruby/3.3/ruby-3.3.5.tar.gz --quiet \
13 | && tar xvzf ruby-3.3.5.tar.gz > /dev/null
14 |
15 | # Compile and install Ruby.
16 | WORKDIR ruby-3.3.5
17 | RUN set -x \
18 | && ./configure \
19 | && make \
20 | && make install
21 |
22 | # Install Ruby packages.
23 | WORKDIR ../
24 | COPY Gemfile ./
25 | RUN set -x \
26 | && gem install bundler \
27 | && bundle install \
28 | && rm -f Gemfile*
29 |
30 | # Create App directories.
31 | RUN set -x \
32 | && rm -rf ruby-3.3.5* \
33 | && mkdir /opt/app \
34 | && mkdir /opt/app/source \
35 | && mkdir /opt/app/log \
36 | && mkdir /opt/app/system \
37 | && touch /opt/app/system/supervisor.ini \
38 | && ln -s /opt/app/system/supervisor.ini /etc/supervisord.d/app.ini \
39 | && history -c
40 |
41 | # Set default work directory.
42 | WORKDIR /opt/app
43 |
--------------------------------------------------------------------------------
/image/ruby/3.3.5/context/Gemfile:
--------------------------------------------------------------------------------
1 | ruby '3.3.5'
2 | source 'https://rubygems.org'
3 | gem 'rails','~> 7.2.1'
4 | gem 'pg','~> 1.5.8'
5 | gem 'msgpack','~> 1.7.3'
6 | gem 'bootsnap','~> 1.18.4'
7 | gem 'ostruct','~> 0.6.0'
8 | gem 'sprockets-rails','~> 3.5.2'
9 | gem 'bcrypt','~> 3.1.20'
10 |
--------------------------------------------------------------------------------
/image/spark/3.5.3/amazon-linux-2.dockerfile:
--------------------------------------------------------------------------------
1 | # Docker image to use.
2 | FROM sloopstash/base:v1.1.1
3 |
4 | # Install Oracle JDK.
5 | WORKDIR /tmp
6 | COPY jdk-8u131-linux-x64.rpm ./
7 | RUN set -x \
8 | && yum install -y jdk-8u131-linux-x64.rpm \
9 | && rm -f jdk-8u131-linux-x64.rpm
10 |
11 | # Install Spark.
12 | RUN set -x \
13 | && wget https://dlcdn.apache.org/spark/spark-3.5.3/spark-3.5.3-bin-hadoop3.tgz --quiet \
14 | && tar xvzf spark-3.5.3-bin-hadoop3.tgz > /dev/null \
15 | && mkdir /usr/local/lib/spark \
16 | && cp -r spark-3.5.3-bin-hadoop3/* /usr/local/lib/spark/ \
17 | && rm -rf spark-3.5.3-bin-hadoop3*
18 |
19 | # Create Spark directories.
20 | RUN set -x \
21 | && mkdir /opt/spark \
22 | && mkdir /opt/spark/data \
23 | && mkdir /opt/spark/log \
24 | && mkdir /opt/spark/conf \
25 | && mkdir /opt/spark/script \
26 | && mkdir /opt/spark/system \
27 | && touch /opt/spark/system/node.pid \
28 | && touch /opt/spark/system/supervisor.ini \
29 | && ln -s /opt/spark/system/supervisor.ini /etc/supervisord.d/spark.ini \
30 | && history -c
31 |
32 | # Set default work directory.
33 | WORKDIR /opt/spark
34 |
--------------------------------------------------------------------------------
/image/spark/3.5.3/context/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory.
2 | *
3 | # Except this file.
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/vagrant/alma-linux-9/virtualbox/amd64/server/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 | # The most common configuration options are documented and commented below.
10 | # For a complete reference, please see the online documentation at
11 | # https://docs.vagrantup.com.
12 |
13 | # Every Vagrant development environment requires a box. You can search for
14 | # boxes at https://vagrantcloud.com/search.
15 | config.vm.box = "sloopstash/alma-linux-9"
16 | config.vm.box_version = "1.1.1"
17 |
18 | # Define virtual machine name.
19 | # config.vm.define "sloopstash-alma-linux-9-server"
20 |
21 | # Set virtual machine hostname.
22 | # config.vm.hostname = "sloopstash-alma-linux-9-server"
23 |
24 | # Disable automatic box update checking. If you disable this, then
25 | # boxes will only be checked for updates when the user runs
26 | # `vagrant box outdated`. This is not recommended.
27 | config.vm.box_check_update = false
28 |
29 | # Create a forwarded port mapping which allows access to a specific port
30 | # within the machine from a port on the host machine. In the example below,
31 | # accessing "localhost:443" will access port 443 on the guest machine.
32 | # NOTE: This will enable public access to the opened port
33 | # config.vm.network "forwarded_port", guest: 443, host: 443, auto_correct: false
34 |
35 | # Create a forwarded port mapping which allows access to a specific port
36 | # within the machine from a port on the host machine and only allow access
37 | # via 127.0.0.1 to disable public access
38 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
39 |
40 | # Create a private network, which allows host-only access to the machine
41 | # using a specific IP.
42 | # config.vm.network "private_network", ip: "192.168.101.8"
43 |
44 | # Create a public network, which generally maps to a bridged network.
45 | # Bridged networks make the machine appear as another physical device on
46 | # your network.
47 | # config.vm.network "public_network"
48 |
49 | # Share an additional folder to the guest VM. The first argument is
50 | # the path on the host to the actual folder. The second argument is
51 | # the path on the guest to mount the folder. And the optional third
52 | # argument is a set of non-required options.
53 | config.vm.synced_folder ".", "/vagrant", disabled: true
54 |
55 | # SSH credentials to connect to virtual machine.
56 | config.ssh.username = "vagrant"
57 | config.ssh.private_key_path = ["~/.vagrant.d/insecure_private_key"]
58 | config.ssh.insert_key = false
59 |
60 | # Provider-specific configuration so you can fine-tune various
61 | # backing providers for Vagrant. These expose provider-specific options.
62 | # Example for VirtualBox:
63 | #
64 | config.vm.provider "virtualbox" do |vb|
65 | # Enable USB controller for the virtual machine.
66 | # vb.customize ["modifyvm",:id,"--usb","on"]
67 |
68 | # Disable GUI when booting the virtual machine.
69 | vb.gui = false
70 |
71 | # Allocate memory to the virtual machine.
72 | # vb.memory = "2048"
73 |
74 | # Allocate processors to the virtual machine.
75 | # vb.cpus = "1"
76 |
77 | # Set name for the virtual machine.
78 | # vb.name = "sloopstash-alma-linux-9-server"
79 | end
80 | #
81 | # View the documentation for the provider you are using for more
82 | # information on available options.
83 |
84 | # Enable provisioning with a shell script. Additional provisioners such as
85 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
86 | # documentation for more information about their specific syntax and use.
87 | config.vm.provision "shell", inline: <<-SHELL
88 | # Update installed packages.
89 | dnf update -y
90 | # Install system packages.
91 | dnf install -y wget vim nano net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
92 | dnf install -y python-devel python-pip python-setuptools
93 | # Install Supervisor.
94 | python -m pip install supervisor
95 | # Create Supervisor configuration directory.
96 | mkdir /etc/supervisord.d
97 | SHELL
98 |
99 | # Define multiple virtual machines.
100 | config.vm.define "sloopstash-dkr-mgr-1" do |sloopstash_dkr_mgr_1|
101 | sloopstash_dkr_mgr_1.vm.hostname = "sloopstash-dkr-mgr-1"
102 | sloopstash_dkr_mgr_1.vm.network "private_network", ip: "192.168.101.51"
103 | sloopstash_dkr_mgr_1.vm.provider "virtualbox" do |vb|
104 | vb.memory = "2048"
105 | vb.cpus = "1"
106 | vb.name = "sloopstash-dkr-mgr-1"
107 | end
108 | end
109 | config.vm.define "sloopstash-dkr-mgr-2" do |sloopstash_dkr_mgr_2|
110 | sloopstash_dkr_mgr_2.vm.hostname = "sloopstash-dkr-mgr-2"
111 | sloopstash_dkr_mgr_2.vm.network "private_network", ip: "192.168.101.52"
112 | sloopstash_dkr_mgr_2.vm.provider "virtualbox" do |vb|
113 | vb.memory = "2048"
114 | vb.cpus = "1"
115 | vb.name = "sloopstash-dkr-mgr-2"
116 | end
117 | end
118 | config.vm.define "sloopstash-dkr-wkr-1" do |sloopstash_dkr_wkr_1|
119 | sloopstash_dkr_wkr_1.vm.hostname = "sloopstash-dkr-wkr-1"
120 | sloopstash_dkr_wkr_1.vm.network "private_network", ip: "192.168.101.54"
121 | sloopstash_dkr_wkr_1.vm.provider "virtualbox" do |vb|
122 | vb.memory = "2048"
123 | vb.cpus = "1"
124 | vb.name = "sloopstash-dkr-wkr-1"
125 | end
126 | end
127 | config.vm.define "sloopstash-dkr-wkr-2" do |sloopstash_dkr_wkr_2|
128 | sloopstash_dkr_wkr_2.vm.hostname = "sloopstash-dkr-wkr-2"
129 | sloopstash_dkr_wkr_2.vm.network "private_network", ip: "192.168.101.55"
130 | sloopstash_dkr_wkr_2.vm.provider "virtualbox" do |vb|
131 | vb.memory = "2048"
132 | vb.cpus = "1"
133 | vb.name = "sloopstash-dkr-wkr-2"
134 | end
135 | end
136 | config.vm.define "sloopstash-dkr-wkr-3" do |sloopstash_dkr_wkr_3|
137 | sloopstash_dkr_wkr_3.vm.hostname = "sloopstash-dkr-wkr-3"
138 | sloopstash_dkr_wkr_3.vm.network "private_network", ip: "192.168.101.56"
139 | sloopstash_dkr_wkr_3.vm.provider "virtualbox" do |vb|
140 | vb.memory = "2048"
141 | vb.cpus = "1"
142 | vb.name = "sloopstash-dkr-wkr-3"
143 | end
144 | end
145 | end
146 |
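With the multi-machine definitions above, nodes can be brought up individually or together using the standard Vagrant CLI; a brief usage sketch:

# Start one manager node, or all five machines.
vagrant up sloopstash-dkr-mgr-1
vagrant up
# Inspect machine states and open a shell on a worker.
vagrant status
vagrant ssh sloopstash-dkr-wkr-1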
--------------------------------------------------------------------------------
/vagrant/alma-linux-9/vmware/arm64/server/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 | # The most common configuration options are documented and commented below.
10 | # For a complete reference, please see the online documentation at
11 | # https://docs.vagrantup.com.
12 |
13 | # Every Vagrant development environment requires a box. You can search for
14 | # boxes at https://vagrantcloud.com/search.
15 | config.vm.box = "sloopstash/alma-linux-9"
16 | config.vm.box_version = "2.1.1"
17 |
18 | # Define virtual machine name.
19 | # config.vm.define "sloopstash-alma-linux-9-server"
20 |
21 | # Set virtual machine hostname.
22 | # config.vm.hostname = "sloopstash-alma-linux-9-server"
23 |
24 | # Disable automatic box update checking. If you disable this, then
25 | # boxes will only be checked for updates when the user runs
26 | # `vagrant box outdated`. This is not recommended.
27 | config.vm.box_check_update = false
28 |
29 | # Create a forwarded port mapping which allows access to a specific port
30 | # within the machine from a port on the host machine. In the example below,
31 | # accessing "localhost:443" will access port 443 on the guest machine.
32 | # NOTE: This will enable public access to the opened port
33 | # config.vm.network "forwarded_port", guest: 443, host: 443, auto_correct: false
34 |
35 | # Create a forwarded port mapping which allows access to a specific port
36 | # within the machine from a port on the host machine and only allow access
37 | # via 127.0.0.1 to disable public access
38 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
39 |
40 | # Create a private network, which allows host-only access to the machine
41 | # using a specific IP.
42 | # config.vm.network "private_network", ip: "192.168.201.8"
43 |
44 | # Create a public network, which generally maps to a bridged network.
45 | # Bridged networks make the machine appear as another physical device on
46 | # your network.
47 | # config.vm.network "public_network"
48 |
49 | # Share an additional folder to the guest VM. The first argument is
50 | # the path on the host to the actual folder. The second argument is
51 | # the path on the guest to mount the folder. And the optional third
52 | # argument is a set of non-required options.
53 | config.vm.synced_folder ".", "/vagrant", disabled: true
54 |
55 | # SSH credentials to connect to virtual machine.
56 | config.ssh.username = "vagrant"
57 | config.ssh.private_key_path = ["~/.vagrant.d/insecure_private_key"]
58 | config.ssh.insert_key = false
59 |
60 | # Provider-specific configuration so you can fine-tune various
61 | # backing providers for Vagrant. These expose provider-specific options.
62 | # Example for VMware Fusion:
63 | #
64 | config.vm.provider "vmware_fusion" do |vf|
65 | # Disable GUI when booting the virtual machine.
66 | vf.gui = false
67 |
68 | # Allocate memory to the virtual machine.
69 | # vf.memory = "2048"
70 |
71 | # Allocate processors to the virtual machine.
72 | # vf.cpus = "1"
73 | end
74 | #
75 | # View the documentation for the provider you are using for more
76 | # information on available options.
77 |
78 | # Enable provisioning with a shell script. Additional provisioners such as
79 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
80 | # documentation for more information about their specific syntax and use.
81 | config.vm.provision "shell", inline: <<-SHELL
82 | # Update installed packages.
83 | dnf update -y
84 | # Install system packages.
85 | dnf install -y wget vim nano net-tools gcc make tar git unzip sysstat tree initscripts bind-utils nc nmap logrotate crontabs
86 | dnf install -y python-devel python-pip python-setuptools
87 | # Install Supervisor.
88 | python -m pip install supervisor
89 | # Create Supervisor configuration directory.
90 | mkdir /etc/supervisord.d
91 | SHELL
92 |
93 | # Define multiple virtual machines.
94 | config.vm.define "sloopstash-dkr-mgr-1" do |sloopstash_dkr_mgr_1|
95 | sloopstash_dkr_mgr_1.vm.hostname = "sloopstash-dkr-mgr-1"
96 | sloopstash_dkr_mgr_1.vm.network "private_network", ip: "192.168.201.51"
97 | sloopstash_dkr_mgr_1.vm.provider "vmware_fusion" do |vf|
98 | vf.memory = "2048"
99 | vf.cpus = "1"
100 | end
101 | end
102 | config.vm.define "sloopstash-dkr-mgr-2" do |sloopstash_dkr_mgr_2|
103 | sloopstash_dkr_mgr_2.vm.hostname = "sloopstash-dkr-mgr-2"
104 | sloopstash_dkr_mgr_2.vm.network "private_network", ip: "192.168.201.52"
105 | sloopstash_dkr_mgr_2.vm.provider "vmware_fusion" do |vf|
106 | vf.memory = "2048"
107 | vf.cpus = "1"
108 | end
109 | end
110 | config.vm.define "sloopstash-dkr-wkr-1" do |sloopstash_dkr_wkr_1|
111 | sloopstash_dkr_wkr_1.vm.hostname = "sloopstash-dkr-wkr-1"
112 | sloopstash_dkr_wkr_1.vm.network "private_network", ip: "192.168.201.54"
113 | sloopstash_dkr_wkr_1.vm.provider "vmware_fusion" do |vf|
114 | vf.memory = "2048"
115 | vf.cpus = "1"
116 | end
117 | end
118 | config.vm.define "sloopstash-dkr-wkr-2" do |sloopstash_dkr_wkr_2|
119 | sloopstash_dkr_wkr_2.vm.hostname = "sloopstash-dkr-wkr-2"
120 | sloopstash_dkr_wkr_2.vm.network "private_network", ip: "192.168.201.55"
121 | sloopstash_dkr_wkr_2.vm.provider "vmware_fusion" do |vf|
122 | vf.memory = "2048"
123 | vf.cpus = "1"
124 | end
125 | end
126 | config.vm.define "sloopstash-dkr-wkr-3" do |sloopstash_dkr_wkr_3|
127 | sloopstash_dkr_wkr_3.vm.hostname = "sloopstash-dkr-wkr-3"
128 | sloopstash_dkr_wkr_3.vm.network "private_network", ip: "192.168.201.56"
129 | sloopstash_dkr_wkr_3.vm.provider "vmware_fusion" do |vf|
130 | vf.memory = "2048"
131 | vf.cpus = "1"
132 | end
133 | end
134 | end
135 |
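A rough usage sketch (assuming Vagrant and the VMware desktop provider plugin are installed on the host): the machines defined above can be brought up and inspected individually by name, for example:

  vagrant up sloopstash-dkr-mgr-1
  vagrant status
  vagrant ssh sloopstash-dkr-mgr-1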
--------------------------------------------------------------------------------
/workload/elasticsearch/8.17.0/conf/server.yml:
--------------------------------------------------------------------------------
1 | # ======================== Elasticsearch Configuration =========================
2 | #
3 | # NOTE: Elasticsearch comes with reasonable defaults for most settings.
4 | # Before you set out to tweak and tune the configuration, make sure you
5 | # understand what are you trying to accomplish and the consequences.
6 | #
7 | # The primary way of configuring a node is via this file. This template lists
8 | # the most important settings you may want to configure for a production cluster.
9 | #
10 | # Please consult the documentation for further information on configuration options:
11 | # https://www.elastic.co/guide/en/elasticsearch/reference/index.html
12 | #
13 | # ---------------------------------- Cluster -----------------------------------
14 | #
15 | # Use a descriptive name for your cluster:
16 | #
17 | #cluster.name: my-application
18 | #
19 | # ------------------------------------ Node ------------------------------------
20 | #
21 | # Use a descriptive name for the node:
22 | #
23 | #node.name: node-1
24 | #
25 | # Add custom attributes to the node:
26 | #
27 | #node.attr.rack: r1
28 | #
29 | # ----------------------------------- Paths ------------------------------------
30 | #
31 | # Path to directory where to store the data (separate multiple locations by comma):
32 | #
33 | #path.data: /path/to/data
34 | #
35 | # Path to log files:
36 | #
37 | #path.logs: /path/to/logs
38 | #
39 | # ----------------------------------- Memory -----------------------------------
40 | #
41 | # Lock the memory on startup:
42 | #
43 | #bootstrap.memory_lock: true
44 | #
45 | # Make sure that the heap size is set to about half the memory available
46 | # on the system and that the owner of the process is allowed to use this
47 | # limit.
48 | #
49 | # Elasticsearch performs poorly when the system is swapping the memory.
50 | #
51 | # ---------------------------------- Network -----------------------------------
52 | #
53 | # By default Elasticsearch is only accessible on localhost. Set a different
54 | # address here to expose this node on the network:
55 | #
56 | #network.host: 192.168.0.1
57 | #
58 | # By default Elasticsearch listens for HTTP traffic on the first free port it
59 | # finds starting at 9200. Set a specific HTTP port here:
60 | #
61 | #http.port: 9200
62 | #
63 | # For more information, consult the network module documentation.
64 | #
65 | # --------------------------------- Discovery ----------------------------------
66 | #
67 | # Pass an initial list of hosts to perform discovery when this node is started:
68 | # The default list of hosts is ["127.0.0.1", "[::1]"]
69 | #
70 | #discovery.seed_hosts: ["host1", "host2"]
71 | #
72 | # Bootstrap the cluster using an initial set of master-eligible nodes:
73 | #
74 | #cluster.initial_master_nodes: ["node-1", "node-2"]
75 | #
76 | # For more information, consult the discovery and cluster formation module documentation.
77 | #
78 | # ---------------------------------- Various -----------------------------------
79 | #
80 | # Allow wildcard deletion of indices:
81 | #
82 | #action.destructive_requires_name: false
83 |
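This file ships with every setting commented out, so the node starts with Elasticsearch defaults. A minimal liveness check, assuming the node listens on the default HTTP port 9200 and security is either disabled or credentials are supplied, could be:

  curl -s http://localhost:9200/
  curl -s http://localhost:9200/_cluster/health?pretty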
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/conf/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <property>
3 |     <name>fs.defaultFS</name>
4 |     <value>hdfs://hadoop-name-1:9000</value>
5 |   </property>
6 |   <property>
7 |     <name>hadoop.tmp.dir</name>
8 |     <value>/opt/hadoop/tmp</value>
9 |   </property>
10 | </configuration>
11 | 
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/conf/env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hadoop-specific environment variables here.
18 |
19 | # The only required environment variable is JAVA_HOME. All others are
20 | # optional. When running a distributed configuration it is best to
21 | # set JAVA_HOME in this file, so that it is correctly defined on
22 | # remote nodes.
23 |
24 | # The java implementation to use.
25 | export JAVA_HOME=${JAVA_HOME}
26 |
27 | # The jsvc implementation to use. Jsvc is required to run secure datanodes
28 | # that bind to privileged ports to provide authentication of data transfer
29 | # protocol. Jsvc is not required if SASL is configured for authentication of
30 | # data transfer protocol using non-privileged ports.
31 | #export JSVC_HOME=${JSVC_HOME}
32 |
33 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
34 |
35 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
36 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
37 | if [ "$HADOOP_CLASSPATH" ]; then
38 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
39 | else
40 | export HADOOP_CLASSPATH=$f
41 | fi
42 | done
43 |
44 | # The maximum amount of heap to use, in MB. Default is 1000.
45 | export HADOOP_HEAPSIZE=256
46 | export HADOOP_NAMENODE_INIT_HEAPSIZE=256
47 |
48 | # Enable extra debugging of Hadoop's JAAS binding, used to set up
49 | # Kerberos security.
50 | # export HADOOP_JAAS_DEBUG=true
51 |
52 | # Extra Java runtime options. Empty by default.
53 | # For Kerberos debugging, an extended option set logs more information
54 | # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
55 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
56 |
57 | # Command specific options appended to HADOOP_OPTS when specified
58 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
59 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
60 |
61 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
62 |
63 | export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
64 | export HADOOP_PORTMAP_OPTS="-Xmx256m $HADOOP_PORTMAP_OPTS"
65 |
66 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
67 | export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS"
68 | # set heap args when HADOOP_HEAPSIZE is empty
69 | if [ "$HADOOP_HEAPSIZE" = "" ]; then
70 | export HADOOP_CLIENT_OPTS="-Xmx256m $HADOOP_CLIENT_OPTS"
71 | fi
72 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
73 |
74 | # On secure datanodes, user to run the datanode as after dropping privileges.
75 | # This **MUST** be uncommented to enable secure HDFS if using privileged ports
76 | # to provide authentication of data transfer protocol. This **MUST NOT** be
77 | # defined if SASL is configured for authentication of data transfer protocol
78 | # using non-privileged ports.
79 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
80 |
81 | # Where log files are stored. $HADOOP_HOME/logs by default.
82 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
83 |
84 | # Where log files are stored in the secure data environment.
85 | #export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
86 |
87 | ###
88 | # HDFS Mover specific parameters
89 | ###
90 | # Specify the JVM options to be used when starting the HDFS Mover.
91 | # These options will be appended to the options specified as HADOOP_OPTS
92 | # and therefore may override any similar flags set in HADOOP_OPTS
93 | #
94 | # export HADOOP_MOVER_OPTS=""
95 |
96 | ###
97 | # Router-based HDFS Federation specific parameters
98 | # Specify the JVM options to be used when starting the RBF Routers.
99 | # These options will be appended to the options specified as HADOOP_OPTS
100 | # and therefore may override any similar flags set in HADOOP_OPTS
101 | #
102 | # export HADOOP_DFSROUTER_OPTS=""
103 | ###
104 |
105 | ###
106 | # Advanced Users Only!
107 | ###
108 |
109 | # The directory where pid files are stored. /tmp by default.
110 | # NOTE: this should be set to a directory that can only be written to by
111 | # the user that will run the hadoop daemons. Otherwise there is the
112 | # potential for a symlink attack.
113 | export HADOOP_PID_DIR=${HADOOP_PID_DIR}
114 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
115 |
116 | # A string representing this instance of hadoop. $USER by default.
117 | export HADOOP_IDENT_STRING=$USER
118 |
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <property>
3 |     <name>dfs.datanode.data.dir</name>
4 |     <value>/opt/hadoop/data</value>
5 |   </property>
6 |   <property>
7 |     <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
8 |     <value>false</value>
9 |   </property>
10 |   <property>
11 |     <name>dfs.client.use.datanode.hostname</name>
12 |     <value>false</value>
13 |   </property>
14 |   <property>
15 |     <name>dfs.datanode.use.datanode.hostname</name>
16 |     <value>false</value>
17 |   </property>
18 | </configuration>
19 | 
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:hadoop]
2 | command=bash -c "/opt/hadoop/script/start.sh"
3 | process_name=%(program_name)s
4 | pidfile=/opt/hadoop/system/node.pid
5 | numprocs=1
6 | autorestart=false
7 |
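This registers the start script as a Supervisor program. Assuming supervisord includes files from /etc/supervisord.d (the directory created during provisioning), the process can be managed with supervisorctl, for example:

  supervisorctl reread
  supervisorctl update
  supervisorctl status hadoop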
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/script/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/local/lib/hadoop/sbin/hadoop-daemon.sh start datanode
4 |
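Once the DataNode daemon is up, one way to confirm it has registered with the NameNode (run from a node whose client configuration points at the NameNode) is:

  /usr/local/lib/hadoop/bin/hdfs dfsadmin -report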
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/data/script/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/local/lib/hadoop/sbin/hadoop-daemon.sh stop datanode
4 |
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/conf/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <property>
3 |     <name>fs.defaultFS</name>
4 |     <value>hdfs://0.0.0.0:9000</value>
5 |   </property>
6 |   <property>
7 |     <name>hadoop.tmp.dir</name>
8 |     <value>/opt/hadoop/tmp</value>
9 |   </property>
10 | </configuration>
11 | 
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/conf/env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hadoop-specific environment variables here.
18 |
19 | # The only required environment variable is JAVA_HOME. All others are
20 | # optional. When running a distributed configuration it is best to
21 | # set JAVA_HOME in this file, so that it is correctly defined on
22 | # remote nodes.
23 |
24 | # The java implementation to use.
25 | export JAVA_HOME=${JAVA_HOME}
26 |
27 | # The jsvc implementation to use. Jsvc is required to run secure datanodes
28 | # that bind to privileged ports to provide authentication of data transfer
29 | # protocol. Jsvc is not required if SASL is configured for authentication of
30 | # data transfer protocol using non-privileged ports.
31 | #export JSVC_HOME=${JSVC_HOME}
32 |
33 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
34 |
35 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
36 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
37 | if [ "$HADOOP_CLASSPATH" ]; then
38 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
39 | else
40 | export HADOOP_CLASSPATH=$f
41 | fi
42 | done
43 |
44 | # The maximum amount of heap to use, in MB. Default is 1000.
45 | export HADOOP_HEAPSIZE=256
46 | export HADOOP_NAMENODE_INIT_HEAPSIZE=256
47 |
48 | # Enable extra debugging of Hadoop's JAAS binding, used to set up
49 | # Kerberos security.
50 | # export HADOOP_JAAS_DEBUG=true
51 |
52 | # Extra Java runtime options. Empty by default.
53 | # For Kerberos debugging, an extended option set logs more information
54 | # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
55 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
56 |
57 | # Command specific options appended to HADOOP_OPTS when specified
58 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
59 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
60 |
61 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
62 |
63 | export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
64 | export HADOOP_PORTMAP_OPTS="-Xmx256m $HADOOP_PORTMAP_OPTS"
65 |
66 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
67 | export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS"
68 | # set heap args when HADOOP_HEAPSIZE is empty
69 | if [ "$HADOOP_HEAPSIZE" = "" ]; then
70 | export HADOOP_CLIENT_OPTS="-Xmx256m $HADOOP_CLIENT_OPTS"
71 | fi
72 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
73 |
74 | # On secure datanodes, user to run the datanode as after dropping privileges.
75 | # This **MUST** be uncommented to enable secure HDFS if using privileged ports
76 | # to provide authentication of data transfer protocol. This **MUST NOT** be
77 | # defined if SASL is configured for authentication of data transfer protocol
78 | # using non-privileged ports.
79 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
80 |
81 | # Where log files are stored. $HADOOP_HOME/logs by default.
82 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
83 |
84 | # Where log files are stored in the secure data environment.
85 | #export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
86 |
87 | ###
88 | # HDFS Mover specific parameters
89 | ###
90 | # Specify the JVM options to be used when starting the HDFS Mover.
91 | # These options will be appended to the options specified as HADOOP_OPTS
92 | # and therefore may override any similar flags set in HADOOP_OPTS
93 | #
94 | # export HADOOP_MOVER_OPTS=""
95 |
96 | ###
97 | # Router-based HDFS Federation specific parameters
98 | # Specify the JVM options to be used when starting the RBF Routers.
99 | # These options will be appended to the options specified as HADOOP_OPTS
100 | # and therefore may override any similar flags set in HADOOP_OPTS
101 | #
102 | # export HADOOP_DFSROUTER_OPTS=""
103 | ###
104 |
105 | ###
106 | # Advanced Users Only!
107 | ###
108 |
109 | # The directory where pid files are stored. /tmp by default.
110 | # NOTE: this should be set to a directory that can only be written to by
111 | # the user that will run the hadoop daemons. Otherwise there is the
112 | # potential for a symlink attack.
113 | export HADOOP_PID_DIR=${HADOOP_PID_DIR}
114 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
115 |
116 | # A string representing this instance of hadoop. $USER by default.
117 | export HADOOP_IDENT_STRING=$USER
118 |
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration>
2 |   <property>
3 |     <name>dfs.namenode.name.dir</name>
4 |     <value>/opt/hadoop/data</value>
5 |   </property>
6 |   <property>
7 |     <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
8 |     <value>false</value>
9 |   </property>
10 |   <property>
11 |     <name>dfs.client.use.datanode.hostname</name>
12 |     <value>false</value>
13 |   </property>
14 |   <property>
15 |     <name>dfs.datanode.use.datanode.hostname</name>
16 |     <value>false</value>
17 |   </property>
18 | </configuration>
19 | 
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:hadoop]
2 | command=bash -c "/opt/hadoop/script/start.sh"
3 | process_name=%(program_name)s
4 | pidfile=/opt/hadoop/system/node.pid
5 | numprocs=1
6 | autorestart=false
7 |
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/script/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d '/opt/hadoop/data/current' ]; then
4 | /usr/local/lib/hadoop/bin/hdfs namenode -format hadoop-cluster
5 | fi
6 | /usr/local/lib/hadoop/sbin/hadoop-daemon.sh start namenode
7 |
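The format step is guarded by the directory check, so re-running the script does not re-format an existing metadata directory. As a quick check that the NameNode is serving (assuming the Hadoop 2.x default web UI port 50070 and a reachable host):

  curl -s http://localhost:50070/
  /usr/local/lib/hadoop/bin/hdfs dfs -ls /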
--------------------------------------------------------------------------------
/workload/hadoop/2.10.1/name/script/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/local/lib/hadoop/sbin/hadoop-daemon.sh stop namenode
4 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/broker/conf/server.conf:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #
17 | # This configuration file is intended for use in KRaft mode, where
18 | # Apache ZooKeeper is not present. See config/kraft/README.md for details.
19 | #
20 |
21 | ############################# Server Basics #############################
22 |
23 | # The role of this server. Setting this puts us in KRaft mode
24 | process.roles=broker
25 |
26 | # The node id associated with this instance's roles
27 | node.id=4
28 |
29 | # The connect string for the controller quorum
30 | controller.quorum.voters=1@kafka-controller-1:9093,2@kafka-controller-2:9093,3@kafka-controller-3:9093
31 |
32 | ############################# Socket Server Settings #############################
33 |
34 | # The address the socket server listens on. If not configured, the host name will be equal to the value of
35 | # java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
36 | # FORMAT:
37 | # listeners = listener_name://host_name:port
38 | # EXAMPLE:
39 | # listeners = PLAINTEXT://your.host.name:9092
40 | listeners=PLAINTEXT://:9092
41 |
42 | # Name of listener used for communication between brokers.
43 | inter.broker.listener.name=PLAINTEXT
44 |
45 | # Listener name, hostname and port the broker will advertise to clients.
46 | # If not set, it uses the value for "listeners".
47 | advertised.listeners=PLAINTEXT://:9092
48 |
49 | # A comma-separated list of the names of the listeners used by the controller.
50 | # This is required if running in KRaft mode. On a node with `process.roles=broker`, only the first listed listener will be used by the broker.
51 | controller.listener.names=CONTROLLER
52 |
53 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
54 | listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
55 |
56 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
57 | num.network.threads=3
58 |
59 | # The number of threads that the server uses for processing requests, which may include disk I/O
60 | num.io.threads=8
61 |
62 | # The send buffer (SO_SNDBUF) used by the socket server
63 | socket.send.buffer.bytes=102400
64 |
65 | # The receive buffer (SO_RCVBUF) used by the socket server
66 | socket.receive.buffer.bytes=102400
67 |
68 | # The maximum size of a request that the socket server will accept (protection against OOM)
69 | socket.request.max.bytes=104857600
70 |
71 |
72 | ############################# Log Basics #############################
73 |
74 | # A comma separated list of directories under which to store log files
75 | log.dirs=/opt/kafka/data
76 |
77 | # The default number of log partitions per topic. More partitions allow greater
78 | # parallelism for consumption, but this will also result in more files across
79 | # the brokers.
80 | num.partitions=1
81 |
82 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
83 | # This value is recommended to be increased for installations with data dirs located in RAID array.
84 | num.recovery.threads.per.data.dir=1
85 |
86 | ############################# Internal Topic Settings #############################
87 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
88 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
89 | offsets.topic.replication.factor=1
90 | transaction.state.log.replication.factor=1
91 | transaction.state.log.min.isr=1
92 |
93 | ############################# Log Flush Policy #############################
94 |
95 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
96 | # the OS cache lazily. The following configurations control the flush of data to disk.
97 | # There are a few important trade-offs here:
98 | # 1. Durability: Unflushed data may be lost if you are not using replication.
99 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
100 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
101 | # The settings below allow one to configure the flush policy to flush data after a period of time or
102 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
103 |
104 | # The number of messages to accept before forcing a flush of data to disk
105 | #log.flush.interval.messages=10000
106 |
107 | # The maximum amount of time a message can sit in a log before we force a flush
108 | #log.flush.interval.ms=1000
109 |
110 | ############################# Log Retention Policy #############################
111 |
112 | # The following configurations control the disposal of log segments. The policy can
113 | # be set to delete segments after a period of time, or after a given size has accumulated.
114 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
115 | # from the end of the log.
116 |
117 | # The minimum age of a log file to be eligible for deletion due to age
118 | log.retention.hours=168
119 |
120 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
121 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
122 | #log.retention.bytes=1073741824
123 |
124 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
125 | log.segment.bytes=1073741824
126 |
127 | # The interval at which log segments are checked to see if they can be deleted according
128 | # to the retention policies
129 | log.retention.check.interval.ms=300000
130 |
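With the broker listening on port 9092, a possible smoke test uses the CLI tools shipped with Kafka (the topic name smoke-test below is purely illustrative):

  /usr/local/lib/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
  /usr/local/lib/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list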
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/broker/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:kafka]
2 | command=bash -c "/opt/kafka/script/start.sh"
3 | process_name=%(program_name)s
4 | pidfile=/opt/kafka/system/node.pid
5 | numprocs=1
6 | autorestart=false
7 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/broker/script/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SERVER_REFERENCE_CONF=/opt/kafka/conf/server-reference.conf
4 | SERVER_CONF=/opt/kafka/conf/server.conf
5 | CLUSTER_ID="OKbXWnLxQI-iFjS3AFuIzw"
6 | export KAFKA_HEAP_OPTS="-Xmx256M -Xms256M"
7 | sed "s/node.id=.*/node.id=$NODE_ID/" $SERVER_REFERENCE_CONF > $SERVER_CONF
8 | /usr/local/lib/kafka/bin/kafka-storage.sh format -t $CLUSTER_ID -c $SERVER_CONF
9 | /usr/local/lib/kafka/bin/kafka-server-start.sh $SERVER_CONF
10 |
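The script is a small templating step: it rewrites node.id in the reference configuration from the NODE_ID environment variable, formats the storage directory with the fixed cluster ID, and then starts the server in the foreground as Supervisor expects. A sketch of the substitution, using the node id shown in the broker configuration above:

  export NODE_ID=4
  sed "s/node.id=.*/node.id=$NODE_ID/" /opt/kafka/conf/server-reference.conf | grep '^node.id='
  # prints: node.id=4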
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/broker/script/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SERVER_CONF=/opt/kafka/conf/server.conf
4 | /usr/local/lib/kafka/bin/kafka-server-stop.sh $SERVER_CONF
5 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/controller/conf/server.conf:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | #
17 | # This configuration file is intended for use in KRaft mode, where
18 | # Apache ZooKeeper is not present. See config/kraft/README.md for details.
19 | #
20 |
21 | ############################# Server Basics #############################
22 |
23 | # The role of this server. Setting this puts us in KRaft mode
24 | process.roles=controller
25 |
26 | # The node id associated with this instance's roles
27 | node.id=1
28 |
29 | # The connect string for the controller quorum
30 | controller.quorum.voters=1@kafka-controller-1:9093,2@kafka-controller-2:9093,3@kafka-controller-3:9093
31 |
32 | ############################# Socket Server Settings #############################
33 |
34 | # The address the socket server listens on.
35 | # Note that only the controller listeners are allowed here when `process.roles=controller`, and this listener should be consistent with `controller.quorum.voters` value.
36 | # FORMAT:
37 | # listeners = listener_name://host_name:port
38 | # EXAMPLE:
39 | # listeners = PLAINTEXT://your.host.name:9092
40 | listeners=CONTROLLER://0.0.0.0:9093
41 |
42 | # A comma-separated list of the names of the listeners used by the controller.
43 | # This is required if running in KRaft mode.
44 | controller.listener.names=CONTROLLER
45 |
46 | # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
47 | #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
48 |
49 | # The number of threads that the server uses for receiving requests from the network and sending responses to the network
50 | num.network.threads=3
51 |
52 | # The number of threads that the server uses for processing requests, which may include disk I/O
53 | num.io.threads=8
54 |
55 | # The send buffer (SO_SNDBUF) used by the socket server
56 | socket.send.buffer.bytes=102400
57 |
58 | # The receive buffer (SO_RCVBUF) used by the socket server
59 | socket.receive.buffer.bytes=102400
60 |
61 | # The maximum size of a request that the socket server will accept (protection against OOM)
62 | socket.request.max.bytes=104857600
63 |
64 |
65 | ############################# Log Basics #############################
66 |
67 | # A comma separated list of directories under which to store log files
68 | log.dirs=/opt/kafka/data
69 |
70 | # The default number of log partitions per topic. More partitions allow greater
71 | # parallelism for consumption, but this will also result in more files across
72 | # the brokers.
73 | num.partitions=1
74 |
75 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
76 | # This value is recommended to be increased for installations with data dirs located in RAID array.
77 | num.recovery.threads.per.data.dir=1
78 |
79 | ############################# Internal Topic Settings #############################
80 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
81 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
82 | offsets.topic.replication.factor=1
83 | transaction.state.log.replication.factor=1
84 | transaction.state.log.min.isr=1
85 |
86 | ############################# Log Flush Policy #############################
87 |
88 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
89 | # the OS cache lazily. The following configurations control the flush of data to disk.
90 | # There are a few important trade-offs here:
91 | # 1. Durability: Unflushed data may be lost if you are not using replication.
92 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
93 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
94 | # The settings below allow one to configure the flush policy to flush data after a period of time or
95 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
96 |
97 | # The number of messages to accept before forcing a flush of data to disk
98 | #log.flush.interval.messages=10000
99 |
100 | # The maximum amount of time a message can sit in a log before we force a flush
101 | #log.flush.interval.ms=1000
102 |
103 | ############################# Log Retention Policy #############################
104 |
105 | # The following configurations control the disposal of log segments. The policy can
106 | # be set to delete segments after a period of time, or after a given size has accumulated.
107 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
108 | # from the end of the log.
109 |
110 | # The minimum age of a log file to be eligible for deletion due to age
111 | log.retention.hours=168
112 |
113 | # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
114 | # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
115 | #log.retention.bytes=1073741824
116 |
117 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
118 | log.segment.bytes=1073741824
119 |
120 | # The interval at which log segments are checked to see if they can be deleted according
121 | # to the retention policies
122 | log.retention.check.interval.ms=300000
123 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/controller/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:kafka]
2 | command=bash -c "/opt/kafka/script/start.sh"
3 | process_name=%(program_name)s
4 | pidfile=/opt/kafka/system/node.pid
5 | numprocs=1
6 | autorestart=false
7 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/controller/script/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SERVER_REFERENCE_CONF=/opt/kafka/conf/server-reference.conf
4 | SERVER_CONF=/opt/kafka/conf/server.conf
5 | CLUSTER_ID="OKbXWnLxQI-iFjS3AFuIzw"
6 | export KAFKA_HEAP_OPTS="-Xmx256M -Xms256M"
7 | sed "s/node.id=.*/node.id=$NODE_ID/" $SERVER_REFERENCE_CONF > $SERVER_CONF
8 | /usr/local/lib/kafka/bin/kafka-storage.sh format -t $CLUSTER_ID -c $SERVER_CONF
9 | /usr/local/lib/kafka/bin/kafka-server-start.sh $SERVER_CONF
10 |
--------------------------------------------------------------------------------
/workload/kafka/3.2.1/controller/script/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SERVER_CONF=/opt/kafka/conf/server.conf
4 | /usr/local/lib/kafka/bin/kafka-server-stop.sh $SERVER_CONF
5 |
--------------------------------------------------------------------------------
/workload/kibana/8.17.0/conf/server.yml:
--------------------------------------------------------------------------------
1 | # For more configuration options see the configuration guide for Kibana in
2 | # https://www.elastic.co/guide/index.html
3 |
4 | # =================== System: Kibana Server ===================
5 | # Kibana is served by a back end server. This setting specifies the port to use.
6 | #server.port: 5601
7 |
8 | # Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
9 | # The default is 'localhost', which usually means remote machines will not be able to connect.
10 | # To allow connections from remote users, set this parameter to a non-loopback address.
11 | #server.host: "localhost"
12 |
13 | # Enables you to specify a path to mount Kibana at if you are running behind a proxy.
14 | # Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
15 | # from requests it receives, and to prevent a deprecation warning at startup.
16 | # This setting cannot end in a slash.
17 | #server.basePath: ""
18 |
19 | # Specifies whether Kibana should rewrite requests that are prefixed with
20 | # `server.basePath` or require that they are rewritten by your reverse proxy.
21 | # Defaults to `false`.
22 | #server.rewriteBasePath: false
23 |
24 | # Specifies the public URL at which Kibana is available for end users. If
25 | # `server.basePath` is configured this URL should end with the same basePath.
26 | #server.publicBaseUrl: ""
27 |
28 | # The maximum payload size in bytes for incoming server requests.
29 | #server.maxPayload: 1048576
30 |
31 | # The Kibana server's name. This is used for display purposes.
32 | #server.name: "your-hostname"
33 |
34 | # =================== System: Kibana Server (Optional) ===================
35 | # Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
36 | # These settings enable SSL for outgoing requests from the Kibana server to the browser.
37 | #server.ssl.enabled: false
38 | #server.ssl.certificate: /path/to/your/server.crt
39 | #server.ssl.key: /path/to/your/server.key
40 |
41 | # =================== System: Elasticsearch ===================
42 | # The URLs of the Elasticsearch instances to use for all your queries.
43 | #elasticsearch.hosts: ["http://localhost:9200"]
44 |
45 | # If your Elasticsearch is protected with basic authentication, these settings provide
46 | # the username and password that the Kibana server uses to perform maintenance on the Kibana
47 | # index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
48 | # is proxied through the Kibana server.
49 | #elasticsearch.username: "kibana_system"
50 | #elasticsearch.password: "pass"
51 |
52 | # Kibana can also authenticate to Elasticsearch via "service account tokens".
53 | # Service account tokens are Bearer style tokens that replace the traditional username/password based configuration.
54 | # Use this token instead of a username/password.
55 | # elasticsearch.serviceAccountToken: "my_token"
56 |
57 | # Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
58 | # the elasticsearch.requestTimeout setting.
59 | #elasticsearch.pingTimeout: 1500
60 |
61 | # Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
62 | # must be a positive integer.
63 | #elasticsearch.requestTimeout: 30000
64 |
65 | # The maximum number of sockets that can be used for communications with elasticsearch.
66 | # Defaults to `Infinity`.
67 | #elasticsearch.maxSockets: 1024
68 |
69 | # Specifies whether Kibana should use compression for communications with elasticsearch
70 | # Defaults to `false`.
71 | #elasticsearch.compression: false
72 |
73 | # List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
74 | # headers, set this value to [] (an empty list).
75 | #elasticsearch.requestHeadersWhitelist: [ authorization ]
76 |
77 | # Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
78 | # by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
79 | #elasticsearch.customHeaders: {}
80 |
81 | # Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
82 | #elasticsearch.shardTimeout: 30000
83 |
84 | # =================== System: Elasticsearch (Optional) ===================
85 | # These files are used to verify the identity of Kibana to Elasticsearch and are required when
86 | # xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
87 | #elasticsearch.ssl.certificate: /path/to/your/client.crt
88 | #elasticsearch.ssl.key: /path/to/your/client.key
89 |
90 | # Enables you to specify a path to the PEM file for the certificate
91 | # authority for your Elasticsearch instance.
92 | #elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
93 |
94 | # To disregard the validity of SSL certificates, change this setting's value to 'none'.
95 | #elasticsearch.ssl.verificationMode: full
96 |
97 | # =================== System: Logging ===================
98 | # Set the value of this setting to off to suppress all logging output, or to debug to log everything. Defaults to 'info'
99 | #logging.root.level: debug
100 |
101 | # Enables you to specify a file where Kibana stores log output.
102 | #logging.appenders.default:
103 | # type: file
104 | # fileName: /var/logs/kibana.log
105 | # layout:
106 | # type: json
107 |
108 | # Example with size based log rotation
109 | #logging.appenders.default:
110 | # type: rolling-file
111 | # fileName: /var/logs/kibana.log
112 | # policy:
113 | # type: size-limit
114 | # size: 256mb
115 | # strategy:
116 | # type: numeric
117 | # max: 10
118 | # layout:
119 | # type: json
120 |
121 | # Logs queries sent to Elasticsearch.
122 | #logging.loggers:
123 | # - name: elasticsearch.query
124 | # level: debug
125 |
126 | # Logs http responses.
127 | #logging.loggers:
128 | # - name: http.server.response
129 | # level: debug
130 |
131 | # Logs system usage information.
132 | #logging.loggers:
133 | # - name: metrics.ops
134 | # level: debug
135 |
136 | # Enables debug logging on the browser (dev console)
137 | #logging.browser.root:
138 | # level: debug
139 |
140 | # =================== System: Other ===================
141 | # The path where Kibana stores persistent data not saved in Elasticsearch. Defaults to data
142 | #path.data: data
143 |
144 | # Specifies the path where Kibana creates the process ID file.
145 | #pid.file: /run/kibana/kibana.pid
146 |
147 | # Set the interval in milliseconds to sample system and process performance
148 | # metrics. Minimum is 100ms. Defaults to 5000ms.
149 | #ops.interval: 5000
150 |
151 | # Specifies locale to be used for all localizable strings, dates and number formats.
152 | # Supported languages are the following: English (default) "en", Chinese "zh-CN", Japanese "ja-JP", French "fr-FR".
153 | #i18n.locale: "en"
154 |
155 | # =================== Frequently used (Optional)===================
156 |
157 | # =================== Saved Objects: Migrations ===================
158 | # Saved object migrations run at startup. If you run into migration-related issues, you might need to adjust these settings.
159 |
160 | # The number of documents migrated at a time.
161 | # If Kibana can't start up or upgrade due to an Elasticsearch `circuit_breaking_exception`,
162 | # use a smaller batchSize value to reduce the memory pressure. Defaults to 1000 objects per batch.
163 | #migrations.batchSize: 1000
164 |
165 | # The maximum payload size for indexing batches of upgraded saved objects.
166 | # To avoid migrations failing due to a 413 Request Entity Too Large response from Elasticsearch.
167 | # This value should be lower than or equal to your Elasticsearch cluster’s `http.max_content_length`
168 | # configuration option. Default: 100mb
169 | #migrations.maxBatchSizeBytes: 100mb
170 |
171 | # The number of times to retry temporary migration failures. Increase the setting
172 | # if migrations fail frequently with a message such as `Unable to complete the [...] step after
173 | # 15 attempts, terminating`. Defaults to 15
174 | #migrations.retryAttempts: 15
175 |
176 | # =================== Search Autocomplete ===================
177 | # Time in milliseconds to wait for autocomplete suggestions from Elasticsearch.
178 | # This value must be a whole number greater than zero. Defaults to 1000ms
179 | #unifiedSearch.autocomplete.valueSuggestions.timeout: 1000
180 |
181 | # Maximum number of documents loaded by each shard to generate autocomplete suggestions.
182 | # This value must be a whole number greater than zero. Defaults to 100_000
183 | #unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 100000
184 |
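With the defaults left in place Kibana serves on port 5601, so a simple liveness check (assuming no TLS or authentication in front of it) could be:

  curl -s http://localhost:5601/api/status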
--------------------------------------------------------------------------------
/workload/logstash/8.17.0/conf/beats.conf:
--------------------------------------------------------------------------------
1 | # Sample Logstash configuration for creating a simple
2 | # Beats -> Logstash -> Elasticsearch pipeline.
3 |
4 | input {
5 | beats {
6 | port => 5044
7 | }
8 | }
9 |
10 | output {
11 | elasticsearch {
12 | hosts => ["http://localhost:9200"]
13 | index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
14 | #user => "elastic"
15 | #password => "changeme"
16 | }
17 | }
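This pipeline accepts Beats events on port 5044 and indexes them into Elasticsearch. Before starting Logstash with it, the syntax can be validated from the Logstash home directory (the config path below is relative and depends on where the file is mounted):

  bin/logstash -f beats.conf --config.test_and_exit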
--------------------------------------------------------------------------------
/workload/logstash/8.17.0/conf/jvm.options:
--------------------------------------------------------------------------------
1 | ## JVM configuration
2 |
3 | # Xms represents the initial size of total heap space
4 | # Xmx represents the maximum size of total heap space
5 |
6 | -Xms512m
7 | -Xmx512m
8 |
9 | ################################################################
10 | ## Expert settings
11 | ################################################################
12 | ##
13 | ## All settings below this section are considered
14 | ## expert settings. Don't tamper with them unless
15 | ## you understand what you are doing
16 | ##
17 | ################################################################
18 |
19 | ## GC configuration
20 | 11-13:-XX:+UseConcMarkSweepGC
21 | 11-13:-XX:CMSInitiatingOccupancyFraction=75
22 | 11-13:-XX:+UseCMSInitiatingOccupancyOnly
23 |
24 | ## Locale
25 | # Set the locale language
26 | #-Duser.language=en
27 |
28 | # Set the locale country
29 | #-Duser.country=US
30 |
31 | # Set the locale variant, if any
32 | #-Duser.variant=
33 |
34 | ## basic
35 |
36 | # set the I/O temp directory
37 | #-Djava.io.tmpdir=$HOME
38 |
39 | # set to headless, just in case
40 | -Djava.awt.headless=true
41 |
42 | # ensure UTF-8 encoding by default (e.g. filenames)
43 | -Dfile.encoding=UTF-8
44 |
45 | # use our provided JNA always versus the system one
46 | #-Djna.nosys=true
47 |
48 | # Turn on JRuby invokedynamic
49 | -Djruby.compile.invokedynamic=true
50 |
51 | ## heap dumps
52 |
53 | # generate a heap dump when an allocation from the Java heap fails
54 | # heap dumps are created in the working directory of the JVM
55 | -XX:+HeapDumpOnOutOfMemoryError
56 |
57 | # specify an alternative path for heap dumps
58 | # ensure the directory exists and has sufficient space
59 | #-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof
60 |
61 | ## GC logging
62 | #-Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,pid,tags:filecount=32,filesize=64m
63 |
64 | # log GC status to a file with time stamps
65 | # ensure the directory exists
66 | #-Xloggc:${LS_GC_LOG_FILE}
67 |
68 | # Entropy source for randomness
69 | -Djava.security.egd=file:/dev/urandom
70 |
71 | # Copy the logging context from parent threads to children
72 | -Dlog4j2.isThreadContextMapInheritable=true
73 |
74 | # FasterXML/jackson defaults
75 | #
76 | # Sets the maximum string length (in chars or bytes, depending on input context).
77 | # This limit is not exact: values larger than the limit will eventually raise an exception,
78 | # while some values slightly above the limit may still be accepted; no value at or below the
79 | # limit will ever be treated as invalid.
80 | # This value should be higher than `logstash.jackson.stream-read-constraints.max-number-length`.
81 | # The jackson library defaults to 20000000 or 20MB, whereas Logstash defaults to 200MB or 200000000 characters.
82 | -Dlogstash.jackson.stream-read-constraints.max-string-length=200000000
83 | #
84 | # Sets the maximum number length (in chars or bytes, depending on input context).
85 | # The jackson library defaults to 1000, whereas Logstash defaults to 10000.
86 | -Dlogstash.jackson.stream-read-constraints.max-number-length=10000
87 | #
88 | # Sets the maximum nesting depth. The depth is a count of objects and arrays that have not
89 | # been closed, `{` and `[` respectively.
90 | #-Dlogstash.jackson.stream-read-constraints.max-nesting-depth=1000
91 |
--------------------------------------------------------------------------------
/workload/logstash/8.17.0/conf/pipelines.yml:
--------------------------------------------------------------------------------
1 | # List of pipelines to be loaded by Logstash
2 | #
3 | # This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings.
4 | # Default values for omitted settings are read from the `logstash.yml` file.
5 | # When declaring multiple pipelines, each MUST have its own `pipeline.id`.
6 | #
7 | # Example of two pipelines:
8 | #
9 | # - pipeline.id: test
10 | # pipeline.workers: 1
11 | # pipeline.batch.size: 1
12 | # config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
13 | # - pipeline.id: another_test
14 | # queue.type: persisted
15 | # path.config: "/tmp/logstash/*.config"
16 | #
17 | # Available options:
18 | #
19 | # # name of the pipeline
20 | # pipeline.id: mylogs
21 | #
22 | # # The configuration string to be used by this pipeline
23 | # config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }"
24 | #
25 | # # The path from where to read the configuration text
26 | # path.config: "/etc/conf.d/logstash/myconfig.cfg"
27 | #
28 | # # How many worker threads execute the Filters+Outputs stage of the pipeline
29 | # pipeline.workers: 1 (actually defaults to number of CPUs)
30 | #
31 | # # How many events to retrieve from inputs before sending to filters+workers
32 | # pipeline.batch.size: 125
33 | #
34 | # # How long to wait in milliseconds while polling for the next event
35 | # # before dispatching an undersized batch to filters+outputs
36 | # pipeline.batch.delay: 50
37 | #
38 | # Set the pipeline event ordering. Options are "auto" (the default), "true" or "false".
39 | # "auto" automatically enables ordering if the 'pipeline.workers' setting
40 | # is also set to '1', and disables otherwise.
41 | # "true" enforces ordering on a pipeline and prevents logstash from starting
42 | # a pipeline with multiple workers allocated.
43 | # "false" disable any extra processing necessary for preserving ordering.
44 | #
45 | # pipeline.ordered: auto
46 | #
47 | # # Internal queuing model, "memory" for legacy in-memory based queuing and
48 | # # "persisted" for disk-based acked queueing. Defaults is memory
49 | # queue.type: memory
50 | #
51 | # # If using queue.type: persisted, the page data files size. The queue data consists of
52 | # # append-only data files separated into pages. Default is 64mb
53 | # queue.page_capacity: 64mb
54 | #
55 | # # If using queue.type: persisted, the maximum number of unread events in the queue.
56 | # # Default is 0 (unlimited)
57 | # queue.max_events: 0
58 | #
59 | # # If using queue.type: persisted, the total capacity of the queue in number of bytes.
60 | # # Default is 1024mb or 1gb
61 | # queue.max_bytes: 1024mb
62 | #
63 | # # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
64 | # # Default is 1024, 0 for unlimited
65 | # queue.checkpoint.acks: 1024
66 | #
67 | # # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
68 | # # Default is 1024, 0 for unlimited
69 | # queue.checkpoint.writes: 1024
70 | #
71 | # # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
72 | # # Default is 1000, 0 for no periodic checkpoint.
73 | # queue.checkpoint.interval: 1000
74 | #
75 | # # Enable Dead Letter Queueing for this pipeline.
76 | # dead_letter_queue.enable: false
77 | #
78 | # If using dead_letter_queue.enable: true, the maximum size of dead letter queue for this pipeline. Entries
79 | # will be dropped if they would increase the size of the dead letter queue beyond this setting.
80 | # Default is 1024mb
81 | # dead_letter_queue.max_bytes: 1024mb
82 | #
83 | # If using dead_letter_queue.enable: true, the interval in milliseconds where if no further events eligible for the DLQ
84 | # have been created, a dead letter queue file will be written. A low value here will mean that more, smaller, queue files
85 | # may be written, while a larger value will introduce more latency between items being "written" to the dead letter queue, and
86 | # being available to be read by the dead_letter_queue input when items are written infrequently.
87 | # Default is 5000.
88 | #
89 | # dead_letter_queue.flush_interval: 5000
90 |
91 | # If using dead_letter_queue.enable: true, controls which entries should be dropped to avoid exceeding the size limit.
92 | # Set the value to `drop_newer` (default) to stop accepting new events that would push the DLQ size over the limit.
93 | # Set the value to `drop_older` to remove queue pages containing the oldest events to make space for new ones.
94 | #
95 | # dead_letter_queue.storage_policy: drop_newer
96 |
97 | # If using dead_letter_queue.enable: true, the interval for which events are considered valid. After the interval has
98 | # expired, the events may be automatically deleted from the DLQ.
99 | # The interval can be expressed in days, hours, minutes or seconds, using a postfix notation like 5d
100 | # to represent a five-day interval.
101 | # The available units are respectively d, h, m, s for day, hours, minutes and seconds.
102 | # If not specified then the DLQ doesn't use any age policy for cleaning events.
103 | #
104 | # dead_letter_queue.retain.age: 1d
105 |
106 | #
107 | # If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
108 | # Default is path.data/dead_letter_queue
109 | #
110 | # path.dead_letter_queue:
111 |
--------------------------------------------------------------------------------
/workload/logstash/8.17.0/conf/server.yml:
--------------------------------------------------------------------------------
1 | # Settings file in YAML
2 | #
3 | # Settings can be specified either in hierarchical form, e.g.:
4 | #
5 | # pipeline:
6 | # batch:
7 | # size: 125
8 | # delay: 5
9 | #
10 | # Or as flat keys:
11 | #
12 | # pipeline.batch.size: 125
13 | # pipeline.batch.delay: 5
14 | #
15 | # ------------ Node identity ------------
16 | #
17 | # Use a descriptive name for the node:
18 | #
19 | # node.name: test
20 | #
21 | # If omitted the node name will default to the machine's host name
22 | #
23 | # ------------ Data path ------------------
24 | #
25 | # Which directory should be used by logstash and its plugins
26 | # for any persistent needs. Defaults to LOGSTASH_HOME/data
27 | #
28 | # path.data:
29 | #
30 | # ------------ Pipeline Settings --------------
31 | #
32 | # The ID of the pipeline.
33 | #
34 | # pipeline.id: main
35 | #
36 | # Set the number of workers that will, in parallel, execute the filters+outputs
37 | # stage of the pipeline.
38 | #
39 | # This defaults to the number of the host's CPU cores.
40 | #
41 | # pipeline.workers: 2
42 | #
43 | # How many events to retrieve from inputs before sending to filters+workers
44 | #
45 | # pipeline.batch.size: 125
46 | #
47 | # How long to wait in milliseconds while polling for the next event
48 | # before dispatching an undersized batch to filters+outputs
49 | #
50 | # pipeline.batch.delay: 50
51 | #
52 | # Force Logstash to exit during shutdown even if there are still inflight
53 | # events in memory. By default, logstash will refuse to quit until all
54 | # received events have been pushed to the outputs.
55 | #
56 | # WARNING: Enabling this can lead to data loss during shutdown
57 | #
58 | # pipeline.unsafe_shutdown: false
59 | #
60 | # Set the pipeline event ordering. Options are "auto" (the default), "true" or "false".
61 | # "auto" automatically enables ordering if the 'pipeline.workers' setting
62 | # is also set to '1', and disables otherwise.
63 | # "true" enforces ordering on the pipeline and prevent logstash from starting
64 | # if there are multiple workers.
65 | # "false" disables any extra processing necessary for preserving ordering.
66 | #
67 | # pipeline.ordered: auto
68 | #
69 | # Sets the pipeline's default value for `ecs_compatibility`, a setting that is
70 | # available to plugins that implement an ECS Compatibility mode for use with
71 | # the Elastic Common Schema.
72 | # Possible values are:
73 | # - disabled
74 | # - v1
75 | # - v8 (default)
76 | # Pipelines defined before Logstash 8 operated without ECS in mind. To ensure a
77 | # migrated pipeline continues to operate as it did before your upgrade, opt-OUT
78 | # of ECS for the individual pipeline in its `pipelines.yml` definition. Setting
79 | # it here will set the default for _all_ pipelines, including new ones.
80 | #
81 | # pipeline.ecs_compatibility: v8
82 | #
83 | # ------------ Pipeline Configuration Settings --------------
84 | #
85 | # Where to fetch the pipeline configuration for the main pipeline
86 | #
87 | # path.config:
88 | #
89 | # Pipeline configuration string for the main pipeline
90 | #
91 | # config.string:
92 | #
93 | # At startup, test if the configuration is valid and exit (dry run)
94 | #
95 | # config.test_and_exit: false
96 | #
97 | # Periodically check if the configuration has changed and reload the pipeline
98 | # This can also be triggered manually through the SIGHUP signal
99 | #
100 | # config.reload.automatic: false
101 | #
102 | # How often to check if the pipeline configuration has changed (in seconds)
103 | # Note that the unit value (s) is required. Values without a qualifier (e.g. 60)
104 | # are treated as nanoseconds.
105 | # Setting the interval this way is not recommended and might change in later versions.
106 | #
107 | # config.reload.interval: 3s
108 | #
109 | # Show fully compiled configuration as debug log message
110 | # NOTE: --log.level must be 'debug'
111 | #
112 | # config.debug: false
113 | #
114 | # When enabled, process escaped characters such as \n and \" in strings in the
115 | # pipeline configuration files.
116 | #
117 | # config.support_escapes: false
118 | #
119 | # ------------ API Settings -------------
120 | # Define settings related to the HTTP API here.
121 | #
122 | # The HTTP API is enabled by default. It can be disabled, but features that rely
123 | # on it will not work as intended.
124 | #
125 | # api.enabled: true
126 | #
127 | # By default, the HTTP API is not secured and is therefore bound to only the
128 | # host's loopback interface, ensuring that it is not accessible to the rest of
129 | # the network.
130 | # When secured with SSL and Basic Auth, the API is bound to _all_ interfaces
131 | # unless configured otherwise.
132 | #
133 | # api.http.host: 127.0.0.1
134 | #
135 | # The HTTP API web server will listen on an available port from the given range.
136 | # Values can be specified as a single port (e.g., `9600`), or an inclusive range
137 | # of ports (e.g., `9600-9700`).
138 | #
139 | # api.http.port: 9600-9700
140 | #
141 | # The HTTP API includes a customizable "environment" value in its response,
142 | # which can be configured here.
143 | #
144 | # api.environment: "production"
145 | #
146 | # The HTTP API can be secured with SSL (TLS). To do so, you will need to provide
147 | # the path to a password-protected keystore in p12 or jks format, along with credentials.
148 | #
149 | # api.ssl.enabled: false
150 | # api.ssl.keystore.path: /path/to/keystore.jks
151 | # api.ssl.keystore.password: "y0uRp4$$w0rD"
152 | #
153 | # The availability of SSL/TLS protocols depends on the JVM version. Certain protocols are
154 | # disabled by default and need to be enabled manually by changing `jdk.tls.disabledAlgorithms`
155 | # in the $JDK_HOME/conf/security/java.security configuration file.
156 | #
157 | # api.ssl.supported_protocols: [TLSv1.2,TLSv1.3]
158 | #
159 | # The HTTP API can be configured to require authentication. Acceptable values are
160 | # - `none`: no auth is required (default)
161 | # - `basic`: clients must authenticate with HTTP Basic auth, as configured
162 | # with `api.auth.basic.*` options below
163 | # api.auth.type: none
164 | #
165 | # When configured with `api.auth.type` `basic`, you must provide the credentials
166 | # that requests will be validated against. Usage of Environment or Keystore
167 | # variable replacements is encouraged (such as the value `"${HTTP_PASS}"`, which
168 | # resolves to the value stored in the keystore's `HTTP_PASS` variable if present
169 | # or the same variable from the environment)
170 | #
171 | # api.auth.basic.username: "logstash-user"
172 | # api.auth.basic.password: "s3cUreP4$$w0rD"
173 | #
174 | # When setting `api.auth.basic.password`, the password should meet
175 | # the default password policy requirements.
176 | # The default password policy requires a non-empty string of at least 8 characters that
177 | # includes a digit, an upper case letter and a lower case letter.
178 | # The policy mode sets whether Logstash logs a WARN or raises an ERROR when the HTTP authentication
179 | # password doesn't meet the password policy requirements.
180 | # The default is WARN. Setting to ERROR enforces stronger passwords (recommended).
181 | #
182 | # api.auth.basic.password_policy.mode: WARN
183 | #
184 | # ------------ Module Settings ---------------
185 | # Define modules here. Module definitions must be defined as an array.
186 | # The simplest way to do this is to prefix each `name` with a `-`, and keep
187 | # all associated variables under the `name` they belong to, above the
188 | # next one, like this:
189 | #
190 | # modules:
191 | # - name: MODULE_NAME
192 | # var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
193 | # var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
194 | # var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
195 | # var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
196 | #
197 | # Module variable names must be in the format of
198 | #
199 | # var.PLUGIN_TYPE.PLUGIN_NAME.KEY
200 | #
201 | # modules:
202 | #
203 | # ------------ Cloud Settings ---------------
204 | # Define Elastic Cloud settings here.
205 | # Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
206 | # and it may have a label prefix e.g. staging:dXMtZ...
207 | # This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
208 | # cloud.id:
209 | #
210 | # Format of cloud.auth is: <user>:<password>
211 | # This is optional
212 | # If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
213 | # If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
214 | # cloud.auth: elastic:<password>
215 | #
216 | # ------------ Queuing Settings --------------
217 | #
218 | # Internal queuing model, "memory" for legacy in-memory based queuing and
219 | # "persisted" for disk-based acked queueing. Defaults is memory
220 | #
221 | # queue.type: memory
222 | #
223 | # If `queue.type: persisted`, the directory path where the pipeline data files will be stored.
224 | # Each pipeline will group its PQ files in a subdirectory matching its `pipeline.id`.
225 | # Default is path.data/queue.
226 | #
227 | # path.queue:
228 | #
229 | # If using queue.type: persisted, the page data files size. The queue data consists of
230 | # append-only data files separated into pages. Default is 64mb
231 | #
232 | # queue.page_capacity: 64mb
233 | #
234 | # If using queue.type: persisted, the maximum number of unread events in the queue.
235 | # Default is 0 (unlimited)
236 | #
237 | # queue.max_events: 0
238 | #
239 | # If using queue.type: persisted, the total capacity of the queue in number of bytes.
240 | # If you would like more unacked events to be buffered in Logstash, you can increase the
241 | # capacity using this setting. Please make sure your disk drive has capacity greater than
242 | # the size specified here. If both max_bytes and max_events are specified, Logstash will pick
243 | # whichever criterion is reached first
244 | # Default is 1024mb or 1gb
245 | #
246 | # queue.max_bytes: 1024mb
247 | #
248 | # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
249 | # Default is 1024, 0 for unlimited
250 | #
251 | # queue.checkpoint.acks: 1024
252 | #
253 | # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
254 | # Default is 1024, 0 for unlimited
255 | #
256 | # queue.checkpoint.writes: 1024
257 | #
258 | # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
259 | # Default is 1000, 0 for no periodic checkpoint.
260 | #
261 | # queue.checkpoint.interval: 1000
262 | #
263 | # ------------ Dead-Letter Queue Settings --------------
264 | # Flag to turn on dead-letter queue.
265 | #
266 | # dead_letter_queue.enable: false
267 |
268 | # If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
269 | # will be dropped if they would increase the size of the dead letter queue beyond this setting.
270 | # Default is 1024mb
271 | # dead_letter_queue.max_bytes: 1024mb
272 |
273 | # If using dead_letter_queue.enable: true, the interval in milliseconds where if no further events eligible for the DLQ
274 | # have been created, a dead letter queue file will be written. A low value here will mean that more, smaller, queue files
275 | # may be written, while a larger value will introduce more latency between items being "written" to the dead letter queue, and
276 | # being available to be read by the dead_letter_queue input when items are written infrequently.
277 | # Default is 5000.
278 | #
279 | # dead_letter_queue.flush_interval: 5000
280 |
281 | # If using dead_letter_queue.enable: true, controls which entries should be dropped to avoid exceeding the size limit.
282 | # Set the value to `drop_newer` (default) to stop accepting new events that would push the DLQ size over the limit.
283 | # Set the value to `drop_older` to remove queue pages containing the oldest events to make space for new ones.
284 | #
285 | # dead_letter_queue.storage_policy: drop_newer
286 |
287 | # If using dead_letter_queue.enable: true, the interval for which events are considered valid. After the interval has
288 | # expired, the events may be automatically deleted from the DLQ.
289 | # The interval can be expressed in days, hours, minutes or seconds, using a postfix notation like 5d
290 | # to represent a five-day interval.
291 | # The available units are d, h, m and s for days, hours, minutes and seconds respectively.
292 | # If not specified, the DLQ doesn't apply any age policy when cleaning events.
293 | #
294 | # dead_letter_queue.retain.age: 1d
295 |
296 | # If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
297 | # Default is path.data/dead_letter_queue
298 | #
299 | # path.dead_letter_queue:
300 | #
301 | # ------------ Debugging Settings --------------
302 | #
303 | # Options for log.level:
304 | # * fatal
305 | # * error
306 | # * warn
307 | # * info (default)
308 | # * debug
309 | # * trace
310 | # log.level: info
311 | #
312 | # Options for log.format:
313 | # * plain (default)
314 | # * json
315 | #
316 | # log.format: plain
317 | # log.format.json.fix_duplicate_message_fields: false
318 | #
319 | # path.logs:
320 | #
321 | # ------------ Other Settings --------------
322 | #
323 | # Allow or block running Logstash as superuser (default: true)
324 | # allow_superuser: false
325 | #
326 | # Where to find custom plugins
327 | # path.plugins: []
328 | #
329 | # Flag to output log lines of each pipeline in its separate log file. Each log filename contains the pipeline.name
330 | # Default is false
331 | # pipeline.separate_logs: false
332 | #
333 | # Determines where to allocate memory buffers for plugins that leverage them.
334 | # Defaults to direct; it can optionally be switched to heap to use Java heap space.
335 | # pipeline.buffer.type: heap
336 | #
337 | # ------------ X-Pack Settings (not applicable for OSS build)--------------
338 | #
339 | # X-Pack Monitoring
340 | # https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
341 | #xpack.monitoring.enabled: false
342 | #xpack.monitoring.elasticsearch.username: logstash_system
343 | #xpack.monitoring.elasticsearch.password: password
344 | #xpack.monitoring.elasticsearch.proxy: ["http://proxy:port"]
345 | #xpack.monitoring.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
346 | # an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
347 | #xpack.monitoring.elasticsearch.cloud_id: monitoring_cluster_id:xxxxxxxxxx
348 | #xpack.monitoring.elasticsearch.cloud_auth: logstash_system:password
349 | # another authentication alternative is to use an Elasticsearch API key
350 | #xpack.monitoring.elasticsearch.api_key: "id:api_key"
351 | #xpack.monitoring.elasticsearch.ssl.certificate_authority: "/path/to/ca.crt"
352 | #xpack.monitoring.elasticsearch.ssl.ca_trusted_fingerprint: xxxxxxxxxx
353 | #xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
354 | #xpack.monitoring.elasticsearch.ssl.truststore.password: password
355 | # use either keystore.path/keystore.password or certificate/key configurations
356 | #xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
357 | #xpack.monitoring.elasticsearch.ssl.keystore.password: password
358 | #xpack.monitoring.elasticsearch.ssl.certificate: /path/to/file
359 | #xpack.monitoring.elasticsearch.ssl.key: /path/to/key
360 | #xpack.monitoring.elasticsearch.ssl.verification_mode: full
361 | #xpack.monitoring.elasticsearch.ssl.cipher_suites: []
362 | #xpack.monitoring.elasticsearch.sniffing: false
363 | #xpack.monitoring.collection.interval: 10s
364 | #xpack.monitoring.collection.pipeline.details.enabled: true
365 | #
366 | # X-Pack Management
367 | # https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
368 | #xpack.management.enabled: false
369 | #xpack.management.pipeline.id: ["main", "apache_logs"]
370 | #xpack.management.elasticsearch.username: logstash_admin_user
371 | #xpack.management.elasticsearch.password: password
372 | #xpack.management.elasticsearch.proxy: ["http://proxy:port"]
373 | #xpack.management.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
374 | # an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
375 | #xpack.management.elasticsearch.cloud_id: management_cluster_id:xxxxxxxxxx
376 | #xpack.management.elasticsearch.cloud_auth: logstash_admin_user:password
377 | # another authentication alternative is to use an Elasticsearch API key
378 | #xpack.management.elasticsearch.api_key: "id:api_key"
379 | #xpack.management.elasticsearch.ssl.ca_trusted_fingerprint: xxxxxxxxxx
380 | #xpack.management.elasticsearch.ssl.certificate_authority: "/path/to/ca.crt"
381 | #xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
382 | #xpack.management.elasticsearch.ssl.truststore.password: password
383 | # use either keystore.path/keystore.password or certificate/key configurations
384 | #xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
385 | #xpack.management.elasticsearch.ssl.keystore.password: password
386 | #xpack.management.elasticsearch.ssl.certificate: /path/to/file
387 | #xpack.management.elasticsearch.ssl.key: /path/to/certificate_key_file
388 | #xpack.management.elasticsearch.ssl.cipher_suites: []
389 | #xpack.management.elasticsearch.ssl.verification_mode: full
390 | #xpack.management.elasticsearch.sniffing: false
391 | #xpack.management.logstash.poll_interval: 5s
392 |
393 | # X-Pack GeoIP Database Management
394 | # https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html#plugins-filters-geoip-manage_update
395 | #xpack.geoip.downloader.enabled: true
396 | #xpack.geoip.downloader.endpoint: "https://geoip.elastic.co/v1/database"
397 |
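A quick way to confirm the API settings above once the node is running is to query the monitoring API on the configured port range. A sketch, assuming the defaults api.enabled: true and api.http.port: 9600-9700 are kept:

  # node info (pipeline, os and jvm sections)
  curl -s 'http://127.0.0.1:9600/_node?pretty'
  # per-pipeline event and queue statistics
  curl -s 'http://127.0.0.1:9600/_node/stats/pipelines?pretty'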
--------------------------------------------------------------------------------
/workload/mongo-db/4.4.4/conf/server.conf:
--------------------------------------------------------------------------------
1 | net:
2 | bindIp: 0.0.0.0
3 | port: 7000
4 | storage:
5 | dbPath: "/opt/mongo-db/data"
6 | journal:
7 | enabled: true
8 | systemLog:
9 | destination: file
10 | path: "/opt/mongo-db/log/server.log"
11 | logAppend: true
12 | processManagement:
13 | fork: false
14 | pidFilePath: "/opt/mongo-db/system/server.pid"
15 |
--------------------------------------------------------------------------------
/workload/mongo-db/4.4.4/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:mongo-db]
2 | command=bash -c "mongod -f /opt/mongo-db/conf/server.conf"
3 | process_name=%(program_name)s
4 | pidfile=/opt/mongo-db/system/server.pid
5 | autorestart=false
6 |
--------------------------------------------------------------------------------
/workload/mongo-db/4.4.4/replica-cluster/conf/server.conf:
--------------------------------------------------------------------------------
1 | net:
2 | bindIp: 0.0.0.0
3 | port: 7000
4 | replication:
5 | replSetName: main
6 | storage:
7 | dbPath: "/opt/mongo-db/data"
8 | journal:
9 | enabled: true
10 | systemLog:
11 | destination: file
12 | path: "/opt/mongo-db/log/server.log"
13 | logAppend: true
14 | processManagement:
15 | fork: false
16 | pidFilePath: "/opt/mongo-db/system/server.pid"
17 |
--------------------------------------------------------------------------------
/workload/mongo-db/4.4.4/replica-cluster/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [group:mongo-db]
2 | programs=init,server
3 |
4 | [program:init]
5 | command=bash -c "sleep 3 && mongosh /opt/mongo-db/script/init.js --port 7000"
6 | process_name=%(program_name)s
7 | autorestart=false
8 | exitcodes=0,1
9 | priority=200
10 |
11 | [program:server]
12 | command=bash -c "mongod -f /opt/mongo-db/conf/server.conf"
13 | process_name=%(program_name)s
14 | pidfile=/opt/mongo-db/system/server.pid
15 | autorestart=false
16 | priority=100
17 |
--------------------------------------------------------------------------------
/workload/mongo-db/4.4.4/replica-cluster/script/init.js:
--------------------------------------------------------------------------------
1 | // Initialize MongoDB replica cluster.
2 | var client = new Mongo('mongo-db-1:7000');
3 | var database = client.getDB('admin');
4 | var status = null;
5 | try {
6 | status = rs.status();
7 | print('MongoDB replica cluster already initialized.');
8 | } catch(exception) {
9 | if(exception['codeName']=='NotYetInitialized') {
10 | print('Initializing MongoDB replica cluster...');
11 | rs.initiate({
12 | '_id':'main',
13 | 'members':[
14 | {'_id':1,'host':'mongo-db-1:7000'},
15 | {'_id':2,'host':'mongo-db-2:7000'},
16 | {'_id':3,'host':'mongo-db-3:7000'}
17 | ]
18 | });
19 | print('MongoDB replica cluster initialized.');
20 | }
21 | }
22 |
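Once the init program above has run, replica set membership can be verified from any of the nodes; host name and port are taken from the configs above, and mongosh is the same client the supervisor init program uses:

  # print each member's host and state (PRIMARY/SECONDARY)
  mongosh --host mongo-db-1 --port 7000 --quiet \
    --eval 'rs.status().members.forEach(function (m) { print(m.name, m.stateStr); })'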
--------------------------------------------------------------------------------
/workload/nginx/1.14.0/conf/server.conf:
--------------------------------------------------------------------------------
1 | user root;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /opt/nginx/system/server.pid;
6 | daemon off;
7 |
8 | events {
9 | worker_connections 1024;
10 | }
11 |
12 |
13 | http {
14 | include /etc/nginx/mime.types;
15 | default_type application/octet-stream;
16 |
17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
18 | '$status $body_bytes_sent "$http_referer" '
19 | '"$http_user_agent" "$http_x_forwarded_for"';
20 |
21 | access_log /var/log/nginx/access.log main;
22 |
23 | sendfile on;
24 | #tcp_nopush on;
25 |
26 | keepalive_timeout 65;
27 |
28 | #gzip on;
29 |
30 | include /etc/nginx/conf.d/*.conf;
31 | }
32 |
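The configuration can be validated before supervisor launches it; a sketch, with the /opt/nginx/conf path matching the supervisor.ini below:

  # parse the config and every file pulled in via include, then exit
  nginx -t -c /opt/nginx/conf/server.conf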
--------------------------------------------------------------------------------
/workload/nginx/1.14.0/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:nginx]
2 | command=bash -c "nginx -c /opt/nginx/conf/server.conf"
3 | process_name=%(program_name)s
4 | pidfile=/opt/nginx/system/server.pid
5 | autorestart=false
6 |
--------------------------------------------------------------------------------
/workload/nginx/1.24.0/conf/server.conf:
--------------------------------------------------------------------------------
1 | user root;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /opt/nginx/system/server.pid;
6 | daemon off;
7 |
8 | events {
9 | worker_connections 1024;
10 | }
11 |
12 |
13 | http {
14 | include /etc/nginx/mime.types;
15 | default_type application/octet-stream;
16 |
17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
18 | '$status $body_bytes_sent "$http_referer" '
19 | '"$http_user_agent" "$http_x_forwarded_for"';
20 |
21 | access_log /var/log/nginx/access.log main;
22 |
23 | sendfile on;
24 | #tcp_nopush on;
25 |
26 | keepalive_timeout 65;
27 |
28 | #gzip on;
29 |
30 | include /etc/nginx/conf.d/*.conf;
31 | }
32 |
--------------------------------------------------------------------------------
/workload/nginx/1.24.0/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:nginx]
2 | command=bash -c "nginx -c /opt/nginx/conf/server.conf"
3 | process_name=%(program_name)s
4 | pidfile=/opt/nginx/system/server.pid
5 | autorestart=false
6 |
--------------------------------------------------------------------------------
/workload/postgresql/16.4/conf/hba.conf:
--------------------------------------------------------------------------------
1 | # PostgreSQL Client Authentication Configuration File
2 | # ===================================================
3 | #
4 | # Refer to the "Client Authentication" section in the PostgreSQL
5 | # documentation for a complete description of this file. A short
6 | # synopsis follows.
7 | #
8 | # ----------------------
9 | # Authentication Records
10 | # ----------------------
11 | #
12 | # This file controls: which hosts are allowed to connect, how clients
13 | # are authenticated, which PostgreSQL user names they can use, which
14 | # databases they can access. Records take one of these forms:
15 | #
16 | # local DATABASE USER METHOD [OPTIONS]
17 | # host DATABASE USER ADDRESS METHOD [OPTIONS]
18 | # hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
19 | # hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
20 | # hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
21 | # hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
22 | #
23 | # (The uppercase items must be replaced by actual values.)
24 | #
25 | # The first field is the connection type:
26 | # - "local" is a Unix-domain socket
27 | # - "host" is a TCP/IP socket (encrypted or not)
28 | # - "hostssl" is a TCP/IP socket that is SSL-encrypted
29 | # - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
30 | # - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
31 | # - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
32 | #
33 | # DATABASE can be "all", "sameuser", "samerole", "replication", a
34 | # database name, a regular expression (if it starts with a slash (/))
35 | # or a comma-separated list thereof. The "all" keyword does not match
36 | # "replication". Access to replication must be enabled in a separate
37 | # record (see example below).
38 | #
39 | # USER can be "all", a user name, a group name prefixed with "+", a
40 | # regular expression (if it starts with a slash (/)) or a comma-separated
41 | # list thereof. In both the DATABASE and USER fields you can also write
42 | # a file name prefixed with "@" to include names from a separate file.
43 | #
44 | # ADDRESS specifies the set of hosts the record matches. It can be a
45 | # host name, or it is made up of an IP address and a CIDR mask that is
46 | # an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
47 | # specifies the number of significant bits in the mask. A host name
48 | # that starts with a dot (.) matches a suffix of the actual host name.
49 | # Alternatively, you can write an IP address and netmask in separate
50 | # columns to specify the set of hosts. Instead of a CIDR-address, you
51 | # can write "samehost" to match any of the server's own IP addresses,
52 | # or "samenet" to match any address in any subnet that the server is
53 | # directly connected to.
54 | #
55 | # METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
56 | # "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
57 | # Note that "password" sends passwords in clear text; "md5" or
58 | # "scram-sha-256" are preferred since they send encrypted passwords.
59 | #
60 | # OPTIONS are a set of options for the authentication in the format
61 | # NAME=VALUE. The available options depend on the different
62 | # authentication methods -- refer to the "Client Authentication"
63 | # section in the documentation for a list of which options are
64 | # available for which authentication methods.
65 | #
66 | # Database and user names containing spaces, commas, quotes and other
67 | # special characters must be quoted. Quoting one of the keywords
68 | # "all", "sameuser", "samerole" or "replication" makes the name lose
69 | # its special character, and just match a database or username with
70 | # that name.
71 | #
72 | # ---------------
73 | # Include Records
74 | # ---------------
75 | #
76 | # This file allows the inclusion of external files or directories holding
77 | # more records, using the following keywords:
78 | #
79 | # include FILE
80 | # include_if_exists FILE
81 | # include_dir DIRECTORY
82 | #
83 | # FILE is the file name to include, and DIRECTORY is the directory name containing
84 | # the file(s) to include. Any file in a directory will be loaded if suffixed
85 | # with ".conf". The files of a directory are ordered by name.
86 | # include_if_exists ignores missing files. FILE and DIRECTORY can be
87 | # specified as a relative or an absolute path, and can be double-quoted if
88 | # they contain spaces.
89 | #
90 | # -------------
91 | # Miscellaneous
92 | # -------------
93 | #
94 | # This file is read on server startup and when the server receives a
95 | # SIGHUP signal. If you edit the file on a running system, you have to
96 | # SIGHUP the server for the changes to take effect, run "pg_ctl reload",
97 | # or execute "SELECT pg_reload_conf()".
98 | #
99 | # ----------------------------------
100 | # Put your actual configuration here
101 | # ----------------------------------
102 | #
103 | # If you want to allow non-local connections, you need to add more
104 | # "host" records. In that case you will also need to make PostgreSQL
105 | # listen on a non-local interface via the listen_addresses
106 | # configuration parameter, or via the -i or -h command line switches.
107 |
108 | # CAUTION: Configuring the system for local "trust" authentication
109 | # allows any local user to connect as any PostgreSQL user, including
110 | # the database superuser. If you do not trust all your local users,
111 | # use another authentication method.
112 |
113 |
114 | # TYPE DATABASE USER ADDRESS METHOD
115 |
116 | # "local" is for Unix domain socket connections only
117 | local all all trust
118 | # IPv4 local connections:
119 | host all all 127.0.0.1/32 trust
120 | host all all 0.0.0.0/0 trust
121 | # IPv6 local connections:
122 | host all all ::1/128 trust
123 | # Allow replication connections from localhost, by a user with the
124 | # replication privilege.
125 | local replication all trust
126 | host replication all 127.0.0.1/32 trust
127 | host replication all ::1/128 trust
128 |
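After changing this file on a running instance, the rules can be reloaded and the parsed result inspected without a restart. A sketch, assuming a local superuser connection as the postgresql role created by the supervisor init program:

  # re-read pg_hba.conf (and postgresql.conf) without restarting
  psql -U postgresql -d postgres -c "SELECT pg_reload_conf();"
  # show the rules exactly as the server parsed them, including any errors
  psql -U postgresql -d postgres -c "SELECT line_number, type, database, user_name, address, auth_method, error FROM pg_hba_file_rules;"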
--------------------------------------------------------------------------------
/workload/postgresql/16.4/conf/ident.conf:
--------------------------------------------------------------------------------
1 | # PostgreSQL User Name Maps
2 | # =========================
3 | #
4 | # ---------------
5 | # Mapping Records
6 | # ---------------
7 | #
8 | # Refer to the PostgreSQL documentation, chapter "Client
9 | # Authentication" for a complete description. A short synopsis
10 | # follows.
11 | #
12 | # This file controls PostgreSQL user name mapping. It maps external
13 | # user names to their corresponding PostgreSQL user names. Records
14 | # are of the form:
15 | #
16 | # MAPNAME SYSTEM-USERNAME PG-USERNAME
17 | #
18 | # (The uppercase quantities must be replaced by actual values.)
19 | #
20 | # MAPNAME is the (otherwise freely chosen) map name that was used in
21 | # pg_hba.conf. SYSTEM-USERNAME is the detected user name of the
22 | # client. PG-USERNAME is the requested PostgreSQL user name. The
23 | # existence of a record specifies that SYSTEM-USERNAME may connect as
24 | # PG-USERNAME.
25 | #
26 | # If SYSTEM-USERNAME starts with a slash (/), it will be treated as a
27 | # regular expression. Optionally this can contain a capture (a
28 | # parenthesized subexpression). The substring matching the capture
29 | # will be substituted for \1 (backslash-one) if present in
30 | # PG-USERNAME.
31 | #
32 | # PG-USERNAME can be "all", a user name, a group name prefixed with "+", or
33 | # a regular expression (if it starts with a slash (/)). If it is a regular
34 | # expression, the substring matching with \1 has no effect.
35 | #
36 | # Multiple maps may be specified in this file and used by pg_hba.conf.
37 | #
38 | # No map names are defined in the default configuration. If all
39 | # system user names and PostgreSQL user names are the same, you don't
40 | # need anything in this file.
41 | #
42 | # ---------------
43 | # Include Records
44 | # ---------------
45 | #
46 | # This file allows the inclusion of external files or directories holding
47 | # more records, using the following keywords:
48 | #
49 | # include FILE
50 | # include_if_exists FILE
51 | # include_dir DIRECTORY
52 | #
53 | # FILE is the file name to include, and DIRECTORY is the directory name containing
54 | # the file(s) to include. Any file in a directory will be loaded if suffixed
55 | # with ".conf". The files of a directory are ordered by name.
56 | # include_if_exists ignores missing files. FILE and DIRECTORY can be
57 | # specified as a relative or an absolute path, and can be double-quoted if
58 | # they contain spaces.
59 | #
60 | # -------------------------------
61 | # Miscellaneous
62 | # -------------------------------
63 | #
64 | # This file is read on server startup and when the postmaster receives
65 | # a SIGHUP signal. If you edit the file on a running system, you have
66 | # to SIGHUP the postmaster for the changes to take effect. You can
67 | # use "pg_ctl reload" to do that.
68 |
69 | # Put your actual configuration here
70 | # ----------------------------------
71 |
72 | # MAPNAME SYSTEM-USERNAME PG-USERNAME
73 |
--------------------------------------------------------------------------------
/workload/postgresql/16.4/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [group:postgresql]
2 | programs=init,server
3 |
4 | [program:init]
5 | command=su postgresql bash -c "initdb -D /opt/postgresql/data"
6 | process_name=%(program_name)s
7 | autorestart=false
8 | exitcodes=0,1
9 | priority=100
10 |
11 | [program:server]
12 | command=su postgresql bash -c "sleep 3 && postgres -c config_file=/opt/postgresql/conf/server.conf"
13 | process_name=%(program_name)s
14 | pidfile=/opt/postgresql/system/server.pid
15 | autorestart=false
16 | priority=200
17 |
--------------------------------------------------------------------------------
/workload/redis/4.0.9/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:redis]
2 | command=bash -c "redis-server /opt/redis/conf/server.conf"
3 | process_name=%(program_name)s
4 | pidfile=/opt/redis/system/server.pid
5 | autorestart=false
6 |
--------------------------------------------------------------------------------
/workload/redis/7.2.1/cluster/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [group:redis]
2 | programs=init,server
3 |
4 | [program:init]
5 | command=bash -c "/opt/redis/script/init.sh"
6 | process_name=%(program_name)s
7 | autorestart=false
8 |
9 | [program:server]
10 | command=bash -c "redis-server /opt/redis/conf/server.conf"
11 | process_name=%(program_name)s
12 | pidfile=/opt/redis/system/server.pid
13 | autorestart=false
14 |
--------------------------------------------------------------------------------
/workload/redis/7.2.1/cluster/script/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function wait_for_redis_node() {
4 | local node=$1;
5 | until redis-cli -h $node -p 3000 ping > /dev/null 2>&1; do
6 | printf "Waiting for $node...\n";
7 | sleep 5;
8 | done;
9 | printf "$node is ready.\n";
10 | }
11 |
12 | function create_redis_cluster() {
13 | yes yes | redis-cli --cluster create redis-{1..6}:3000 --cluster-replicas 1;
14 | printf "Redis cluster started.\n";
15 | }
16 |
17 | printf "Starting Redis cluster initialization...\n";
18 |
19 | for node in redis-{1..6}; do
20 | wait_for_redis_node $node;
21 | done;
22 |
23 | create_redis_cluster;
24 |
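After create_redis_cluster has finished, cluster health can be checked from any member, using the node names and port 3000 from the script above:

  # overall cluster state, slot assignment and known-node count
  redis-cli -h redis-1 -p 3000 cluster info
  # verify that all 16384 slots are covered and replicas are attached
  redis-cli --cluster check redis-1:3000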
--------------------------------------------------------------------------------
/workload/redis/7.2.1/conf/supervisor.ini:
--------------------------------------------------------------------------------
1 | [program:redis]
2 | command=bash -c "redis-server /opt/redis/conf/server.conf"
3 | process_name=%(program_name)s
4 | pidfile=/opt/redis/system/server.pid
5 | autorestart=false
6 |
--------------------------------------------------------------------------------
/workload/supervisor/conf/server.conf:
--------------------------------------------------------------------------------
1 | ; Sample supervisor config file.
2 |
3 | [unix_http_server]
4 | file=/var/run/supervisor.sock ; (the path to the socket file)
5 | ;chmod=0700 ; socket file mode (default 0700)
6 | ;chown=nobody:nogroup ; socket file uid:gid owner
7 | ;username=user ; (default is no username (open server))
8 | ;password=123 ; (default is no password (open server))
9 |
10 | ;[inet_http_server] ; inet (TCP) server disabled by default
11 | ;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface)
12 | ;username=user ; (default is no username (open server))
13 | ;password=123 ; (default is no password (open server))
14 |
15 | [supervisord]
16 | logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log)
17 | logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
18 | logfile_backups=10 ; (num of main logfile rotation backups;default 10)
19 | loglevel=info ; (log level;default info; others: debug,warn,trace)
20 | pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
21 | nodaemon=true ; (start in foreground if true;default false)
22 | minfds=1024 ; (min. avail startup file descriptors;default 1024)
23 | minprocs=200 ; (min. avail process descriptors;default 200)
24 | ;umask=022 ; (process file creation umask;default 022)
25 | ;user=chrism ; (default is current user, required if root)
26 | ;identifier=supervisor ; (supervisord identifier, default is 'supervisor')
27 | ;directory=/tmp ; (default is not to cd during start)
28 | ;nocleanup=true ; (don't clean up tempfiles at start;default false)
29 | ;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
30 | ;environment=KEY=value ; (key value pairs to add to environment)
31 | ;strip_ansi=false ; (strip ansi escape codes in logs; def. false)
32 |
33 | ; the below section must remain in the config file for RPC
34 | ; (supervisorctl/web interface) to work, additional interfaces may be
35 | ; added by defining them in separate rpcinterface: sections
36 | [rpcinterface:supervisor]
37 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
38 |
39 | [supervisorctl]
40 | serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
41 | ;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
42 | ;username=chris ; should be same as http_username if set
43 | ;password=123 ; should be same as http_password if set
44 | ;prompt=mysupervisor ; cmd line prompt (default "supervisor")
45 | ;history_file=~/.sc_history ; use readline history if available
46 |
47 | ; The below sample program section shows all possible program subsection values,
48 | ; create one or more 'real' program: sections to be able to control them under
49 | ; supervisor.
50 |
51 | ;[program:theprogramname]
52 | ;command=/bin/cat ; the program (relative uses PATH, can take args)
53 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
54 | ;numprocs=1 ; number of process copies to start (def 1)
55 | ;directory=/tmp ; directory to cwd to before exec (def no cwd)
56 | ;umask=022 ; umask for process (default None)
57 | ;priority=999 ; the relative start priority (default 999)
58 | ;autostart=true ; start at supervisord start (default: true)
59 | ;autorestart=true ; restart at unexpected quit (default: true)
60 | ;startsecs=10 ; number of secs prog must stay running (def. 1)
61 | ;startretries=3 ; max # of serial start failures (default 3)
62 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
63 | ;stopsignal=QUIT ; signal used to kill process (default TERM)
64 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
65 | ;user=chrism ; setuid to this UNIX account to run the program
66 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false)
67 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
68 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
69 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
70 | ;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
71 | ;stdout_events_enabled=false ; emit events on stdout writes (default false)
72 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
73 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
74 | ;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
75 | ;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
76 | ;stderr_events_enabled=false ; emit events on stderr writes (default false)
77 | ;environment=A=1,B=2 ; process environment additions (def no adds)
78 | ;serverurl=AUTO ; override serverurl computation (childutils)
79 |
80 | ; The below sample eventlistener section shows all possible
81 | ; eventlistener subsection values, create one or more 'real'
82 | ; eventlistener: sections to be able to handle event notifications
83 | ; sent by supervisor.
84 |
85 | ;[eventlistener:theeventlistenername]
86 | ;command=/bin/eventlistener ; the program (relative uses PATH, can take args)
87 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
88 | ;numprocs=1 ; number of process copies to start (def 1)
89 | ;events=EVENT ; event notif. types to subscribe to (req'd)
90 | ;buffer_size=10 ; event buffer queue size (default 10)
91 | ;directory=/tmp ; directory to cwd to before exec (def no cwd)
92 | ;umask=022 ; umask for process (default None)
93 | ;priority=-1 ; the relative start priority (default -1)
94 | ;autostart=true ; start at supervisord start (default: true)
95 | ;autorestart=unexpected ; restart at unexpected quit (default: unexpected)
96 | ;startsecs=10 ; number of secs prog must stay running (def. 1)
97 | ;startretries=3 ; max # of serial start failures (default 3)
98 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
99 | ;stopsignal=QUIT ; signal used to kill process (default TERM)
100 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
101 | ;user=chrism ; setuid to this UNIX account to run the program
102 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false)
103 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
104 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
105 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
106 | ;stdout_events_enabled=false ; emit events on stdout writes (default false)
107 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
108 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
109 | ;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
110 | ;stderr_events_enabled=false ; emit events on stderr writes (default false)
111 | ;environment=A=1,B=2 ; process environment additions
112 | ;serverurl=AUTO ; override serverurl computation (childutils)
113 |
114 | ; The below sample group section shows all possible group values,
115 | ; create one or more 'real' group: sections to create "heterogeneous"
116 | ; process groups.
117 |
118 | ;[group:thegroupname]
119 | ;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions
120 | ;priority=999 ; the relative start priority (default 999)
121 |
122 | ; The [include] section can just contain the "files" setting. This
123 | ; setting can list multiple files (separated by whitespace or
124 | ; newlines). It can also contain wildcards. The filenames are
125 | ; interpreted as relative to this file. Included files *cannot*
126 | ; include files themselves.
127 |
128 | [include]
129 | files = supervisord.d/*.ini
130 |
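With the unix socket and [include] section above, the per-workload programs dropped into supervisord.d/*.ini can be driven through supervisorctl. A sketch; the /etc/supervisord.conf path is an assumption, so point -c at wherever the image installs this file:

  # list every managed program/group and its state
  supervisorctl -c /etc/supervisord.conf status
  # restart a single program, or a whole group such as mongo-db
  supervisorctl -c /etc/supervisord.conf restart nginx
  supervisorctl -c /etc/supervisord.conf restart mongo-db:*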
--------------------------------------------------------------------------------