├── .gitignore
├── cockroachdb
│   ├── README.md
│   └── kontena.yml
├── drone
│   ├── README.md
│   ├── github.yml
│   └── gitlab.yml
├── efk
│   └── README.md
├── elasticsearch
│   ├── README.md
│   └── kontena.yml
├── fluentd
│   ├── README.md
│   ├── cloudwatch-logs
│   │   ├── Dockerfile
│   │   └── fluent.conf
│   ├── cloudwatch.yml
│   ├── elasticsearch.yml
│   ├── elasticsearch
│   │   ├── Dockerfile
│   │   ├── elasticsearch-template-es5x.json
│   │   └── fluent.conf
│   ├── gelf.yml
│   ├── gelf
│   │   ├── Dockerfile
│   │   └── fluent.conf
│   ├── loggly.yml
│   ├── loggly
│   │   ├── Dockerfile
│   │   └── fluent.conf
│   ├── splunkhec.yml
│   └── splunkhec
│       ├── Dockerfile
│       └── fluent.conf
├── ingress-lb
│   ├── README.md
│   └── kontena.yml
├── kafka
│   ├── README.md
│   ├── docker
│   │   ├── Dockerfile
│   │   └── kafka.run
│   └── kontena.yml
├── kibana
│   ├── README.md
│   └── kontena.yml
├── kong-dashboard
│   ├── README.md
│   └── kontena.yml
├── kong
│   ├── README.md
│   └── kontena.yml
├── mariadb
│   ├── README.md
│   └── kontena.yml
├── mongodb-replica-set
│   ├── README.md
│   └── kontena.yml
├── rabbitmq
│   ├── kontena.yml
│   └── readme.md
├── redis-sentinel
│   ├── 3.0
│   │   ├── Dockerfile
│   │   └── entrypoint.sh
│   ├── README.md
│   └── kontena.yml
├── redis
│   ├── README.md
│   └── kontena.yml
├── stolon
│   ├── README.md
│   └── kontena.yml
├── wordpress-cluster
│   ├── README.md
│   └── kontena.yml
└── zookeeper
    ├── README.md
    ├── docker
    │   ├── Dockerfile
    │   └── zookeeper.docker-entrypoint.sh
    └── kontena.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
--------------------------------------------------------------------------------
/cockroachdb/README.md:
--------------------------------------------------------------------------------
1 | # CockroachDB on Kontena
2 |
3 | [CockroachDB](https://www.cockroachlabs.com/product/cockroachdb-core/) is a distributed SQL database built on a transactional and strongly-consistent key-value store. It scales horizontally; survives disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports strongly-consistent ACID transactions; and provides a familiar SQL API for structuring, manipulating, and querying data.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](https://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](https://www.kontena.io/docs/quick-start).
7 |
8 |
9 | First create a volume configuration for the CockroachDB deployment:
10 |
11 | ```
12 | $ kontena volume create --driver local --scope instance cockroachdb-data
13 | ```
14 |
15 | Then deploy CockroachDB stack:
16 |
17 | ```
18 | $ kontena stack install kontena/cockroachdb
19 | > Version : 1.1.3
20 | > Cluster size : 3
21 | > Affinity : label==~cockroachdb
22 | [done] Installing stack cockroachdb
23 | [done] Triggering deployment of stack cockroachdb
24 | [done] Waiting for deployment to start
25 | [done] Deploying service seed
26 | [done] Deploying service node
27 | [done] Deploying service lb
28 | ```
29 |
30 | ## Administration
31 |
32 | You can find the CockroachDB admin web UI at `http://cockroachdb.${GRID}.kontena.local:8080` (accessible via [Kontena VPN](https://www.kontena.io/docs/using-kontena/vpn-access.html)).
33 |
34 | **Note:** Replace `${GRID}` with your grid name.
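35 |
36 | You can also open a SQL shell inside a running instance with `kontena service exec`; a minimal sketch, assuming the default stack name `cockroachdb` (the binary path matches the `init` hook in `kontena.yml` below):
37 |
38 | ```
39 | $ kontena service exec -it cockroachdb/node /cockroach/cockroach sql --insecure
40 | ```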
--------------------------------------------------------------------------------
/cockroachdb/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/cockroachdb
2 | version: 0.2.0
3 | description: CockroachDB is an open source, survivable, strongly consistent, scale-out SQL database.
4 | expose: lb
5 | variables:
6 | version:
7 | type: string
8 | default: "1.1.3"
9 | from:
10 | prompt: Version
11 | cluster_size:
12 | type: integer
13 | default: 3
14 | from:
15 | prompt: Cluster size (min 3)
16 | memory_limit:
17 | type: integer
18 | default: 1024
19 | from:
20 | prompt: Node memory limit (MB)
21 | affinity:
22 | type: string
23 | default: label==~cockroach
24 | from:
25 | prompt: Affinity
26 | cache_size:
27 | type: integer
28 | from:
29 | evaluate: ${memory_limit} * 0.25
30 | services:
31 | node:
32 |     image: "cockroachdb/cockroach:v{{ version }}"
33 | stateful: true
34 | instances: {{ cluster_size }}
35 | command: "start --logtostderr --insecure --cache {{ cache_size }}MB --max-sql-memory {{ cache_size }}MB --join node-1,node-2,node-3"
36 | mem_limit: "{{ memory_limit }}m"
37 | deploy:
38 | wait_for_port: 26257
39 | affinity:
40 | - {{ affinity }}
41 | environment:
42 | KONTENA_LB_EXTERNAL_PORT: 26257
43 | KONTENA_LB_INTERNAL_PORT: 26257
44 | KONTENA_LB_MODE: tcp
45 | health_check:
46 | protocol: tcp
47 | initial_delay: 120
48 | port: 26257
49 | hooks:
50 | post_start:
51 | - name: init
52 | cmd: sleep 10 && /cockroach/cockroach init --insecure
53 | instances: 1
54 | oneshot: true
55 | volumes:
56 | - data:/cockroach/cockroach-data
57 | links:
58 | - lb
59 | lb:
60 | image: kontena/lb:latest
61 | instances: 2
62 | affinity:
63 | - {{ affinity }}
64 | volumes:
65 | data:
66 | external:
67 | name: ${STACK}-data
68 |
--------------------------------------------------------------------------------
/drone/README.md:
--------------------------------------------------------------------------------
1 | # Drone on Kontena
2 |
3 | [Drone](https://github.com/drone/drone) is a Continuous Delivery system built on container technology. Drone uses a simple yaml configuration file, a superset of docker-compose, to define and execute Pipelines inside Docker containers.
4 |
5 | ## Install
6 |
7 | > Prerequisites: You need to have a working [Kontena](https://kontena.io) Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/quick-start).
8 |
9 | ### Install Kontena Load Balancer
10 |
11 | You need to have [Kontena Loadbalancer](https://www.kontena.io/docs/using-kontena/loadbalancer.html) installed. You can install one with the following command:
12 |
13 | ```
14 | $ kontena stack install kontena/ingress-lb
15 | ```
16 |
17 | ### Drone with GitHub integration
18 |
19 | ```
20 | $ kontena stack install kontena/drone-github
21 | ```
22 |
23 | ### Drone with GitLab
24 |
25 | ```
26 | $ kontena stack install kontena/drone-gitlab
27 | ```
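28 |
29 | Both variants can also take settings from environment variables instead of interactive prompts; the variable names (`GITHUB_CLIENT`, `GITHUB_SECRET`, `GITLAB_URL`, `GITLAB_CLIENT`, `GITLAB_SECRET`, `LOADBALANCER`, `SECRET`) come from `github.yml` and `gitlab.yml` below. For example, a sketch for the GitHub variant (the client id and secret are placeholders):
30 |
31 | ```
32 | $ GITHUB_CLIENT=<client-id> GITHUB_SECRET=<client-secret> \
33 |     kontena stack install kontena/drone-github
34 | ```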
--------------------------------------------------------------------------------
/drone/github.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/drone-github
2 | version: 0.2.1
3 | description: Drone is a Continuous Delivery platform built on Docker
4 | variables:
5 | loadbalancer:
6 | type: string
7 | from:
8 | env: LOADBALANCER
9 | service_link:
10 | prompt: Choose a loadbalancer
11 | image: kontena/lb
12 | drone_host:
13 | type: string
14 | from:
15 | prompt: Drone hostname
16 | agent_instances:
17 | type: integer
18 | default: 1
19 | from:
20 | prompt: Number of Drone agents
21 | gh_client_id:
22 | type: string
23 | from:
24 | env: GITHUB_CLIENT
25 | vault: ${STACK}-gh-client
26 | prompt: GitHub client id
27 | to:
28 | vault: ${STACK}-gh-client
29 | gh_client_secret:
30 | type: string
31 | from:
32 | env: GITHUB_SECRET
33 | vault: ${STACK}-gh-secret
34 | prompt: GitHub secret
35 | to:
36 | vault: ${STACK}-gh-secret
37 | admins:
38 | type: string
39 | required: false
40 | from:
41 | prompt: Comma separated list of admin users
42 | organizations:
43 | type: string
44 | required: false
45 | from:
46 | prompt: Comma separated list of approved organizations
47 | shared_secret:
48 | type: string
49 | from:
50 | env: SECRET
51 | vault: ${STACK}-secret
52 | random_string: 24
53 | to:
54 | vault: ${STACK}-secret
55 | services:
56 | server:
57 | image: drone/drone:0.7
58 | instances: 1
59 | stateful: true
60 | mem_limit: 64m
61 | environment:
62 | - DRONE_DEBUG=true
63 | - DRONE_GITHUB=true
64 | - DRONE_OPEN=true
65 | # {% if admins %}
66 | - DRONE_ADMIN={{ admins }}
67 | # {% endif %}
68 | # {% if organizations %}
69 | - DRONE_ORGS={{ organizations }}
70 | # {% endif %}
71 | - DRONE_HOST=https://{{ drone_host }}
72 | - KONTENA_LB_INTERNAL_PORT=8000
73 | - KONTENA_LB_VIRTUAL_HOSTS={{ drone_host }}
74 | secrets:
75 | - secret: ${STACK}-gh-client
76 | name: DRONE_GITHUB_CLIENT
77 | type: env
78 | - secret: ${STACK}-gh-secret
79 | name: DRONE_GITHUB_SECRET
80 | type: env
81 | - secret: ${STACK}-secret
82 | name: DRONE_AGENT_SECRET
83 | type: env
84 | volumes:
85 | - /var/lib/drone
86 | links:
87 | - "{{ loadbalancer }}"
88 |
89 | agent:
90 | image: drone/drone:0.7
91 | command: agent
92 | instances: {{ agent_instances }}
93 | mem_limit: 64m
94 | depends_on:
95 | - server
96 | environment:
97 | - DRONE_SERVER=ws://server:8000/ws/broker
98 | - DOCKER_API_VERSION=1.24
99 | secrets:
100 | - secret: ${STACK}-secret
101 | name: DRONE_SECRET
102 | type: env
103 | volumes:
104 | - /var/run/docker.sock:/var/run/docker.sock
--------------------------------------------------------------------------------
/drone/gitlab.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/drone-gitlab
2 | version: 0.2.1
3 | description: Drone is a Continuous Delivery platform built on Docker
4 | variables:
5 | loadbalancer:
6 | type: string
7 | from:
8 | env: LOADBALANCER
9 | service_link:
10 | prompt: Choose a loadbalancer
11 | image: kontena/lb
12 | drone_host:
13 | type: string
14 | from:
15 | prompt: Drone hostname
16 | agent_instances:
17 | type: integer
18 | default: 1
19 | from:
20 | prompt: Number of Drone agents
21 | gl_url:
22 | type: string
23 | from:
24 | env: GITLAB_URL
25 | vault: ${STACK}-gl-url
26 | prompt: GitLab URL
27 | to:
28 | vault: ${STACK}-gl-url
29 | gl_client_id:
30 | type: string
31 | from:
32 | env: GITLAB_CLIENT
33 | vault: ${STACK}-gl-client
34 | prompt: GitLab client id
35 | to:
36 | vault: ${STACK}-gl-client
37 | gl_client_secret:
38 | type: string
39 | from:
40 | env: GITLAB_SECRET
41 | vault: ${STACK}-gl-secret
42 | prompt: GitLab secret
43 | to:
44 | vault: ${STACK}-gl-secret
45 | shared_secret:
46 | type: string
47 | from:
48 | env: SECRET
49 | vault: ${STACK}-secret
50 | random_string: 24
51 | to:
52 | vault: ${STACK}-secret
53 | services:
54 | server:
55 | image: drone/drone:0.7
56 | instances: 1
57 | stateful: true
58 | mem_limit: 64m
59 | environment:
60 | - DRONE_DEBUG=true
61 | - DRONE_GITLAB=true
62 | - DRONE_OPEN=true
63 | - DRONE_HOST=https://{{ drone_host }}
64 | - KONTENA_LB_INTERNAL_PORT=8000
65 | - KONTENA_LB_VIRTUAL_HOSTS={{ drone_host }}
66 | secrets:
67 | - secret: ${STACK}-gl-url
68 | name: DRONE_GITLAB_URL
69 | type: env
70 | - secret: ${STACK}-gl-client
71 | name: DRONE_GITLAB_CLIENT
72 | type: env
73 | - secret: ${STACK}-gl-secret
74 | name: DRONE_GITLAB_SECRET
75 | type: env
76 | - secret: ${STACK}-secret
77 | name: DRONE_AGENT_SECRET
78 | type: env
79 | volumes:
80 | - /var/lib/drone
81 | links:
82 | - "{{ loadbalancer }}"
83 |
84 | agent:
85 | image: drone/drone:0.7
86 | command: agent
87 | instances: {{ agent_instances }}
88 | mem_limit: 64m
89 | depends_on:
90 | - server
91 | environment:
92 | - DRONE_SERVER=ws://server:8000/ws/broker
93 | - DOCKER_API_VERSION=1.24
94 | secrets:
95 | - secret: ${STACK}-secret
96 | name: DRONE_SECRET
97 | type: env
98 | volumes:
99 | - /var/run/docker.sock:/var/run/docker.sock
--------------------------------------------------------------------------------
/efk/README.md:
--------------------------------------------------------------------------------
1 | # EFK Stack
2 |
3 | The EFK stack sets up log storage and analytics using Elasticsearch, Fluentd and Kibana.
4 |
5 | The Kontena EFK setup currently uses multiple stacks, which keeps all the parts highly re-usable.
6 |
7 | The following sections describe how to set up EFK using the ready-made stacks. It is advised to install the stacks in the order given here.
8 |
9 | ## Elasticsearch
10 |
11 | Set up Elasticsearch using the [`kontena/elasticsearch`](https://github.com/kontena/kontena-stacks/tree/master/elasticsearch) stack. It needs volumes to store Elasticsearch data persistently. Those volumes should be `instance`-scoped, as Elasticsearch itself replicates the data.
12 |
13 | The initial setup of the Elasticsearch stack creates new passwords in the Kontena secrets vault for the default Elasticsearch users and configures Elasticsearch to use them. The same secrets are also used automatically when installing the Kibana and Fluentd stacks.
14 |
15 | ## Fluentd
16 |
17 | Fluentd is used to receive the log data and to store it in Elasticsearch.
18 |
19 | Fluentd can be set up using the [`kontena/fluentd-elasticsearch`](https://github.com/kontena/kontena-stacks/tree/master/fluentd/elasticsearch) stack. You need to configure the Elasticsearch hostname where Fluentd will store the data.
20 |
21 |
22 | ## Kibana
23 |
24 | Kibana is used to query and visualize the logs stored and indexed in Elasticsearch.
25 |
26 | Kibana can be set up easily with the [`kontena/kibana`](https://github.com/kontena/kontena-stacks/tree/master/kibana) stack.
27 |
28 |
29 | ## TODO
30 |
31 | With the upcoming Kontena 1.4 stack dependency mechanism, it will be possible to create a "top-level" EFK stack that uses these other stacks as building blocks.
32 |
33 | So once 1.4 is generally available, a top-level EFK stack shall be created. :)
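34 |
35 | ## Quick reference
36 |
37 | As a sketch, an end-to-end installation with the default volume and stack names from the sections above looks like this:
38 |
39 | ```
40 | $ kontena volume create --scope instance --driver local elasticdata
41 | $ kontena stack install kontena/elasticsearch
42 | $ kontena stack install kontena/fluentd-elasticsearch
43 | $ kontena stack install kontena/kibana
44 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=localhost ${GRID}
45 | ```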
--------------------------------------------------------------------------------
/elasticsearch/README.md:
--------------------------------------------------------------------------------
1 | # Elasticsearch on Kontena
2 |
3 | [Elasticsearch](https://www.elastic.co/products/elasticsearch) is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases.
4 |
5 | ### Prerequisites
6 |
7 | You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start).
8 |
9 | Elasticsearch is a bit memory hungry, so your nodes should have plenty of memory available.
10 |
11 | The default operating system limit on mmap counts is likely to be too low, which may result in out-of-memory exceptions, so increase `vm.max_map_count` on the hosts. This can be done, for example, with:
12 | ```
13 | kontena node ssh node-1 sudo sysctl -w vm.max_map_count=262144
14 | ```
15 |
16 | ## Install
17 |
18 | The Elasticsearch stack needs an `instance`-scoped [volume](https://kontena.io/docs/using-kontena/volumes.html) named `elasticdata` to persist the data. The volume configuration can be created with:
19 | ```
20 | kontena volume create --scope instance --driver local elasticdata
21 | ```
22 |
23 | Install Elasticsearch stack
24 | `$ kontena stack install kontena/elasticsearch`
25 |
26 | This will deploy Elasticsearch. If you've specified more than one instance, Elasticsearch will automatically form a cluster. Before connecting any apps to it, it is a good idea to wait until the cluster status goes green. See https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html for how to check the cluster status.
27 |
28 |
29 |
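30 | For example, the status can be checked over [Kontena VPN](https://www.kontena.io/docs/using-kontena/vpn-access.html); this sketch assumes the default stack name, so Elasticsearch answers at `elasticsearch.${GRID}.kontena.local`, and reads the generated password from the vault:
31 |
32 | ```
33 | $ curl -u elastic:$(kontena vault read --value elastic-password) \
34 |     "http://elasticsearch.${GRID}.kontena.local:9200/_cluster/health?pretty"
35 | ```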
--------------------------------------------------------------------------------
/elasticsearch/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/elasticsearch
2 | version: 0.1.0
3 | description: Elasticsearch
4 | expose: elastic
5 | variables:
6 | instances:
7 | type: integer
8 | from:
9 | prompt: How many instances?
10 | reset_passwords:
11 | type: boolean
12 | from:
13 | prompt: Reset all default passwords during initial stack installation?
14 | elastic_password:
15 | only_if: reset_passwords
16 | type: string
17 | from:
18 | vault: elastic-password
19 | random_string: 12
20 | to:
21 | vault: elastic-password
22 | kibana_password:
23 | only_if: reset_passwords
24 | type: string
25 | from:
26 | vault: kibana-password
27 | random_string: 12
28 | to:
29 | vault: kibana-password
30 | logstash_password:
31 | only_if: reset_passwords
32 | type: string
33 | from:
34 | vault: logstash-password
35 | random_string: 12
36 | to:
37 | vault: logstash-password
38 |
39 | mem_limit:
40 | type: integer
41 | default: 2048
42 | from:
43 | prompt: Memory limit (in megabytes).
44 |
45 | jvm_limit:
46 | type: integer
47 | from:
48 | evaluate: ${mem_limit} * 0.8
49 |
50 | affinity:
51 | type: string
52 | required: false
53 | from:
54 | prompt: Affinity rule
55 |
56 | services:
57 | elastic:
58 | image: docker.elastic.co/elasticsearch/elasticsearch:5.5.2
59 | instances: {{ instances }}
60 | stateful: true
61 | mem_limit: "{{ mem_limit }}m"
62 | environment:
63 | cluster.name: ${STACK}
64 | network.host: _ethwe_
65 | discovery.zen.ping.unicast.hosts: elastic
66 | ES_JAVA_OPTS: "-Xms{{ jvm_limit }}m -Xmx{{ jvm_limit }}m"
67 | volumes:
68 | - elasticdata:/usr/share/elasticsearch/data
69 | affinity:
70 | # {% if affinity %}
71 | - {{ affinity }}
72 | # {% endif %}
73 | hooks:
74 | post_start:
75 | # {% if reset_passwords %}
76 | - cmd: while ! (echo > /dev/tcp/$$HOSTNAME/9200) >/dev/null 2>&1; do sleep 1; done
77 | name: Wait for elastic to startup
78 | instances: {{ instances }}
79 | oneshot: true
80 | - cmd: curl -u elastic:changeme -XPOST -d '{"password":"'"$$ELASTIC_PASSWORD"'"}' $$HOSTNAME:9200/_xpack/security/user/elastic/_password
81 | name: Change default user password
82 | instances: {{ instances }}
83 | oneshot: true
84 | - cmd: curl -u elastic:$$ELASTIC_PASSWORD -XPOST -d '{"password":"'"$$KIBANA_PASSWORD"'"}' $$HOSTNAME:9200/_xpack/security/user/kibana/_password
85 | name: Change kibana user password
86 | instances: {{ instances }}
87 | oneshot: true
88 | - cmd: curl -u elastic:$$ELASTIC_PASSWORD -XPOST -d '{"password":"'"$$LOGSTASH_PASSWORD"'"}' $$HOSTNAME:9200/_xpack/security/user/logstash_system/_password
89 | name: Change logstash_system user password
90 | instances: {{ instances }}
91 | oneshot: true
92 | # {% endif %}
93 | secrets:
94 | - secret: elastic-password
95 | name: ELASTIC_PASSWORD
96 | type: env
97 | - secret: kibana-password
98 | name: KIBANA_PASSWORD
99 | type: env
100 | - secret: logstash-password
101 | name: LOGSTASH_PASSWORD
102 | type: env
103 |
104 | volumes:
105 | elasticdata:
106 | external: true
107 |
--------------------------------------------------------------------------------
/fluentd/README.md:
--------------------------------------------------------------------------------
1 | # Fluentd log forwarders for Kontena
2 |
3 | [Fluentd](http://www.fluentd.org/) is an open source data collector for unified logging layer.
4 |
5 | Kontena supports fluentd log shipping that can be configured per grid. When fluentd forwarding is enabled, all container logs are automatically sent to fluentd for further processing, in addition to being stored in the Kontena Master.
6 |
7 | ## AWS Cloudwatch Logs
8 |
9 | You can use Amazon [CloudWatch Logs](http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) to monitor, store, and access your log files.
10 |
11 | ### Installation
12 |
13 | ```
14 | $ kontena stack install kontena/fluentd-cloudwatch
15 | ```
16 |
17 | ### Grid Configuration
18 |
19 | ```
20 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=fluentd-cloudwatch.${GRID}.kontena.local:24224 ${GRID}
21 | ```
22 |
23 | ## Graylog Extended Log Format (GELF)
24 |
25 | ### Installation
26 |
27 | ```
28 | $ kontena stack install kontena/fluentd-gelf
29 | ```
30 |
31 | ### Grid Configuration
32 |
33 | ```
34 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=fluentd-gelf.${GRID}.kontena.local:24224 ${GRID}
35 | ```
36 |
37 | ## Loggly
38 |
39 | [Loggly](https://www.loggly.com/) is a SaaS solution for log data management. With Loggly’s log management software, you’re able to bring logs from the depths of your entire infrastructure to one place where you can track activity and analyze trends.
40 |
41 | ### Installation
42 |
43 | ```
44 | $ kontena stack install kontena/fluentd-loggly
45 | ```
46 |
47 | ### Grid Configuration
48 |
49 | ```
50 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=fluentd-loggly.${GRID}.kontena.local:24224 ${GRID}
51 | ```
52 |
53 | ## Splunk HEC
54 |
55 | [Splunk](https://www.splunk.com/) HTTP Event Collector (HEC) is a fast and efficient way to send data to Splunk Enterprise, Splunk Light and Splunk Cloud.
56 |
57 | ### Installation
58 |
59 | ```
60 | $ kontena stack install kontena/fluentd-splunkhec
61 | ```
62 |
63 | ### Grid Configuration
64 |
65 | ```
66 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=fluentd-splunkhec.${GRID}.kontena.local:24224 ${GRID}
67 | ```
68 |
69 | ## Elasticsearch
70 |
71 | The Fluentd agent forwards logs to Elasticsearch for indexing. Logs are stored the "logstash" way, so it is easier to put e.g. Kibana in front.
72 |
73 | > Note: You need to have a working installation of the `kontena/elasticsearch` stack running in the grid.
74 |
75 | ### Installation
76 |
77 | ```
78 | $ kontena stack install kontena/fluentd-elasticsearch
79 | ```
80 |
81 | ### Grid Configuration
82 |
83 | ```
84 | $ kontena grid update --log-forwarder fluentd --log-opt fluentd-address=localhost ${GRID}
85 | ```
86 | The agent service is deployed as a daemon with host port 24224 published (see `elasticsearch.yml` below), so each node can reach its local Fluentd instance at `localhost`.
--------------------------------------------------------------------------------
/fluentd/cloudwatch-logs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.14-onbuild
2 |
3 | RUN apk add --update --virtual .build-deps \
4 | sudo build-base ruby-dev \
5 | && sudo gem install \
6 | fluent-plugin-cloudwatch-logs \
7 | && sudo gem sources --clear-all \
8 | && apk del .build-deps \
9 | && rm -rf /var/cache/apk/* \
10 | /home/fluent/.gem/ruby/2.3.0/cache/*.gem
--------------------------------------------------------------------------------
/fluentd/cloudwatch-logs/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 |   port 24224
4 | </source>
5 |
6 | <filter **>
7 |   @type record_transformer
8 |   <record>
9 |     node "${tag_parts[0]}"
10 |     grid "${tag_parts[1]}"
11 |     stack "${tag_parts[2]}"
12 |     service "${tag_parts[3]}"
13 |     instance "${tag_parts[4]}"
14 |   </record>
15 | </filter>
16 |
17 | <match **>
18 |   @type cloudwatch_logs
19 |   log_group_name "#{ENV['LOG_GROUP']}"
20 |   log_stream_name "#{ENV['LOG_STREAM']}"
21 |   auto_create_stream true
22 | </match>
/fluentd/cloudwatch.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/fluentd-cloudwatch
2 | version: 0.1.0
3 | description: Fluentd to AWS CloudWatch Logs Forwarder
4 | expose: agent
5 | variables:
6 | region:
7 | type: string
8 | default: us-east-1
9 | from:
10 | prompt: AWS region
11 | aws_access_key_id:
12 | type: string
13 | from:
14 | vault: ${STACK}-aws-access-key-id
15 | prompt: AWS access key id
16 | to:
17 | vault: ${STACK}-aws-access-key-id
18 | aws_secret_access_key:
19 | type: string
20 | from:
21 | vault: ${STACK}-aws-secret-access-key
22 | prompt: AWS secret access key
23 | to:
24 | vault: ${STACK}-aws-secret-access-key
25 | log_group:
26 | type: string
27 |     default: "${GRID}"
28 | from:
29 | prompt: Cloudwatch log group
30 | log_stream:
31 | type: string
32 | default: fluent
33 | from:
34 | prompt: Cloudwatch log stream
35 | services:
36 | agent:
37 | image: kontena/fluentd-cloudwatch:latest
38 | instances: 2
39 | secrets:
40 | - secret: ${STACK}-aws-access-key-id
41 | name: AWS_ACCESS_KEY_ID
42 | type: env
43 | - secret: ${STACK}-aws-secret-access-key
44 | name: AWS_SECRET_ACCESS_KEY
45 | type: env
46 | environment:
47 | AWS_REGION: "{{ region }}"
48 | LOG_GROUP: "{{ log_group }}"
49 | LOG_STREAM: "{{ log_stream }}"
50 | mem_limit: 128m
51 | cpu_shares: 256
--------------------------------------------------------------------------------
/fluentd/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/fluentd-elasticsearch
2 | version: 0.1.1
3 | description: Fluent forwarding to Elasticsearch
4 | variables:
5 | host:
6 | type: string
7 | default: elasticsearch.${GRID}.kontena.local
8 | from:
9 | prompt: Elasticsearch host
10 | port:
11 | type: integer
12 | default: 9200
13 | from:
14 | prompt: Elasticsearch port
15 | scheme:
16 | type: string
17 | default: http
18 | from:
19 | prompt: Elasticsearch URL scheme
20 | user:
21 | type: string
22 | default: elastic
23 | from:
24 | prompt: Elasticsearch user
25 |
26 | services:
27 | agent:
28 | image: kontena/fluentd-elasticsearch:latest
29 | environment:
30 | ELASTIC_USER: {{ user }}
31 | ELASTIC_HOST: {{ host }}
32 | ELASTIC_PORT: {{ port }}
33 | ELASTIC_SCHEME: {{ scheme }}
34 | ports:
35 | - 24224:24224
36 | deploy:
37 | strategy: daemon
38 | mem_limit: 128m
39 | cpu_shares: 256
40 |
41 | secrets:
42 | - secret: elastic-password
43 | name: ELASTIC_PASSWORD
44 | type: env
45 |
--------------------------------------------------------------------------------
/fluentd/elasticsearch/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.14-onbuild
2 |
3 | RUN apk add --update --virtual .build-deps \
4 | sudo build-base ruby-dev \
5 | && sudo gem install fluent-plugin-elasticsearch \
6 | && sudo gem sources --clear-all \
7 | && apk del .build-deps \
8 | && apk add curl \
9 | && rm -rf /var/cache/apk/* \
10 | /home/fluent/.gem/ruby/2.3.0/cache/*.gem
11 |
12 | ADD elasticsearch-template-es5x.json .
13 |
--------------------------------------------------------------------------------
/fluentd/elasticsearch/elasticsearch-template-es5x.json:
--------------------------------------------------------------------------------
1 | {
2 | "template" : "logstash-*",
3 | "version" : 50001,
4 | "settings" : {
5 | "index.refresh_interval" : "5s"
6 | },
7 | "mappings" : {
8 | "_default_" : {
9 | "_all" : {"enabled" : true, "norms" : false},
10 | "dynamic_templates" : [ {
11 | "message_field" : {
12 | "path_match" : "message",
13 | "match_mapping_type" : "string",
14 | "mapping" : {
15 | "type" : "text",
16 | "norms" : false
17 | }
18 | }
19 | }, {
20 | "string_fields" : {
21 | "match" : "*",
22 | "match_mapping_type" : "string",
23 | "mapping" : {
24 | "type" : "text", "norms" : false,
25 | "fields" : {
26 | "keyword" : { "type": "keyword", "ignore_above": 256 }
27 | }
28 | }
29 | }
30 | } ],
31 | "properties" : {
32 | "@timestamp": { "type": "date", "include_in_all": false },
33 | "@version": { "type": "keyword", "include_in_all": false },
34 | "geoip" : {
35 | "dynamic": true,
36 | "properties" : {
37 | "ip": { "type": "ip" },
38 | "location" : { "type" : "geo_point" },
39 | "latitude" : { "type" : "half_float" },
40 | "longitude" : { "type" : "half_float" }
41 | }
42 | }
43 | }
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/fluentd/elasticsearch/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 |   port 24224
4 | </source>
5 |
6 | <filter **>
7 |   @type record_transformer
8 |   <record>
9 |     node "${tag_parts[0]}"
10 |     grid "${tag_parts[1]}"
11 |     stack "${tag_parts[2]}"
12 |     service "${tag_parts[3]}"
13 |     instance "${tag_parts[4]}"
14 |   </record>
15 | </filter>
16 |
17 | <match **>
18 |   @type elasticsearch
19 |   host "#{ENV['ELASTIC_HOST']}"
20 |   port "#{ENV['ELASTIC_PORT']}"
21 |   scheme "#{ENV['ELASTIC_SCHEME']}"
22 |   user "#{ENV['ELASTIC_USER']}"
23 |   password "#{ENV['ELASTIC_PASSWORD']}"
24 |
25 |   template_name logstash
26 |   template_file elasticsearch-template-es5x.json
27 |
28 |   logstash_format true # Make using Kibana easier
29 | </match>
30 |
31 |
--------------------------------------------------------------------------------
/fluentd/gelf.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/fluentd-gelf
2 | version: 0.1.0
3 | description: Fluentd to GELF Forwarder
4 | expose: agent
5 | variables:
6 | host:
7 | type: string
8 | default: graylog.${GRID}.kontena.local
9 | from:
10 | prompt: GELF host
11 | port:
12 | type: integer
13 | default: 12201
14 | from:
15 | prompt: GELF port
16 | protocol:
17 | type: string
18 | default: udp
19 | from:
20 | prompt: GELF protocol
21 | services:
22 | agent:
23 | image: kontena/fluentd-gelf:latest
24 | instances: 2
25 | environment:
26 | - GELF_HOST={{ host }}
27 | - GELF_PORT={{ port }}
28 | - GELF_PROTOCOL={{ protocol }}
29 | mem_limit: 128m
30 | cpu_shares: 256
--------------------------------------------------------------------------------
/fluentd/gelf/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.14-onbuild
2 |
3 | RUN apk add --update --virtual .build-deps \
4 | sudo build-base ruby-dev \
5 | && sudo gem install \
6 | fluent-plugin-gelf-hs \
7 | && sudo gem sources --clear-all \
8 | && apk del .build-deps \
9 | && rm -rf /var/cache/apk/* \
10 | /home/fluent/.gem/ruby/2.3.0/cache/*.gem
11 |
12 | ENV GELF_PORT=12201 \
13 | GELF_PROTOCOL=udp
--------------------------------------------------------------------------------
/fluentd/gelf/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 |   port 24224
4 | </source>
5 |
6 | <filter **>
7 |   @type record_transformer
8 |   <record>
9 |     node "${tag_parts[0]}"
10 |     grid "${tag_parts[1]}"
11 |     stack "${tag_parts[2]}"
12 |     service "${tag_parts[3]}"
13 |     instance "${tag_parts[4]}"
14 |   </record>
15 | </filter>
16 |
17 | <match **>
18 |   @type gelf
19 |   host "#{ENV['GELF_HOST']}"
20 |   port "#{ENV['GELF_PORT']}"
21 |   protocol "#{ENV['GELF_PROTOCOL']}"
22 | </match>
--------------------------------------------------------------------------------
/fluentd/loggly.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/fluentd-loggly
2 | version: 0.1.0
3 | description: Fluentd to Loggly Forwarder
4 | expose: agent
5 | variables:
6 | token:
7 | type: string
8 | from:
9 | vault: ${STACK}-token
10 | prompt: Loggly token
11 | to:
12 | vault: ${STACK}-token
13 | services:
14 | agent:
15 | image: kontena/fluentd-loggly:latest
16 | instances: 2
17 | secrets:
18 | - secret: ${STACK}-token
19 | name: LOGGLY_TOKEN
20 | type: env
21 | mem_limit: 128m
22 | cpu_shares: 256
--------------------------------------------------------------------------------
/fluentd/loggly/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.14-onbuild
2 |
3 | RUN apk add --update --virtual .build-deps \
4 | sudo build-base ruby-dev \
5 | && sudo gem install \
6 | fluent-plugin-loggly \
7 | && sudo gem sources --clear-all \
8 | && apk del .build-deps \
9 | && rm -rf /var/cache/apk/* \
10 | /home/fluent/.gem/ruby/2.3.0/cache/*.gem
--------------------------------------------------------------------------------
/fluentd/loggly/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 |   port 24224
4 | </source>
5 |
6 | <filter **>
7 |   @type record_transformer
8 |   <record>
9 |     node "${tag_parts[0]}"
10 |     grid "${tag_parts[1]}"
11 |     stack "${tag_parts[2]}"
12 |     service "${tag_parts[3]}"
13 |     instance "${tag_parts[4]}"
14 |   </record>
15 | </filter>
16 |
17 | <match **>
18 |   @type loggly
19 |   loggly_url "https://logs-01.loggly.com/inputs/#{ENV['LOGGLY_TOKEN']}"
20 | </match>
--------------------------------------------------------------------------------
/fluentd/splunkhec.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/fluentd-splunkhec
2 | version: 0.1.0
3 | description: Fluentd to Splunk (HTTP Event Collector) Forwarder
4 | expose: agent
5 | variables:
6 | host:
7 | type: string
8 | from:
9 | prompt: Splunk hostname
10 | protocol:
11 | type: string
12 | default: http
13 | from:
14 | prompt: Splunk protocol
15 | port:
16 | type: string
17 | default: 8080
18 | from:
19 | prompt: Splunk port
20 | token:
21 | type: string
22 | from:
23 | vault: ${STACK}-token
24 | prompt: Splunk token
25 | to:
26 | vault: ${STACK}-token
27 | index:
28 | type: string
29 | default: main
30 | from:
31 | prompt: Index
32 | source:
33 | type: string
34 | default: "${GRID}"
35 | from:
36 | prompt: Source
37 | services:
38 | agent:
39 | image: kontena/fluentd-splunkhec:latest
40 |     instances: 2
41 |     environment:
42 |       SPLUNK_HOST: "{{ host }}"
43 |       SPLUNK_PROTOCOL: "{{ protocol }}"
44 |       SPLUNK_PORT: "{{ port }}"
45 |       SPLUNK_INDEX: "{{ index }}"
46 |       SPLUNK_SOURCE: "{{ source }}"
47 |     secrets:
48 |       - secret: ${STACK}-token
49 |         name: SPLUNK_TOKEN
50 |         type: env
51 |     mem_limit: 128m
52 |     cpu_shares: 256
--------------------------------------------------------------------------------
/fluentd/splunkhec/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:v0.14-onbuild
2 |
3 | RUN apk add --update --virtual .build-deps \
4 | sudo build-base ruby-dev \
5 | && sudo gem install \
6 | fluent-plugin-splunkhec \
7 | && sudo gem sources --clear-all \
8 | && apk del .build-deps \
9 | && rm -rf /var/cache/apk/* \
10 | /home/fluent/.gem/ruby/2.3.0/cache/*.gem
--------------------------------------------------------------------------------
/fluentd/splunkhec/fluent.conf:
--------------------------------------------------------------------------------
1 | <source>
2 |   @type forward
3 |   port 24224
4 | </source>
5 |
6 | <filter **>
7 |   @type record_transformer
8 |   <record>
9 |     node "${tag_parts[0]}"
10 |     grid "${tag_parts[1]}"
11 |     stack "${tag_parts[2]}"
12 |     service "${tag_parts[3]}"
13 |     instance "${tag_parts[4]}"
14 |   </record>
15 | </filter>
16 |
17 | <match **>
18 |   @type splunkhec
19 |   host "#{ENV['SPLUNK_HOST']}"
20 |   protocol "#{ENV['SPLUNK_PROTOCOL']}" # optional
21 |   port "#{ENV['SPLUNK_PORT']}" # optional
22 |   token "#{ENV['SPLUNK_TOKEN']}"
23 |   index "#{ENV['SPLUNK_INDEX']}" # optional
24 |   source "#{ENV['SPLUNK_SOURCE']}" # optional
25 | </match>
--------------------------------------------------------------------------------
/ingress-lb/README.md:
--------------------------------------------------------------------------------
1 | # Kontena Load Balancer
2 |
3 | [Kontena Load Balancer](http://kontena.io/docs/using-kontena/loadbalancer.html) is fully managed by [Kontena](https://kontena.io) orchestration and enables consistent, portable load balancing on any infrastructure where Kontena Nodes are running.
4 |
5 |
6 | ## Install
7 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start).
8 |
9 | `$ kontena stack install kontena/ingress-lb`
10 |
11 | ## Using Kontena Load Balancer
12 |
13 | Services are connected automatically by linking them to the load balancer service and defining load balancing rules via environment variables.
14 |
15 | ```
16 | stack: foo/bar
17 | version: 0.1.0
18 | services:
19 | web:
20 | image: nginx:latest
21 | environment:
22 | - KONTENA_LB_MODE=http
23 | - KONTENA_LB_BALANCE=roundrobin
24 | - KONTENA_LB_INTERNAL_PORT=80
25 | - KONTENA_LB_VIRTUAL_HOSTS=www.kontena.io,kontena.io
26 | links:
27 | - ingress-lb/lb
28 | api:
29 | image: registry.kontena.local/restapi:latest
30 | environment:
31 | - KONTENA_LB_MODE=http
32 | - KONTENA_LB_BALANCE=roundrobin
33 | - KONTENA_LB_INTERNAL_PORT=8080
34 | - KONTENA_LB_VIRTUAL_PATH=/api
35 | links:
36 | - ingress-lb/lb
37 | ```
38 |
39 | For further information, please see the complete [Kontena Load Balancer reference](http://kontena.io/docs/using-kontena/loadbalancer.html).
40 |
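41 | Once deployed, each load balancer instance answers on the health-check endpoint configured in `kontena.yml` below (`KONTENA_LB_HEALTH_URI=/__health`); a quick sanity check against a node (the address is a placeholder for your environment):
42 |
43 | ```
44 | $ curl -i http://<node-public-ip>/__health
45 | ```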
--------------------------------------------------------------------------------
/ingress-lb/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/ingress-lb
2 | version: 0.2.0
3 | description: Generic ingress load balancer for http/https
4 | expose: lb
5 | variables:
6 | lb_affinity:
7 | type: string
8 | default: label==~ingress-lb
9 | from:
10 | prompt: Affinity
11 | lb_certs:
12 | type: array
13 | required: false
14 | from:
15 | certificates: Select SSL certificates
16 | lb_stats_password:
17 | type: string
18 | from:
19 | vault: LB_STATS_PASSWORD
20 | random_string: 12
21 | to:
22 | vault: LB_STATS_PASSWORD
23 | lb_custom_ports:
24 | type: array
25 | from:
26 | prompt: Custom port mappings (comma separated, eg 2022:2022,25:25)
27 | services:
28 | lb:
29 | image: kontena/lb:latest
30 | ports:
31 | - 80:80
32 | - 443:443
33 | # {% if lb_custom_ports %}
34 | # {% for port in lb_custom_ports %}
35 | - {{ port }}
36 | # {% endfor %}
37 | # {% endif %}
38 | # {% if lb_affinity %}
39 | affinity:
40 | - {{ lb_affinity }}
41 | # {% endif %}
42 | deploy:
43 | strategy: daemon
44 | wait_for_port: 80
45 |       interval: 7d # re-deploy once a week for security updates
46 | health_check:
47 | protocol: http
48 | port: 80
49 | uri: /__health
50 | environment:
51 | - KONTENA_LB_HEALTH_URI=/__health
52 | secrets:
53 | - secret: LB_STATS_PASSWORD
54 | name: STATS_PASSWORD
55 | type: env
56 | # {% if lb_certs.size > 0 %}
57 | certificates:
58 | # {% for subject in lb_certs %}
59 | - subject: {{ subject }}
60 | name: "SSL_CERT_{{ subject }}"
61 | type: env
62 | # {% endfor %}
63 | # {% else %}
64 | certificates: []
65 | # {% endif %}
--------------------------------------------------------------------------------
/kafka/README.md:
--------------------------------------------------------------------------------
1 | HA Kafka cluster on Kontena
2 | ===========================
3 |
4 | [Kafka](https://kafka.apache.org/) is used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies.
5 |
6 | (based on the Kafka stack used in [HHypermap BOP -- Harvard Hypermap, Billion Object Platform](https://github.com/cga-harvard/hhypermap-bop))
7 |
8 | ## Install
9 |
10 | Prerequisites: You need to have a working Kontena Container Platform installed. If you are new to Kontena, check the quick start guide. You also need a running Zookeeper cluster. For the simplest installation, use the Kontena-provided stack `kontena/zookeeper-cluster`.
11 |
12 | Kafka is a stateful service, so you must first create a Kontena volume. For a local volume, run the following command:
13 |
14 | ```
15 | $ kontena volume create --scope instance --driver local kafka-cluster-data
16 | ```
17 |
18 | For local development purposes you can skip volume creation by using the `SKIP_VOLUMES` variable.
19 |
20 | Next install the stack itself. There are multiple options available, configured with environment variables or interactively via prompts:
21 |
22 | | Option | Description |
23 | | -------| ------------|
24 | | `LINK_ZOOKEEPER` | Boolean, if true use a Zookeeper Kontena service, otherwise specify an external URI. Defaults to true. |
25 | | `ZOOKEEPER_LINK` | If `LINK_ZOOKEEPER` is true, this is the service to link to. |
26 | | `ZOOKEEPER_URI` | If `LINK_ZOOKEEPER` is false, this is the URI of Zookeeper to use. |
27 | | `NUM_INSTANCES` | Number of instances of Kafka broker. Default is 3. |
28 | | `EXPOSE_KAFKA_PORT` | Boolean, if true the Kafka port 9092 will be exposed to the host node. Defaults to `false` |
29 | | `SKIP_VOLUMES` | Boolean, if true no volumes are mapped. Useful for local development. Defaults to `false` |
30 | | `KAFKA_VARIABLES` | Comma delimited list of Kafka config variables. Of the form `KAFKA_SOME_VAR=value`, which is transformed into the Kafka config form `some.var=value`. Any variable containing `replication.factor` will never be set to a value greater than `NUM_INSTANCES` (`offsets.topic.replication.factor`, `transaction.state.log.replication.factor`, `default.replication.factor`, `config.storage.replication.factor`, `offset.storage.replication.factor`, `status.storage.replication.factor`). |
31 |
32 | Generally, the default values are good for a basic cluster setup.
33 |
34 | To initially install:
35 |
36 | ```
37 | $ kontena stack install kontena/kafka-cluster
38 | ```
39 |
40 | To upgrade:
41 |
42 | ```
43 | $ kontena stack upgrade kafka-cluster kontena/kafka-cluster
44 | ```
45 |
46 | Other services inside your Kontena Grid can now connect to Kafka using the address `kafka.kafka-cluster.${GRID}.kontena.local:9092`.
47 |
48 | ## Local Development
49 | The `kafka-cluster` stack is also very useful if you are developing systems that use Kafka even when your development environment itself is not running inside Kontena. To run a local development setup, make sure to do the following steps (examples assume a Kontena Grid named `dev`):
50 |
51 | 1. Create a local Zookeeper stack: `NUM_INSTANCES=1 SKIP_VOLUMES=true kontena stack install kontena/zookeeper-cluster`
52 | 2. Create a local Kafka stack: `NUM_INSTANCES=1 SKIP_VOLUMES=true EXPOSE_KAFKA_PORT=true kontena stack install kontena/kafka-cluster`
53 | 3. Add an `/etc/hosts` entry: `127.0.0.1 kafka.kafka-cluster.dev.kontena.local` (for Vagrant development setups you may want to change 127.0.0.1 to the address of your Vagrant VM).
54 |
55 | Now services outside of the Kontena grid can connect to Kafka using the address `kafka.kafka-cluster.dev.kontena.local:9092`.
56 |
57 | ## Configuration
58 | Thanks to Kontena's `service exec` feature, it's fairly easy to run the various management tools Kafka provides without requiring `ssh` access to Kafka servers.
59 |
60 | First, log into a new interactive shell and switch to the `/usr/bin` directory where all the Kafka tools are:
61 |
62 | ```
63 | $ kontena service exec -it kafka-cluster/kafka bash
64 | $ cd /usr/bin
65 | ```
66 |
67 | Examples:
68 |
69 | - List all topics:
70 |
71 | ```
72 | $ ./kafka-topics --list --zookeeper $KAFKA_ZOOKEEPER_CONNECT:2181
73 | ```
74 |
75 | - Creating a single partition, single replica topic:
76 |
77 | ```
78 | $ ./kafka-topics --create --zookeeper $KAFKA_ZOOKEEPER_CONNECT:2181 --replication-factor 1 --partitions 1 --topic mytopic
79 | ```
80 |
81 | - Interactively publish events to a topic (the `broker-list` argument assumes a 3-node cluster):
82 |
83 | ```
84 | $ ./kafka-console-producer --broker-list $BROKER_LIST --topic mytopic
85 | ```
86 |
87 | - Interactively listen for all events on a topic from the beginning (the `bootstrap-server` argument assumes a 3-node cluster):
88 |
89 | ```
90 | $ ./kafka-console-consumer --bootstrap-server $BROKER_LIST --topic mytopic --from-beginning
91 | ```
92 |
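93 | - Describe an existing topic (a sketch; `--describe` follows the same pattern as the commands above):
94 |
95 | ```
96 | $ ./kafka-topics --describe --zookeeper $KAFKA_ZOOKEEPER_CONNECT:2181 --topic mytopic
97 | ```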
--------------------------------------------------------------------------------
/kafka/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka:3.3.0
2 | COPY kafka.run /etc/confluent/docker/run
3 | RUN ["chmod", "+x", "/etc/confluent/docker/run"]
--------------------------------------------------------------------------------
/kafka/docker/kafka.run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Copyright 2016 Confluent Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Copied from HHypermap BOP
18 | # See https://github.com/cga-harvard/hhypermap-bop/blob/master/kafka/kafka.run
19 |
20 | set -o nounset \
21 | -o errexit \
22 | -o verbose \
23 | -o xtrace
24 |
25 | echo "===> ENV Variables ..."
26 | env | sort
27 |
28 | echo "===> User"
29 | id
30 |
31 | echo "===> Configuring ..."
32 |
33 | echo "Harvard CGA intercept for Kontena"
34 |
35 | if [ "${KONTENA_SERVICE_INSTANCE_NUMBER:=x}" != "x" ] && [ "${KAFKA_BROKER_ID:=x}" == "x" ]; then
36 | export KAFKA_BROKER_ID="$KONTENA_SERVICE_INSTANCE_NUMBER"
37 | fi
38 | if [ "${KONTENA_SERVICE_NAME:=x}" != "x" ] && [ "${KAFKA_ADVERTISED_LISTENERS:=x}" == "x" ]; then
39 | FULL_DNS_NAME="${HOSTNAME}.${KONTENA_STACK_NAME}.${KONTENA_GRID_NAME}.kontena.local"
40 | export KAFKA_ADVERTISED_LISTENERS="PLAINTEXT://${FULL_DNS_NAME}:9092"
41 | fi
42 |
43 | /etc/confluent/docker/configure
44 |
45 | echo "===> Running preflight checks ... "
46 | /etc/confluent/docker/ensure
47 |
48 | echo "===> Launching ... "
49 | exec /etc/confluent/docker/launch
--------------------------------------------------------------------------------
/kafka/kontena.yml:
--------------------------------------------------------------------------------
1 | ---
2 | stack: kontena/kafka-cluster
3 | version: 1.0.4
4 | description: Kafka cluster based on Harvard Hypermap
5 | expose: kafka
6 | variables:
7 | link_zookeeper:
8 | type: boolean
9 | default: true
10 | from:
11 | env: LINK_ZOOKEEPER
12 | prompt: Link Zookeeper stack?
13 | zookeeper_link:
14 | type: string
15 | only_if: link_zookeeper
16 | from:
17 |       env: ZOOKEEPER_LINK
18 | service_link:
19 | prompt: Choose zookeeper
20 | name: zookeeper
21 | zookeeper_uri:
22 | type: string
23 |     default: zookeeper.zookeeper-cluster.${GRID}.kontena.local:2181
24 | skip_if: link_zookeeper
25 | from:
26 | env: ZOOKEEPER_URI
27 | prompt: Zookeeper uri
28 | num_instances:
29 | type: integer
30 | min: 1
31 | default: 3
32 | from:
33 | env: NUM_INSTANCES
34 | prompt: Number of instances of Kafka?
35 | expose_kafka_port:
36 | type: boolean
37 | default: false
38 | from:
39 | env: EXPOSE_KAFKA_PORT
40 | prompt: Expose Kafka port to host?
41 | skip_volumes:
42 | type: boolean
43 | default: false
44 | from:
45 | env: SKIP_VOLUMES
46 | kafka_variables:
47 | type: array
48 | split: ','
49 | uniq: true
50 | compact: true
51 | default: "KAFKA_DELETE_TOPIC_ENABLE=true,KAFKA_DEFAULT_REPLICATION_FACTOR=2,KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=2"
52 | from:
53 | env: KAFKA_VARIABLES
54 |
55 | services:
56 | kafka:
57 | image: kontena/kafka:3.3.0
58 | stateful: true
59 | # {% unless skip_volumes %}
60 | volumes:
61 | - kafka-data:/var/lib/kafka
62 | # {% endunless %}
63 | instances: ${num_instances}
64 | # {% if expose_kafka_port %}
65 | ports:
66 | - "9092:9092"
67 | # {% endif %}
68 | # {% if link_zookeeper %}
69 | links:
70 | - ${zookeeper_link}
71 | # {% endif %}
72 | deploy:
73 | wait_for_port: 9092
74 | min_health: 0.5
75 | environment:
76 | - KAFKA_JMX_PORT=9999
77 | #
78 | # ***** Configure Broker list *****
79 | # {% assign broker_list = "" %}
80 | # {% for i in (1..num_instances) %}
81 | # {% capture broker %}kafka-{{ i }}.{{ STACK }}.{{ GRID }}.kontena.local:9092{% endcapture %}
82 | # {% assign broker_list = broker_list | append: broker %}
83 | # {% if i < num_instances %}
84 | # {% assign broker_list = broker_list | append: "," %}
85 | # {% endif %}
86 | # {% endfor %}
87 | - BROKER_LIST={{ broker_list }}
88 | #
89 | # ***** Configure Zookeeper connect uri *****
90 | # {% assign zookeeper_connect = "" %}
91 | # {% if link_zookeeper %}
92 | # {% assign parts = zookeeper_link | split: '/' %}
93 |       # {% capture zookeeper_connect %}{{ parts[1] }}.{{ parts[0] }}.{{ GRID }}.kontena.local{% endcapture %}
94 | # {% else %}
95 | # {% assign zookeeper_connect = zookeeper_uri %}
96 | # {% endif %}
97 | - KAFKA_ZOOKEEPER_CONNECT={{ zookeeper_connect }}
98 | #
99 | # ***** Configure Kafka variables *****
100 | # {% for kafka_variable in kafka_variables %}
101 | # {% assign env_var = kafka_variable %}
102 | # {% assign parts = kafka_variable | split: '=' %}
103 | # {% if parts[0] contains "REPLICATION_FACTOR" %}
104 | # {% assign val = parts[1] | plus: 0 %}
105 | # {% if val > num_instances %}
106 | # {% capture env_var %}{{ parts[0] }}={{ num_instances }}{% endcapture %}
107 | # {% endif %}
108 | # {% endif %}
109 | - "{{ env_var }}"
110 | # {% endfor %}
111 |
112 | # {% unless skip_volumes %}
113 | volumes:
114 | kafka-data:
115 | external:
116 | name: ${STACK}-data
117 | # {% endunless %}
--------------------------------------------------------------------------------
/kibana/README.md:
--------------------------------------------------------------------------------
1 | # Kibana on Kontena
2 |
3 | [Kibana](https://www.elastic.co/products/kibana) lets you visualize your Elasticsearch data and navigate the Elastic Stack.
4 |
5 | ## Install
6 |
7 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start). You also need a working installation of the `kontena/elasticsearch` stack.
8 |
9 | Install Kontena Load Balancer stack (optional)
10 | `$ kontena stack install kontena/ingress-lb`
11 |
12 | Install Kibana stack
13 | `$ kontena stack install kontena/kibana`
14 |
15 | This will deploy Kibana and connect it to Elasticsearch.
16 |
17 |
18 | ## Logging in
19 |
20 | Kibana is now running behind the load balancer, so you should access it using the virtual host you gave during the stack install.
21 |
22 | To log into Kibana, use the username `elastic`. The password is in the Kontena secrets vault (auto-generated during the Elasticsearch stack installation) and can be read (with sufficient permissions) with `kontena vault read --value elastic-password`.
23 |
--------------------------------------------------------------------------------
/kibana/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/kibana
2 | version: 0.1.0
3 | description: Kibana
4 | variables:
5 | host:
6 | type: string
7 | default: elasticsearch.${GRID}.kontena.local
8 | from:
9 | prompt: Elasticsearch host
10 | port:
11 | type: integer
12 | default: 9200
13 | from:
14 | prompt: Elastic port
15 | user:
16 | type: string
17 | default: elastic
18 | from:
19 | prompt: Elasticsearch user
20 | loadbalancer:
21 | type: string
22 | from:
23 | env: LOADBALANCER
24 | service_link:
25 | prompt: Choose a loadbalancer
26 | image: kontena/lb
27 | virtual_host:
28 | only_if:
29 | - loadbalancer
30 | type: string
31 | from:
32 | env: VIRTUAL_HOSTS
33 | prompt: Domain name for Kibana
34 |
35 |
36 | services:
37 | kibana:
38 | image: docker.elastic.co/kibana/kibana:5.5.2
39 | environment:
40 | SERVER_NAME: {{ virtual_host }}
41 | ELASTICSEARCH_URL: http://{{ host }}:{{ port }}
42 | ELASTICSEARCH_USERNAME: {{ user }}
43 | # {% if loadbalancer %}
44 | KONTENA_LB_INTERNAL_PORT: 5601
45 | KONTENA_LB_VIRTUAL_HOSTS: ${virtual_host}
46 | # {% endif %}
47 | links:
48 | # {% if loadbalancer %}
49 | - ${loadbalancer}
50 | # {% endif %}
51 | secrets:
52 | - secret: elastic-password
53 | name: ELASTICSEARCH_PASSWORD
54 | type: env
--------------------------------------------------------------------------------
/kong-dashboard/README.md:
--------------------------------------------------------------------------------
1 | # Kong Dashboard
2 |
3 | Dashboard for managing [Kong](https://getkong.org/) gateway.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/quick-start).
7 |
8 | Install Kong stack
9 | `$ kontena stack install kontena/kong`
10 |
11 | Install [Kong Dashboard](https://github.com/PGBI/kong-dashboard)
12 | `$ kontena stack install kontena/kong-dashboard`
13 |
14 | After it is deployed you can access the Dashboard with [Kontena VPN](http://kontena.io/docs/using-kontena/vpn-access.html):
15 | - http://kong-dashboard.${GRID}.kontena.local:8080
16 |
17 |
18 | **Note:** Replace `${GRID}` with your grid name.
19 |
--------------------------------------------------------------------------------
/kong-dashboard/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/kong-dashboard
2 | version: 0.1.0
3 | description: Dashboard for managing Kong gateway
4 | expose: ui
5 | services:
6 | ui:
7 | image: pgbi/kong-dashboard
--------------------------------------------------------------------------------
/kong/README.md:
--------------------------------------------------------------------------------
1 | # Kong API gateway on Kontena
2 |
3 | [Kong](https://getkong.org/) is the open-source API Gateway and Microservices Management Layer, delivering high performance and reliability.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/quick-start).
7 |
8 | Install Kontena Load Balancer stack (optional)
9 | `$ kontena stack install kontena/ingress-lb`
10 |
11 | Install Kong stack
12 | `$ kontena stack install kontena/kong`
13 |
14 | This will deploy Kong API gateway with PostgreSQL to your Kontena grid. After it is deployed you can access the Admin API with [Kontena VPN](http://kontena.io/docs/using-kontena/vpn-access.html):
15 | - http://kong.${GRID}.kontena.local:8001
16 |
17 | You can connect to the API gateway via the internal DNS:
18 | - http://kong.${GRID}.kontena.local:8000
19 |
20 | It can also be exposed to the public internet via [Kontena Load Balancer](http://kontena.io/docs/using-kontena/loadbalancer.html).
21 |
22 | **Note:** Replace `${GRID}` with your grid name.
23 |
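24 | As a quick smoke test over the VPN, you can register an API through the Admin API; a sketch using the Kong 0.11 `/apis` endpoint, with placeholder name, host and upstream:
25 |
26 | ```
27 | $ curl -i -X POST http://kong.${GRID}.kontena.local:8001/apis \
28 |     --data name=example --data hosts=example.com \
29 |     --data upstream_url=http://example.com
30 | ```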
--------------------------------------------------------------------------------
/kong/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/kong
2 | version: 0.2.0
3 | description: Kong API gateway with PostgreSQL
4 | expose: api
5 | variables:
6 | bundle_postgres:
7 | type: boolean
8 | default: true
9 | from:
10 | env: BUNDLE_POSTGRES
11 | prompt: Use bundled postgres?
12 | postgres_host:
13 | skip_if:
14 | - bundle_postgres
15 | type: string
16 | default: postgres
17 | from:
18 | env: PG_HOST
19 | prompt: PostgreSQL Host
20 | postgres_user:
21 | skip_if:
22 | - bundle_postgres
23 | type: string
24 | default: kong
25 | from:
26 | env: PG_USER
27 | prompt: PostgreSQL User
28 | postgres_password:
29 | type: string
30 | from:
31 | vault: ${STACK}-postgres-password
32 | env: PG_PASSWORD
33 | prompt: Postgres password? (or empty to generate random)
34 | random_string: 32
35 | to:
36 | vault: ${STACK}-postgres-password
37 | postgres_db:
38 | skip_if:
39 | - bundle_postgres
40 | type: string
41 | default: kong
42 | from:
43 | env: PG_DATABASE
44 | prompt: PostgreSQL Database
45 | loadbalancer:
46 | type: string
47 | required: false
48 | from:
49 | env: LOADBALANCER
50 | service_link:
51 | prompt: Choose a loadbalancer
52 | image: kontena/lb
53 | virtual_hosts:
54 | only_if:
55 | - loadbalancer
56 | type: string
57 | from:
58 | env: VIRTUAL_HOSTS
59 | prompt: Domain names that point to your APIs (comma-separated list)
60 | services:
61 | migrations:
62 | image: "kong:0.11-alpine"
63 | environment:
64 | - KONG_DATABASE=postgres
65 | # {% if bundle_postgres %}
66 | - KONG_PG_HOST=postgres.${STACK}.${GRID}.kontena.local
67 | - KONG_PG_DATABASE=kong
68 | - KONG_PG_USER=kong
69 | # {% else %}
70 | - KONG_PG_HOST=${postgres_host}
71 | - KONG_PG_USER=${postgres_user}
72 | - KONG_PG_DATABASE=${postgres_db}
73 | # {% endif %}
74 | secrets:
75 | - secret: ${STACK}-postgres-password
76 | name: KONG_PG_PASSWORD
77 | type: env
78 | command: sh -c "kong migrations up && tail -f /dev/null"
79 | # {% if bundle_postgres %}
80 | depends_on:
81 | - postgres
82 | # {% endif %}
83 | api:
84 | image: "kong:0.11-alpine"
85 | environment:
86 | - KONG_DATABASE=postgres
87 | # {% if bundle_postgres %}
88 | - KONG_PG_HOST=postgres.${STACK}.${GRID}.kontena.local
89 | - KONG_PG_DATABASE=kong
90 | - KONG_PG_USER=kong
91 | # {% else %}
92 | - KONG_PG_HOST=${postgres_host}
93 | - KONG_PG_USER=${postgres_user}
94 | - KONG_PG_DATABASE=${postgres_db}
95 | # {% endif %}
96 | # {% if loadbalancer %}
97 | - KONTENA_LB_INTERNAL_PORT=8000
98 | - KONTENA_LB_VIRTUAL_HOSTS=${virtual_hosts}
99 | links:
100 | - ${loadbalancer}
101 | # {% endif %}
102 | secrets:
103 | - secret: ${STACK}-postgres-password
104 | name: KONG_PG_PASSWORD
105 | type: env
106 | depends_on:
107 | - migrations
108 | # {% if bundle_postgres %}
109 | - postgres
110 | postgres:
111 | image: postgres:9.6-alpine
112 | stateful: true
113 | environment:
114 | - POSTGRES_USER=kong
115 | - POSTGRES_DB=kong
116 | secrets:
117 | - secret: ${STACK}-postgres-password
118 | name: POSTGRES_PASSWORD
119 | type: env
120 | # {% endif %}
121 |
--------------------------------------------------------------------------------
/mariadb/README.md:
--------------------------------------------------------------------------------
1 | # MariaDB on Kontena
2 |
3 | [MariaDB](https://mariadb.org) is a community-developed fork of the MySQL relational database management system intended to remain free under the GNU GPL.
4 |
5 | ## Install
6 |
7 | > Prerequisites: You need to have a working [Kontena](https://www.kontena.io) Platform installed. If you are new to Kontena, check the [quick start guide](https://www.kontena.io/docs/quick-start).
8 |
9 | ```
10 | $ kontena volume create --driver local --scope grid mariadb-data
11 | $ kontena stack install kontena/mariadb
12 | ```
13 |
14 | This will deploy a MariaDB server to your grid. Other services can connect to it at the `mariadb.${GRID}.kontena.local:3306` address.
15 |
16 | **Note:** Replace `${GRID}` with your grid name.
17 |
18 | ## Administration
19 |
20 | Access `mysql` cli via interactive exec:
21 |
22 | ```
23 | $ kontena service exec -it --shell mariadb/server "mysql --user root --password=\$MYSQL_ROOT_PASSWORD mysql"
24 | ```
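25 |
26 | Other stacks can pick up the generated credentials from the Vault. A minimal sketch, assuming the default stack name `mariadb`; the `app` service and its image are placeholders:
27 |
28 | ```
29 | app:
30 |   image: my/app
31 |   environment:
32 |     - MYSQL_HOST=mariadb.${GRID}.kontena.local
33 |   secrets:
34 |     - secret: mariadb-password
35 |       name: MYSQL_PASSWORD
36 |       type: env
37 | ```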
--------------------------------------------------------------------------------
/mariadb/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/mariadb
2 | version: 0.2.1
3 | description: MariaDB is a community-developed fork of MySQL intended to remain free under the GNU GPL.
4 | expose: server
5 | variables:
6 | root_password:
7 | type: string
8 | from:
9 | vault: ${STACK}-root-password
10 | random_string: 16
11 | to:
12 | vault: ${STACK}-root-password
13 | user:
14 | type: string
15 | default: mariadb
16 | from:
17 | vault: ${STACK}-user
18 | prompt: Username
19 | to:
20 | vault: ${STACK}-user
21 | password:
22 | type: string
23 | from:
24 | vault: ${STACK}-password
25 | random_string: 16
26 | to:
27 | vault: ${STACK}-password
28 | db:
29 | type: string
30 | default: mariadb
31 | from:
32 | vault: ${STACK}-database
33 | prompt: Database name
34 | to:
35 | vault: ${STACK}-database
36 | affinity:
37 | type: string
38 | default: label==~mariadb
39 | from:
40 | prompt: Affinity
41 | mem_limit:
42 | type: integer
43 | default: 512
44 | from:
45 | prompt: Memory limit (MB)
46 | innodb_buffer_pool_size:
47 | type: string
48 | from:
49 | evaluate: ((${mem_limit} * 10) * 7) / 100
50 | services:
51 | server:
52 | image: mariadb:10.2
53 | instances: 1
54 | command: "mysqld --innodb-buffer-pool-size={{ innodb_buffer_pool_size }}M"
55 | mem_limit: "{{ mem_limit }}m"
56 | stateful: true
57 | affinity:
58 | - {{ affinity }}
59 | deploy:
60 | wait_for_port: 3306
61 | health_check:
62 | protocol: tcp
63 | initial_delay: 120
64 | port: 3306
65 | stop_grace_period: 60s
66 | secrets:
67 | - secret: ${STACK}-root-password
68 | name: MYSQL_ROOT_PASSWORD
69 | type: env
70 | - secret: ${STACK}-user
71 | name: MYSQL_USER
72 | type: env
73 | - secret: ${STACK}-password
74 | name: MYSQL_PASSWORD
75 | type: env
76 | - secret: ${STACK}-database
77 | name: MYSQL_DATABASE
78 | type: env
79 | volumes:
80 | - data:/var/lib/mysql
81 | volumes:
82 | data:
83 | external:
84 | name: ${STACK}-data
85 |
--------------------------------------------------------------------------------
/mongodb-replica-set/README.md:
--------------------------------------------------------------------------------
1 | # MongoDB Replica Set on Kontena
2 |
3 | [MongoDB](https://www.mongodb.com/what-is-mongodb) is a document database with the scalability and flexibility that you want with the querying and indexing that you need.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/quick-start).
7 |
8 | First create a volume configuration for MongoDB:
9 |
10 | ```
11 | $ kontena volume create --driver local --scope instance mongo-rs-data
12 | ```
13 |
14 | Then install the `kontena/mongo-rs` stack:
15 |
16 | ```
17 | $ kontena stack install kontena/mongo-rs
18 | > Memory limit (GB) : 1
19 | [done] Creating stack mongo-rs
20 | [done] Triggering deployment of stack mongo-rs
21 | [done] Waiting for deployment to start
22 | [done] Deploying service peer
23 | ```
24 |
25 | This will deploy a MongoDB replica set to your grid. Other services can connect to it using the following replica set member addresses:
26 |
27 | - `peer-1.mongo-rs.${GRID}.kontena.local:27017`
28 | - `peer-2.mongo-rs.${GRID}.kontena.local:27017`
29 | - `peer-3.mongo-rs.${GRID}.kontena.local:27017`
30 |
31 | **Note:** Replace `${GRID}` with your grid name.
32 |
33 | ## Administration
34 |
35 | You can connect to the MongoDB command line interface using the following command:
36 |
37 | ```
38 | $ kontena service exec -it --shell mongo-rs/peer 'mongo -u admin -p $MONGO_ADMIN_PASSWORD --authenticationDatabase admin'
39 | ```
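40 |
41 | Applications can also use a single replica set connection URI listing all members, authenticating with the `admin` user as above. An illustrative example; replace `mydb` with your database name:
42 |
43 | ```
44 | mongodb://peer-1.mongo-rs.${GRID}.kontena.local:27017,peer-2.mongo-rs.${GRID}.kontena.local:27017,peer-3.mongo-rs.${GRID}.kontena.local:27017/mydb?replicaSet=mongo-rs
45 | ```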
--------------------------------------------------------------------------------
/mongodb-replica-set/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/mongo-rs
2 | version: 0.2.1
3 | description: MongoDB 3.6 replica set (3 members)
4 | variables:
5 | mem_limit:
6 | type: integer
7 | default: 1
8 | from:
9 | prompt: Memory limit (GB)
10 | wtiger_cache:
11 | type: string
12 | from:
13 | evaluate: ${mem_limit} / 2.0
14 | key_file:
15 | type: string
16 | from:
17 | vault: ${STACK}-keyfile
18 | random_string: 64
19 | to:
20 | vault: ${STACK}-keyfile
21 | admin_password:
22 | type: string
23 | from:
24 | vault: ${STACK}-password
25 | random_string: 18
26 | to:
27 | vault: ${STACK}-password
28 |
29 | services:
30 | peer:
31 | image: mongo:3.6
32 | instances: 3
33 | stateful: true
34 | mem_limit: "{{ mem_limit }}g"
35 | command: mongod --replSet ${STACK} --keyFile /data/db/mongo.key --wiredTigerCacheSizeGB {{ wtiger_cache }} --bind_ip 0.0.0.0
36 | secrets:
37 | - secret: ${STACK}-keyfile
38 | name: MONGO_KEYFILE
39 | type: env
40 | - secret: ${STACK}-password
41 | name: MONGO_ADMIN_PASSWORD
42 | type: env
43 | volumes:
44 | - data:/data/db
45 | deploy:
46 | wait_for_port: 27017
47 | health_check:
48 | protocol: tcp
49 | port: 27017
50 | initial_delay: 60
51 | hooks:
52 | pre_start:
53 | - cmd: echo $${MONGO_KEYFILE} > /data/db/mongo.key && chmod 600 /data/db/mongo.key && chown mongodb /data/db/mongo.key
54 | name: keyfile
55 | instances: '*'
56 | oneshot: false
57 | post_start:
58 | - cmd: |
59 | while ! (mongo --eval 'db.serverStatus()') >/dev/null 2>&1; do echo "waiting for mongo" && sleep 1; done
60 | mongo --eval "printjson(rs.initiate({'_id': '${STACK}', 'members': [{'_id': 1, 'host': 'peer-3.${STACK}.${GRID}.kontena.local:27017'}, {'_id': 2, 'host': 'peer-2.${STACK}.${GRID}.kontena.local:27017'}, {'_id': 3, 'host': 'peer-1.${STACK}.${GRID}.kontena.local:27017'}]}));" admin
61 | name: rs_initiate
62 | instances: 3
63 | oneshot: true
64 | - cmd: |
65 | while ! ( [ $(mongo --eval 'rs.status().myState' --quiet) -eq "1" ] ) ; do echo "waiting for replica set init" && sleep 1; done
66 | mongo --eval "db.createUser({'user':'admin','pwd':\"$${MONGO_ADMIN_PASSWORD}\",'roles':[{'role':'root','db':'admin'}]});" admin
67 | name: rs_add_admin
68 | instances: 3
69 | oneshot: true
70 | volumes:
71 | data:
72 | external:
73 | name: ${STACK}-data
74 |
--------------------------------------------------------------------------------
/rabbitmq/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/harbur-rabbitmq-cluster
2 | version: 1.0.0
3 | description: RabbitMQ cluster based on harbur/rabbitmq-cluster image
4 | expose: node
5 | variables:
6 | num_instances:
7 | type: integer
8 | min: 1
9 | default: 3
10 | from:
11 | env: NUM_INSTANCES
12 | prompt: number of nodes
13 | cookie:
14 | type: string
15 | from:
16 | vault: RABBITMQ_COOKIE
17 | random_string: 12
18 | to:
19 | vault: RABBITMQ_COOKIE
20 | admin_user:
21 | type: string
22 | default: admin
23 | admin_password:
24 | type: string
25 | from:
26 | vault: ${STACK}-admin-user-password
27 | random_string: 16
28 | to:
29 | vault: ${STACK}-admin-user-password
30 | eventbus_vhost:
31 | type: string
32 | default: eventbus
33 | eventbus_user:
34 | type: string
35 | default: eventbus_user
36 | eventbus_password:
37 | type: string
38 | from:
39 | vault: ${STACK}-eventbus-user-password
40 | random_string: 16
41 | to:
42 | vault: ${STACK}-eventbus-user-password
43 | eventbus_uri:
44 | type: string
45 | from:
46 | vault: ${STACK}-eventbus-uri
47 | interpolate: amqp://${eventbus_user}:${eventbus_password}@node.${STACK}.${GRID}.kontena.local/${eventbus_vhost}
48 | to:
49 | vault: ${STACK}-eventbus-uri
50 |
51 | services:
52 | seed:
53 | image: harbur/rabbitmq-cluster
54 | instances: 1
55 | stateful: true
56 | volumes:
57 | - rabbitmq-seed-data:/var/lib/rabbitmq
58 | secrets:
59 | - secret: RABBITMQ_COOKIE
60 | name: ERLANG_COOKIE
61 | type: env
62 | node:
63 | image: harbur/rabbitmq-cluster
64 | instances: ${num_instances}
65 | stateful: true
66 | volumes:
67 | - rabbitmq-node-data:/var/lib/rabbitmq
68 | secrets:
69 | - secret: RABBITMQ_COOKIE
70 | name: ERLANG_COOKIE
71 | type: env
72 | environment:
73 | - CLUSTER_WITH=seed-1
74 | depends_on:
75 | - seed
76 | hooks:
77 | post_start:
78 | - name: sleep a bit
79 | cmd: sleep 10
80 | instances: 1
81 | - name: create eventbus vhost
82 | cmd: if [ $(rabbitmqctl list_vhosts | grep -c eventbus) -eq 0 ];
83 | then rabbitmqctl add_vhost eventbus;
84 | else echo "vhost 'eventbus' already exists";
85 | fi
86 | instances: 1
87 | - name: remove guest user
88 | cmd: if [ $(rabbitmqctl list_users | grep -c guest) -eq 1 ];
89 | then rabbitmqctl delete_user guest;
90 | else echo "user 'guest' already removed";
91 | fi
92 | instances: 1
93 | - name: create admin user
94 | cmd: if [ $(rabbitmqctl list_users | grep -c admin) -eq 0 ];
95 | then rabbitmqctl add_user admin ${admin_password};
96 | else echo "user 'admin' already created";
97 | fi
98 | instances: 1
99 | - name: set admin user permissions
100 | cmd: rabbitmqctl set_user_tags admin administrator &&
101 | rabbitmqctl clear_permissions admin &&
102 | rabbitmqctl set_permissions admin ".*" ".*" ".*" &&
103 | rabbitmqctl clear_permissions -p eventbus admin &&
104 | rabbitmqctl set_permissions -p eventbus admin ".*" ".*" ".*";
105 | instances: 1
106 | - name: create eventbus user
107 | cmd: if [ $(rabbitmqctl list_users | grep -c eventbus_user) -eq 0 ];
108 | then rabbitmqctl add_user eventbus_user ${eventbus_password};
109 | else echo "user 'eventbus_user' already created";
110 | fi
111 | instances: 1
112 | - name: set eventbus user permissions
113 | cmd: rabbitmqctl clear_permissions -p eventbus eventbus_user &&
114 | rabbitmqctl set_permissions -p eventbus eventbus_user ".*" ".*" ".*"
115 | instances: 1
116 | - name: turn on HA for default vhost
117 | cmd: if [ $(rabbitmqctl list_policies | grep -c ha-all) -eq 0 ];
118 | then rabbitmqctl set_policy ha-all ".*" '{"ha-mode":"all", "ha-sync-mode":"automatic"}';
119 | else echo "ha policy already set for default vhost";
120 | fi
121 | instances: 1
122 | - name: turn on HA for eventbus vhost
123 | cmd: if [ $(rabbitmqctl list_policies -p eventbus | grep -c ha-all) -eq 0 ];
124 | then rabbitmqctl set_policy -p eventbus ha-all ".*" '{"ha-mode":"all", "ha-sync-mode":"automatic"}';
125 | else echo "ha policy already set for eventbus vhost";
126 | fi
127 | instances: 1
128 |
129 | volumes:
130 | rabbitmq-seed-data:
131 | external:
132 | name: ${STACK}-seed-data
133 | rabbitmq-node-data:
134 | external:
135 | name: ${STACK}-node-data
136 |
--------------------------------------------------------------------------------
/rabbitmq/readme.md:
--------------------------------------------------------------------------------
1 | RabbitMQ Cluster on Kontena
2 | ===========================
3 |
4 | [RabbitMQ](https://www.rabbitmq.com) is the most widely deployed open source message broker.
5 |
6 | (based on [harbur/rabbitmq-cluster](https://github.com/harbur/docker-rabbitmq-cluster))
7 |
8 | ## Install
9 |
10 | > Prerequisites: You need to have a working [Kontena](https://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](https://www.kontena.io/docs/quick-start).
11 |
12 | RabbitMQ is a stateful service, therefore you must first create a Kontena volume. For a local volume run the following command:
13 |
14 | ```
15 | $ kontena volume create --scope instance --driver local harbur-rabbitmq-cluster-seed-data
16 | $ kontena volume create --scope instance --driver local harbur-rabbitmq-cluster-node-data
17 | ```
18 | ```
19 | $ kontena stack install kontena/harbur-rabbitmq-cluster
20 | ```
21 | This will deploy a stateful RabbitMQ cluster to your grid. Other services can connect to it at the `amqp://harbur-rabbitmq-cluster.${GRID}.kontena.local` address.
22 |
23 | **Note:** Replace `${GRID}` with your grid name.
24 |
25 | ## Event Bus
26 |
27 | This stack will create a new VHost called "eventbus" as well as an associated user and password. This can be used by applications instead of having to give out access to the default VHost and admin user. To connect to this VHost, just link to the Vault secret `harbur-rabbitmq-cluster-eventbus-uri` (see the example at the end of this README).
28 |
29 |
30 | ## Admin UI
31 |
32 | The admin UI will be enabled, but is only available after opening a VPN connection to the Grid. After that you should be able to connect via the URL `http://node.harbur-rabbitmq-cluster.${GRID}.kontena.local:15672`.
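33 |
34 | ## Event Bus Example
35 |
36 | A minimal sketch of a service in another stack consuming the event bus URI via the Vault (the `app` service and its image are placeholders):
37 |
38 | ```
39 | app:
40 |   image: my/app
41 |   secrets:
42 |     - secret: harbur-rabbitmq-cluster-eventbus-uri
43 |       name: AMQP_URI
44 |       type: env
45 | ```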
--------------------------------------------------------------------------------
/redis-sentinel/3.0/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM redis:3.0-alpine
2 | MAINTAINER Kontena, Inc.
3 |
4 | RUN set -x \
5 | && apk update \
6 | && apk add curl bash
7 |
8 | COPY entrypoint.sh /docker-entrypoint.sh
9 | RUN chmod +x /docker-entrypoint.sh
10 |
11 | EXPOSE 26379
12 |
13 | ENTRYPOINT ["/docker-entrypoint.sh"]
--------------------------------------------------------------------------------
/redis-sentinel/3.0/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | SENTINEL_CONFIGURATION_FILE=/etc/sentinel.conf
6 |
7 | rm -f $SENTINEL_CONFIGURATION_FILE
8 |
9 | QUORUM=$QUORUM
10 | : ${QUORUM:=2}
11 |
12 | DEFAULT_PORT=$DEFAULT_PORT
13 | : ${DEFAULT_PORT:=6379}
14 |
15 | DOWN_AFTER=$DOWN_AFTER
16 | : ${DOWN_AFTER:=30000}
17 |
18 | FAILOVER_TIMEOUT=$FAILOVER_TIMEOUT
19 | : ${FAILOVER_TIMEOUT:=180000}
20 |
21 | PARALLEL_SYNCS=$PARALLEL_SYNCS
22 | : ${PARALLEL_SYNCS:=1}
23 |
24 | parse_addr () {
25 | local _retvar=$1
26 | IFS=':' read -ra ADDR <<< "$2"
27 |
28 | if [ "${ADDR[1]}" = "" ]; then
29 | ADDR[1]=$DEFAULT_PORT
30 | fi
31 |
32 | eval $_retvar='("${ADDR[@]}")'
33 | }
34 |
35 | print_slave () {
36 | local -a ADDR
37 | parse_addr ADDR $1
38 | echo "sentinel known-slave $MASTER_NAME ${ADDR[0]} ${ADDR[1]}" >> $SENTINEL_CONFIGURATION_FILE
39 | }
40 |
41 | print_master () {
42 | local -a ADDR
43 | parse_addr ADDR $1
44 | echo "sentinel monitor $MASTER_NAME ${ADDR[0]} ${ADDR[1]} $QUORUM" >> $SENTINEL_CONFIGURATION_FILE
45 | }
46 |
47 | echo "port 26379" >> $SENTINEL_CONFIGURATION_FILE
48 |
49 | if [ "$ANNOUNCE_IP" ]; then
50 | echo "sentinel announce-ip $ANNOUNCE_IP" >> $SENTINEL_CONFIGURATION_FILE
51 | fi
52 |
53 | if [ "$ANNOUNCE_PORT" ]; then
54 | echo "sentinel announce-port $ANNOUNCE_PORT" >> $SENTINEL_CONFIGURATION_FILE
55 | fi
56 |
57 | if [ "$MASTER_NAME" ]; then
58 | print_master $MASTER
59 | echo "sentinel down-after-milliseconds $MASTER_NAME $DOWN_AFTER" >> $SENTINEL_CONFIGURATION_FILE
60 | echo "sentinel failover-timeout $MASTER_NAME $FAILOVER_TIMEOUT" >> $SENTINEL_CONFIGURATION_FILE
61 | echo "sentinel parallel-syncs $MASTER_NAME $PARALLEL_SYNCS" >> $SENTINEL_CONFIGURATION_FILE
62 |
63 | if [ "$SLAVES" ]; then
64 | for SLAVE in $(echo $SLAVES | tr ";" "\n")
65 | do
66 | if [ "$SLAVE" ]; then
67 | print_slave $SLAVE
68 | fi
69 | done
70 | fi
71 | fi
72 |
73 | redis-server $SENTINEL_CONFIGURATION_FILE --sentinel
--------------------------------------------------------------------------------
/redis-sentinel/README.md:
--------------------------------------------------------------------------------
1 | # Redis Sentinel Cluster on Kontena
2 |
3 | > Prerequisites: You need to have a working [Kontena](https://kontena.io) Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start).
4 |
5 | First you need to provide [volume](https://kontena.io/docs/using-kontena/volumes) configurations for Redis Sentinel instances:
6 |
7 | ```
8 | $ kontena volume create --driver local --scope instance redis-sentinel-data
9 | $ kontena volume create --driver local --scope instance redis-sentinel-monitor
10 | ```
11 |
12 | Then you can deploy the Redis Sentinel stack with the following command:
13 |
14 | ```
15 | $ kontena stack install kontena/redis-sentinel
16 | ```
17 |
18 | Or directly from your filesystem:
19 |
20 | ```
21 | $ kontena stack install kontena.yml
22 | ```
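23 |
24 | The `lb` service always routes port 6379 to the current Redis master. An illustrative connectivity check from inside the grid, using the generated password stored in the Vault secret `redis-sentinel-password`:
25 |
26 | ```
27 | $ redis-cli -h redis-sentinel.${GRID}.kontena.local -a <password> ping
28 | ```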
--------------------------------------------------------------------------------
/redis-sentinel/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/redis-sentinel
2 | version: 0.1.0
3 | expose: lb
4 | description: Redis Sentinel Cluster with a Loadbalancer
5 | variables:
6 | node_password:
7 | type: string
8 | from:
9 | vault: ${STACK}-password
10 | random_string: 16
11 | to:
12 | vault: ${STACK}-password
13 | node_opts:
14 | type: string
15 | required: false
16 | from:
17 | prompt: "Additional redis-server command-line arguments (example: --appendonly yes)"
18 | node_mem_limit:
19 | type: integer
20 | default: 256
21 | from:
22 | prompt: "Node instance memory limit (MB)"
23 | monitor_mem_limit:
24 | type: integer
25 | default: 128
26 | from:
27 | prompt: "Monitor instance memory limit (MB)"
28 | lb_mem_limit:
29 | type: integer
30 | default: 128
31 | from:
32 | prompt: "Loadbalancer instance memory limit (MB)"
33 | node_max_memory:
34 | type: integer
35 | from:
36 | evaluate: "${node_mem_limit} * 1024 * 1024 * 0.95"
37 | node_data_name:
38 | type: string
39 | default: "${STACK}-data"
40 | from:
41 | prompt: Redis data volume name
42 | monitor_data_name:
43 | type: string
44 | default: "${STACK}-monitor"
45 | from:
46 | prompt: Redis monitor volume name
47 | services:
48 | node:
49 | image: redis:3.0-alpine
50 | instances: 5
51 | stateful: true
52 | command: "/bin/sh -c 'redis-server --maxmemory {{ node_max_memory }} --requirepass $$REDIS_SENTINEL_PASSWORD --masterauth $$REDIS_SENTINEL_PASSWORD {{ node_opts }}'"
53 | mem_limit: "{{ node_mem_limit }}m"
54 | health_check:
55 | protocol: tcp
56 | port: 6379
57 | initial_delay: 60
58 | interval: 60
59 | secrets:
60 | - secret: ${STACK}-password
61 | name: REDIS_SENTINEL_PASSWORD
62 | type: env
63 | environment:
64 | KONTENA_LB_MODE: tcp
65 | KONTENA_LB_EXTERNAL_PORT: 6379
66 | KONTENA_LB_INTERNAL_PORT: 6379
67 | KONTENA_LB_CUSTOM_SETTINGS: |
68 | option tcp-check
69 | tcp-check connect
70 | tcp-check send AUTH\ {{ node_password }}\r\n
71 | tcp-check expect string +OK
72 | tcp-check send PING\r\n
73 | tcp-check expect string +PONG
74 | tcp-check send info\ replication\r\n
75 | tcp-check expect string role:master
76 | tcp-check send QUIT\r\n
77 | tcp-check expect string +OK
78 | links:
79 | - lb
80 | volumes:
81 | - node-data:/data
82 | monitor:
83 | image: kontena/redis-sentinel:3.0-alpine
84 | instances: 5
85 | mem_limit: "{{ monitor_mem_limit }}m"
86 | deploy:
87 | wait_for_port: 26379
88 | health_check:
89 | protocol: tcp
90 | port: 26379
91 | initial_delay: 300
92 | interval: 60
93 | secrets:
94 | - secret: ${STACK}-password
95 | name: PASSWORD
96 | type: env
97 | environment:
98 | - MASTER_NAME=${STACK}
99 | - QUORUM=3
100 | - DOWN_AFTER=5000
101 | - FAILOVER_TIMEOUT=30000
102 | - MASTER=node-1
103 | - SLAVES=node-2;node-3;node-4;node-5
104 | depends_on:
105 | - node
106 | volumes:
107 | - monitor-data:/data
108 | lb:
109 | image: kontena/lb:latest
110 | instances: 2
111 | mem_limit: "{{ lb_mem_limit }}m"
112 | volumes:
113 | node-data:
114 | external:
115 | name: {{ node_data_name }}
116 | monitor-data:
117 | external:
118 | name: {{ monitor_data_name }}
--------------------------------------------------------------------------------
/redis/README.md:
--------------------------------------------------------------------------------
1 | # Redis on Kontena
2 |
3 | [Redis](https://redis.io) is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start).
7 |
8 | `$ kontena stack install kontena/redis`
9 |
10 | This will deploy a stateful Redis database to your grid. Other services can connect to it at the `redis://redis.${GRID}.kontena.local:6379` address.
11 |
12 | **Note:** Replace `${GRID}` with your grid name.
13 |
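14 | An illustrative connectivity check from inside the grid:
15 |
16 | ```
17 | $ redis-cli -h redis.${GRID}.kontena.local ping
18 | ```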
--------------------------------------------------------------------------------
/redis/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/redis
2 | description: Stateful Redis database
3 | version: 0.1.0
4 | expose: db
5 | variables:
6 | version:
7 | type: string
8 | default: 3.2-alpine
9 | from:
10 | env: VERSION
11 | prompt: Redis version?
12 | services:
13 | db:
14 | image: 'redis:${version}'
15 | stateful: true
16 |
--------------------------------------------------------------------------------
/stolon/README.md:
--------------------------------------------------------------------------------
1 | # stolon on Kontena
2 |
3 | [stolon](https://github.com/sorintlab/stolon/) is a cloud native PostgreSQL manager for PostgreSQL high availability. It's cloud native because it lets you keep a highly available PostgreSQL cluster inside your containers.
4 |
5 | ## Install
6 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](http://www.kontena.io/docs/getting-started/quick-start).
7 |
8 | ```
9 | $ kontena volume create --driver local --scope instance stolon-keeper
10 | $ kontena stack install kontena/stolon
11 | ```
12 |
13 | This will deploy a stolon (PostgreSQL) cluster to your grid. Other services can connect to it at the `stolon.${GRID}.kontena.local:5432` address. The stolon proxy at that address makes sure that clients always connect to the current database master instance.
14 |
15 | **Note:** Replace `${GRID}` with your grid name.
16 |
17 | ## Administration
18 |
19 | Access `stolonctl` via interactive exec:
20 |
21 | ```
22 | $ kontena service exec --shell -it stolon/keeper 'stolonctl status --cluster-name $STKEEPER_CLUSTER_NAME --store-backend $STKEEPER_STORE_BACKEND --store-endpoints $STKEEPER_STORE_ENDPOINTS'
23 | ```
24 |
25 | Access `psql` (master node) via interactive exec:
26 |
27 | ```
28 | $ kontena service exec -it --shell stolon/keeper 'PGPASSWORD=$STKEEPER_PG_SU_PASSWORD psql --host proxy --username stolon postgres'
29 | ```
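30 |
31 | Applications can connect through the proxy with a standard PostgreSQL URI (an illustrative form; the superuser password is stored in the Vault secret `stolon-su-password`):
32 |
33 | ```
34 | postgres://stolon:<su_password>@stolon.${GRID}.kontena.local:5432/postgres
35 | ```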
--------------------------------------------------------------------------------
/stolon/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/stolon
2 | version: 0.1.1
3 | description: PostgreSQL cluster for high availability
4 | expose: proxy
5 | variables:
6 | affinity:
7 | type: string
8 | default: label!=no-stolon
9 | from:
10 | prompt: Affinity
11 | repl_password:
12 | type: string
13 | from:
14 | vault: ${STACK}-repl-password
15 | random_string: 24
16 | to:
17 | vault: ${STACK}-repl-password
18 | su_password:
19 | type: string
20 | from:
21 | vault: ${STACK}-su-password
22 | random_string: 24
23 | to:
24 | vault: ${STACK}-su-password
25 | sentinel_mem_limit:
26 | type: integer
27 | default: 64
28 | from:
29 | prompt: Sentinel mem_limit (megabytes)
30 | proxy_mem_limit:
31 | type: integer
32 | default: 128
33 | from:
34 | prompt: Proxy mem_limit (megabytes)
35 | services:
36 | keeper:
37 | image: sorintlab/stolon:v0.6.0-pg9.6
38 | stateful: true
39 | instances: 3
40 | command: /bin/sh -c "mkdir -p $$STOLON_DATA && chown stolon:stolon $$STOLON_DATA && STKEEPER_PG_LISTEN_ADDRESS=$(hostname -i) STKEEPER_UID=\"keeper_$${KONTENA_SERVICE_INSTANCE_NUMBER}\" gosu stolon stolon-keeper --data-dir $$STOLON_DATA"
41 | affinity:
42 | - {{ affinity }}
43 | secrets:
44 | - secret: ${STACK}-repl-password
45 | name: STKEEPER_PG_REPL_PASSWORD
46 | type: env
47 | - secret: ${STACK}-su-password
48 | name: STKEEPER_PG_SU_PASSWORD
49 | type: env
50 | environment:
51 | STKEEPER_CLUSTER_NAME: ${STACK}
52 | STKEEPER_STORE_BACKEND: etcd
53 | STKEEPER_STORE_ENDPOINTS: http://etcd.kontena.local:2379
54 | STKEEPER_PG_REPL_USERNAME: repluser
55 | STKEEPER_PG_SU_USERNAME: stolon
56 | STKEEPER_LISTEN_ADDRESS: "0.0.0.0"
57 | STOLON_DATA: /data/stolon
58 | hooks:
59 | post_start:
60 | - name: init
61 | cmd: sleep 10 && stolonctl init --cluster-name $$STKEEPER_CLUSTER_NAME --store-backend $$STKEEPER_STORE_BACKEND --store-endpoints $$STKEEPER_STORE_ENDPOINTS -y
62 | instances: 1
63 | oneshot: true
64 | volumes:
65 | - keeper:/data
66 |
67 | sentinel:
68 | image: sorintlab/stolon:v0.6.0-pg9.6
69 | command: gosu stolon stolon-sentinel
70 | instances: 3
71 | mem_limit: "{{ sentinel_mem_limit }}m"
72 | affinity:
73 | - {{ affinity }}
74 | environment:
75 | STSENTINEL_CLUSTER_NAME: ${STACK}
76 | STSENTINEL_STORE_BACKEND: etcd
77 | STSENTINEL_STORE_ENDPOINTS: http://etcd.kontena.local:2379
78 | depends_on:
79 | - keeper
80 |
81 | proxy:
82 | image: sorintlab/stolon:v0.6.0-pg9.6
83 | command: gosu stolon stolon-proxy
84 | instances: 3
85 | mem_limit: "{{ proxy_mem_limit }}m"
86 | affinity:
87 | - {{ affinity }}
88 | deploy:
89 | wait_for_port: 5432
90 | environment:
91 | STPROXY_CLUSTER_NAME: ${STACK}
92 | STPROXY_STORE_BACKEND: etcd
93 | STPROXY_STORE_ENDPOINTS: http://etcd.kontena.local:2379
94 | STPROXY_LISTEN_ADDRESS: "0.0.0.0"
95 | depends_on:
96 | - keeper
97 | - sentinel
98 |
99 | volumes:
100 | keeper:
101 | external:
102 | name: ${STACK}-keeper
103 |
--------------------------------------------------------------------------------
/wordpress-cluster/README.md:
--------------------------------------------------------------------------------
1 | # WordPress Cluster on Kontena
2 |
3 | [WordPress](https://wordpress.org/) is open source software you can use to create a beautiful website, blog, or app.
4 |
5 | ## Install
6 |
7 | > Prerequisites: You need to have a working [Kontena](http://www.kontena.io) Platform installed. If you are new to Kontena, check the [quick start guide](https://www.kontena.io/docs/quick-start.html).
8 |
9 |
10 | ### Create Volume Configurations
11 |
12 | ```
13 | $ kontena volume create --driver local --scope instance wordpress-cluster-mysql
14 | $ kontena volume create --driver local --scope stack wordpress-cluster-data
15 | ```
16 |
17 | ### Create Resilio Sync Secret
18 |
19 | ```
20 | $ docker run -it --rm nimmis/resilio-sync rslsync --generate-secret
21 | ```
22 |
23 | ### Install Load Balancer
24 |
25 | ```
26 | $ kontena stack install kontena/ingress-lb
27 | ```
28 |
29 | ### Deploy WordPress Cluster Stack
30 |
31 | ```
32 | $ kontena stack install kontena/wordpress-cluster
33 | ```
34 |
35 | This will deploy:
36 | - a [MariaDB Galera](https://github.com/severalnines/galera-docker-mariadb) cluster with an internal load balancer
37 | - [Resilio Sync](https://github.com/nimmis/docker-resilio-sync) for syncing WordPress uploads across the cluster
38 | - a WordPress cluster
39 |
40 | Installation can be finished in the browser (via a node's public IP or the configured virtual host).
41 |
42 |
43 | ## Uninstall
44 |
45 | ```
46 | $ kontena stack rm wordpress-cluster
47 | $ kontena volume rm wordpress-cluster-mysql
48 | $ kontena volume rm wordpress-cluster-data
49 | $ kontena etcd rm --recursive /galera/wordpress-cluster
50 | ```
--------------------------------------------------------------------------------
/wordpress-cluster/kontena.yml:
--------------------------------------------------------------------------------
1 | stack: kontena/wordpress-cluster
2 | version: 0.2.0
3 | description: Example WordPress cluster stack
4 | variables:
5 | loadbalancer:
6 | type: string
7 | required: true
8 | from:
9 | env: LOADBALANCER
10 | service_link:
11 | prompt: Choose a loadbalancer
12 | image: kontena/lb
13 | virtual_host:
14 | type: string
15 | required: false
16 | from:
17 | env: VIRTUALHOST
18 | prompt: Virtualhost (leave blank if none)
19 | mariadb_affinity:
20 | type: string
21 | default: label!=no-mariadb
22 | from:
23 | env: MARIADB_AFFINITY
24 | prompt: Affinity rule for MariaDB Galera
25 | wp_affinity:
26 | type: string
27 | default: label!=no-wordpress
28 | from:
29 | env: WORDPRESS_AFFINITY
30 | prompt: Affinity rule for WordPress
31 | mysql_root_pwd:
32 | type: string
33 | from:
34 | vault: ${STACK}-mysql-pwd
35 | random_string: 24
36 | to:
37 | vault: ${STACK}-mysql-pwd
38 | mysql_xtrabackup_pwd:
39 | type: string
40 | from:
41 | vault: ${STACK}-xtrabackup-pwd
42 | random_string: 24
43 | to:
44 | vault: ${STACK}-xtrabackup-pwd
45 | resilio_secret:
46 | type: string
47 | from:
48 | vault: ${STACK}-resilio-secret
49 | env: RESILIO_SECRET
50 | prompt: Sync secret (generate with 'docker run -it --rm nimmis/resilio-sync rslsync --generate-secret')
51 | to:
52 | vault: ${STACK}-resilio-secret
53 | wp_secret:
54 | type: string
55 | from:
56 | vault: ${STACK}-secret
57 | env: WORDPRESS_SECRET
58 | random_string: 24
59 | to:
60 | vault: ${STACK}-secret
61 |
62 | services:
63 | mariadb-lb:
64 | image: kontena/lb:latest
65 | instances: 2
66 | mem_limit: 128m
67 | affinity:
68 | - "{{ mariadb_affinity }}"
69 | mariadb-galera:
70 | image: severalnines/mariadb:latest
71 | instances: 3
72 | affinity:
73 | - "{{ mariadb_affinity }}"
74 | deploy:
75 | wait_for_port: 3306
76 | secrets:
77 | - secret: ${STACK}-mysql-pwd
78 | name: MYSQL_ROOT_PASSWORD
79 | type: env
80 | - secret: ${STACK}-xtrabackup-pwd
81 | name: XTRABACKUP_PASSWORD
82 | type: env
83 | environment:
84 | DISCOVERY_SERVICE: etcd.kontena.local:2379
85 | CLUSTER_NAME: "${STACK}"
86 | KONTENA_LB_MODE: tcp
87 | KONTENA_LB_INTERNAL_PORT: 3306
88 | KONTENA_LB_EXTERNAL_PORT: 3306
89 | links:
90 | - mariadb-lb
91 | volumes:
92 | - mysql:/var/lib/mysql
93 |
94 | resilio:
95 | image: nimmis/resilio-sync:latest
96 | mem_limit: 256m
97 | deploy:
98 | strategy: daemon
99 | affinity:
100 | - "{{ wp_affinity }}"
101 | secrets:
102 | - secret: ${STACK}-resilio-secret
103 | name: RSLSYNC_SECRET
104 | type: env
105 | volumes:
106 | - wordpress:/data
107 |
108 | wordpress:
109 | image: wordpress:4.8
110 | instances: 3
111 | mem_limit: 512m
112 | affinity:
113 | - "{{ wp_affinity }}"
114 | secrets:
115 | - secret: ${STACK}-mysql-pwd
116 | name: WORDPRESS_DB_PASSWORD
117 | type: env
118 | - secret: ${STACK}-secret
119 | name: WORDPRESS_AUTH_KEY
120 | type: env
121 | - secret: ${STACK}-secret
122 | name: WORDPRESS_SECURE_AUTH_KEY
123 | type: env
124 | - secret: ${STACK}-secret
125 | name: WORDPRESS_LOGGED_IN_KEY
126 | type: env
127 | - secret: ${STACK}-secret
128 | name: WORDPRESS_NONCE_KEY
129 | type: env
130 | - secret: ${STACK}-secret
131 | name: WORDPRESS_AUTH_SALT
132 | type: env
133 | - secret: ${STACK}-secret
134 | name: WORDPRESS_SECURE_AUTH_SALT
135 | type: env
136 | - secret: ${STACK}-secret
137 | name: WORDPRESS_LOGGED_IN_SALT
138 | type: env
139 | - secret: ${STACK}-secret
140 | name: WORDPRESS_NONCE_SALT
141 | type: env
142 | environment:
143 | KONTENA_LB_MODE: http
144 | KONTENA_LB_BALANCE: source
145 | KONTENA_LB_INTERNAL_PORT: 80
146 | # {% if virtual_host %}
147 | KONTENA_LB_VIRTUAL_HOSTS: "{{ virtual_host }}"
148 | # {% endif %}
149 | WORDPRESS_DB_HOST: mariadb-lb
150 | volumes:
151 | - wordpress:/var/www/html
152 | depends_on:
153 | - mariadb-galera
154 | - resilio
155 | links:
156 | - {{ loadbalancer }}
157 |
158 | volumes:
159 | mysql: # scope=instance
160 | external:
161 | name: ${STACK}-mysql
162 | wordpress: # scope=stack
163 | external:
164 | name: ${STACK}-data
--------------------------------------------------------------------------------
/zookeeper/README.md:
--------------------------------------------------------------------------------
1 | HA Zookeeper cluster on Kontena
2 | ===============================
3 |
4 | [Zookeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.
5 |
6 | (based on Kafka/Zookeeper stack used in [HHypermap BOP -- Harvard Hypermap, Billion Object Platform](https://github.com/cga-harvard/hhypermap-bop))
7 |
8 | ## Install
9 |
10 | > Prerequisites: You need to have a working [Kontena](https://www.kontena.io) Container Platform installed. If you are new to Kontena, check the [quick start guide](https://www.kontena.io/docs/quick-start).
11 |
12 |
13 | Zookeeper is a stateful service, therefore you must first create a Kontena volume. For a local volume run the following command:
14 |
15 | ```
16 | $ kontena volume create --scope instance --driver local zookeeper-cluster-data
17 | ```
18 |
19 | For local development purposes you can skip volume creation by using the `SKIP_VOLUMES` variable.
20 |
21 | Next install the stack itself. There are a few options available:
22 |
23 | | Option | Description |
24 | | -------| ------------|
25 | | `NUM_INSTANCES` | Number of instances of Zookeeper. Default is 3. |
26 | | `SKIP_VOLUMES` | Boolean, if true no volumes are mapped. Useful for local development. Defaults to `false` |
27 |
28 | Generally, the default values are good for a basic cluster setup.
29 |
30 | To initially install:
31 |
32 | ```
33 | $ kontena stack install
34 | ```
35 |
36 | To upgrade:
37 |
38 | ```
39 | $ kontena stack upgrade zookeeper-cluster
40 | ```
41 |
42 | Other services inside your Kontena Grid can now connect to Zookeeper using the address `zookeeper.zookeeper-cluster.${GRID}.kontena.local`.
43 |
44 | ## Caveats
45 |
46 | Due to the way Zookeeper works, Kontena service scaling via the `kontena service scale` command will not work properly. This is due to the `ZOO_SERVERS` variable requiring all available Zookeeper host names to be present. In order to scale Zookeeper up and down, please run `kontena stack upgrade` and specify the new cluster size via the `NUM_INSTANCES` environment variable or CLI prompt. Doing this will allow the stack file's Liquid templates to rerun and set the correct `ZOO_SERVERS` value.
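47 |
48 | For example, to grow the cluster to five nodes (illustrative):
49 |
50 | ```
51 | $ NUM_INSTANCES=5 kontena stack upgrade zookeeper-cluster
52 | ```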
47 |
--------------------------------------------------------------------------------
/zookeeper/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zookeeper:3.4
2 |
3 | # note: zk default interval is 0 (none)
4 | ENV ZOO_AUTOPURGE_SNAP_RETAIN_COUNT=3 \
5 | ZOO_AUTOPURGE_PURGE_INTERVAL=24
6 |
7 | COPY zookeeper.docker-entrypoint.sh /docker-entrypoint.sh
8 | RUN ["chmod", "+x", "/docker-entrypoint.sh"]
--------------------------------------------------------------------------------
/zookeeper/docker/zookeeper.docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Copied from HHypermap BOP
4 | # See https://github.com/cga-harvard/hhypermap-bop/blob/master/kafka/zookeeper.docker-entrypoint.sh
5 |
6 | set -e
7 |
8 | # Allow the container to be started with `--user`
9 | if [ "$1" = 'zkServer.sh' -a "$(id -u)" = '0' ]; then
10 | chown -R "$ZOO_USER" "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR"
11 | exec su-exec "$ZOO_USER" "$0" "$@"
12 | fi
13 |
14 | # Generate the config only if it doesn't exist
15 | if [ ! -f "$ZOO_CONF_DIR/zoo.cfg" ]; then
16 | CONFIG="$ZOO_CONF_DIR/zoo.cfg"
17 |
18 | echo "clientPort=$ZOO_PORT" >> "$CONFIG"
19 | echo "dataDir=$ZOO_DATA_DIR" >> "$CONFIG"
20 | echo "dataLogDir=$ZOO_DATA_LOG_DIR" >> "$CONFIG"
21 |
22 | echo "tickTime=$ZOO_TICK_TIME" >> "$CONFIG"
23 | echo "initLimit=$ZOO_INIT_LIMIT" >> "$CONFIG"
24 | echo "syncLimit=$ZOO_SYNC_LIMIT" >> "$CONFIG"
25 |
26 | echo "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAP_RETAIN_COUNT" >> "$CONFIG"
27 | echo "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGE_INTERVAL" >> "$CONFIG"
28 |
29 | for server in $ZOO_SERVERS; do
30 | echo "$server" >> "$CONFIG"
31 | done
32 | fi
33 |
34 | # Write myid only if it doesn't exist
35 | if [ ! -f "$ZOO_DATA_DIR/myid" ]; then
36 | defId="${KONTENA_SERVICE_INSTANCE_NUMBER:-1}"
37 | echo "${ZOO_MY_ID:-$defId}" > "$ZOO_DATA_DIR/myid"
38 | fi
39 |
40 | exec "$@"
--------------------------------------------------------------------------------
/zookeeper/kontena.yml:
--------------------------------------------------------------------------------
1 | ---
2 | stack: kontena/zookeeper-cluster
3 | version: 1.0.1
4 | description: Zookeeper cluster based on Harvard Hypermap
5 | expose: zookeeper
6 | variables:
7 | num_instances:
8 | type: integer
9 | min: 1
10 | default: 3
11 | from:
12 | env: NUM_INSTANCES
13 | prompt: number of instances of Zookeeper?
14 | skip_volumes:
15 | type: boolean
16 | default: false
17 | from:
18 | env: SKIP_VOLUMES
19 |
20 | services:
21 | zookeeper:
22 | image: kontena/zookeeper:3.4
23 | stateful: true
24 | # {% unless skip_volumes %}
25 | volumes:
26 | - zookeeper-data:/var/lib/zookeeper
27 | # {% endunless %}
28 | instances: ${num_instances}
29 | deploy:
30 | wait_for_port: 2181
31 | min_health: 0.5
32 | environment:
33 | # {% assign zookeeper_servers = "" %}
34 | # {% for i in (1..num_instances) %}
35 | # {% capture server %}server.{{ i }}=zookeeper-{{ i }}.{{ STACK }}.{{ GRID }}.kontena.local:2888:3888 {% endcapture %}
36 | # {% assign zookeeper_servers = zookeeper_servers | append: server %}
37 | # {% endfor %}
38 | ZOO_SERVERS: {{ zookeeper_servers }}
39 | ZOO_AUTOPURGE_PURGE_INTERVAL: 24
40 | JMXPORT: 9999
41 | SERVER_JVMFLAGS: -Xmx500M -XX:+CrashOnOutOfMemoryError
42 | health_check:
43 | protocol: tcp
44 | port: 2181
45 | timeout: 10
46 | interval: 300
47 | initial_delay: 10
48 |
49 | # {% unless skip_volumes %}
50 | volumes:
51 | zookeeper-data:
52 | external:
53 | name: ${STACK}-data
54 | # {% endunless %}
--------------------------------------------------------------------------------