├── .circleci
│   └── config.yml
├── .gitignore
├── consul
│   └── Dockerfile
├── dind-compose
│   ├── Dockerfile
│   ├── Dockerfile.erlang
│   └── README.md
├── dummy
│   ├── Dockerfile
│   └── dummy_app.py
├── elastalert
│   ├── Dockerfile
│   └── docker-entrypoint.sh
├── elasticsearch-curator
│   ├── 2
│   │   └── Dockerfile
│   └── 5
│       └── Dockerfile
├── elasticsearch
│   └── 5
│       └── Dockerfile
├── ganglia-frontend
│   ├── Dockerfile
│   ├── README.md
│   └── bin
│       └── entry
├── grafana
│   ├── .env
│   ├── Dockerfile
│   ├── README.md
│   ├── conf
│   │   └── grafana.ini
│   ├── docker-compose.yaml
│   └── testdata
│       └── grafana.sql
├── haproxy-consul-template
│   ├── Dockerfile
│   └── entry
├── httpbin
│   └── Dockerfile
├── java11
│   └── Dockerfile
├── java7
│   ├── Dockerfile
│   ├── README.md
│   ├── java7.debconf
│   └── java7.list
├── java8-bionic-zulu
│   └── Dockerfile
├── java8
│   └── Dockerfile
├── logstash
│   └── Dockerfile
├── mongodb
│   ├── Dockerfile
│   ├── mongod.conf
│   └── run.sh
├── multibinder-haproxy-consul
│   ├── Dockerfile
│   ├── README.md
│   ├── config
│   │   ├── common.hcl
│   │   └── haproxy.hcl
│   └── entrypoint.sh
├── multibinder-haproxy
│   ├── Dockerfile
│   ├── README.md
│   └── entrypoint.sh
├── multibinder
│   ├── Dockerfile
│   ├── README.md
│   └── entrypoint.sh
├── notebook-mysql
│   ├── Dockerfile
│   └── requirements.txt
├── opentsdb
│   └── Dockerfile
├── spark
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── spark2.0
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── spark2.1
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── spark2.4
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── spark3.1
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── spark3.2
│   ├── Dockerfile
│   ├── core-site.xml
│   ├── hdfs-site.xml
│   ├── spark-defaults.conf
│   └── spark-env.sh
├── splash-dbg
│   ├── Dockerfile
│   └── Makefile
├── squid-deb-proxy
│   ├── Dockerfile
│   └── run.sh
├── vsftpd
│   ├── Dockerfile
│   ├── run-vsftpd.sh
│   ├── vsftpd.conf
│   └── vsftpd_virtual
├── zabbix-frontend-php
│   ├── Dockerfile
│   ├── apache.conf
│   ├── php.ini
│   └── run.sh
├── zabbix-java-gateway
│   ├── Dockerfile
│   └── run.sh
└── zabbix-server-mysql
    ├── Dockerfile
    └── run.sh
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | jobs:
4 |   build-and-publish:
5 |     docker:
6 |       - image: cimg/python:3.7
7 |     steps:
8 |       - checkout
9 |       - setup_remote_docker:
10 |           docker_layer_caching: true
11 |       - run:
12 |           name: Publish Grafana Docker Image
13 |           command: |
14 |             set -ueo pipefail
15 |             cd grafana
16 |             . .env
17 |             export GRAFANA_VERSION
18 |             TARGET="images.scrapinghub.com/docker-images/grafana:${GRAFANA_VERSION}-${CIRCLE_BRANCH}"
19 |
20 |             docker login \
21 |               -u "${DOCKER_USER}" \
22 |               -p "${DOCKER_PASS}" \
23 |               "${TARGET%%/*}"
24 |             docker build --build-arg GRAFANA_VERSION -t "${TARGET}" .
25 |             docker push "${TARGET}"
26 |
27 | workflows:
28 |   version: 2
29 |   build-and-publish-workflow:
30 |     jobs:
31 |       - build-and-publish:
32 |           context:
33 |             - Internal
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | grafana/testdata/grafana.sql
2 |
--------------------------------------------------------------------------------
/consul/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | EXPOSE 8500
3 | RUN apt-get update -qq && apt-get -qy install unzip
4 | ADD https://dl.bintray.com/mitchellh/consul/0.4.1_linux_amd64.zip /consul.zip
5 | ADD https://dl.bintray.com/mitchellh/consul/0.4.1_web_ui.zip /consul-ui.zip
6 | RUN unzip -d /usr/local/bin/ /consul.zip &&\
7 | unzip -d /consul-ui /consul-ui.zip
8 | ENTRYPOINT ["consul"]
9 | VOLUME /consul-data
10 | VOLUME /etc/consul.d
11 | CMD ["agent", "-server", "-ui-dir", "/consul-ui", "-data-dir", "/consul-data", "-bootstrap-expect", "1", "-config-dir", "/etc/consul.d"]
12 |
--------------------------------------------------------------------------------
/dind-compose/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker:20.10.17 as builder
2 |
3 | WORKDIR /build
4 |
5 | RUN apk add git make go
6 | RUN git clone https://github.com/docker/compose -b v2.6.1 \
7 | && cd compose \
8 | && make -f builder.Makefile compose-plugin
9 |
10 |
11 | FROM docker:20.10.17
12 |
13 | RUN apk add --no-cache py3-pip python3 make bash git curl
14 |
15 | COPY --from=builder /build/compose/bin/docker-compose /bin/
16 |
--------------------------------------------------------------------------------
/dind-compose/Dockerfile.erlang:
--------------------------------------------------------------------------------
1 | FROM docker:20.10.22-dind-alpine3.17
2 |
3 | RUN apk update && apk --no-cache --update add \
4 | py3-pip \
5 | python3 \
6 | make \
7 | bash \
8 | git \
9 | curl \
10 | lksctp-tools \
11 | ca-certificates \
12 | erlang
13 |
14 | RUN curl -s -L \
15 | -o /usr/local/bin/rebar3 \
16 | https://github.com/erlang/rebar3/releases/download/3.20.0/rebar3 && \
17 | chmod +x /usr/local/bin/rebar3
18 |
--------------------------------------------------------------------------------
/dind-compose/README.md:
--------------------------------------------------------------------------------
1 | Build images
2 | ============
3 |
4 | We are using docker-compose images to run builds on k8s.
5 |
6 |
7 | Build image
8 | -----------
9 |
10 | ```
11 | $ docker build -f Dockerfile . -t images.scrapinghub.com/infra/dind-compose:20.10.17
12 | $ docker build -f Dockerfile.erlang . -t images.scrapinghub.com/infra/dind-compose-erlang:20.10.22
13 | ```
14 |
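15 | One way to sanity-check a freshly built image (tag as in the build commands above) is to run the bundled Compose binary and confirm it prints its version:
16 |
17 | ```
18 | $ docker run --rm images.scrapinghub.com/infra/dind-compose:20.10.17 docker-compose version
19 | ```
20 |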
--------------------------------------------------------------------------------
/dummy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-alpine
2 |
3 | ADD dummy_app.py /dummy_app.py
4 |
5 | # Exec form runs the app as PID 1 so it receives SIGTERM/SIGINT directly
6 | CMD ["python", "/dummy_app.py"]
--------------------------------------------------------------------------------
/dummy/dummy_app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding: UTF-8
3 |
4 | import logging
5 | import sys
6 | import signal
7 | import time
8 | stop = False
9 |
10 | def cb_sigint(sig, *a):
11 |     # Signal handler: log the signal and flag the main loop to exit
12 |     global stop
13 |     logging.info('Got signal %s', sig)
14 |     stop = True
15 |
16 | def main():
17 |     signal.signal(signal.SIGINT, cb_sigint)
18 |     signal.signal(signal.SIGTERM, cb_sigint)
19 |     while not stop:
20 |         logging.info('hello')
21 |         time.sleep(10)
22 |
23 | def setup_logging(level=logging.INFO):
24 |     kw = {
25 |         'format': '[%(asctime)s][%(module)s]: %(message)s',
26 |         'datefmt': '%m/%d/%Y %H:%M:%S',
27 |         'level': level,
28 |         'stream': sys.stdout
29 |     }
30 |
31 |     logging.basicConfig(**kw)
32 |
33 | if __name__ == '__main__':
34 |     setup_logging()
35 |     main()
36 |
--------------------------------------------------------------------------------
/elastalert/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6
2 |
3 | # Add user and group to run process
4 | RUN groupadd -r elastalert && useradd -r -m -g elastalert elastalert
5 |
6 | # Install wget
7 | RUN apt-get update && \
8 | apt-get install -y --no-install-recommends wget && \
9 | rm -rf /var/lib/apt/lists/*
10 |
11 | # grab gosu for easy step-down from root
12 | ENV GOSU_VERSION 1.11
13 | RUN set -x \
14 | && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)" \
15 | && wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc" \
16 | && export GNUPGHOME="$(mktemp -d)" \
17 | && gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \
18 | && gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \
19 | && rm -r "$GNUPGHOME" /usr/local/bin/gosu.asc \
20 | && chmod +x /usr/local/bin/gosu \
21 | && gosu nobody true
22 |
23 | # Install elastalert
24 | RUN pip install elastalert==0.2.1
25 |
26 | # The ElastAlert version pinned before uses elasticsearch==7 which is not
27 | # fully compatible with Elasticsearch 5. The `elasticsearch` library should
28 | # match the version of the Elasticsearch cluster when possible.
29 | RUN pip install --force-reinstall 'elasticsearch>=5.0.0,<6.0.0'
30 |
31 | COPY docker-entrypoint.sh /
32 |
33 | ENTRYPOINT ["/docker-entrypoint.sh"]
34 |
--------------------------------------------------------------------------------
/elastalert/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -xe
3 |
4 | # If the first argument is a flag, prepend the elastalert command
5 | if [[ "$1" == -* ]]; then
6 | set -- elastalert "$@"
7 | fi
8 |
9 | # Run as user "elastalert" if the command is "elastalert"
10 | if [ "$1" = 'elastalert' -a "$(id -u)" = '0' ]; then
11 | set -- gosu elastalert "$@"
12 | fi
13 |
14 | exec "$@"
15 |
--------------------------------------------------------------------------------
/elasticsearch-curator/2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | RUN apt-get update -q && apt-get install -qy python-pip
3 | RUN pip install elasticsearch-curator==2.1.2
4 | ENTRYPOINT ["/usr/local/bin/curator"]
5 |
--------------------------------------------------------------------------------
/elasticsearch-curator/5/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 | RUN apt-get update -q && apt-get install -qy python-pip
3 | RUN pip install elasticsearch-curator==5.8.3
4 | ENTRYPOINT ["/usr/local/bin/curator"]
5 |
--------------------------------------------------------------------------------
/elasticsearch/5/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM elasticsearch:5.3.2
2 |
3 | # Install x-pack
4 | RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install x-pack
5 |
6 |
--------------------------------------------------------------------------------
/ganglia-frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | RUN sed 's/main$/main universe/' -i /etc/apt/sources.list && \
6 | apt-get update -q && \
7 | apt-get install -y gmetad ganglia-webfrontend && \
8 | a2dissite 000-default && \
9 | sed -r "s/Alias \/ganglia (.*)/Alias \/ \1\//" -i /etc/ganglia-webfrontend/apache.conf && \
10 | ln -s /etc/ganglia-webfrontend/apache.conf /etc/apache2/sites-available/000-ganglia.conf && \
11 | a2ensite 000-ganglia
12 |
13 | # Add the start script
14 | ADD bin/entry entry
15 |
16 | # entrypoint is the start script
17 | ENTRYPOINT ["bash", "entry"]
18 |
19 | # Default
20 | CMD []
21 |
--------------------------------------------------------------------------------
/ganglia-frontend/README.md:
--------------------------------------------------------------------------------
1 | scrapinghub/ganglia-frontend
2 | ============================
3 |
4 | Run the container with the default values (uses /etc/ganglia/gmetad.conf as the config):
5 |
6 | docker run -p 0.0.0.0:80:80 scrapinghub/ganglia-frontend
7 |
8 | To set a custom gmetad.conf file via the bind mount option:
9 |
10 | docker run -v /my/path/to/conf:/etc/ganglia scrapinghub/ganglia-frontend
11 |
12 | To set a custom gmetad.conf file via environment variable:
13 |
14 | docker run -e DEFAULT_CONFIG_FILE=/mnt/gmetad.conf scrapinghub/ganglia-frontend
15 |
16 | To set a custom gmetad.conf file via `--config` argument:
17 |
18 | docker run scrapinghub/ganglia-frontend --config /mnt/gmetad.conf
19 |
20 | To set a custom debug mode for gmetad, use `--debug` argument:
21 |
22 | docker run scrapinghub/ganglia-frontend --debug 9
23 |
24 | > Note: the environment variable overrides the default config file; the `--config` argument overrides all other values.
25 |
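26 | The options can be combined; for example, mounting a config directory and raising the debug level:
27 |
28 |     docker run -v /my/path/to/conf:/mnt scrapinghub/ganglia-frontend --config /mnt/gmetad.conf --debug 2
29 |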
--------------------------------------------------------------------------------
/ganglia-frontend/bin/entry:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # Default debug level (any value above 0 keeps gmetad in the foreground)
5 | DEBUG_MODE=1
6 | # Default config file
7 | CONFIG_FILE=/etc/ganglia/gmetad.conf
8 |
9 | if [[ $DEFAULT_CONFIG_FILE ]]; then
10 | CONFIG_FILE=$DEFAULT_CONFIG_FILE
11 | fi
12 |
13 | while true ; do
14 |     case "$1" in
15 |
16 |         --config)
17 |             shift
18 |             CONFIG_FILE=$1
19 |             shift
20 |             ;;
21 |
22 |         --debug)
23 |             shift
24 |             DEBUG_MODE=$1
25 |             shift
26 |             ;;
27 |
28 |         "")
29 |             break
30 |             ;;
31 |
32 |         *)
33 |             echo "Unknown argument: $1" >&2
34 |             exit 1
35 |             ;;
36 |
37 |     esac
38 | done
39 |
40 | # Avoid: "Please make sure that /var/lib/ganglia/rrds is owned by nobody"
41 | chown -R nobody /var/lib/ganglia/rrds
42 |
43 | service apache2 restart
44 |
45 | echo "Using configuration file: $CONFIG_FILE"
46 |
47 | # Start gmetad service on foreground
48 | gmetad -d $DEBUG_MODE -p /var/run/gmetad.pid -c $CONFIG_FILE
49 |
--------------------------------------------------------------------------------
/grafana/.env:
--------------------------------------------------------------------------------
1 | GRAFANA_VERSION="8.5.5"
2 |
--------------------------------------------------------------------------------
/grafana/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG GRAFANA_VERSION
2 | FROM "grafana/grafana:${GRAFANA_VERSION}"
3 |
4 | SHELL [ "/usr/bin/env", "bash", "-ueo", "pipefail", "-c"]
5 |
6 | # Use root to install
7 | USER root
8 |
9 | RUN apk --no-cache upgrade
10 | RUN apk add --quiet --no-cache \
11 | ca-certificates wget curl jq udev ttf-opensans chromium; \
12 | update-ca-certificates;
13 |
14 |
15 | # list of existing plugins
16 | # https://grafana.net/api/plugins?orderBy=name
17 | ENV PLUGINS="\
18 | alexanderzobnin-zabbix-app \
19 | grafana-azure-data-explorer-datasource \
20 | grafana-bigquery-datasource \
21 | grafana-github-datasource \
22 | grafana-googlesheets-datasource \
23 | grafana-iot-sitewise-datasource \
24 | grafana-strava-datasource \
25 | grafana-timestream-datasource \
26 | grafana-x-ray-datasource \
27 | marcusolsson-csv-datasource \
28 | oci-logs-datasource \
29 | oci-metrics-datasource \
30 | redis-datasource \
31 | vertamedia-clickhouse-datasource \
32 | vertica-grafana-datasource \
33 | "
34 |
35 | RUN \
36 | for plugin in ${PLUGINS}; do \
37 | grafana-cli plugins install "${plugin}" | \
38 | grep '^✔ Downloaded.*successfully' || \
39 | { echo "failed ${plugin}"; exit 1; }; \
40 | done
41 |
42 | # workaround https://github.com/sbueringer/grafana-consul-datasource/issues/29
43 | RUN grafana-cli \
44 | --pluginUrl https://github.com/sbueringer/grafana-consul-datasource/releases/download/v0.2.1/sbueringer-consul-datasource-0.2.1.zip \
45 | plugins install sbueringer-consul-datasource
46 |
47 | # picking upstream Grafana Image Renderer because it can be installed without chromium
48 | ENV GF_PLUGIN_RENDERING_CHROME_BIN="/usr/bin/chromium-browser"
49 | RUN grafana-cli \
50 | --pluginUrl https://github.com/grafana/grafana-image-renderer/releases/latest/download/plugin-linux-x64-glibc-no-chromium.zip \
51 | plugins install grafana-image-renderer
52 |
53 | # Run as regular user
54 | USER grafana
55 |
--------------------------------------------------------------------------------
/grafana/README.md:
--------------------------------------------------------------------------------
1 | ```
2 | # check GRAFANA_VERSION variable
3 | cat .env
4 | docker-compose build
5 | docker-compose up -d
6 | docker-compose logs -f
7 | # wait for 1 minute until mysql is ready
8 |
9 | ```
10 |
11 | Now you can go to `http://localhost:3000` and log in as `admin:admin`.
12 |
13 |
14 | If you put a mysqldump into `testdata/grafana.sql` and then start mysql, it will automatically import the dump.
15 |
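16 | One way to produce such a dump from the running stack (service name and credentials as defined in `docker-compose.yaml`):
17 |
18 | ```
19 | docker-compose exec mysql mysqldump -u root -pgrafana grafana > testdata/grafana.sql
20 | ```
21 |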
--------------------------------------------------------------------------------
/grafana/conf/grafana.ini:
--------------------------------------------------------------------------------
1 | #
2 | # Grafana Configuration
3 | #
4 |
5 | force_migration = true
6 |
7 | [alerting]
8 | enabled = true
9 |
10 | [analytics]
11 | reporting_enabled = false
12 |
13 | [auth]
14 | disable_login_form = false
15 | disable_signout_menu = true
16 |
17 | [auth.anonymous]
18 | enabled = false
19 |
20 | [auth.basic]
21 | enabled = true
22 |
23 |
24 | [security]
25 | admin_user = admin
26 | admin_password = admin
27 |
28 |
29 | [database]
30 | name = grafana
31 | type = mysql
32 | host = mysql
33 | user = grafana
34 | password = grafana
35 |
36 |
37 | [log]
38 | mode = console
39 | level = info
40 |
41 | [plugins]
42 | allow_loading_unsigned_plugins = alexanderzobnin-zabbix-datasource,grafana-strava-datasource,oci-datasource,sbueringer-consul-datasource,vertamedia-clickhouse-datasource,vertica-grafana-datasource
43 |
44 | [server]
45 | domain = localhost
46 | root_url = http://%(domain)s/
47 |
48 | [users]
49 | allow_sign_up = false
50 | allow_org_create = false
51 | auto_assign_org = true
52 | auto_assign_org_role = Editor
53 | viewers_can_edit = false
54 |
55 |
--------------------------------------------------------------------------------
/grafana/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.3"
2 | services:
3 |   grafana:
4 |     image: zyte/grafana:latest
5 |     build:
6 |       context: ./
7 |       dockerfile: Dockerfile
8 |       args:
9 |         GRAFANA_VERSION: "${GRAFANA_VERSION}"
10 |     volumes:
11 |       - ./conf/grafana.ini:/etc/grafana/grafana.ini:rw
12 |     ports:
13 |       - 3000:3000
14 |     depends_on:
15 |       - mysql
16 |   mysql:
17 |     image: mysql:8
18 |     command: --default-authentication-plugin=mysql_native_password
19 |     restart: on-failure
20 |     volumes:
21 |       - ./testdata/grafana.sql:/docker-entrypoint-initdb.d/grafana.sql:ro
22 |     ports:
23 |       - 33060:33060
24 |     healthcheck:
25 |       test: ["CMD", 'mysqladmin', 'ping', '-h', 'localhost', '-u', 'root', '-p$$MYSQL_ROOT_PASSWORD' ]
26 |       interval: 10s
27 |       timeout: 1s
28 |       retries: 6
29 |       start_period: 60s
30 |     environment:
31 |       MYSQL_ROOT_PASSWORD: grafana
32 |       MYSQL_PASSWORD: grafana
33 |       MYSQL_USER: grafana
34 |       MYSQL_DATABASE: grafana
35 |
--------------------------------------------------------------------------------
/grafana/testdata/grafana.sql:
--------------------------------------------------------------------------------
1 | -- put your Grafana db sqldump here for an initial provisioning on docker-compose up
2 |
--------------------------------------------------------------------------------
/haproxy-consul-template/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | # Install haproxy
6 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 505D97A41C61B9CD && \
7 | apt-get update && \
8 | apt-get install -y --no-install-recommends haproxy && \
9 | apt-get clean && \
10 | rm -rf /var/lib/apt/lists/* && \
11 | sed -i "s/ENABLED=0/ENABLED=1/" /etc/default/haproxy
12 |
13 | # Install consul-template
14 | ENV CONSUL_TEMPLATE_SHA256 f3eea6dcb480ba1f82cd14c6a8f7a739dc844ac067a3541cd186eb4f9920e4e3
15 |
16 | RUN deps='curl ca-certificates' && \
17 | apt-get update && apt-get install -y --no-install-recommends $deps && rm -rf /var/lib/apt/lists/* && \
18 | curl -sSL "https://releases.hashicorp.com/consul-template/0.18.5/consul-template_0.18.5_linux_amd64.tgz" -o consul-template.tar.gz && \
19 | echo "${CONSUL_TEMPLATE_SHA256} consul-template.tar.gz" | sha256sum -c && \
20 | tar -xzf consul-template.tar.gz -C /usr/local/bin --touch && \
21 | rm consul-template.tar.gz && \
22 | mkdir -p /etc/consul-template && \
23 | curl -o /usr/local/bin/filterproxy https://s3.amazonaws.com/scrapinghub-app-splash/filterproxy && \
24 | chmod 755 /usr/local/bin/filterproxy && \
25 | apt-get purge -y --auto-remove $deps
26 |
27 | # Start script
28 | ADD entry entry
29 | ENTRYPOINT ["bash","entry"]
30 |
--------------------------------------------------------------------------------
/haproxy-consul-template/entry:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 |
4 | CONSUL_CONFIG=${1:-/etc/consul-template}
5 |
6 | /usr/local/bin/filterproxy -l :$PORT_8050 -r localhost:$PORT_8051 &
7 |
8 | # Start consul-template service
9 | /usr/local/bin/consul-template -config=$CONSUL_CONFIG &
10 |
11 | # Naive liveness check: once a minute, verify that both processes are still running.
12 | # The container exits with an error if either process has exited;
13 | # otherwise it loops forever, waking up every 60 seconds.
14 |
15 | while sleep 60; do
16 | p1=$( ps aux | grep filterproxy | grep -v grep | wc -l );
17 | p2=$( ps aux | grep haproxy | grep -v grep | wc -l );
18 | if [ $p1 -ne 0 -a $p2 -ne 0 ]; then
19 | echo "processes are running"
20 | else
21 | echo "processes are not running"
22 | exit 1
23 | fi
24 | done
25 |
--------------------------------------------------------------------------------
/httpbin/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.5
2 | EXPOSE 8000
3 | RUN pip install gunicorn httpbin
4 | CMD gunicorn --bind=0.0.0.0:8000 httpbin:app
5 |
--------------------------------------------------------------------------------
/java11/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:20.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive \
4 | JAVA_HOME=/usr/lib/jvm/zulu11
5 |
6 | # python3 (and set it as default)
7 | RUN apt-get update -qq && \
8 | apt-get install -qy software-properties-common curl vim && \
9 | add-apt-repository -y ppa:deadsnakes/ppa && \
10 | apt-get update -qq && \
11 | apt-get install -y python3.12 python3.12-dev libpython3.12-dev && \
12 | curl -s -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
13 | apt-get install -y python3.12-distutils && \
14 | python3.12 /tmp/get-pip.py && rm /tmp/get-pip.py && \
15 | update-alternatives --install /usr/bin/python python /usr/bin/python3.12 3 && \
16 | update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 3
17 |
18 | # Maven
19 | RUN curl -k -sSL -o - https://dlcdn.apache.org/maven/maven-3/3.9.4/binaries/apache-maven-3.9.4-bin.tar.gz \
20 | | tar xzf - -C /usr/local/ \
21 | && ln -sf /usr/local/apache-maven-3.9.4/bin/mvn /usr/bin/mvn
22 |
23 | # Zulu Java 11
24 | # https://docs.azul.com/zulu/zuludocs/#ZuluUserGuide/InstallingZulu/InstallOnLinuxUsingAPTRepository.htm
25 | RUN apt-get update -qq && \
26 | apt-get install -y curl apt-transport-https && \
27 | curl -s https://repos.azul.com/azul-repo.key | gpg --dearmor -o /usr/share/keyrings/azul.gpg && \
28 | echo "deb [signed-by=/usr/share/keyrings/azul.gpg] https://repos.azul.com/zulu/deb stable main" > /etc/apt/sources.list.d/zulu.list && \
29 | apt-get update -qq && \
30 | apt-get install -y zulu11-jdk
31 |
32 | # Install lein
33 | RUN curl -sSL https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein -o /usr/bin/lein && \
34 | chmod +x /usr/bin/lein && cd /tmp && lein
35 |
36 | RUN apt-get update -qq && \
37 | apt-get install -qy sudo git vim curl wget telnet netcat unzip rlwrap net-tools && \
38 | apt-get install -qy \
39 | netbase ca-certificates apt-transport-https \
40 | build-essential \
41 | libxml2-dev libssl-dev libxslt1-dev \
42 | libmysqlclient-dev \
43 | libevent-dev \
44 | libffi-dev libssl-dev \
45 | libpcre3-dev libz-dev
46 |
47 | # Clean up for docker squash
48 | # See https://github.com/goldmann/docker-squash
49 | RUN rm -rf \
50 | /root/.cache \
51 | /root/.npm \
52 | /root/.pip \
53 | /usr/local/share/doc/* \
54 | /usr/share/doc/* \
55 | /usr/share/man/man*/* \
56 | /usr/share/vim/*/doc \
57 | /usr/share/vim/*/lang \
58 | /usr/share/vim/*/spell/en* \
59 | /usr/share/vim/*/tutor \
60 | /var/lib/apt/lists/* \
61 | /tmp/*
62 |
--------------------------------------------------------------------------------
/java7/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | ENV DEBIAN_FRONTEND noninteractive
3 |
4 | # Setup Java
5 | # ==========
6 |
7 | # Add webupd8team apt repo & signing key
8 | ADD ./java7.list /etc/apt/sources.list.d/java7.list
9 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 7B2C3B0889BF5709A105D03AC2518248EEA14886
10 |
11 | # Accept oracle license in debconf
12 | ADD ./java7.debconf /tmp/java7.debconf
13 | RUN cat /tmp/java7.debconf |debconf-set-selections
14 |
15 | # Install Oracle Java7
16 | RUN apt-get update -qq \
17 | && apt-get install -qy oracle-java7-installer \
18 | && rm -f /var/cache/oracle-jdk7-installer/jdk*tar.gz \
19 | && ln -sf java-7-oracle /usr/lib/jvm/default-java \
20 | && apt-get purge -y openjdk-\* icedtea-\* icedtea6-\* \
21 | && rm -rf /var/lib/apt/lists
22 |
23 | # Install maven 3.x
24 | RUN wget -qO- http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
25 | | tar xzf - -C /usr/local/ \
26 | && ln -sf /usr/local/apache-maven-3.3.9/bin/mvn /usr/bin/mvn
27 |
--------------------------------------------------------------------------------
/java7/README.md:
--------------------------------------------------------------------------------
1 | Docker image for java programs
2 |
3 | - Oracle JDK 7
4 | - Maven 3.x
5 |
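6 | A quick smoke test (the image name below is an assumption; use whatever tag you build):
7 |
8 |     docker run --rm scrapinghub/java7 mvn -version
9 |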
--------------------------------------------------------------------------------
/java7/java7.debconf:
--------------------------------------------------------------------------------
1 | debconf shared/accepted-oracle-license-v1-1 select true
2 | debconf shared/accepted-oracle-license-v1-1 seen true
3 | oracle-java7-installer oracle-java7-installer/local string /var/cache/apt/archives
4 | oracle-java7-installer oracle-java7-installer/local seen true
5 |
--------------------------------------------------------------------------------
/java7/java7.list:
--------------------------------------------------------------------------------
1 | deb http://ppa.launchpad.net/webupd8team/java/ubuntu precise main
2 | deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu precise main
3 |
--------------------------------------------------------------------------------
/java8-bionic-zulu/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | ENV DEBIAN_FRONTEND=noninteractive \
4 | JAVA_HOME=/usr/lib/jvm/zulu-8-amd64
5 |
6 | # Zulu Java 8
7 | # https://docs.azul.com/zulu/zuludocs/#ZuluUserGuide/InstallingZulu/InstallOnLinuxUsingAPTRepository.htm
8 | RUN apt-get update -qq && \
9 | apt-get install -y curl python apt-transport-https gnupg2 && \
10 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0xB1998361219BD9C9 && \
11 | echo "deb http://repos.azulsystems.com/ubuntu stable main" > /etc/apt/sources.list.d/zulu.list && \
12 | echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
13 | apt-get update -qq && \
14 | apt-get install -y zulu-8
15 |
16 | # Maven
17 | RUN curl -sSL -o - http://www.eu.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz \
18 | | tar xzf - -C /usr/local/ \
19 | && ln -sf /usr/local/apache-maven-3.6.3/bin/mvn /usr/bin/mvn
20 |
21 | RUN apt-get update -qq && \
22 | apt-get install -qy \
23 | build-essential \
24 | git \
25 | libevent-dev \
26 | libffi-dev \
27 | libfreetype6-dev \
28 | libjpeg8-dev \
29 | libmysqlclient-dev \
30 | libpcre3-dev \
31 | libpq-dev \
32 | libssl-dev \
33 | libssl-dev \
34 | libxml2-dev \
35 | libxslt1-dev \
36 | libz-dev \
37 | pkg-config \
38 | python-dev \
39 | && \
40 | rm -rf /var/lib/apt/lists /var/cache/apt/archives
41 |
42 | RUN curl -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
43 | python /tmp/get-pip.py && \
44 | rm -rf /tmp/get-pip.py && \
45 | pip install --no-cache-dir -U wheel && \
46 | pip install --no-cache-dir requests[security]==2.23.0 && \
47 | rm -rf ~/.cache/pip
48 |
49 | RUN apt-get update -qq && \
50 | apt-get install -qy software-properties-common && \
51 | add-apt-repository -y ppa:deadsnakes/ppa && \
52 | apt-get update -qq && \
53 | apt-get install -qy python3.8-dev && \
54 | cp -a /usr/bin/python3.8 /usr/bin/python3 && \
55 | cp -a /usr/local/bin/pip /tmp/pip2 && \
56 | curl -s -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
57 | python3.8 /tmp/get-pip.py && \
58 | cp -a /usr/local/bin/pip /usr/local/bin/pip3 && \
59 | cp -a /tmp/pip2 /usr/local/bin/pip && \
60 | cp -a /tmp/pip2 /usr/local/bin/pip2 && \
61 | rm -f /tmp/get-pip.py /tmp/pip2 && \
62 | rm -rf /var/lib/apt/lists /var/cache/apt/archives && \
63 | pip3 install pipenv
64 |
--------------------------------------------------------------------------------
/java8/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | RUN apt-get update -qq && \
6 | apt-get install -y curl python apt-transport-https && \
7 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8756C4F765C9AC3CB6B85D62379CE192D401AB61 && \
8 | echo "deb https://dl.bintray.com/scrapinghub/3rdparty trusty main" > /etc/apt/sources.list.d/scrapinghub.list && \
9 | echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
10 | apt-get update -qq && \
11 | apt-get install -y oracle-java8-installer oracle-java8-set-default && \
12 | rm -rf /var/lib/apt/lists /var/cache/apt/archives
13 |
14 | RUN curl -q -o - http://www.eu.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
15 | | tar xzf - -C /usr/local/ \
16 | && ln -sf /usr/local/apache-maven-3.3.9/bin/mvn /usr/bin/mvn
17 |
18 | RUN apt-get update -qq && \
19 | apt-get install -qy \
20 | build-essential \
21 | git \
22 | libevent-dev \
23 | libffi-dev \
24 | libfreetype6-dev \
25 | libjpeg8-dev \
26 | libmysqlclient-dev \
27 | libpcre3-dev \
28 | libpq-dev \
29 | libssl-dev \
30 | libssl-dev \
31 | libxml2-dev \
32 | libxslt1-dev \
33 | libz-dev \
34 | pkg-config \
35 | python-dev \
36 | && \
37 | rm -rf /var/lib/apt/lists /var/cache/apt/archives
38 |
39 | RUN curl -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
40 | python /tmp/get-pip.py && \
41 | rm -rf /tmp/get-pip.py && \
42 | pip install --no-cache-dir -U wheel && \
43 | pip install --no-cache-dir requests[security]==2.10.0 && \
44 | rm -rf ~/.cache/pip
45 |
46 | RUN apt-get update -qq && \
47 | apt-get install -qy software-properties-common && \
48 | add-apt-repository -y ppa:deadsnakes/ppa && \
49 | apt-get update -qq && \
50 | apt-get install -qy python3.6-dev && \
51 | cp -a /usr/bin/python3.6 /usr/bin/python3 && \
52 | cp -a /usr/local/bin/pip /tmp/pip2 && \
53 | curl -s -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
54 | python3.6 /tmp/get-pip.py && \
55 | cp -a /usr/local/bin/pip /usr/local/bin/pip3 && \
56 | cp -a /tmp/pip2 /usr/local/bin/pip && \
57 | cp -a /tmp/pip2 /usr/local/bin/pip2 && \
58 | rm -f /tmp/get-pip.py /tmp/pip2 && \
59 | rm -rf /var/lib/apt/lists /var/cache/apt/archives && \
60 | pip3 install pipenv
61 |
--------------------------------------------------------------------------------
/logstash/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.elastic.co/logstash/logstash-oss:7.9.3
2 | RUN bin/logstash-plugin install logstash-output-google_bigquery
3 |
--------------------------------------------------------------------------------
/mongodb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | #ENV DEBIAN_FRONTEND noninteractive
3 | RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
4 | RUN echo "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" \
5 | >/etc/apt/sources.list.d/docker.list
6 | RUN apt-get update -qq && apt-get install -qy mongodb-org
7 | VOLUME /var/lib/mongodb
8 | EXPOSE 27017
9 | ADD mongod.conf /etc/mongod.conf
10 | ADD run.sh /run.sh
11 | CMD ["/run.sh"]
12 |
--------------------------------------------------------------------------------
/mongodb/mongod.conf:
--------------------------------------------------------------------------------
1 | dbpath=/var/lib/mongodb
2 | directoryperdb=true
3 | smallfiles=true
4 | nssize=16
5 |
--------------------------------------------------------------------------------
/mongodb/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [[ ! -e /var/lib/mongodb/journal && $ADMIN_SECRET ]]; then
3 | /usr/bin/mongod --config /etc/mongod.conf &
4 | pid=$!
5 | sleep 1
6 | mongo admin <
--------------------------------------------------------------------------------
/spark/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse false
6 | spark.mesos.role spark
7 | spark.mesos.constraints role:c2-node
8 | spark.mesos.uris file:///root/.dockercfg
9 | spark.mesos.mesosExecutor.cores 0.5
10 |
--------------------------------------------------------------------------------
/spark/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/spark2.0/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/java:8
2 |
3 | WORKDIR /opt/spark
4 | ENV SPARK_HOME=/opt/spark \
5 | HADOOP_CONF_DIR=/etc/hadoop/conf \
6 | PYSPARK_PYTHON=/usr/bin/python \
7 | MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so \
8 | PYTHONPATH=/opt/spark/python:/opt/spark/python/lib/py4j-0.10.3-src.zip
9 |
10 | RUN mkdir -p /opt/spark && \
11 | curl -sSL -o - http://d3kbcqa49mib13.cloudfront.net/spark-2.0.2-bin-hadoop2.6.tgz \
12 | | tar xzf - --strip-components=1 -C /opt/spark && \
13 | apt-get update -qq && \
14 | apt-get install -y mesos=1.0.1-2.0.93.ubuntu1404 && \
15 | mkdir -p /etc/hadoop/conf
16 |
17 | RUN mkdir -p /etc/hadoop/conf
18 | ADD core-site.xml hdfs-site.xml /etc/hadoop/conf/
19 |
20 | ADD spark-env.sh spark-defaults.conf /opt/spark/conf/
21 |
22 | # Clean up for docker squash
23 | # See https://github.com/goldmann/docker-squash
24 | RUN rm -rf \
25 | /root/.cache \
26 | /root/.npm \
27 | /root/.pip \
28 | /usr/local/share/doc \
29 | /usr/share/doc \
30 | /usr/share/man \
31 | /usr/share/vim/vim74/doc \
32 | /usr/share/vim/vim74/lang \
33 | /usr/share/vim/vim74/spell/en* \
34 | /usr/share/vim/vim74/tutor \
35 | /var/lib/apt/lists/* \
36 | /tmp/*
37 |
--------------------------------------------------------------------------------
/spark2.0/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.0/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.0/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse true
6 | spark.cores.max 2
7 | spark.executor.cores 1
8 | spark.executor.memory 1g
9 | spark.mesos.role spark
10 | spark.mesos.constraints role:c2-node
11 | spark.mesos.uris file:///root/.dockercfg
12 |
13 | ##########################################################################################
14 | # Settings for dynamic resource allocation. DO NOT EDIT unless you know what you're doing.
15 | ##########################################################################################
16 | spark.dynamicAllocation.enabled true
17 | spark.shuffle.service.enabled true
18 | # default 0
19 | spark.dynamicAllocation.initialExecutors 2
20 | # default 1min
21 | spark.dynamicAllocation.executorIdleTimeout 10m
22 | # default infinity
23 | spark.dynamicAllocation.cachedExecutorIdleTimeout 1h
24 | # default infinity
25 | spark.dynamicAllocation.maxExecutors 10
26 | # The executor must share the same data dir with the external shuffle service
27 | # See https://issues.apache.org/jira/browse/SPARK-17555
28 | spark.mesos.executor.docker.volumes /tmp/spark:/tmp/spark:rw
29 | spark.local.dir /tmp/spark
30 |
--------------------------------------------------------------------------------
/spark2.0/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/spark2.1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/java:8
2 |
3 | WORKDIR /opt/spark
4 | ENV SPARK_HOME=/opt/spark \
5 | HADOOP_CONF_DIR=/etc/hadoop/conf \
6 | PYSPARK_PYTHON=/usr/bin/python \
7 | MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so \
8 | PYTHONPATH=/opt/spark/python:/opt/spark/python/lib/py4j-0.10.4-src.zip
9 |
10 | RUN mkdir -p /opt/spark && \
11 | curl -sSL -o - http://d3kbcqa49mib13.cloudfront.net/spark-2.1.1-bin-hadoop2.6.tgz \
12 | | tar xzf - --strip-components=1 -C /opt/spark && \
13 | apt-get update -qq && \
14 | apt-get install -y mesos=1.0.1-2.0.93.ubuntu1404 && \
15 | mkdir -p /etc/hadoop/conf
16 |
17 | RUN mkdir -p /etc/hadoop/conf
18 | ADD core-site.xml hdfs-site.xml /etc/hadoop/conf/
19 |
20 | ADD spark-env.sh spark-defaults.conf /opt/spark/conf/
21 |
22 | # Clean up for docker squash
23 | # See https://github.com/goldmann/docker-squash
24 | RUN rm -rf \
25 | /root/.cache \
26 | /root/.npm \
27 | /root/.pip \
28 | /usr/local/share/doc \
29 | /usr/share/doc \
30 | /usr/share/man \
31 | /usr/share/vim/vim74/doc \
32 | /usr/share/vim/vim74/lang \
33 | /usr/share/vim/vim74/spell/en* \
34 | /usr/share/vim/vim74/tutor \
35 | /var/lib/apt/lists/* \
36 | /tmp/*
37 |
--------------------------------------------------------------------------------
/spark2.1/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.1/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.1/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse true
6 | spark.cores.max 2
7 | spark.executor.cores 1
8 | spark.executor.memory 1g
9 | spark.mesos.role spark
10 | spark.mesos.constraints role:c2-node
11 | spark.mesos.uris file:///root/.dockercfg
12 |
13 | ##########################################################################################
14 | # Settings for dynamic resource allocation. DO NOT EDIT unless you know what you're doing.
15 | ##########################################################################################
16 | spark.dynamicAllocation.enabled true
17 | spark.shuffle.service.enabled true
18 | # default 0
19 | spark.dynamicAllocation.initialExecutors 2
20 | # default 1min
21 | spark.dynamicAllocation.executorIdleTimeout 10m
22 | # default infinity
23 | spark.dynamicAllocation.cachedExecutorIdleTimeout 1h
24 | # default infinity
25 | spark.dynamicAllocation.maxExecutors 10
26 | # The executor must share the same data dir with the external shuffle service
27 | # See https://issues.apache.org/jira/browse/SPARK-17555
28 | spark.mesos.executor.docker.volumes /tmp/spark:/tmp/spark:rw
29 | spark.local.dir /tmp/spark
30 |
--------------------------------------------------------------------------------
/spark2.1/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/spark2.4/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/java:8-bionic-zulu
2 |
3 | WORKDIR /opt/spark
4 | ENV SPARK_HOME=/opt/spark \
5 | HADOOP_CONF_DIR=/etc/hadoop/conf \
6 | PYSPARK_PYTHON=/usr/bin/python \
7 | MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so \
8 | PYTHONPATH=/opt/spark/python:/opt/spark/python/lib/py4j-0.10.7-src.zip
9 |
10 | RUN mkdir -p /opt/spark && \
11 | curl -sSL -o - https://downloads.apache.org/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz \
12 | | tar xzf - --strip-components=1 -C /opt/spark
13 |
14 | RUN apt-get update -qq && \
15 | apt-get install -y curl python apt-transport-https && \
16 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8756C4F765C9AC3CB6B85D62379CE192D401AB61 && \
17 | echo "deb https://dl.bintray.com/scrapinghub/spark bionic main" > /etc/apt/sources.list.d/seafio.list && \
18 | apt-get update -qq && \
19 | apt-get install -y --no-install-recommends mesos=1.4.3-0.1.20200507155028.ubuntu1804
20 |
21 | RUN mkdir -p /etc/hadoop/conf
22 | ADD core-site.xml hdfs-site.xml /etc/hadoop/conf/
23 |
24 | ADD spark-env.sh spark-defaults.conf /opt/spark/conf/
25 |
--------------------------------------------------------------------------------
/spark2.4/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.4/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark2.4/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse true
6 | spark.cores.max 2
7 | spark.executor.cores 1
8 | spark.executor.memory 1g
9 | spark.mesos.role spark
10 | spark.mesos.constraints role:c2-node
11 | spark.mesos.uris file:///root/.dockercfg
12 |
13 | ##########################################################################################
14 | # Settings for dynamic resource allocation. DO NOT EDIT unless you know what you're doing.
15 | ##########################################################################################
16 | spark.dynamicAllocation.enabled true
17 | spark.shuffle.service.enabled true
18 | # default 0
19 | spark.dynamicAllocation.initialExecutors 2
20 | # default 1min
21 | spark.dynamicAllocation.executorIdleTimeout 10m
22 | # default infinity
23 | spark.dynamicAllocation.cachedExecutorIdleTimeout 1h
24 | # default infinity
25 | spark.dynamicAllocation.maxExecutors 10
26 | # The executor must share the same data dir with the external shuffle service
27 | # See https://issues.apache.org/jira/browse/SPARK-17555
28 | spark.mesos.executor.docker.volumes /tmp/spark:/tmp/spark:rw
29 | spark.local.dir /tmp/spark
30 |
--------------------------------------------------------------------------------
/spark2.4/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/spark3.1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/java:11
2 |
3 | WORKDIR /opt/spark
4 | ENV SPARK_HOME=/opt/spark \
5 | HADOOP_CONF_DIR=/etc/hadoop/conf \
6 | PYSPARK_PYTHON=/usr/bin/python \
7 | MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so \
8 | PYTHONPATH=/opt/spark/python:/opt/spark/python/lib/py4j-0.10.9-src.zip
9 |
10 | RUN cd /tmp && \
11 | wget http://apt.zyte.group/zyte/key.asc \
12 | && apt-key add key.asc \
13 | && rm -f key.asc \
14 | && echo 'deb http://apt.zyte.group/zyte trusty main' > /etc/apt/sources.list.d/zyte.list \
15 | && apt-get update -qq
16 |
17 | RUN mkdir -p /opt/spark && \
18 | curl -sSL -o - https://dlcdn.apache.org/spark/spark-3.1.3/spark-3.1.3-bin-hadoop3.2.tgz \
19 | | tar xzf - --strip-components=1 -C /opt/spark && \
20 | apt-get update -qq && \
21 | mkdir -p /etc/hadoop/conf
22 |
23 | # 2022.07.06: the latest zulu-11 provides the virtual java package that mesos depends on
24 | RUN apt-get update -qq \
25 | && apt-get install -y \
26 | mesos=1.4.3-0.1.20200507155028.ubuntu1804 \
27 | zulu-11
28 |
29 | RUN mkdir -p /etc/hadoop/conf
30 | ADD core-site.xml hdfs-site.xml /etc/hadoop/conf/
31 |
32 | ADD spark-env.sh spark-defaults.conf /opt/spark/conf/
33 |
34 | # Clean up for docker squash
35 | # See https://github.com/goldmann/docker-squash
36 | # RUN rm -rf \
37 | # /root/.cache \
38 | # /root/.npm \
39 | # /root/.pip \
40 | # /usr/local/share/doc \
41 | # /usr/share/doc \
42 | # /usr/share/man \
43 | # /usr/share/vim/vim74/doc \
44 | # /usr/share/vim/vim74/lang \
45 | # /usr/share/vim/vim74/spell/en* \
46 | # /usr/share/vim/vim74/tutor \
47 | # /var/lib/apt/lists/* \
48 | # /tmp/*
49 |
--------------------------------------------------------------------------------
/spark3.1/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark3.1/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark3.1/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse true
6 | spark.cores.max 2
7 | spark.executor.cores 1
8 | spark.executor.memory 1g
9 | spark.mesos.role spark
10 | spark.mesos.constraints role:c5-node
11 | spark.mesos.uris file:///root/.dockercfg
12 |
13 | ##########################################################################################
14 | # Settings for dynamic resource allocation. DO NOT EDIT unless you know what you're doing.
15 | ##########################################################################################
16 | spark.dynamicAllocation.enabled true
17 | spark.shuffle.service.enabled true
18 | # default 0
19 | spark.dynamicAllocation.initialExecutors 2
20 | # default 1min
21 | spark.dynamicAllocation.executorIdleTimeout 10m
22 | # default infinity
23 | spark.dynamicAllocation.cachedExecutorIdleTimeout 1h
24 | # default infinity
25 | spark.dynamicAllocation.maxExecutors 10
26 | # The executor must share the same data dir with the external shuffle service
27 | # See https://issues.apache.org/jira/browse/SPARK-17555
28 | spark.mesos.executor.docker.volumes /tmp/spark:/tmp/spark:rw
29 | spark.local.dir /tmp/spark
30 |
--------------------------------------------------------------------------------
/spark3.1/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/spark3.2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/java:11
2 |
3 | WORKDIR /opt/spark
4 | ENV SPARK_HOME=/opt/spark \
5 | HADOOP_CONF_DIR=/etc/hadoop/conf \
6 | PYSPARK_PYTHON=/usr/bin/python \
7 | MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so \
8 | PYTHONPATH=/opt/spark/python:/opt/spark/python/lib/py4j-0.10.9.3-src.zip
9 |
10 | RUN mkdir -p /opt/spark && \
11 | curl -sSL -o - https://dlcdn.apache.org/spark/spark-3.2.1/spark-3.2.1-bin-hadoop3.2.tgz \
12 | | tar xzf - --strip-components=1 -C /opt/spark && \
13 | apt-get update -qq && \
14 | mkdir -p /etc/hadoop/conf
15 |
16 | # RUN apt-get install -y mesos=1.0.1-2.0.93.ubuntu1404
17 |
18 | RUN mkdir -p /etc/hadoop/conf
19 | ADD core-site.xml hdfs-site.xml /etc/hadoop/conf/
20 |
21 | ADD spark-env.sh spark-defaults.conf /opt/spark/conf/
22 |
23 | # Clean up for docker squash
24 | # See https://github.com/goldmann/docker-squash
25 | RUN rm -rf \
26 | /root/.cache \
27 | /root/.npm \
28 | /root/.pip \
29 | /usr/local/share/doc \
30 | /usr/share/doc \
31 | /usr/share/man \
32 | /usr/share/vim/vim74/doc \
33 | /usr/share/vim/vim74/lang \
34 | /usr/share/vim/vim74/spell/en* \
35 | /usr/share/vim/vim74/tutor \
36 | /var/lib/apt/lists/* \
37 | /tmp/*
38 |
--------------------------------------------------------------------------------
/spark3.2/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark3.2/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/spark3.2/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | # Default system properties included when running spark-submit.
2 | # This is useful for setting default environmental settings.
3 |
4 | spark.files file:///etc/hadoop/conf/hdfs-site.xml,file:///etc/hadoop/conf/core-site.xml
5 | spark.mesos.coarse true
6 | spark.cores.max 2
7 | spark.executor.cores 1
8 | spark.executor.memory 1g
9 | spark.mesos.role spark
10 | spark.mesos.constraints role:c2-node
11 | spark.mesos.uris file:///root/.dockercfg
12 |
13 | ##########################################################################################
14 | # Settings for dynamic resource allocation. DO NOT EDIT unless you know what you're doing.
15 | ##########################################################################################
16 | spark.dynamicAllocation.enabled true
17 | spark.shuffle.service.enabled true
18 | # default 0
19 | spark.dynamicAllocation.initialExecutors 2
20 | # default 1min
21 | spark.dynamicAllocation.executorIdleTimeout 10m
22 | # default infinity
23 | spark.dynamicAllocation.cachedExecutorIdleTimeout 1h
24 | # default infinity
25 | spark.dynamicAllocation.maxExecutors 10
26 | # The executor must share the same data dir with the external shuffle service
27 | # See https://issues.apache.org/jira/browse/SPARK-17555
28 | spark.mesos.executor.docker.volumes /tmp/spark:/tmp/spark:rw
29 | spark.local.dir /tmp/spark
30 |
--------------------------------------------------------------------------------
/spark3.2/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This file is sourced when running various Spark programs.
4 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
5 |
6 | export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
7 |
8 | if [[ -e /mnt/mesos/sandbox/spark-env.sh ]]; then
9 | . /mnt/mesos/sandbox/spark-env.sh
10 | fi
11 |
--------------------------------------------------------------------------------
/splash-dbg/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM scrapinghub/splash:3.2
2 |
3 | RUN apt-get update -qq && \
4 | apt-get install -y gdb
5 |
--------------------------------------------------------------------------------
/splash-dbg/Makefile:
--------------------------------------------------------------------------------
1 | image=scrapinghub/splash:3.2-dbg
2 |
3 | all:
4 | 	docker build -t $(image) .
5 |
6 | push:
7 | 	docker push $(image)
8 |
9 | .PHONY: all push
10 |
--------------------------------------------------------------------------------
/squid-deb-proxy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | #ENV DEBIAN_FRONTEND noninteractive
3 | RUN apt-get update -q && apt-get install -qy squid-deb-proxy
4 | RUN sed -i -e 's#^http_port .*$#http_port 3129#' /etc/squid-deb-proxy/squid-deb-proxy.conf
5 | RUN echo all >/etc/squid-deb-proxy/allowed-networks-src.acl.d/20-vagrant
6 | RUN sed -i -e '/to_archive_mirrors/d' /etc/squid-deb-proxy/squid-deb-proxy.conf
7 | VOLUME /var/cache/squid-deb-proxy
8 | EXPOSE 3129
9 | ADD run.sh /run.sh
10 | CMD ["/run.sh"]
11 |
--------------------------------------------------------------------------------
/squid-deb-proxy/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | dir=/var/cache/squid-deb-proxy
3 | # workaround shared folders permissions problems on vagrant
4 | if ! (chown proxy $dir && sudo -u proxy touch $dir); then
5 | uid=$(stat -c %u $dir)
6 | userdel proxy
7 | useradd -M -r -s /usr/sbin/nologin -d /bin -u 1000 proxy
8 | chown -R proxy /var/log/squid-deb-proxy /var/cache/squid-deb-proxy
9 | echo 'cache_effective_user proxy' >>/etc/squid-deb-proxy/squid-deb-proxy.conf
10 | fi
11 |
12 | . /usr/share/squid-deb-proxy/init-common.sh
13 | pre_start
14 | exec /usr/sbin/squid3 -N -f /etc/squid-deb-proxy/squid-deb-proxy.conf
15 |
--------------------------------------------------------------------------------
/vsftpd/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copied from https://github.com/fauria/docker-vsftpd/tree/e5544259
2 | FROM centos:7
3 | ENV FTP_USER=**String** \
4 | FTP_PASS=**Random** \
5 | PASV_ADDRESS=**IPv4** \
6 | PASV_MIN_PORT=21100 \
7 | PASV_MAX_PORT=21110 \
8 | LOG_STDOUT=**Boolean**
9 |
10 | VOLUME /home/vsftpd
11 | VOLUME /var/log/vsftpd
12 |
13 | EXPOSE 20 21
14 |
15 | CMD ["/usr/sbin/run-vsftpd.sh"]
16 |
17 | RUN yum install -y vsftpd db4-utils db4 \
18 | && yum clean all
19 |
20 | RUN mkdir -p /home/vsftpd/
21 | RUN chown -R ftp:ftp /home/vsftpd/
22 |
23 | COPY vsftpd.conf /etc/vsftpd/
24 | COPY vsftpd_virtual /etc/pam.d/
25 | COPY run-vsftpd.sh /usr/sbin/
26 | RUN chmod +x /usr/sbin/run-vsftpd.sh
27 |
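Note that only ports 20 and 21 are EXPOSEd; the passive range has to be published explicitly. A sketch of a typical run (tag, credentials, and address are illustrative assumptions; 203.0.113.10 is a documentation IP):

    docker build -t vsftpd-local ./vsftpd
    docker run -d -p 21:21 -p 21100-21110:21100-21110 \
        -e FTP_USER=demo -e FTP_PASS=s3cret \
        -e PASV_ADDRESS=203.0.113.10 \
        -v ftp-data:/home/vsftpd vsftpd-local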
--------------------------------------------------------------------------------
/vsftpd/run-vsftpd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # If no env var for FTP_USER has been specified, use 'admin':
4 | if [ "$FTP_USER" = "**String**" ]; then
5 | export FTP_USER='admin'
6 | fi
7 |
8 | # If no env var has been specified, generate a random password for FTP_USER:
9 | if [ "$FTP_PASS" = "**Random**" ]; then
10 | export FTP_PASS=$(tr -dc 'A-Za-z0-9' </dev/urandom | head -c 16)
11 | fi
12 |
13 | # Do not log to STDOUT by default:
14 | if [ "$LOG_STDOUT" = "**Boolean**" ]; then
15 | export LOG_STDOUT=''
16 | else
17 | export LOG_STDOUT='Yes.'
18 | fi
19 |
20 | # Create home dir and update vsftpd user db:
21 | mkdir -p "/home/vsftpd/${FTP_USER}"
22 | chown -R ftp:ftp /home/vsftpd/
23 |
24 | echo -e "${FTP_USER}\n${FTP_PASS}" > /etc/vsftpd/virtual_users.txt
25 | /usr/bin/db_load -T -t hash -f /etc/vsftpd/virtual_users.txt /etc/vsftpd/virtual_users.db
26 |
27 | # Set passive mode parameters:
28 | if [ "$PASV_ADDRESS" = "**IPv4**" ]; then
29 | export PASV_ADDRESS=$(/sbin/ip route|awk '/default/ { print $3 }')
30 | fi
31 |
32 | echo "pasv_address=${PASV_ADDRESS}" >> /etc/vsftpd/vsftpd.conf
33 | echo "pasv_max_port=${PASV_MAX_PORT}" >> /etc/vsftpd/vsftpd.conf
34 | echo "pasv_min_port=${PASV_MIN_PORT}" >> /etc/vsftpd/vsftpd.conf
35 | # Get log file path
36 | export LOG_FILE=$(grep xferlog_file /etc/vsftpd/vsftpd.conf | cut -d= -f2)
37 |
38 | # stdout server info:
39 | if [ -z "$LOG_STDOUT" ]; then
40 | cat << EOB
41 | *************************************************
42 | * *
43 | * Docker image: fauria/vsftpd *
44 | * https://github.com/fauria/docker-vsftpd *
45 | * *
46 | *************************************************
47 |
48 | SERVER SETTINGS
49 | ---------------
50 | · FTP User: $FTP_USER
51 | · FTP Password: $FTP_PASS
52 | · Log file: $LOG_FILE
53 | · Redirect vsftpd log to STDOUT: No.
54 | EOB
55 | else
56 | /usr/bin/ln -sf /dev/stdout $LOG_FILE
57 | fi
58 |
59 | # Run vsftpd:
60 | exec /usr/sbin/vsftpd /etc/vsftpd/vsftpd.conf &>/dev/null
61 |
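Once the container is up, a quick way to confirm that the virtual user and passive mode both work is a directory listing over FTP (host and credentials follow the run sketch above):

    curl --ftp-pasv ftp://demo:s3cret@localhost/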
--------------------------------------------------------------------------------
/vsftpd/vsftpd.conf:
--------------------------------------------------------------------------------
1 | # Run in the foreground to keep the container running:
2 | background=NO
3 |
4 | # Allow anonymous FTP? (Beware - allowed by default if you comment this out).
5 | anonymous_enable=NO
6 |
7 | # Uncomment this to allow local users to log in.
8 | local_enable=YES
9 |
10 | ## Enable virtual users
11 | guest_enable=YES
12 |
13 | ## Virtual users will use the same permissions as anonymous
14 | virtual_use_local_privs=YES
15 |
16 | # Uncomment this to enable any form of FTP write command.
17 | write_enable=YES
18 |
19 | ## PAM file name
20 | pam_service_name=vsftpd_virtual
21 |
22 | ## Home Directory for virtual users
23 | user_sub_token=$USER
24 | local_root=/home/vsftpd/$USER
25 |
26 | # You may specify an explicit list of local users to chroot() to their home
27 | # directory. If chroot_local_user is YES, then this list becomes a list of
28 | # users to NOT chroot().
29 | chroot_local_user=YES
30 |
31 | # Workaround chroot check.
32 | # See https://www.benscobie.com/fixing-500-oops-vsftpd-refusing-to-run-with-writable-root-inside-chroot/
33 | # and http://serverfault.com/questions/362619/why-is-the-chroot-local-user-of-vsftpd-insecure
34 | allow_writeable_chroot=YES
35 |
36 | ## Hide ids from user
37 | hide_ids=YES
38 |
39 | ## Do not resolve pasv_address as a hostname
40 | pasv_addr_resolve=NO
41 |
42 | ## Enable logging
43 | xferlog_enable=YES
44 | xferlog_file=/var/log/vsftpd/vsftpd.log
45 |
46 | ## Enable active mode
47 | port_enable=YES
48 | connect_from_port_20=YES
49 | ftp_data_port=20
50 |
51 | ## Disable seccomp filter sandboxing
52 | seccomp_sandbox=NO
53 |
54 | ## Enable passive mode
55 | pasv_enable=YES
56 |
--------------------------------------------------------------------------------
/vsftpd/vsftpd_virtual:
--------------------------------------------------------------------------------
1 | #%PAM-1.0
2 | auth required pam_userdb.so db=/etc/vsftpd/virtual_users
3 | account required pam_userdb.so db=/etc/vsftpd/virtual_users
4 | session required pam_loginuid.so
5 |
--------------------------------------------------------------------------------
/zabbix-frontend-php/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | ENV DEBIAN_FRONTEND noninteractive
3 |
4 | ADD http://repo.zabbix.com/zabbix/3.0/ubuntu/pool/main/z/zabbix-release/zabbix-release_3.0-1+trusty_all.deb /zabbix-release.deb
5 | RUN dpkg -i /zabbix-release.deb
6 |
7 | RUN apt-get update -qq && apt-get install -qy --no-install-recommends zabbix-frontend-php && rm -rf /var/lib/apt/lists/*
8 | RUN ln -sf /etc/zabbix/apache.conf /etc/apache2/conf-enabled/zabbix.conf
9 |
10 | ADD php.ini /etc/php5/apache2/conf.d/30-zabbix.ini
11 | ADD apache.conf /etc/apache2/conf-enabled/zabbix-server.conf
12 |
13 | ADD run.sh /run.sh
14 | CMD ["/run.sh"]
15 | EXPOSE 80
16 |
--------------------------------------------------------------------------------
/zabbix-frontend-php/apache.conf:
--------------------------------------------------------------------------------
1 | ServerName zabbix-server
2 |
--------------------------------------------------------------------------------
/zabbix-frontend-php/php.ini:
--------------------------------------------------------------------------------
1 | [Date]
2 | date.timezone = UTC
3 |
--------------------------------------------------------------------------------
/zabbix-frontend-php/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [[ $ZABBIX_CONF ]]; then
3 | ln -sf "$ZABBIX_CONF" /etc/zabbix/web/zabbix.conf.php
4 | fi
5 | if [[ $APACHE_CONF ]]; then
6 | rm -rf /etc/apache2/sites-enabled/*
7 | ln -sf "$APACHE_CONF" /etc/apache2/sites-enabled/
8 | fi
9 | . /etc/default/apache2
10 | . /etc/apache2/envvars
11 | exec /usr/sbin/apache2 -D FOREGROUND
12 |
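The ZABBIX_CONF and APACHE_CONF hooks allow the frontend configuration to be mounted at run time instead of baked into the image. A sketch (image tag and host paths are illustrative assumptions):

    docker run -d -p 80:80 \
        -v /srv/zabbix/zabbix.conf.php:/conf/zabbix.conf.php:ro \
        -e ZABBIX_CONF=/conf/zabbix.conf.php \
        zabbix-frontend-php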
--------------------------------------------------------------------------------
/zabbix-java-gateway/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | RUN apt-get update -qq
6 | RUN apt-get install -qy --no-install-recommends openjdk-7-jre-headless
7 |
8 | ADD http://repo.zabbix.com/zabbix/3.0/ubuntu/pool/main/z/zabbix/zabbix-java-gateway_3.0.3-1+trusty_all.deb /zabbix-jgateway.deb
9 | RUN dpkg -i /zabbix-jgateway.deb
10 |
11 | ADD run.sh /run.sh
12 | CMD ["/run.sh"]
13 |
14 | EXPOSE 10052
15 |
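A sketch of running the gateway and wiring a Zabbix server to it (the image tag is an assumption; the server-side keys are standard zabbix_server.conf options):

    docker run -d -p 10052:10052 zabbix-java-gateway
    # in zabbix_server.conf on the server side:
    #   JavaGateway=<gateway host>
    #   JavaGatewayPort=10052
    #   StartJavaPollers=5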
--------------------------------------------------------------------------------
/zabbix-java-gateway/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | logfile=/var/log/zabbix/zabbix_java_gateway.log
3 | logdir=$(dirname $logfile)
4 | pidfile=/var/run/zabbix/zabbix_java_gateway.pid
5 | piddir=$(dirname $pidfile)
6 | mkdir -p $piddir $logdir
7 | touch $logfile
8 | chown -R zabbix:zabbix $piddir $logfile
9 | /etc/init.d/zabbix-java-gateway start
10 | while [ ! -f $pidfile ]
11 | do
12 | sleep 1
13 | done
14 | tail --pid=$(cat $pidfile) -f $logfile
15 |
--------------------------------------------------------------------------------
/zabbix-server-mysql/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 |
5 | ADD http://repo.zabbix.com/zabbix/3.0/ubuntu/pool/main/z/zabbix-release/zabbix-release_3.0-1+trusty_all.deb /zabbix-release.deb
6 |
7 | RUN dpkg -i /zabbix-release.deb
8 | RUN apt-get update -qq && apt-get install -qy --no-install-recommends zabbix-server-mysql python python-requests
9 |
10 | ADD run.sh /run.sh
11 | CMD ["/run.sh"]
12 |
13 | EXPOSE 10051
14 |
--------------------------------------------------------------------------------
/zabbix-server-mysql/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | PIDDIR=${PIDDIR:-/var/run/zabbix}
4 | PIDFILE=${PIDFILE:-$PIDDIR/zabbix_server.pid}
5 | CFGFILE=${CFGFILE:-/etc/zabbix/zabbix_server.conf}
6 | LOGFILE=${LOGFILE:-/var/log/zabbix/zabbix_server.log}
7 |
8 | if [[ ${INIT_SCRIPT} ]]; then
9 | source ${INIT_SCRIPT}
10 | fi
11 |
12 | mkdir -p $PIDDIR
13 | chown -R zabbix:zabbix $PIDDIR
14 |
15 | /usr/sbin/zabbix_server -c $CFGFILE
16 |
17 | sleep 5
18 | tail --pid=$(cat $PIDFILE) -f $LOGFILE
19 |
20 | # Provide some time in case something needs to be debugged
21 | sleep 30
22 |
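The INIT_SCRIPT hook runs an arbitrary script before the server starts, which is a natural place to template database settings into zabbix_server.conf. A hedged sketch of such a script (the DB_* environment variables are assumptions, not defined by the image; DBHost/DBUser/DBPassword are standard zabbix_server.conf keys):

    #!/bin/bash
    # mounted into the container and enabled with -e INIT_SCRIPT=/init.sh
    sed -i \
        -e "s/^# DBHost=.*/DBHost=${DB_HOST}/" \
        -e "s/^DBUser=.*/DBUser=${DB_USER}/" \
        -e "s/^# DBPassword=.*/DBPassword=${DB_PASS}/" \
        /etc/zabbix/zabbix_server.conf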
--------------------------------------------------------------------------------