├── .github └── workflows │ └── h2.yml ├── .gitignore ├── LICENSE ├── README.adoc ├── alpine-openstack-terraform └── Dockerfile ├── axon-server └── README.md ├── cassandra ├── cassandra │ ├── Dockerfile │ ├── README.md │ └── docker-entrypoint.sh └── example │ ├── docker-compose.yml │ └── docker-entrypoint-initdb.d │ └── init.cql ├── docker-compose └── docker-compose.yml ├── docker-in-docker ├── README.md └── dind.bash ├── gitlab ├── docker-compose.yml └── gitlab.bash ├── glassfish └── Dockerfile ├── gradle ├── Dockerfile └── docker-compose.yaml ├── h2 └── Dockerfile ├── jboss-eap-6.4 └── Dockerfile ├── jboss-eap-7.1 └── Dockerfile ├── jboss-eap-7.2 └── Dockerfile ├── jboss-eap-7.3 └── Dockerfile ├── jboss └── Dockerfile ├── jboss4 └── Dockerfile ├── jenkins ├── Dockerfile ├── docker-compose.yml └── jenkins.bash ├── kafka ├── Dockerfile └── docker-compose.yaml ├── kong └── docker-compose.yml ├── maven ├── Dockerfile └── docker-compose.yaml ├── mercurial └── Dockerfile ├── mongo ├── Dockerfile ├── docker-compose-healthcheck-mongo-express.yml ├── docker-compose-healthcheck.yml └── docker-compose.yml ├── mysql-phpmyadmin ├── README.md └── docker-compose.yaml ├── mysql ├── docker-compose-heathcheck.yml ├── docker-compose-old.yml └── docker-compose.yaml ├── nginx ├── Dockerfile └── README.md ├── oracle ├── daggerok-11xe.bash ├── docker-compose.yml └── sath89-12c.bash ├── portainer └── README.md ├── postgres ├── Dockerfile ├── docker-compose-healthcheck.yml ├── docker-compose-legacy.yaml ├── docker-compose-old.yml ├── docker-compose.yaml └── migrations │ ├── README.adoc │ ├── db │ ├── Dockerfile │ └── migration.sql │ └── docker-compose.yaml ├── rabbitmq-mongo ├── docker.compose.yml └── rabbitmq-healthcheck │ └── Dockerfile ├── rabbitmq ├── Dockerfile-stomp ├── custom │ ├── Dockerfile │ └── docker-compose.yml ├── docker-compose-healthcheck.yml └── docker-compose.yml ├── rancher └── README.md ├── redis-commander ├── README.adoc ├── docker-compose-old.yaml └── 
docker-compose-yaml ├── redis ├── Dockerfile ├── docker-compose-old.yml ├── docker-compose-redis-commander-healthcheck.yml ├── docker-compose-redis-ui.yml ├── docker-compose.yml ├── redis-commander-healthcheck │ └── Dockerfile ├── target │ ├── appendonly.aof │ └── dump.rdb └── v3 │ ├── Dockerfile │ └── docker-compose-healthcheck.yml ├── sonarqube └── docker-compose.yml └── spring-boot ├── Dockerfile ├── Dockerfile-healthcheck ├── Dockerfile-no-healthcheck ├── build └── libs │ └── spring-config-0.0.1.jar ├── config-server └── Dockerfile └── docker-compose.yml /.github/workflows/h2.yml: -------------------------------------------------------------------------------- 1 | name: h2 2 | on: 3 | push: 4 | paths: 5 | - 'h2/Dockerfile' 6 | - '.github/workflows/h2.yml' 7 | jobs: 8 | test-h2: 9 | name: test h2 10 | # run when no manual trigger was given, or when it does not start with m/M. 11 | # NOTE: '&&' binds tighter than '||' in GitHub expressions, so this parses as 12 | # trigger == '' || (!startsWith(m) && !startsWith(M)); the previous all-'||' 13 | # form was a tautology (no string starts with both 'm' and 'M'). 14 | if: github.event.inputs.trigger == '' 15 | || !startsWith(github.event.inputs.trigger, 'm') 16 | && !startsWith(github.event.inputs.trigger, 'M') 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v1 20 | - uses: actions/cache@v3 21 | with: 22 | path: | 23 | ~/.docker 24 | key: ${{ runner.os }}-h2 25 | - run: if [[ "" != `docker ps -aq` ]] ; then docker rm -f -v `docker ps -aq` ; fi 26 | - uses: actions/setup-node@v3 27 | with: 28 | node-version: 18.2.0 29 | - run: sudo apt install -y httpie 30 | - run: docker build --no-cache -t daggerok/h2 -f $GITHUB_WORKSPACE/h2/Dockerfile . 31 | #- run: | 32 | # mkdir -p /tmp/h2/data ; docker run --rm -d -p 8082:8082 -p 9092:9092 --name h2 -v /tmp/h2/data:/home/h2/data \ 33 | # --health-start-period=1s --health-retries=1111 --health-interval=1s --health-timeout=5s \ 34 | # --health-cmd='curl 127.0.0.1:8082 || exit 1' \ 35 | # daggerok/h2 36 | - run: mkdir -p /tmp/h2/data ; docker run --rm -d -p 8082:8082 -p 9092:9092 --name h2 -v /tmp/h2/data:/home/h2/data daggerok/h2 37 | - run: while [[ $(docker ps -n 1 -q -f health=healthy -f status=running | wc -l) -lt 1 ]] ; do sleep 3 ; echo -n '.'
; done ; echo 'H2 is ready.' 34 | - run: npm_config_yes=true npx wait-port 8082 9092 35 | - run: http --ignore-stdin :8082 36 | - run: docker stop h2 37 | push-h2: 38 | name: Push daggerok/h2 Docker image to Docker Hub 39 | runs-on: ubuntu-latest 40 | steps: 41 | - uses: actions/checkout@v3 42 | - name: Log in to Docker Hub 43 | uses: docker/login-action@v2 44 | with: 45 | username: ${{ secrets.DOCKER_USERNAME }} 46 | password: ${{ secrets.DOCKER_PASSWORD }} 47 | - name: Extract metadata (tags, labels) for Docker 48 | id: meta 49 | uses: docker/metadata-action@v3 50 | with: 51 | images: daggerok/h2 52 | - name: Build and push Docker image 53 | uses: docker/build-push-action@v3 54 | with: 55 | file: "./h2/Dockerfile" 56 | context: . 57 | push: true 58 | tags: ${{ steps.meta.outputs.tags }},${{ secrets.DOCKER_USERNAME }}/h2:latest 59 | labels: ${{ steps.meta.outputs.labels }} 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | *.ipr 4 | *.iml 5 | *.iws 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018-2019 Maksim Kostromin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.adoc: -------------------------------------------------------------------------------- 1 | # docker image:https://github.com/daggerok/docker/actions/workflows/h2.yml/badge.svg["h2", link=https://github.com/daggerok/docker/actions/workflows/h2.yml] 2 | 3 | collection of docker / docker-compose files for: 4 | 5 | - link:h2/[H2 database 2.1.214 openjdk:8u332-jre-buster] 6 | + 7 | [source,bash] 8 | ---- 9 | docker build --no-cache -t daggerok/h2 -f ./h2/Dockerfile . 
10 | docker run --rm -it -p 8082:8082 -p 9092:9092 --name h2 -v /tmp/h2/data:/home/h2/data daggerok/h2 11 | # and use next JDBC url: jdbc:h2:tcp://127.0.0.1:9092/test;MODE=MySQL 12 | ---- 13 | - link:alpine-openstack-terraform/[Alpine + OpenStack + Terraform] 14 | - link:cassandra/[cassandra] 15 | - link:docker-in-docker/[docker in docker (dind)] 16 | - link:gitlab/[gitlab] 17 | - link:glassfish/[glassfish] 18 | - link:gradle/[gradle built app] 19 | - link:jboss/[JBoss EAP / WildFly] 20 | - link:jenkins/[jenkins] 21 | - link:maven/[maven built app] 22 | - link:mercurial/[mercurial web server] 23 | - link:kong/[kong] 24 | - link:mongo/[mongo] 25 | - link:mysql/[mysql] 26 | - link:mysql-phpmyadmin/[MySQL + phpMyAdmin] 27 | - link:nginx/[nginx] 28 | - link:alpine-openstack-terraform/[openstack] 29 | - link:oracle/[oracle] 30 | - link:postgres/[postgres] 31 | - link:rabbitmq-mongo/[rabbitmq-mongo] 32 | - link:rabbitmq/[rabbitmq] 33 | - link:redis/[redis] 34 | - link:sonarqube/[sonarqube] 35 | - link:spring-boot/[spring-boot (java jar)] 36 | - link:alpine-openstack-terraform/[terraform] 37 | - etc... 38 | -------------------------------------------------------------------------------- /alpine-openstack-terraform/Dockerfile: -------------------------------------------------------------------------------- 1 | # 1) 2 | # docker run --rm -it alpine:3.8 ash 3 | # ... 4 | 5 | # 2) 6 | # docker build -t daggerok/devops . 7 | # docker run --rm -it daggerok/devops 8 | # ...
9 | 10 | FROM alpine:3.8 11 | RUN apk add --upgrade --no-cache ca-certificates \ 12 | wget \ 13 | && apk add --upgrade --no-cache musl-dev \ 14 | libffi-dev \ 15 | openssl-dev \ 16 | readline-dev \ 17 | && apk add --upgrade --no-cache gcc \ 18 | py2-cffi \ 19 | python-dev \ 20 | python2-tkinter \ 21 | py2-netifaces \ 22 | py-setuptools \ 23 | py2-pip \ 24 | && pip install --upgrade pip \ 25 | && pip install --upgrade python-openstackclient \ 26 | && apk add --upgrade --no-cache git terraform \ 27 | && apk del --purge --no-cache ca-certificates \ 28 | wget \ 29 | musl-dev \ 30 | libffi-dev \ 31 | python-dev \ 32 | openssl-dev \ 33 | readline-dev \ 34 | py-setuptools \ 35 | py2-cffi \ 36 | py2-netifaces \ 37 | python2-tkinter \ 38 | gcc \ 39 | py2-pip \ 40 | && rm -rf /var/cache/apk/* 41 | CMD /bin/ash 42 | -------------------------------------------------------------------------------- /axon-server/README.md: -------------------------------------------------------------------------------- 1 | # axon server 2 | run in docker 3 | 4 | ```bash 5 | docker run -it --rm --name axon-server -p 8024:8024 -p 8124:8124 axoniq/axonserver 6 | ``` 7 | 8 | open http://127.0.0.1:8024/ 9 | -------------------------------------------------------------------------------- /cassandra/cassandra/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # maybe use laTesT docker-image-version of cassandra? 3 | FROM cassandra:3.11 4 | 5 | # replace docker's official enTrypoinT here 6 | COPY docker-entrypoint.sh /docker-entrypoint.sh 7 | 8 | # make my modified enTrypoinT execuTable 9 | RUN chmod a+x docker-entrypoint.sh 10 | 11 | # ENTRYPOINT will be called in parenT-docker-image! 
12 | 13 | -------------------------------------------------------------------------------- /cassandra/cassandra/README.md: -------------------------------------------------------------------------------- 1 | # Cassandra for Docker 2 | 3 | clone dschroe/cassandra-docker repository: 4 | 5 | ```bash 6 | git clone https://github.com/dschroe/cassandra-docker . 7 | ``` 8 | 9 | prepare initial db script: 10 | 11 | ```bash 12 | mkdir docker-entrypoint-initdb.d 13 | vim docker-entrypoint-initdb.d/init.cql 14 | # ... 15 | ``` 16 | 17 | use compose to simplify bootstrap: 18 | 19 | ```bash 20 | vim docker-compose.yml 21 | # ... 22 | ``` 23 | 24 | ```bash 25 | docker-compose up --build 26 | # ... 27 | cassadnra_1 | INFO [MigrationStage:1] 2017-10-06 20:14:48,548 ColumnFamilyStore.java:406 - Initializing demo.model 28 | ``` 29 | 30 | have fun! 31 | 32 | **original README** 33 | 34 | You can copy .cql files under __docker-entrypoint-initdb.d/__ directory. 35 | 36 | They will automatically be invoked as soon as Apache-Cassandra is available. 37 | -------------------------------------------------------------------------------- /cassandra/cassandra/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This EnTrypoinT is more or less The same as The EnTrypoinT from The official image: changes 68-73! 
4 | 5 | if [ "${1:0:1}" = '-' ]; then 6 | set -- cassandra -f "$@" 7 | fi 8 | 9 | if [ "$1" = 'cassandra' -a "$(id -u)" = '0' ]; then 10 | 11 | chown -R cassandra /var/lib/cassandra /var/log/cassandra "$CASSANDRA_CONFIG" 12 | exec gosu cassandra "$BASH_SOURCE" "$@" 13 | fi 14 | 15 | if [ "$1" = 'cassandra' ]; then 16 | : ${CASSANDRA_RPC_ADDRESS='0.0.0.0'} 17 | 18 | : ${CASSANDRA_LISTEN_ADDRESS='auto'} 19 | 20 | if [ "$CASSANDRA_LISTEN_ADDRESS" = 'auto' ]; then 21 | CASSANDRA_LISTEN_ADDRESS="$(hostname --ip-address)" 22 | fi 23 | 24 | : ${CASSANDRA_BROADCAST_ADDRESS="$CASSANDRA_LISTEN_ADDRESS"} 25 | 26 | if [ "$CASSANDRA_BROADCAST_ADDRESS" = 'auto' ]; then 27 | CASSANDRA_BROADCAST_ADDRESS="$(hostname --ip-address)" 28 | fi 29 | 30 | : ${CASSANDRA_BROADCAST_RPC_ADDRESS:=$CASSANDRA_BROADCAST_ADDRESS} 31 | 32 | if [ -n "${CASSANDRA_NAME:+1}" ]; then 33 | : ${CASSANDRA_SEEDS:="cassandra"} 34 | fi 35 | 36 | : ${CASSANDRA_SEEDS:="$CASSANDRA_BROADCAST_ADDRESS"} 37 | 38 | sed -ri 's/(- seeds:).*/\1 "'"$CASSANDRA_SEEDS"'"/' "$CASSANDRA_CONFIG/cassandra.yaml" 39 | 40 | for yaml in \ 41 | broadcast_rpc_address \ 42 | broadcast_address \ 43 | endpoint_snitch \ 44 | listen_address \ 45 | cluster_name \ 46 | rpc_address \ 47 | num_tokens \ 48 | start_rpc \ 49 | ; do 50 | var="CASSANDRA_${yaml^^}" 51 | val="${!var}" 52 | if [ "$val" ]; then 53 | sed -ri 's/^(# )?('"$yaml"':).*/\2 '"$val"'/' "$CASSANDRA_CONFIG/cassandra.yaml" 54 | fi 55 | done 56 | 57 | for rackdc in dc rack; do 58 | var="CASSANDRA_${rackdc^^}" 59 | val="${!var}" 60 | if [ "$val" ]; then 61 | sed -ri 's/^('"$rackdc"'=).*/\1 '"$val"'/' "$CASSANDRA_CONFIG/cassandra-rackdc.properties" 62 | fi 63 | done 64 | fi 65 | 66 | # Works only if you define Tables like: CreaTe if noT exisTs - oTherwise following loop runs infiniTely! 
67 | 68 | for f in docker-entrypoint-initdb.d/*; do 69 | case "$f" in 70 | *.cql) echo "$0: running $f" && until cqlsh -f "$f"; do >&2 echo "Cassandra is unavailable - sleeping"; sleep 2; done & ;; 71 | esac 72 | echo 73 | done 74 | 75 | exec "$@" 76 | -------------------------------------------------------------------------------- /cassandra/example/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | cassadnra: 6 | image: 127.0.0.1:5000/cassandra 7 | build: 8 | context: ../cassandra 9 | restart: unless-stopped 10 | ports: ["9042:9042"] 11 | volumes: 12 | - cassandra-data:/var/lib/cassandra 13 | - cassandra-data:/home/cassandra/.cassandra 14 | - ./docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d 15 | networks: [backing-services] 16 | 17 | volumes: 18 | cassandra-data: {} 19 | 20 | networks: 21 | backing-services: 22 | 23 | -------------------------------------------------------------------------------- /cassandra/example/docker-entrypoint-initdb.d/init.cql: -------------------------------------------------------------------------------- 1 | CREATE KEYSPACE IF NOT EXISTS demo WITH replication = 2 | {'class':'SimpleStrategy','replication_factor':'1'}; 3 | 4 | -------------------------------------------------------------------------------- /docker-compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | networks: 3 | step5-compose-network: 4 | driver: bridge 5 | services: 6 | postgres: 7 | image: postgres:alpine 8 | labels: 9 | MAINTAINER: Maksim Kostromin https://github.com/daggerok 10 | ports: ['5432:5432'] 11 | environment: 12 | POSTGRES_DB: postgres 13 | POSTGRES_USER: postgres 14 | POSTGRES_PASSWORD: postgres 15 | POSTGRES_HOST: postgres 16 | POSTGRES_PORT: 5432 17 | networks: 18 | step5-compose-network: 19 | aliases: 20 | - postgres 21 | - postgres.step5-compose-network 22 | healthcheck: 23 
| retries: 9 24 | timeout: 3s 25 | interval: 3s 26 | test: pg_isready -h 127.0.0.1 -p $$POSTGRES_PORT -d $$POSTGRES_DB -U $$POSTGRES_USER 27 | sessions: 28 | image: openjdk:8u212-jre-alpine 29 | labels: 30 | MAINTAINER: Maksim Kostromin https://github.com/daggerok 31 | volumes: ['./step5-sessions-rsocket-service/target:/tmp'] 32 | entrypoint: ash -c 'java -jar /tmp/*.jar' 33 | ports: ['8093:8093'] 34 | environment: 35 | SERVER_PORT: 8093 36 | POSTGRES_DB: postgres 37 | POSTGRES_USER: postgres 38 | POSTGRES_PASSWORD: postgres 39 | POSTGRES_HOST: postgres 40 | POSTGRES_PORT: 5432 41 | depends_on: 42 | postgres: 43 | condition: service_healthy 44 | networks: 45 | step5-compose-network: 46 | aliases: 47 | - sessions 48 | - sessions.step5-compose-network 49 | healthcheck: 50 | retries: 9 51 | timeout: 3s 52 | interval: 3s 53 | test: wget --quiet --tries=1 --spider http://127.0.0.1:$$SERVER_PORT/actuator/health || exit 54 | speakers: 55 | image: openjdk:8u212-jre-alpine 56 | labels: 57 | MAINTAINER: Maksim Kostromin https://github.com/daggerok 58 | volumes: ['./step5-speakers-rest-api-service/target:/tmp'] 59 | entrypoint: ash -c 'java -jar /tmp/*.jar' 60 | ports: ['8094:8094'] 61 | environment: 62 | SERVER_PORT: 8094 63 | POSTGRES_DB: postgres 64 | POSTGRES_USER: postgres 65 | POSTGRES_PASSWORD: postgres 66 | POSTGRES_HOST: postgres 67 | POSTGRES_PORT: 5432 68 | depends_on: 69 | postgres: 70 | condition: service_healthy 71 | networks: 72 | step5-compose-network: 73 | aliases: 74 | - speakers 75 | - speakers.step5-compose-network 76 | healthcheck: 77 | retries: 9 78 | timeout: 3s 79 | interval: 3s 80 | test: wget --quiet --tries=1 --spider http://127.0.0.1:$$SERVER_PORT/actuator/health || exit 81 | frontend: 82 | image: openjdk:8u212-jre-alpine 83 | labels: 84 | MAINTAINER: Maksim Kostromin https://github.com/daggerok 85 | volumes: ['./step5-frontend/target:/tmp'] 86 | entrypoint: ash -c 'java -jar /tmp/*.jar' 87 | ports: ['8092:8092'] 88 | environment: 89 | SERVER_PORT: 
8092 90 | SESSIONS_HOST: sessions 91 | SESSIONS_PORT: 8093 92 | SPEAKERS_HOST: speakers 93 | SPEAKERS_PORT: 8094 94 | depends_on: 95 | sessions: 96 | condition: service_healthy 97 | speakers: 98 | condition: service_healthy 99 | networks: 100 | step5-compose-network: 101 | aliases: 102 | - frontend 103 | - frontend.step5-compose-network 104 | healthcheck: 105 | retries: 9 106 | timeout: 3s 107 | interval: 3s 108 | test: wget --quiet --tries=1 --spider http://127.0.0.1:$$SERVER_PORT/actuator/health || exit 109 | healthcheck: 110 | image: openjdk:8u212-jre-alpine 111 | depends_on: 112 | postgres: 113 | condition: service_started 114 | sessions: 115 | condition: service_started 116 | speakers: 117 | condition: service_started 118 | frontend: 119 | condition: service_healthy 120 | -------------------------------------------------------------------------------- /docker-in-docker/README.md: -------------------------------------------------------------------------------- 1 | # docker in docker 2 | 3 | ```bash 4 | export DOCKER_VERSION=1.11.2 5 | export DIND_VERSION="$DOCKER_VERSION-dind" 6 | export DOCKER_NAME="my-docker" 7 | export DIND_NAME="my-dind" 8 | 9 | docker rm -f $DIND_NAME 10 | 11 | docker pull docker:$DIND_VERSION 12 | docker run --name $DIND_NAME -it --privileged -d docker:$DIND_VERSION 13 | docker logs $DIND_NAME 14 | 15 | export dind="docker run --name $DOCKER_NAME -it --rm --link $DIND_NAME:docker docker:$DOCKER_VERSION" 16 | 17 | eval $dind version 18 | # eval $dind sh 19 | eval $dind info 20 | 21 | docker stop $DIND_NAME 22 | docker start $DIND_NAME 23 | 24 | docker rm -f $DIND_NAME 25 | docker run --name $DIND_NAME -it --privileged -d docker:$DIND_VERSION --storage-driver=vfs 26 | eval $dind info 27 | ``` 28 | 29 | fish 30 | 31 | ```fish 32 | set dind "docker run --name $DOCKER_NAME -it --rm --link $DIND_NAME:docker docker:$DOCKER_VERSION" 33 | ``` 34 | -------------------------------------------------------------------------------- 
/docker-in-docker/dind.bash: -------------------------------------------------------------------------------- 1 | export DOCKER_VERSION=1.11.2 2 | export DIND_VERSION="$DOCKER_VERSION-dind" 3 | export DOCKER_NAME="my-docker" 4 | export DIND_NAME="my-dind" 5 | 6 | docker rm -f $DIND_NAME 7 | 8 | docker pull docker:$DIND_VERSION 9 | docker run --name $DIND_NAME -it --privileged -d docker:$DIND_VERSION 10 | docker logs $DIND_NAME 11 | # bash: 12 | export dind="docker run --name $DOCKER_NAME -it --rm --link $DIND_NAME:docker docker:$DOCKER_VERSION" 13 | # fish: 14 | # set dind "docker run --name $DOCKER_NAME -it --rm --link $DIND_NAME:docker docker:$DOCKER_VERSION" 15 | 16 | eval $dind version 17 | # eval $dind sh 18 | eval $dind info 19 | 20 | docker stop $DIND_NAME 21 | docker start $DIND_NAME 22 | 23 | docker rm -f $DIND_NAME 24 | docker run --name $DIND_NAME -it --privileged -d docker:$DIND_VERSION --storage-driver=vfs 25 | eval $dind info 26 | -------------------------------------------------------------------------------- /gitlab/docker-compose.yml: -------------------------------------------------------------------------------- 1 | my-gitlab: 2 | hostname: my-gitlab 3 | container_name: my-gitlab 4 | image: 'gitlab/gitlab-ce:8.15.3-ce.0' 5 | environment: 6 | GITLAB_OMNIBUS_CONFIG: | 7 | external_url 'http://localhost' 8 | gitlab_rails['gitlab_shell_ssh_port'] = 2221 9 | ports: 10 | - '80:80' 11 | - '443:443' 12 | - '2222:22' 13 | volumes: 14 | - './etc/gitlab:/etc/gitlab' 15 | - './var/log/gitlab:/var/log/gitlab' 16 | - './var/opt/gitlab:/var/opt/gitlab' 17 | -------------------------------------------------------------------------------- /gitlab/gitlab.bash: -------------------------------------------------------------------------------- 1 | export GITLAB_VERSION="8.15.3-ce.0" 2 | # container name must match the --name used by the docker run below, not the version 3 | export GITLAB_NAME="gitlab" 4 | 5 | docker rm $GITLAB_NAME 6 | docker pull gitlab/gitlab-ce:$GITLAB_VERSION 7 | 8 | # docker run --detach \ 9 | # --name $GITLAB_NAME \ 10 | #
--publish 2222:22 \ 10 | # --publish 80:80 \ 11 | # --publish 443:443 \ 12 | # --volume $PWD/etc/gitlab:/etc/gitlab \ 13 | # --volume $PWD/var/log/gitlab:/var/log/gitlab \ 14 | # --volume $PWD/var/opt/gitlab:/var/opt/gitlab \ 15 | # gitlab/gitlab-ce:$GITLAB_VERSION 16 | 17 | docker run --detach --publish 443:443 --publish 80:80 --publish 22:22 --name gitlab --volume $PWD/gitlab/data:/var/opt/gitlab gitlab/gitlab-ce:$GITLAB_VERSION 18 | 19 | # docker exec -it $GITLAB_NAME bash 20 | # docker rm -f $GITLAB_NAME 21 | -------------------------------------------------------------------------------- /glassfish/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jdk-alpine 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok/docker 3 | 4 | ARG GLASSFISH_VERSION_ARG="5.0" 5 | ENV GLASSFISH_VERSION=${GLASSFISH_VERSION_ARG} \ 6 | GLASSFISH_USER="glassfish5" \ 7 | GLASSFISH_FILE="glassfish-${GLASSFISH_VERSION}.zip" 8 | # GLASSFISH_FILE="glassfish-${GLASSFISH_VERSION}-web.zip" 9 | ENV GLASSFISH_USER_HOME="/home/${GLASSFISH_USER}" 10 | ENV GLASSFISH_HOME="${GLASSFISH_USER_HOME}/${GLASSFISH_USER}" \ 11 | GLASSFISH_URL="http://download.oracle.com/glassfish/${GLASSFISH_VERSION}/release/${GLASSFISH_FILE}" 12 | 13 | RUN apk --no-cache --update add busybox-suid bash wget ca-certificates unzip \ 14 | && rm -rf /var/cache/apk/* \ 15 | && addgroup ${GLASSFISH_USER}-group \ 16 | && adduser -h ${GLASSFISH_USER_HOME} -s /bin/bash -D -u 1025 ${GLASSFISH_USER} ${GLASSFISH_USER}-group 17 | USER ${GLASSFISH_USER} 18 | WORKDIR ${GLASSFISH_USER_HOME} 19 | 20 | CMD /bin/bash 21 | EXPOSE 8080 22 | ENTRYPOINT /bin/bash ${GLASSFISH_HOME}/bin/asadmin restart-domain domain1 \ 23 | && tail -f ${GLASSFISH_HOME}/glassfish/domains/domain1/logs/server.log 24 | 25 | RUN wget ${GLASSFISH_URL} -O ${GLASSFISH_USER_HOME}/${GLASSFISH_FILE} \ 26 | && unzip ${GLASSFISH_USER_HOME}/${GLASSFISH_FILE} -d ${GLASSFISH_USER_HOME} \ 27 | && rm -rf 
${GLASSFISH_USER_HOME}/${GLASSFISH_FILE} 28 | 29 | ## check all apps healthy (in current example we are expecting to have apps with /ui/ and /rest-api/ contexts deployed: 30 | #HEALTHCHECK --interval=1s --timeout=3s --retries=30 \ 31 | # CMD wget -q --spider http://127.0.0.1:8080/rest-api/health \ 32 | # && wget -q --spider http://127.0.0.1:8080/ui/ \ 33 | # || exit 1 34 | 35 | # deploy apps 36 | COPY ./path/to/*.war ./path/to/another/*.war ${GLASSFISH_HOME}/glassfish/domains/domain1/autodeploy/ 37 | -------------------------------------------------------------------------------- /gradle/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jre-alpine3.7 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache --update add busybox-suid bash curl unzip sudo openssh-client shadow wget \ 4 | && adduser -h /home/appuser -s /bin/bash -D -u 1025 appuser wheel \ 5 | && echo "appuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 6 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 7 | && wget --no-cookies \ 8 | --no-check-certificate \ 9 | --header "Cookie: oraclelicense=accept-securebackup-cookie" \ 10 | "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" \ 11 | -O /tmp/jce_policy-8.zip \ 12 | && unzip -o /tmp/jce_policy-8.zip -d /tmp \ 13 | && mv -f ${JAVA_HOME}/lib/security ${JAVA_HOME}/lib/backup-security || true \ 14 | && mv -f /tmp/UnlimitedJCEPolicyJDK8 ${JAVA_HOME}/lib/security \ 15 | && apk del busybox-suid unzip openssh-client shadow wget \ 16 | && rm -rf /var/cache/apk/* /tmp/* 17 | USER appuser 18 | WORKDIR /home/appuser 19 | VOLUME /home/appuser 20 | # ARG JAVA_OPTS_ARGS="\ 21 | # -Djava.net.preferIPv4Stack=true \ 22 | # -XX:+UnlockExperimentalVMOptions \ 23 | # -XX:+UseCGroupMemoryLimitForHeap \ 24 | # -XshowSettings:vm " 25 | # ENV JAVA_OPTS="${JAVA_OPTS} ${JAVA_OPTS_ARGS}" 26 | # ENTRYPOINT java ${JAVA_OPTS} -jar ./app.jar 27 | # CMD /bin/bash 
28 | ENTRYPOINT java -Djava.net.preferIPv4Stack=true \ 29 | -XX:+UnlockExperimentalVMOptions \ 30 | -XX:+UseCGroupMemoryLimitForHeap \ 31 | -XshowSettings:vm \ 32 | -jar ./app.jar 33 | CMD /bin/bash 34 | EXPOSE 8080 35 | HEALTHCHECK --timeout=1s \ 36 | --retries=35 \ 37 | CMD curl -f http://127.0.0.1:8080/actuator/health || exit 1 38 | COPY --chown=appuser ./build/libs/*.jar ./app.jar 39 | -------------------------------------------------------------------------------- /gradle/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | gradle-app: 6 | build: 7 | context: . 8 | dockerfile: ./Dockerfile 9 | volumes: ["gradle-app-data:/home/app"] 10 | ports: 11 | # - "5005:5005" 12 | - "8080:8080" 13 | restart: unless-stopped 14 | networks: [backing-services] 15 | 16 | volumes: 17 | gradle-app-data: {} 18 | 19 | networks: 20 | backing-services: 21 | driver: bridge 22 | -------------------------------------------------------------------------------- /h2/Dockerfile: -------------------------------------------------------------------------------- 1 | ### 2 | # docker build --no-cache -t daggerok/h2 -f ./h2/Dockerfile . 
3 | # docker run --rm -it -p 8082:8082 -p 9092:9092 --name h2 -v /tmp/h2/data:/home/h2/data daggerok/h2 4 | # mkdir -p /tmp/h2/data ; docker run --rm -d -p 8082:8082 -p 9092:9092 --name h2 -v /tmp/h2/data:/home/h2/data \ 5 | # --health-start-period=1s --health-retries=3 --health-interval=3s --health-timeout=3s \ 6 | # --health-cmd='curl 127.0.0.1:8082 || exit 1' \ 7 | # daggerok/h2 8 | # docker stop h2 9 | ### 10 | FROM openjdk:8u332-jre-buster 11 | 12 | ARG H2_VERSION_ARG='2.1.214' 13 | ARG H2_USERNANE_ARG='h2' 14 | ARG H2_TCP_PORT_ARG='9092' 15 | ARG H2_WEB_PORT_ARG='8082' 16 | 17 | ENV H2_VERSION=${H2_VERSION_ARG} \ 18 | H2_USERNANE=${H2_USERNANE_ARG} \ 19 | H2_TCP_PORT=${H2_TCP_PORT_ARG} \ 20 | H2_WEB_PORT=${H2_WEB_PORT_ARG} 21 | 22 | ENV H2_USER_HOME="/home/${H2_USERNANE_ARG}" \ 23 | H2_URL="https://repo1.maven.org/maven2/com/h2database/h2/${H2_VERSION}/h2-${H2_VERSION}.jar" 24 | 25 | ENV H2_JAR="${H2_USER_HOME}/h2-${H2_VERSION}.jar" \ 26 | H2_DATA="${H2_USER_HOME}/data" \ 27 | PATH="${PATH}:${JAVA_HOME}/bin" 28 | 29 | RUN apt-get --no-install-recommends -yqq update \ 30 | && apt-get --no-install-recommends -yqq install wget sudo \ 31 | && echo "${H2_USERNANE} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 32 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 33 | && adduser --home /home/${H2_USERNANE} --shell /bin/bash ${H2_USERNANE} \ 34 | && wget ${H2_URL} -O ${H2_JAR} \ 35 | && mkdir -p ${H2_DATA} \ 36 | && chown -Rcv ${H2_USERNANE}:${H2_USERNANE} ${H2_JAR} ${H2_DATA} 37 | 38 | HEALTHCHECK --start-period=1s --retries=3 --interval=3s --timeout=3s \ 39 | CMD ( curl --silent --fail "127.0.0.1:${H2_WEB_PORT}" ) || exit 1 40 | 41 | USER ${H2_USERNANE} 42 | WORKDIR ${H2_USER_HOME} 43 | VOLUME ${H2_USER_HOME}/data 44 | 45 | ARG JAVA_OPTS_ARGS=' \ 46 | -Djava.net.preferIPv4Stack=true \ 47 | -XX:+UnlockExperimentalVMOptions \ 48 | -XX:+UseCGroupMemoryLimitForHeap \ 49 | -XshowSettings:vm ' 50 | ENV JAVA_OPTS="${JAVA_OPTS} ${JAVA_OPTS_ARGS}" 51 | 52 | 
EXPOSE ${H2_TCP_PORT} ${H2_WEB_PORT} 53 | ENTRYPOINT ["/bin/bash", "-c"] 54 | CMD [" \ 55 | mkdir -p ${H2_DATA} \ 56 | && java -cp ./*.jar org.h2.tools.Server \ 57 | -tcp -tcpAllowOthers -tcpPort ${H2_TCP_PORT} \ 58 | -web -webAllowOthers -webPort ${H2_WEB_PORT} \ 59 | -trace -ifNotExists -baseDir ${H2_DATA} \ 60 | "] 61 | -------------------------------------------------------------------------------- /jboss-eap-6.4/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jdk-alpine 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok/docker 3 | 4 | ARG JBOSS_USER="jboss-eap-6.4" 5 | ARG JBOSS_FILE_ARG="jboss-eap-6.4.0.zip" 6 | ARG JBOSS_ADMIN_USER_ARG="admin" 7 | ARG JBOSS_ADMIN_PASSWORD_ARG="Admin.123" 8 | 9 | ENV JBOSS_FILE=${JBOSS_FILE_ARG} 10 | ENV JBOSS_URL="https://www.dropbox.com/s/xl2io9dhc6zxw9m/${JBOSS_FILE}" 11 | ENV JBOSS_ADMIN_PASSWORD=${JBOSS_ADMIN_PASSWORD_ARG} 12 | ENV JBOSS_ADMIN_USER=${JBOSS_ADMIN_USER_ARG} 13 | ENV JBOSS_USER_HOME="/home/${JBOSS_USER}" 14 | ENV JBOSS_HOME="${JBOSS_USER_HOME}/${JBOSS_USER}" 15 | 16 | RUN apk --no-cache --update add busybox-suid bash wget ca-certificates unzip sudo openssh-client shadow \ 17 | && rm -rf /var/cache/apk/* \ 18 | && addgroup ${JBOSS_USER}-group \ 19 | && echo "${JBOSS_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 20 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 21 | && adduser -h ${JBOSS_USER_HOME} -s /bin/bash -D -u 1025 ${JBOSS_USER} ${JBOSS_USER}-group \ 22 | && usermod -a -G wheel ${JBOSS_USER} 23 | 24 | USER ${JBOSS_USER} 25 | WORKDIR ${JBOSS_USER_HOME} 26 | 27 | CMD /bin/bash 28 | EXPOSE 8080 9990 29 | ENTRYPOINT /bin/bash ${JBOSS_HOME}/bin/standalone.sh 30 | 31 | RUN wget ${JBOSS_URL} -O ${JBOSS_USER_HOME}/${JBOSS_FILE} \ 32 | && unzip ${JBOSS_USER_HOME}/${JBOSS_FILE} -d ${JBOSS_USER_HOME} \ 33 | && rm -rf ${JBOSS_USER_HOME}/${JBOSS_FILE} \ 34 | && ${JBOSS_HOME}/bin/add-user.sh ${JBOSS_ADMIN_USER} 
${JBOSS_ADMIN_PASSWORD} --silent \ 35 | && echo "JAVA_OPTS=\"\$JAVA_OPTS -Djboss.bind.address=0.0.0.0 -Djboss.bind.address.management=0.0.0.0\"" >> ${JBOSS_HOME}/bin/standalone.conf 36 | 37 | ## check all apps healthy (in current example we are expecting to have apps with /ui/ and /rest-api/ contexts deployed: 38 | #HEALTHCHECK --interval=1s --timeout=3s --retries=30 \ 39 | # CMD wget -q --spider http://127.0.0.1:8080/rest-api/health \ 40 | # && wget -q --spider http://127.0.0.1:8080/ui/ \ 41 | # || exit 1 42 | 43 | # deploy apps 44 | COPY ./path/to/*.war ./path/to/another/*.war ${JBOSS_HOME}/standalone/deployments/ 45 | -------------------------------------------------------------------------------- /jboss-eap-7.1/Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t daggerok/jboss:eap-7.1 . 2 | # docker tag daggerok/jboss:eap-7.1 daggerok/jboss:eap-7.1-alpine 3 | # docker tag daggerok/jboss:eap-7.1 daggerok/jboss:eap-7.1-latest 4 | # docker push daggerok/jboss:eap-7.1 5 | 6 | FROM openjdk:8u151-jdk-alpine 7 | MAINTAINER Maksim Kostromin https://github.com/daggerok/docker 8 | 9 | ARG JBOSS_USER="jboss-eap-7.1" 10 | ARG DROPBOX_HASH_ARG="fx1jnh89w9mosjs" 11 | ARG JBOSS_FILE_ARG="jboss-eap-7.1.0.zip" 12 | ARG JBOSS_ADMIN_USER_ARG="admin" 13 | ARG JBOSS_ADMIN_PASSWORD_ARG="Admin.123" 14 | 15 | ENV DROPBOX_HASH=${DROPBOX_HASH_ARG} \ 16 | JBOSS_FILE=${JBOSS_FILE_ARG} 17 | ENV JBOSS_URL="https://www.dropbox.com/s/${DROPBOX_HASH}/${JBOSS_FILE}" \ 18 | JBOSS_ADMIN_PASSWORD=${JBOSS_ADMIN_PASSWORD_ARG} \ 19 | JBOSS_ADMIN_USER=${JBOSS_ADMIN_USER_ARG} \ 20 | JBOSS_USER_HOME="/home/${JBOSS_USER}" 21 | ENV JBOSS_HOME="${JBOSS_USER_HOME}/${JBOSS_USER}" 22 | 23 | RUN apk --no-cache --update add busybox-suid bash wget ca-certificates unzip sudo openssh-client shadow \ 24 | && addgroup ${JBOSS_USER}-group \ 25 | && echo "${JBOSS_USER} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 26 | && sed -i "s/.*requiretty$/Defaults 
!requiretty/" /etc/sudoers \ 27 | && adduser -h ${JBOSS_USER_HOME} -s /bin/bash -D -u 1025 ${JBOSS_USER} ${JBOSS_USER}-group \ 28 | && usermod -a -G wheel ${JBOSS_USER} \ 29 | && apk --no-cache --no-network --purge del busybox-suid ca-certificates unzip shadow \ 30 | && rm -rf /var/cache/apk/* /tmp/* 31 | 32 | USER ${JBOSS_USER} 33 | WORKDIR ${JBOSS_USER_HOME} 34 | 35 | CMD /bin/bash 36 | EXPOSE 8080 9990 8443 37 | ENTRYPOINT /bin/bash ${JBOSS_HOME}/bin/standalone.sh 38 | 39 | RUN wget ${JBOSS_URL} -O ${JBOSS_USER_HOME}/${JBOSS_FILE} \ 40 | && unzip ${JBOSS_USER_HOME}/${JBOSS_FILE} -d ${JBOSS_USER_HOME} \ 41 | && rm -rf ${JBOSS_USER_HOME}/${JBOSS_FILE} \ 42 | && ${JBOSS_HOME}/bin/add-user.sh ${JBOSS_ADMIN_USER} ${JBOSS_ADMIN_PASSWORD} --silent \ 43 | && echo "JAVA_OPTS=\"\$JAVA_OPTS -Djboss.bind.address=0.0.0.0 -Djboss.bind.address.management=0.0.0.0\"" >> ${JBOSS_HOME}/bin/standalone.conf 44 | 45 | ## check all apps healthy (in current example we are expecting to have apps with /ui/ and /rest-api/ contexts deployed: 46 | #HEALTHCHECK --interval=1s --timeout=3s --retries=30 \ 47 | # CMD wget -q --spider http://127.0.0.1:8080/rest-api/health \ 48 | # && wget -q --spider http://127.0.0.1:8080/ui/ \ 49 | # || exit 1 50 | # 51 | ## deploy apps 52 | #COPY ./path/to/*.war ./path/to/another/*.war ${JBOSS_HOME}/standalone/deployments/ 53 | #COPY --chown=jboss-eap-7.1 ./path/to/*.war ./path/to/another/*.war ${JBOSS_HOME}/standalone/deployments/ 54 | -------------------------------------------------------------------------------- /jboss-eap-7.2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM daggerok/jboss-eap-7.2:7.2.71-alpine 2 | LABEL MAINTAINER="daggerok https://github.com/daggerok/jboss-eap-7.2" 3 | ENV JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005" 4 | EXPOSE 5005 5 | HEALTHCHECK --retries=33 \ 6 | --timeout=3s \ 7 | --interval=3s \ 8 | --start-period=3s \ 9 | CMD wget -q 
--spider http://127.0.0.1:8080/my-service/health || exit 1 10 | #CMD curl -f http://127.0.0.1:8080/my-servicehealth || exit 1 11 | #CMD test `netstat -ltnp | grep 9990 | wc -l` -ge 1 || exit 1 12 | COPY --chown=jboss ./target/*.war ${JBOSS_HOME}/standalone/deployments/my-service.war 13 | -------------------------------------------------------------------------------- /jboss-eap-7.3/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM daggerok/jboss-eap-7.3:7.3.0-centos 2 | LABEL MAINTAINER="daggerok https://github.com/daggerok/jboss-eap-7.3" 3 | ENV JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005" 4 | EXPOSE 5005 5 | HEALTHCHECK --retries=33 \ 6 | --timeout=3s \ 7 | --interval=3s \ 8 | --start-period=3s \ 9 | CMD wget -q --spider http://127.0.0.1:8080/my-service/health || exit 1 10 | #CMD curl -f http://127.0.0.1:8080/my-servicehealth || exit 1 11 | #CMD test `netstat -ltnp | grep 9990 | wc -l` -ge 1 || exit 1 12 | COPY --chown=jboss ./target/*.war ${JBOSS_HOME}/standalone/deployments/my-service.war 13 | -------------------------------------------------------------------------------- /jboss/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM daggerok/jboss-eap-7.2:7.2.5-alpine 3 | EXPOSE 8080 4 | HEALTHCHECK --retries=33 \ 5 | --timeout=1s \ 6 | --interval=1s \ 7 | --start-period=3s \ 8 | CMD ( wget -q --spider http://127.0.0.1:8080/ ) || exit 1 9 | COPY --chown=jboss ./target/*.war ${JBOSS_HOME}/standalone/deployments/app.war 10 | 11 | ################################################################################################## 12 | # # 13 | # # eap: # 14 | # FROM daggerok/jboss:eap-7.1 # 15 | # # wildfly: # 16 | # FROM daggerok/jboss:wildfly-13.0.0.Final # 17 | # # 18 | # # remote debug (JetBrains IDEA remote debug zero-configuration): # 19 | # ENV JAVA_OPTS="$JAVA_OPTS 
-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " # 20 | # EXPOSE 5005 # 21 | # # 22 | # # health (check assuming '/health' endpoint implemented): # 23 | # HEALTHCHECK --timeout=2s --retries=22 \ # 24 | # CMD wget -q --spider http://127.0.0.1:8080/my-service/health \ # 25 | # || exit 1 # 26 | # # 27 | # # multi-deployments: # 28 | # COPY ./build/libs/*.war ./target/*.war ${JBOSS_HOME}/standalone/deployments/ # 29 | # # 30 | # # single app deployment: # 31 | # ADD ./build/libs/*.war ${JBOSS_HOME}/standalone/deployments/my-service.war # 32 | # # 33 | ################################################################################################## 34 | -------------------------------------------------------------------------------- /jboss4/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM lwis/java5 2 | MAINTAINER Maksim Kostormin https://github.com/daggerok 3 | ARG JBOSS_VERSION_ARG="4.2.3.GA" 4 | RUN apt-get update -y \ 5 | && apt-get install -y --no-install-recommends wget unzip bash sudo openssh-client \ 6 | && adduser -q --home /home/jboss --shell /bin/bash --uid 1025 --disabled-password jboss \ 7 | && adduser jboss jboss \ 8 | && echo "jboss ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 9 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 10 | && rm -rf /tmp/* \ 11 | && apt-get remove openssh-client 12 | WORKDIR /home/jboss 13 | USER jboss 14 | EXPOSE 8080 15 | ENV JBOSS_VERSION="${JBOSS_VERSION_ARG}" 16 | ENV JBOSS_HOME="/home/jboss/jboss-${JBOSS_VERSION}" 17 | ENV JAVA_OPTS="$JAVA_OPTS \ 18 | -Djboss.bind.address=0.0.0.0 \ 19 | -Djboss.bind.address.management=0.0.0.0 \ 20 | -Djava.net.preferIPv4Stack=true \ 21 | -XX:+UnlockExperimentalVMOptions \ 22 | -XX:+UseCGroupMemoryLimitForHeap \ 23 | -XshowSettings:vm" 24 | CMD /bin/bash 25 | ENTRYPOINT /bin/bash ${JBOSS_HOME}/bin/run.sh 26 | RUN wget --no-check-certificate \ 27 | -O /tmp/jboss-${JBOSS_VERSION}.zip \ 28 | 
https://sourceforge.net/projects/jboss/files/JBoss/JBoss-${JBOSS_VERSION}/jboss-${JBOSS_VERSION}.zip \ 29 | && unzip -d /home/jboss /tmp/jboss-${JBOSS_VERSION}.zip \ 30 | && rm -rf /tmp/* 31 | 32 | ############################################ USAGE ############################################## 33 | # FROM daggerok/jboss4-java5 # 34 | # COPY --chown=jboss target/*.war ${JBOSS_HOME}/default/deploy/ # 35 | ################################################################################################# 36 | 37 | ######################################## DEBUG USAGE ############################################ 38 | # FROM daggerok/jboss4-java5 # 39 | # ENV JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005" # 40 | # EXPOSE 5005 # 41 | # COPY --chown=jboss target/*.war ${JBOSS_HOME}/default/deploy/ # 42 | ################################################################################################# 43 | -------------------------------------------------------------------------------- /jenkins/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jenkins 2 | # if we want to install via apt 3 | USER root 4 | # attention: hardcoded user and password (lines 7, 8, 9 and 12) 5 | RUN apt update \ 6 | && apt install -y htop sudo openssh-server vim \ 7 | && useradd -m mak && echo "mak:mak" | chpasswd \ 8 | && adduser mak sudo \ 9 | && echo "\nmak ALL=(ALL:ALL) NOPASSWD: ALL" >> /etc/sudoers \ 10 | && service ssh restart 11 | 12 | # attention: hardcoded user (lines: 10 and 11) and rsa public key (line 13) 13 | USER mak 14 | # RUN echo -e "\n\n\n" | ssh-keygen -t rsa -b 4096 15 | RUN ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa \ 16 | && export PUBLIC_KEY="ssh-rsa AAA..." \ 17 | && echo $PUBLIC_KEY >> ~/.ssh/authorized_keys \ 18 | # RUN export USER=mak \ 19 | # && mkdir -p /home/$USER/.ssh \ 20 | # && export PUBLIC_KEY="ssh-rsa AA..." 
\ 21 | # && echo $PUBLIC_KEY >> /home/$USER/.ssh/authorized_keys \ 22 | # && chown -R $USER:$USER /home/$USER \ 23 | # && chmod g-w /home/$USER \ 24 | # && chmod 750 /home/$USER \ 25 | # && chmod 700 /home/$USER/.ssh \ 26 | # && chmod 600 /home/$USER/.ssh/authorized_keys 27 | # drop back to the regular jenkins user - good practice 28 | USER jenkins 29 | EXPOSE 22 8080 5000 30 | -------------------------------------------------------------------------------- /jenkins/docker-compose.yml: -------------------------------------------------------------------------------- 1 | my-jenkins: 2 | hostname: my-jenkins 3 | container_name: my-jenkins 4 | image: 'jenkins:2.32.1' 5 | ports: 6 | - '50000:50000' 7 | - '8080:8080' 8 | volumes: 9 | - './var/jenkins_home:/var/jenkins_home' 10 | 11 | # jenkins-from-dokerfile-container: 12 | # hostname: jenkins-from-dokerfile-container 13 | # container_name: jenkins-from-dokerfile-container 14 | # build: . 15 | # image: 'jenkins:2.32.1' 16 | # ports: 17 | # - '50000:50000' 18 | # - '8080:8080' 19 | # volumes: 20 | # - './var/jenkins_home:/var/jenkins_home' 21 | -------------------------------------------------------------------------------- /jenkins/jenkins.bash: -------------------------------------------------------------------------------- 1 | export JENKINS_VERSION=2.32.1 2 | export JENKINS_NAME=my-jenkins 3 | 4 | docker stop $JENKINS_NAME 5 | docker rm $JENKINS_NAME 6 | docker pull jenkins:$JENKINS_VERSION 7 | 8 | # docker run -d \ 9 | # -name $JENKINS_NAME \ 10 | # -p 8080:8080 \ 11 | # -p 50000:50000 \ 12 | # -v $PWD/var/jenkins_home:/var/jenkins_home \ 13 | # jenkins:$JENKINS_VERSION 14 | 15 | docker run --name $JENKINS_NAME -d -p 8080:8080 -p 50000:50000 -v $PWD/var/jenkins_home:/var/jenkins_home jenkins:$JENKINS_VERSION 16 | # docker run --name $JENKINS_NAME -p 8080:8080 -p 50000:50000 -v $PWD/var/jenkins_home:/var/jenkins_home jenkins:$JENKINS_VERSION 17 | 18 | # docker exec -it $JENKINS_NAME bash 19 | # docker exec -u root -it 
$JENKINS_NAME bash 20 | 21 | # rm -rf $PWD/var/jenkins_home 22 | # docker stop $JENKINS_NAME 23 | # docker rm -f $JENKINS_NAME 24 | # vim Dockerfile # update user passsword and id_rsa.pub key 25 | # docker build -t $JENKINS_NAME . 26 | # docker run --name $JENKINS_NAME -p 8080:8080 -p 50000:50000 -v $PWD/var/jenkins_home:/var/jenkins_home $JENKINS_NAME 27 | 28 | # docker stop $JENKINS_NAME 29 | # docker start $JENKINS_NAME 30 | -------------------------------------------------------------------------------- /kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jre-alpine3.7 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache --update add busybox-suid bash curl unzip sudo openssh-client shadow wget \ 4 | && adduser -h /home/appuser -s /bin/bash -D -u 1025 appuser wheel \ 5 | && echo "appuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 6 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 7 | && wget --no-cookies \ 8 | --no-check-certificate \ 9 | --header "Cookie: oraclelicense=accept-securebackup-cookie" \ 10 | "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" \ 11 | -O /tmp/jce_policy-8.zip \ 12 | && unzip -o /tmp/jce_policy-8.zip -d /tmp \ 13 | && mv -f ${JAVA_HOME}/lib/security ${JAVA_HOME}/lib/backup-security \ 14 | && mv -f /tmp/UnlimitedJCEPolicyJDK8 ${JAVA_HOME}/lib/security \ 15 | && wget -O /home/appuser/kafka.jar https://raw.githubusercontent.com/daggerok/embedded-kafka/mvn-repo/embedded-kafka-0.0.3-all.jar \ 16 | && chown -R appuser:wheel /home/appuser/kafka.jar \ 17 | && apk del busybox-suid unzip openssh-client shadow wget \ 18 | && rm -rf /var/cache/apk/* /tmp/* 19 | USER appuser 20 | WORKDIR /home/appuser 21 | VOLUME /home/appuser 22 | CMD /bin/bash 23 | ARG ZOOKEEPER_PORT_ARG="2181" 24 | ARG ZOOKEEPER_DIR_ARG=/home/appuser 25 | ARG KAFKA_PORT_ARG="9092" 26 | ARG KAFKA_TOPICS_ARG="\ 27 | topic1,topic2,topic3" 28 | ARG 
HTTP_PORT_ARG="8080" 29 | ARG HTTP_CONTEXT_ARG="/" 30 | ENV ZOOKEEPER_PORT="${ZOOKEEPER_PORT_ARG}" \ 31 | ZOOKEEPER_DIR="${ZOOKEEPER_DIR_ARG}" \ 32 | KAFKA_PORT="${KAFKA_PORT_ARG}" \ 33 | KAFKA_TOPICS="${KAFKA_TOPICS_ARG}" \ 34 | HTTP_PORT="${HTTP_PORT_ARG}" \ 35 | HTTP_CONTEXT="${HTTP_CONTEXT_ARG}" 36 | ENTRYPOINT java -Djava.net.preferIPv4Stack=true \ 37 | -XX:+UnlockExperimentalVMOptions \ 38 | -XX:+UseCGroupMemoryLimitForHeap \ 39 | -XshowSettings:vm \ 40 | -jar /home/appuser/kafka.jar \ 41 | --zookeeperPort="${ZOOKEEPER_PORT}" \ 42 | --zookeeperDir="${ZOOKEEPER_DIR}" \ 43 | --kafkaPort="${KAFKA_PORT}" \ 44 | --kafkaTopics="${KAFKA_TOPICS}" \ 45 | --httpPort="${HTTP_PORT}" \ 46 | --httpContext="${HTTP_CONTEXT}" 47 | EXPOSE ${ZOOKEEPER_PORT} ${KAFKA_PORT} ${HTTP_PORT} 48 | HEALTHCHECK --timeout=2s \ 49 | --retries=22 \ 50 | CMD curl -f "http://127.0.0.1:${HTTP_PORT}${HTTP_CONTEXT}" || exit 1 51 | 52 | ### you can use next docker-compose ### 53 | # 54 | # version: "2.1" 55 | # services: 56 | # kafka-app: 57 | # image: daggerok/kafka:v9 58 | # environment: 59 | # ZOOKEEPER_PORT: 2181 60 | # ZOOKEEPER_DIR: ./zk 61 | # KAFKA_PORT: 9092 62 | # KAFKA_TOPICS: topic1,topic2 63 | # HTTP_PORT: 8080 64 | # HTTP_CONTEXT: / 65 | # volumes: ["kafka-app-data:/home"] 66 | # ports: ["8080:8080"] 67 | # networks: [backing-services] 68 | # volumes: 69 | # kafka-app-data: {} 70 | # networks: 71 | # backing-services: 72 | # driver: bridge 73 | # 74 | ####################################### 75 | -------------------------------------------------------------------------------- /kafka/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | kafka: 6 | image: daggerok/kafka:v5 7 | environment: 8 | KAFKA_TOPICS: 'top1,top2,top3' 9 | ports: 10 | - "2181:2181" 11 | - "9092:9092" 12 | volumes: 13 | - "kafka-data:/home/appuser" 14 | - "kafka-data:/var" 15 | - "kafka-data:/tmp" 16 | networks: 
[backing-services] 17 | 18 | volumes: 19 | kafka-data: {} 20 | 21 | networks: 22 | backing-services: 23 | driver: bridge 24 | -------------------------------------------------------------------------------- /kong/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | kong-database: 6 | # image: healthcheck/postgres:alpine 7 | image: postgres:9.4-alpine 8 | ports: ["5432:5432"] 9 | environment: 10 | POSTGRES_USER: kong 11 | POSTGRES_DB: kong 12 | healthcheck: 13 | test: ["CMD", "pg_isready", "-U", "postgres"] 14 | interval: 10s 15 | timeout: 5s 16 | retries: 5 17 | networks: [backing-services] 18 | 19 | kong-migration: 20 | depends_on: 21 | kong-database: 22 | condition: service_healthy 23 | image: kong:latest 24 | environment: 25 | KONG_DATABASE: postgres 26 | KONG_PG_HOST: kong-database 27 | KONG_CASSANDRA_CONTACT_POINTS: kong-database 28 | command: kong migrations -v up 29 | networks: [backing-services] 30 | 31 | kong: 32 | depends_on: 33 | kong-database: 34 | condition: service_healthy 35 | kong-migration: 36 | condition: service_started 37 | image: kong:latest 38 | environment: 39 | KONG_DATABASE: postgres 40 | KONG_PG_HOST: kong-database 41 | KONG_PG_DATABASE: kong 42 | ports: 43 | - "8000:8000" 44 | - "8001:8001" 45 | - "8443:8443" 46 | - "8444:8444" 47 | healthcheck: 48 | test: ["CMD-SHELL", "curl -I -s -L http://127.0.0.1:8000 || exit 1"] 49 | interval: 5s 50 | retries: 10 51 | networks: [backing-services] 52 | 53 | dashboard: 54 | depends_on: 55 | kong: 56 | condition: service_healthy 57 | image: pgbi/kong-dashboard:v2 58 | ports: ["8080:8080"] 59 | networks: 60 | - backing-services 61 | - public 62 | 63 | networks: 64 | backing-services: 65 | public: 66 | -------------------------------------------------------------------------------- /maven/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
openjdk:8u151-jre-alpine3.7 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache --update add busybox-suid bash curl unzip sudo openssh-client shadow wget \ 4 | && adduser -h /home/appuser -s /bin/bash -D -u 1025 appuser wheel \ 5 | && echo "appuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 6 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 7 | && wget --no-cookies \ 8 | --no-check-certificate \ 9 | --header "Cookie: oraclelicense=accept-securebackup-cookie" \ 10 | "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" \ 11 | -O /tmp/jce_policy-8.zip \ 12 | && unzip -o /tmp/jce_policy-8.zip -d /tmp \ 13 | && mv -f ${JAVA_HOME}/lib/security ${JAVA_HOME}/lib/backup-security || true \ 14 | && mv -f /tmp/UnlimitedJCEPolicyJDK8 ${JAVA_HOME}/lib/security \ 15 | && apk del busybox-suid unzip openssh-client shadow wget \ 16 | && rm -rf /var/cache/apk/* /tmp/* 17 | USER appuser 18 | WORKDIR /home/appuser 19 | VOLUME /home/appuser 20 | ARG JAVA_OPTS_ARGS="\ 21 | -Djava.net.preferIPv4Stack=true \ 22 | -XX:+UnlockExperimentalVMOptions \ 23 | -XX:+UseCGroupMemoryLimitForHeap \ 24 | -XshowSettings:vm " 25 | ENV JAVA_OPTS="${JAVA_OPTS} ${JAVA_OPTS_ARGS}" 26 | ENTRYPOINT java ${JAVA_OPTS} -jar ./app.jar 27 | CMD /bin/bash 28 | # ENTRYPOINT java -Djava.net.preferIPv4Stack=true \ 29 | # -XX:+UnlockExperimentalVMOptions \ 30 | # -XX:+UseCGroupMemoryLimitForHeap \ 31 | # -XshowSettings:vm \ 32 | # -jar ./app.jar 33 | # CMD /bin/bash 34 | EXPOSE 8080 35 | HEALTHCHECK --timeout=2s \ 36 | --retries=22 \ 37 | CMD curl -f http://127.0.0.1:8080/actuator/health || exit 1 38 | COPY --chown=appuser ./target/*.jar ./app.jar 39 | -------------------------------------------------------------------------------- /maven/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | maven-app: 6 | build: 7 | context: . 
8 | dockerfile: ./Dockerfile 9 | volumes: ["maven-app-data:/home/app"] 10 | ports: 11 | - "8080:8080" 12 | # - "5005:5005" 13 | restart: unless-stopped 14 | networks: [backing-services] 15 | 16 | volumes: 17 | maven-app-data: {} 18 | 19 | networks: 20 | backing-services: 21 | driver: bridge 22 | -------------------------------------------------------------------------------- /mercurial/Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t hg . 2 | # docker run --rm --name hg -it -p 22:22 -p 8080:8080 hg:latest 3 | 4 | # docker exec -it hg bash 5 | # sudo -s 6 | 7 | # hg clone http://localhost:8080/ repo-dir 8 | # cd repo-dir/ 9 | 10 | ## disable sudo no password: 11 | #echo "hg ALL=(ALL:ALL) ALL" >> /etc/sudoers 12 | 13 | FROM alpine:3.5 14 | MAINTAINER Maksim Kostromin https://github.com/daggerok 15 | RUN apk --no-cache --update add busybox-suid bash mercurial sudo \ 16 | && adduser -h /home/hg -s /bin/bash -D -u 1025 hg wheel \ 17 | && echo "hg ALL=(ALL:ALL) NOPASSWD:ALL" >> /etc/sudoers \ 18 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 19 | && hgpassword=$(echo Admin.123 | mkpasswd) \ 20 | && echo "hg sudo password: ${hgpassword}" \ 21 | && echo "hg:${hgpassword}" | chpasswd \ 22 | && apk del busybox-suid \ 23 | && rm -rf /tmp/* /var/cache/apk/* 24 | USER hg 25 | RUN mkdir -p /home/hg/repo \ 26 | && cd /home/hg/repo \ 27 | && hg init 28 | WORKDIR /home/hg/repo 29 | VOLUME /home/hg 30 | ENTRYPOINT hg serve -p 8080 -a 0.0.0.0 --verbose 31 | CMD /bin/bash 32 | EXPOSE 22 8080 33 | -------------------------------------------------------------------------------- /mongo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM healthcheck/mongo:latest 2 | MAINTAINER Maksim Kostromin https://github.comm/daggerok 3 | -------------------------------------------------------------------------------- 
/mongo/docker-compose-healthcheck-mongo-express.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | mongo: 4 | restart: unless-stopped 5 | image: healthcheck/mongo:latest 6 | environment: 7 | MONGO_PORT_27017_TCP_PORT: 27017 8 | JAVA_OPTS: "" 9 | ports: ["27017:27017"] 10 | volumes: ["mongo-data:/data/db"] 11 | networks: [backing-services] 12 | mongo-express: 13 | restart: unless-stopped 14 | depends_on: 15 | mongo: 16 | condition: service_healthy 17 | image: mongo-express:0.42.2 18 | environment: 19 | ME_CONFIG_MONGODB_SERVER: mongo 20 | ME_CONFIG_BASICAUTH_USERNAME: guest 21 | ME_CONFIG_BASICAUTH_PASSWORD: guest 22 | ports: ["8002:8081"] 23 | networks: [backing-services] 24 | volumes: 25 | mongo-data: {} 26 | networks: 27 | backing-services: 28 | driver: bridge 29 | -------------------------------------------------------------------------------- /mongo/docker-compose-healthcheck.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | mongo: 4 | container_name: mongo 5 | image: healthcheck/mongo:latest 6 | ports: ["27017:27017"] 7 | networks: [backing-services] 8 | volumes: ["mongo-data:/data/db"] 9 | environment: 10 | JAVA_OPTS: "" 11 | volumes: 12 | mongo-data: {} 13 | networks: 14 | backing-services: 15 | driver: bridge 16 | -------------------------------------------------------------------------------- /mongo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | my-mongo: 2 | container_name: my-mongo # a custom mongo container_name 3 | image: mongo 4 | environment: 5 | - MONGO_PORT_27017_TCP_ADDR=localhost 6 | - MONGO_PORT_27017_TCP_PORT=27017 7 | ports: 8 | - "27017:27017" 9 | 10 | ##https://github.com/mongo-express/mongo-express-docker 11 | #my-mongo-express: 12 | # container_name: my-mongo-express 13 | # image: mongo-express 14 | # ports: 15 | # - "8081:8081" 16 | # 
links: 17 | # - mongo # a custom mongo container_name 18 | # environment: 19 | # # should be a custom mongo container name: 20 | # ME_CONFIG_MONGODB_SERVER: my-mongo 21 | # ME_CONFIG_OPTIONS_EDITORTHEME: ambiance 22 | # ME_CONFIG_SITE_BASEURL: / 23 | -------------------------------------------------------------------------------- /mysql-phpmyadmin/README.md: -------------------------------------------------------------------------------- 1 | # mysql + phpMyAdmin docker compose file. 2 | 3 | _Could be used for quick project bootstrap or even development_ 4 | 5 | ## phpMyAdmin connection configuration: 6 | 7 | ``` 8 | server: mysql 9 | user: root 10 | password: root-db-password 11 | ``` 12 | 13 | open: http://0.0.0.0/ 14 | 15 | ## MySQL connection configuration (per app): 16 | 17 | ``` 18 | server (from docker): mysql 19 | server (from local): 0.0.0.0 or 127.0.0.1 or localhost 20 | application user: app-db-user 21 | application password: app-db-password 22 | application database: app-database 23 | ``` 24 | -------------------------------------------------------------------------------- /mysql-phpmyadmin/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | ### 2 | # 3 | # This docker compose file Could be used for quick project bootstrap or even development apps 4 | # uses mysql server with phpMyAdmin administration UI (https://github.com/phpmyadmin/docker). 
5 | # 6 | # phpMyAdmin connection configuration (open: http://0.0.0.0/): 7 | # 8 | # server : mysql 9 | # user : root 10 | # password : root-db-password 11 | # 12 | # MySQL connection configuration (per app): 13 | # 14 | # server (from docker): mysql 15 | # server (from local) : 0.0.0.0 or 127.0.0.1 or localhost 16 | # application user : app-db-user 17 | # application password: app-db-password 18 | # application database: app-database 19 | # 20 | ### 21 | 22 | version: "2.1" 23 | 24 | services: 25 | 26 | mysql: 27 | image: healthcheck/mysql 28 | ports: ["3306:3306"] 29 | environment: 30 | MYSQL_USER: app-db-user 31 | MYSQL_PASSWORD: app-db-password 32 | MYSQL_DATABASE: app-db-database 33 | MYSQL_ROOT_PASSWORD: root-db-password 34 | networks: [backing-services] 35 | 36 | mysql-ui: 37 | image: phpmyadmin/phpmyadmin:4.7 38 | ports: ["80:80"] 39 | environment: 40 | PMA_ARBITRARY: 1 41 | TESTSUITE_PASSWORD: Admin.123 42 | PMA_VERBOSES: mysql 43 | PMA_HOST: mysql 44 | PMA_PORT: 3306 45 | MYSQL_USER: app-db-user 46 | MYSQL_PASSWORD: app-db-password 47 | MYSQL_DATABASE: app-db-database 48 | MYSQL_ROOT_PASSWORD: root-db-password 49 | depends_on: [mysql] 50 | links: ["mysql:db"] 51 | networks: [backing-services] 52 | 53 | networks: 54 | backing-services: 55 | driver: bridge 56 | -------------------------------------------------------------------------------- /mysql/docker-compose-heathcheck.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | mysql: 4 | # image: mysql:5 5 | image: healthcheck/mysql:latest 6 | environment: 7 | - MYSQL_ROOT_PASSWORD=app 8 | - MYSQL_PASSWORD=app 9 | - MYSQL_USER=app 10 | - MYSQL_DATABASE=app 11 | volumes: 12 | - mysql-data:/var/lib/mysql:rw 13 | - mysql-etc:/etc/mysql:rw 14 | ports: ["3306:3306"] 15 | networks: [backing-services] 16 | volumes: 17 | mysql-data: {} 18 | mysql-etc: {} 19 | networks: 20 | backing-services: 21 | driver: bridge 22 | 
-------------------------------------------------------------------------------- /mysql/docker-compose-old.yml: -------------------------------------------------------------------------------- 1 | my-mysql: 2 | container_name: my-mysql 3 | image: mysql:5 4 | environment: 5 | - MYSQL_ROOT_PASSWORD=my-mysql 6 | - MYSQL_PASSWORD=my-mysql 7 | - MYSQL_USER=my-mysql 8 | - MYSQL_DATABASE=my-mysql 9 | ports: 10 | - "3306:3306" 11 | -------------------------------------------------------------------------------- /mysql/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | mysql: 5 | image: mysql:5 6 | environment: 7 | MYSQL_ROOT_PASSWORD: mysqlpassowrd 8 | MYSQL_PASSWORD: mysqlpassowrd 9 | MYSQL_USER: mysqluser 10 | MYSQL_DATABASE: mysqldatabase 11 | ports: ["3306:3306"] 12 | networks: [backing-services] 13 | healthcheck: 14 | test: ["CMD", "mysqladmin" ,"ping", "-h", "127.0.0.1"] 15 | interval: 10s 16 | timeout: 2s 17 | retries: 22 18 | #restart: unless-stopped 19 | #mem_limit: 786432000 # container limit: 700Mb (700 * 1024 * 1024) 20 | 21 | networks: 22 | backing-services: 23 | driver: bridge 24 | -------------------------------------------------------------------------------- /nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:stable-alpine 2 | MAINTAINER Maksim Kostromin 3 | RUN apk --no-cache add curl 4 | HEALTHCHECK --interval=30s --timeout=30s --retries=10 CMD curl -f http://127.0.0.1:80/ || exit 1 5 | COPY ./www /usr/share/nginx/html 6 | -------------------------------------------------------------------------------- /nginx/README.md: -------------------------------------------------------------------------------- 1 | 2 | ```bash 3 | docker run -d --rm --name app -p 80:80 -v `pwd`/dist:/usr/share/nginx/html:ro \ 4 | --health-cmd="wget -q --spider http://127.0.0.1:80/ || exit 1" \ 5 | --health-start-period=1s \ 6 | 
--health-interval=1s \ 7 | --health-retries=33 \ 8 | --health-timeout=1s \ 9 | nginx:1.17.6-alpine 10 | ``` 11 | -------------------------------------------------------------------------------- /oracle/daggerok-11xe.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker container run -it -p 8080:8080 -p 1521:1521 --name oracle-xe --shm-size=1g daggerok/oracle-xe 3 | #docker container stop oracle 4 | #docker container run -ia oracle 5 | ##docker container run -it -e ORACLE_PWD=password -p 8080:8080 -p 1521:1521 --name oracle --shm-size=1g daggerok/oracle:xe 6 | -------------------------------------------------------------------------------- /oracle/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | oracle: 5 | image: daggerok/oracle:11.2.0.2-xe 6 | shm_size: 1g 7 | environment: 8 | ORACLE_PWD: oraclepassword 9 | ports: ["1521:1521"] 10 | networks: [backing-services] 11 | healthcheck: 12 | test: sleep 10; curl -uSYSTEM:$$ORACLE_PWD -v http://127.0.0.1:8080//apex/ 13 | #interval: 30s 14 | #timeout: 30s 15 | retries: 20 16 | #restart: unless-stopped 17 | #mem_limit: 786432000 # container limit: 700Mb (700 * 1024 * 1024) 18 | 19 | networks: 20 | backing-services: 21 | driver: bridge 22 | -------------------------------------------------------------------------------- /oracle/sath89-12c.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker container run -it -p 8080:8080 -p 1521:1521 --name oracle sath89/oracle-12c 3 | #docker container stop oracle 4 | #docker container start -ia oracle 5 | -------------------------------------------------------------------------------- /portainer/README.md: -------------------------------------------------------------------------------- 1 | # portainer 2 | read more [here](https://www.portainer.io/), 
[here](https://www.upnxtblog.com/index.php/2018/01/17/top-6-gui-tools-for-managing-docker-environments/) and [here (ru)](https://habr.com/ru/company/flant/blog/338332/) and of course [here](https://hub.docker.com/r/portainer/portainer) 3 | 4 | ```bash 5 | mkdir /tmp/portainer-data 6 | docker run -d -p 9000:9000 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/portainer-data:/data portainer/portainer 7 | ``` 8 | 9 | open http://127.0.0.1:9000/ 10 | -------------------------------------------------------------------------------- /postgres/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM postgres:9.4-alpine 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | HEALTHCHECK --interval=10s --timeout=5s --retries=5 CMD pg_isready -U postgres 4 | -------------------------------------------------------------------------------- /postgres/docker-compose-healthcheck.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | postgres: 6 | image: healthcheck/postgres:alpine 7 | volumes: ["postgres-data:/var/lib/postgresql/data"] 8 | networks: [backing-services] 9 | ports: ["5432:5432"] 10 | environment: 11 | POSTGRES_DB: db 12 | POSTGRES_USER: user 13 | POSTGRES_PASSWORD: password 14 | 15 | volumes: 16 | postgres-data: {} 17 | 18 | networks: 19 | backing-services: 20 | -------------------------------------------------------------------------------- /postgres/docker-compose-legacy.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | postgres: 5 | image: postgres:9.4-alpine 6 | environment: 7 | POSTGRES_DB: postgresdatabase 8 | POSTGRES_USER: postgresuser 9 | POSTGRES_PASSWORD: postgrespassword 10 | ports: ["5432:5432"] 11 | networks: [backing-services] 12 | healthcheck: 13 | test: ["CMD", "pg_isready", "-U", "postgres"] 14 | interval: 10s 15 | timeout: 2s 16 | retries: 
22 17 | #restart: unless-stopped 18 | #mem_limit: 786432000 # container limit: 700Mb (700 * 1024 * 1024) 19 | 20 | networks: 21 | backing-services: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /postgres/docker-compose-old.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | postgres: 6 | image: postgres:9.4-alpine 7 | volumes: ["postgres-data:/var/lib/postgresql/data"] 8 | networks: [backing-services] 9 | ports: ["5432:5432"] 10 | environment: 11 | POSTGRES_DB: db 12 | POSTGRES_USER: user 13 | POSTGRES_PASSWORD: password 14 | healthcheck: 15 | test: ["CMD", "pg_isready", "-U", "postgres"] 16 | interval: 10s 17 | timeout: 5s 18 | retries: 5 19 | 20 | volumes: 21 | postgres-data: {} 22 | 23 | networks: 24 | backing-services: 25 | driver: bridge 26 | -------------------------------------------------------------------------------- /postgres/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | services: 3 | postgres: 4 | image: postgres:9.4-alpine 5 | environment: 6 | POSTGRES_DB: db 7 | POSTGRES_USER: user 8 | POSTGRES_PASSWORD: password 9 | ports: ['5432:5432'] 10 | healthcheck: 11 | test: ['CMD', 'pg_isready', '-h', '127.0.0.1', '-p', '5432', '-d', '$$POSTGRES_DB', '-U', '$$POSTGRES_USER'] 12 | #test: pg_isready --host=127.0.0.1 --port=5432 --dbname=$$POSTGRES_DB --username=$$POSTGRES_USER 13 | timeout: 3s 14 | retries: 33 15 | interval: 30s 16 | restart: unless-stopped 17 | networks: 18 | backing-services: 19 | aliases: 20 | - p 21 | - pg 22 | - db 23 | - postgres 24 | #mem_limit: 786432000 # container limit: 700Mb (700 * 1024 * 1024) 25 | networks: 26 | backing-services: 27 | driver: bridge 28 | -------------------------------------------------------------------------------- /postgres/migrations/README.adoc: 
-------------------------------------------------------------------------------- 1 | = postgres 2 | 3 | .build, deploy, migrate.. 4 | ---- 5 | docker-compose build 6 | 7 | docker stack deploy -c docker-compose.yaml db 8 | 9 | kubectl get pods 10 | # output: 11 | NAME READY STATUS RESTARTS AGE 12 | postgres-7bbcd69cd7-4s6rd 1/1 Running 0 4m 13 | 14 | kubectl port-forward postgres-7bbcd69cd7-4s6rd 5432 15 | 16 | # now you can connect to `postgres` database on: `127.0.0.1:5432` url via any supported client using `app_user` / `password` creds... 17 | 18 | # cleanup: 19 | docker stack rm db 20 | ---- 21 | -------------------------------------------------------------------------------- /postgres/migrations/db/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM postgres:10.0-alpine 2 | COPY migration.sql /docker-entrypoint-initdb.d/ 3 | -------------------------------------------------------------------------------- /postgres/migrations/db/migration.sql: -------------------------------------------------------------------------------- 1 | -- CREATE USER app_user 2 | -- WITH PASSWORD 'password' 3 | -- ; 4 | 5 | CREATE USER app_user 6 | WITH PASSWORD 'password' 7 | SUPERUSER 8 | CREATEDB 9 | CREATEROLE 10 | REPLICATION 11 | BYPASSRLS 12 | ; 13 | 14 | -- CREATE DATABASE app_db 15 | -- OWNER app_user 16 | -- ; 17 | -- 18 | -- GRANT ALL PRIVILEGES ON DATABASE app_db to app_user 19 | -- ; 20 | 21 | ALTER DEFAULT PRIVILEGES IN SCHEMA public 22 | GRANT INSERT, UPDATE, DELETE ON TABLES TO app_user 23 | ; 24 | 25 | -- GRANT SELECT, INSERT, UPDATE, DELETE 26 | -- ON ALL TABLES 27 | -- IN SCHEMA public 28 | -- TO app_user 29 | -- ; 30 | 31 | CREATE TABLE IF NOT EXISTS items 32 | ( 33 | id SERIAL NOT NULL 34 | CONSTRAINT items_pk 35 | PRIMARY KEY , 36 | title VARCHAR(255) NOT NULL , 37 | description VARCHAR(1024) NULL 38 | ) 39 | ; 40 | 41 | INSERT INTO items (title) VALUES 42 | ('one'), 43 | ('two'), 44 | ('three') 45 | ; 46 | 
-------------------------------------------------------------------------------- /postgres/migrations/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | postgres: 6 | build: ./db 7 | image: daggerok/application-db 8 | -------------------------------------------------------------------------------- /rabbitmq-mongo/docker.compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | rabbitmq: 4 | restart: unless-stopped 5 | build: ./rabbitmq-healthcheck 6 | environment: 7 | RABBITMQ_DEFAULT_USER: guest 8 | RABBITMQ_DEFAULT_PASS: guest 9 | ports: 10 | - "5672:5672" 11 | - "15672:15672" 12 | - "25672:25672" 13 | volumes: ["rabbitmq-data:/var/lib/rabbitmq"] 14 | networks: [backing-services] 15 | mongo: 16 | restart: unless-stopped 17 | image: healthcheck/mongo:latest 18 | environment: 19 | MONGO_PORT_27017_TCP_PORT: 27017 20 | JAVA_OPTS: "" 21 | ports: ["27017:27017"] 22 | volumes: ["mongo-data:/data/db"] 23 | networks: [backing-services] 24 | mongo-express: 25 | restart: unless-stopped 26 | depends_on: 27 | mongo: 28 | condition: service_healthy 29 | image: mongo-express:0.42.2 30 | environment: 31 | ME_CONFIG_MONGODB_SERVER: mongo 32 | ME_CONFIG_BASICAUTH_USERNAME: guest 33 | ME_CONFIG_BASICAUTH_PASSWORD: guest 34 | ports: ["8081:8081"] 35 | networks: [backing-services] 36 | volumes: 37 | rabbitmq-data: {} 38 | mongo-data: {} 39 | networks: 40 | backing-services: 41 | driver: bridge 42 | -------------------------------------------------------------------------------- /rabbitmq-mongo/rabbitmq-healthcheck/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rabbitmq:3.6.12-management-alpine 2 | MAINTAINER Maksim Kostromin 3 | EXPOSE 5672 15672 61613 4 | RUN rabbitmq-plugins enable rabbitmq_stomp --offline \ 5 | && apk --no-cache add curl 6 | HEALTHCHECK 
--interval=5s --timeout=5s --retries=20 CMD curl -f http://127.0.0.1:15672 || exit 1 7 | -------------------------------------------------------------------------------- /rabbitmq/Dockerfile-stomp: -------------------------------------------------------------------------------- 1 | FROM rabbitmq:3.6.9-management-alpine 2 | MAINTAINER Maksim Kostromin https://github.comm/daggerok 3 | RUN rabbitmq-plugins enable rabbitmq_stomp --offline 4 | EXPOSE 61613 5 | -------------------------------------------------------------------------------- /rabbitmq/custom/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM healthcheck/rabbitmq:alpine 2 | MAINTAINER Maksim Kostromin 3 | RUN rabbitmq-plugins enable rabbitmq_management rabbitmq_stomp --offline 4 | EXPOSE 5672 15672 5 | -------------------------------------------------------------------------------- /rabbitmq/custom/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | rabbitmq: 4 | build: . 
5 | volumes: 6 | - "rabbitmq-data:/var/lib/rabbitmq" 7 | - "rabbitmq-data:/etc/rabbitmq" 8 | ports: 9 | - "5672:5672" 10 | - "15672:15672" 11 | networks: [backing-services] 12 | volumes: 13 | rabbitmq-data: {} 14 | networks: 15 | backing-services: 16 | driver: bridge 17 | -------------------------------------------------------------------------------- /rabbitmq/docker-compose-healthcheck.yml: -------------------------------------------------------------------------------- 1 | 2 | version: "2.1" 3 | services: 4 | rabbitmq: 5 | image: healthcheck/rabbitmq:alpine 6 | volumes: 7 | - "rabbitmq-data:/var/lib/rabbitmq" 8 | - "rabbitmq-data:/etc/rabbitmq" 9 | ports: ["5672:5672"] 10 | networks: [backing-services] 11 | volumes: 12 | rabbitmq-data: {} 13 | networks: 14 | backing-services: 15 | driver: bridge 16 | -------------------------------------------------------------------------------- /rabbitmq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | rabbitmq: 4 | image: rabbitmq:3.6.12-management-alpine 5 | volumes: 6 | - "rabbitmq-data:/var/lib/rabbitmq" 7 | - "rabbitmq-data:/etc/rabbitmq" 8 | ports: 9 | - "5672:5672" 10 | - "15672:15672" 11 | networks: [backing-services] 12 | volumes: 13 | rabbitmq-data: {} 14 | networks: 15 | backing-services: 16 | driver: bridge 17 | -------------------------------------------------------------------------------- /rancher/README.md: -------------------------------------------------------------------------------- 1 | # rancher 2 | ignore this playground.... 
3 | 22 | run rancher 23 | 24 | 25 | ```bash 26 | docker run -it --rm -p 8080:8080 rancher/server 27 | ``` 28 | 29 | links: 30 | 31 | - [Running Docker and Rancher Locally on Dev Machines - May 2016 Online Meetup](https://www.youtube.com/watch?v=sS3gO5h88M4) 32 | - [used boot2docker v.1.10.3](https://github.com/boot2docker/boot2docker/releases/tag/v1.10.3) 33 | - [instasll runcher](https://rancher.com/docs/rancher/v1.6/en/installing-rancher/installing-server/) 34 | -------------------------------------------------------------------------------- /redis-commander/README.adoc: -------------------------------------------------------------------------------- 1 | # redis commander 2 | 3 | open: http://0.0.0.0:8082/ 4 | -------------------------------------------------------------------------------- /redis-commander/docker-compose-old.yaml: -------------------------------------------------------------------------------- 1 | ### 2 | # 3 | # This docker compose file Could be used for quick project bootstrap or development apps 4 | # uses redis server with redis-commander administration UI (https://github.com/joeferner/redis-commander). 
5 | # 6 | # To get redis-commander administration tool, open: http://0.0.0.0/ 7 | # 8 | # Redis connection configuration (per app): 9 | # 10 | # server (from docker): mysql 11 | # server (from local) : 0.0.0.0 or 127.0.0.1 or localhost 12 | # application user : app-db-user 13 | # application password: app-db-password 14 | # application database: app-database 15 | # 16 | ### 17 | 18 | version: "2.1" 19 | 20 | services: 21 | 22 | redis: 23 | image: healthcheck/redis:alpine 24 | ports: ["6379:6379"] 25 | networks: [backing-services] 26 | command: " redis-server --appendonly yes " 27 | 28 | redis-ui: 29 | image: tenstartups/redis-commander 30 | ports: ["8082:8081"] 31 | depends_on: [redis] 32 | links: ["redis:redis"] 33 | networks: [backing-services] 34 | command: " --redis-host redis " 35 | 36 | networks: 37 | backing-services: 38 | driver: bridge 39 | -------------------------------------------------------------------------------- /redis-commander/docker-compose-yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | redis: 6 | image: healthcheck/redis:alpine 7 | ports: ["6379:6379"] 8 | networks: [backing-services] 9 | command: " redis-server --appendonly yes " 10 | 11 | redis-ui: 12 | image: rediscommander/redis-commander 13 | ports: ["8082:8081"] 14 | environment: 15 | REDIS_HOSTS: app-redis-server:redis:6379 16 | depends_on: [redis] 17 | links: ["redis:redis"] 18 | networks: [backing-services] 19 | 20 | networks: 21 | backing-services: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN apk add --update redis 3 | CMD ["redis-server"] 4 | 5 | # docker build -t my-redis . 
6 | # docker run --rm --name run-my-redis my-redis 7 | # docker exec -it run-my-redis redis-cli 8 | -------------------------------------------------------------------------------- /redis/docker-compose-old.yml: -------------------------------------------------------------------------------- 1 | my-redis: 2 | container_name: my-redis 3 | image: redis 4 | ports: 5 | - "6379:6379" 6 | -------------------------------------------------------------------------------- /redis/docker-compose-redis-commander-healthcheck.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | redis: 6 | image: healthcheck/redis:alpine 7 | ports: ["6379:6379"] 8 | networks: [backing-services] 9 | volumes: ["redis-data:/data"] 10 | 11 | redis-commander-healthcheck: 12 | build: ./redis-commander-healthcheck 13 | depends_on: 14 | redis: 15 | condition: service_healthy 16 | image: 127.0.0.1:5000/redis-commander-healthcheck 17 | ports: ["8081:8081"] 18 | networks: [backing-services] 19 | command: " --redis-host redis " 20 | 21 | volumes: 22 | redis-data: {} 23 | 24 | networks: 25 | backing-services: 26 | driver: bridge 27 | -------------------------------------------------------------------------------- /redis/docker-compose-redis-ui.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | redis: 4 | image: healthcheck/redis:alpine 5 | ports: ["6379:6379"] 6 | networks: [backing-services] 7 | volumes: ["redis-data-volume:/data"] 8 | redis-ui: 9 | image: tenstartups/redis-commander 10 | ports: ["80:8081"] 11 | networks: [backing-services] 12 | depends_on: [redis] 13 | command: " --redis-host redis" 14 | volumes: 15 | redis-data-volume: {} 16 | networks: 17 | backing-services: 18 | driver: bridge 19 | -------------------------------------------------------------------------------- /redis/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | version: '3.9' 2 | networks: 3 | localnet: 4 | services: 5 | redis: 6 | image: redislabs/redismod 7 | ports: ['6379:6379'] 8 | networks: [localnet] 9 | volumes: ['./target:/data'] 10 | entrypoint: > 11 | redis-server 12 | --loadmodule /usr/lib/redis/modules/redisai.so 13 | --loadmodule /usr/lib/redis/modules/redisearch.so 14 | --loadmodule /usr/lib/redis/modules/redisgraph.so 15 | --loadmodule /usr/lib/redis/modules/redistimeseries.so 16 | --loadmodule /usr/lib/redis/modules/rejson.so 17 | --loadmodule /usr/lib/redis/modules/redisbloom.so 18 | --loadmodule /usr/lib/redis/modules/redisgears.so 19 | --appendonly yes 20 | deploy: 21 | replicas: 1 22 | restart_policy: 23 | condition: on-failure 24 | -------------------------------------------------------------------------------- /redis/redis-commander-healthcheck/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tenstartups/redis-commander 2 | MAINTAINER Maksim Kostromin https://github.comm/daggerok 3 | RUN apk --no-cache add curl 4 | HEALTHCHECK --interval=30s --timeout=30s --retries=10 CMD curl -f http://127.0.0.1:8081 || exit 1 5 | -------------------------------------------------------------------------------- /redis/target/appendonly.aof: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daggerok/docker/3f07f3f89bf7ec8acfcf13ab2e40649eb3f94734/redis/target/appendonly.aof -------------------------------------------------------------------------------- /redis/target/dump.rdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daggerok/docker/3f07f3f89bf7ec8acfcf13ab2e40649eb3f94734/redis/target/dump.rdb -------------------------------------------------------------------------------- /redis/v3/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM node:8.7.0-alpine 2 | MAINTAINER Maksim Kostromin https://github.comm/daggerok 3 | RUN apk --no-cache add busybox-suid bash curl \ 4 | && addgroup app-group \ 5 | && adduser -h /home/app -s /bin/bash -D -u 1025 app app-group \ 6 | && npm install -g redis-commander 7 | EXPOSE 8081 8 | USER app 9 | WORKDIR /home/app 10 | RUN echo "{}" > ${HOME}/.redis-commander 11 | ARG REDIS_HOST="redis" 12 | ENV REDIS_HOST ${REDIS_HOST} 13 | ENTRYPOINT redis-commander --redis-host ${REDIS_HOST} 14 | HEALTHCHECK --interval=10s --timeout=10s --retries=10 CMD curl -f http://127.0.0.1:8081 || exit 1 15 | -------------------------------------------------------------------------------- /redis/v3/docker-compose-healthcheck.yml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | 3 | services: 4 | 5 | visualizer: 6 | image: dockersamples/visualizer:stable 7 | ports: ["8080:8080"] 8 | stop_grace_period: 1m30s 9 | volumes: ["/var/run/docker.sock:/var/run/docker.sock"] 10 | restart: unless-stopped 11 | deploy: 12 | placement: 13 | constraints: [node.role == manager] 14 | 15 | redis-healthcheck: 16 | image: healthcheck/redis:alpine 17 | ports: ["6379:6379"] 18 | networks: [backing-services] 19 | volumes: ["redis-healthcheck-data:/data"] 20 | restart: unless-stopped 21 | deploy: 22 | mode: global 23 | restart_policy: 24 | condition: on-failure 25 | delay: 5s 26 | window: 15s 27 | placement: 28 | constraints: [node.role == manager] 29 | 30 | redis-commander-healthcheck: 31 | image: 127.0.0.1:5000/redis-commander-healthcheck 32 | depends_on: [redis-healthcheck] 33 | build: 34 | context: . 
35 | args: 36 | REDIS_HOST: redis-healthcheck 37 | ports: ["8081:8081"] 38 | networks: 39 | - backing-services 40 | - public 41 | restart: unless-stopped 42 | deploy: 43 | mode: replicated 44 | replicas: 1 45 | endpoint_mode: vip 46 | restart_policy: 47 | condition: on-failure 48 | delay: 5s 49 | window: 15s 50 | 51 | volumes: 52 | redis-healthcheck-data: 53 | 54 | networks: 55 | backing-services: 56 | public: 57 | -------------------------------------------------------------------------------- /sonarqube/docker-compose.yml: -------------------------------------------------------------------------------- 1 | my-sonarqube: 2 | container_name: my-sonarqube 3 | image: sonarqube:latest 4 | ports: 5 | - "9000:9000" 6 | - "9002:9002" 7 | -------------------------------------------------------------------------------- /spring-boot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jre-alpine3.7 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache --update add busybox-suid bash curl unzip sudo openssh-client shadow wget \ 4 | && adduser -h /home/appuser -s /bin/bash -D -u 1025 appuser wheel \ 5 | && echo "appuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 6 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 7 | && wget --no-cookies \ 8 | --no-check-certificate \ 9 | --header "Cookie: oraclelicense=accept-securebackup-cookie" \ 10 | "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" \ 11 | -O /tmp/jce_policy-8.zip \ 12 | && unzip -o /tmp/jce_policy-8.zip -d /tmp \ 13 | && mv -f ${JAVA_HOME}/lib/security ${JAVA_HOME}/lib/backup-security \ 14 | && mv -f /tmp/UnlimitedJCEPolicyJDK8 ${JAVA_HOME}/lib/security \ 15 | && apk del busybox-suid unzip openssh-client shadow wget \ 16 | && rm -rf /var/cache/apk/* /tmp/* 17 | USER appuser 18 | WORKDIR /home/appuser 19 | VOLUME /home/appuser 20 | ARG JAVA_OPTS_ARGS="\ 21 | -Djava.net.preferIPv4Stack=true \ 22 | 
-XX:+UnlockExperimentalVMOptions \ 23 | -XX:+UseCGroupMemoryLimitForHeap \ 24 | -XshowSettings:vm " 25 | ENV JAVA_OPTS="${JAVA_OPTS} ${JAVA_OPTS_ARGS}" 26 | ENTRYPOINT java ${JAVA_OPTS} -jar ./app.jar 27 | CMD /bin/bash 28 | EXPOSE 8080 29 | HEALTHCHECK --timeout=2s \ 30 | --retries=22 \ 31 | CMD curl -f http://127.0.0.1:8080/actuator/health || exit 1 32 | COPY --chown=appuser ./build/libs/*.jar ./app.jar 33 | -------------------------------------------------------------------------------- /spring-boot/Dockerfile-healthcheck: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u131-jre-alpine 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache add busybox-suid bash curl \ 4 | && addgroup app-group \ 5 | && adduser -h /home/app -s /bin/bash -D -u 1025 app app-group 6 | USER app 7 | WORKDIR /home/app 8 | VOLUME ["/home/app"] 9 | ENTRYPOINT java -XX:+UnlockExperimentalVMOptions \ 10 | -XX:+UseCGroupMemoryLimitForHeap \ 11 | -XshowSettings:vm \ 12 | -jar ./app.jar 13 | CMD /bin/bash 14 | EXPOSE 8080 15 | HEALTHCHECK --interval=1s \ 16 | --timeout=1s \ 17 | --retries=33 \ 18 | CMD curl -f http://127.0.0.1:8080/health || exit 1 19 | COPY ./build/libs/*.jar ./app.jar 20 | -------------------------------------------------------------------------------- /spring-boot/Dockerfile-no-healthcheck: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u131-jre-alpine 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache add busybox-suid bash \ 4 | && addgroup app-group \ 5 | && adduser -h /home/app -s /bin/bash -D -u 1025 app app-group 6 | USER app 7 | WORKDIR /home/app 8 | VOLUME ["/home/app"] 9 | ENTRYPOINT java -XX:+UnlockExperimentalVMOptions \ 10 | -XX:+UseCGroupMemoryLimitForHeap \ 11 | -XshowSettings:vm \ 12 | -jar ./app.jar 13 | CMD /bin/bash 14 | EXPOSE 8080 15 | COPY ./build/libs/*.jar ./app.jar 16 | 
-------------------------------------------------------------------------------- /spring-boot/build/libs/spring-config-0.0.1.jar: -------------------------------------------------------------------------------- 1 | this is sould be an output fat jar file of spring-boot application 2 | -------------------------------------------------------------------------------- /spring-boot/config-server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8u151-jre-alpine3.7 2 | MAINTAINER Maksim Kostromin https://github.com/daggerok 3 | RUN apk --no-cache --update add busybox-suid bash curl unzip sudo openssh-client shadow wget \ 4 | && adduser -h /home/appuser -s /bin/bash -D -u 1025 appuser wheel \ 5 | && echo "appuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 6 | && sed -i "s/.*requiretty$/Defaults !requiretty/" /etc/sudoers \ 7 | && wget --no-cookies \ 8 | --no-check-certificate \ 9 | --header "Cookie: oraclelicense=accept-securebackup-cookie" \ 10 | "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" \ 11 | -O /tmp/jce_policy-8.zip \ 12 | && unzip -o /tmp/jce_policy-8.zip -d /tmp \ 13 | && mv -f ${JAVA_HOME}/lib/security ${JAVA_HOME}/lib/backup-security \ 14 | && mv -f /tmp/UnlimitedJCEPolicyJDK8 ${JAVA_HOME}/lib/security \ 15 | && apk del busybox-suid unzip openssh-client shadow wget \ 16 | && rm -rf /var/cache/apk/* /tmp/* 17 | USER appuser 18 | WORKDIR /home/appuser 19 | VOLUME /home/appuser 20 | ARG JAVA_OPTS_ARGS="\ 21 | -Djava.net.preferIPv4Stack=true \ 22 | -XX:+UnlockExperimentalVMOptions \ 23 | -XX:+UseCGroupMemoryLimitForHeap \ 24 | -XshowSettings:vm " 25 | ENV JAVA_OPTS="${JAVA_OPTS} ${JAVA_OPTS_ARGS}" 26 | ENTRYPOINT java ${JAVA_OPTS} -jar ./app.jar 27 | CMD /bin/bash 28 | EXPOSE 8888 29 | HEALTHCHECK --timeout=2s \ 30 | --retries=22 \ 31 | CMD curl -f http://127.0.0.1:8888/health || exit 1 32 | COPY --chown=appuser ./build/libs/*.jar ./app.jar 33 | 
-------------------------------------------------------------------------------- /spring-boot/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | 5 | rabbitmq: 6 | restart: unless-stopped 7 | build: ../rabbitmq/custom 8 | volumes: 9 | - "rabbitmq-data:/var/lib/rabbitmq" 10 | - "rabbitmq-data:/etc/rabbitmq" 11 | ports: 12 | - "5672:5672" 13 | - "15672:15672" 14 | networks: [backing-services] 15 | 16 | config-server: 17 | restart: unless-stopped 18 | depends_on: 19 | rabbitmq: 20 | condition: service_healthy 21 | build: . 22 | environment: 23 | SPRING_RABBITMQ_HOST: rabbitmq 24 | volumes: ["config-server-data:/home/app"] 25 | ports: ["8888:8888"] 26 | networks: [backing-services] 27 | 28 | app: 29 | image: 127.0.0.1:5000/app 30 | build: 31 | context: . 32 | dockerfile: ./Dockerfile 33 | depends_on: 34 | rabbitmq: 35 | condition: service_healthy 36 | volumes: ["app-data:/home/app"] 37 | ports: ["8080:8080"] 38 | mem_limit: 512m 39 | networks: 40 | - backing-services 41 | - public 42 | healthcheck: 43 | test: ["CMD-SHELL", "curl -f http://127.0.0.1:8080/ || exit 1"] 44 | #interval: 10s 45 | timeout: 10s 46 | retries: 15 47 | 48 | volumes: 49 | rabbitmq-data: {} 50 | config-server-data: {} 51 | app-data: {} 52 | 53 | networks: 54 | backing-services: 55 | driver: bridge 56 | public: 57 | driver: bridge 58 | --------------------------------------------------------------------------------