├── .gitignore ├── scenario ├── elk │ ├── .env │ ├── logstash │ │ ├── Dockerfile │ │ ├── pipeline │ │ │ └── logstash.conf │ │ └── config │ │ │ └── logstash.yml │ ├── elasticsearch │ │ ├── Dockerfile │ │ └── config │ │ │ └── elasticsearch.yml │ ├── kibana │ │ ├── Dockerfile │ │ └── config │ │ │ └── kibana.yml │ └── docker-compose.yml ├── wordpress-with-nginx │ ├── wordpress.png │ ├── docker-compose.yml │ └── ReadMe.md ├── registry │ ├── docker-compose.yml │ ├── registry_part-2.md │ ├── registry.md │ ├── registry_part-1.md │ └── registry_part-3.md ├── portainer-docker-compose.yml ├── redis_cluster_sample │ ├── sentinel │ │ ├── sentinel-entrypoint.sh │ │ ├── sentinel.conf │ │ └── Dockerfile │ ├── docker-compose.yml │ ├── my_test.sh │ └── README.md ├── logging │ ├── promtail │ │ └── docker-config.yaml │ ├── loki │ │ └── local-config.yaml │ ├── README.md │ └── docker-compose.yml ├── web-service-nginx │ ├── docker-compose.yml │ ├── nginx.conf │ └── nginx_web.md ├── monitoring │ ├── prometheus │ │ ├── alerts │ │ │ ├── Prometheus.rules │ │ │ └── Alertmanager.rules │ │ └── prometheus.yml │ ├── alertmanager │ │ └── config.yml │ ├── README.md │ ├── docker-compose.yml │ └── grafana-dashboard │ │ └── docker-monitoring_rev1.json ├── weavescope-docker-compose.yml ├── gitlab_traefik │ ├── traefik │ │ ├── traefik │ │ │ └── config.yml │ │ └── docker-compose.yml │ ├── README.md │ └── gitlab │ │ └── docker-compose.yml ├── graylog-docker-compose.yml └── running_first_container.md ├── Dockerfile ├── flask-app │ ├── requirements.txt │ ├── Dockerfile │ ├── templates │ │ └── index.html │ └── app.py ├── static-site │ ├── Dockerfile │ └── Hello_docker.html ├── dockerfile_nginx_simple.md ├── dockerfile_perl ├── dockerfile_best_practice.md └── dockerfile_multistage.md ├── images ├── vote.png ├── apptopology.png ├── multitenant.png ├── pets-overlay.png ├── 2node-macvlan-app.png ├── bd3-architecture.png ├── multihost-bridge.png ├── servicediscovery.png ├── singlehost-bridge.png ├── 
docker_osx_insecure_registry.png └── docker_windows_insecure_registry.png ├── configuration ├── plugin-logging.md ├── plugin-volumes.md ├── bridge-networking.md ├── docker_daemon_config.md └── overlay-networking.md ├── README.md ├── LICENSE └── swarm ├── deploying_app_with_swarm.md └── swarm.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | .DS_Store 3 | *.iso -------------------------------------------------------------------------------- /scenario/elk/.env: -------------------------------------------------------------------------------- 1 | ELK_VERSION=7.10.1 2 | -------------------------------------------------------------------------------- /Dockerfile/flask-app/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.0 2 | -------------------------------------------------------------------------------- /images/vote.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/vote.png -------------------------------------------------------------------------------- /images/apptopology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/apptopology.png -------------------------------------------------------------------------------- /images/multitenant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/multitenant.png -------------------------------------------------------------------------------- /images/pets-overlay.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/pets-overlay.png -------------------------------------------------------------------------------- /images/2node-macvlan-app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/2node-macvlan-app.png -------------------------------------------------------------------------------- /images/bd3-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/bd3-architecture.png -------------------------------------------------------------------------------- /images/multihost-bridge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/multihost-bridge.png -------------------------------------------------------------------------------- /images/servicediscovery.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/servicediscovery.png -------------------------------------------------------------------------------- /images/singlehost-bridge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/singlehost-bridge.png -------------------------------------------------------------------------------- /images/docker_osx_insecure_registry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/docker_osx_insecure_registry.png 
-------------------------------------------------------------------------------- /images/docker_windows_insecure_registry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/images/docker_windows_insecure_registry.png -------------------------------------------------------------------------------- /scenario/wordpress-with-nginx/wordpress.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AhmadRafiee/Docker_training_with_DockerMe/HEAD/scenario/wordpress-with-nginx/wordpress.png -------------------------------------------------------------------------------- /Dockerfile/static-site/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | ENV AUTHOR=Docker 3 | 4 | WORKDIR /usr/share/nginx/html 5 | COPY Hello_docker.html /usr/share/nginx/html 6 | 7 | CMD cd /usr/share/nginx/html && sed -e s/Docker/"$AUTHOR"/ Hello_docker.html > index.html ; nginx -g 'daemon off;' 8 | 9 | -------------------------------------------------------------------------------- /scenario/elk/logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/logstash-docker 4 | #FROM docker.elastic.co/logstash/logstash:${ELK_VERSION} 5 | FROM logstash:${ELK_VERSION} 6 | 7 | # Add your logstash plugins setup here 8 | # Example: RUN logstash-plugin install logstash-filter-json 9 | -------------------------------------------------------------------------------- /scenario/elk/logstash/pipeline/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | tcp { 3 | port => 5000 4 | } 5 | } 6 | 7 | ## Add your filters / logstash plugins configuration here 8 | 9 | output { 10 | elasticsearch { 11 | hosts => "elasticsearch:9200" 12 
| user => elastic 13 | password => changeme 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /scenario/registry/docker-compose.yml: -------------------------------------------------------------------------------- 1 | nginx: 2 | image: "nginx:alpine" 3 | ports: 4 | - 443:443 5 | - 80:80 6 | links: 7 | - registry:registry 8 | volumes: 9 | - ./auth:/etc/nginx/conf.d 10 | - ./nginx.conf:/etc/nginx/nginx.conf:ro 11 | 12 | registry: 13 | image: registry:2 14 | volumes: 15 | - ./data:/var/lib/registry 16 | -------------------------------------------------------------------------------- /scenario/portainer-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | volumes: 4 | portainer_data: 5 | 6 | services: 7 | portainer: 8 | image: portainer/portainer 9 | command: -H unix:///var/run/docker.sock 10 | restart: always 11 | ports: 12 | - 9000:9000 13 | volumes: 14 | - /var/run/docker.sock:/var/run/docker.sock 15 | - portainer_data:/data -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/sentinel/sentinel-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sed -i "s/\$SENTINEL_QUORUM/$SENTINEL_QUORUM/g" /etc/redis/sentinel.conf 4 | sed -i "s/\$SENTINEL_DOWN_AFTER/$SENTINEL_DOWN_AFTER/g" /etc/redis/sentinel.conf 5 | sed -i "s/\$SENTINEL_FAILOVER/$SENTINEL_FAILOVER/g" /etc/redis/sentinel.conf 6 | 7 | exec docker-entrypoint.sh redis-server /etc/redis/sentinel.conf --sentinel -------------------------------------------------------------------------------- /scenario/elk/elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/elasticsearch-docker 4 | #FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION} 5 | FROM 
elasticsearch:${ELK_VERSION} 6 | 7 | # Add your elasticsearch plugins setup here 8 | # Example: RUN elasticsearch-plugin install analysis-icu 9 | #RUN bin/elasticsearch-plugin install -b https://store.kcdn.ir/elk/prometheus-exporter-7.1.0.0.zip 10 | -------------------------------------------------------------------------------- /scenario/elk/kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/kibana-docker 4 | #FROM docker.elastic.co/kibana/kibana:${ELK_VERSION} 5 | FROM kibana:${ELK_VERSION} 6 | 7 | # Add your kibana plugins setup here 8 | # Example: RUN kibana-plugin install 9 | # 10 | #RUN bin/kibana-plugin install https://github.com/pjhampton/kibana-prometheus-exporter/releases/download/7.1.0/kibana-prometheus-exporter-7.1.0.zip 11 | -------------------------------------------------------------------------------- /scenario/logging/promtail/docker-config.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: 9080 3 | grpc_listen_port: 0 4 | 5 | positions: 6 | filename: /tmp/positions.yaml 7 | 8 | clients: 9 | - url: http://loki:3100/api/prom/push 10 | 11 | scrape_configs: 12 | - job_name: system 13 | static_configs: 14 | - targets: 15 | - localhost 16 | labels: 17 | job: varlogs 18 | host: monitoring 19 | __path__: /var/log/*.log 20 | -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/sentinel/sentinel.conf: -------------------------------------------------------------------------------- 1 | # Example sentinel.conf can be downloaded from http://download.redis.io/redis-stable/sentinel.conf 2 | 3 | port 26379 4 | 5 | dir /tmp 6 | 7 | sentinel monitor mymaster redis-master 6379 $SENTINEL_QUORUM 8 | 9 | sentinel down-after-milliseconds mymaster $SENTINEL_DOWN_AFTER 10 | 11 | sentinel parallel-syncs mymaster 1 12 | 13 | sentinel 
failover-timeout mymaster $SENTINEL_FAILOVER 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /scenario/elk/logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Default Logstash configuration from logstash-docker. 3 | ## from https://github.com/elastic/logstash-docker/blob/master/build/logstash/config/logstash-full.yml 4 | # 5 | http.host: "0.0.0.0" 6 | 7 | ## X-Pack security credentials 8 | xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] 9 | xpack.monitoring.elasticsearch.username: elastic 10 | xpack.monitoring.elasticsearch.password: changeme 11 | -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | master: 4 | image: redis:3 5 | slave: 6 | image: redis:3 7 | command: redis-server --slaveof redis-master 6379 8 | links: 9 | - master:redis-master 10 | sentinel: 11 | build: sentinel 12 | environment: 13 | - SENTINEL_DOWN_AFTER=5000 14 | - SENTINEL_FAILOVER=5000 15 | links: 16 | - master:redis-master 17 | - slave 18 | -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/sentinel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis:3 2 | 3 | MAINTAINER Li Yi 4 | 5 | EXPOSE 26379 6 | ADD sentinel.conf /etc/redis/sentinel.conf 7 | RUN chown redis:redis /etc/redis/sentinel.conf 8 | ENV SENTINEL_QUORUM 2 9 | ENV SENTINEL_DOWN_AFTER 30000 10 | ENV SENTINEL_FAILOVER 180000 11 | COPY sentinel-entrypoint.sh /usr/local/bin/ 12 | RUN chmod +x /usr/local/bin/sentinel-entrypoint.sh 13 | ENTRYPOINT ["sentinel-entrypoint.sh"] 14 | -------------------------------------------------------------------------------- 
/scenario/elk/kibana/config/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Default Kibana configuration from kibana-docker. 3 | ## https://github.com/elastic/kibana-docker/blob/master/.tedi/template/kibana.yml.j2 4 | # 5 | server.name: kibana 6 | server.host: "0" 7 | elasticsearch.hosts: [ "http://elasticsearch:9200" ] 8 | xpack.monitoring.ui.container.elasticsearch.enabled: true 9 | 10 | ## X-Pack security credentials 11 | # 12 | elasticsearch.username: elastic 13 | elasticsearch.password: changeme 14 | -------------------------------------------------------------------------------- /scenario/web-service-nginx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.4' 3 | 4 | networks: 5 | http_net: 6 | external: true 7 | web_net: 8 | external: false 9 | 10 | services: 11 | web: 12 | image: nginx:alpine 13 | restart: on-failure 14 | container_name: web 15 | ports: 16 | - 443:443 17 | - 80:80 18 | volumes: 19 | - ./certs/:/etc/nginx/certs 20 | - ./conf.d/:/etc/nginx/conf.d 21 | networks: 22 | - http_net 23 | - web_net -------------------------------------------------------------------------------- /scenario/monitoring/prometheus/alerts/Prometheus.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: Prometheus_Alerts 3 | rules: 4 | 5 | - alert: PrometheusConfigurationReload 6 | expr: prometheus_config_last_reload_successful != 1 7 | for: 2m 8 | labels: 9 | severity: error 10 | annotations: 11 | summary: "Prometheus configuration reload (instance {{ $labels.instance }})" 12 | description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 13 | -------------------------------------------------------------------------------- /scenario/monitoring/prometheus/alerts/Alertmanager.rules: 
-------------------------------------------------------------------------------- 1 | groups: 2 | - name: Alertmanager_Alerts 3 | rules: 4 | 5 | - alert: AlertmanagerConfigurationReload 6 | expr: alertmanager_config_last_reload_successful != 1 7 | for: 5m 8 | labels: 9 | severity: page 10 | annotations: 11 | summary: "AlertManager configuration reload (instance {{ $labels.instance }})" 12 | description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 13 | -------------------------------------------------------------------------------- /scenario/weavescope-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | scope: 4 | image: weaveworks/scope:latest 5 | pid: "host" 6 | privileged: true 7 | container_name: weavescope 8 | restart: on-failure 9 | labels: 10 | - "works.weave.role=system" 11 | volumes: 12 | - "/var/run/docker.sock:/var/run/docker.sock:rw" 13 | command: 14 | - "--probe.docker=true" 15 | networks: 16 | - scope_net 17 | - http_net 18 | ports: 19 | - 4040:4040 20 | 21 | networks: 22 | scope_net: 23 | external: false 24 | http_net: 25 | external: true 26 | -------------------------------------------------------------------------------- /Dockerfile/flask-app/Dockerfile: -------------------------------------------------------------------------------- 1 | # our base image 2 | FROM alpine:3.5 3 | 4 | # Install python and pip 5 | RUN apk add --update py2-pip 6 | 7 | # upgrade pip 8 | RUN pip install --upgrade pip 9 | 10 | # install Python modules needed by the Python app 11 | COPY requirements.txt /usr/src/app/ 12 | RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt 13 | 14 | # copy files required for the app to run 15 | COPY app.py /usr/src/app/ 16 | COPY templates/index.html /usr/src/app/templates/ 17 | 18 | # tell the port number the container should expose 19 | EXPOSE 5000 20 | 21 | # run the application 22 | CMD ["python", 
"/usr/src/app/app.py"] 23 | -------------------------------------------------------------------------------- /scenario/monitoring/alertmanager/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'smtp.SENDER-SITE:587' 4 | smtp_from: 'EMAIL-MAILBOX-FOR-SENDER' 5 | smtp_auth_username: 'EMAIL-MAILBOX-FOR-SENDER' 6 | smtp_auth_password: 'PASSWORD-SENDER-MAILBOX' 7 | smtp_auth_identity: 'USER-SENDER-MAILBOX' 8 | 9 | route: 10 | group_by: ['instance', 'severity'] 11 | group_wait: 30s 12 | group_interval: 30s 13 | repeat_interval: 30m 14 | receiver: 'stage' 15 | 16 | receivers: 17 | - name: 'stage' 18 | email_configs: 19 | - send_resolved: true 20 | to: 'EMAIL-MAILBOX-FOR-RECEIVER' 21 | -------------------------------------------------------------------------------- /scenario/logging/loki/local-config.yaml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_port: 3100 5 | 6 | ingester: 7 | lifecycler: 8 | address: 127.0.0.1 9 | ring: 10 | kvstore: 11 | store: inmemory 12 | replication_factor: 1 13 | chunk_idle_period: 15m 14 | 15 | schema_config: 16 | configs: 17 | - from: 2019-01-01 18 | store: boltdb 19 | object_store: filesystem 20 | schema: v9 21 | index: 22 | prefix: index_ 23 | period: 168h 24 | 25 | storage_config: 26 | boltdb: 27 | directory: ./loki/index 28 | 29 | filesystem: 30 | directory: ./loki/chunks 31 | 32 | limits_config: 33 | enforce_metric_name: false 34 | -------------------------------------------------------------------------------- /Dockerfile/dockerfile_nginx_simple.md: -------------------------------------------------------------------------------- 1 | # Dockerfile sample 2 | ### Simple nginx dockerfile 3 | ```bash 4 | FROM ubuntu:latest 5 | LABEL maintainer="Ahmad Rafiee " 6 | RUN apt update \ 7 | && apt install -y vim nginx 8 
| 9 | # forward request and error logs to docker log collector 10 | RUN ln -sf /dev/stdout /var/log/nginx/access.log \ 11 | && ln -sf /dev/stderr /var/log/nginx/error.log 12 | 13 | EXPOSE 80 14 | CMD ["nginx", "-g", "daemon off;"] 15 | ``` 16 | ### Build this dockerfile 17 | ```bash 18 | docker build -t -f . 19 | # For example 20 | docker build -t nginx:test . 21 | ``` 22 | ### Run nginx image 23 | ```bash 24 | docker run -d --name web -p 80:80 nginx:test 25 | ``` -------------------------------------------------------------------------------- /scenario/elk/elasticsearch/config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Default Elasticsearch configuration from elasticsearch-docker. 3 | ## from https://github.com/elastic/elasticsearch-docker/blob/master/.tedi/template/elasticsearch.yml 4 | # 5 | cluster.name: "docker-cluster" 6 | network.host: 0.0.0.0 7 | 8 | ## Use single node discovery in order to disable production mode and avoid bootstrap checks 9 | ## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html 10 | # 11 | discovery.type: single-node 12 | 13 | ## X-Pack settings 14 | ## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html 15 | xpack.license.self_generated.type: trial 16 | xpack.security.enabled: true 17 | xpack.monitoring.collection.enabled: true 18 | -------------------------------------------------------------------------------- /Dockerfile/flask-app/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | 20 | 21 |
22 |

Cat Gif of the day

23 | 24 |

Courtesy: Buzzfeed

25 |
26 | 27 | 28 | -------------------------------------------------------------------------------- /scenario/web-service-nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | user nginx; 3 | worker_processes auto; 4 | 5 | error_log /var/log/nginx/error.log notice; 6 | pid /var/run/nginx.pid; 7 | 8 | 9 | events { 10 | worker_connections 1024; 11 | } 12 | 13 | 14 | http { 15 | include /etc/nginx/mime.types; 16 | default_type application/octet-stream; 17 | 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main; 23 | 24 | sendfile on; 25 | #tcp_nopush on; 26 | 27 | keepalive_timeout 65; 28 | 29 | #gzip on; 30 | 31 | include /etc/nginx/conf.d/*.conf; 32 | } 33 | -------------------------------------------------------------------------------- /scenario/gitlab_traefik/traefik/traefik/config.yml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | security: 4 | headers: 5 | frameDeny: true 6 | contentTypeNosniff: true 7 | browserXssFilter: true 8 | hsts: 9 | headers: 10 | stsSeconds: 31536000 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | forceSTSHeader: true 14 | tls: 15 | options: 16 | default: 17 | minVersion: VersionTLS12 18 | maxVersion: VersionTLS13 19 | cipherSuites: 20 | - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 21 | - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 22 | - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 23 | - TLS_AES_256_GCM_SHA384 24 | - TLS_CHACHA20_POLY1305_SHA256 25 | 26 | certificates: 27 | - certFile: /traefik/certs/cert.pem 28 | keyFile: /traefik/certs/key.pem -------------------------------------------------------------------------------- /scenario/monitoring/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | 
global: 2 | scrape_interval: 30s 3 | evaluation_interval: 30s 4 | 5 | # Attach these labels to any time series or alerts when communicating with 6 | # external systems (federation, remote storage, Alertmanager). 7 | external_labels: 8 | monitor: 'prom' 9 | 10 | # Load and evaluate rules in this file every 'evaluation_interval' seconds. 11 | rule_files: 12 | - 'alerts/*.rules' 13 | 14 | # alert 15 | alerting: 16 | alertmanagers: 17 | - scheme: http 18 | static_configs: 19 | - targets: 20 | - "alertmanager:9093" 21 | 22 | scrape_configs: 23 | - job_name: 'prometheus' 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | 27 | - job_name: 'alertmanager' 28 | static_configs: 29 | - targets: ['alertmanager:9093'] 30 | 31 | - job_name: 'grafana' 32 | static_configs: 33 | - targets: ['grafana:3000'] 34 | 35 | - job_name: 'cadvisor' 36 | static_configs: 37 | - targets: ['cadvisor:8080'] 38 | 39 | - job_name: 'node-exporter' 40 | static_configs: 41 | - targets: ['node-exporter:9100'] 42 | -------------------------------------------------------------------------------- /configuration/plugin-logging.md: -------------------------------------------------------------------------------- 1 | # Docker plugin - logging 2 | ## Plugin Installation​ 3 | ```bash 4 | # install plugin 5 | docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions​ 6 | 7 | # check 8 | docker plugin ls​ 9 | ``` 10 | 11 | ## Configure the default logging driver​ 12 | ```bash 13 | # check daemon logging before configuration 14 | docker info | grep "Logging Driver" 15 | 16 | # configure 17 | vim /etc/systemd/system/docker.service.d/override.conf 18 | [Service]​ 19 | ExecStart=​ 20 | ExecStart=/usr/bin/dockerd --log-driver loki --log-opt loki-url="https://:@/api/prom/push" --log-opt loki-batch-size=400 ​ 21 | 22 | # reload systemd and restart docker 23 | systemctl daemon-reload 24 | systemctl restart docker 25 | systemctl status docker 26 | 27 | # check daemon logging 
after configuration 28 | docker info | grep "Logging Driver" 29 | ``` 30 | ## Configure the logging driver for a container​ 31 | ```bash 32 | docker run --log-driver=loki \​ 33 | --log-opt loki-url="https://:@/api/prom/push" \​ 34 | --log-opt loki-batch-size=400 \​ 35 | grafana/grafana​ 36 | ``` -------------------------------------------------------------------------------- /scenario/logging/README.md: -------------------------------------------------------------------------------- 1 | # Monitoring and Alerting with Docker 2 | 3 | **Logging Stack:** Loki, Promtail and Grafana 4 | 5 | **Web Interface:** Traefik 6 | 7 | ## Requirement before running compose file 8 | 1. Hardening OS 9 | 10 | 2. Install docker 11 | 12 | 3. Install docker-compose 13 | 14 | 4. Change and complate config files 15 | 16 | 17 | ## Installation 18 | 19 | **Step1**: chnage service config files: 20 | ```bash 21 | tree logging 22 | . 23 | |-- README.md 24 | |-- docker-compose.yml 25 | |-- loki 26 | | `-- local-config.yaml 27 | `-- promtail 28 | `-- docker-config.yaml 29 | 30 | 2 directories, 4 files 31 | ``` 32 | 33 | **Step2:** chnage **DOMAIN** on docker-comose file with your domain. 34 | 35 | **Step3:** change promtail config 36 | 37 | **Step4:** check compose file and Run all services 38 | 39 | ```bash 40 | docker-compose config 41 | docker-compose up -d 42 | ``` 43 | 44 | **Step5:** Check compose services and view all services logs 45 | ```bash 46 | docker-compose ps 47 | docker-compose logs -f --tail 100 48 | ``` 49 | 50 | **Step6:** check and visit your domain service: 51 | 52 | 1. loki.DOMAIN: loki web interface 53 | 54 | 2. web.DOMAIN: traefik2 dashboard 55 | 56 | 3. 
grafana.DOMAIN: grafana dashboard 57 | 58 | **Step7:** config grafana service for view all log on Explore menu 59 | 60 | ## License 61 | [DockerMe.ir](https://dockerme.ir) -------------------------------------------------------------------------------- /scenario/wordpress-with-nginx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.8' 3 | 4 | networks: 5 | wp_net: 6 | name: wp_net 7 | driver_opts: 8 | com.docker.network.bridge.name: wp_net 9 | 10 | volumes: 11 | wp_db: 12 | name: wp_db 13 | wp_wp: 14 | name: wp_wp 15 | 16 | services: 17 | db: 18 | image: mysql:5.7 19 | container_name: mysql 20 | volumes: 21 | - wp_db:/var/lib/mysql 22 | restart: always 23 | environment: 24 | MYSQL_ROOT_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 25 | MYSQL_DATABASE: DockerMe 26 | MYSQL_USER: DockerMe 27 | MYSQL_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 28 | networks: 29 | - wp_net 30 | 31 | wordpress: 32 | image: wordpress:latest 33 | container_name: wordpress 34 | volumes: 35 | - wp_wp:/var/www/html/ 36 | depends_on: 37 | - db 38 | restart: always 39 | environment: 40 | WORDPRESS_DB_HOST: db:3306 41 | WORDPRESS_DB_USER: DockerMe 42 | WORDPRESS_DB_NAME: DockerMe 43 | WORDPRESS_DB_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 44 | ports: 45 | - 8000:80 46 | networks: 47 | - wp_net 48 | 49 | nginx: 50 | image: nginx:alpine 51 | container_name: nginx 52 | restart: always 53 | depends_on: 54 | - wordpress 55 | volumes: 56 | - ./nginx/conf.d:/etc/nginx/conf.d 57 | - ./nginx/certs:/etc/nginx/certs 58 | ports: 59 | - 80:80 60 | - 443:443 61 | networks: 62 | - wp_net -------------------------------------------------------------------------------- /Dockerfile/dockerfile_perl: -------------------------------------------------------------------------------- 1 | FROM buildpack-deps:buster 2 | LABEL maintainer="Peter Martini , Zak B. 
Elep " 3 | 4 | COPY *.patch /usr/src/perl/ 5 | WORKDIR /usr/src/perl 6 | 7 | RUN true \ 8 | && curl -SL https://www.cpan.org/src/5.0/perl-5.34.0.tar.xz -o perl-5.34.0.tar.xz \ 9 | && echo '82c2e5e5c71b0e10487a80d79140469ab1f8056349ca8545140a224dbbed7ded *perl-5.34.0.tar.xz' | sha256sum -c - \ 10 | && tar --strip-components=1 -xaf perl-5.34.0.tar.xz -C /usr/src/perl \ 11 | && rm perl-5.34.0.tar.xz \ 12 | && cat *.patch | patch -p1 \ 13 | && gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" \ 14 | && archBits="$(dpkg-architecture --query DEB_BUILD_ARCH_BITS)" \ 15 | && archFlag="$([ "$archBits" = '64' ] && echo '-Duse64bitall' || echo '-Duse64bitint')" \ 16 | && ./Configure -Darchname="$gnuArch" "$archFlag" -Duseshrplib -Dvendorprefix=/usr/local -des \ 17 | && make -j$(nproc) \ 18 | && TEST_JOBS=$(nproc) make test_harness \ 19 | && make install \ 20 | && cd /usr/src \ 21 | && curl -LO https://www.cpan.org/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7044.tar.gz \ 22 | && echo '9b60767fe40752ef7a9d3f13f19060a63389a5c23acc3e9827e19b75500f81f3 *App-cpanminus-1.7044.tar.gz' | sha256sum -c - \ 23 | && tar -xzf App-cpanminus-1.7044.tar.gz && cd App-cpanminus-1.7044 && perl bin/cpanm . && cd /root \ 24 | && true \ 25 | && rm -fr ./cpanm /root/.cpanm /usr/src/perl /usr/src/App-cpanminus-1.7044* /tmp/* 26 | 27 | WORKDIR / 28 | 29 | CMD ["perl5.34.0","-de0"] 30 | -------------------------------------------------------------------------------- /scenario/monitoring/README.md: -------------------------------------------------------------------------------- 1 | # Monitoring and Alerting with Docker 2 | 3 | **Monitoring Stack:** Prometheus, Exporters and Grafana 4 | 5 | **Alerting:** Alertmanager 6 | 7 | **Web Interface:** Traefik 8 | 9 | ## Requirement before running compose file 10 | 1. Hardening OS 11 | 12 | 2. Install docker 13 | 14 | 3. Install docker-compose 15 | 16 | 4. 
Change and complate config files 17 | 18 | 19 | ## Installation 20 | 21 | **Step1**: chnage service config files: 22 | ```bash 23 | tree monitoring 24 | . 25 | |-- README.md 26 | |-- alertmanager 27 | | `-- config.yml 28 | |-- docker-compose.yml 29 | |-- grafana-dashboard 30 | | |-- docker-monitoring_rev1.json 31 | | `-- node-exporter-full_rev16.json 32 | `-- prometheus 33 | |-- alerts 34 | | |-- Alertmanager.rules 35 | | `-- Prometheus.rules 36 | `-- prometheus.yml 37 | 38 | 4 directories, 8 files 39 | ``` 40 | 41 | **Step2:** chnage **DOMAIN** on docker-comose file with your domain. 42 | 43 | **Step3:** change alertmanager email notification config 44 | 45 | **Step4:**check compose file and Run all services 46 | ```bash 47 | docker-compose config 48 | docker-compose up -d 49 | ``` 50 | 51 | **Step5:** Check compose services and view all services logs 52 | ```bash 53 | docker-compose ps 54 | docker-compose logs -f --tail 100 55 | ``` 56 | 57 | **Step6:** check and visit your domain service: 58 | 59 | 1. prometheus.DOMAIN: prometheus dashboard 60 | 61 | 2. web.DOMAIN: traefik2 dashboard 62 | 63 | 3. alert.DOMAIN: alertmanager dashboard 64 | 65 | 4. 
grafana.DOMAIN: grafana dashboard 66 | 67 | **Step7:** config grafana service for view all metric on visualize dashboard 68 | 69 | ## License 70 | [DockerMe.ir](https://dockerme.ir) -------------------------------------------------------------------------------- /Dockerfile/flask-app/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, render_template 2 | import random 3 | 4 | app = Flask(__name__) 5 | 6 | # list of cat images 7 | images = [ 8 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr05/15/9/anigif_enhanced-buzz-26388-1381844103-11.gif", 9 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr01/15/9/anigif_enhanced-buzz-31540-1381844535-8.gif", 10 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr05/15/9/anigif_enhanced-buzz-26390-1381844163-18.gif", 11 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr06/15/10/anigif_enhanced-buzz-1376-1381846217-0.gif", 12 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr03/15/9/anigif_enhanced-buzz-3391-1381844336-26.gif", 13 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr06/15/10/anigif_enhanced-buzz-29111-1381845968-0.gif", 14 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr03/15/9/anigif_enhanced-buzz-3409-1381844582-13.gif", 15 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr02/15/9/anigif_enhanced-buzz-19667-1381844937-10.gif", 16 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr05/15/9/anigif_enhanced-buzz-26358-1381845043-13.gif", 17 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr06/15/9/anigif_enhanced-buzz-18774-1381844645-6.gif", 18 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr06/15/9/anigif_enhanced-buzz-25158-1381844793-0.gif", 19 | "http://ak-hdl.buzzfed.com/static/2013-10/enhanced/webdr03/15/10/anigif_enhanced-buzz-11980-1381846269-1.gif" 20 | ] 21 | 22 | @app.route('/') 23 | def index(): 24 | url = random.choice(images) 25 | return 
render_template('index.html', url=url) 26 | 27 | if __name__ == "__main__": 28 | app.run(host="0.0.0.0") 29 | -------------------------------------------------------------------------------- /scenario/graylog-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | volumes: 4 | mongo_data: 5 | name: mongo_data 6 | elasticsearch_data: 7 | name: elasticsearch_data 8 | 9 | networks: 10 | graylog: 11 | driver: bridge 12 | 13 | services: 14 | mongo: 15 | image: mongo:4.2 16 | restart: always 17 | container_name: mongo 18 | volumes: 19 | - mongo_data:/data/db 20 | networks: 21 | - graylog 22 | 23 | elasticsearch: 24 | image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2 25 | restart: always 26 | container_name: elasticsearch 27 | environment: 28 | - http.host=0.0.0.0 29 | - transport.host=localhost 30 | - network.host=0.0.0.0 31 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 32 | volumes: 33 | - elasticsearch_data:/usr/share/elasticsearch/data 34 | deploy: 35 | resources: 36 | limits: 37 | memory: 1g 38 | ulimits: 39 | memlock: 40 | soft: -1 41 | hard: -1 42 | mem_limit: 1g 43 | networks: 44 | - graylog 45 | 46 | graylog: 47 | image: graylog/graylog:4.0 48 | container_name: graylog 49 | environment: 50 | # CHANGE ME (must be at least 16 characters)! 
51 | - GRAYLOG_PASSWORD_SECRET=somepasswordpepper 52 | # Password: admin 53 | - GRAYLOG_ROOT_PASSWORD_SHA2=8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 54 | - GRAYLOG_HTTP_EXTERNAL_URI=http://127.0.0.1:9000/ 55 | entrypoint: /usr/bin/tini -- wait-for-it elasticsearch:9200 -- /docker-entrypoint.sh 56 | networks: 57 | - graylog 58 | restart: always 59 | depends_on: 60 | - mongo 61 | - elasticsearch 62 | ports: 63 | # Graylog web interface and REST API 64 | - 9000:9000 65 | # Syslog TCP 66 | - 1514:1514 67 | # Syslog UDP 68 | - 1514:1514/udp 69 | # GELF TCP 70 | - 12201:12201 71 | # GELF UDP 72 | - 12201:12201/udp 73 | -------------------------------------------------------------------------------- /scenario/elk/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | volumes: 4 | es_data: 5 | name: es_data 6 | 7 | networks: 8 | http_network: 9 | external: true 10 | elk_network: 11 | external: false 12 | 13 | services: 14 | elasticsearch: 15 | build: 16 | context: elasticsearch/ 17 | args: 18 | ELK_VERSION: $ELK_VERSION 19 | volumes: 20 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro 21 | - es_data:/usr/share/elasticsearch/data 22 | container_name: elasticsearch 23 | restart: on-failure 24 | ports: 25 | - "9200:9200" 26 | environment: 27 | ES_JAVA_OPTS: "-Xmx1g -Xms1g" 28 | ELASTIC_PASSWORD: changeme 29 | cluster.name: es-cluster 30 | bootstrap.memory_lock: "true" 31 | http.cors.enabled: "true" 32 | http.cors.allow-origin: "*" 33 | ulimits: 34 | memlock: 35 | soft: -1 36 | hard: -1 37 | networks: 38 | - elk_network 39 | 40 | logstash: 41 | build: 42 | context: logstash/ 43 | args: 44 | ELK_VERSION: $ELK_VERSION 45 | volumes: 46 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro 47 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro 48 | ports: 49 | - "5000:5000" 50 | # - "9600:9600" 51 | 
container_name: logstash 52 | restart: on-failure 53 | environment: 54 | LS_JAVA_OPTS: "-Xmx1g -Xms1g" 55 | ELASTICSEARCH_HOSTS: http://elasticsearch_node1:9200 56 | networks: 57 | - http_network 58 | - elk_network 59 | depends_on: 60 | - elasticsearch 61 | 62 | kibana: 63 | build: 64 | context: kibana/ 65 | args: 66 | ELK_VERSION: $ELK_VERSION 67 | volumes: 68 | - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro 69 | ports: 70 | - "5601:5601" 71 | container_name: kibana 72 | restart: on-failure 73 | environment: 74 | ELASTICSEARCH_PASSWORD: changeme 75 | networks: 76 | - http_network 77 | - elk_network 78 | depends_on: 79 | - elasticsearch -------------------------------------------------------------------------------- /configuration/plugin-volumes.md: -------------------------------------------------------------------------------- 1 | # Docker plugin - Volumes 2 | 3 | ## Blockbridge installation with docker 4 | ```bash 5 | curl -sSL https://get.blockbridge.com/container | sh 6 | 7 | # sample output 8 | ================================================================================= 9 | Blockbridge Storage Container 4.3.0-5544.1 (7bad60f1-2479-4939-b457-271165daa4eb) 10 | Mode: converged 11 | 12 | Generated Credentials (may not reflect current system state) 13 | 14 | System admin API token: 1/1RGEpVLjN6AqQu1gtPWDzAabQWncs2S1ROCxhekvzvzFEYSlUuM24Q 15 | System admin username: system 16 | System admin password: 263cfc0940d06a219bdac593abef87c0 17 | Default user username: default 18 | Default user password: b8570b319c0a858294acda49a444000a 19 | Volume plugin API token: 1/z0D6Yj2w8x0T+8JRaxThCIQYabWEoz/mPPNs061SWnrvQPpiPbDyXA 20 | ================================================================================= 21 | ``` 22 | 23 | ## Plugin Installation​ 24 | ```bash 25 | # install plugin 26 | docker plugin install --alias block blockbridge/volume-plugin BLOCKBRIDGE_API_HOST="YOUR HOST" BLOCKBRIDGE_API_KEY="YOUR KEY" 27 | # for example 28 | docker 
plugin install --alias block blockbridge/volume-plugin BLOCKBRIDGE_API_HOST="172.16.10.153" BLOCKBRIDGE_API_KEY="1/1RGEpVLjN6AqQu1gtPWDzAabQWncs2S1ROCxhekvzvzFEYSlUuM24Q" 29 | 30 | # check 31 | docker plugin ls 32 | ``` 33 | ## Create volume with blockbridge driver 34 | ```bash 35 | # create volume 36 | docker volume create --name default --driver block default 37 | 38 | # check 39 | docker volume ls -f driver=block 40 | ``` 41 | 42 | ## Create volume with capacity option and blockbridge driver 43 | ```bash 44 | # create volume 45 | docker volume create --name custom --driver block --opt capacity=4GiB 46 | 47 | # check 48 | docker volume ls -f driver=block 49 | ``` 50 | 51 | ## Create container with volume on blockbridge storage 52 | ```bash 53 | # create container 54 | docker run --name test --volume-driver block -v implicit:/data -d -it busybox sh 55 | 56 | # check 57 | docker ps 58 | ``` 59 | 60 | ## Reference 61 | - https://www.blockbridge.com/container/ 62 | - https://docs.docker.com/engine/extend/legacy_plugins/ -------------------------------------------------------------------------------- /Dockerfile/dockerfile_best_practice.md: -------------------------------------------------------------------------------- 1 | # [dockerfile best-practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) 2 | ## Pipe Dockerfile through `stdin` 3 | Docker has the ability to build images by piping Dockerfile through stdin with a local or remote build context. Piping a Dockerfile through stdin can be useful to perform one-off builds without writing a Dockerfile to disk, or in situations where the Dockerfile is generated, and should not persist afterwards.
4 | 5 | ```bash 6 | echo -e 'FROM busybox\nRUN echo "hello world"' | docker build - 7 | ``` 8 | ```bash 9 | docker build -< 2 | version: '3.4' 3 | services: 4 | traefik: 5 | image: traefik:latest 6 | container_name: traefik 7 | command: 8 | - "--api=true" 9 | - "--api.insecure=true" 10 | - "--providers.docker.endpoint=unix:///var/run/docker.sock" 11 | - "--providers.docker.exposedbydefault=false" 12 | - "--providers.docker.network=http_network" 13 | - "--entrypoints.http.address=:80" 14 | labels: 15 | - "traefik.enable=true" 16 | - "traefik.docker.network=http_network" 17 | - "traefik.http.routers.tra.entrypoints=http" 18 | - "traefik.http.routers.tra.rule=Host(`web.DockerMe.ir`)" 19 | - "traefik.http.services.tra.loadbalancer.server.port=8080" 20 | ports: 21 | - "80:80" 22 | volumes: 23 | - /var/run/docker.sock:/var/run/docker.sock:ro 24 | networks: 25 | - http_network 26 | 27 | grafana: 28 | image: grafana/grafana 29 | container_name: grafana 30 | volumes: 31 | - grafana:/var/lib/grafana 32 | labels: 33 | - "traefik.enable=true" 34 | - "traefik.docker.network=http_network" 35 | - "traefik.http.routers.gra.entrypoints=http" 36 | - "traefik.http.routers.gra.rule=Host(`grafana.DockerMe.ir`)" 37 | - "traefik.http.services.gra.loadbalancer.server.port=3000" 38 | networks: 39 | - http_network 40 | - services_network 41 | 42 | loki: 43 | image: grafana/loki:latest 44 | container_name: loki 45 | command: -config.file=/etc/loki/local-config.yaml 46 | volumes: 47 | - loki:/tmp/loki 48 | - ./loki/local-config.yaml:/etc/loki/local-config.yaml 49 | labels: 50 | - "traefik.enable=true" 51 | - "traefik.docker.network=http_network" 52 | - "traefik.http.routers.lok.entrypoints=http" 53 | - "traefik.http.routers.lok.rule=Host(`loki.DockerMe.ir`)" 54 | - "traefik.http.services.lok.loadbalancer.server.port=3100" 55 | networks: 56 | - http_network 57 | - services_network 58 | 59 | promtail: 60 | image: grafana/promtail:latest 61 | container_name: promtail 62 | volumes: 63 | - 
/var/log:/var/log 64 | - ./promtail/docker-config.yaml:/etc/promtail/docker-config.yaml 65 | command: -config.file=/etc/promtail/docker-config.yaml 66 | networks: 67 | - services_network 68 | 69 | networks: 70 | http_network: 71 | services_network: 72 | 73 | volumes: 74 | grafana: 75 | name: grafana 76 | loki: 77 | name: loki 78 | # DockerMe.ir 79 | -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/my_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | slave_nu=$(docker ps | grep redis-cluster_slave_* | wc -l) 3 | sentinel_nu=$(docker ps | grep redis-cluster_sentinel_* | wc -l) 4 | MASTER_IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' redis-cluster_master_1) 5 | 6 | 7 | echo -------------------------- 8 | echo "###### INFORMATION ######" 9 | echo -------------------------- 10 | echo Redis Slave Numbers: $slave_nu 11 | echo Redis Sentinel Numbers: $sentinel_nu 12 | echo -------------------------- 13 | echo 14 | echo -------------------------- 15 | echo Redis master: $MASTER_IP 16 | echo -------------------------- 17 | echo 18 | echo -------------------------- 19 | for ((i=1;i<=$slave_nu;i++)); 20 | do 21 | SLAVE_IP_[$i]=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' redis-cluster_slave_$i) 22 | echo Redis Slave $i: "${SLAVE_IP_[$i]}" 23 | echo -------------------------- 24 | done 25 | echo 26 | echo ----------------------------- 27 | for ((i=1;i<=$sentinel_nu;i++)); 28 | do 29 | SENTINEL_IP_[$i]=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' redis-cluster_sentinel_$i) 30 | echo Redis Sentinel $i: "${SENTINEL_IP_[$i]}" 31 | echo ----------------------------- 32 | done 33 | 34 | 35 | echo -------------------------- 36 | echo Initial status of sentinel 37 | echo -------------------------- 38 | #docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel 39 | echo Current master is 40 | #docker 
exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 41 | docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 42 | echo ------------------------------------------------ 43 | 44 | echo Stop redis master 45 | docker pause redis-cluster_master_1 46 | echo Wait for 15 seconds 47 | sleep 15 48 | echo Current infomation of sentinel 49 | #docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel 50 | #docker-compose exec sentinel redis-cli -p 26379 info sentinel 51 | echo Current master is 52 | #docker exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 53 | docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 54 | 55 | echo ------------------------------------------------ 56 | echo Restart Redis master 57 | docker unpause redis-cluster_master_1 58 | #sleep 5 59 | echo Current infomation of sentinel 60 | #docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel 61 | #docker-compose exec sentinel redis-cli -p 26379 info sentinel 62 | echo Current master is 63 | #docker exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 64 | docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 65 | 66 | -------------------------------------------------------------------------------- /scenario/gitlab_traefik/gitlab/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | networks: 4 | web_net: 5 | external: true 6 | app_net: 7 | external: true 8 | 9 | volumes: 10 | gitlab_backup: 11 | name: gitlab_backup 12 | gitlab_data: 13 | name: gitlab_data 14 | gitlab_logs: 15 | name: gitlab_logs 16 | gitlab_config: 17 | name: gitlab_config 18 | 19 | services: 20 | gitlab: 21 | image: gitlab/gitlab-ce:13.10.3-ce.0 22 | restart: always 23 | container_name: gitlab 24 | hostname: 
git.DockerMe.ir 25 | environment: 26 | GITLAB_OMNIBUS_CONFIG: | 27 | # Set external url 28 | external_url 'https://git.DockerMe.ir' 29 | nginx['listen_port'] = 80 30 | nginx['listen_https'] = false 31 | nginx['http2_enabled'] = false 32 | 33 | nginx['proxy_set_headers'] = { 34 | "Host" => "$$http_host", 35 | "X-Real-IP" => "$$remote_addr", 36 | "X-Forwarded-For" => "$$proxy_add_x_forwarded_for", 37 | "X-Forwarded-Proto" => "https", 38 | "X-Forwarded-Ssl" => "on" 39 | } 40 | 41 | # Nginx Configuration 42 | nginx['client_max_body_size'] = '10240m' 43 | nginx['gzip_enabled'] = true 44 | nginx['listen_port'] = 80 45 | nginx['listen_https'] = false 46 | nginx['proxy_cache'] = 'gitlab' 47 | nginx['http2_enabled'] = true 48 | 49 | # gitlab backup config 50 | gitlab_rails['manage_backup_path'] = true 51 | gitlab_rails['backup_path'] = "/var/opt/gitlab/backups" 52 | gitlab_rails['backup_archive_permissions'] = 0644 53 | gitlab_rails['backup_keep_time'] = 604800 54 | gitlab_rails['env'] = {"SKIP" => "registry"} 55 | 56 | labels: 57 | - "traefik.enable=true" 58 | - "traefik.docker.network=web_net" 59 | - "traefik.http.routers.gitlab.entrypoints=http" 60 | - "traefik.http.routers.gitlab.rule=Host(`git.DockerMe.ir`)" 61 | - "traefik.http.routers.gitlab.middlewares=https-redirect" 62 | - "traefik.http.routers.gitlab-secure.entrypoints=https" 63 | - "traefik.http.routers.gitlab-secure.rule=Host(`git.DockerMe.ir`)" 64 | - "traefik.http.routers.gitlab-secure.tls=true" 65 | - "traefik.http.routers.gitlab-secure.tls.options=default" 66 | - "traefik.http.routers.gitlab-secure.middlewares=security@file,hsts@file" 67 | - "traefik.http.routers.gitlab.service=gitlab" 68 | - "traefik.http.routers.gitlab-secure.service=gitlab" 69 | - "traefik.http.routers.gitlab-secure.tls.certresolver=mycert" 70 | - "traefik.http.services.gitlab.loadbalancer.server.port=80" 71 | volumes: 72 | - gitlab_backup:/var/opt/gitlab/backups 73 | - gitlab_data:/var/opt/gitlab 74 | - gitlab_logs:/var/log/gitlab 75 | 
- gitlab_config:/etc/gitlab 76 | ports: 77 | - "2222:22" 78 | networks: 79 | app_net: 80 | web_net: -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Docker training with DockerMe 2 | ### The tools and sample needed to learn the Docker 3 | 4 | 5 | # Table and Content 6 | - [Configuration](configuration) 7 | - [daemon config](configuration/docker_daemon_config.md) 8 | - [docker networking](configuration/networking.md) 9 | - [daemon network bridge](configuration/bridge-networking.md) 10 | - [daemon network overlay](configuration/overlay-networking.md) 11 | - [docker logging plugin](configuration/plugin-logging.md) 12 | - [docker volume plugin](configuration/plugin-volumes.md) 13 | - [Scenario](scenario) 14 | - [running first container](scenario/running_first_container.md) 15 | - [webapps with docker](scenario/webapps_with_docker.md) 16 | - [portainer with docker](scenario/portainer-docker-compose.yml) 17 | - [graylog with docker](scenario/graylog-docker-compose.yml) 18 | - [weavescope with docker](scenario/weavescope-docker-compose.yml) 19 | - [wordpress with docker](scenario/wordpress-with-nginx) 20 | - [nginx with docker](scenario/web-service-nginx) 21 | - [private registry](scenario/registry) 22 | - [redis cluster with docker](scenario/redis_cluster_sample) 23 | - [elk with docker](scenario/elk) 24 | - [gitlab with docker](scenario/gitlab_traefik/gitlab) 25 | - [traefik with docker](scenario/gitlab_traefik/traefik) 26 | - [monitoring with docker](scenario/monitoring) 27 | - [logging with docker](scenario/logging) 28 | - [Dockerfile](Dockerfile) 29 | - [Dockerfile best practice](Dockerfile/dockerfile_best_practice.md) 30 | - [Dockerfile perl sample](Dockerfile/dockerfile_perl) 31 | - [Dockerfile multi-stage build](Dockerfile/dockerfile_multistage.md) 32 | - [Dockerfile nginx simple](Dockerfile/dockerfile_nginx_simple.md) 33 | - 
[Dockerfile flask app](Dockerfile/flask-app) 34 | - [Dockerfile static site](Dockerfile/static-site) 35 | - [Swarm Mode](swarm) 36 | - [Docker Orchestration](swarm/swarm.md) 37 | - [deploy app with swarm](swarm/deploying_app_with_swarm.md) 38 | 39 | 40 | # in-progress 41 | - **Security** 42 | - https://github.com/docker/labs/tree/master/security 43 | - https://github.com/docker/docker-bench-security 44 | - https://www.cisecurity.org/benchmark/docker/ 45 | - **Other learning docker resources** 46 | - book 47 | - free course 48 | - interactive shell 49 | 50 | # Reference 51 | - https://training.play-with-docker.com/ 52 | - https://github.com/docker/labs 53 | - https://container.training/ 54 | - https://github.com/AliyunContainerService/redis-cluster.git 55 | - https://www.docker.com 56 | - https://docs.docker.com/compose/ 57 | - https://registry.hub.docker.com/u/joshula/redis-sentinel/ 58 | - https://github.com/mdevilliers/docker-rediscluster 59 | 60 | 61 | 62 | # 🔗 Links 63 | [![Site](https://img.shields.io/badge/Dockerme.ir-0A66C2?style=for-the-badge&logo=docker&logoColor=white)](https://dockerme.ir/) 64 | [![linkedin](https://img.shields.io/badge/linkedin-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/ahmad-rafiee/) 65 | [![twitter](https://img.shields.io/badge/twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white)](https://twitter.com/@rafiee1001) 66 | [![telegram](https://img.shields.io/badge/telegram-0A66C2?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/dockerme) 67 | -------------------------------------------------------------------------------- /Dockerfile/dockerfile_multistage.md: -------------------------------------------------------------------------------- 1 | # [MULTI-STAGE BUILDS WITH DOCKER](https://duo.com/labs/tech-notes/multi-stage-builds-with-docker) 2 | 3 | ## 01. 
Introduction 4 | One of the potential benefits of containers is being able to dramatically reduce the attack surface of a running application. However, oftentimes tools are needed to build the application (e.g compilers) that aren't needed in the container that runs the application in production. Multi-stage containers are perfect for this. Your Dockerfile builds multiple containers, where the first ones do the building of the application, and the final container uses the output of the initial containers. 5 | 6 | The easiest way to get started with this is to have two FROM statements in your Dockerfile. The second FROM will build a second container and you can move files from the first container in the second one using the standard COPY command but with the flag --from=0 that will copy from the first container instead of from your local filesystem. 7 | 8 | ## 02. Walkthrough 9 | Let's walk through building a container for CoreDNS. CoreDNS "is a DNS server/forwarder, written in Go." As this will generate a compiled binary, we can aim to have a final container that contains just the CoreDNS binary and nothing else, keeping the attack surface as minimal as possible. 10 | 11 | Let's step through this Dockerfile 12 | 13 | ```bash 14 | FROM golang:1.14 15 | 16 | RUN git clone https://github.com/coredns/coredns.git /coredns 17 | RUN cd /coredns && make 18 | 19 | FROM scratch 20 | COPY --from=0 /coredns/coredns /coredns 21 | 22 | EXPOSE 53 53/udp 23 | CMD ["/coredns"] 24 | ``` 25 | The first container starts with the official golang container (FROM golang:1.14), then git clones the CoreDNS repository into the /coredns directory. Then it runs make and generates the CoreDNS binary at the location /coredns/coredns. 26 | 27 | Using FROM again starts a second container, in this case starting with the base container "scratch". Scratch is a special base container that is "an explicitly empty image, especially for building images 'FROM scratch'". 
28 | 29 | Next, COPY --from=0 /coredns/coredns /coredns uses the --from=0 flag to tell the COPY command to pull from the first container instead of the local filesystem so it pulls the /coredns/coredns binary from the first container and places it in the location /coredns in the current container. 30 | 31 | EXPOSE exposes port 53 on TCP and UDP so the DNS service can be accessed. CMD designates /coredns as the command that's run when the container is started. 32 | 33 | To build the container described above, put the Dockerfile commands listed into a file called Dockerfile and run docker build -t my-coredns-container . and it will build two containers but only tag the final container containing only the CoreDNS binary as "my-coredns-container" which can then be run with docker run my-coredns-container. 34 | 35 | ## 03. Naming Stages 36 | For longer Dockerfiles, or just for clarity you can name the stages of the build using the AS command. For that replace the first line in the example above with FROM golang:1.14 AS builder. Then when referencing it later you would use the name instead of number, so you would use COPY --from=builder /coredns/coredns /coredns. 37 | 38 | ## 04. Using External Images 39 | You can also reference an external image as a stage directly. For example if your container needs to use the default nginx config file, you can use COPY --from=nginx:latest /etc/nginx/nginx.conf /nginx.conf. 40 | 41 | ## 05. Summary 42 | Multi-Staging builds in Docker allows for greater clarity in Dockerfiles, and simpler final production containers with a potentially smaller attack surface. If you want to separate out your build process and separate it from your final production containers, you can do so a fairly straightforward manner. 
43 | 44 | -------------------------------------------------------------------------------- /scenario/monitoring/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # DockerMe.ir 2 | version: '3.4' 3 | services: 4 | traefik: 5 | image: traefik:latest 6 | container_name: traefik 7 | command: 8 | - "--api=true" 9 | - "--api.insecure=true" 10 | - "--providers.docker.endpoint=unix:///var/run/docker.sock" 11 | - "--providers.docker.exposedbydefault=false" 12 | - "--providers.docker.network=http_network" 13 | - "--entrypoints.http.address=:80" 14 | labels: 15 | - "traefik.enable=true" 16 | - "traefik.docker.network=http_network" 17 | - "traefik.http.routers.tra.entrypoints=http" 18 | - "traefik.http.routers.tra.rule=Host(`web.DockerMe.ir`)" 19 | - "traefik.http.services.tra.loadbalancer.server.port=8080" 20 | ports: 21 | - "80:80" 22 | volumes: 23 | - /var/run/docker.sock:/var/run/docker.sock:ro 24 | networks: 25 | - http_network 26 | 27 | prometheus: 28 | image: prom/prometheus:latest 29 | container_name: prometheus 30 | volumes: 31 | - ./prometheus/:/etc/prometheus/ 32 | - prometheus:/prometheus 33 | command: 34 | - '--config.file=/etc/prometheus/prometheus.yml' 35 | - '--storage.tsdb.path=/prometheus' 36 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 37 | - '--web.console.templates=/usr/share/prometheus/consoles' 38 | - '--web.enable-lifecycle' 39 | labels: 40 | - "traefik.enable=true" 41 | - "traefik.docker.network=http_network" 42 | - "traefik.http.routers.pro.entrypoints=http" 43 | - "traefik.http.routers.pro.rule=Host(`prometheus.DockerMe.ir`)" 44 | - "traefik.http.services.pro.loadbalancer.server.port=9090" 45 | networks: 46 | - http_network 47 | - services_network 48 | 49 | grafana: 50 | image: grafana/grafana 51 | container_name: grafana 52 | depends_on: 53 | - prometheus 54 | volumes: 55 | - grafana:/var/lib/grafana 56 | labels: 57 | - "traefik.enable=true" 58 | - 
"traefik.docker.network=http_network" 59 | - "traefik.http.routers.gra.entrypoints=http" 60 | - "traefik.http.routers.gra.rule=Host(`grafana.DockerMe.ir`)" 61 | - "traefik.http.services.gra.loadbalancer.server.port=3000" 62 | networks: 63 | - http_network 64 | - services_network 65 | 66 | alertmanager: 67 | image: prom/alertmanager 68 | container_name: alertmanager 69 | volumes: 70 | - ./alertmanager/:/etc/alertmanager/ 71 | command: 72 | - '--config.file=/etc/alertmanager/config.yml' 73 | - '--storage.path=/alertmanager' 74 | labels: 75 | - "traefik.enable=true" 76 | - "traefik.docker.network=http_network" 77 | - "traefik.http.routers.ale.entrypoints=http" 78 | - "traefik.http.routers.ale.rule=Host(`alert.DockerMe.ir`)" 79 | - "traefik.http.services.ale.loadbalancer.server.port=9093" 80 | networks: 81 | - http_network 82 | - services_network 83 | 84 | node-exporter: 85 | image: prom/node-exporter:latest 86 | container_name: node-exporter 87 | volumes: 88 | - /proc:/host/proc:ro 89 | - /sys:/host/sys:ro 90 | - /:/rootfs:ro 91 | command: 92 | - --path.procfs=/host/proc 93 | - --path.sysfs=/host/sys 94 | - --collector.filesystem.ignored-mount-points 95 | - ^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/) 96 | networks: 97 | - services_network 98 | 99 | cadvisor: 100 | image: google/cadvisor:latest 101 | container_name: cadvisor 102 | volumes: 103 | - /:/rootfs:ro 104 | - /var/run:/var/run:rw 105 | - /sys:/sys:ro 106 | - /var/lib/docker/:/var/lib/docker:ro 107 | - /dev/disk/:/dev/disk:ro 108 | networks: 109 | - services_network 110 | 111 | networks: 112 | http_network: 113 | services_network: 114 | 115 | volumes: 116 | grafana: 117 | name: grafana 118 | prometheus: 119 | name: prometheus 120 | # DockerMe.ir 121 | -------------------------------------------------------------------------------- /scenario/redis_cluster_sample/README.md: 
-------------------------------------------------------------------------------- 1 | # redis-cluster 2 | **Redis cluster with Docker Compose** 3 | 4 | Using Docker Compose to setup a redis cluster with sentinel. 5 | 6 | This project is inspired by the project of [https://github.com/mdevilliers/docker-rediscluster][1] 7 | 8 | ## Prerequisite 9 | 10 | Install [Docker][4] and [Docker Compose][3] in testing environment 11 | 12 | If you are using Windows, please execute the following command before "git clone" to disable changing the line endings of script files into DOS format 13 | 14 | ``` 15 | git config --global core.autocrlf false 16 | ``` 17 | 18 | ## Docker Compose template of Redis cluster 19 | 20 | The template defines the topology of the Redis cluster 21 | 22 | ``` 23 | master: 24 | image: redis:3 25 | slave: 26 | image: redis:3 27 | command: redis-server --slaveof redis-master 6379 28 | links: 29 | - master:redis-master 30 | sentinel: 31 | build: sentinel 32 | environment: 33 | - SENTINEL_DOWN_AFTER=5000 34 | - SENTINEL_FAILOVER=5000 35 | links: 36 | - master:redis-master 37 | - slave 38 | ``` 39 | 40 | There are following services in the cluster, 41 | 42 | * master: Redis master 43 | * slave: Redis slave 44 | * sentinel: Redis sentinel 45 | 46 | 47 | The sentinels are configured with a "mymaster" instance with the following properties - 48 | 49 | ``` 50 | sentinel monitor mymaster redis-master 6379 2 51 | sentinel down-after-milliseconds mymaster 5000 52 | sentinel parallel-syncs mymaster 1 53 | sentinel failover-timeout mymaster 5000 54 | ``` 55 | 56 | The details could be found in sentinel/sentinel.conf 57 | 58 | The default values of the environment variables for Sentinel are as following 59 | 60 | * SENTINEL_QUORUM: 2 61 | * SENTINEL_DOWN_AFTER: 30000 62 | * SENTINEL_FAILOVER: 180000 63 | 64 | 65 | 66 | ## Play with it 67 | 68 | Build the sentinel Docker image 69 | 70 | ``` 71 | docker-compose build 72 | ``` 73 | 74 | Start the redis cluster 75 | 76 | 
``` 77 | docker-compose up -d 78 | ``` 79 | 80 | Check the status of redis cluster 81 | 82 | ``` 83 | docker-compose ps 84 | ``` 85 | 86 | The result is 87 | 88 | ``` 89 | Name Command State Ports 90 | -------------------------------------------------------------------------------------- 91 | rediscluster_master_1 docker-entrypoint.sh redis ... Up 6379/tcp 92 | rediscluster_sentinel_1 docker-entrypoint.sh redis ... Up 26379/tcp, 6379/tcp 93 | rediscluster_slave_1 docker-entrypoint.sh redis ... Up 6379/tcp 94 | ``` 95 | 96 | Scale out the instance number of sentinel and Scale out the instance number of slaves 97 | 98 | ``` 99 | docker-compose up -d --scale slave=2 --scale sentinel=3 100 | ``` 101 | 102 | Check the status of redis cluster 103 | 104 | ``` 105 | docker-compose ps 106 | ``` 107 | 108 | The result is 109 | 110 | ``` 111 | Name Command State Ports 112 | --------------------------------------------------------------------------------------- 113 | redis_cluster_master_1 docker-entrypoint.sh redis ... Up 6379/tcp 114 | redis_cluster_sentinel_1 sentinel-entrypoint.sh Up 26379/tcp, 6379/tcp 115 | redis_cluster_sentinel_2 sentinel-entrypoint.sh Up 26379/tcp, 6379/tcp 116 | redis_cluster_sentinel_3 sentinel-entrypoint.sh Up 26379/tcp, 6379/tcp 117 | redis_cluster_slave_1 docker-entrypoint.sh redis ... Up 6379/tcp 118 | redis_cluster_slave_2 docker-entrypoint.sh redis ... Up 6379/tcp 119 | ``` 120 | 121 | Execute the test scripts 122 | ``` 123 | ./my_test.sh 124 | ``` 125 | to simulate stop and recover the Redis master. And you will see the master is switched to slave automatically. 
126 | 127 | Or, you can do the test manually to pause/unpause redis server through 128 | 129 | ``` 130 | docker pause redis_cluster_master_1 131 | docker unpause redis_cluster_master_1 132 | ``` 133 | And get the sentinel information with the following commands 134 | 135 | ``` 136 | docker exec redis_cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster 137 | ``` -------------------------------------------------------------------------------- /scenario/registry/registry_part-2.md: -------------------------------------------------------------------------------- 1 | # Part 2 - Running a Secured Registry Container in Linux 2 | 3 | We saw how to run a simple registry container in [Part 1](part-1.md), using the official Docker registry image. The registry server can be configured to serve HTTPS traffic on a known domain, so it's straightforward to run a secure registry for private use with a self-signed SSL certificate. 4 | 5 | ## Generating the SSL Certificate in Linux 6 | 7 | The Docker docs explain how to [generate a self-signed certificate](https://docs.docker.com/registry/insecure/#/using-self-signed-certificates) on Linux using OpenSSL: 8 | 9 | ``` 10 | $ mkdir -p certs 11 | $ openssl req \ 12 | -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ 13 | -x509 -days 365 -out certs/domain.crt 14 | Generating a 4096 bit RSA private key 15 | ........++ 16 | ............................................................++ 17 | writing new private key to 'certs/domain.key' 18 | ----- 19 | You are about to be asked to enter information that will be incorporated 20 | into your certificate request. 21 | What you are about to enter is what is called a Distinguished Name or a DN. 22 | There are quite a few fields but you can leave some blank 23 | For some fields there will be a default value, 24 | If you enter '.', the field will be left blank.
25 | ----- 26 | Country Name (2 letter code) [AU]:US 27 | State or Province Name (full name) [Some-State]: 28 | Locality Name (eg, city) []: 29 | Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker 30 | Organizational Unit Name (eg, section) []: 31 | Common Name (e.g. server FQDN or YOUR name) []:localhost 32 | Email Address []: 33 | ``` 34 | If you are running the registry locally, be sure to use your host name as the CN. 35 | 36 | To get the docker daemon to trust the certificate, copy the domain.crt file into a directory named after the registry host and port. 37 | ``` 38 | $ sudo su 39 | $ mkdir /etc/docker/certs.d 40 | $ mkdir /etc/docker/certs.d/localhost:5000 41 | $ cp `pwd`/certs/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt 42 | ``` 43 | Make sure to restart the docker daemon. 44 | ``` 45 | $ sudo service docker restart 46 | ``` 47 | Now we have an SSL certificate and can run a secure registry. 48 | 49 | ## Running the Registry Securely 50 | 51 | The registry server supports several configuration switches as environment variables, including the details for running securely. We can use the same image we've already used, but configured for HTTPS. 52 | 53 | If you have an insecure registry container still running from [Part 1](part-1.md), remove it: 54 | 55 | ``` 56 | $ docker kill registry 57 | $ docker rm registry 58 | ``` 59 | 60 | For the secure registry, we need to run a container which has the SSL certificate and key files available, which we'll do with an additional volume mount (so we have one volume for registry data, and one for certs).
We also need to specify the location of the certificate files, which we'll do with environment variables: 61 | 62 | ``` 63 | $ mkdir registry-data 64 | $ docker run -d -p 5000:5000 --name registry \ 65 | --restart unless-stopped \ 66 | -v $(pwd)/registry-data:/var/lib/registry -v $(pwd)/certs:/certs \ 67 | -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ 68 | -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ 69 | registry 70 | ``` 71 | 72 | The new parts to this command are: 73 | 74 | - `--restart unless-stopped` - restart the container when it exits, unless it has been explicitly stopped. When the host restarts, Docker will start the registry container, so it's always available. 75 | - `-v $(pwd)/certs:/certs` - mount the local `certs` folder into the container, so the registry server can access the certificate and key files; 76 | - `-e REGISTRY_HTTP_TLS_CERTIFICATE` - specify the location of the SSL certificate file; 77 | - `-e REGISTRY_HTTP_TLS_KEY` - specify the location of the SSL key file. 78 | 79 | We'll let Docker assign a random IP address to this container, because we'll be accessing it by host name. The registry is running securely now, but we've used a self-signed certificate for an internal domain name. 80 | 81 | ## Accessing the Secure Registry 82 | 83 | We're ready to push an image into our secure registry. 84 | ``` 85 | $ docker push localhost:5000/hello-world 86 | $ docker pull localhost:5000/hello-world 87 | ``` 88 | We can go one step further with the open-source registry server, and add basic authentication - so we can require users to securely log in to push and pull images.
89 | 90 | ## Next 91 | 92 | - [Part 3 - Using Basic Authentication with a Secured Registry](part-3.md) -------------------------------------------------------------------------------- /scenario/web-service-nginx/nginx_web.md: -------------------------------------------------------------------------------- 1 | # Running nginx web service with docker 2 | 3 | ## Step1: running nginx with docker commands 4 | 5 | ### Run Simple Nginx Service 6 | ```bash 7 | docker run --name nginx -p 80:80 -d nginx:alpine 8 | ``` 9 | 10 | ### create nginx directory 11 | ```bash 12 | mkdir -p ./conf.d 13 | mkdir -p ./certs 14 | tree ./nginx 15 | ``` 16 | 17 | ### create self sign certificate with openssl command and check it 18 | ```bash 19 | # certificate location 20 | CERT_LOCATION=./certs 21 | 22 | # generate key and cert 23 | openssl req -x509 -nodes -newkey \ 24 | rsa:4096 -days 365 \ 25 | -keyout $CERT_LOCATION/key.pem \ 26 | -subj "/C=IR/ST=Iran/L=Tehran/O=DockerMe/OU=IT/CN=DockerMe.ir/emailAddress=rafiee1001@gmail.com" \ 27 | -out $CERT_LOCATION/cert.pem 28 | 29 | # cehck certificate 30 | openssl x509 -text -noout -in $CERT_LOCATION/cert.pem 31 | 32 | # check nginx directory 33 | tree nginx 34 | ``` 35 | ### create sample nginx.conf 36 | ```bash 37 | 38 | user nginx; 39 | worker_processes auto; 40 | 41 | error_log /var/log/nginx/error.log notice; 42 | pid /var/run/nginx.pid; 43 | 44 | 45 | events { 46 | worker_connections 1024; 47 | } 48 | 49 | 50 | http { 51 | include /etc/nginx/mime.types; 52 | default_type application/octet-stream; 53 | 54 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 55 | '$status $body_bytes_sent "$http_referer" ' 56 | '"$http_user_agent" "$http_x_forwarded_for"'; 57 | 58 | access_log /var/log/nginx/access.log main; 59 | 60 | sendfile on; 61 | #tcp_nopush on; 62 | 63 | keepalive_timeout 65; 64 | 65 | #gzip on; 66 | 67 | include /etc/nginx/conf.d/*.conf; 68 | } 69 | ``` 70 | 71 | ### create sample configuration 72 | ```bash 73 | 
vim ./conf.d/weavescope.conf 74 | server { 75 | listen 443 ssl http2; 76 | server_name weavescope.dockerme.ir; 77 | 78 | # SSL 79 | ssl_certificate /etc/nginx/certs/cert.pem; 80 | ssl_certificate_key /etc/nginx/certs/key.pem; 81 | 82 | location / { 83 | proxy_pass http://weavescope:4040; 84 | proxy_http_version 1.1; 85 | proxy_set_header Upgrade $http_upgrade; 86 | proxy_set_header Connection "upgrade"; 87 | proxy_set_header Host $host; 88 | add_header Strict-Transport-Security "max-age=63072000; includeSubdomains"; 89 | add_header X-XSS-Protection "1; mode=block"; 90 | add_header X-Content-Type-Options nosniff; 91 | add_header X-Frame-Options DENY; 92 | add_header 'Referrer-Policy' 'strict-origin'; 93 | add_header X-Powered-By "Ahmad Rafiee | DockerMe.ir"; 94 | proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP 95 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 96 | proxy_set_header X-Forwarded-Proto $scheme; 97 | proxy_connect_timeout 90; 98 | proxy_send_timeout 90; 99 | proxy_read_timeout 90; 100 | proxy_buffers 32 4k; 101 | #Http Authentication 102 | auth_basic "Access to the staging site"; 103 | auth_basic_user_file /etc/nginx/conf.d/.htpasswd; 104 | } 105 | } 106 | server { 107 | listen 80; 108 | server_name weavescope.dockerme.ir; 109 | return 301 https://$host$request_uri; 110 | } 111 | ``` 112 | 113 | ### Restricting Access with HTTP Basic Authentication 114 | 115 | Creating a Password File: 116 | 117 | ```bash 118 | # Verify that apache2-utils (Debian, Ubuntu) or httpd-tools (RHEL/CentOS/Oracle Linux) is installed. 119 | apt install apache2-utils 120 | 121 | # Create a password file and a first user. 
Run the htpasswd utility with the -c flag (to create a new file), the file pathname as the first argument, and the username as the second argument 122 | sudo htpasswd -c ./conf.d/.htpasswd user1 123 | 124 | # Configuring NGINX and NGINX Plus for HTTP Basic Authentication 125 | location /api { 126 | auth_basic “Administrator’s Area”; 127 | auth_basic_user_file /etc/nginx/conf.d/.htpasswd; 128 | } 129 | ``` 130 | ### Combining Basic Authentication with Access Restriction by IP Address 131 | ```bash 132 | # Combining Basic Authentication with Access Restriction by IP Address 133 | location /api { 134 | #... 135 | deny 192.168.1.2; 136 | allow 192.168.1.1/24; 137 | allow 127.0.0.1; 138 | deny all; 139 | } 140 | ``` 141 | 142 | ### Run with other configuration 143 | ```bash 144 | docker run -d --name nginx --hostname nginx -p 80:80 -p 443:443 -v ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro -v ${PWD}/conf.d:/etc/nginx/conf.d/ -v ${PWD}/certs:/etc/nginx/certs nginx:alpine 145 | ``` 146 | 147 | ## Step2: running nginx web service with compose file and docker-compose 148 | 149 | ### compose file 150 | ```bash 151 | --- 152 | version: '3.4' 153 | 154 | networks: 155 | http_net: 156 | external: true 157 | web_net: 158 | external: false 159 | 160 | services: 161 | web: 162 | image: nginx:alpine 163 | restart: on-failure 164 | container_name: web 165 | ports: 166 | - 443:443 167 | - 80:80 168 | volumes: 169 | - ./certs/:/etc/nginx/certs 170 | - ./conf.d/:/etc/nginx/conf.d 171 | networks: 172 | - http_net 173 | - web_net 174 | ``` 175 | 176 | ### check and run compose file 177 | ```bash 178 | docker-compose config 179 | docker-compose up -d 180 | docker-compose logs -f 181 | docker-compose ps 182 | ``` 183 | 184 | 185 | ### 🔗 Refrence link: 186 | 187 | [Nginx Restricting Access](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-http-basic-authentication/) -------------------------------------------------------------------------------- 
/scenario/wordpress-with-nginx/ReadMe.md: -------------------------------------------------------------------------------- 1 | # Running wordpress with docker 2 | ## 3 | ![wordpress with DockerMe](wordpress.png) 4 | 5 | ## Step1: running wordpress with docker commands 6 | ### pull all needed images 7 | ```bash 8 | docker pull wordpress:latest 9 | docker pull nginx:latest 10 | docker pull mysql:5.7 11 | ``` 12 | 13 | ### create network and check it 14 | ```bash 15 | docker network create --driver bridge wp-net 16 | docker network ls 17 | docker inspect wp-net 18 | ``` 19 | 20 | ### create volume and check it 21 | ```bash 22 | docker volume create --name wp-data 23 | docker volume create --name db-data 24 | docker volume ls 25 | docker inspect wp-data 26 | docker inspect db-data 27 | ``` 28 | 29 | ### run mysql service and check it 30 | ```bash 31 | docker run -d --name mysql --hostname mysql \ 32 | --network=wp-net --restart=always \ 33 | --mount=source=db-data,target=/var/lib/mysql \ 34 | -e MYSQL_ROOT_PASSWORD=sdvfsacsiojoijsaefawefmwervs \ 35 | -e MYSQL_DATABASE=DockerMe \ 36 | -e MYSQL_USER=DockerMe \ 37 | -e MYSQL_PASSWORD=sdvfsacsiojoijsaefawefmwervs \ 38 | mysql:5.7 39 | 40 | # check mysql services 41 | docker ps 42 | docker stats --no-stream mysql 43 | docker logs mysql 44 | docker exec -i mysql mysql -u root -psdvfsacsiojoijsaefawefmwervs <<< "show databases" 45 | ``` 46 | 47 | ### run wordpress service and check it 48 | ```bash 49 | docker run -d --name wordpress --hostname wordpress \ 50 | --network=wp-net --restart=always \ 51 | --mount=source=wp-data,target=/var/www/html/ \ 52 | -e WORDPRESS_DB_PASSWORD=sdvfsacsiojoijsaefawefmwervs \ 53 | -e WORDPRESS_DB_HOST=mysql:3306 \ 54 | -e WORDPRESS_DB_USER=DockerMe \ 55 | -e WORDPRESS_DB_NAME=DockerMe \ 56 | wordpress:latest 57 | 58 | # check wordpress services 59 | docker ps 60 | docker stats --no-stream 61 | docker logs wordpress 62 | ``` 63 | 64 | ### create nginx directory 65 | ```bash 66 | mkdir -p 
./nginx/conf.d 67 | mkdir -p ./nginx/certs 68 | tree ./nginx 69 | ``` 70 | 71 | ### create nginx config file for wordpress proxy pass 72 | ```bash 73 | vim ./nginx/conf.d/wordpress.conf 74 | server { 75 | listen 443 ssl; 76 | server_name wp.dockerme.ir; 77 | 78 | # SSL 79 | ssl_certificate /etc/nginx/certs/cert.pem; 80 | ssl_certificate_key /etc/nginx/certs/key.pem; 81 | 82 | location / { 83 | proxy_pass http://wordpress:80; 84 | proxy_set_header Host $http_host; # required for docker client's sake 85 | proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP 86 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 87 | proxy_set_header X-Forwarded-Proto $scheme; 88 | add_header X-Powered-By "Ahmad Rafiee | DockerMe.ir"; 89 | } 90 | } 91 | 92 | server { 93 | listen 80; 94 | server_name wp.dockerme.ir; 95 | return 301 https://$host$request_uri; 96 | } 97 | ``` 98 | 99 | ### create self sign certificate with openssl command and check it 100 | ```bash 101 | # certificate location 102 | CERT_LOCATION=./nginx/certs 103 | 104 | # generate key and cert 105 | openssl req -x509 -nodes -newkey \ 106 | rsa:4096 -days 365 \ 107 | -keyout $CERT_LOCATION/key.pem \ 108 | -subj "/C=IR/ST=Iran/L=Tehran/O=DockerMe/OU=IT/CN=DockerMe.ir/emailAddress=rafiee1001@gmail.com" \ 109 | -out $CERT_LOCATION/cert.pem 110 | 111 | # cehck certificate 112 | openssl x509 -text -noout -in $CERT_LOCATION/cert.pem 113 | 114 | # check nginx directory 115 | tree nginx 116 | ``` 117 | 118 | ### run nginx services and check it 119 | ```bash 120 | docker run -itd --name nginx --hostname nginx \ 121 | --network=wp-net --restart=always \ 122 | -v ${PWD}/nginx/conf.d:/etc/nginx/conf.d \ 123 | -v ${PWD}/nginx/certs:/etc/nginx/certs \ 124 | -p 80:80 -p 443:443 \ 125 | nginx:latest 126 | 127 | # check nginx services 128 | docker ps 129 | docker stats --no-stream 130 | docker logs nginx 131 | curl -I -L -k http://127.0.0.1 132 | ``` 133 | 134 | ### backup databases 135 | ```bash 136 | docker 
exec -i mysql mysqldump -u root -psdvfsacsiojoijsaefawefmwervs --all-databases --single-transaction --quick > full-backup-$(date +%F).sql 137 | 138 | # check backup 139 | ls | grep full-backup-*.sql 140 | du -sh full-backup-*.sql 141 | ``` 142 | 143 | ## Step2: running wordpress with compose-file and docker-compose 144 | 145 | ### compose file 146 | ```bash 147 | --- 148 | version: '3.8' 149 | 150 | networks: 151 | wp_net: 152 | name: wp_net 153 | driver_opts: 154 | com.docker.network.bridge.name: wp_net 155 | 156 | volumes: 157 | wp_db: 158 | name: wp_db 159 | wp_wp: 160 | name: wp_wp 161 | 162 | services: 163 | db: 164 | image: mysql:5.7 165 | container_name: mysql 166 | volumes: 167 | - wp_db:/var/lib/mysql 168 | restart: always 169 | environment: 170 | MYSQL_ROOT_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 171 | MYSQL_DATABASE: DockerMe 172 | MYSQL_USER: DockerMe 173 | MYSQL_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 174 | networks: 175 | - wp_net 176 | 177 | wordpress: 178 | image: wordpress:latest 179 | container_name: wordpress 180 | volumes: 181 | - wp_wp:/var/www/html/ 182 | depends_on: 183 | - db 184 | restart: always 185 | environment: 186 | WORDPRESS_DB_HOST: db:3306 187 | WORDPRESS_DB_USER: DockerMe 188 | WORDPRESS_DB_NAME: DockerMe 189 | WORDPRESS_DB_PASSWORD: sdfascsdvsfdvweliuoiquowecefcwaefef 190 | ports: 191 | - 8000:80 192 | networks: 193 | - wp_net 194 | 195 | nginx: 196 | image: nginx:alpine 197 | container_name: nginx 198 | restart: always 199 | depends_on: 200 | - wordpress 201 | volumes: 202 | - ./nginx/conf.d:/etc/nginx/conf.d 203 | - ./nginx/certs:/etc/nginx/certs 204 | ports: 205 | - 80:80 206 | - 443:443 207 | networks: 208 | - wp_net 209 | ``` 210 | ### check and run compose file 211 | ```bash 212 | docker-compose config 213 | docker-compose up -d 214 | docker-compose logs -f 215 | docker-compose ps 216 | ``` 217 | -------------------------------------------------------------------------------- /scenario/registry/registry.md: 
-------------------------------------------------------------------------------- 1 | ### Create the required directories 2 | ```bash 3 | # create directory 4 | mkdir -p auth data 5 | 6 | # check 7 | tree 8 | ``` 9 | 10 | ### Create the main nginx configuration. Paste this code block into a new file called auth/nginx.conf: 11 | ```bash 12 | vim nginx.conf 13 | 14 | events { 15 | worker_connections 1024; 16 | } 17 | 18 | http { 19 | 20 | upstream docker-registry { 21 | server registry:5000; 22 | } 23 | 24 | ## Set a variable to help us decide if we need to add the 25 | ## 'Docker-Distribution-Api-Version' header. 26 | ## The registry always sets this header. 27 | ## In the case of nginx performing auth, the header is unset 28 | ## since nginx is auth-ing before proxying. 29 | map $upstream_http_docker_distribution_api_version $docker_distribution_api_version { 30 | '' 'registry/2.0'; 31 | } 32 | 33 | server { 34 | listen 443 ssl; 35 | server_name repo.DockerMe.ir; 36 | 37 | # SSL 38 | ssl_certificate /etc/nginx/conf.d/cert.pem; 39 | ssl_certificate_key /etc/nginx/conf.d/key.pem; 40 | 41 | # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html 42 | ssl_protocols TLSv1.1 TLSv1.2; 43 | ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; 44 | ssl_prefer_server_ciphers on; 45 | ssl_session_cache shared:SSL:10m; 46 | 47 | # disable any limits to avoid HTTP 413 for large image uploads 48 | client_max_body_size 0; 49 | 50 | # required to avoid HTTP 411: see Issue #1486 (https://github.com/moby/moby/issues/1486) 51 | chunked_transfer_encoding on; 52 | 53 | location /v2/ { 54 | # Do not allow connections from docker 1.5 and earlier 55 | # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents 56 | if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { 57 | return 404; 58 | } 59 | 60 | # To add basic authentication to v2 use auth_basic setting. 
61 | auth_basic "Registry realm"; 62 | auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; 63 | 64 | ## If $docker_distribution_api_version is empty, the header is not added. 65 | ## See the map directive above where this variable is defined. 66 | add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always; 67 | 68 | proxy_pass http://docker-registry; 69 | proxy_set_header Host $http_host; # required for docker client's sake 70 | proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP 71 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 72 | proxy_set_header X-Forwarded-Proto $scheme; 73 | proxy_read_timeout 900; 74 | } 75 | } 76 | server { 77 | listen 80; 78 | server_name repo.DockerMe.ir; 79 | return 301 https://$host$request_uri; 80 | } 81 | } 82 | ``` 83 | 84 | ### Create a password file auth/nginx.htpasswd for “ahmad” and “Docker_training_with_DockerMe” 85 | ```bash 86 | # create htpasswd file 87 | sudo htpasswd -c auth/nginx.htpasswd ahmad 88 | 89 | # check 90 | cat auth/nginx.htpasswd 91 | ``` 92 | 93 | ### create certificate files to the auth/ directory. 
94 | ```bash 95 | # certificate location 96 | CERT_LOCATION=./auth/ 97 | 98 | # generate key and cert 99 | openssl req -x509 -nodes -newkey \ 100 | rsa:4096 -days 365 \ 101 | -keyout $CERT_LOCATION/key.pem \ 102 | -subj "/C=IR/ST=Iran/L=Tehran/O=DockerMe/OU=IT/CN=DockerMe.ir/emailAddress=rafiee1001@gmail.com" \ 103 | -out $CERT_LOCATION/cert.pem 104 | 105 | # cehck certificate 106 | openssl x509 -text -noout -in $CERT_LOCATION/cert.pem 107 | 108 | # check 109 | tree 110 | ``` 111 | 112 | ### docker network create 113 | ```bash 114 | docker network create hub 115 | docker network ls 116 | ``` 117 | 118 | ## run with docker commands 119 | ### run registry container 120 | ```bash 121 | # run registry container 122 | docker run -d -p 127.0.0.1:5000:5000 -v ${PWD}/data:/var/lib/registry --net hub --restart=always --name registry registry:2 123 | 124 | # check 125 | docker ps 126 | docker logs -f registry 127 | ``` 128 | 129 | ### run web container 130 | ```bash 131 | # run web container 132 | docker run -d -p 443:443 -p 80:80 -v ${PWD}/auth:/etc/nginx/conf.d -v ${PWD}/nginx.conf:/etc/nginx/nginx.conf:ro \ 133 | --net hub --restart=always --name web nginx:alpine 134 | 135 | # check 136 | docker ps 137 | docker logs -f web 138 | ``` 139 | 140 | ## run with docker compose 141 | ```bash 142 | nginx: 143 | image: "nginx:alpine" 144 | ports: 145 | - 443:443 146 | - 80:80 147 | links: 148 | - registry:registry 149 | volumes: 150 | - ./auth:/etc/nginx/conf.d 151 | - ./nginx.conf:/etc/nginx/nginx.conf:ro 152 | 153 | registry: 154 | image: registry:2 155 | volumes: 156 | - ./data:/var/lib/registry 157 | ``` 158 | 159 | ### run compose file and check it 160 | ```bash 161 | # run 162 | docker-compose up -d 163 | # check 164 | docker-compose ps 165 | docker-compose logs -f 166 | ``` 167 | 168 | ### For the use of the self-sign certificate, we should configure insecure-registry option 169 | ```bash 170 | # --insecure-registry list Enable insecure registry communication 171 | vim 
/etc/systemd/system/docker.service.d/override.conf 172 | [Service] 173 | ExecStart= 174 | ExecStart=/usr/bin/dockerd --insecure-registry https://repo.dockerme.ir 175 | 176 | # systemd reload and restart docker 177 | systemctl daemon-reload 178 | systemctl restart docker 179 | systemctl status docker 180 | 181 | # check 182 | docker info | grep -A1 "Insecure Registries:" 183 | ``` 184 | 185 | ### login to registry 186 | ```bash 187 | docker login https://repo.dockerme.ir -u ahmad -p Docker_training_with_DockerMe 188 | ``` 189 | 190 | ### Image Tag and Push to local Registry 191 | ```bash 192 | # tag image 193 | docker tag nginx:alpine repo.dockerme.ir/nginx:alpine 194 | # push image 195 | docker push repo.dockerme.ir/nginx:alpine 196 | # check repository 197 | curl -u "ahmad:Docker_training_with_DockerMe" -k https://repo.dockerme.ir/v2/_catalog 198 | ``` 199 | -------------------------------------------------------------------------------- /scenario/registry/registry_part-1.md: -------------------------------------------------------------------------------- 1 | # Part 1 - Running a Registry Container in Linux 2 | 3 | There are several ways to run a registry container. The simplest is to run an insecure registry over HTTP, but for that we need to configure Docker to explicitly allow insecure access to the registry. 4 | 5 | Docker expects all registries to run on HTTPS. The next section of this lab will introduce a secure version of our registry container, but for this part of the tutorial we will run a version on HTTP. When registering a image, Docker returns an error message like this: 6 | ``` 7 | http: server gave HTTP response to HTTPS client 8 | ``` 9 | The Docker Engine needs to be explicitly setup to use HTTP for the insecure registry. 
Edit or create `/etc/docker/docker` file: 10 | ``` 11 | $ sudo vi /etc/docker/docker 12 | 13 | # add this line 14 | DOCKER_OPTS="--insecure-registry localhost:5000" 15 | ``` 16 | Close and save the file, then restart the docker daemon. 17 | ``` 18 | $ sudo service docker restart 19 | ``` 20 | In Docker for Mac, the `Preferences` menu lets you set the address for an insecure registry under the `Daemon` panel: 21 | ![MacOS menu](../images/docker_osx_insecure_registry.png) 22 | 23 | In Docker for Windows, the `Settings` menu lets you set the address for an insecure registry under the `Daemon` panel: 24 | ![MacOS menu](../images/docker_windows_insecure_registry.png) 25 | ## Testing the Registry Image 26 | First we'll test that the registry image is working correctly, by running it without any special configuration: 27 | ``` 28 | $ sudo docker run -d -p 5000:5000 --name registry registry:2 29 | ``` 30 | ## Understanding Image Names 31 | Typically we work with images from the Docker Store, which is the default registry for the Docker Engine. Commands using just the image repository name work fine, like this: 32 | ``` 33 | $ sudo docker pull hello-world 34 | ``` 35 | `hello-world` is the repository name, which we are using as a short form of the full image name. The full name is `docker.io/hello-world:latest`. That breaks down into three parts: 36 | 37 | - `docker.io` - the hostname of the registry which stores the image; 38 | - `hello-world` - the repository name, in this case in `{imageName}` format; 39 | - `latest` - the image tag. 40 | 41 | If a tag isn't specified, then the default `latest` is used. If a registry hostname isn't specified then the default `docker.io` for Docker Store is used. If you want to use images with any other registry, you need to explicitly specify the hostname - the default is always Docker Store, you can't change to a different default registry. 
42 | 43 | With a local registry, the hostname and the custom port used by the registry is the full registry address, e.g. `localhost:5000`. 44 | ``` 45 | $ hostname 46 | ``` 47 | 48 | ## Pushing and Pulling from the Local Registry 49 | 50 | Docker uses the hostname from the full image name to determine which registry to use. We can build images and include the local registry hostname in the image tag, or use the `docker tag` command to add a new tag to an existing image. 51 | 52 | These commands pull a public image from Docker Store, tag it for use in the private registry with the full name `localhost:5000/hello-world`, and then push it to the registry: 53 | 54 | ``` 55 | $ sudo docker tag hello-world localhost:5000/hello-world 56 | $ sudo docker push localhost:5000/hello-world 57 | ``` 58 | 59 | When you push the image to your local registry, you'll see similar output to when you push a public image to the Hub: 60 | 61 | ``` 62 | The push refers to a repository [localhost:5000/hello-world] 63 | a55ad2cda2bf: Pushed 64 | cfbe7916c207: Pushed 65 | fe4c16cbf7a4: Pushed 66 | latest: digest: sha256:79e028398829da5ce98799e733bf04ac2ee39979b238e4b358e321ec549da5d6 size: 948 67 | ``` 68 | On the local machine, you can remove the new image tag and the original image, and pull it again from the local registry to verify it was correctly stored: 69 | ``` 70 | $ sudo docker rmi localhost:5000/hello-world 71 | $ sudo docker rmi hello-world 72 | $ sudo docker pull localhost:5000/hello-world 73 | ``` 74 | That exercise shows the registry works correctly, but at the moment it's not very useful because all the image data is stored in the container's writable storage area, which will be lost when the container is removed. To store the data outside of the container, we need to mount a host directory when we start the container. 
75 | 76 | ## Running a Registry Container with External Storage 77 | Remove the existing registry container by removing the container which holds the storage layer. Any images pushed will be deleted: 78 | ``` 79 | $ sudo docker kill registry 80 | $ sudo docker rm registry 81 | ``` 82 | In this example, the new container will use a host-mounted Docker volume. When the registry server in the container writes image layer data, it appears to be writing to a local directory in the container but it will be writing to a directory on the host. 83 | 84 | Create the registry: 85 | ``` 86 | $ mkdir registry-data 87 | $ sudo docker run -d -p 5000:5000 \ 88 | --name registry \ 89 | -v `pwd`/registry-data:/var/lib/registry \ 90 | registry:2 91 | ``` 92 | Tag and push the container with the new IP address of the registry. 93 | ``` 94 | docker tag hello-world localhost:5000/hello-world 95 | docker push localhost:5000/hello-world 96 | ``` 97 | Repeating the previous `docker push` command uploads an image to the registry container, and the layers will be stored in the container's `/var/lib/registry` directory, which is actually mapped to the `$(pwd)/registry-data` directory on you local machine. The `tree` command will show the directory structure the registry server uses: 98 | 99 | ``` 100 | $ tree registry-data 101 | . 102 | |____docker 103 | | |____registry 104 | | | |____v2 105 | | | | |____blobs 106 | | | | | |____sha256 107 | | | | | | |____1f 108 | | | | | | | |____1fad42e8a0d9781677d366b1100defcadbe653280300cf62a23e07eb5e9d3a41 109 | 110 | ... 111 | ``` 112 | Storing data outside of the container means we can build a new version of the registry image and replace the old container with a new one using the same host mapping - so the new registry container has all the images stored by the previous container. 113 | 114 | Using an insecure registry also isn't practical in multi-user scenarios. 
Effectively there's no security so anyone can push and pull images if they know the registry hostname. The registry server supports authentication, but only over a secure SSL connection. We'll run a secure version of the registry server in a container next. 115 | 116 | ## Next 117 | 118 | - [Part 2 - Running a Secured Registry Container](part-2.md) -------------------------------------------------------------------------------- /scenario/registry/registry_part-3.md: -------------------------------------------------------------------------------- 1 | # Part 3 - Using Basic Authentication with a Secured Registry in Linux 2 | 3 | From [Part 2](part-2.md) we have a registry running in a Docker container, which we can securely access over HTTPS from any machine in our network. We used a self-signed certificate, which has security implications, but you could buy an SSL certificate from a CA instead, and use that for your registry. With secure communication in place, we can set up user authentication. 4 | 5 | ## Usernames and Passwords 6 | 7 | The registry server and the Docker client support [basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) over HTTPS. The server uses a file with a collection of usernames and encrypted passwords. The file uses Apache's htpasswd format.
8 | 9 | Create the password file with an entry for user "moby" with password "gordon"; 10 | ``` 11 | $ mkdir auth 12 | $ sudo docker run --entrypoint htpasswd registry:latest -Bbn moby gordon > auth/htpasswd 13 | ``` 14 | The options are: 15 | 16 | - --entrypoint Overwrite the default ENTRYPOINT of the image 17 | - -B to force bcrypt vs default md5 18 | - -b run in batch mode 19 | - -n display results 20 | 21 | We can verify the entries have been written by checking the file contents - which shows the user names in plain text and a cipher text password: 22 | 23 | ``` 24 | $ cat auth/htpasswd 25 | moby:$2y$05$Geu2Z4LN0QDpUJBHvP5JVOsKOLH/XPoJBqISv1D8Aeh6LVGvjWWVC 26 | ``` 27 | 28 | ## Running an Authenticated Secure Registry 29 | 30 | Adding authentication to the registry is a similar process to adding SSL - we need to run the registry with access to the `htpasswd` file on the host, and configure authentication using environment variables. 31 | 32 | As before, we'll remove the existing container and run a new one with authentication configured: 33 | 34 | ``` 35 | $ sudo docker kill registry 36 | $ sudo docker rm registry 37 | $ sudo docker run -d -p 5000:5000 --name registry \ 38 | --restart unless-stopped \ 39 | -v $(pwd)/registry-data:/var/lib/registry \ 40 | -v $(pwd)/certs:/certs \ 41 | -v $(pwd)/auth:/auth \ 42 | -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ 43 | -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ 44 | -e REGISTRY_AUTH=htpasswd \ 45 | -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ 46 | -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd" \ 47 | registry 48 | ``` 49 | 50 | The options for this container are: 51 | 52 | - `-v $(pwd)/auth:/auth` - mount the local `auth` folder into the container, so the registry server can access `htpasswd` file; 53 | - `-e REGISTRY_AUTH=htpasswd` - use the registry's `htpasswd` authentication method; 54 | - `-e REGISTRY_AUTH_HTPASSWD_REALM='Registry Realm'` - specify the authentication realm; 55 | - `-e 
REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd` - specify the location of the `htpasswd` file. 56 | 57 | Now the registry is using secure transport and user authentication. 58 | 59 | ## Authenticating with the Registry 60 | 61 | With basic authentication, users cannot push or pull from the registry unless they are authenticated. If you try and pull an image without authenticating, you will get an error: 62 | 63 | ``` 64 | $ sudo docker pull localhost:5000/hello-world 65 | Using default tag: latest 66 | Error response from daemon: Get https://localhost:5000/v2/hello-world/manifests/latest: no basic auth credentials 67 | ``` 68 | 69 | The result is the same for valid and invalid image names, so you can't even check a repository exists without authenticating. Logging in to the registry is the same `docker login` command you use for Docker Store, specifying the registry hostname: 70 | 71 | ``` 72 | $ sudo docker login registry.local:5000 73 | Username: moby 74 | Password: 75 | Login Succeeded 76 | ``` 77 | 78 | If you use the wrong password or a username that doesn't exist, you get a `401` error message: 79 | 80 | ``` 81 | Error response from daemon: login attempt to https://registry.local:5000/v2/ failed with status: 401 Unauthorized 82 | ``` 83 | 84 | Now you're authenticated, you can push and pull as before: 85 | 86 | ``` 87 | $ sudo docker pull localhost:5000/hello-world 88 | Using default tag: latest 89 | latest: Pulling from hello-world 90 | Digest: sha256:961497c5ca49dc217a6275d4d64b5e4681dd3b2712d94974b8ce4762675720b4 91 | Status: Image is up to date for registry.local:5000/hello-world:latest 92 | ``` 93 | 94 | > Note. The open-source registry does not support the same authorization model as Docker Store or Docker Trusted Registry. Once you are logged in to the registry, you can push and pull from any repository, there is no restriction to limit specific users to specific repositories. 
95 | 96 | ## Using Docker Compose to Start the Registry 97 | Typing in all the options to start the registry can become tedious. An easier and simpler way is to use [Docker Compose](https://docs.docker.com/compose/). Here's an example of a `docker-compose.yml` file that will start the registry. 98 | ``` 99 | registry: 100 | restart: always 101 | image: registry:2 102 | ports: 103 | - 5000:5000 104 | environment: 105 | REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt 106 | REGISTRY_HTTP_TLS_KEY: /certs/domain.key 107 | REGISTRY_AUTH: htpasswd 108 | REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd 109 | REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm 110 | volumes: 111 | - /path/registry-data:/var/lib/registry 112 | - /path/certs:/certs 113 | - /path/auth:/auth 114 | ``` 115 | 116 | To start the registry, type: 117 | ``` 118 | $ sudo docker-compose up 119 | ``` 120 | 121 | 122 | ## Conclusion 123 | 124 | [Docker Registry](https://docs.docker.com/registry/) is a free, open-source application for storing and accessing Docker images. You can run the registry in a container on your own network, or in a virtual network in the cloud, to host private images with secure access. For Linux hosts, there is an [official registry image](https://store.docker.com/images/registry) on Docker Store. 125 | 126 | We've covered all the options, from running an insecure registry, through adding SSL to encrypt traffic, and finally adding basic authentication to restrict access. By now you know how to set up a usable registry in your own environment, and you've also used some key Docker patterns - using containers as build agents and to run basic commands, without having to install software on your host machines. 
127 | 128 | There is still more you can do with Docker Registry - using a different [storage driver](https://docs.docker.com/registry/storage-drivers/) so the image data is saved to reliable share storage, and setting up your registry as a [caching proxy for Docker Store](https://docs.docker.com/registry/recipes/mirror/) are good next steps. 129 | -------------------------------------------------------------------------------- /scenario/running_first_container.md: -------------------------------------------------------------------------------- 1 | ## 1.0 Running your first container 2 | Now that you have everything setup, it's time to get our hands dirty. In this section, you are going to run an [Alpine Linux](http://www.alpinelinux.org/) container (a lightweight linux distribution) on your system and get a taste of the `docker run` command. 3 | 4 | To get started, let's run the following in our terminal: 5 | ``` 6 | $ docker pull alpine 7 | ``` 8 | 9 | > **Note:** Depending on how you've installed docker on your system, you might see a `permission denied` error after running the above command. Try the commands from the Getting Started tutorial to [verify your installation](https://docs.docker.com/engine/getstarted/step_one/#/step-3-verify-your-installation). If you're on Linux, you may need to prefix your `docker` commands with `sudo`. Alternatively you can [create a docker group](https://docs.docker.com/engine/installation/linux/ubuntulinux/#/create-a-docker-group) to get rid of this issue. 10 | 11 | The `pull` command fetches the alpine **image** from the **Docker registry** and saves it in our system. You can use the `docker images` command to see a list of all images on your system. 12 | ``` 13 | $ docker images 14 | REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE 15 | alpine latest c51f86c28340 4 weeks ago 1.109 MB 16 | hello-world latest 690ed74de00f 5 months ago 960 B 17 | ``` 18 | 19 | ### 1.1 Docker Run 20 | Great! 
Let's now run a Docker **container** based on this image. To do that you are going to use the `docker run` command. 21 | 22 | ``` 23 | $ docker run alpine ls -l 24 | total 48 25 | drwxr-xr-x 2 root root 4096 Mar 2 16:20 bin 26 | drwxr-xr-x 5 root root 360 Mar 18 09:47 dev 27 | drwxr-xr-x 13 root root 4096 Mar 18 09:47 etc 28 | drwxr-xr-x 2 root root 4096 Mar 2 16:20 home 29 | drwxr-xr-x 5 root root 4096 Mar 2 16:20 lib 30 | ...... 31 | ...... 32 | ``` 33 | What happened? Behind the scenes, a lot of stuff happened. When you call `run`, 34 | 1. The Docker client contacts the Docker daemon 35 | 2. The Docker daemon checks local store if the image (alpine in this case) is available locally, and if not, downloads it from Docker Store. (Since we have issued `docker pull alpine` before, the download step is not necessary) 36 | 3. The Docker daemon creates the container and then runs a command in that container. 37 | 4. The Docker daemon streams the output of the command to the Docker client 38 | 39 | When you run `docker run alpine`, you provided a command (`ls -l`), so Docker started the command specified and you saw the listing. 40 | 41 | Let's try something more exciting. 42 | 43 | ``` 44 | $ docker run alpine echo "hello from alpine" 45 | hello from alpine 46 | ``` 47 | OK, that's some actual output. In this case, the Docker client dutifully ran the `echo` command in our alpine container and then exited it. If you've noticed, all of that happened pretty quickly. Imagine booting up a virtual machine, running a command and then killing it. Now you know why they say containers are fast! 48 | 49 | Try another command. 50 | ``` 51 | $ docker run alpine /bin/sh 52 | ``` 53 | 54 | Wait, nothing happened! Is that a bug? Well, no. These interactive shells will exit after running any scripted commands, unless they are run in an interactive terminal - so for this example to not exit, you need to `docker run -it alpine /bin/sh`. 
55 | 56 | You are now inside the container shell and you can try out a few commands like `ls -l`, `uname -a` and others. Exit out of the container by giving the `exit` command. 57 | 58 | 59 | Ok, now it's time to see the `docker ps` command. The `docker ps` command shows you all containers that are currently running. 60 | 61 | ``` 62 | $ docker ps 63 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 64 | ``` 65 | 66 | Since no containers are running, you see a blank line. Let's try a more useful variant: `docker ps -a` 67 | 68 | ``` 69 | $ docker ps -a 70 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 71 | 36171a5da744 alpine "/bin/sh" 5 minutes ago Exited (0) 2 minutes ago fervent_newton 72 | a6a9d46d0b2f alpine "echo 'hello from alp" 6 minutes ago Exited (0) 6 minutes ago lonely_kilby 73 | ff0a5c3750b9 alpine "ls -l" 8 minutes ago Exited (0) 8 minutes ago elated_ramanujan 74 | c317d0a9e3d2 hello-world "/hello" 34 seconds ago Exited (0) 12 minutes ago stupefied_mcclintock 75 | ``` 76 | 77 | What you see above is a list of all containers that you ran. Notice that the `STATUS` column shows that these containers exited a few minutes ago. You're probably wondering if there is a way to run more than just one command in a container. Let's try that now: 78 | 79 | ``` 80 | $ docker run -it alpine /bin/sh 81 | / # ls 82 | bin dev etc home lib linuxrc media mnt proc root run sbin sys tmp usr var 83 | / # uname -a 84 | Linux 97916e8cb5dc 4.4.27-moby #1 SMP Wed Oct 26 14:01:48 UTC 2016 x86_64 Linux 85 | ``` 86 | Running the `run` command with the `-it` flags attaches us to an interactive tty in the container. Now you can run as many commands in the container as you want. Take some time to run your favorite commands. 87 | 88 | That concludes a whirlwind tour of the `docker run` command which would most likely be the command you'll use most often. It makes sense to spend some time getting comfortable with it. 
To find out more about `run`, use `docker run --help` to see a list of all flags it supports. As you proceed further, we'll see a few more variants of `docker run`. 89 | ### 1.2 Terminology 90 | In the last section, you saw a lot of Docker-specific jargon which might be confusing to some. So before you go further, let's clarify some terminology that is used frequently in the Docker ecosystem. 91 | 92 | - *Images* - The file system and configuration of our application which are used to create containers. To find out more about a Docker image, run `docker inspect alpine`. In the demo above, you used the `docker pull` command to download the **alpine** image. When you executed the command `docker run hello-world`, it also did a `docker pull` behind the scenes to download the **hello-world** image. 93 | - *Containers* - Running instances of Docker images — containers run the actual applications. A container includes an application and all of its dependencies. It shares the kernel with other containers, and runs as an isolated process in user space on the host OS. You created a container using `docker run` which you did using the alpine image that you downloaded. A list of running containers can be seen using the `docker ps` command. 94 | - *Docker daemon* - The background service running on the host that manages building, running and distributing Docker containers. 95 | - *Docker client* - The command line tool that allows the user to interact with the Docker daemon. 96 | - *Docker Store* - A [registry](https://store.docker.com/) of Docker images, where you can find trusted and enterprise ready containers, plugins, and Docker editions. You'll be using this later in this tutorial. 
97 | 98 | ## Next Steps 99 | For the next step in the tutorial, head over to [2.0 Webapps with Docker](./webapps.md) 100 | -------------------------------------------------------------------------------- /Dockerfile/static-site/Hello_docker.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 304 | 305 | 306 |


307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |

316 | 317 |

Hello Docker!

318 | 319 |

This is being served from a docker
320 | container running Nginx.

321 | 322 | 323 | 324 | 325 | 326 | -------------------------------------------------------------------------------- /configuration/bridge-networking.md: -------------------------------------------------------------------------------- 1 | # Bridge networking 2 | 3 | # Lab Meta 4 | 5 | > **Difficulty**: Intermediate 6 | 7 | > **Time**: Approximately 15 minutes 8 | 9 | In this lab you'll learn how to build, manage, and use **bridge** networks. 10 | 11 | You will complete the following steps as part of this lab. 12 | 13 | - [Step 1 - The default **bridge** network](#default_bridge) 14 | - [Step 2 - Connect a container to the default *bridge* network](#connect_container) 15 | - [Step 3 - Test the network connectivity](#ping_local) 16 | - [Step 4 - Configure NAT for external access](#nat) 17 | 18 | # Prerequisites 19 | 20 | You will need all of the following to complete this lab: 21 | 22 | - A Linux-based Docker host running Docker 1.12 or higher 23 | - The lab was built and tested using Ubuntu 16.04 24 | 25 | # Step 1: The default **bridge** network 26 | 27 | Every clean installation of Docker comes with a pre-built network called **bridge**. Verify this with the `docker network ls` command. 28 | 29 | ``` 30 | $ docker network ls 31 | NETWORK ID NAME DRIVER SCOPE 32 | 1befe23acd58 bridge bridge local 33 | 726ead8f4e6b host host local 34 | ef4896538cc7 none null local 35 | ``` 36 | 37 | The output above shows that the **bridge** network is associated with the *bridge* driver. It's important to note that the network and the driver are connected, but they are not the same. In this example the network and the driver have the same name - but they are not the same thing! 38 | 39 | The output above also shows that the **bridge** network is scoped locally. This means that the network only exists on this Docker host. This is true of all networks using the *bridge* driver - the *bridge* driver provides single-host networking. 
40 | 41 | All networks created with the *bridge* driver are based on a Linux bridge (a.k.a. a virtual switch). 42 | 43 | Install the `brctl` command and use it to list the Linux bridges on your Docker host. 44 | 45 | ``` 46 | # Install the brctl tools 47 | 48 | $ apt-get install bridge-utils 49 | 50 | 51 | # List the bridges on your Docker host 52 | 53 | $ brctl show 54 | bridge name bridge id STP enabled interfaces 55 | docker0 8000.0242f17f89a6 no 56 | ``` 57 | 58 | The output above shows a single Linux bridge called **docker0**. This is the bridge that was automatically created for the **bridge** network. You can see that it has no interfaces currently connected to it. 59 | 60 | You can also use the `ip` command to view details of the **docker0** bridge. 61 | 62 | ``` 63 | $ ip a 64 | 65 | 3: docker0: mtu 1500 qdisc noqueue state DOWN group default 66 | link/ether 02:42:f1:7f:89:a6 brd ff:ff:ff:ff:ff:ff 67 | inet 172.17.0.1/16 scope global docker0 68 | valid_lft forever preferred_lft forever 69 | inet6 fe80::42:f1ff:fe7f:89a6/64 scope link 70 | valid_lft forever preferred_lft forever 71 | ``` 72 | 73 | # Step 2: Connect a container 74 | 75 | The **bridge** network is the default network for new containers. This means that unless you specify a different network, all new containers will be connected to the **bridge** network. 76 | 77 | Create a new container. 78 | 79 | ``` 80 | $ docker run -dt ubuntu sleep infinity 81 | 6dd93d6cdc806df6c7812b6202f6096e43d9a013e56e5e638ee4bfb4ae8779ce 82 | ``` 83 | 84 | This command will create a new container based on the `ubuntu:latest` image and will run the `sleep` command to keep the container running in the background. As no network was specified on the `docker run` command, the container will be added to the **bridge** network. 85 | 86 | Run the `brctl show` command again. 
87 | 88 | ``` 89 | $ brctl show 90 | bridge name bridge id STP enabled interfaces 91 | docker0 8000.0242f17f89a6 no veth3a080f 92 | ``` 93 | 94 | Notice how the **docker0** bridge now has an interface connected. This interface connects the **docker0** bridge to the new container just created. 95 | 96 | Inspect the **bridge** network again to see the new container attached to it. 97 | 98 | ``` 99 | $ docker network inspect bridge 100 | 101 | "Containers": { 102 | "6dd93d6cdc806df6c7812b6202f6096e43d9a013e56e5e638ee4bfb4ae8779ce": { 103 | "Name": "reverent_dubinsky", 104 | "EndpointID": "dda76da5577960b30492fdf1526c7dd7924725e5d654bed57b44e1a6e85e956c", 105 | "MacAddress": "02:42:ac:11:00:02", 106 | "IPv4Address": "172.17.0.2/16", 107 | "IPv6Address": "" 108 | } 109 | }, 110 | 111 | ``` 112 | 113 | # Step 3: Test network connectivity 114 | 115 | The output to the previous `docker network inspect` command shows the IP address of the new container. In the previous example it is "172.17.0.2" but yours might be different. 116 | 117 | Ping the IP address of the container from the shell prompt of your Docker host. Remember to use the IP of the container in **your** environment. 118 | 119 | ``` 120 | $ ping 172.17.0.2 121 | 64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.069 ms 122 | 64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.052 ms 123 | 64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.050 ms 124 | 64 bytes from 172.17.0.2: icmp_seq=4 ttl=64 time=0.049 ms 125 | 64 bytes from 172.17.0.2: icmp_seq=5 ttl=64 time=0.049 ms 126 | ^C 127 | --- 172.17.0.2 ping statistics --- 128 | 5 packets transmitted, 5 received, 0% packet loss, time 3999ms 129 | rtt min/avg/max/mdev = 0.049/0.053/0.069/0.012 ms 130 | ``` 131 | 132 | Press `Ctrl-C` to stop the ping. The replies above show that the Docker host can ping the container over the **bridge** network. 133 | 134 | Log in to the container, install the `ping` 135 | program and ping `www.dockercon.com`. 
136 | 137 | ``` 138 | # Get the ID of the container started in the previous step. 139 | $ docker ps 140 | CONTAINER ID IMAGE COMMAND CREATED STATUS NAMES 141 | 6dd93d6cdc80 ubuntu "sleep infinity" 5 mins Up reverent_dubinsky 142 | 143 | # Exec into the container 144 | $ docker exec -it 6dd93d6cdc80 /bin/bash 145 | 146 | # Update APT package lists and install the iputils-ping package 147 | root@6dd93d6cdc80:/# apt-get update 148 | 149 | 150 | apt-get install iputils-ping 151 | Reading package lists... Done 152 | 153 | 154 | # Ping www.dockercon.com from within the container 155 | root@6dd93d6cdc80:/# ping www.dockercon.com 156 | PING www.dockercon.com (104.239.220.248) 56(84) bytes of data. 157 | 64 bytes from 104.239.220.248: icmp_seq=1 ttl=39 time=93.9 ms 158 | 64 bytes from 104.239.220.248: icmp_seq=2 ttl=39 time=93.8 ms 159 | 64 bytes from 104.239.220.248: icmp_seq=3 ttl=39 time=93.8 ms 160 | ^C 161 | --- www.dockercon.com ping statistics --- 162 | 3 packets transmitted, 3 received, 0% packet loss, time 2002ms 163 | rtt min/avg/max/mdev = 93.878/93.895/93.928/0.251 ms 164 | ``` 165 | 166 | This shows that the new container can ping the internet and therefore has a valid and working network configuration. 167 | 168 | 169 | # Step 4: Configure NAT for external connectivity 170 | 171 | In this step we'll start a new **NGINX** container and map port 8080 on the Docker host to port 80 inside of the container. This means that traffic that hits the Docker host on port 8080 will be passed on to port 80 inside the container. 172 | 173 | > **NOTE:** If you start a new container from the official NGINX image without specifying a command to run, the container will run a basic web server on port 80. 174 | 175 | Start a new container based off the official NGINX image. 
176 | 177 | ``` 178 | $ docker run --name web1 -d -p 8080:80 nginx 179 | Unable to find image 'nginx:latest' locally 180 | latest: Pulling from library/nginx 181 | 386a066cd84a: Pull complete 182 | 7bdb4b002d7f: Pull complete 183 | 49b006ddea70: Pull complete 184 | Digest: sha256:9038d5645fa5fcca445d12e1b8979c87f46ca42cfb17beb1e5e093785991a639 185 | Status: Downloaded newer image for nginx:latest 186 | b747d43fa277ec5da4e904b932db2a3fe4047991007c2d3649e3f0c615961038 187 | ``` 188 | 189 | Check that the container is running and view the port mapping. 190 | 191 | ``` 192 | $ docker ps 193 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 194 | b747d43fa277 nginx "nginx -g 'daemon off" 3 seconds ago Up 2 seconds 443/tcp, 0.0.0.0:8080->80/tcp web1 195 | 6dd93d6cdc80 ubuntu "sleep infinity" About an hour ago Up About an hour reverent_dubinsky 196 | ``` 197 | 198 | There are two containers listed in the output above. The top line shows the new **web1** container running NGINX. Take note of the command the container is running as well as the port mapping - `0.0.0.0:8080->80/tcp` maps port 8080 on all host interfaces to port 80 inside the **web1** container. This port mapping is what effectively makes the containers web service accessible from external sources (via the Docker hosts IP address on port 8080). 199 | 200 | Now that the container is running and mapped to a port on a host interface you can test connectivity to the NGINX web server. 201 | 202 | To complete the following task you will need the IP address of your Docker host. This will need to be an IP address that you can reach (e.g. if your lab is in AWS this will need to be the instance's Public IP). 203 | 204 | Point your web browser to the IP and port 8080 of your Docker host. The following example shows a web browser pointed to `52.213.169.69:8080` 205 | 206 | ![](concepts/img/browser.png) 207 | 208 | If you try connecting to the same IP address on a different port number it will fail. 
209 | 210 | If for some reason you cannot open a session from a web broswer, you can connect from your Docker host using the `curl` command. 211 | 212 | ``` 213 | $ curl 127.0.0.1:8080 214 | 215 | 216 | 217 | Welcome to nginx! 218 | 219 |

Thank you for using nginx.

220 | 221 | 222 | ``` 223 | 224 | If you try and curl the IP address on a different port number it will fail. 225 | 226 | > **NOTE:** The port mapping is actually port address translation (PAT). 227 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2016 Docker, Inc. 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /swarm/deploying_app_with_swarm.md: -------------------------------------------------------------------------------- 1 | ## 3.0 Deploying an app to a Swarm 2 | This portion of the tutorial will guide you through the creation and customization of a voting app. It's important that you follow the steps in order, and make sure to customize the portions that are customizable. 3 | 4 | **Important.** 5 | To complete this section, you will need to have Docker installed on your machine as mentioned in the [Setup](./setup.md) section. You'll also need to have git installed. There are many options for installing it. For instance, you can get it from [GitHub](https://help.github.com/articles/set-up-git/). 6 | 7 | ### Voting app 8 | For this application we will use the [Docker Example Voting App](https://github.com/docker/example-voting-app). 
This app consists of five components: 9 | 10 | * Python webapp which lets you vote between two options 11 | * Redis queue which collects new votes 12 | * .NET worker which consumes votes and stores them in… 13 | * Postgres database backed by a Docker volume 14 | * Node.js webapp which shows the results of the voting in real time 15 | 16 | Clone the repository onto your machine and `cd` into the directory: 17 | 18 | ``` 19 | git clone https://github.com/docker/example-voting-app.git 20 | cd example-voting-app 21 | ``` 22 | 23 | ### 3.1 Deploying the app 24 | For this first stage, we will use existing images that are in Docker Store. 25 | 26 | This app relies on [Docker Swarm mode](https://docs.docker.com/engine/swarm/). Swarm mode is the cluster management and orchestration features embedded in the Docker engine. You can easily deploy to a swarm using a file that declares your desired state for the app. Swarm allows you to run your containers on more than one machine. In this tutorial, you can run on just one machine, or you can use something like [Docker for AWS](https://beta.docker.com/) or [Docker for Azure](https://beta.docker.com/) to quickly create a multiple node machine. Alternately, you can use Docker Machine to create a number of local nodes on your development machine. See [the Swarm Mode lab](../../swarm-mode/beginner-tutorial/README.md#creating-the-nodes-and-swarm) for more information. 27 | 28 | First, create a Swarm. 29 | 30 | ``` 31 | docker swarm init 32 | ``` 33 | 34 | Next, you will need a [Docker Compose](https://docs.docker.com/compose) file. You don't need Docker Compose installed, though if you are using Docker for Mac or Docker for Windows you have it installed. However, `docker stack deploy` accepts a file in the Docker Compose format. The file you need is in Docker Example Voting App at the root level. It's called docker-stack.yml. 
You can also just copy and paste it from here: 35 | 36 | ``` 37 | version: "3" 38 | services: 39 | 40 | redis: 41 | image: redis:alpine 42 | ports: 43 | - "6379" 44 | networks: 45 | - frontend 46 | deploy: 47 | replicas: 2 48 | update_config: 49 | parallelism: 2 50 | delay: 10s 51 | restart_policy: 52 | condition: on-failure 53 | db: 54 | image: postgres:9.4 55 | volumes: 56 | - db-data:/var/lib/postgresql/data 57 | networks: 58 | - backend 59 | deploy: 60 | placement: 61 | constraints: [node.role == manager] 62 | vote: 63 | image: dockersamples/examplevotingapp_vote:before 64 | ports: 65 | - 5000:80 66 | networks: 67 | - frontend 68 | depends_on: 69 | - redis 70 | deploy: 71 | replicas: 2 72 | update_config: 73 | parallelism: 2 74 | restart_policy: 75 | condition: on-failure 76 | result: 77 | image: dockersamples/examplevotingapp_result:before 78 | ports: 79 | - 5001:80 80 | networks: 81 | - backend 82 | depends_on: 83 | - db 84 | deploy: 85 | replicas: 1 86 | update_config: 87 | parallelism: 2 88 | delay: 10s 89 | restart_policy: 90 | condition: on-failure 91 | 92 | worker: 93 | image: dockersamples/examplevotingapp_worker 94 | networks: 95 | - frontend 96 | - backend 97 | deploy: 98 | mode: replicated 99 | replicas: 1 100 | labels: [APP=VOTING] 101 | restart_policy: 102 | condition: on-failure 103 | delay: 10s 104 | max_attempts: 3 105 | window: 120s 106 | placement: 107 | constraints: [node.role == manager] 108 | 109 | visualizer: 110 | image: dockersamples/visualizer 111 | ports: 112 | - "8080:8080" 113 | stop_grace_period: 1m30s 114 | volumes: 115 | - /var/run/docker.sock:/var/run/docker.sock 116 | deploy: 117 | placement: 118 | constraints: [node.role == manager] 119 | 120 | networks: 121 | frontend: 122 | backend: 123 | 124 | volumes: 125 | db-data: 126 | ``` 127 | 128 | First deploy it, and then we will look more deeply into the details: 129 | 130 | ``` 131 | docker stack deploy --compose-file docker-stack.yml vote 132 | Creating network vote_frontend 133 
| Creating network vote_backend 134 | Creating network vote_default 135 | Creating service vote_vote 136 | Creating service vote_result 137 | Creating service vote_worker 138 | Creating service vote_redis 139 | Creating service vote_db 140 | ``` 141 | To verify your stack has deployed, use `docker stack services vote` 142 | ``` 143 | docker stack services vote 144 | ID NAME MODE REPLICAS IMAGE 145 | 25wo6p7fltyn vote_db replicated 1/1 postgres:9.4 146 | 2ot4sz0cgvw3 vote_worker replicated 1/1 dockersamples/examplevotingapp_worker:latest 147 | 9faz4wbvxpck vote_redis replicated 2/2 redis:alpine 148 | ocm8x2ijtt88 vote_vote replicated 2/2 dockersamples/examplevotingapp_vote:before 149 | p1dcwi0fkcbb vote_result replicated 2/2 dockersamples/examplevotingapp_result:before 150 | ``` 151 | 152 | If you take a look at `docker-stack.yml`, you will see that the file defines 153 | 154 | * vote container based on a Python image 155 | * result container based on a Node.js image 156 | * redis container based on a redis image, to temporarily store the data. 157 | * .NET based worker app based on a .NET image 158 | * Postgres container based on a postgres image 159 | 160 | The Compose file also defines two networks, `frontend` and `backend`. Each container is placed on one or two networks. Once on those networks, they can access other services on that network in code just by using the name of the service. Services can be on any number of networks. Services are isolated on their network. Services are only able to discover each other by name if they are on the same network. To learn more about networking check out the [Networking Lab](https://github.com/docker/labs/tree/master/networking). 161 | 162 | Take a look at the file again. You'll see it starts with 163 | 164 | ``` 165 | version: "3" 166 | ``` 167 | It's important that you use [version 3](https://docs.docker.com/compose/compose-file/) of compose files, as `docker stack deploy` won't support use of earlier versions.
You will see there's also a `services` key, under which there is a separate key for each of the services. Such as: 168 | ``` 169 | vote: 170 | image: dockersamples/examplevotingapp_vote:before 171 | ports: 172 | - 5000:80 173 | networks: 174 | - frontend 175 | depends_on: 176 | - redis 177 | deploy: 178 | replicas: 2 179 | update_config: 180 | parallelism: 2 181 | restart_policy: 182 | condition: on-failure 183 | ``` 184 | 185 | The `image` key there specifies which image you can use, in this case the image `dockersamples/examplevotingapp_vote:before`. If you're familiar with Compose, you may know that there's a `build` key, which builds based on a Dockerfile. However, `docker stack deploy` does not support `build`, so you need to use pre-built images. 186 | 187 | Much like `docker run` you will see you can define `ports` and `networks`. There's also a `depends_on` key which allows you to specify that a service is only deployed after another service, in this case `vote` only deploys after `redis`. 188 | 189 | The [`deploy`](https://docs.docker.com/compose/compose-file/#deploy) key is new in version 3. It allows you to specify various properties of the deployment to the Swarm. In this case, you are specifying that you want two replicas, that is two containers are deployed on the Swarm. You can specify other properties, like when to restart, what [healthcheck](https://docs.docker.com/engine/reference/builder/#healthcheck) to use, placement constraints, resources, etc. 190 | 191 | #### Test run 192 | 193 | Now that the app is running, you can go to `http://localhost:5000` to see: 194 | 195 | 196 | 197 | Click on one to vote. You can check the results at `http://localhost:5001`. 198 | 199 | **NOTE**: If you are running this tutorial in a cloud environment like AWS, Azure, Digital Ocean, or GCE you will not have direct access to localhost or 127.0.0.1 via a browser. A workaround for this is to leverage ssh port forwarding. Below is an example for Mac OS.
Similarly this can be done for Windows and Putty users. 200 | 201 | ``` 202 | $ ssh -L 5000:localhost:5000 <user>@<server-ip> 203 | ``` 204 | 205 | ### 3.2 Customize the app 206 | In this step, you will customize the app and redeploy it. We've supplied the same images but with the votes changed from Cats and Dogs to Java and .NET using the `after` tag. 207 | 208 | #### 3.2.1 Change the images used 209 | 210 | Going back to `docker-stack.yml`, change the `vote` and `result` images to use the `after` tag, so they look like this: 211 | 212 | ``` 213 | vote: 214 | image: dockersamples/examplevotingapp_vote:after 215 | ports: 216 | - 5000:80 217 | networks: 218 | - frontend 219 | depends_on: 220 | - redis 221 | deploy: 222 | replicas: 2 223 | update_config: 224 | parallelism: 2 225 | restart_policy: 226 | condition: on-failure 227 | result: 228 | image: dockersamples/examplevotingapp_result:after 229 | ports: 230 | - 5001:80 231 | networks: 232 | - backend 233 | depends_on: 234 | - db 235 | deploy: 236 | replicas: 2 237 | update_config: 238 | parallelism: 2 239 | delay: 10s 240 | restart_policy: 241 | condition: on-failure 242 | ``` 243 | 244 | #### 3.2.2 Redeploy 245 | Redeployment is the same as deploying. 246 | 247 | ``` 248 | docker stack deploy --compose-file docker-stack.yml vote 249 | ``` 250 | #### 3.2.3 Another test run 251 | 252 | Now take it for a spin again. Go to the URLs you used in section [3.1](#31-deploying-the-app) and see the new votes. 253 | 254 | #### 3.2.4 Remove the stack 255 | 256 | Remove the stack from the swarm. 257 | 258 | ``` 259 | docker stack rm vote 260 | ``` 261 | 262 | ### 3.3 Next steps 263 | Now that you've built some images and pushed them to Docker Cloud, and learned the basics of Swarm mode, you can explore more of Docker by checking out [the documentation](https://docs.docker.com). And if you need any help, check out the [Docker Forums](https://forums.docker.com) or [StackOverflow](https://stackoverflow.com/tags/docker/).
264 | -------------------------------------------------------------------------------- /configuration/docker_daemon_config.md: -------------------------------------------------------------------------------- 1 | # [Docker daemon configuration](https://docs.docker.com/engine/reference/commandline/dockerd/) 2 | 3 | ## There are two ways for docker daemon Configuration. 4 | - In a first way, you change the docker systemd file. 5 | - In a second way, you add the Docker daemon configuration. 6 | 7 | ### Change systemd configuration 8 | 9 | **Create docker directory** 10 | 11 | ```bash 12 | mkdir /etc/systemd/system/docker.service.d 13 | ```` 14 | **Create configuration file** 15 | 16 | ```bash 17 | touch /etc/systemd/system/docker.service.d/override.conf 18 | ``` 19 | **Add `--registry-mirror` config on `override.conf`** 20 | 21 | ```bash 22 | [Service] 23 | ExecStart= 24 | ExecStart=/usr/bin/dockerd --registry-mirror https://docker.DockerMe.ir/ 25 | ```` 26 | **apply this change and check it** 27 | 28 | ```bash 29 | # updating systemd configuration 30 | systemctl daemon-reload 31 | # restart docker service 32 | systemctl restart docker 33 | # check docker service status 34 | systemctl status docker 35 | ``` 36 | 37 | **check docker config** 38 | ```bash 39 | docker info | grep -A1 "Registry Mirrors" 40 | 41 | # sample output 42 | Registry Mirrors: 43 | https://docker.DockerMe.ir/ 44 | ``` 45 | 46 | 47 | ### create docker systemd directory and touch `override.conf` with `systemctl edit` 48 | ```bash 49 | # systemctl edit UNIT ==> Edit one or more unit files 50 | systemctl edit docker 51 | ``` 52 | 53 | ### Example daemon config file 54 | ```bash 55 | [Service] 56 | Environment=HTTP_PROXY=http://127.0.0.1:8123 57 | Environment=HTTPS_PROXY=http://127.0.0.1:8123 58 | Environment=NO_PROXY=localhost,127.0.0.1,repo.dockerme.ir 59 | ExecStart= 60 | ExecStart=/usr/bin/dockerd \ 61 | --registry-mirror https://docker.DockerMe.ir \ 62 | -H tcp://127.0.0.1:2375 \ 63 | --bip 
172.21.0.1/24 \ 64 | --ip 0.0.0.0 \ 65 | --data-root /mnt/docker/ \ 66 | --log-opt max-size=100m --log-opt max-file=5 67 | ``` 68 | 69 | 70 | ## Change daemon configuration 71 | 72 | **Create daemon config file** 73 | 74 | ```bash 75 | touch /etc/docker/daemon.json 76 | ``` 77 | 78 | Either pass the --registry-mirror option when starting dockerd manually, or edit /etc/docker/daemon.json and add the registry-mirrors key and value, to make the change persistent. 79 | 80 | 81 | ```bash 82 | { 83 | "registry-mirrors": ["https://docker.DockerMe.ir/"] 84 | } 85 | ```` 86 | 87 | **apply this change and check it** 88 | 89 | ```bash 90 | # restart docker service 91 | systemctl restart docker 92 | # check docker service status 93 | systemctl status docker 94 | ``` 95 | 96 | **check docker config** 97 | ```bash 98 | docker info | grep -A1 "Registry Mirrors" 99 | 100 | # sample output 101 | Registry Mirrors: 102 | https://docker.DockerMe.ir/ 103 | ``` 104 | 105 | 106 | ## dockerd command: 107 | ```bash 108 | Usage: dockerd COMMAND 109 | 110 | A self-sufficient runtime for containers. 
111 | 112 | Options: 113 | --add-runtime runtime Register an additional OCI compatible runtime (default []) 114 | --allow-nondistributable-artifacts list Allow push of nondistributable artifacts to registry 115 | --api-cors-header string Set CORS headers in the Engine API 116 | --authorization-plugin list Authorization plugins to load 117 | --bip string Specify network bridge IP 118 | -b, --bridge string Attach containers to a network bridge 119 | --cgroup-parent string Set parent cgroup for all containers 120 | --config-file string Daemon configuration file (default "/etc/docker/daemon.json") 121 | --containerd string containerd grpc address 122 | --containerd-namespace string Containerd namespace to use (default "moby") 123 | --containerd-plugins-namespace string Containerd namespace to use for plugins (default "plugins.moby") 124 | --cpu-rt-period int Limit the CPU real-time period in microseconds for the 125 | parent cgroup for all containers 126 | --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds for the 127 | parent cgroup for all containers 128 | --cri-containerd start containerd with cri 129 | --data-root string Root directory of persistent Docker state (default "/var/lib/docker") 130 | -D, --debug Enable debug mode 131 | --default-address-pool pool-options Default address pools for node specific local networks 132 | --default-cgroupns-mode string Default mode for containers cgroup namespace ("host" | "private") (default "host") 133 | --default-gateway ip Container default gateway IPv4 address 134 | --default-gateway-v6 ip Container default gateway IPv6 address 135 | --default-ipc-mode string Default mode for containers ipc ("shareable" | "private") (default "private") 136 | --default-runtime string Default OCI runtime for containers (default "runc") 137 | --default-shm-size bytes Default shm size for containers (default 64MiB) 138 | --default-ulimit ulimit Default ulimits for containers (default []) 139 | --dns list DNS server to use 140 
| --dns-opt list DNS options to use 141 | --dns-search list DNS search domains to use 142 | --exec-opt list Runtime execution options 143 | --exec-root string Root directory for execution state files (default "/var/run/docker") 144 | --experimental Enable experimental features 145 | --fixed-cidr string IPv4 subnet for fixed IPs 146 | --fixed-cidr-v6 string IPv6 subnet for fixed IPs 147 | -G, --group string Group for the unix socket (default "docker") 148 | --help Print usage 149 | -H, --host list Daemon socket(s) to connect to 150 | --host-gateway-ip ip IP address that the special 'host-gateway' string in --add-host resolves to. 151 | Defaults to the IP address of the default bridge 152 | --icc Enable inter-container communication (default true) 153 | --init Run an init in the container to forward signals and reap processes 154 | --init-path string Path to the docker-init binary 155 | --insecure-registry list Enable insecure registry communication 156 | --ip ip Default IP when binding container ports (default 0.0.0.0) 157 | --ip-forward Enable net.ipv4.ip_forward (default true) 158 | --ip-masq Enable IP masquerading (default true) 159 | --iptables Enable addition of iptables rules (default true) 160 | --ip6tables Enable addition of ip6tables rules (default false) 161 | --ipv6 Enable IPv6 networking 162 | --label list Set key=value labels to the daemon 163 | --live-restore Enable live restore of docker when containers are still running 164 | --log-driver string Default driver for container logs (default "json-file") 165 | -l, --log-level string Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info") 166 | --log-opt map Default log driver options for containers (default map[]) 167 | --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) 168 | --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) 169 | --max-download-attempts int Set the max download attempts for each pull 
(default 5) 170 | --metrics-addr string Set default address and port to serve the metrics api on 171 | --mtu int Set the containers network MTU 172 | --network-control-plane-mtu int Network Control plane MTU (default 1500) 173 | --no-new-privileges Set no-new-privileges by default for new containers 174 | --node-generic-resource list Advertise user-defined resource 175 | --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) 176 | -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") 177 | --raw-logs Full timestamps without ANSI coloring 178 | --registry-mirror list Preferred Docker registry mirror 179 | --rootless Enable rootless mode; typically used with RootlessKit 180 | --seccomp-profile string Path to seccomp profile 181 | --selinux-enabled Enable selinux support 182 | --shutdown-timeout int Set the default shutdown timeout (default 15) 183 | -s, --storage-driver string Storage driver to use 184 | --storage-opt list Storage driver options 185 | --swarm-default-advertise-addr string Set default address or interface for swarm advertised address 186 | --tls Use TLS; implied by --tlsverify 187 | --tlscacert string Trust certs signed only by this CA (default "~/.docker/ca.pem") 188 | --tlscert string Path to TLS certificate file (default "~/.docker/cert.pem") 189 | --tlskey string Path to TLS key file (default "~/.docker/key.pem") 190 | --tlsverify Use TLS and verify the remote 191 | --userland-proxy Use userland proxy for loopback traffic (default true) 192 | --userland-proxy-path string Path to the userland proxy binary 193 | --userns-remap string User/Group setting for user namespaces 194 | -v, --version Print version information and quit 195 | 196 | ``` 197 | 198 | ## Daemon configuration file 199 | ```bash 200 | { 201 | "allow-nondistributable-artifacts": [], 202 | "api-cors-header": "", 203 | "authorization-plugins": [], 204 | "bip": "", 205 | "bridge": "", 206 | "cgroup-parent": "", 207 | 
"cluster-advertise": "", 208 | "cluster-store": "", 209 | "cluster-store-opts": {}, 210 | "containerd": "/run/containerd/containerd.sock", 211 | "containerd-namespace": "docker", 212 | "containerd-plugin-namespace": "docker-plugins", 213 | "data-root": "", 214 | "debug": true, 215 | "default-address-pools": [ 216 | { 217 | "base": "172.80.0.0/16", 218 | "size": 24 219 | }, 220 | { 221 | "base": "172.90.0.0/16", 222 | "size": 24 223 | } 224 | ], 225 | "default-cgroupns-mode": "private", 226 | "default-gateway": "", 227 | "default-gateway-v6": "", 228 | "default-runtime": "runc", 229 | "default-shm-size": "64M", 230 | "default-ulimits": { 231 | "nofile": { 232 | "Hard": 64000, 233 | "Name": "nofile", 234 | "Soft": 64000 235 | } 236 | }, 237 | "dns": [], 238 | "dns-opts": [], 239 | "dns-search": [], 240 | "exec-opts": [], 241 | "exec-root": "", 242 | "experimental": false, 243 | "features": {}, 244 | "fixed-cidr": "", 245 | "fixed-cidr-v6": "", 246 | "group": "", 247 | "hosts": [], 248 | "icc": false, 249 | "init": false, 250 | "init-path": "/usr/libexec/docker-init", 251 | "insecure-registries": [], 252 | "ip": "0.0.0.0", 253 | "ip-forward": false, 254 | "ip-masq": false, 255 | "iptables": false, 256 | "ip6tables": false, 257 | "ipv6": false, 258 | "labels": [], 259 | "live-restore": true, 260 | "log-driver": "json-file", 261 | "log-level": "", 262 | "log-opts": { 263 | "cache-disabled": "false", 264 | "cache-max-file": "5", 265 | "cache-max-size": "20m", 266 | "cache-compress": "true", 267 | "env": "os,customer", 268 | "labels": "somelabel", 269 | "max-file": "5", 270 | "max-size": "10m" 271 | }, 272 | "max-concurrent-downloads": 3, 273 | "max-concurrent-uploads": 5, 274 | "max-download-attempts": 5, 275 | "mtu": 0, 276 | "no-new-privileges": false, 277 | "node-generic-resources": [ 278 | "NVIDIA-GPU=UUID1", 279 | "NVIDIA-GPU=UUID2" 280 | ], 281 | "oom-score-adjust": -500, 282 | "pidfile": "", 283 | "raw-logs": false, 284 | "registry-mirrors": [], 285 | "runtimes": 
{ 286 | "cc-runtime": { 287 | "path": "/usr/bin/cc-runtime" 288 | }, 289 | "custom": { 290 | "path": "/usr/local/bin/my-runc-replacement", 291 | "runtimeArgs": [ 292 | "--debug" 293 | ] 294 | } 295 | }, 296 | "seccomp-profile": "", 297 | "selinux-enabled": false, 298 | "shutdown-timeout": 15, 299 | "storage-driver": "", 300 | "storage-opts": [], 301 | "swarm-default-advertise-addr": "", 302 | "tls": true, 303 | "tlscacert": "", 304 | "tlscert": "", 305 | "tlskey": "", 306 | "tlsverify": true, 307 | "userland-proxy": false, 308 | "userland-proxy-path": "/usr/libexec/docker-proxy", 309 | "userns-remap": "" 310 | } 311 | 312 | ``` 313 | -------------------------------------------------------------------------------- /configuration/overlay-networking.md: -------------------------------------------------------------------------------- 1 | # Overlay networking and service discovery 2 | 3 | # Lab Meta 4 | 5 | > **Difficulty**: Intermediate 6 | 7 | > **Time**: Approximately 20 minutes 8 | 9 | In this lab you'll learn how to build, manage, and use an **overlay** network with a *service* in *Swarm mode*. 10 | 11 | You will complete the following steps as part of this lab. 12 | 13 | - [Step 1 - Create a new Swarm](#swarm_init) 14 | - [Step 2 - Create an overlay network](#create_network) 15 | - [Step 3 - Create a service](#create_service) 16 | - [Step 4 - Test the network](#test) 17 | - [Step 5 - Test service discovery](#discover) 18 | 19 | # Prerequisites 20 | 21 | You will need all of the following to complete this lab: 22 | 23 | - Two Linux-based Docker hosts running **Docker 1.12** or higher in Engine mode (i.e. not yet configured for Swarm mode). You should use **node1** and **node2** from your lab. 24 | 25 | 26 | # Step 1: Create a new Swarm 27 | 28 | In this step you'll initialize a new Swarm, join a single worker node, and verify the operations worked. 29 | 30 | 1. Execute the following command on **node1**. 
31 | 32 | ``` 33 | node1$ docker swarm init 34 | Swarm initialized: current node (cw6jpk7pqfg0jkilff5hr8z42) is now a manager. 35 | To add a worker to this swarm, run the following command: 36 | 37 | docker swarm join \ 38 | --token SWMTKN-1-3n2iuzpj8jynx0zd8axr0ouoagvy0o75uk5aqjrn0297j4uaz7-63eslya31oza2ob78b88zg5xe \ 39 | 172.31.34.123:2377 40 | 41 | To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 42 | ``` 43 | 44 | 2. Copy the entire `docker swarm join` command that is displayed as part of the output from the command. 45 | 46 | 3. Paste the copied command into the terminal of **node2**. 47 | 48 | ``` 49 | node2$ docker swarm join \ 50 | > --token SWMTKN-1-3n2iuzpj8jynx0zd8axr0ouoagvy0o75uk5aqjrn0297j4uaz7-63eslya31oza2ob78b88zg5xe \ 51 | > 172.31.34.123:2377 52 | 53 | This node joined a swarm as a worker. 54 | ``` 55 | 56 | 4. Run a `docker node ls` on **node1** to verify that both nodes are part of the Swarm. 57 | 58 | ``` 59 | node1$ docker node ls 60 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 61 | 4nb02fhvhy8sb0ygcvwya9skr ip-172-31-43-74 Ready Active 62 | cw6jpk7pqfg0jkilff5hr8z42 * ip-172-31-34-123 Ready Active Leader 63 | ``` 64 | 65 | The `ID` and `HOSTNAME` values may be different in your lab. The important thing to check is that both nodes have joined the Swarm and are *ready* and *active*. 66 | 67 | # Step 2: Create an overlay network 68 | 69 | Now that you have a Swarm initialized it's time to create an **overlay** network. 70 | 71 | 1. Create a new overlay network called "overnet" by executing the following command on **node1**. 72 | 73 | ``` 74 | node1$ docker network create -d overlay overnet 75 | 0cihm9yiolp0s9kcczchqorhb 76 | ``` 77 | 78 | 2. Use the `docker network ls` command to verify the network was created successfully. 
79 | 80 | ``` 81 | node1$ docker network ls 82 | NETWORK ID NAME DRIVER SCOPE 83 | 1befe23acd58 bridge bridge local 84 | 0ea6066635df docker_gwbridge bridge local 85 | 726ead8f4e6b host host local 86 | 8eqnahrmp9lv ingress overlay swarm 87 | ef4896538cc7 none null local 88 | 0cihm9yiolp0 overnet overlay swarm 89 | ``` 90 | 91 | The new "overnet" network is shown on the last line of the output above. Notice how it is associated with the **overlay** driver and is scoped to the entire Swarm. 92 | 93 | > **NOTE:** The other new networks (ingress and docker_gwbridge) were created automatically when the Swarm cluster was created. 94 | 95 | 3. Run the same `docker network ls` command from **node2** 96 | 97 | ``` 98 | node2$ docker network ls 99 | NETWORK ID NAME DRIVER SCOPE 100 | b76635120433 bridge bridge local 101 | ea13f975a254 docker_gwbridge bridge local 102 | 73edc8c0cc70 host host local 103 | 8eqnahrmp9lv ingress overlay swarm 104 | c4fb141606ca none null local 105 | ``` 106 | 107 | Notice that the "overnet" network does not appear in the list. This is because Docker only extends overlay networks to hosts when they are needed. This is usually when a host runs a task from a service that is created on the network. We will see this shortly. 108 | 109 | 4. Use the `docker network inspect` command to view more detailed information about the "overnet" network. You will need to run this command from **node1**. 
110 | 111 | ``` 112 | node1$ docker network inspect overnet 113 | [ 114 | { 115 | "Name": "overnet", 116 | "Id": "0cihm9yiolp0s9kcczchqorhb", 117 | "Scope": "swarm", 118 | "Driver": "overlay", 119 | "EnableIPv6": false, 120 | "IPAM": { 121 | "Driver": "default", 122 | "Options": null, 123 | "Config": [] 124 | }, 125 | "Internal": false, 126 | "Containers": null, 127 | "Options": { 128 | "com.docker.network.driver.overlay.vxlanid_list": "257" 129 | }, 130 | "Labels": null 131 | } 132 | ] 133 | ``` 134 | 135 | # Step 3: Create a service 136 | 137 | Now that you have a Swarm initialized and an overlay network, it's time to create a service that uses the network. 138 | 139 | 1. Execute the following command from **node1** to create a new service called *myservice* on the *overnet* network with two tasks/replicas. 140 | 141 | ``` 142 | node1$ docker service create --name myservice \ 143 | --network overnet \ 144 | --replicas 2 \ 145 | ubuntu sleep infinity 146 | 147 | e9xu03wsxhub3bij2tqyjey5t 148 | ``` 149 | 150 | 2. Verify that the service is created and both replicas are up. 151 | 152 | ``` 153 | node1$ docker service ls 154 | ID NAME REPLICAS IMAGE COMMAND 155 | e9xu03wsxhub myservice 2/2 ubuntu sleep infinity 156 | ``` 157 | 158 | The `2/2` in the `REPLICAS` column shows that both tasks in the service are up and running. 159 | 160 | 3. Verify that a single task (replica) is running on each of the two nodes in the Swarm. 161 | 162 | ``` 163 | node1$ docker service ps myservice 164 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR 165 | 5t4wh...fsvz myservice.1 ubuntu node1 Running Running 2 mins 166 | 8d9b4...te27 myservice.2 ubuntu node2 Running Running 2 mins 167 | ``` 168 | 169 | The `ID` and `NODE` values might be different in your output. The important thing to note is that each task/replica is running on a different node. 170 | 171 | 4. Now that **node2** is running a task on the "overnet" network it will be able to see the "overnet" network. 
Run the following command from **node2** to verify this. 172 | 173 | ``` 174 | node2$ docker network ls 175 | NETWORK ID NAME DRIVER SCOPE 176 | b76635120433 bridge bridge local 177 | ea13f975a254 docker_gwbridge bridge local 178 | 73edc8c0cc70 host host local 179 | 8eqnahrmp9lv ingress overlay swarm 180 | c4fb141606ca none null local 181 | 0cihm9yiolp0 overnet overlay swarm 182 | ``` 183 | 184 | 5. Run the following command on **node2** to get more detailed information about the "overnet" network and obtain the IP address of the task running on **node2**. 185 | 186 | ``` 187 | node2$ docker network inspect overnet 188 | [ 189 | { 190 | "Name": "overnet", 191 | "Id": "0cihm9yiolp0s9kcczchqorhb", 192 | "Scope": "swarm", 193 | "Driver": "overlay", 194 | "EnableIPv6": false, 195 | "IPAM": { 196 | "Driver": "default", 197 | "Options": null, 198 | "Config": [ 199 | { 200 | "Subnet": "10.0.0.0/24", 201 | "Gateway": "10.0.0.1" 202 | } 203 | ] 204 | }, 205 | "Internal": false, 206 | "Containers": { 207 | "286d2e98c764...37f5870c868": { 208 | "Name": "myservice.1.5t4wh7ngrzt9va3zlqxbmfsvz", 209 | "EndpointID": "43590b5453a...4d641c0c913841d657", 210 | "MacAddress": "02:42:0a:00:00:04", 211 | "IPv4Address": "10.0.0.4/24", 212 | "IPv6Address": "" 213 | } 214 | }, 215 | "Options": { 216 | "com.docker.network.driver.overlay.vxlanid_list": "257" 217 | }, 218 | "Labels": {} 219 | } 220 | ] 221 | ``` 222 | 223 | You should note that as of Docker 1.12, `docker network inspect` only shows containers/tasks running on the local node. This means that `10.0.0.4` is the IPv4 address of the container running on **node2**. Make a note of this IP address for the next step (the IP address in your lab might be different than the one shown here in the lab guide). 224 | 225 | # Step 4: Test the network 226 | 227 | To complete this step you will need the IP address of the service task running on **node2** that you saw in the previous step. 228 | 229 | 1. 
Execute the following commands from **node1**. 230 | 231 | ``` 232 | node1$ docker network inspect overnet 233 | [ 234 | { 235 | "Name": "overnet", 236 | "Id": "0cihm9yiolp0s9kcczchqorhb", 237 | "Scope": "swarm", 238 | "Driver": "overlay", 239 | "Containers": { 240 | "053abaa...e874f82d346c23a7a": { 241 | "Name": "myservice.2.8d9b4i6vnm4hf6gdhxt40te27", 242 | "EndpointID": "25d4d5...faf6abd60dba7ff9b5fff6", 243 | "MacAddress": "02:42:0a:00:00:03", 244 | "IPv4Address": "10.0.0.3/24", 245 | "IPv6Address": "" 246 | } 247 | }, 248 | "Options": { 249 | "com.docker.network.driver.overlay.vxlanid_list": "257" 250 | }, 251 | "Labels": {} 252 | } 253 | ] 254 | ``` 255 | 256 | Notice that the IP address listed for the service task (container) running on **node1** is different to the IP address for the service task running on **node2**. Note also that they are on the same "overnet" network. 257 | 258 | 2. Run a `docker ps` command to get the ID of the service task on **node1** so that you can log in to it in the next step. 259 | 260 | ``` 261 | node1$ docker ps 262 | CONTAINER ID IMAGE COMMAND CREATED STATUS NAMES 263 | 053abaac4f93 ubuntu:latest "sleep infinity" 19 mins ago Up 19 mins myservice.2.8d9b4i6vnm4hf6gdhxt40te27 264 | 265 | ``` 266 | 267 | 3. Log on to the service task. Be sure to use the container `ID` from your environment as it will be different from the example shown below. 268 | 269 | ``` 270 | node1$ docker exec -it 053abaac4f93 /bin/bash 271 | root@053abaac4f93:/# 272 | ``` 273 | 274 | 4. Install the ping command and ping the service task running on **node2**. 275 | 276 | ``` 277 | root@053abaac4f93:/# apt-get update && apt-get install iputils-ping 278 | 279 | root@053abaac4f93:/# 280 | root@053abaac4f93:/# 281 | root@053abaac4f93:/# ping 10.0.0.4 282 | PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.
283 | 64 bytes from 10.0.0.4: icmp_seq=1 ttl=64 time=0.726 ms 284 | 64 bytes from 10.0.0.4: icmp_seq=2 ttl=64 time=0.647 ms 285 | ^C 286 | --- 10.0.0.4 ping statistics --- 287 | 2 packets transmitted, 2 received, 0% packet loss, time 999ms 288 | rtt min/avg/max/mdev = 0.647/0.686/0.726/0.047 ms 289 | ``` 290 | 291 | The output above shows that both tasks from the **myservice** service are on the same overlay network spanning both nodes and that they can use this network to communicate. 292 | 293 | # Step 5: Test service discovery 294 | 295 | Now that you have a working service using an overlay network, let's test service discovery. 296 | 297 | If you are not still inside of the container on **node1**, log back into it with the `docker exec` command. 298 | 299 | 1. Run the following command from inside of the container on **node1**. 300 | 301 | ``` 302 | root@053abaac4f93:/# cat /etc/resolv.conf 303 | search eu-west-1.compute.internal 304 | nameserver 127.0.0.11 305 | options ndots:0 306 | ``` 307 | 308 | The value that we are interested in is the `nameserver 127.0.0.11`. This value sends all DNS queries from the container to an embedded DNS resolver running inside the container listening on 127.0.0.11:53. All Docker containers run an embedded DNS server at this address. 309 | 310 | > **NOTE:** Some of the other values in your file may be different to those shown in this guide. 311 | 312 | 2. Try and ping the `myservice` name from within the container. 313 | 314 | ``` 315 | root@053abaac4f93:/# ping myservice 316 | PING myservice (10.0.0.2) 56(84) bytes of data.
317 | 64 bytes from ip-10-0-0-2.eu-west-1.compute.internal (10.0.0.2): icmp_seq=1 ttl=64 time=0.020 ms 318 | 64 bytes from ip-10-0-0-2.eu-west-1.compute.internal (10.0.0.2): icmp_seq=2 ttl=64 time=0.041 ms 319 | 64 bytes from ip-10-0-0-2.eu-west-1.compute.internal (10.0.0.2): icmp_seq=3 ttl=64 time=0.039 ms 320 | ^C 321 | --- myservice ping statistics --- 322 | 3 packets transmitted, 3 received, 0% packet loss, time 2001ms 323 | rtt min/avg/max/mdev = 0.020/0.033/0.041/0.010 ms 324 | ``` 325 | 326 | The output clearly shows that the container can ping the `myservice` service by name. Notice that the IP address returned is `10.0.0.2`. In the next few steps we'll verify that this address is the virtual IP (VIP) assigned to the `myservice` service. 327 | 328 | 3. Type the `exit` command to leave the `exec` container session and return to the shell prompt of your **node1** Docker host. 329 | 330 | 4. Inspect the configuration of the `myservice` service and verify that the VIP value matches the value returned by the previous `ping myservice` command. 331 | 332 | ``` 333 | node1$ docker service inspect myservice 334 | [ 335 | { 336 | "ID": "e9xu03wsxhub3bij2tqyjey5t", 337 | "Version": { 338 | "Index": 20 339 | }, 340 | "CreatedAt": "2016-11-23T09:28:57.888561605Z", 341 | "UpdatedAt": "2016-11-23T09:28:57.890326642Z", 342 | "Spec": { 343 | "Name": "myservice", 344 | "TaskTemplate": { 345 | "ContainerSpec": { 346 | "Image": "ubuntu", 347 | "Args": [ 348 | "sleep", 349 | "infinity" 350 | ] 351 | }, 352 | 353 | "Endpoint": { 354 | "Spec": { 355 | "Mode": "vip" 356 | }, 357 | "VirtualIPs": [ 358 | { 359 | "NetworkID": "0cihm9yiolp0s9kcczchqorhb", 360 | "Addr": "10.0.0.2/24" 361 | } 362 | 363 | ``` 364 | 365 | Towards the bottom of the output you will see the VIP of the service listed. The VIP in the output above is `10.0.0.2` but the value may be different in your setup. 
The important point to note is that the VIP listed here matches the value returned by the `ping myservice` command. 366 | 367 | Feel free to create a new `docker exec` session to the service task (container) running on **node2** and perform the same `ping myservice` command. You will get a response from the same VIP. 368 | -------------------------------------------------------------------------------- /scenario/monitoring/grafana-dashboard/docker-monitoring_rev1.json: -------------------------------------------------------------------------------- 1 | { 2 | "__inputs": [ 3 | { 4 | "name": "DS_PROMETHEUS", 5 | "label": "prometheus", 6 | "description": "prometheus with cAdvisor as a target", 7 | "type": "datasource", 8 | "pluginId": "prometheus", 9 | "pluginName": "Prometheus" 10 | } 11 | ], 12 | "__requires": [ 13 | { 14 | "type": "panel", 15 | "id": "singlestat", 16 | "name": "Singlestat", 17 | "version": "" 18 | }, 19 | { 20 | "type": "panel", 21 | "id": "graph", 22 | "name": "Graph", 23 | "version": "" 24 | }, 25 | { 26 | "type": "grafana", 27 | "id": "grafana", 28 | "name": "Grafana", 29 | "version": "3.1.0" 30 | }, 31 | { 32 | "type": "datasource", 33 | "id": "prometheus", 34 | "name": "Prometheus", 35 | "version": "1.0.0" 36 | } 37 | ], 38 | "id": null, 39 | "title": "Docker monitoring", 40 | "description": "Docker monitoring with Prometheus and cAdvisor", 41 | "tags": [ 42 | "docker" 43 | ], 44 | "style": "dark", 45 | "timezone": "browser", 46 | "editable": true, 47 | "hideControls": false, 48 | "sharedCrosshair": true, 49 | "rows": [ 50 | { 51 | "collapse": false, 52 | "editable": true, 53 | "height": "50", 54 | "panels": [ 55 | { 56 | "cacheTimeout": null, 57 | "colorBackground": false, 58 | "colorValue": false, 59 | "colors": [ 60 | "rgba(245, 54, 54, 0.9)", 61 | "rgba(237, 129, 40, 0.89)", 62 | "rgba(50, 172, 45, 0.97)" 63 | ], 64 | "datasource": "${DS_PROMETHEUS}", 65 | "editable": true, 66 | "error": false, 67 | "format": "none", 68 | "gauge": { 69 |
"maxValue": 100, 70 | "minValue": 0, 71 | "show": false, 72 | "thresholdLabels": false, 73 | "thresholdMarkers": true 74 | }, 75 | "height": "20", 76 | "id": 7, 77 | "interval": null, 78 | "isNew": true, 79 | "links": [], 80 | "mappingType": 1, 81 | "mappingTypes": [ 82 | { 83 | "name": "value to text", 84 | "value": 1 85 | }, 86 | { 87 | "name": "range to text", 88 | "value": 2 89 | } 90 | ], 91 | "maxDataPoints": 100, 92 | "nullPointMode": "connected", 93 | "nullText": null, 94 | "postfix": "", 95 | "postfixFontSize": "50%", 96 | "prefix": "", 97 | "prefixFontSize": "50%", 98 | "rangeMaps": [ 99 | { 100 | "from": "null", 101 | "text": "N/A", 102 | "to": "null" 103 | } 104 | ], 105 | "span": 4, 106 | "sparkline": { 107 | "fillColor": "rgba(31, 118, 189, 0.18)", 108 | "full": false, 109 | "lineColor": "rgb(31, 120, 193)", 110 | "show": false 111 | }, 112 | "targets": [ 113 | { 114 | "expr": "count(container_last_seen{image!=\"\"})", 115 | "intervalFactor": 2, 116 | "legendFormat": "", 117 | "metric": "container_last_seen", 118 | "refId": "A", 119 | "step": 240 120 | } 121 | ], 122 | "thresholds": "", 123 | "title": "Running containers", 124 | "transparent": true, 125 | "type": "singlestat", 126 | "valueFontSize": "80%", 127 | "valueMaps": [ 128 | { 129 | "op": "=", 130 | "text": "N/A", 131 | "value": "null" 132 | } 133 | ], 134 | "valueName": "avg" 135 | }, 136 | { 137 | "cacheTimeout": null, 138 | "colorBackground": false, 139 | "colorValue": false, 140 | "colors": [ 141 | "rgba(245, 54, 54, 0.9)", 142 | "rgba(237, 129, 40, 0.89)", 143 | "rgba(50, 172, 45, 0.97)" 144 | ], 145 | "datasource": "${DS_PROMETHEUS}", 146 | "editable": true, 147 | "error": false, 148 | "format": "mbytes", 149 | "gauge": { 150 | "maxValue": 100, 151 | "minValue": 0, 152 | "show": false, 153 | "thresholdLabels": false, 154 | "thresholdMarkers": true 155 | }, 156 | "height": "20", 157 | "id": 5, 158 | "interval": null, 159 | "isNew": true, 160 | "links": [], 161 | "mappingType": 1, 162 | 
"mappingTypes": [ 163 | { 164 | "name": "value to text", 165 | "value": 1 166 | }, 167 | { 168 | "name": "range to text", 169 | "value": 2 170 | } 171 | ], 172 | "maxDataPoints": 100, 173 | "nullPointMode": "connected", 174 | "nullText": null, 175 | "postfix": "", 176 | "postfixFontSize": "50%", 177 | "prefix": "", 178 | "prefixFontSize": "50%", 179 | "rangeMaps": [ 180 | { 181 | "from": "null", 182 | "text": "N/A", 183 | "to": "null" 184 | } 185 | ], 186 | "span": 4, 187 | "sparkline": { 188 | "fillColor": "rgba(31, 118, 189, 0.18)", 189 | "full": false, 190 | "lineColor": "rgb(31, 120, 193)", 191 | "show": false 192 | }, 193 | "targets": [ 194 | { 195 | "expr": "sum(container_memory_usage_bytes{image!=\"\"})/1024/1024", 196 | "intervalFactor": 2, 197 | "legendFormat": "", 198 | "metric": "container_memory_usage_bytes", 199 | "refId": "A", 200 | "step": 240 201 | } 202 | ], 203 | "thresholds": "", 204 | "title": "Total Memory Usage", 205 | "transparent": true, 206 | "type": "singlestat", 207 | "valueFontSize": "80%", 208 | "valueMaps": [ 209 | { 210 | "op": "=", 211 | "text": "N/A", 212 | "value": "null" 213 | } 214 | ], 215 | "valueName": "current" 216 | }, 217 | { 218 | "cacheTimeout": null, 219 | "colorBackground": false, 220 | "colorValue": false, 221 | "colors": [ 222 | "rgba(245, 54, 54, 0.9)", 223 | "rgba(237, 129, 40, 0.89)", 224 | "rgba(50, 172, 45, 0.97)" 225 | ], 226 | "datasource": "${DS_PROMETHEUS}", 227 | "editable": true, 228 | "error": false, 229 | "format": "percent", 230 | "gauge": { 231 | "maxValue": 100, 232 | "minValue": 0, 233 | "show": false, 234 | "thresholdLabels": false, 235 | "thresholdMarkers": true 236 | }, 237 | "height": "20", 238 | "id": 6, 239 | "interval": null, 240 | "isNew": true, 241 | "links": [], 242 | "mappingType": 1, 243 | "mappingTypes": [ 244 | { 245 | "name": "value to text", 246 | "value": 1 247 | }, 248 | { 249 | "name": "range to text", 250 | "value": 2 251 | } 252 | ], 253 | "maxDataPoints": 100, 254 | 
"nullPointMode": "connected", 255 | "nullText": null, 256 | "postfix": "", 257 | "postfixFontSize": "50%", 258 | "prefix": "", 259 | "prefixFontSize": "50%", 260 | "rangeMaps": [ 261 | { 262 | "from": "null", 263 | "text": "N/A", 264 | "to": "null" 265 | } 266 | ], 267 | "span": 4, 268 | "sparkline": { 269 | "fillColor": "rgba(31, 118, 189, 0.18)", 270 | "full": false, 271 | "lineColor": "rgb(31, 120, 193)", 272 | "show": false 273 | }, 274 | "targets": [ 275 | { 276 | "expr": "sum(rate(container_cpu_user_seconds_total{image!=\"\"}[5m]) * 100)", 277 | "intervalFactor": 2, 278 | "legendFormat": "", 279 | "metric": "container_memory_usage_bytes", 280 | "refId": "A", 281 | "step": 240 282 | } 283 | ], 284 | "thresholds": "", 285 | "title": "Total CPU Usage", 286 | "transparent": true, 287 | "type": "singlestat", 288 | "valueFontSize": "80%", 289 | "valueMaps": [ 290 | { 291 | "op": "=", 292 | "text": "N/A", 293 | "value": "null" 294 | } 295 | ], 296 | "valueName": "current" 297 | } 298 | ], 299 | "title": "New row" 300 | }, 301 | { 302 | "collapse": false, 303 | "editable": true, 304 | "height": "250px", 305 | "panels": [ 306 | { 307 | "aliasColors": {}, 308 | "bars": false, 309 | "datasource": "${DS_PROMETHEUS}", 310 | "decimals": 2, 311 | "editable": true, 312 | "error": false, 313 | "fill": 1, 314 | "grid": { 315 | "threshold1": null, 316 | "threshold1Color": "rgba(216, 200, 27, 0.27)", 317 | "threshold2": null, 318 | "threshold2Color": "rgba(234, 112, 112, 0.22)" 319 | }, 320 | "id": 2, 321 | "isNew": true, 322 | "legend": { 323 | "alignAsTable": true, 324 | "avg": true, 325 | "current": true, 326 | "max": false, 327 | "min": false, 328 | "rightSide": true, 329 | "show": true, 330 | "total": false, 331 | "values": true 332 | }, 333 | "lines": true, 334 | "linewidth": 2, 335 | "links": [], 336 | "nullPointMode": "connected", 337 | "percentage": false, 338 | "pointradius": 5, 339 | "points": false, 340 | "renderer": "flot", 341 | "seriesOverrides": [], 342 | "span": 
12, 343 | "stack": false, 344 | "steppedLine": false, 345 | "targets": [ 346 | { 347 | "expr": "rate(container_cpu_user_seconds_total{image!=\"\"}[5m]) * 100", 348 | "intervalFactor": 2, 349 | "legendFormat": "{{name}}", 350 | "metric": "cpu", 351 | "refId": "A", 352 | "step": 10 353 | } 354 | ], 355 | "timeFrom": null, 356 | "timeShift": null, 357 | "title": "CPU Usage", 358 | "tooltip": { 359 | "msResolution": false, 360 | "shared": true, 361 | "sort": 0, 362 | "value_type": "cumulative" 363 | }, 364 | "transparent": false, 365 | "type": "graph", 366 | "xaxis": { 367 | "show": true 368 | }, 369 | "yaxes": [ 370 | { 371 | "format": "percent", 372 | "label": null, 373 | "logBase": 1, 374 | "max": null, 375 | "min": null, 376 | "show": true 377 | }, 378 | { 379 | "format": "short", 380 | "label": null, 381 | "logBase": 1, 382 | "max": null, 383 | "min": null, 384 | "show": true 385 | } 386 | ] 387 | } 388 | ], 389 | "title": "Row" 390 | }, 391 | { 392 | "collapse": false, 393 | "editable": true, 394 | "height": "250px", 395 | "panels": [ 396 | { 397 | "aliasColors": {}, 398 | "bars": false, 399 | "datasource": "${DS_PROMETHEUS}", 400 | "decimals": 2, 401 | "editable": true, 402 | "error": false, 403 | "fill": 1, 404 | "grid": { 405 | "threshold1": null, 406 | "threshold1Color": "rgba(216, 200, 27, 0.27)", 407 | "threshold2": null, 408 | "threshold2Color": "rgba(234, 112, 112, 0.22)" 409 | }, 410 | "id": 1, 411 | "isNew": true, 412 | "legend": { 413 | "alignAsTable": true, 414 | "avg": true, 415 | "current": true, 416 | "max": false, 417 | "min": false, 418 | "rightSide": true, 419 | "show": true, 420 | "total": false, 421 | "values": true 422 | }, 423 | "lines": true, 424 | "linewidth": 2, 425 | "links": [], 426 | "nullPointMode": "connected", 427 | "percentage": false, 428 | "pointradius": 5, 429 | "points": false, 430 | "renderer": "flot", 431 | "seriesOverrides": [], 432 | "span": 12, 433 | "stack": false, 434 | "steppedLine": false, 435 | "targets": [ 436 | { 
437 | "expr": "container_memory_usage_bytes{image!=\"\"}", 438 | "hide": false, 439 | "intervalFactor": 2, 440 | "legendFormat": "{{name}}", 441 | "metric": "container_memory_usage_bytes", 442 | "refId": "A", 443 | "step": 10 444 | } 445 | ], 446 | "timeFrom": null, 447 | "timeShift": null, 448 | "title": "Memory Usage", 449 | "tooltip": { 450 | "msResolution": false, 451 | "shared": true, 452 | "sort": 0, 453 | "value_type": "cumulative" 454 | }, 455 | "transparent": false, 456 | "type": "graph", 457 | "xaxis": { 458 | "show": true 459 | }, 460 | "yaxes": [ 461 | { 462 | "format": "bytes", 463 | "label": "", 464 | "logBase": 1, 465 | "max": null, 466 | "min": null, 467 | "show": true 468 | }, 469 | { 470 | "format": "short", 471 | "label": null, 472 | "logBase": 1, 473 | "max": null, 474 | "min": null, 475 | "show": false 476 | } 477 | ] 478 | } 479 | ], 480 | "title": "New row" 481 | }, 482 | { 483 | "collapse": false, 484 | "editable": true, 485 | "height": "250px", 486 | "panels": [ 487 | { 488 | "aliasColors": {}, 489 | "bars": false, 490 | "datasource": "${DS_PROMETHEUS}", 491 | "editable": true, 492 | "error": false, 493 | "fill": 1, 494 | "grid": { 495 | "threshold1": null, 496 | "threshold1Color": "rgba(216, 200, 27, 0.27)", 497 | "threshold2": null, 498 | "threshold2Color": "rgba(234, 112, 112, 0.22)" 499 | }, 500 | "id": 3, 501 | "isNew": true, 502 | "legend": { 503 | "avg": false, 504 | "current": false, 505 | "max": false, 506 | "min": false, 507 | "show": true, 508 | "total": false, 509 | "values": false 510 | }, 511 | "lines": true, 512 | "linewidth": 2, 513 | "links": [], 514 | "nullPointMode": "connected", 515 | "percentage": false, 516 | "pointradius": 5, 517 | "points": false, 518 | "renderer": "flot", 519 | "seriesOverrides": [], 520 | "span": 6, 521 | "stack": false, 522 | "steppedLine": false, 523 | "targets": [ 524 | { 525 | "expr": "irate(container_network_receive_bytes_total{image!=\"\"}[5m])", 526 | "intervalFactor": 2, 527 | 
"legendFormat": "{{name}}", 528 | "metric": "container_network_receive_bytes_total", 529 | "refId": "A", 530 | "step": 20 531 | } 532 | ], 533 | "timeFrom": null, 534 | "timeShift": null, 535 | "title": "Network Rx", 536 | "tooltip": { 537 | "msResolution": false, 538 | "shared": true, 539 | "sort": 0, 540 | "value_type": "cumulative" 541 | }, 542 | "type": "graph", 543 | "xaxis": { 544 | "show": true 545 | }, 546 | "yaxes": [ 547 | { 548 | "format": "Bps", 549 | "label": null, 550 | "logBase": 1, 551 | "max": null, 552 | "min": null, 553 | "show": true 554 | }, 555 | { 556 | "format": "short", 557 | "label": null, 558 | "logBase": 1, 559 | "max": null, 560 | "min": null, 561 | "show": true 562 | } 563 | ] 564 | }, 565 | { 566 | "aliasColors": {}, 567 | "bars": false, 568 | "datasource": "${DS_PROMETHEUS}", 569 | "editable": true, 570 | "error": false, 571 | "fill": 1, 572 | "grid": { 573 | "threshold1": null, 574 | "threshold1Color": "rgba(216, 200, 27, 0.27)", 575 | "threshold2": null, 576 | "threshold2Color": "rgba(234, 112, 112, 0.22)" 577 | }, 578 | "id": 4, 579 | "isNew": true, 580 | "legend": { 581 | "avg": false, 582 | "current": false, 583 | "max": false, 584 | "min": false, 585 | "show": true, 586 | "total": false, 587 | "values": false 588 | }, 589 | "lines": true, 590 | "linewidth": 2, 591 | "links": [], 592 | "nullPointMode": "connected", 593 | "percentage": false, 594 | "pointradius": 5, 595 | "points": false, 596 | "renderer": "flot", 597 | "seriesOverrides": [], 598 | "span": 6, 599 | "stack": false, 600 | "steppedLine": false, 601 | "targets": [ 602 | { 603 | "expr": "irate(container_network_transmit_bytes_total{image!=\"\"}[5m])", 604 | "intervalFactor": 2, 605 | "legendFormat": "{{name}}", 606 | "refId": "A", 607 | "step": 20 608 | } 609 | ], 610 | "timeFrom": null, 611 | "timeShift": null, 612 | "title": "Network Tx", 613 | "tooltip": { 614 | "msResolution": false, 615 | "shared": true, 616 | "sort": 0, 617 | "value_type": "cumulative" 618 | }, 
619 | "type": "graph", 620 | "xaxis": { 621 | "show": true 622 | }, 623 | "yaxes": [ 624 | { 625 | "format": "Bps", 626 | "label": null, 627 | "logBase": 1, 628 | "max": null, 629 | "min": null, 630 | "show": true 631 | }, 632 | { 633 | "format": "short", 634 | "label": null, 635 | "logBase": 1, 636 | "max": null, 637 | "min": null, 638 | "show": true 639 | } 640 | ] 641 | } 642 | ], 643 | "title": "New row" 644 | } 645 | ], 646 | "time": { 647 | "from": "now-3h", 648 | "to": "now" 649 | }, 650 | "timepicker": { 651 | "refresh_intervals": [ 652 | "5s", 653 | "10s", 654 | "30s", 655 | "1m", 656 | "5m", 657 | "15m", 658 | "30m", 659 | "1h", 660 | "2h", 661 | "1d" 662 | ], 663 | "time_options": [ 664 | "5m", 665 | "15m", 666 | "1h", 667 | "6h", 668 | "12h", 669 | "24h", 670 | "2d", 671 | "7d", 672 | "30d" 673 | ] 674 | }, 675 | "templating": { 676 | "list": [] 677 | }, 678 | "annotations": { 679 | "list": [] 680 | }, 681 | "refresh": "10s", 682 | "schemaVersion": 12, 683 | "version": 26, 684 | "links": [], 685 | "gnetId": 193 686 | } -------------------------------------------------------------------------------- /swarm/swarm.md: -------------------------------------------------------------------------------- 1 | # Docker Orchestration 2 | 3 | Hi, welcome to the Orchestration lab for DockerCon 2017! 4 | 5 | In this lab you will play around with the container orchestration features of Docker. You will deploy a simple application to a single host and learn how that works. Then, you will configure Docker Swarm Mode, and learn to deploy the same simple application across multiple hosts. You will then see how to scale the application and move the workload across different hosts easily. 
6 | 7 | > **Difficulty**: Beginner 8 | 9 | > **Time**: Approximately 30 minutes 10 | 11 | > **Tasks**: 12 | > 13 | > * [Section #1 - What is Orchestration](#basics) 14 | > * [Section #2 - Configure Swarm Mode](#start-cluster) 15 | > * [Section #3 - Deploy applications across multiple hosts](#multi-application) 16 | > * [Section #4 - Scale the application](#scale-application) 17 | > * [Section #5 - Drain a node and reschedule the containers](#recover-application) 18 | > * [Cleaning Up](#cleanup) 19 | 20 | ## Document conventions 21 | 22 | When you encounter a phrase in between `<` and `>` you are meant to substitute in a different value. 23 | 24 | For instance if you see `ssh @` you would actually type something like `ssh ubuntu@node0-a.ivaf2i2atqouppoxund0tvddsa.jx.internal.cloudapp.net` 25 | 26 | You will be asked to SSH into various nodes. These nodes are referred to as **node0-a**, **node1-b**, **node2-c**, etc. 27 | 28 | ## Prerequisites 29 | 30 | This lab requires three Linux nodes with Docker 17.03 (or higher) installed. 31 | 32 | Also, please make sure you can SSH into the Linux nodes. If you haven't already done so, please SSH in to **node0-a**, **node1-b**, and **node2-c**. 33 | 34 | You can do that by SSHing into **node0-a**. 35 | 36 | ``` 37 | $ ssh ubuntu@ 38 | ``` 39 | 40 | Then, **node1-b**. 41 | 42 | ``` 43 | $ ssh ubuntu@ 44 | ``` 45 | 46 | And, finally **node2-c**. 47 | 48 | ``` 49 | $ ssh ubuntu@ 50 | ``` 51 | 52 | # Section 1: What is Orchestration 53 | 54 | So, what is Orchestration anyways? Well, Orchestration is probably best described using an example. Lets say that you have an application that has high traffic along with high-availability requirements. Due to these requirements, you typically want to deploy across at least 3+ machines, so that in the event a host fails, your application will still be accessible from at least two others. 
Obviously, this is just an example and your use-case will likely have its own requirements, but you get the idea. 55 | 56 | Deploying your application without Orchestration is typically very time consuming and error prone, because you would have to manually SSH into each machine, start up your application, and then continually keep tabs on things to make sure it is running as you expect. 57 | 58 | But, with Orchestration tooling, you can typically off-load much of this manual work and let automation do the heavy lifting. One cool feature of Orchestration with Docker Swarm, is that you can deploy an application across many hosts with only a single command (once Swarm mode is enabled). Plus, if one of the supporting nodes dies in your Docker Swarm, other nodes will automatically pick up load, and your application will continue to hum along as usual. 59 | 60 | If you are typically only using `docker run` to deploy your applications, then you could likely really benefit from using Docker Compose, Docker Swarm mode, or both Docker Compose and Swarm together. 61 | 62 | # Section 2: Configure Swarm Mode 63 | 64 | Real-world applications are typically deployed across multiple hosts as discussed earlier. This improves application performance and availability, as well as allowing individual application components to scale independently. Docker has powerful native tools to help you do this. 65 | 66 | An example of running things manually and on a single host would be to create a new container on **node0-a** by running `docker run -dt ubuntu sleep infinity`.
67 | 68 | ``` 69 | $ docker run -dt ubuntu sleep infinity 70 | Unable to find image 'ubuntu:latest' locally 71 | latest: Pulling from library/ubuntu 72 | d54efb8db41d: Pull complete 73 | f8b845f45a87: Pull complete 74 | e8db7bf7c39f: Pull complete 75 | 9654c40e9079: Pull complete 76 | 6d9ef359eaaa: Pull complete 77 | Digest: sha256:dd7808d8792c9841d0b460122f1acf0a2dd1f56404f8d1e56298048885e45535 78 | Status: Downloaded newer image for ubuntu:latest 79 | 846af8479944d406843c90a39cba68373c619d1feaa932719260a5f5afddbf71 80 | ``` 81 | 82 | This command will create a new container based on the `ubuntu:latest` image and will run the `sleep` command to keep the container running in the background. You can verify our example container is up by running `docker ps` on **node0-a**. 83 | 84 | ``` 85 | $ docker ps 86 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 87 | 044bea1c2277 ubuntu "sleep infinity" 2 seconds ago Up 1 second distracted_mayer 88 | ``` 89 | 90 | But, this is only on a single node. What happens if this node goes down? Well, our application just dies and it is never restarted. To restore service, we would have to manually log into this machine, and start tweaking things to get it back up and running. So, it would be helpful if we had some type of system that would allow us to run this "sleep" application/service across many machines. 91 | 92 | In this section you will configure *Swarm Mode*. This is a new optional mode in which multiple Docker hosts form into a self-orchestrating group of engines called a *swarm*. Swarm mode enables new features such as *services* and *bundles* that help you deploy and manage multi-container apps across multiple Docker hosts. 93 | 94 | You will complete the following: 95 | 96 | - Configure *Swarm mode* 97 | - Run the app 98 | - Scale the app 99 | - Drain nodes for maintenance and reschedule containers 100 | 101 | For the remainder of this lab we will refer to *Docker native clustering* as ***Swarm mode***. 
The collection of Docker engines configured for Swarm mode will be referred to as the *swarm*. 102 | 103 | A swarm comprises one or more *Manager Nodes* and one or more *Worker Nodes*. The manager nodes maintain the state of the swarm and schedule application containers. The worker nodes run the application containers. As of Docker 1.12, no external backend, or 3rd party components, are required for a fully functioning swarm - everything is built-in! 104 | 105 | In this part of the demo you will use all three of the nodes in your lab. __node0-a__ will be the Swarm manager, while __node1-b__ and __node2-c__ will be worker nodes. Swarm mode supports highly available, redundant manager nodes, but for the purposes of this lab you will only deploy a single manager node. 106 | 107 | ## Step 2.1 - Create a Manager node 108 | 109 | If you haven't already done so, please SSH in to **node0-a**. 110 | 111 | ``` 112 | $ ssh ubuntu@ 113 | ``` 114 | 115 | In this step you'll initialize a new Swarm, join a single worker node, and verify the operations worked. 116 | 117 | Run `docker swarm init` on **node0-a**. 118 | 119 | ``` 120 | $ docker swarm init 121 | Swarm initialized: current node (6dlewb50pj2y66q4zi3egnwbi) is now a manager. 122 | 123 | To add a worker to this swarm, run the following command: 124 | 125 | docker swarm join \ 126 | --token SWMTKN-1-1wxyoueqgpcrc4xk2t3ec7n1poy75g4kowmwz64p7ulqx611ih-68pazn0mj8p4p4lnuf4ctp8xy \ 127 | 10.0.0.5:2377 128 | 129 | To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 130 | ``` 131 | 132 | You can run the `docker info` command to verify that **node0-a** was successfully configured as a swarm manager node.
133 | 134 | ``` 135 | $ docker info 136 | Containers: 2 137 | Running: 0 138 | Paused: 0 139 | Stopped: 2 140 | Images: 2 141 | Server Version: 17.03.1-ee-3 142 | Storage Driver: aufs 143 | Root Dir: /var/lib/docker/aufs 144 | Backing Filesystem: extfs 145 | Dirs: 13 146 | Dirperm1 Supported: true 147 | Logging Driver: json-file 148 | Cgroup Driver: cgroupfs 149 | Plugins: 150 | Volume: local 151 | Network: bridge host macvlan null overlay 152 | Swarm: active 153 | NodeID: rwezvezez3bg1kqg0y0f4ju22 154 | Is Manager: true 155 | ClusterID: qccn5eanox0uctyj6xtfvesy2 156 | Managers: 1 157 | Nodes: 1 158 | Orchestration: 159 | Task History Retention Limit: 5 160 | Raft: 161 | Snapshot Interval: 10000 162 | Number of Old Snapshots to Retain: 0 163 | Heartbeat Tick: 1 164 | Election Tick: 3 165 | Dispatcher: 166 | Heartbeat Period: 5 seconds 167 | CA Configuration: 168 | Expiry Duration: 3 months 169 | Node Address: 10.0.0.5 170 | Manager Addresses: 171 | 10.0.0.5:2377 172 | 173 | ``` 174 | 175 | The swarm is now initialized with **node0-a** as the only Manager node. In the next section you will add **node1-b** and **node2-c** as *Worker nodes*. 176 | 177 | ## Step 2.2 - Join Worker nodes to the Swarm 178 | 179 | You will perform the following procedure on **node1-b** and **node2-c**. Towards the end of the procedure you will switch back to **node0-a**. 180 | 181 | Open a new SSH session to __node1-b__ (Keep your SSH session to **node0-a** open in another tab or window). 182 | 183 | ``` 184 | $ ssh ubuntu@ 185 | ``` 186 | 187 | Now, take that entire `docker swarm join ...` command we copied earlier from `node0-a` where it was displayed as terminal output. We need to paste the copied command into the terminal of **node1-b** and **node2-c**. 188 | 189 | It should look something like this for **node1-b**. 
By the way, if the `docker swarm join ...` command scrolled off your screen already, you can run the `docker swarm join-token worker` command on the Manager node to get it again. 190 | 191 | ``` 192 | $ docker swarm join \ 193 | --token SWMTKN-1-1wxyoueqgpcrc4xk2t3ec7n1poy75g4kowmwz64p7ulqx611ih-68pazn0mj8p4p4lnuf4ctp8xy \ 194 | 10.0.0.5:2377 195 | ``` 196 | 197 | Again, ssh into **node2-c** and it should look something like this. 198 | 199 | ``` 200 | $ ssh ubuntu@ 201 | ``` 202 | 203 | ``` 204 | $ docker swarm join \ 205 | --token SWMTKN-1-1wxyoueqgpcrc4xk2t3ec7n1poy75g4kowmwz64p7ulqx611ih-68pazn0mj8p4p4lnuf4ctp8xy \ 206 | 10.0.0.5:2377 207 | ``` 208 | 209 | Once you have run this on **node1-b** and **node2-c**, switch back to **node0-a**, and run a `docker node ls` to verify that both nodes are part of the Swarm. You should see three nodes, **node0-a** as the Manager node and **node1-b** and **node2-c** both as Worker nodes. 210 | 211 | ``` 212 | $ docker node ls 213 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 214 | 6dlewb50pj2y66q4zi3egnwbi * node0-a Ready Active Leader 215 | ym6sdzrcm08s6ohqmjx9mk3dv node2-c Ready Active 216 | yu3hbegvwsdpy9esh9t2lr431 node1-b Ready Active 217 | ``` 218 | 219 | The `docker node ls` command shows you all of the nodes that are in the swarm as well as their roles in the swarm. The `*` identifies the node that you are issuing the command from. 220 | 221 | Congratulations! You have configured a swarm with one manager node and two worker nodes. 222 | 223 | # Section 3: Deploy applications across multiple hosts 224 | 225 | Now that you have a swarm up and running, it is time to deploy our really simple sleep application. 226 | 227 | You will perform the following procedure from **node0-a**. 228 | 229 | ## Step 3.1 - Deploy the application components as Docker services 230 | 231 | Our `sleep` application is becoming very popular on the internet (due to hitting Reddit and HN). People just love it. 
So, you are going to have to scale your application to meet peak demand. You will have to do this across multiple hosts for high availability too. We will use the concept of *Services* to scale our application easily and manage many containers as a single entity. 232 | 233 | > *Services* were a new concept in Docker 1.12. They work with swarms and are intended for long-running containers. 234 | 235 | You will perform this procedure from **node0-a**. 236 | 237 | Lets deploy `sleep` as a *Service* across our Docker Swarm. 238 | 239 | ``` 240 | $ docker service create --name sleep-app ubuntu sleep infinity 241 | of5rxsxsmm3asx53dqcq0o29c 242 | ``` 243 | 244 | Verify that the `service create` has been received by the Swarm manager. 245 | 246 | ``` 247 | $ docker service ls 248 | ID NAME MODE REPLICAS IMAGE 249 | of5rxsxsmm3a sleep-app replicated 1/1 ubuntu:latest 250 | ``` 251 | 252 | The state of the service may change a couple times until it is running. The image is being downloaded from Docker Store to the other engines in the Swarm. Once the image is downloaded the container goes into a running state on one of the three nodes. 253 | 254 | At this point it may not seem that we have done anything very differently than just running a `docker run ...`. We have again deployed a single container on a single host. The difference here is that the container has been scheduled on a swarm cluster. 255 | 256 | Well done. You have deployed the sleep-app to your new Swarm using Docker services. 257 | 258 | # Section 4: Scale the application 259 | 260 | Demand is crazy! Everybody loves your `sleep` app! It's time to scale out. 261 | 262 | One of the great things about *services* is that you can scale them up and down to meet demand. In this step you'll scale the service up and then back down. 263 | 264 | You will perform the following procedure from **node0-a**. 
265 | 266 | Scale the number of containers in the **sleep-app** service to 7 with the `docker service update --replicas 7 sleep-app` command. `replicas` is the term we use to describe identical containers providing the same service. 267 | 268 | ``` 269 | $ docker service update --replicas 7 sleep-app 270 | ``` 271 | 272 | The Swarm manager schedules so that there are 7 `sleep-app` containers in the cluster. These will be scheduled evenly across the Swarm members. 273 | 274 | We are going to use the `docker service ps sleep-app` command. If you do this quickly enough after using the `--replicas` option you can see the containers come up in real time. 275 | 276 | ``` 277 | $ docker service ps sleep-app 278 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 279 | 7k0flfh2wpt1 sleep-app.1 ubuntu:latest node0-a Running Running 9 minutes ago 280 | wol6bzq7xf0v sleep-app.2 ubuntu:latest node2-c Running Running 2 minutes ago 281 | id50tzzk1qbm sleep-app.3 ubuntu:latest node1-b Running Running 2 minutes ago 282 | ozj2itmio16q sleep-app.4 ubuntu:latest node2-c Running Running 2 minutes ago 283 | o4rk5aiely2o sleep-app.5 ubuntu:latest node1-b Running Running 2 minutes ago 284 | 35t0eamu0rue sleep-app.6 ubuntu:latest node1-b Running Running 2 minutes ago 285 | 44s8d59vr4a8 sleep-app.7 ubuntu:latest node0-a Running Running 2 minutes ago 286 | ``` 287 | 288 | Notice that there are now 7 containers listed. It may take a few seconds for the new containers in the service to all show as **RUNNING**. The `NODE` column tells us on which node a container is running. 289 | 290 | Scale the service back down to just four containers with the `docker service update --replicas 4 sleep-app` command. 291 | 292 | ``` 293 | $ docker service update --replicas 4 sleep-app 294 | ``` 295 | 296 | Verify that the number of containers has been reduced to 4 using the `docker service ps sleep-app` command.
297 | 298 | ``` 299 | $ docker service ps sleep-app 300 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 301 | 7k0flfh2wpt1 sleep-app.1 ubuntu:latest node0-a Running Running 13 minutes ago 302 | wol6bzq7xf0v sleep-app.2 ubuntu:latest node2-c Running Running 5 minutes ago 303 | 35t0eamu0rue sleep-app.6 ubuntu:latest node1-b Running Running 5 minutes ago 304 | 44s8d59vr4a8 sleep-app.7 ubuntu:latest node0-a Running Running 5 minutes ago 305 | ``` 306 | 307 | You have successfully scaled a swarm service up and down. 308 | 309 | # Section 5: Drain a node and reschedule the containers 310 | 311 | Your sleep-app has been doing amazing after hitting Reddit and HN. It's now number 1 on the Apple Store! You have scaled up during the holidays and down during the slow season. Now you are doing maintenance on one of your servers so you will need to gracefully take a server out of the swarm without interrupting service to your customers. 312 | 313 | 314 | Take a look at the status of your nodes again by running `docker node ls` on **node0-a**. 315 | 316 | ``` 317 | $ docker node ls 318 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 319 | 6dlewb50pj2y66q4zi3egnwbi * node0-a Ready Active Leader 320 | ym6sdzrcm08s6ohqmjx9mk3dv node2-c Ready Active 321 | yu3hbegvwsdpy9esh9t2lr431 node1-b Ready Active 322 | ``` 323 | 324 | You will be taking **node1-b** out of service for maintenance. 325 | 326 | If you haven't already done so, please SSH in to **node1-b**. 327 | 328 | ``` 329 | $ ssh ubuntu@ 330 | ``` 331 | 332 | Then lets see the containers that you have running there. 
333 | 334 | ``` 335 | $ docker ps 336 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 337 | 4e7ea1154ea4 ubuntu@sha256:dd7808d8792c9841d0b460122f1acf0a2dd1f56404f8d1e56298048885e45535 "sleep infinity" 9 minutes ago Up 9 minutes sleep-app.6.35t0eamu0rueeozz0pj2xaesi 338 | ``` 339 | 340 | You can see that we have one of the sleep-app containers running here (your output might look different though). 341 | 342 | Now let's jump back to **node0-a** (the Swarm manager) and take **node1-b** out of service. To do that, let's run `docker node ls` again. 343 | 344 | ``` 345 | $ docker node ls 346 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 347 | 6dlewb50pj2y66q4zi3egnwbi * node0-a Ready Active Leader 348 | ym6sdzrcm08s6ohqmjx9mk3dv node2-c Ready Active 349 | yu3hbegvwsdpy9esh9t2lr431 node1-b Ready Active 350 | ``` 351 | 352 | We are going to take the **ID** for **node1-b** and run `docker node update --availability drain yu3hbegvwsdpy9esh9t2lr431`. We are using the **node1-b** host **ID** as input into our `drain` command. 353 | 354 | ``` 355 | $ docker node update --availability drain yu3hbegvwsdpy9esh9t2lr431 356 | ``` 357 | Check the status of the nodes. 358 | 359 | ``` 360 | $ docker node ls 361 | ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS 362 | 6dlewb50pj2y66q4zi3egnwbi * node0-a Ready Active Leader 363 | ym6sdzrcm08s6ohqmjx9mk3dv node2-c Ready Active 364 | yu3hbegvwsdpy9esh9t2lr431 node1-b Ready Drain 365 | ``` 366 | 367 | Node **node1-b** is now in the `Drain` state. 368 | 369 | 370 | Switch back to **node1-b** and see what is running there by running `docker ps`. 371 | 372 | ``` 373 | $ docker ps 374 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 375 | ``` 376 | 377 | **node1-b** does not have any containers running on it. 378 | 379 | Lastly, check the service again on **node0-a** to make sure that the containers were rescheduled. You should see all four containers running on the remaining two nodes. 
380 | 381 | ``` 382 | $ docker service ps sleep-app 383 | ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS 384 | 7k0flfh2wpt1 sleep-app.1 ubuntu:latest node0-a Running Running 25 minutes ago 385 | wol6bzq7xf0v sleep-app.2 ubuntu:latest node2-c Running Running 18 minutes ago 386 | s3548wki7rlk sleep-app.6 ubuntu:latest node2-c Running Running 3 minutes ago 387 | 35t0eamu0rue \_ sleep-app.6 ubuntu:latest node1-b Shutdown Shutdown 3 minutes ago 388 | 44s8d59vr4a8 sleep-app.7 ubuntu:latest node0-a Running Running 18 minutes ago 389 | ``` 390 | 391 | # Cleaning Up 392 | 393 | Execute the `docker service rm sleep-app` command on **node0-a** to remove the service called *sleep-app*. 394 | 395 | ``` 396 | $ docker service rm sleep-app 397 | ``` 398 | 399 | Execute the `docker ps` command on **node0-a** to get a list of running containers. 400 | 401 | ``` 402 | $ docker ps 403 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 404 | 044bea1c2277 ubuntu "sleep infinity" 17 minutes ago Up 17 minutes distracted_mayer 405 | ``` 406 | 407 | You can use the `docker kill <CONTAINER ID>` command on **node0-a** to kill the sleep container we started at the beginning. 408 | 409 | ``` 410 | $ docker kill 044bea1c2277 411 | ``` 412 | 413 | Finally, let's remove node0-a, node1-b, and node2-c from the Swarm. We can use the `docker swarm leave --force` command to do that. 414 | 415 | Let's run `docker swarm leave --force` on **node0-a**. 416 | 417 | ``` 418 | $ docker swarm leave --force 419 | ``` 420 | 421 | Then, run `docker swarm leave --force` on **node1-b**. 422 | 423 | ``` 424 | $ docker swarm leave --force 425 | ``` 426 | 427 | Finally, run `docker swarm leave --force` on **node2-c**. 428 | 429 | ``` 430 | $ docker swarm leave --force 431 | ``` 432 | 433 | Congratulations! You've completed this lab. You now know how to build a swarm, deploy applications as collections of services, and scale individual services up and down. 
434 | --------------------------------------------------------------------------------