├── .gitignore ├── README.md ├── ansible ├── README.md └── centos7 │ ├── host │ └── playbook │ ├── docker.yml │ ├── java-docker.yml │ └── java.yml ├── ecommerce-build ├── .env ├── Dockerfile └── docker-compose-devel.yml ├── elkstack └── docker-compose.yml ├── git └── Dockerfile ├── haproxy ├── bchaproxy.cfg ├── config │ └── haproxy.cfg └── docker-compose.yml ├── java └── Dockerfile ├── jenkins └── docker-compose.yml ├── kali-linux ├── Dockerfile └── docker-compose.yml ├── letsencrypt └── docker-compose.yml ├── mysql └── docker-compose.yml ├── nginx ├── Dockerfile ├── default.conf ├── docker-compose-load-balancer.yml ├── docker-compose.yml ├── node.conf └── prod.conf ├── nginxconsul ├── README.md ├── app │ └── docker-compose.yml ├── consul-template.service ├── consul_watches_config.json ├── docker-compose.yml ├── nginx.conf ├── nginx.conf.tmp ├── nginx.service └── script.sh ├── nginxlb ├── consul-template.service ├── consul_watches_config.json ├── docker-compose.yml ├── nginx.conf ├── nginx.service ├── reload-nginx.sh └── script.sh ├── nginxplus ├── docker-compose.yml ├── nginx-repo.crt └── nginx-repo.key ├── nodejs ├── Dockerfile └── Dockerfile~ ├── nsq ├── .gitignore ├── client │ └── main.go ├── docker-compose.yml ├── nodejs │ ├── publish.js │ ├── server.js │ └── subscribe.js └── server │ └── main.go ├── postgresql ├── Dockerfile └── docker-compose.yml ├── prometheus ├── alert │ ├── alertmanagerdata │ │ └── config.yml │ └── docker-compose.yml ├── alertmanagerdata │ └── config.yml ├── docker-compose.yml ├── grafana │ └── docker-compose.yml ├── prometheus-node-exporter │ └── docker-compose.yml └── prometheusdata │ ├── prometheus.rules │ └── prometheus.yml ├── redis ├── docker-compose.yml └── redis.conf ├── registry └── docker-compose.yml ├── sonarqube └── docker-compose.yml ├── staging ├── cert_export_myCa.crt ├── cert_export_server.crt ├── cert_export_trueoffice.key ├── docker-compose.yml └── validation_engine-1.0.2-SNAPSHOT.jar ├── tomcat └── docker-compose.yml 
└── website ├── docker-compose-devel.yml ├── docker-compose.yml ├── hhvm ├── Dockerfile ├── hhvm.ini ├── php.ini └── supervisor-hhvm.sh └── nginx ├── Dockerfile ├── cms.conf ├── default.conf ├── nginx.conf └── website.conf /.gitignore: -------------------------------------------------------------------------------- 1 | website/data/.env 2 | website/data/Dockerfile 3 | website/data/config.js 4 | website/data/constants.php 5 | Dockerfile 6 | .idea/ 7 | mongodb/ 8 | validation-engine/ 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # docker 2 | all configuration docker 3 | -------------------------------------------------------------------------------- /ansible/README.md: -------------------------------------------------------------------------------- 1 | # ansible 2 | all configuration docker 3 | -------------------------------------------------------------------------------- /ansible/centos7/host: -------------------------------------------------------------------------------- 1 | [pricebook-staging-mars] 2 | 52.220.162.226 3 | [pricebook-staging-pluto] 4 | 52.221.152.157 -------------------------------------------------------------------------------- /ansible/centos7/playbook/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: pricebook-staging-pluto 3 | become: yes 4 | #connection: local 5 | tasks: 6 | 7 | - name: install the latest version of Docker 8 | yum: 9 | name: docker 10 | state: latest 11 | 12 | - name: install docker-compose via source 13 | get_url: 14 | url: https://github.com/docker/compose/releases/download/1.11.2/docker-compose-Linux-x86_64 15 | dest: /opt/docker-compose 16 | 17 | - name: give permission to docker-compose 18 | file: 19 | path: /opt/docker-compose 20 | mode: 755 21 | 22 | - name: Copy docker-compose file to bin directory 23 | file: 24 | src: 
/opt/docker-compose 25 | dest: /usr/bin/docker-compose 26 | state: link 27 | 28 | -------------------------------------------------------------------------------- /ansible/centos7/playbook/java-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: staging 3 | tasks: 4 | - name: Download Java JRE 5 | get_url: 6 | url: http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jre-8u60-linux-x64.rpm 7 | dest: /opt/jre-8u60-linux-x64.rpm 8 | headers: 'Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie' 9 | 10 | - name: install Java JRE rpm from a local file 11 | yum: 12 | name: /opt/jre-8u60-linux-x64.rpm 13 | state: present 14 | 15 | 16 | - name: Download Java JDK 17 | get_url: 18 | url: http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64.rpm 19 | dest: /opt/jdk-8u60-linux-x64.rpm 20 | headers: 'Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie' 21 | 22 | 23 | - name: Install Java JDK rpm from a local file 24 | yum: 25 | name: /opt/jdk-8u60-linux-x64.rpm 26 | state: present 27 | 28 | 29 | - name: install the latest version of Docker 30 | yum: 31 | name: docker 32 | state: latest 33 | 34 | - name: install docker-compose via source 35 | get_url: 36 | url: https://github.com/docker/compose/releases/download/1.11.2/docker-compose-Linux-x86_64 37 | dest: /opt/docker-compose 38 | 39 | - name: give permission to docker-compose 40 | file: 41 | path: /opt/docker-compose 42 | mode: 775 43 | 44 | - name: Copy docker-compose file to bin directory 45 | file: 46 | src: /opt/docker-compose 47 | dest: /usr/bin/docker-compose 48 | state: link 49 | 50 | - name: install unzip 51 | yum: 52 | name: unzip 53 | state: latest 54 | 55 | - name: download and unzip apache maven 56 | unarchive: 57 | #src: http://mirror.wanxp.id/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.zip 58 | src: 
http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.zip 59 | dest: /opt/ 60 | remote_src: True 61 | 62 | - name: copy apache maven to bin directory 63 | file: 64 | src: /opt/apache-maven-3.3.9/bin/mvn 65 | dest: /usr/local/bin/mvn 66 | mode: 775 67 | state: link 68 | -------------------------------------------------------------------------------- /ansible/centos7/playbook/java.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: aws-jenkinsbuild 3 | become: yes 4 | tasks: 5 | - name: Download Java JDK 6 | get_url: 7 | url: http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64.rpm 8 | dest: /opt/jdk-8u60-linux-x64.rpm 9 | headers: 'Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie' 10 | 11 | 12 | - name: Install Java JDK rpm from a local file 13 | yum: 14 | name: /opt/jdk-8u60-linux-x64.rpm 15 | state: present 16 | 17 | - name: download and unzip apache maven 18 | unarchive: 19 | src: http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.zip 20 | dest: /opt/ 21 | remote_src: yes 22 | 23 | - name: copy apache maven to bin directory 24 | file: 25 | src: /opt/apache-maven-3.3.9/bin/mvn 26 | dest: /usr/local/bin/mvn 27 | mode: 775 28 | state: link 29 | -------------------------------------------------------------------------------- /ecommerce-build/.env: -------------------------------------------------------------------------------- 1 | APP_ENV=production 2 | APP_PORT=8085 3 | MONGODB_HOST=mongodbs 4 | MONGODB_PORT=27017 5 | MONGODB_DOCUMENT=ecommerce 6 | MONGODB_DOCUMENT_TEST=ecommerce-test 7 | -------------------------------------------------------------------------------- /ecommerce-build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM secret06/nodejs:3.0 2 | 3 | MAINTAINER agung.julisman@yahoo.com 4 | 5 | RUN git clone 
https://github.com/julisman/ecommerce.git /ecommerce/ 6 | RUN cp -R /ecommerce/* /app/ 7 | 8 | COPY .env . 9 | 10 | RUN npm install --production 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /ecommerce-build/docker-compose-devel.yml: -------------------------------------------------------------------------------- 1 | 2 | api: 3 | container_name: ecommerce2 4 | image: secret06/ecommercebuild:0.1 5 | command : pm2 start app.js --no-daemon --watch -i max --max-memory-restart 3G --merge-logs --log-date-format="YYYY-MM-DD HH:mm Z" 6 | restart: always 7 | ports: 8 | - "8085:8085" 9 | 10 | links: 11 | - mongodbs 12 | 13 | mongodbs: 14 | container_name: mongodbecommerce2 15 | restart: always 16 | volumes: 17 | - /var/test:/data/db 18 | image: mongo:3.2 19 | ports: 20 | - "27017:27017" 21 | -------------------------------------------------------------------------------- /elkstack/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | elasticsearch: 5 | build: elasticsearch/ 6 | ports: 7 | - "9200:9200" 8 | - "9300:9300" 9 | environment: 10 | ES_JAVA_OPTS: "-Xms1g -Xmx1g" 11 | networks: 12 | - docker_elk 13 | logstash: 14 | build: logstash/ 15 | command: -f /etc/logstash/conf.d/ 16 | volumes: 17 | - ./logstash/config:/etc/logstash/conf.d 18 | ports: 19 | - "5000:5000" 20 | networks: 21 | - docker_elk 22 | depends_on: 23 | - elasticsearch 24 | kibana: 25 | build: kibana/ 26 | volumes: 27 | - ./kibana/config/:/etc/kibana/ 28 | ports: 29 | - "5601:5601" 30 | networks: 31 | - docker_elk 32 | depends_on: 33 | - elasticsearch 34 | 35 | networks: 36 | docker_elk: 37 | driver: bridge -------------------------------------------------------------------------------- /git/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | MAINTAINER Luke Crooks "agung.julisman@yahoo.com" 4 | 5 | 
# Update aptitude with new repo 6 | RUN apt-get update 7 | 8 | # Install software 9 | RUN apt-get install -y git 10 | # Make ssh dir 11 | RUN mkdir /root/.ssh/ 12 | 13 | # Copy over private key, and set permissions 14 | ADD id_rsa /root/.ssh/id_rsa 15 | 16 | # Create known_hosts 17 | RUN touch /root/.ssh/known_hosts 18 | # Add bitbuckets key 19 | RUN ssh-keyscan bitbucket.org >> /root/.ssh/known_hosts 20 | 21 | # Clone the conf files into the docker container 22 | RUN git clone git@bitbucket.org:User/repo.git -------------------------------------------------------------------------------- /haproxy/bchaproxy.cfg: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Truemoney API HAProxy configuration 3 | # Version 20160223 4 | #--------------------------------------------------------------------- 5 | global 6 | log 127.0.0.1 local2 7 | #log /var/log/haproxy.log local4 8 | chroot /var/lib/haproxy 9 | pidfile /var/run/haproxy.pid 10 | maxconn 100000 11 | stats socket /run/haproxy/admin.sock mode 660 level admin 12 | stats timeout 30s 13 | 14 | defaults 15 | mode http 16 | log global 17 | option httplog 18 | maxconn 10000 19 | timeout connect 5000 20 | timeout client 50000 21 | timeout server 50000 22 | 23 | frontend webapi 24 | bind *:80 25 | mode http 26 | option httplog 27 | 28 | acl APIANDROID url_beg -i /Android 29 | acl APIEDC url_beg -i /APIEDC 30 | acl APICASH2CASH url_beg -i /api-cash2cash 31 | 32 | use_backend APIANDROID if APIANDROID 33 | use_backend APIEDC if APIEDC 34 | use_backend APICASH2CASH if APICASH2CASH 35 | 36 | backend APIANDROID 37 | balance roundrobin 38 | hash-type consistent 39 | option httpchk HEAD /Android HTTP/1.0 40 | cookie SRVNAME insert 41 | server API-ANDROID_1 172.16.50.135:32771 check 42 | server API-ANDROID_2 172.16.50.135:32772 check 43 | server API-ANDROID_3 172.16.50.135:32773 check 44 | server API-ANDROID_4 
172.16.50.135:32774 check 45 | server API-ANDROID_5 172.16.50.135:32775 check 46 | server API-ANDROID_6 172.16.50.135:32776 check 47 | 48 | backend APIEDC 49 | balance roundrobin 50 | hash-type consistent 51 | option httpchk HEAD /APIEDC HTTP/1.0 52 | cookie SRVNAME insert 53 | server API-EDC_1 172.16.50.135:32871 check 54 | server API-EDC_2 172.16.50.135:32872 check 55 | server API-EDC_3 172.16.50.135:32873 check 56 | server API-EDC_4 172.16.50.135:32874 check 57 | server API-EDC_5 172.16.50.135:32875 check 58 | server API-EDC_6 172.16.50.135:32876 check 59 | 60 | backend APICASH2CASH 61 | balance roundrobin 62 | hash-type consistent 63 | option httpchk HEAD /api-cash2cash HTTP/1.0 64 | cookie SRVNAME insert 65 | server API-EDC_1 172.16.50.135:32971 check 66 | server API-EDC_2 172.16.50.135:32972 check 67 | server API-EDC_3 172.16.50.135:32973 check 68 | server API-EDC_4 172.16.50.135:32974 check 69 | server API-EDC_5 172.16.50.135:32975 check 70 | server API-EDC_6 172.16.50.135:32976 check 71 | 72 | listen stats 73 | bind 0.0.0.0:5000 74 | stats enable 75 | stats uri / 76 | stats hide-version 77 | stats auth someuser:password -------------------------------------------------------------------------------- /haproxy/config/haproxy.cfg: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Truemoney API HAProxy configuration 3 | # Version 20160223 4 | #--------------------------------------------------------------------- 5 | global 6 | log 127.0.0.1 local2 7 | #log /var/log/haproxy.log local4 8 | chroot /var/lib/haproxy 9 | pidfile /var/run/haproxy.pid 10 | maxconn 100000 11 | stats socket /run/haproxy/admin.sock mode 660 level admin 12 | stats timeout 30s 13 | 14 | defaults 15 | mode http 16 | log global 17 | option httplog 18 | maxconn 10000 19 | timeout connect 5000 20 | timeout client 50000 21 | timeout server 50000 22 | 23 | frontend webapi 24 | bind *:80 
25 | mode http 26 | option httplog 27 | 28 | acl APIANDROID url_beg -i /Android 29 | acl APIEDC url_beg -i /APIEDC 30 | acl APICASH2CASH url_beg -i /api-cash2cash 31 | acl APICHANGESTATUS url_beg -i /APIChangeStatus 32 | acl APIWEBSITE url_beg -i /api_web_app 33 | acl WEBSITE url_beg -i /truemoney 34 | acl BOT url_beg -i /BackOfficeTools 35 | acl APIBIMASAKTI url_beg -i /APIBimasakti 36 | acl APIGCM url_beg -i /api_gcm 37 | 38 | use_backend APIANDROID if APIANDROID 39 | use_backend APIEDC if APIEDC 40 | use_backend APICASH2CASH if APICASH2CASH 41 | use_backend APICHANGESTATUS if APICHANGESTATUS 42 | use_backend APIWEBSITE if APIWEBSITE 43 | use_backend WEBSITE if WEBSITE 44 | use_backend BOT if BOT 45 | use_backend APIBIMASAKTI if APIBIMASAKTI 46 | use_backend APIGCM if APIGCM 47 | 48 | 49 | backend APIANDROID 50 | balance roundrobin 51 | hash-type consistent 52 | option httpchk HEAD /Android HTTP/1.0 53 | cookie SRVNAME insert 54 | server API-ANDROID_1 172.16.50.135:32771 check 55 | server API-ANDROID_2 172.16.50.135:32772 check 56 | server API-ANDROID_3 172.16.50.135:32773 check 57 | server API-ANDROID_4 172.16.50.135:32774 check 58 | server API-ANDROID_5 172.16.50.135:32775 check 59 | server API-ANDROID_6 172.16.50.135:32776 check 60 | 61 | backend APIEDC 62 | balance roundrobin 63 | hash-type consistent 64 | option httpchk HEAD /APIEDC HTTP/1.0 65 | cookie SRVNAME insert 66 | server API-EDC_1 172.16.50.135:32871 check 67 | server API-EDC_2 172.16.50.135:32872 check 68 | server API-EDC_3 172.16.50.135:32873 check 69 | server API-EDC_4 172.16.50.135:32874 check 70 | server API-EDC_5 172.16.50.135:32875 check 71 | server API-EDC_6 172.16.50.135:32876 check 72 | 73 | backend APICASH2CASH 74 | balance roundrobin 75 | hash-type consistent 76 | option httpchk HEAD /api-cash2cash HTTP/1.0 77 | cookie SRVNAME insert 78 | server API-CASH2CASH_1 172.16.50.135:32971 check 79 | server API-CASH2CASH_2 172.16.50.135:32972 check 80 | server API-CASH2CASH_3 172.16.50.135:32973 
check 81 | server API-CASH2CASH_4 172.16.50.135:32974 check 82 | server API-CASH2CASH_5 172.16.50.135:32975 check 83 | server API-CASH2CASH_6 172.16.50.135:32976 check 84 | 85 | backend APICHANGESTATUS 86 | balance roundrobin 87 | hash-type consistent 88 | option httpchk HEAD /APIChangeStatus HTTP/1.0 89 | cookie SRVNAME insert 90 | server API-CHANGESTATUS_1 172.16.50.135:32671 check 91 | server API-CHANGESTATUS_2 172.16.50.135:32672 check 92 | server API-CHANGESTATUS_3 172.16.50.135:32673 check 93 | server API-CHANGESTATUS_4 172.16.50.135:32674 check 94 | server API-CHANGESTATUS_5 172.16.50.135:32675 check 95 | server API-CHANGESTATUS_6 172.16.50.135:32676 check 96 | 97 | backend APIWEBSITE 98 | balance roundrobin 99 | hash-type consistent 100 | option httpchk HEAD /api_web_app HTTP/1.0 101 | cookie SRVNAME insert 102 | server API-WEBSITE_1 172.16.50.135:32571 check 103 | server API-WEBSITE_2 172.16.50.135:32572 check 104 | server API-WEBSITE_3 172.16.50.135:32573 check 105 | server API-WEBSITE_4 172.16.50.135:32574 check 106 | server API-WEBSITE_5 172.16.50.135:32575 check 107 | server API-WEBSITE_6 172.16.50.135:32576 check 108 | 109 | backend WEBSITE 110 | balance roundrobin 111 | hash-type consistent 112 | option httpchk HEAD /truemoney HTTP/1.0 113 | cookie SRVNAME insert 114 | server WEBSITE_1 172.16.50.135:32584 check 115 | 116 | backend BOT 117 | balance roundrobin 118 | hash-type consistent 119 | option httpchk HEAD /BackOfficeTools HTTP/1.0 120 | cookie SRVNAME insert 121 | server BOT_1 172.16.50.136:32771 check 122 | 123 | backend APIBIMASAKTI 124 | balance roundrobin 125 | hash-type consistent 126 | option httpchk HEAD /APIBimasakti HTTP/1.0 127 | cookie SRVNAME insert 128 | server APIBIMASAKTI 172.16.50.135:32371 check 129 | 130 | backend APIGCM 131 | balance roundrobin 132 | hash-type consistent 133 | option httpchk HEAD /api_gcm HTTP/1.0 134 | cookie SRVNAME insert 135 | server APIGCM 172.16.50.135:31771 check 136 | 137 | listen stats 138 | bind 
0.0.0.0:5000 139 | stats enable 140 | stats uri / -------------------------------------------------------------------------------- /haproxy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | haproxy: 2 | container_name: haproxy 3 | image: haproxy:1.7-alpine 4 | restart: always 5 | ports: 6 | - "80:80" 7 | volumes: 8 | - /run/haproxy/:/run/haproxy:rw 9 | - ./config:/usr/local/etc/haproxy:rw 10 | - /var/lib/haproxy:/var/lib/haproxy 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /java/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM phusion/baseimage:0.9.17 2 | 3 | MAINTAINER Agung Julisman 4 | 5 | ENV TZ=Asia/Jakarta 6 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 7 | 8 | RUN echo "deb http://archive.ubuntu.com/ubuntu trusty main universe" > /etc/apt/sources.list 9 | 10 | RUN apt-get -y update 11 | 12 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y -q python-software-properties software-properties-common 13 | 14 | ENV JAVA_VER 8 15 | ENV JAVA_HOME /usr/lib/jvm/java-8-oracle 16 | 17 | RUN echo 'deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main' >> /etc/apt/sources.list && \ 18 | echo 'deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main' >> /etc/apt/sources.list && \ 19 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C2518248EEA14886 && \ 20 | apt-get update && \ 21 | echo oracle-java${JAVA_VER}-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections && \ 22 | apt-get install -y --force-yes --no-install-recommends oracle-java${JAVA_VER}-installer oracle-java${JAVA_VER}-set-default && \ 23 | apt-get clean && \ 24 | rm -rf /var/cache/oracle-jdk${JAVA_VER}-installer 25 | 26 | RUN update-java-alternatives -s java-8-oracle 27 | 28 | RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-oracle" >> 
~/.bashrc 29 | 30 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 31 | 32 | CMD ["/sbin/my_init"] -------------------------------------------------------------------------------- /jenkins/docker-compose.yml: -------------------------------------------------------------------------------- 1 | newjenkins: 2 | container_name: newjenkins 3 | image: jenkinsci/jenkins 4 | restart: always 5 | ports: 6 | - "8080:8080" 7 | - "50000:50000" 8 | volumes: 9 | - /var/jenkins_home:/var/jenkins_home:rw 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /kali-linux/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM kalilinux/kali-linux-docker 2 | 3 | RUN apt-get -y update 4 | RUN apt-get -y install net-tools && \ 5 | apt-get install -y aircrack-ng 6 | 7 | RUN apt-get install -y pciutils 8 | 9 | RUN apt-get install -y netdiscover 10 | RUN apt-get -y update 11 | RUN apt-get install -y nmap 12 | RUN apt-get install -y dsniff 13 | RUN apt-get install -y arpspoof -------------------------------------------------------------------------------- /kali-linux/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | secret06kalilinux: 4 | container_name: secret06kalilinux 5 | image: secret06/kalilinux:0.3 6 | network_mode: host 7 | privileged: true 8 | command: tail -f /dev/null 9 | volumes: 10 | - ./capt:/home:rw -------------------------------------------------------------------------------- /letsencrypt/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | 4 | nginx: 5 | image: nginx 6 | volumes: 7 | - "./etc/nginx/conf.d/:/etc/nginx/conf.d/" 8 | environment: 9 | VIRTUAL_HOST: myafin.com 10 | LETSENCRYPT_HOST: myafin.com 11 | LETSENCRYPT_EMAIL: myafin@example.com 12 | 13 | nginx-proxy: 14 | image: 
jwilder/nginx-proxy 15 | ports: 16 | - "80:80" 17 | - "443:443" 18 | volumes: 19 | - "./nginx/vhost.d:/etc/nginx/vhost.d" 20 | - "./nginx/html:/usr/share/nginx/html" 21 | - "./nginx/certs:/etc/nginx/certs" 22 | - "/var/run/docker.sock:/tmp/docker.sock:ro" 23 | 24 | letsencrypt-nginx-proxy-companion: 25 | image: jrcs/letsencrypt-nginx-proxy-companion 26 | volumes: 27 | - "/var/run/docker.sock:/var/run/docker.sock:ro" 28 | volumes_from: 29 | - "nginx-proxy" 30 | -------------------------------------------------------------------------------- /mysql/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | db: 5 | container_name: mysql 6 | image: mysql:5.7 7 | volumes: 8 | - db_data:/var/lib/mysql 9 | restart: always 10 | environment: 11 | MYSQL_ROOT_PASSWORD: changeit 12 | MYSQL_DATABASE: changeit 13 | MYSQL_USER: changeit 14 | MYSQL_PASSWORD: changeit 15 | 16 | volumes: 17 | db_data: -------------------------------------------------------------------------------- /nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | MAINTAINER agung 3 | 4 | RUN mkdir -p /etc/nginx/conf.d 5 | 6 | RUN rm -f /etc/nginx/nginx.conf 7 | 8 | ADD templates/config/nginx.conf /etc/nginx/nginx.conf 9 | 10 | EXPOSE 80 11 | EXPOSE 443 -------------------------------------------------------------------------------- /nginx/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/log/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html; 10 | index index.html index.htm; 11 | } 12 | 13 | #error_page 404 /404.html; 14 | 15 | # redirect server error pages to the static page /50x.html 16 | # 17 | error_page 500 502 503 504 /50x.html; 18 | location = /50x.html { 19 | root 
/usr/share/nginx/html; 20 | } 21 | 22 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 23 | # 24 | #location ~ \.php$ { 25 | # proxy_pass http://127.0.0.1; 26 | #} 27 | 28 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 29 | # 30 | #location ~ \.php$ { 31 | # root html; 32 | # fastcgi_pass 127.0.0.1:9000; 33 | # fastcgi_index index.php; 34 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 35 | # include fastcgi_params; 36 | #} 37 | 38 | # deny access to .htaccess files, if Apache's document root 39 | # concurs with nginx's one 40 | # 41 | #location ~ /\.ht { 42 | # deny all; 43 | #} 44 | } 45 | -------------------------------------------------------------------------------- /nginx/docker-compose-load-balancer.yml: -------------------------------------------------------------------------------- 1 | nginx: 2 | build: ./nginx 3 | links: 4 | - node1:node1 5 | - node2:node2 6 | - node3:node3 7 | ports: 8 | - "80:80" 9 | node1: 10 | build: ./api_courier 11 | ports: 12 | - "8084" 13 | node2: 14 | build: ./api_courier 15 | ports: 16 | - "8084" 17 | node3: 18 | build: ./api_courier 19 | ports: 20 | - "8084" -------------------------------------------------------------------------------- /nginx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | nginx: 2 | container_name: nginx 3 | image: secret06/nginx 4 | restart: always 5 | ports: 6 | - "80:80" 7 | - "443:443" 8 | volumes: 9 | - /var/www/html:/var/www/html:rw 10 | - /mnt/docker/nginx/conf.d:/etc/nginx/conf.d:rw 11 | links: 12 | - hhvm 13 | 14 | hhvm: 15 | container_name: hhvm 16 | restart: always 17 | image: secret06/hhvm:2.0 18 | command: hhvm --mode server --config /etc/hhvm/hhvm.ini 19 | volumes: 20 | - /var/www/html:/var/www/html:rw 21 | 22 | -------------------------------------------------------------------------------- /nginx/node.conf: 
-------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | 4 | server_name example.com; 5 | 6 | location / { 7 | proxy_pass http://APP_PRIVATE_IP_ADDRESS:8080; 8 | proxy_http_version 1.1; 9 | proxy_set_header Upgrade $http_upgrade; 10 | proxy_set_header Connection 'upgrade'; 11 | proxy_set_header Host $host; 12 | proxy_cache_bypass $http_upgrade; 13 | } 14 | } -------------------------------------------------------------------------------- /nginx/prod.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name yourdomain.com; 4 | return 301 https://www.yourdomain.com$request_uri; 5 | } 6 | 7 | server { 8 | listen 80; 9 | server_name www.yourdomain.com; 10 | return 301 https://www.yourdomain.com$request_uri; 11 | } 12 | 13 | server { 14 | listen 443 ssl; 15 | server_name www.yourdomain.com; 16 | server_tokens off; 17 | client_max_body_size 20M; 18 | 19 | gzip on; 20 | gzip_disable "msie6"; 21 | gzip_comp_level 6; 22 | gzip_min_length 1100; 23 | gzip_buffers 16 8k; 24 | gzip_proxied any; 25 | gzip_types 26 | text/plain 27 | text/css 28 | text/js 29 | text/xml 30 | text/javascript 31 | application/javascript 32 | application/x-javascript 33 | application/json 34 | application/xml 35 | application/xml+rss; 36 | 37 | root /var/www/php/beta/public; 38 | index index.php; 39 | 40 | ssl_certificate /etc/nginx/conf.d/certs/wildcard.yourdomain.com.crt; 41 | ssl_certificate_key /etc/nginx/conf.d/certs/wildcard.yourdomain.com.key; 42 | 43 | error_log /var/log/nginx/prod-error.log error; 44 | access_log /var/log/nginx/prod-access.log; 45 | 46 | #error_log /var/log/nginx/error.log error; 47 | #access_log /var/log/nginx/access.log; 48 | 49 | location ~* \.(?:ico|css|js|gif|jpe?g|png)$ { 50 | expires 30d; 51 | access_log off; 52 | add_header Cache-Control "public"; 53 | } 54 | 55 | location /robots.txt { 56 | alias /var/www/php/beta/public/robots.txt; 57 | } 
58 | 59 | location /nginx_status { 60 | stub_status on; 61 | access_log off; 62 | allow 192.168.1.1; 63 | allow 10.62.1.115; 64 | allow 127.0.0.1; 65 | allow 52.76.88.179; 66 | deny all; 67 | } 68 | 69 | location / { 70 | try_files $uri $uri/ @abtesting; 71 | } 72 | 73 | location @abtesting { 74 | if (!-e $request_filename) { 75 | rewrite ^/(.*)$ /index.php/$1 last; 76 | break; 77 | } 78 | } 79 | 80 | location ~ \.php { 81 | 82 | #fastcgi_buffer_size 32k; 83 | #fastcgi_busy_buffers_size 64k; 84 | #fastcgi_buffers 4 32k; 85 | #fastcgi_index index.php; 86 | #fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 87 | #fastcgi_param SCRIPT_NAME $fastcgi_script_name; 88 | #fastcgi_param SERVER_NAME $host; 89 | #fastcgi_pass backend; 90 | #fastcgi_intercept_errors on; 91 | #fastcgi_split_path_info ^(.+\.php)(/.+)$; 92 | #fastcgi_keep_conn on; 93 | #include /etc/nginx/fastcgi_params; 94 | 95 | set $php_root /var/www/php/beta/public; 96 | 97 | fastcgi_intercept_errors on; 98 | error_page 502 = @fallback; 99 | 100 | fastcgi_buffer_size 32k; 101 | fastcgi_busy_buffers_size 64k; 102 | fastcgi_buffers 4 32k; 103 | fastcgi_index index.php; 104 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 105 | fastcgi_param SCRIPT_NAME $fastcgi_script_name; 106 | fastcgi_param SCRIPT_FILENAME $php_root$fastcgi_script_name; 107 | fastcgi_param SERVER_NAME $host; 108 | fastcgi_param TIDEWAYS_APIKEY 3erqP93rxMnMvHx8; 109 | fastcgi_param TIDEWAYS_FRAMEWORK laravel; 110 | fastcgi_pass 192.168.1.1:9000; 111 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 112 | fastcgi_keep_conn on; 113 | include /etc/nginx/fastcgi_params; 114 | 115 | } 116 | 117 | location @fallback { 118 | 119 | set $php_root /var/www/php/beta/public; 120 | 121 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 122 | 123 | include /etc/nginx/fastcgi_params; 124 | fastcgi_index index.php; 125 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 126 | fastcgi_param SCRIPT_NAME 
$fastcgi_script_name; 127 | fastcgi_param SCRIPT_FILENAME $php_root$fastcgi_script_name; 128 | fastcgi_param SERVER_NAME $host; 129 | fastcgi_param TIDEWAYS_APIKEY 3erqP93rxMnMvHx8; 130 | fastcgi_param TIDEWAYS_FRAMEWORK laravel; 131 | fastcgi_pass 192.168.1.1:9001; 132 | 133 | } 134 | 135 | } 136 | -------------------------------------------------------------------------------- /nginxconsul/README.md: -------------------------------------------------------------------------------- 1 | 1. Execute the following two `docker exec` commands to install [jq](https://stedolan.github.io/jq/) inside consul container 2 | ``` 3 | docker exec -ti consul apk update 4 | docker exec -ti consul apk add jq 5 | docker-compose scale http=5 6 | ``` 7 | 8 | 9 | 2. -------------------------------------------------------------------------------- /nginxconsul/app/docker-compose.yml: -------------------------------------------------------------------------------- 1 | app: 2 | image: tutum/hello-world:latest 3 | environment: 4 | SERVICE_NAME: app 5 | SERVICE_TAGS: production 6 | SERVICE_80_NAME: app 7 | SERVICE_80_CHECK_HTTP: / 8 | SERVICE_80_CHECK_INTERVAL: 15s 9 | ports: 10 | - "80" -------------------------------------------------------------------------------- /nginxconsul/consul-template.service: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec consul-template \ 3 | -consul=consul:8500 \ 4 | -template "/etc/consul-templates/nginx.conf:/etc/nginx/conf.d/app.conf:nginx -s reload" -------------------------------------------------------------------------------- /nginxconsul/consul_watches_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "node_name": "consul", 3 | "server": true, 4 | "log_level": "DEBUG", 5 | "watches": [ 6 | { 7 | "type": "service", 8 | "service": "app", 9 | "handler": "/tmp/script.sh" 10 | }, 11 | { 12 | "type": "service", 13 | "service": "apiedc", 14 | "handler": 
"/tmp/script.sh" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /nginxconsul/docker-compose.yml: -------------------------------------------------------------------------------- 1 | consul: 2 | container_name: consul 3 | command: -server -bootstrap -ui-dir /ui consul -advertise 172.17.0.1 -config-file /etc/consul.d/config.json 4 | image: progrium/consul:latest 5 | mem_limit: 128m 6 | ports: 7 | - "8300:8300" 8 | - "8400:8400" 9 | - "8500:8500" 10 | - "8600:53/udp" 11 | volumes: 12 | - "./consul_watches_config.json:/etc/consul.d/config.json" 13 | - "./script.sh:/tmp/script.sh" 14 | environment: 15 | - GOMAXPROCS=4 16 | - HOST_IP=172.17.0.1 17 | 18 | registrator: 19 | container_name: registrator 20 | command: -internal consul://consul:8500 21 | image: gliderlabs/registrator:latest 22 | links: 23 | - consul 24 | volumes: 25 | - "/var/run/docker.sock:/tmp/docker.sock" 26 | 27 | nginxlb: 28 | container_name: nginxlb 29 | environment: 30 | SERVICE_NAME: nginx 31 | build: ./ 32 | links: 33 | - consul 34 | ports: 35 | - "80:80" -------------------------------------------------------------------------------- /nginxconsul/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | {{range services}} 4 | {{if .Tags.Contains "production"}} 5 | upstream {{.Name}} { 6 | least_conn;{{range service .Name}} 7 | server {{.Address}}:{{.Port}} max_fails=3 fail_timeout=60 weight=1;{{else}} 8 | server 0.0.0.1:80; # invalid placeholder{{end}} 9 | }{{range $index, $element := service .Name}}{{if eq $index 0}} 10 | server { 11 | listen 80; 12 | location /{{.Name}} { 13 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 14 | proxy_set_header Host $http_host; 15 | proxy_set_header X-Forwarded-Proto https; 16 | proxy_redirect off; 17 | proxy_connect_timeout 240; 18 | proxy_send_timeout 240; 19 | proxy_read_timeout 240; 20 | proxy_pass http://{{.Name}}; 21 | } 22 | } 23 | 
{{end}}{{end}}{{end}} 24 | {{end}} -------------------------------------------------------------------------------- /nginxconsul/nginx.conf.tmp: -------------------------------------------------------------------------------- 1 | upstream app { 2 | least_conn;{{range service "apiedc"}} 3 | server {{.Address}}:{{.Port}} max_fails=3 fail_timeout=60 weight=1; 4 | {{else}}server 127.0.0.1:65535; # force a 502{{end}} 5 | } 6 | 7 | server { 8 | listen 8080; 9 | 10 | location = /APIEDC { 11 | proxy_pass http://app; 12 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 13 | proxy_set_header Host $host; 14 | proxy_set_header X-Real-IP $remote_addr; 15 | } 16 | } -------------------------------------------------------------------------------- /nginxconsul/nginx.service: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/sbin/nginx -c /etc/nginx/nginx.conf -t && \ 4 | exec /usr/sbin/nginx -c /etc/nginx/nginx.conf -g "daemon off;" -------------------------------------------------------------------------------- /nginxconsul/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CURL='/usr/bin/curl' 4 | OPTIONS='-s' 5 | CONSUL_SERVICES_API="http://172.17.0.1:8500/v1/catalog/services" 6 | CONSUL_SERVICE_API="http://172.17.0.1:8500/v1/catalog/service" 7 | STATUS_UPSTREAMS_API="http://172.17.0.1:8080/status/upstreams" 8 | UPSTREAM_CONF_API="http://172.17.0.1/upstream_conf?" 9 | 10 | # Get the list of current Nginx upstreams 11 | upstreams=$($CURL $OPTIONS $STATUS_UPSTREAMS_API | jq -r '. 
as $in| keys[]') 12 | servers=$($CURL $OPTIONS {$UPSTREAM_CONF_API}upstream=$upstreams) 13 | echo "Nginx upstreams in $upstreams:" 14 | echo $servers 15 | 16 | # Loop through the registered servers in consul tagged with production (i.e backend servers to be proxied through nginx) and add the ones not present in the Nginx upstream block 17 | echo "Servers registered with consul:" 18 | service=$($CURL $OPTIONS $CONSUL_SERVICES_API | jq --raw-output 'to_entries| .[] | select(.value[0] == "production") | .key') 19 | 20 | ports=$($CURL $OPTIONS $CONSUL_SERVICE_API/$service | jq -r '.[]|.ServicePort') 21 | for port in ${ports[@]}; do 22 | entry=$HOST_IP:$port 23 | echo $entry 24 | if [[ ! $servers =~ $entry ]]; then 25 | $CURL $OPTIONS "{$UPSTREAM_CONF_API}add=&upstream=$upstreams&server=$entry" 26 | echo "Added $entry to the nginx upstream group $upstreams!" 27 | fi 28 | done 29 | 30 | # Loop through the Nginx upstreams and remove the ones not present in consul 31 | servers=$($CURL $OPTIONS {$UPSTREAM_CONF_API}upstream=$upstreams) 32 | for params in ${servers[@]}; do 33 | if [[ $params =~ ":" ]]; then 34 | server=$params 35 | continue 36 | elif [[ $params =~ "id=" ]]; then 37 | id=$params 38 | else 39 | continue 40 | fi 41 | 42 | service=$($CURL $OPTIONS $CONSUL_SERVICES_API | jq --raw-output 'to_entries| .[] | select(.value[0] == "production") | .key') 43 | ports=$($CURL $OPTIONS $CONSUL_SERVICE_API/$service | jq -r '.[]|.ServicePort') 44 | found=0 45 | for port in ${ports[@]}; do 46 | entry=$HOST_IP:$port 47 | if [[ $server =~ $entry ]]; then 48 | #echo "$server matches consul entry $entry" 49 | found=1 50 | break 51 | else 52 | continue 53 | fi 54 | done 55 | 56 | if [ $found -eq 0 ]; then 57 | $CURL $OPTIONS "{$UPSTREAM_CONF_API}remove=&upstream=$upstreams&$id" 58 | echo "Removed $server # $id from nginx upstream block $upstreams!" 
59 | fi 60 | done 61 | -------------------------------------------------------------------------------- /nginxlb/consul-template.service: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | exec consul-template \ 3 | -consul=consul:8500 \ 4 | -template "/etc/consul-templates/nginx.conf:/etc/nginx/conf.d/app.conf:nginx -s reload" -------------------------------------------------------------------------------- /nginxlb/consul_watches_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "node_name": "consul", 3 | "server": true, 4 | "log_level": "DEBUG", 5 | "watches": [ 6 | { 7 | "type": "service", 8 | "service": "http", 9 | "handler": "/tmp/script.sh" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /nginxlb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | 3 | consul: 4 | container_name: consul 5 | command: -server -bootstrap -ui-dir /ui 6 | image: progrium/consul:latest 7 | mem_limit: 128m 8 | ports: 9 | - "8300:8300" 10 | - "8400:8400" 11 | - "8500:8500" 12 | - "8600:53/udp" 13 | volumes: 14 | - "./consul_watches_config.json:/etc/consul.d/config.json" 15 | - "./script.sh:/tmp/script.sh" 16 | environment: 17 | - GOMAXPROCS=4 18 | - HOST_IP=172.17.0.1 19 | 20 | registrator: 21 | container_name: registrator 22 | command: -internal consul://consul:8500 23 | image: gliderlabs/registrator:latest 24 | links: 25 | - consul 26 | volumes: 27 | - "/var/run/docker.sock:/tmp/docker.sock" 28 | 29 | app: 30 | image: tutum/hello-world:latest 31 | links: 32 | - consul 33 | environment: 34 | SERVICE_NAME: app 35 | SERVICE_TAGS: production 36 | SERVICE_80_NAME: http 37 | SERVICE_80_CHECK_HTTP: / 38 | SERVICE_80_CHECK_INTERVAL: 15s 39 | ports: 40 | - "80" 41 | 42 | lb: 43 | container_name: nginx 44 | environment: 45 | SERVICE_NAME: nginx 46 | build: ./ 47 | links: 48 | - 
consul 49 | ports: 50 | - "80:80" -------------------------------------------------------------------------------- /nginxlb/nginx.conf: -------------------------------------------------------------------------------- 1 | upstream app { 2 | least_conn;{{range service "production.app"}} 3 | server {{.Address}}:{{.Port}} max_fails=3 fail_timeout=60 weight=1; 4 | {{else}}server 127.0.0.1:65535; # force a 502{{end}} 5 | } 6 | 7 | server { 8 | listen 80 default_server; 9 | 10 | location / { 11 | proxy_pass http://app; 12 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 13 | proxy_set_header Host $host; 14 | proxy_set_header X-Real-IP $remote_addr; 15 | } 16 | } -------------------------------------------------------------------------------- /nginxlb/nginx.service: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/sbin/nginx -c /etc/nginx/nginx.conf -t && \ 4 | exec /usr/sbin/nginx -c /etc/nginx/nginx.conf -g "daemon off;" -------------------------------------------------------------------------------- /nginxlb/reload-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Render Nginx configuration template using values from Consul, 4 | # but do not reload because Nginx has't started yet 5 | preStart() { 6 | consul-template \ 7 | -once \ 8 | -consul consul:8500 \ 9 | -template "/etc/consul-templates/nginx.conf:/etc/nginx/conf.d/app.conf" 10 | } 11 | 12 | # Render Nginx configuration template using values from Consul, 13 | # then gracefully reload Nginx 14 | onChange() { 15 | consul-template \ 16 | -once \ 17 | -consul consul:8500 \ 18 | -template "/etc/consul-templates/nginx.conf:/etc/nginx/conf.d/app.conf:nginx -s reload" 19 | } 20 | 21 | until 22 | cmd=$1 23 | if [ -z "$cmd" ]; then 24 | onChange 25 | fi 26 | shift 1 27 | $cmd "$@" 28 | [ "$?" 
-ne 127 ] 29 | do 30 | onChange 31 | exit 32 | done -------------------------------------------------------------------------------- /nginxlb/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CURL='/usr/bin/curl' 4 | OPTIONS='-s' 5 | CONSUL_SERVICES_API="http://localhost:8500/v1/catalog/services" 6 | CONSUL_SERVICE_API="http://localhost:8500/v1/catalog/service" 7 | STATUS_UPSTREAMS_API="http://localhost:8080/status/upstreams" 8 | UPSTREAM_CONF_API="http://localhost/upstream_conf?" 9 | 10 | # Get the list of current Nginx upstreams 11 | upstreams=$($CURL $OPTIONS $STATUS_UPSTREAMS_API | jq -r '. as $in| keys[]') 12 | servers=$($CURL $OPTIONS {$UPSTREAM_CONF_API}upstream=$upstreams) 13 | echo "Nginx upstreams in $upstreams:" 14 | echo $servers 15 | 16 | # Loop through the registered servers in consul tagged with production (i.e backend servers to be proxied through nginx) and add the ones not present in the Nginx upstream block 17 | echo "Servers registered with consul:" 18 | service=$($CURL $OPTIONS $CONSUL_SERVICES_API | jq --raw-output 'to_entries| .[] | select(.value[0] == "production") | .key') 19 | 20 | ports=$($CURL $OPTIONS $CONSUL_SERVICE_API/$service | jq -r '.[]|.ServicePort') 21 | for port in ${ports[@]}; do 22 | entry=$HOST_IP:$port 23 | echo $entry 24 | if [[ ! $servers =~ $entry ]]; then 25 | $CURL $OPTIONS "{$UPSTREAM_CONF_API}add=&upstream=$upstreams&server=$entry" 26 | echo "Added $entry to the nginx upstream group $upstreams!" 
27 | fi 28 | done 29 | 30 | # Loop through the Nginx upstreams and remove the ones not present in consul 31 | servers=$($CURL $OPTIONS {$UPSTREAM_CONF_API}upstream=$upstreams) 32 | for params in ${servers[@]}; do 33 | if [[ $params =~ ":" ]]; then 34 | server=$params 35 | continue 36 | elif [[ $params =~ "id=" ]]; then 37 | id=$params 38 | else 39 | continue 40 | fi 41 | 42 | service=$($CURL $OPTIONS $CONSUL_SERVICES_API | jq --raw-output 'to_entries| .[] | select(.value[0] == "production") | .key') 43 | ports=$($CURL $OPTIONS $CONSUL_SERVICE_API/$service | jq -r '.[]|.ServicePort') 44 | found=0 45 | for port in ${ports[@]}; do 46 | entry=$HOST_IP:$port 47 | if [[ $server =~ $entry ]]; then 48 | #echo "$server matches consul entry $entry" 49 | found=1 50 | break 51 | else 52 | continue 53 | fi 54 | done 55 | 56 | if [ $found -eq 0 ]; then 57 | $CURL $OPTIONS "{$UPSTREAM_CONF_API}remove=&upstream=$upstreams&$id" 58 | echo "Removed $server # $id from nginx upstream block $upstreams!" 59 | fi 60 | done 61 | -------------------------------------------------------------------------------- /nginxplus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | nginxplus: 2 | container_name: nginxplus 3 | image: nginxplus:1.0 4 | #restart: always 5 | 6 | ports: 7 | - "80:80" 8 | - "8080:8080" 9 | volumes: 10 | - /var/log/nginxplus:/var/log/nginx:rw 11 | - /mnt/docker/nginxplus/conf.d:/etc/nginx/conf.d:rw 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /nginxplus/nginx-repo.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDrDCCApSgAwIBAgIDALmrMA0GCSqGSIb3DQEBBQUAMF4xCzAJBgNVBAYTAlJV 3 | MQ8wDQYDVQQIDAZNb3Njb3cxDzANBgNVBAcMBk1vc2NvdzESMBAGA1UECgwJTkdJ 4 | TlggSW5jMRkwFwYDVQQDDBBuZ2lueCBjbGllbnRzIENBMB4XDTE3MDEyNzA4MDAw 5 | MFoXDTE3MDIyNzA4MDAwMFowdjELMAkGA1UEBhMCUlUxDzANBgNVBAgMBk1vc2Nv 6 | 
dzEPMA0GA1UEBwwGTW9zY293MRIwEAYDVQQKDAlOR0lOWCBJbmMxHDAaBgNVBAsM 7 | E0NsaWVudCBjZXJ0aWZpY2F0ZXMxEzARBgNVBAMMClQwMDAwNDQ5MDAwggEiMA0G 8 | CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAduRM7q/En1K8PpxJ9c+PHGa3FvUG 9 | 8rdYrbjzoIOfdiQ+BZb0WlN/3Tl6c8WkYE5pauNE/eeFF/evWBBkusxAAPjK403+ 10 | 4fogxN1Rx/iXs3i67WH4Y0U6aZBNwJ9rxr3XuDY0chN4/cojzsyMG/nrs6+ic60Q 11 | pavlE7XXbLIeo3uhdS7doB+eW3rabqIf1kLXnilamhZp9kENFZq4HTZto1H2qaxb 12 | andmBD8uF0hG5zBA7NrhC1QXArU82XKmupti/sJXL1re8OKfIW1zLjwAjxWDwuS8 13 | yxK2L5eEkwETSxqZ2F9rcQea+aAw0YCcgNPCG/2RyVitWqFqnvfdkxULAgMBAAGj 14 | WzBZMCEGCWCGSAGG+EIBDQQUFhJUcmlhbCBzdWJzY3JpcHRpb24wDgYDVR0PAQH/ 15 | BAQDAgOIMAwGA1UdEwEB/wQCMAAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwDQYJ 16 | KoZIhvcNAQEFBQADggEBADzpudlEMlYBVxSSf3IjC/5fL1K6T99ObSACy5NW7Vth 17 | 5r4k/xKo4N8aYCJBr3F3rNMGB44NRMwQAl6b18I3LfTETKuuuUnsP32MKgUtmnjb 18 | Yrv2MwxQ06VSGe8dLw1L1jAQ1VXe4HbRKYthpwTva9j+hATTOexCxF9Bh8xBSUxr 19 | TjK4mSyyIPvpM33sgueHR0jCKP0CkED5ezjjO5IgNOo3NdFnwbFuS36nhMfm2Xw1 20 | n62aPN3KZcNzoDuqu8+6QnmUjyJW27hfKcn8bMQls3VjbsF3YcU/diep+hSVTz9t 21 | LFps98YSyUttr6KbXcifcH3urZbT0bABrhu7fVBLZ10= 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- /nginxplus/nginx-repo.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDAduRM7q/En1K8 3 | PpxJ9c+PHGa3FvUG8rdYrbjzoIOfdiQ+BZb0WlN/3Tl6c8WkYE5pauNE/eeFF/ev 4 | WBBkusxAAPjK403+4fogxN1Rx/iXs3i67WH4Y0U6aZBNwJ9rxr3XuDY0chN4/coj 5 | zsyMG/nrs6+ic60QpavlE7XXbLIeo3uhdS7doB+eW3rabqIf1kLXnilamhZp9kEN 6 | FZq4HTZto1H2qaxbandmBD8uF0hG5zBA7NrhC1QXArU82XKmupti/sJXL1re8OKf 7 | IW1zLjwAjxWDwuS8yxK2L5eEkwETSxqZ2F9rcQea+aAw0YCcgNPCG/2RyVitWqFq 8 | nvfdkxULAgMBAAECggEAE99y3Vq7/XJyCYvxeVWPU/ibxoXxUQFY2yg887L8REff 9 | oCPVnW+CNa6zE6jGnJ1xG0N1KheJg12hLpLKRoxfMLCo/9IOqKXOvPl23M3oJkna 10 | ynpeZlZhvJFP3CdqjqdbzN1xg2LQDesupxngVwtAeAz6sqx9Dz38DNBHM3I+xhjv 11 | 
UNYcD6RA3YqbfjRaTZogj6+TlEMCVUypiwXh5McMc47vosOdDMQxNbjQUtn1xOhv 12 | vWer5Ecj4V5tqlh+wg7svmhd9Rrg2O6d321uegJlCgGjQyJ0zuCb+LwICFuL3T/7 13 | PVNnFNkHaZCjvpgOqvNeWGQBjBq9tJAg3ae/IntzUQKBgQDzCferHC22jB/yw5Zm 14 | 3yv/XCR2aOIaG/88sGNc+YmCt1PK6bmoPR3VMZaQKJVoeJ0vKruXT6r6HVO620Q0 15 | Asy3tfq1Igmk5WS1p5zsRj/HKNDwKrlDvtmVHsYlUBTSrNAYJhZgmDlym5UkgCOT 16 | Br46KD9bmZ19/5ZBeGrru9gcgwKBgQDKunexUCuI0aNDDi/xB9PB9Q7RQK/UV/19 17 | 9jJqIfXTd4jydo0iQUiWLYrZcTz/vWipD453qZ/mpfPdyi28cbPASqbmEqbngJXe 18 | IEmbHFQks8ctYGMtGqQYRJm6EY2lRZyL/iflUVcfeSErtRNEN4XLgj4C2XrQYJlO 19 | qmpXohFO2QKBgQDycCv2LZrQMYGhKkg7AD6kcecRk1iQkQMQKChGU8o/cd8NWA9S 20 | avz6cYfu7/zyhPbPWUJeEyPMcjrW9+lWNMDANbEMxRJ3wjivGK9p5j2yzt115jh/ 21 | Ar+LiHJCQFs1ORwwKrZBzzfnQpoJGiXigWR7ITDqkBdR3Aan8pY951kSPwKBgCvt 22 | tev1HEnca00ncNoAwTEBIgHTy4Xuvx4hUBFRZIw7FbC8YZ6vmMP6AGFXuUkNSPWB 23 | SnTBi8WV5j8EBV8VIEclpf6qwgHeChuE0Y5L2CvPzPQXM8ZdRHuBufHoxdGvQNnQ 24 | 6+Iz+vX18GshJJiek15izX8ekw8+L1otcTemBawZAoGBAKHHeVrZ5OgtCcro2dnl 25 | fkswMltSVcWW4ujZ+4mmcsea+UD1v0rGc6Vgl3NcthYL8eB0O18vb6pNOui/0soP 26 | 6VJ0LcYZBCWae0zWLblZIXuY55rh6c/Wusr20k9m7F0WHbDDGMSbJgCqPPrHISJS 27 | DkoI50V30QIbOyMcbt/WWRoe 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /nodejs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | MAINTAINER agung.julisman@yahoo.com 3 | 4 | # Replace shell with bash so we can source files 5 | RUN rm /bin/sh && ln -s /bin/bash /bin/sh 6 | 7 | # change time zone to asia jakarta 8 | RUN rm /etc/timezone \ 9 | && echo "Asia/Jakarta" > /etc/timezone \ 10 | && chmod 644 /etc/timezone \ 11 | && dpkg-reconfigure --frontend noninteractive tzdata 12 | 13 | # update 14 | RUN apt-get update -y 15 | 16 | # install 17 | RUN apt-get install -y curl git 18 | 19 | # clean after install 20 | RUN apt-get clean 21 | 22 | # set environment 23 | # we use node v6.0.0 as default 24 | 25 | ENV NVM_DIR /usr/local/nvm 26 
| ENV NODE_VERSION 6.0.0 27 | 28 | # Install nvm with node and npm 29 | RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.29.0/install.sh | bash \ 30 | && source $NVM_DIR/nvm.sh \ 31 | && nvm install $NODE_VERSION \ 32 | && nvm alias default $NODE_VERSION \ 33 | && nvm use default 34 | 35 | # Set up our PATH correctly so we don't have to long-reference npm, node, &c. 36 | ENV NODE_PATH $NVM_DIR/versions/node/v$NODE_VERSION/lib/node_modules 37 | ENV PATH $NVM_DIR/versions/node/v$NODE_VERSION/bin:$PATH 38 | 39 | #you can pick this one for process manager 40 | RUN npm install -g nodemon 41 | RUN npm install pm2 -g 42 | 43 | ENV dir /app 44 | WORKDIR ${dir} 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /nodejs/Dockerfile~: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | MAINTAINER agung.julisman@yahoo.com 3 | 4 | RUN apt-get update 5 | RUN apt-get install git 6 | 7 | RUN apt-get clean 8 | -------------------------------------------------------------------------------- /nsq/.gitignore: -------------------------------------------------------------------------------- 1 | nodejs/node_modules -------------------------------------------------------------------------------- /nsq/client/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "sync" 6 | 7 | "github.com/bitly/go-nsq" 8 | ) 9 | 10 | func main() { 11 | 12 | wg := &sync.WaitGroup{} 13 | wg.Add(1) 14 | 15 | config := nsq.NewConfig() 16 | q, _ := nsq.NewConsumer("write_test", "ch", config) 17 | q.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { 18 | log.Printf("Got a message: %v", message) 19 | wg.Done() 20 | return nil 21 | })) 22 | err := q.ConnectToNSQD("127.0.0.1:4150") 23 | if err != nil { 24 | log.Panic("Could not connect") 25 | } 26 | wg.Wait() 27 | 28 | } 
-------------------------------------------------------------------------------- /nsq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | nsqlookupd: 4 | image: nsqio/nsq:v0.3.5 5 | command: /nsqlookupd 6 | ports: 7 | - "4160:4160" 8 | - "4161:4161" 9 | nsqd: 10 | image: nsqio/nsq:v0.3.5 11 | command: /nsqd --lookupd-tcp-address=nsqlookupd:4160 12 | ports: 13 | - "4150:4150" 14 | - "4151:4151" 15 | nsqadmin: 16 | image: nsqio/nsq:v0.3.5 17 | command: /nsqadmin --lookupd-http-address=nsqlookupd:4161 18 | ports: 19 | - "4171:4171" 20 | -------------------------------------------------------------------------------- /nsq/nodejs/publish.js: -------------------------------------------------------------------------------- 1 | var express = require('express') 2 | var app = express() 3 | 4 | var nsq = require('nsq.js'); 5 | var writer = nsq.writer(':4150'); 6 | writer.on('ready', function() { 7 | 8 | writer.publish('tesevent', '333'); 9 | app.get('/', function (req, res) { 10 | writer.publish('tesevent', 'baru '); 11 | res.send('Hello World!') 12 | 13 | }) 14 | 15 | app.listen(3000, function () { 16 | console.log('Example app listening on port 3000!') 17 | }) 18 | }); 19 | 20 | 21 | 22 | // publish 23 | 24 | /* 25 | var writer = nsq.writer(':4150'); 26 | 27 | writer.on('ready', function() { 28 | writer.publish('tesevent', 'ganti 55555'); 29 | });*/ 30 | -------------------------------------------------------------------------------- /nsq/nodejs/server.js: -------------------------------------------------------------------------------- 1 | var nsq = require('nsq.js'); 2 | 3 | // subscribe 4 | 5 | var reader = nsq.reader({ 6 | nsqd: [':4150'], 7 | maxInFlight: 1, 8 | maxAttempts: 5, 9 | topic: 'events', 10 | channel: 'ingestion' 11 | }); 12 | 13 | reader.on('error', function(err){ 14 | console.log(err.stack); 15 | }); 16 | 17 | reader.on('message', function(msg){ 18 | var body = 
msg.body.toString(); 19 | console.log('%s attempts=%s', body, msg.attempts); 20 | msg.requeue(2000); 21 | }); 22 | 23 | reader.on('discard', function(msg){ 24 | var body = msg.body.toString(); 25 | console.log('giving up on %s', body); 26 | msg.finish(); 27 | }); 28 | 29 | // publish 30 | 31 | var writer = nsq.writer(':4150'); 32 | 33 | writer.on('ready', function() { 34 | writer.publish('events', 'foo'); 35 | writer.publish('events', 'bar'); 36 | writer.publish('events', 'baz'); 37 | }); -------------------------------------------------------------------------------- /nsq/nodejs/subscribe.js: -------------------------------------------------------------------------------- 1 | var nsq = require('nsq.js'); 2 | 3 | // subscribe 4 | 5 | var reader = nsq.reader({ 6 | nsqd: [':4150'], 7 | maxInFlight: 3, 8 | maxAttempts: 5, 9 | topic: 'tesevent', 10 | channel: 'ingestion' 11 | }); 12 | 13 | reader.on('error', function(err){ 14 | console.log(err.stack); 15 | }); 16 | 17 | reader.on('message', function(msg){ 18 | var body = msg.body.toString(); 19 | console.log('giving up on %s', body); 20 | msg.finish(); 21 | /*var body = msg.body.toString(); 22 | console.log('%s attempts=%s', body, msg.attempts); 23 | msg.requeue(2000);*/ 24 | }); 25 | 26 | reader.on('discard', function(msg){ 27 | var body = msg.body.toString(); 28 | console.log('giving up on %s', body); 29 | msg.finish(); 30 | }); 31 | 32 | -------------------------------------------------------------------------------- /nsq/server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "github.com/bitly/go-nsq" 6 | ) 7 | 8 | func main() { 9 | config := nsq.NewConfig() 10 | w, _ := nsq.NewProducer("127.0.0.1:4150", config) 11 | 12 | err := w.Publish("write_test", []byte("test")) 13 | if err != nil { 14 | log.Panic("Could not connect") 15 | } 16 | 17 | w.Stop() 18 | } 
-------------------------------------------------------------------------------- /postgresql/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # example Dockerfile for https://docs.docker.com/examples/postgresql_service/ 3 | # 4 | 5 | FROM ubuntu 6 | MAINTAINER agung.julisman@yahoo.com 7 | 8 | # Add the PostgreSQL PGP key to verify their Debian packages. 9 | # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc 10 | RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 11 | 12 | # Add PostgreSQL's repository. It contains the most recent stable release 13 | # of PostgreSQL, ``9.3``. 14 | RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list 15 | 16 | # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 17 | # There are some warnings (in red) that show up during the build. You can hide 18 | # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive 19 | RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 20 | 21 | # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` 22 | # after each ``apt-get`` 23 | 24 | # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` 25 | USER postgres 26 | 27 | # Create a PostgreSQL role named ``docker`` with ``docker`` as the password and 28 | # then create a database `docker` owned by the ``docker`` role. 29 | # Note: here we use ``&&\`` to run commands one after the other - the ``\`` 30 | # allows the RUN command to span multiple lines. 
31 | RUN /etc/init.d/postgresql start &&\ 32 | psql --command "CREATE USER truemoney WITH SUPERUSER PASSWORD 'tru3money';" &&\ 33 | createdb -O truemoney truemoney 34 | 35 | # Adjust PostgreSQL configuration so that remote connections to the 36 | # database are possible. 37 | RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf 38 | 39 | # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` 40 | RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf 41 | 42 | # Expose the PostgreSQL port 43 | EXPOSE 5432 44 | 45 | # Add VOLUMEs to allow backup of config, logs and databases 46 | VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] 47 | 48 | # Set the default command to run when starting the container 49 | CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] 50 | -------------------------------------------------------------------------------- /postgresql/docker-compose.yml: -------------------------------------------------------------------------------- 1 | postgresql: 2 | container_name: postgresql 3 | image: secret06/postgresql:0.1 4 | restart: always 5 | ports: 6 | - "5432:5432" 7 | volumes: 8 | - /etc/postgresql:/etc/postgresql:rw 9 | -------------------------------------------------------------------------------- /prometheus/alert/alertmanagerdata/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'localhost:25' 4 | smtp_from: 'alertmanager@example.org' 5 | smtp_auth_username: 'alertmanager' 6 | smtp_auth_password: 'password' 7 | # The auth token for Hipchat. 8 | hipchat_auth_token: '1234556789' 9 | # Alternative host for Hipchat. 10 | hipchat_url: 'https://hipchat.foobar.org/' 11 | 12 | # The directory from which notification templates are read. 
13 | templates: 14 | - '/etc/alertmanager/template/*.tmpl' 15 | 16 | # The root route on which each incoming alert enters. 17 | route: 18 | # The labels by which incoming alerts are grouped together. For example, 19 | # multiple alerts coming in for cluster=A and alertname=LatencyHigh would 20 | # be batched into a single group. 21 | group_by: ['alertname', 'cluster', 'service'] 22 | 23 | # When a new group of alerts is created by an incoming alert, wait at 24 | # least 'group_wait' to send the initial notification. 25 | # This way ensures that you get multiple alerts for the same group that start 26 | # firing shortly after another are batched together on the first 27 | # notification. 28 | group_wait: 30s 29 | 30 | # When the first notification was sent, wait 'group_interval' to send a batch 31 | # of new alerts that started firing for that group. 32 | group_interval: 5m 33 | 34 | # If an alert has successfully been sent, wait 'repeat_interval' to 35 | # resend them. 36 | repeat_interval: 3h 37 | 38 | # A default receiver 39 | receiver: devops 40 | 41 | # All the above attributes are inherited by all child routes and can 42 | # overwritten on each. 43 | 44 | # The child route trees. 45 | routes: 46 | # This routes performs a regular expression match on alert labels to 47 | # catch alerts that are related to a list of services. 48 | - match_re: 49 | service: ^(foo1|foo2|baz)$ 50 | receiver: devops 51 | # The service has a sub-route for critical alerts, any alerts 52 | # that do not match, i.e. severity != critical, fall-back to the 53 | # parent node and are sent to 'team-X-mails' 54 | routes: 55 | - match: 56 | severity: critical 57 | receiver: devops 58 | - match: 59 | service: files 60 | receiver: devops 61 | 62 | routes: 63 | - match: 64 | severity: critical 65 | receiver: devops 66 | 67 | # This route handles all alerts coming from a database service. If there's 68 | # no team to handle it, it defaults to the DB team. 
69 | - match: 70 | service: database 71 | receiver: devops 72 | # Also group alerts by affected database. 73 | group_by: [alertname, cluster, database] 74 | routes: 75 | - match: 76 | owner: team-X 77 | receiver: devops 78 | - match: 79 | owner: team-Y 80 | receiver: devops 81 | 82 | 83 | # Inhibition rules allow to mute a set of alerts given that another alert is 84 | # firing. 85 | # We use this to mute any warning-level notifications if the same alert is 86 | # already critical. 87 | inhibit_rules: 88 | - source_match: 89 | severity: 'critical' 90 | target_match: 91 | severity: 'warning' 92 | # Apply inhibition if the alertname is the same. 93 | equal: ['alertname', 'cluster', 'service'] 94 | 95 | 96 | receivers: 97 | - name: 'devops' 98 | opsgenie_configs: 99 | - api_key: <4212d335-a72c-4478-a8c6-5c0689516f49 100 | 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /prometheus/alert/docker-compose.yml: -------------------------------------------------------------------------------- 1 | alertmanager: 2 | container_name: alertmanager 3 | image: prom/alertmanager:v0.5.0 4 | restart: always 5 | ports: 6 | - "9093:9093" 7 | volumes: 8 | - ./alertmanagerdata:/etc/alertmanager:rw 9 | 10 | 11 | -------------------------------------------------------------------------------- /prometheus/alertmanagerdata/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'localhost:25' 4 | smtp_from: 'alertmanager@example.org' 5 | smtp_auth_username: 'alertmanager' 6 | smtp_auth_password: 'password' 7 | # The auth token for Hipchat. 8 | hipchat_auth_token: '1234556789' 9 | # Alternative host for Hipchat. 10 | hipchat_url: 'https://hipchat.foobar.org/' 11 | 12 | # The directory from which notification templates are read. 
13 | templates: 14 | - '/etc/alertmanager/template/*.tmpl' 15 | 16 | # The root route on which each incoming alert enters. 17 | route: 18 | # The labels by which incoming alerts are grouped together. For example, 19 | # multiple alerts coming in for cluster=A and alertname=LatencyHigh would 20 | # be batched into a single group. 21 | group_by: ['alertname', 'cluster', 'service'] 22 | 23 | # When a new group of alerts is created by an incoming alert, wait at 24 | # least 'group_wait' to send the initial notification. 25 | # This way ensures that you get multiple alerts for the same group that start 26 | # firing shortly after another are batched together on the first 27 | # notification. 28 | group_wait: 30s 29 | 30 | # When the first notification was sent, wait 'group_interval' to send a batch 31 | # of new alerts that started firing for that group. 32 | group_interval: 5m 33 | 34 | # If an alert has successfully been sent, wait 'repeat_interval' to 35 | # resend them. 36 | repeat_interval: 3h 37 | 38 | # A default receiver 39 | receiver: devops 40 | 41 | # All the above attributes are inherited by all child routes and can 42 | # overwritten on each. 43 | 44 | # The child route trees. 45 | routes: 46 | # This routes performs a regular expression match on alert labels to 47 | # catch alerts that are related to a list of services. 48 | - match_re: 49 | service: ^(foo1|foo2|baz)$ 50 | receiver: devops 51 | # The service has a sub-route for critical alerts, any alerts 52 | # that do not match, i.e. severity != critical, fall-back to the 53 | # parent node and are sent to 'team-X-mails' 54 | routes: 55 | - match: 56 | severity: critical 57 | receiver: devops 58 | - match: 59 | service: files 60 | receiver: devops 61 | 62 | routes: 63 | - match: 64 | severity: critical 65 | receiver: devops 66 | 67 | # This route handles all alerts coming from a database service. If there's 68 | # no team to handle it, it defaults to the DB team. 
69 | - match: 70 | service: database 71 | receiver: devops 72 | # Also group alerts by affected database. 73 | group_by: [alertname, cluster, database] 74 | routes: 75 | - match: 76 | owner: team-X 77 | receiver: devops 78 | - match: 79 | owner: team-Y 80 | receiver: devops 81 | 82 | 83 | # Inhibition rules allow to mute a set of alerts given that another alert is 84 | # firing. 85 | # We use this to mute any warning-level notifications if the same alert is 86 | # already critical. 87 | inhibit_rules: 88 | - source_match: 89 | severity: 'critical' 90 | target_match: 91 | severity: 'warning' 92 | # Apply inhibition if the alertname is the same. 93 | equal: ['alertname', 'cluster', 'service'] 94 | 95 | 96 | receivers: 97 | - name: 'devops' 98 | opsgenie_configs: 99 | - api_key: 100 | 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | alertmanager: 6 | container_name: alertmanager 7 | image: prom/alertmanager:v0.5.0 8 | restart: always 9 | ports: 10 | - "9093:9093" 11 | volumes: 12 | - ./alertmanagerdata:/etc/alertmanager:rw 13 | 14 | prometheus: 15 | container_name: prometheus 16 | image: prom/prometheus:v1.5.2 17 | links: 18 | - alertmanager 19 | command: -config.file=/etc/prometheus/prometheus.yml -alertmanager.url=http://alertmanager:9093 20 | restart: always 21 | ports: 22 | - "9090:9090" 23 | volumes: 24 | - ./prometheusdata:/etc/prometheus:rw 25 | - ./prometheusdb:/prometheus/data:rw 26 | 27 | 28 | 29 | #curl -X POST http://localhost:9090/-/reload for reload config -------------------------------------------------------------------------------- /prometheus/grafana/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | grafana: 5 | container_name: grafana 6 | image: 
grafana/grafana:4.1.1 7 | ports: 8 | - "3000:3000" 9 | -------------------------------------------------------------------------------- /prometheus/prometheus-node-exporter/docker-compose.yml: -------------------------------------------------------------------------------- 1 | prometheus: 2 | container_name: prometheus 3 | image: prom/node-exporter:v0.13.0 4 | restart: always 5 | ports: 6 | - "9100:9100" 7 | volumes: 8 | - /proc:/host/proc:ro 9 | - /sys:/host/sys:ro 10 | - /:/rootfs:ro 11 | 12 | 13 | 14 | # -------------------------------------------------------------------------------- /prometheus/prometheusdata/prometheus.rules: -------------------------------------------------------------------------------- 1 | ALERT host_down 2 | IF node_filesystem_avail{job="technetium.truemoney.co.id"} > 1 3 | FOR 7s 4 | 5 | # ALERT  (incomplete rule stub commented out: a bare ALERT keyword makes the whole rule file fail to parse) 6 | 7 | 8 | -------------------------------------------------------------------------------- /prometheus/prometheusdata/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | evaluation_interval: 15s 4 | 5 | external_labels: 6 | monitor: 'codelab-monitor' 7 | 8 | rule_files: 9 | - 'prometheus.rules' 10 | 11 | scrape_configs: 12 | - job_name: 'prometheus' 13 | 14 | scrape_interval: 5s 15 | 16 | static_configs: 17 | - targets: ['localhost:9090'] 18 | 19 | - job_name: 'node-exporter' 20 | 21 | scrape_interval: 5s 22 | 23 | static_configs: 24 | - targets: ['192.168.90.71:9100'] 25 | labels: 26 | group: 'production' 27 | 28 | -------------------------------------------------------------------------------- /redis/docker-compose.yml: -------------------------------------------------------------------------------- 1 | redis: 2 | container_name: redis 3 | image: ralali/redis-devel 4 | # restart: always 5 | volumes: 6 | - /mnt/docker/redis/etc:/usr/local/etc/redis:rw 7 | command: redis-server /usr/local/etc/redis/redis.conf 8 | ports: 9 | - "6379:6379" 
-------------------------------------------------------------------------------- /redis/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration 2 | # ./redis-server /path/to/redis.conf 3 | # Benchmark 4 | # redis-benchmark -q -n 100000 -c 50 -P 12 5 | # Memory 6 | # 1gb => 1024*1024*1024 bytes 7 | # Includes 8 | #include /path/to/local.conf 9 | #include /path/to/other.conf 10 | # General 11 | daemonize no 12 | pidfile /var/run/redis.pid 13 | port 6379 14 | tcp-backlog 511 15 | bind 0.0.0.0 16 | timeout 0 17 | tcp-keepalive 120 18 | loglevel debug 19 | logfile "" 20 | databases 16 21 | # Snapshot 22 | save 900 1 23 | save 300 10 24 | save 60 10000 25 | stop-writes-on-bgsave-error yes 26 | rdbcompression yes 27 | rdbchecksum yes 28 | dbfilename dump.rdb 29 | # Working directory. 30 | dir ./ 31 | # Replication 32 | slave-serve-stale-data yes 33 | slave-read-only yes 34 | repl-diskless-sync no 35 | repl-diskless-sync-delay 5 36 | repl-disable-tcp-nodelay no 37 | slave-priority 100 38 | # min-slaves-to-write 3 39 | # min-slaves-max-lag 10 40 | # min-slaves-max-lag is set to 10. 
41 | # Security 42 | requirepass redisPa$$ 43 | # Limits 44 | # maxclients 10000 45 | # maxmemory 46 | # maxmemory-policy noeviction 47 | # maxmemory-samples 5 48 | # Append Only Mode 49 | appendonly no 50 | appendfilename "appendonly.aof" 51 | appendfsync everysec 52 | no-appendfsync-on-rewrite no 53 | auto-aof-rewrite-percentage 100 54 | auto-aof-rewrite-min-size 64mb 55 | aof-load-truncated yes 56 | # LUA Scripting 57 | lua-time-limit 5000 58 | # REDIS Cluster 59 | # cluster-enabled yes 60 | # cluster-config-file nodes-6379.conf 61 | # cluster-node-timeout 15000 62 | # cluster-slave-validity-factor 10 63 | # cluster-migration-barrier 1 64 | # cluster-require-full-coverage yes 65 | # Slow Log 66 | slowlog-log-slower-than 10000 67 | slowlog-max-len 128 68 | # Latency Monitor 69 | latency-monitor-threshold 0 70 | # Event Notification 71 | notify-keyspace-events "" 72 | # Advanced Config 73 | hash-max-ziplist-entries 512 74 | hash-max-ziplist-value 64 75 | list-max-ziplist-entries 512 76 | list-max-ziplist-value 64 77 | set-max-intset-entries 512 78 | zset-max-ziplist-entries 128 79 | zset-max-ziplist-value 64 80 | hll-sparse-max-bytes 3000 81 | activerehashing yes 82 | client-output-buffer-limit normal 0 0 0 83 | client-output-buffer-limit slave 256mb 64mb 60 84 | client-output-buffer-limit pubsub 32mb 8mb 60 85 | hz 10 86 | aof-rewrite-incremental-fsync yes 87 | # end of redis configuration (removed stray "Add Comment" paste artifact — redis-server aborts on unknown directives) -------------------------------------------------------------------------------- /registry/docker-compose.yml: -------------------------------------------------------------------------------- 1 | nginx: 2 | image: "nginx:1.9" 3 | ports: 4 | - 5043:443 5 | links: 6 | - registry:registry 7 | volumes: 8 | - ./nginx/:/etc/nginx/conf.d 9 | registry: 10 | image: registry:2 11 | ports: 12 | - 127.0.0.1:5000:5000 13 | environment: 14 | REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /data 15 | volumes: 16 | - ./data:/data -------------------------------------------------------------------------------- 
/sonarqube/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | sonarqube: 5 | container_name: sonarqube 6 | image: sonarqube:6.2-alpine 7 | ports: 8 | - "9000:9000" 9 | networks: 10 | - sonarnet 11 | environment: 12 | - SONARQUBE_JDBC_URL=jdbc:postgresql://db:5432/sonar 13 | volumes: 14 | - sonarqube_conf:/opt/sonarqube/conf 15 | - sonarqube_data:/opt/sonarqube/data 16 | - sonarqube_extensions:/opt/sonarqube/extensions 17 | - sonarqube_bundled-plugins:/opt/sonarqube/lib/bundled-plugins 18 | 19 | db: 20 | image: postgres 21 | networks: 22 | - sonarnet 23 | environment: 24 | - POSTGRES_USER=sonar 25 | - POSTGRES_PASSWORD=sonar 26 | volumes: 27 | - postgresql:/var/lib/postgresql 28 | # This needs explicit mapping due to https://github.com/docker-library/postgres/blob/4e48e3228a30763913ece952c611e5e9b95c8759/Dockerfile.template#L52 29 | - postgresql_data:/var/lib/postgresql/data 30 | 31 | networks: 32 | sonarnet: 33 | driver: bridge 34 | 35 | volumes: 36 | sonarqube_conf: 37 | sonarqube_data: 38 | sonarqube_extensions: 39 | sonarqube_bundled-plugins: 40 | postgresql: 41 | postgresql_data: -------------------------------------------------------------------------------- /staging/cert_export_myCa.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDNzCCAh+gAwIBAgIESR2nXTANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDARt 3 | eUNhMB4XDTE1MTIyMjA4MjQwNVoXDTE2MTIyMTA4MjQwNVowDzENMAsGA1UEAwwE 4 | bXlDYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/FUkoS2EAouPek 5 | zGri9uHARNdJC4NZJFfaxV1ZvxkHGv62o4UfbWAn166HTQcZR18lrfIIH0xK244I 6 | 9qYAALVpbap/L4TIFaoo9Rj/jl9o6z00BlmMtwlK+Pr0nFm3+ZK8WaP348/iB2dv 7 | sdllAC/F4QWUufttdM187A4uXsxv5L6TpxaJUjbggJ3LjS9ZNLhsoM5voeWklW4j 8 | ect0TrlytHqZHqm4Atndf7opmQdVY+J/43m9DsKIv45aVQpZvgKhALCwaMmrcXK/ 9 | nJfnzO2wYYyys38oBCnmdR59fHSqpEIyKZzsXcy6R8cAfDsX55zzmKSgI/DtKQxP 10 | 
gFQNJpcCAwEAAaOBmjCBlzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB 11 | BjAdBgNVHQ4EFgQUIl/7i+MNHBw5mAkjSccGJgLjB8IwLwYDVR0fBCgwJjAkoCKg 12 | IIYeaHR0cDovLzE4Mi4yMy45Mi4xNjIvY3JsLzEuY3JsMCQGCWCGSAGG+EIBDQQX 13 | FhVHZW5lcmF0ZWQgYnkgUm91dGVyT1MwDQYJKoZIhvcNAQELBQADggEBADu5Me2w 14 | vuSuomE0siR3yamIdIfhj6CeHCBP0i4dOHlxh+UnhQ1/EWVz5I+ZbTceRUX0EPHu 15 | +JA6ATTTWjPwr0HbYk4wNx8voE/xYr8/aQ4b5vcPwS4bbXPI193ZeI96DyXslWU0 16 | G5b4lw7PyZbp57uXd2Wcpra9jZFdAzVtn2fKquZ8pysqqEj00bhMDJUi8jm0DTgc 17 | zhwSqtJkXfhBbWQzPXBKKDPzAgcxC8QDcmZb3Wrwm85sr+Car9ELvbqONwv4KKg9 18 | iGAtu4afFpJEBPfP+UfUv8HqI48yVGdnZj90jkax42nWhh5yNbVZgKyyvkoWE2F8 19 | csliESl3iTyT6P8= 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /staging/cert_export_server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDRTCCAi2gAwIBAgIBATANBgkqhkiG9w0BAQsFADAPMQ0wCwYDVQQDDARteUNh 3 | MB4XDTE1MTIyMjA4MjQxNFoXDTE2MTIyMTA4MjQxNFowETEPMA0GA1UEAwwGc2Vy 4 | dmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxD0EEexX2j+sqDj3 5 | Uyj+md12tpgWHQCqn/CUnNO4EwE8ZDnvydwMSD/ja60PdjG1I0qn6ZWhGPdIOxuy 6 | Esmx1+dQfihukCZhOM3p1sIVUNsPixrrgAv3Au1UZc2mUip6uUQ8px5NBD5xc9Gk 7 | NYOOb7lr/f7x/wP9xaZYZorx8aO9o+ZqskudL2rJEUviRUxBHh1CUo+VUekqsMDj 8 | N0yMX8fibuS89dq4qGMl6dtpZawKUAdIEBq+4VOOscU4gNXpSd/ODxCVm45gR/dP 9 | G8Sx2dUE6X6fPNMUhOxxFYHw8GhTeI1YDpX9AcGGX3bsCyUAYbwfqz1Jio+U99Wp 10 | aw9jHQIDAQABo4GpMIGmMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgG2 11 | MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUnb+0yW+d 12 | 7xx7N29yo55YUDeg9CUwHwYDVR0jBBgwFoAUIl/7i+MNHBw5mAkjSccGJgLjB8Iw 13 | JAYJYIZIAYb4QgENBBcWFUdlbmVyYXRlZCBieSBSb3V0ZXJPUzANBgkqhkiG9w0B 14 | AQsFAAOCAQEAEj9bi3piCPoVUWwnANepcZqZx5hLrRmehI1ccQIYNuJVrHiCVMbb 15 | xsKZYdhjV5eIn4fH6js8J053JEikpY6VBgYZRhtFTyGKrFIgUqiqpmdYV32+ETP6 16 | 8k+LnFLYIa87rzIiofmZiNw5baRvNQ/NaCJO4c7XN7I3I7bJNiUEC5rhsmGcbUjK 17 | 
cNkxw3Lbcjf3ZZp5z5Hp2V1VdR4oassc/Jr9WZirP+Y1+H6dGO9+BnhtlfJyjmVV 18 | vqo/OpExCIO/aoWW5DTECpkrjLzoHfX+ZbwupPFAAXCC50KNIGepZfapnc5OjHaV 19 | bMBUx/QEJOpuXcUf5TEz5D1gCbsWcz096A== 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /staging/cert_export_trueoffice.key: -------------------------------------------------------------------------------- 1 | -----BEGIN ENCRYPTED PRIVATE KEY----- 2 | MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQInELk00hxPTwCAggA 3 | MB0GCWCGSAFlAwQBKgQQyCdbNsQ+R7qVdLce4rXw0ASCBNBuivYJOWQM1wpVatTT 4 | KgsRsL+UH+meWYrLKw9kV4UYiNCL/NNduaf/0P+2NGT5PRDZUe9kNowEiW+sZ1YD 5 | m+yvQNxBLpmH/3Qq1vkl9NENxJ3+9/VUG8ZmM6PO37fr5wGOTaqWTkc6OpoK9E67 6 | 8usA8DqLQrpEgvmfXqaQ3ANbhl5nOdpaSFfzHCi9+dvRMJRlsaczkhhCJV1Xf5KE 7 | wZZmjy4AJskzKreJadQ/KTNazbGv+YHYKGRmafQOOTaWkiaetPeI2XFjRDOa/bQ2 8 | xHi+xb8TLKNdFUhRJRtJhqOr1Esv56UH/j7UkbKWAa4jpuBO/aN0Q2d3DuXzHbQ8 9 | W7VVfEdwBOp8H2mnw+1l1SdZdPnjJMFo0VBAznBAn2xKEBttaNwXOMznyV5zoryI 10 | mFnGr6/8hBvi0LhvhB301W+AmZO7m5XFA6QU8LSjz/7hCCMO/bXXkZ5ZIYClkzWd 11 | q4eSnor79XrEziICTOFKb9msvKggJ35rNJBVG4JwBUCbqW1fulakjmValDVZuGlY 12 | gYGsM1GbqBw2RblTzsWTzShWhR+HjKrI0m/AZPsREAfiij3hAJra3k/MqiJig8Z2 13 | w30dR08oQ+XFbHnacdzdI/f3OlNf7jbsDnB52dXdvMxXDMtA28OpVEKbo7wy+604 14 | m9FMVxH7xnk4RYv7EJPgPLGk5Va2WosSdieSdNbpuBmGf2LGHYwmJwcQMpbgilG3 15 | 5Pal0BODjQMtX6WZwFeMvjZusT3fxqmtGkjJ9M/xfMSrgAaT5lGFZeOe2CeRB+tH 16 | UygINeHS8cYoutFEddvkvgiqdO1BCo3rrgvevb/+7vlhx7LlKHGOIL1Bri+AtNCg 17 | yYhZA/br59U6Bcp7E3fDNd5NdvsUqadiFIHFpYjmXpu2Y7KqoIEkzED1bIqNpoel 18 | 7oAMhDQAhauzrHOeqDI2kC4qVF4RIlZRchOpK7dUXvHe7JdrXra4seVw5n3uXO3r 19 | Gf6hyN9oh9gdoEBocnHyENcz1CxAcOZHsls3/JoDrZ7ge/OoOPmOgvwDa9D2YMec 20 | leHxC88DKdvizGT0EX2QeJnnLLoh2GmDpfjmeZ9vt9cK7upnOyv0GERS6S7Dvbkv 21 | fqkJBXGPIjuGI1kzP1djyRHaPkxMrb6PJidyBTwQBX3WnVRarZSmjswCk6en/m/d 22 | CBGgUtKwo64FfmjDroFKZo6OG8lQ4ltewI3JJZ3cLzDq660+I+3I9Du13BlTLc4b 23 | VeVFgLxmRD6KXwfhghtExEbxlmpEro4g6AOYnXByk/7qFmqSSgk5kTVo/4rPggvv 24 | 
DPRVpPMzE4HLTWkV9NHl4XaZrV9NVoVUCa5mPWIkm945w5C9a/rPKUpWsh1UcJ4+ 25 | iiqkMsbZQ+7yqRXnJzqL8oVNjAtgewCWM0+OM8zHr3jUbmgXgvSmgMdnVw64vTup 26 | wsAKSzkwp4rtlgtuvAPArGJtr6iQ0+KEvhe+tMJ1cZP9GDMmnifWDerQigSFHDVb 27 | QWoc7Fdv+3ynumiIdsBKazGYluH+R/EGd6sDueN9yKCI5Ly3B6H0CTBAXULCuSx8 28 | c6DRHsMrtGNZzCE0NGLJd6YUySGacmodjKv5R9v5Kezy4weyjWJA+w5tHXH/gpne 29 | aqPG1w2OPGp91oCWQvvhhk2ctg== 30 | -----END ENCRYPTED PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /staging/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | nginx: 5 | container_name: nginx 6 | image: secret06/nginx 7 | restart: always 8 | ports: 9 | - "80:80" 10 | - "443:443" 11 | - "8000:8000" 12 | - "9000:9000" 13 | volumes: 14 | - /var/www/html:/var/www/html:rw 15 | - /mnt/docker/nginx/conf.d:/etc/nginx/conf.d:rw 16 | links: 17 | - hhvm2 18 | - mysqldb 19 | # networks: 20 | # - be 21 | 22 | hhvm2: 23 | container_name: hhvm2 24 | restart: always 25 | image: secret06/hhvm:2.0 26 | command: hhvm --mode server --config /etc/hhvm/hhvm.ini 27 | volumes: 28 | - /var/www/html:/var/www/html:rw 29 | # networks: 30 | # - be 31 | mysqldb: 32 | container_name: mysqldb 33 | image: mysql:5.7 34 | ports: 35 | - "3306:3306" 36 | volumes: 37 | - db_data:/var/lib/mysql:rw 38 | - /var/www/html/tmid/newcms:/var/bc 39 | restart: always 40 | # networks: 41 | # - be 42 | environment: 43 | MYSQL_ROOT_PASSWORD: Tru3money 44 | MYSQL_DATABASE: tmw 45 | MYSQL_USER: tmw 46 | MYSQL_PASSWORD: tmwTru3money 47 | 48 | volumes: 49 | db_data: 50 | 51 | #networks: 52 | # be: 53 | # external: 54 | # name: dockerelk_docker_elk 55 | -------------------------------------------------------------------------------- /staging/validation_engine-1.0.2-SNAPSHOT.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/julisman/docker/43e90a7bc8fd917172dc66dd09270d1263735ce3/staging/validation_engine-1.0.2-SNAPSHOT.jar -------------------------------------------------------------------------------- /tomcat/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | tomcat: 6 | container_name: tomcat 7 | image: tomcat:8-alpine 8 | restart: always 9 | ports: 10 | - "8080:8080" 11 | 12 | 13 | -------------------------------------------------------------------------------- /website/docker-compose-devel.yml: -------------------------------------------------------------------------------- 1 | ralali_nginx: 2 | image: ralali/nginx:2.0 3 | ports: 4 | - "8000:8000" 5 | volumes: 6 | - .:/var/www/php/website 7 | links: 8 | - ralali_hhvm 9 | 10 | ralali_hhvm: 11 | image: ralali/hhvm:2.0 12 | command: hhvm --mode server --config /etc/hhvm/hhvm.ini 13 | volumes: 14 | - .:/var/www/php/website 15 | 16 | 17 | -------------------------------------------------------------------------------- /website/docker-compose.yml: -------------------------------------------------------------------------------- 1 | website_data: 2 | container_name: website_data 3 | image: website:2.0 4 | 5 | ralali_nginx: 6 | container_name: nginx 7 | image: nginx:2.0 8 | ports: 9 | - "8000:8000" 10 | volumes_from: 11 | - website_data 12 | links: 13 | - ralali_hhvm_build 14 | 15 | ralali_hhvm_build: 16 | container_name: hhvm 17 | image: hhvm:2.0 18 | command: hhvm --mode server --config /etc/hhvm/hhvm.ini 19 | volumes_from: 20 | - website_data 21 | 22 | 23 | -------------------------------------------------------------------------------- /website/hhvm/Dockerfile: -------------------------------------------------------------------------------- 1 | # Set the base image to Ubuntu 2 | FROM ubuntu:trusty 3 | 4 | # File Author / Maintainer 5 | MAINTAINER Agung Julisman 6 | 7 | # Install HHVM 8 | RUN apt-key adv 
--recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0x5a16e7281be7a449 9 | RUN apt-get update -y && apt-get install -y software-properties-common \ 10 | && add-apt-repository "deb http://dl.hhvm.com/ubuntu trusty main" \ 11 | && apt-get update -y \ 12 | && apt-get install -y hhvm=3.14.4~trusty \ 13 | && apt-get clean \ 14 | && rm -rf /var/lib/apt/lists/* 15 | 16 | # Add all config hhvm 17 | ADD hhvm.ini /etc/hhvm/hhvm.ini 18 | ADD php.ini /etc/hhvm/php.ini 19 | ADD supervisor-hhvm.sh /scripts/supervisor-hhvm.sh 20 | 21 | # Make sure that our web server will restart automatically when the server is rebooted 22 | RUN update-rc.d hhvm defaults 23 | 24 | # Exposing HHVM-FastCGI port 25 | EXPOSE 9000 -------------------------------------------------------------------------------- /website/hhvm/hhvm.ini: -------------------------------------------------------------------------------- 1 | pid = /var/run/hhvm/pid 2 | 3 | ; hhvm specific 4 | 5 | hhvm.server.port = 9000 6 | hhvm.server.type = fastcgi 7 | hhvm.server.default_document = index.php 8 | hhvm.log.use_log_file = true 9 | 10 | hhvm.log.file = /proc/self/fd/2 11 | 12 | hhvm.repo.central.path = /var/run/hhvm/hhvm.hhbc 13 | -------------------------------------------------------------------------------- /website/hhvm/php.ini: -------------------------------------------------------------------------------- 1 | ; php options 2 | session.save_handler = files 3 | session.save_path = /var/lib/hhvm/sessions 4 | session.gc_maxlifetime = 1440 5 | error_log = /proc/self/fd/2 6 | 7 | ; hhvm specific 8 | hhvm.log.level = Warning 9 | hhvm.log.always_log_unhandled_exceptions = true 10 | hhvm.log.runtime_error_reporting_level = 8191 11 | hhvm.mysql.typed_results = false 12 | -------------------------------------------------------------------------------- /website/hhvm/supervisor-hhvm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # exec hhvm --mode server 
-vServer.Type=fastcgi -vServer.Port=9000 -v Log.UseLogFile=true -v Log.File=/proc/self/fd/2 3 | exec hhvm --mode server --config /etc/hhvm/hhvm.ini 4 | 5 | -------------------------------------------------------------------------------- /website/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | MAINTAINER Agung Julisman 3 | 4 | # change time zone to asia jakarta 5 | RUN rm /etc/timezone \ 6 | && echo "Asia/Jakarta" > /etc/timezone \ 7 | && chmod 644 /etc/timezone \ 8 | && dpkg-reconfigure --frontend noninteractive tzdata 9 | 10 | RUN mkdir -p /etc/nginx/conf.d 11 | RUN rm -f /etc/nginx/conf.d/default.conf 12 | 13 | ADD nginx.conf /etc/nginx/ 14 | ADD website.conf /etc/nginx/conf.d/website.conf 15 | ADD cms.conf /etc/nginx/conf.d/cms.conf 16 | 17 | EXPOSE 80 443 18 | -------------------------------------------------------------------------------- /website/nginx/cms.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9000; 3 | root /var/www/php/cms/public; 4 | index index.php index.html index.htm; 5 | 6 | location / { 7 | try_files $uri $uri/ /index.php?$query_string; 8 | } 9 | 10 | location ~ \.php$ { 11 | 12 | try_files $uri =404; 13 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 14 | fastcgi_pass ralali_hhvm_build:9000; 15 | fastcgi_index index.php; 16 | include fastcgi_params; 17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 18 | fastcgi_param PATH_INFO $fastcgi_path_info; 19 | fastcgi_read_timeout 300; 20 | } 21 | } -------------------------------------------------------------------------------- /website/nginx/default.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | 3 | error_log /var/log/nginx/error.log warn; 4 | pid /var/run/nginx.pid; 5 | 6 | worker_processes 4; 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | http { 13 | include 
/etc/nginx/mime.types; 14 | default_type application/octet-stream; 15 | server_names_hash_bucket_size 64; 16 | client_max_body_size 64m; 17 | 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main buffer=16k; 23 | 24 | sendfile on; 25 | tcp_nopush on; 26 | tcp_nodelay on; 27 | keepalive_timeout 65; 28 | keepalive_requests 100; 29 | include /etc/nginx/conf.d/*.conf; 30 | } -------------------------------------------------------------------------------- /website/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | 3 | error_log /var/log/nginx/error.log warn; 4 | pid /var/run/nginx.pid; 5 | 6 | worker_processes 4; 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | http { 13 | include /etc/nginx/mime.types; 14 | default_type application/octet-stream; 15 | server_names_hash_bucket_size 64; 16 | client_max_body_size 64m; 17 | 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main buffer=16k; 23 | 24 | sendfile on; 25 | tcp_nopush on; 26 | tcp_nodelay on; 27 | keepalive_timeout 65; 28 | keepalive_requests 100; 29 | include /etc/nginx/conf.d/*.conf; 30 | } -------------------------------------------------------------------------------- /website/nginx/website.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 8000; 3 | root /var/www/php/website/public; 4 | index index.php index.html index.htm; 5 | 6 | location / { 7 | try_files $uri $uri/ /index.php?$query_string; 8 | } 9 | 10 | location ~ \.php$ { 11 | 12 | try_files $uri =404; 13 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 14 | fastcgi_pass 
hhvm2:9000; 15 | fastcgi_index index.php; 16 | include fastcgi_params; 17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 18 | fastcgi_param PATH_INFO $fastcgi_path_info; 19 | fastcgi_read_timeout 300; 20 | } 21 | } --------------------------------------------------------------------------------