├── .gitignore ├── LICENSE ├── MongoDB ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ └── 0.1.0 │ └── mongodb-config │ ├── Dockerfile │ ├── connect.sh │ ├── entrypoint.sh │ ├── initiate.sh │ └── scaling.sh ├── README.md ├── drone ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ ├── 0.1.0 │ ├── drone-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ └── dronerc.toml │ │ ├── rancher_entry.sh │ │ ├── run.sh │ │ └── templates │ │ │ └── dronerc.tmpl │ └── drone │ │ ├── Dockerfile │ │ ├── contrib │ │ └── docker │ │ │ └── etc │ │ │ └── nsswitch.conf │ │ └── drone_static │ ├── 0.5-dec16-1 │ ├── drone-config │ │ ├── Dockerfile │ │ ├── confd │ │ │ ├── agent │ │ │ │ ├── conf.d │ │ │ │ │ └── dronerc.toml │ │ │ │ └── templates │ │ │ │ │ └── dronerc.tmpl │ │ │ └── server │ │ │ │ ├── conf.d │ │ │ │ └── dronerc.toml │ │ │ │ └── templates │ │ │ │ └── dronerc.tmpl │ │ └── scripts │ │ │ └── rancher_drone-config_entrypoint.sh │ └── drone │ │ ├── Dockerfile │ │ ├── drone_static │ │ └── scripts │ │ └── rancher_drone_entrypoint.sh │ └── 0.5 │ ├── drone-config │ ├── Dockerfile │ ├── confd │ │ ├── agent │ │ │ ├── conf.d │ │ │ │ └── dronerc.toml │ │ │ └── templates │ │ │ │ └── dronerc.tmpl │ │ └── server │ │ │ ├── conf.d │ │ │ └── dronerc.toml │ │ │ └── templates │ │ │ └── dronerc.tmpl │ └── scripts │ │ └── rancher_drone-config_entrypoint.sh │ ├── drone-debug │ └── Dockerfile │ └── drone │ ├── Dockerfile │ ├── contrib │ └── docker │ │ └── etc │ │ └── nsswitch.conf │ ├── drone_static │ └── scripts │ └── rancher_drone_entrypoint.sh ├── elasticsearch ├── 0.1.0 │ └── docker-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.2.1 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.3.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.3.1 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ ├── 0.1.0 │ ├── elasticsearch-bootstrap │ │ ├── Dockerfile │ │ └── run.sh │ ├── 
elasticsearch-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── elasticsearch.toml │ │ │ ├── logging.toml │ │ │ └── plugins.toml │ │ └── templates │ │ │ ├── elasticsearch.tmpl │ │ │ ├── logging.tmpl │ │ │ └── plugins.tmpl │ └── kopf │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── nginx.conf.tpl │ │ └── run.sh │ ├── 0.2.0 │ ├── elasticsearch-bootstrap │ │ ├── Dockerfile │ │ └── run.sh │ └── elasticsearch-conf │ │ ├── Dockerfile │ │ ├── conf.d │ │ ├── elasticsearch.toml │ │ ├── logging.toml │ │ └── plugins.toml │ │ └── templates │ │ ├── elasticsearch.tmpl │ │ ├── logging.tmpl │ │ └── plugins.tmpl │ ├── 0.3.0 │ └── elasticsearch-bootstrap │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.4.0 │ ├── elasticsearch-conf │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── elasticsearch.toml │ │ │ ├── logging.toml │ │ │ └── plugins.toml │ │ ├── run.sh │ │ └── templates │ │ │ ├── elasticsearch.tmpl │ │ │ ├── logging.tmpl │ │ │ └── plugins.tmpl │ └── kopf │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── nginx.conf.tpl │ │ └── run.sh │ └── 0.5.0 │ └── elasticsearch-conf │ ├── Dockerfile │ ├── conf.d │ ├── elasticsearch.toml │ ├── logging.toml │ └── plugins.toml │ ├── dockerentry.sh │ ├── run.sh │ └── templates │ ├── elasticsearch.tmpl │ ├── logging.tmpl │ └── plugins.tmpl ├── etcd ├── 0.1.0 │ └── docker-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.3.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.4.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.5.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.6.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.8.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.9.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ ├── 0.1.0 │ └── etcd │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.10.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── Dockerfile.hcproxy │ │ ├── build │ │ ├── disaster │ │ ├── hcproxy.go │ │ └── run.sh │ ├── 0.11.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── 
Dockerfile.wrapper │ │ ├── build │ │ ├── delete │ │ ├── disaster │ │ ├── run.sh │ │ └── wrapper.go │ ├── 0.12.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── Dockerfile.wrapper │ │ ├── build │ │ ├── delete │ │ ├── disaster │ │ ├── run.sh │ │ └── wrapper.go │ ├── 0.13.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── Dockerfile.wrapper │ │ ├── build │ │ ├── delete │ │ ├── disaster │ │ ├── run.sh │ │ └── wrapper.go │ ├── 0.14.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── Dockerfile.wrapper │ │ ├── build │ │ ├── delete │ │ ├── disaster │ │ ├── run.sh │ │ └── wrapper.go │ ├── 0.2.0 │ └── etcd │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.3.0 │ └── etcd │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.4.0 │ └── etcd │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.5.0 │ └── etcd │ │ ├── Dockerfile │ │ └── run.sh │ ├── 0.6.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── build │ │ └── run.sh │ ├── 0.7.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── build │ │ └── run.sh │ ├── 0.8.0 │ └── etcd │ │ ├── Dockerfile │ │ ├── build │ │ ├── healthcheck.sh │ │ └── run.sh │ └── 0.9.0 │ └── etcd │ ├── Dockerfile │ ├── build │ ├── disaster │ └── run.sh ├── galera ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── README.md └── containers │ ├── 0.1.0 │ ├── galera-conf │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── conf.d │ │ │ ├── cluster_ips.toml │ │ │ └── galera.toml │ │ ├── lowest_idx.sh │ │ ├── run │ │ ├── start_galera │ │ ├── templates │ │ │ ├── cluster_ips.tmpl │ │ │ └── galera.cnf.tmpl │ │ └── testing │ └── galera │ │ ├── Dockerfile │ │ ├── docker-entrypoint.sh │ │ └── setup_datadir.sh │ └── 0.2.0 │ ├── galera-conf │ ├── Dockerfile │ ├── conf.d │ │ └── galera.toml │ ├── run │ ├── start_galera │ └── templates │ │ └── galera.cnf.tmpl │ ├── galera-leader-proxy │ ├── Dockerfile │ └── run.sh │ └── galera │ ├── Dockerfile │ └── docker-entrypoint.sh ├── glusterfs ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.1.1 │ ├── docker-compose.yml │ └── 
rancher-compose.yml ├── 0.1.2 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.1.3 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.2.1 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── README.md └── containers │ ├── 0.1.0 │ └── glusterfs │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── lowest_idx.sh │ │ ├── peerprobe.sh │ │ └── replicated_volume_create.sh │ ├── 0.1.1 │ └── glusterfs │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── lowest_idx.sh │ │ ├── peerprobe.sh │ │ └── replicated_volume_create.sh │ ├── 0.1.2 │ └── glusterfs │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── lowest_idx.sh │ │ ├── peerprobe.sh │ │ └── replicated_volume_create.sh │ ├── 0.1.3 │ └── glusterfs │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── lowest_idx.sh │ │ ├── peerprobe.sh │ │ └── replicated_volume_create.sh │ ├── 0.2.0 │ └── glusterfs │ │ ├── Dockerfile │ │ ├── common.sh │ │ ├── peerprobe.sh │ │ └── replicated_volume_create.sh │ └── 0.2.1 │ └── glusterfs │ ├── Dockerfile │ ├── common.sh │ ├── peerprobe.sh │ └── replicated_volume_create.sh ├── hadoop ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.3.0 │ ├── README.md │ ├── docker-compose.yml │ ├── env-file │ └── rancher-compose.yml ├── 0.3.5 │ ├── docker-compose.yml │ ├── env-vars │ └── rancher-compose.yml ├── README.md └── containers │ ├── 0.1.0 │ ├── build.sh │ ├── hadoop-base │ │ ├── Dockerfile │ │ ├── bootstrap-hdfs.sh │ │ ├── hdfs-site.xml │ │ └── refreshnodes.sh │ ├── hadoop-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── core-site.toml │ │ │ ├── hadoop-env.toml │ │ │ ├── hdfs-site.toml │ │ │ ├── mapred-site.toml │ │ │ └── yarn-site.toml │ │ └── templates │ │ │ ├── core-site.xml.tmpl │ │ │ ├── hadoop-env.sh.tmpl │ │ │ ├── hdfs-site.xml.tmpl │ │ │ ├── mapred-site.xml.tmpl │ │ │ └── yarn-site.xml.tmpl │ ├── hadoop-followers-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ └── 
slaves.toml │ │ └── templates │ │ │ └── slaves.tmpl │ ├── hadoop-namenode-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── core-site.toml │ │ │ └── hdfs-site.toml │ │ └── templates │ │ │ ├── core-site.xml.tmpl │ │ │ └── hdfs-site.xml.tmpl │ └── hadoop-yarnrm-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ └── yarn-site.toml │ │ └── templates │ │ └── yarn-site.xml.tmpl │ ├── 0.2.0 │ ├── build.sh │ ├── hadoop-base │ │ ├── Dockerfile │ │ ├── bootstrap-hdfs.sh │ │ ├── hdfs-site.xml │ │ └── refreshnodes.sh │ ├── hadoop-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── core-site.toml │ │ │ ├── hadoop-env.toml │ │ │ ├── hdfs-site.toml │ │ │ ├── mapred-site.toml │ │ │ └── yarn-site.toml │ │ └── templates │ │ │ ├── core-site.xml.tmpl │ │ │ ├── hadoop-env.sh.tmpl │ │ │ ├── hdfs-site.xml.tmpl │ │ │ ├── mapred-site.xml.tmpl │ │ │ └── yarn-site.xml.tmpl │ └── hadoop-followers-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ └── slaves.toml │ │ └── templates │ │ └── slaves.tmpl │ ├── 0.3.0 │ ├── build.sh │ ├── hadoop-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ │ ├── capacity-scheduler.toml │ │ │ ├── configuration.toml │ │ │ ├── container-executor.toml │ │ │ ├── core-site.toml │ │ │ ├── hadoop-env.toml │ │ │ ├── hadoop-metrics.toml │ │ │ ├── hadoop-metrics2.toml │ │ │ ├── hadoop-policy.toml │ │ │ ├── hdfs-site.toml │ │ │ ├── httpfs-env.toml │ │ │ ├── httpfs-log4j.toml │ │ │ ├── httpfs-signature.toml │ │ │ ├── httpfs-site.toml │ │ │ ├── kms-acls.toml │ │ │ ├── kms-env.toml │ │ │ ├── kms-log4j.toml │ │ │ ├── kms-site.toml │ │ │ ├── log4j.toml │ │ │ ├── mapred-env.toml │ │ │ ├── mapred-site.toml │ │ │ ├── yarn-env.toml │ │ │ └── yarn-site.toml │ │ └── templates │ │ │ ├── capacity-scheduler.xml.tmpl │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml.tmpl │ │ │ ├── hadoop-env.sh.tmpl │ │ │ ├── hadoop-metrics.properties │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hdfs-site.xml.tmpl │ │ │ ├── httpfs-env.sh │ │ │ ├── 
httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-site.xml.tmpl │ │ │ ├── yarn-env.sh │ │ │ └── yarn-site.xml.tmpl │ └── hadoop-followers-config │ │ ├── Dockerfile │ │ ├── conf.d │ │ └── slaves.toml │ │ └── templates │ │ └── slaves.tmpl │ └── 0.3.5 │ ├── build.sh │ ├── hadoop-base │ ├── Dockerfile │ ├── bootstrap-hdfs.sh │ ├── bootstrap-local.sh │ ├── hdfs-site.xml │ └── refreshnodes.sh │ ├── hadoop-config │ ├── Dockerfile │ ├── conf.d │ │ ├── capacity-scheduler.toml │ │ ├── configuration.toml │ │ ├── container-executor.toml │ │ ├── core-site.toml │ │ ├── hadoop-env.toml │ │ ├── hadoop-metrics.toml │ │ ├── hadoop-metrics2.toml │ │ ├── hadoop-policy.toml │ │ ├── hdfs-site.toml │ │ ├── httpfs-env.toml │ │ ├── httpfs-log4j.toml │ │ ├── httpfs-signature.toml │ │ ├── httpfs-site.toml │ │ ├── kms-acls.toml │ │ ├── kms-env.toml │ │ ├── kms-log4j.toml │ │ ├── kms-site.toml │ │ ├── log4j.toml │ │ ├── mapred-env.toml │ │ ├── mapred-site.toml │ │ ├── yarn-env.toml │ │ └── yarn-site.toml │ └── templates │ │ ├── capacity-scheduler.xml.tmpl │ │ ├── configuration.xsl │ │ ├── container-executor.cfg │ │ ├── core-site.xml.tmpl │ │ ├── hadoop-env.sh.tmpl │ │ ├── hadoop-metrics.properties │ │ ├── hadoop-metrics2.properties │ │ ├── hadoop-policy.xml │ │ ├── hdfs-site.xml.tmpl │ │ ├── httpfs-env.sh │ │ ├── httpfs-log4j.properties │ │ ├── httpfs-signature.secret │ │ ├── httpfs-site.xml │ │ ├── kms-acls.xml │ │ ├── kms-env.sh │ │ ├── kms-log4j.properties │ │ ├── kms-site.xml │ │ ├── log4j.properties │ │ ├── mapred-env.sh │ │ ├── mapred-site.xml.tmpl │ │ ├── yarn-env.sh │ │ └── yarn-site.xml.tmpl │ └── hadoop-followers-config │ ├── Dockerfile │ ├── conf.d │ └── slaves.toml │ └── templates │ └── slaves.tmpl ├── jenkins ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml ├── 0.1.1 │ ├── 
docker-compose.yml │ └── rancher-compose.yml └── containers │ ├── 0.1.0 │ └── plugins │ │ ├── Dockerfile │ │ ├── conf.d │ │ └── plugins.toml │ │ ├── jenkins_ci.sh │ │ └── templates │ │ └── plugins.tmpl │ └── 0.1.1 │ └── plugins │ ├── Dockerfile │ ├── conf.d │ └── plugins.toml │ ├── jenkins_ci.sh │ └── templates │ └── plugins.tmpl ├── jenkins_swarm_clients ├── 0.1.0 │ └── docker-compose.yml ├── 0.2.0 │ └── docker-compose.yml └── container │ ├── 0.1.0 │ └── jenkins-swarm │ │ ├── Dockerfile │ │ └── run.sh │ └── 0.2.0 │ └── jenkins-swarm │ ├── Dockerfile │ └── run.sh ├── kibana ├── 0.1.0 │ └── docker-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ └── nginx │ ├── Dockerfile │ └── entrypoint.sh ├── logspout ├── containers │ └── logspout-logstash │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── entrypoint.sh │ │ ├── logstash.go │ │ └── modules.go └── docker-compose.yml ├── logstash ├── 0.1.0 │ ├── README.md │ └── docker-compose.yml ├── 0.2.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ ├── 0.1.0 │ └── logstash-config │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── conf.d │ │ ├── logstashconfig.toml │ │ └── patterns.toml │ │ └── templates │ │ ├── logstash.conf.tmpl │ │ └── patterns.tmpl │ └── 0.2.0 │ └── logstash-config │ ├── .dockerignore │ ├── Dockerfile │ ├── README.md │ ├── conf.d │ ├── logstashconfig.toml │ └── patterns.toml │ └── templates │ ├── logstash.conf.tmpl │ └── patterns.tmpl ├── spark ├── 0.1.0 │ ├── docker-compose.yml │ └── rancher-compose.yml └── containers │ └── 0.1.0 │ ├── spark-conf │ ├── Dockerfile │ ├── conf.d │ │ ├── spark-defaults.toml │ │ └── spark-env.toml │ └── templates │ │ ├── spark-defaults.conf.tmpl │ │ └── spark-env.sh.tmpl │ └── spark │ ├── Dockerfile │ ├── common.sh │ ├── start_spark.sh │ └── work_dir_setup.sh ├── utils └── containers │ ├── confd │ ├── Dockerfile │ ├── Dockerfile.rancher │ ├── confd-0.10.0-linux-amd64 │ └── 
confd-0.11.0-dev-rancher-linux-amd64 │ └── nginx-conf │ ├── 0.1.0 │ ├── Dockerfile │ ├── conf.d │ │ ├── htpasswd.toml │ │ └── nginx.toml │ └── templates │ │ ├── kibana.htpasswd.tmpl │ │ └── nginx.conf.tmpl │ └── 0.2.0 │ ├── Dockerfile │ ├── conf.d │ ├── htpasswd.toml │ └── nginx.toml │ └── templates │ ├── htpasswd.tmpl │ └── nginx.conf.tmpl └── zookeeper ├── 0.1.0 ├── docker-compose.yml └── rancher-compose.yml ├── 0.2.0 ├── docker-compose.yml └── rancher-compose.yml ├── 0.3.0 ├── docker-compose.yml └── rancher-compose.yml ├── 0.4.0 ├── docker-compose.yml └── rancher-compose.yml ├── README.md └── containers ├── 0.1.0 ├── zookeeper-config │ ├── Dockerfile │ ├── conf.d │ │ ├── myid.toml │ │ └── zoo.cfg.toml │ └── templates │ │ ├── myid.tmpl │ │ └── zoo.cfg.tmpl └── zookeeper │ ├── Dockerfile │ ├── README.md │ └── entry.sh ├── 0.2.0 ├── zookeeper-config │ ├── Dockerfile │ ├── conf.d │ │ ├── myid.toml │ │ ├── startup.toml │ │ └── zoo.cfg.toml │ └── templates │ │ ├── myid.tmpl │ │ ├── startup.tmpl │ │ └── zoo.cfg.tmpl └── zookeeper │ ├── Dockerfile │ ├── README.md │ └── entry.sh ├── 0.3.0 └── zookeeper │ ├── Dockerfile │ ├── run.sh │ └── zoo.cfg └── 0.4.0 └── zookeeper ├── Dockerfile ├── conf.d ├── myid.toml ├── startup.toml └── zoo.cfg.toml ├── run.sh └── templates ├── myid.tmpl ├── startup.tmpl └── zoo.cfg.tmpl /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_store 2 | *.swp 3 | *.env 4 | -------------------------------------------------------------------------------- /MongoDB/0.1.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | mongo-cluster: 2 | restart: always 3 | environment: 4 | MONGO_SERVICE_NAME: mongo-cluster 5 | CATTLE_SCRIPT_DEBUG: 6 | entrypoint: /opt/rancher/bin/entrypoint.sh 7 | command: 8 | - --replSet 9 | - "rs0" 10 | image: mongo:3.0 11 | labels: 12 | io.rancher.container.hostname_override: container_name 13 | io.rancher.sidekicks: mongo-base, 
#!/bin/bash
#
# MongoDB/containers/0.1.0/mongodb-config/connect.sh
#
# Initiates a MongoDB replica set across the members of this Rancher service.
# Runs on the leader container (launched by initiate.sh): discovers peer IPs
# via giddyup, calls rs.initiate() locally, then rs.add() for every peer.

if [ -n "$CATTLE_SCRIPT_DEBUG" ]; then
    set -x
fi

GIDDYUP=/opt/rancher/bin/giddyup

# Initialise the replica set on this node, then add every other member.
function cluster_init {
    # Give the local mongod time to accept connections before rs.initiate().
    sleep 10
    MYIP=$("$GIDDYUP" ip myip)
    mongo --eval "printjson(rs.initiate())"
    for member in $("$GIDDYUP" ip stringify --delimiter " "); do
        if [ "$member" != "$MYIP" ]; then
            mongo --eval "printjson(rs.add('$member:27017'))"
            # Brief pause between adds so the set settles member by member.
            sleep 5
        fi
    done
}

# Return 0 if any service member already reports itself as replica-set master.
function find_master {
    local member
    for member in $("$GIDDYUP" ip stringify --delimiter " "); do
        # grep -q: we only need the exit status, not the matched output.
        if mongo --host "$member" --eval "printjson(db.isMaster())" \
                | grep 'ismaster' | grep -q "true"; then
            return 0
        fi
    done
    return 1
}

# Script starts here
# Block until the service has reached its configured scale.
"$GIDDYUP" service wait scale --timeout 120

# Wait until all services are up before probing for an existing master.
sleep 10
if find_master; then
    echo 'Master is already initiated.. nothing to do!'
else
    echo 'Initiating the cluster!'
    cluster_init
fi
#!/bin/bash
#
# MongoDB/containers/0.1.0/mongodb-config/initiate.sh
#
# Leader-side entrypoint helper: launches the replica-set initiation script
# in the background, then starts mongod exactly like the stock mongo image
# entrypoint (numactl interleave + gosu drop to the mongodb user).

# Consistent with the sibling scripts: honour the stack-level debug flag.
if [ -n "$CATTLE_SCRIPT_DEBUG" ]; then
    set -x
fi

# Run cluster init script in the background; it waits for service scale.
/opt/rancher/bin/connect.sh &

# NOTE(review): this tests the backgrounding itself, which essentially
# always succeeds — kept for parity with entrypoint.sh.
if [ $? -ne 0 ]
then
    echo "Error Occurred.."
fi

set -e

# If invoked with flags only (e.g. "--replSet rs0"), treat them as mongod args.
if [ "${1:0:1}" = '-' ]; then
    set -- mongod "$@"
fi

if [ "$1" = 'mongod' ]; then
    chown -R mongodb /data/db

    # Prefer NUMA interleaving when numactl works here (MongoDB recommendation).
    numa='numactl --interleave=all'
    if $numa true &> /dev/null; then
        set -- $numa "$@"
    fi

    exec gosu mongodb "$@"
fi

# BUG FIX: fall through for non-mongod commands. The original ended at the
# 'fi' above, so e.g. "initiate.sh bash" silently ran nothing; entrypoint.sh
# has this exec and the two scripts should behave identically.
exec "$@"
-------------------------------------------------------------------------------- 1 | # Dockerfiles for Rancher Catalog Items. 2 | ---- 3 | ## Purpose 4 | 5 | These are the Dockerfiles for the containers that we use for Rancher entries in the community catalog. Also, you will find compose files that are here for development purposes. 6 | 7 | ## Layout 8 | Each App has its own folder. There is a containers directory where the various containers for the apps are kept. We try to version the directories to the version of the container. 9 | 10 | ## License 11 | 12 | Copyright (c) 2014-2016 Rancher Labs, Inc. 13 | 14 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 15 | 16 | http://www.apache.org/licenses/LICENSE-2.0 17 | 18 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-------------------------------------------------------------------------------- /drone/0.1.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | drone-lb: 2 | ports: 3 | - 8000:8000 4 | tty: true 5 | image: rancher/load-balancer-service 6 | links: 7 | - drone-server:drone-server 8 | stdin_open: true 9 | 10 | drone-healthcheck: 11 | image: rancher/drone-config:v0.1.0 12 | volumes_from: 13 | - drone-data-volume 14 | entrypoint: /giddyup health 15 | 16 | drone-server: 17 | image: rancher/drone-config:v0.1.0 18 | volumes_from: 19 | - drone-data-volume 20 | labels: 21 | io.rancher.sidekicks: drone-data-volume,drone-daemon,drone-healthcheck 22 | external_links: 23 | - galera/galera-lb:mysqldb 24 | 25 | drone-daemon: 26 | image: rancher/drone:0.4 27 | net: 'container:drone-server' 28 | volumes: 29 | - /var/run/docker.sock:/var/run/docker.sock 30 | volumes_from: 31 | - drone-data-volume 32 | entrypoint: /opt/rancher/rancher_entry.sh 33 | 34 | ## Do not change below. Could cause data loss in upgrade. 
35 | drone-data-volume: 36 | image: busybox 37 | net: none 38 | command: /bin/true 39 | labels: 40 | io.rancher.container.start_once: true 41 | volumes: 42 | - /var/lib/drone 43 | - /etc/drone 44 | - /opt/rancher 45 | -------------------------------------------------------------------------------- /drone/0.1.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | drone-lb: 2 | scale: 1 3 | load_balancer_config: 4 | haproxy_config: {} 5 | health_check: 6 | port: 42 7 | interval: 2000 8 | unhealthy_threshold: 3 9 | healthy_threshold: 2 10 | response_timeout: 2000 11 | drone-server: 12 | scale: 1 13 | metadata: 14 | remote_driver: ${remote_driver} 15 | remote_config: "${remote_config}" 16 | database_driver: ${database_driver} 17 | database_config: "${database_config}" 18 | http_proxy_on: false 19 | debug: true 20 | -------------------------------------------------------------------------------- /drone/containers/0.1.0/drone-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN apk add --update bash && rm -rf /var/cache/apk/* 4 | 5 | # Confd 6 | ADD ./conf.d /etc/confd/conf.d 7 | ADD ./templates /etc/confd/templates 8 | 9 | ADD https://github.com/cloudnautique/giddyup/releases/download/v0.7.0/giddyup /giddyup 10 | ADD https://github.com/rancher/confd/releases/download/0.11.0-dev-rancher/confd-0.11.0-dev-rancher-linux-amd64 /confd 11 | ADD https://github.com/cloudnautique/dynamic-drone-nodes/releases/download/v0.1.1/dynamic-drone-nodes /dynamic-drone-nodes 12 | RUN chmod +x /confd /giddyup /dynamic-drone-nodes 13 | 14 | ADD ./*.sh / 15 | 16 | ENTRYPOINT ["/run.sh"] 17 | -------------------------------------------------------------------------------- /drone/containers/0.1.0/drone-config/conf.d/dronerc.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="dronerc.tmpl" 3 | 
export REMOTE_DRIVER={{ getv "/self/service/metadata/remote_driver" }}
export REMOTE_CONFIG="{{ getv "/self/service/metadata/remote_config" }}"
export DATABASE_DRIVER="{{ getv "/self/service/metadata/database_driver" }}"
export DATABASE_CONFIG="{{ getv "/self/service/metadata/database_config" }}"
export DEBUG={{ getv "/self/service/metadata/debug" }}

{{/* BUG FIX: the original wrote
       {{ if (eq "/self/service/metadata/http_proxy_on" "true") }}
     which compares the literal key-path STRING to "true" and is therefore
     always false, so the proxy exports below could never be emitted.
     The key's value must be looked up with getv before comparing. */}}
{{ if eq (getv "/self/service/metadata/http_proxy_on") "true" }}
export HTTPS_PROXY={{ getv "/self/service/metadata/https_proxy" }}
export https_proxy={{ getv "/self/service/metadata/https_proxy" }}
export HTTP_PROXY={{ getv "/self/service/metadata/http_proxy" }}
export http_proxy={{ getv "/self/service/metadata/http_proxy" }}
export NO_PROXY={{ getv "/self/service/metadata/noproxy" }}
{{end}}
-------------------------------------------------------------------------------- 1 | # Build the drone executable on a x64 Linux host: 2 | # 3 | # go build --ldflags '-extldflags "-static"' -o drone_static 4 | # 5 | # 6 | # Alternate command for Go 1.4 and older: 7 | # 8 | # go build -a -tags netgo --ldflags '-extldflags "-static"' -o drone_static 9 | # 10 | # 11 | # Build the docker image: 12 | # 13 | # docker build --rm=true -t drone/drone . 14 | 15 | ## Built from cloudnautique/drone fork on github. 16 | 17 | FROM busybox 18 | EXPOSE 8000 19 | ADD contrib/docker/etc/nsswitch.conf /etc/ 20 | 21 | # Pulled from centurylin/ca-certs source. 22 | ADD https://raw.githubusercontent.com/CenturyLinkLabs/ca-certs-base-image/master/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 23 | 24 | ENV DATABASE_DRIVER=sqlite3 25 | ENV DATABASE_CONFIG=/var/lib/drone/drone.sqlite 26 | 27 | ADD drone_static /drone_static 28 | 29 | ENTRYPOINT ["/drone_static"] 30 | -------------------------------------------------------------------------------- /drone/containers/0.1.0/drone/contrib/docker/etc/nsswitch.conf: -------------------------------------------------------------------------------- 1 | # /etc/nsswitch.conf 2 | # 3 | # Example configuration of GNU Name Service Switch functionality. 4 | # If you have the `glibc-doc-reference' and `info' packages installed, try: 5 | # `info libc "Name Service Switch"' for information about this file. 
# drone/containers/0.5-dec16-1/drone-config/Dockerfile
#
# confd sidekick that renders /etc/drone/dronerc for the drone server/agent
# containers from Rancher metadata.

FROM busybox

LABEL vendor="Rancher Labs, Inc." \
      com.rancher.version="0.5" \
      com.rancher.repo="https://github.com/rancher/catalog-dockerfiles"

# Use the ENV key=value form: in the original space-separated form the
# single quotes risk being carried into the value and thus into the URLs.
ENV GIDDYUP_VERSION=v0.14.0 \
    CONFD_VERSION=0.11.0

ADD ./confd/ /etc/confd/

# BUG FIX: the original quoted the whole "URL /dest" string as ONE argument
# (ADD "https://... /giddyup"), but ADD requires source and destination as
# separate tokens — the build fails with "ADD requires at least two arguments".
ADD https://github.com/cloudnautique/giddyup/releases/download/${GIDDYUP_VERSION}/giddyup /giddyup
ADD https://github.com/kelseyhightower/confd/releases/download/v${CONFD_VERSION}/confd-${CONFD_VERSION}-linux-amd64 /confd
ADD ./scripts/*.sh /opt/rancher/scripts/

RUN chmod +x /confd /giddyup /opt/rancher/scripts/*

# Mode argument is consumed by the entrypoint: 'server' (default) or 'agent'.
ENTRYPOINT ["/opt/rancher/scripts/rancher_drone-config_entrypoint.sh"]
CMD ["server"]

# Ship the Dockerfile inside the image for provenance/debugging.
ADD Dockerfile /opt/rancher/
/drone/containers/0.5-dec16-1/drone-config/confd/agent/templates/dronerc.tmpl: -------------------------------------------------------------------------------- 1 | {{ if exists "/self/service/metadata/debug_mode" }} 2 | export DRONE_DEBUG='{{ getv "/self/service/metadata/debug_mode" }}' 3 | {{ end }} 4 | 5 | export DRONE_SERVER='ws://drone.rancher.internal:{{ getv "/self/service/metadata/drone_service_tcp_port" }}/ws/broker' 6 | {{ getv "/self/service/metadata/dronerc_contents" }} 7 | -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone-config/confd/server/conf.d/dronerc.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="dronerc.tmpl" 3 | dest="/etc/drone/dronerc" 4 | keys = [ 5 | "/self" 6 | ] 7 | -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone-config/confd/server/templates/dronerc.tmpl: -------------------------------------------------------------------------------- 1 | {{ if exists "/self/service/metadata/debug_mode" }} 2 | export DRONE_DEBUG='{{ getv "/self/service/metadata/debug_mode" }}' 3 | {{ end }} 4 | 5 | {{ getv "/self/service/metadata/dronerc_contents" }} 6 | -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone-config/scripts/rancher_drone-config_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ 'agent' == "$1" ] || [ 'server' == "$1" ]; then 4 | echo "$0: Starting drone in $1 mode..." 5 | else 6 | echo "$0: Must specify mode of 'agent' or 'server'." 
>&2 ; exit -1 7 | fi 8 | 9 | confd_cmd='/confd -backend=rancher -prefix=/2015-07-25' 10 | 11 | if [ -n "${DEBUG}" ]; then 12 | extra_confd_opts='-log-level=debug' 13 | ${confd_cmd} ${extra_confd_opts} -noop -onetime 14 | fi 15 | 16 | exec ${confd_cmd} ${extra_confd_opts} -confdir="/etc/confd/$1" 17 | -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the drone executable on a x64 Linux host: 2 | # 3 | # go build --ldflags '-extldflags "-static"' -o drone_static 4 | # 5 | # 6 | # Alternate command for Go 1.4 and older: 7 | # 8 | # go build -a -tags netgo --ldflags '-extldflags "-static"' -o drone_static 9 | # 10 | # 11 | # Build the docker image: 12 | # 13 | # docker build --rm=true -t drone/drone . 14 | 15 | ## Built from cloudnautique/drone fork on github. 16 | 17 | FROM busybox 18 | 19 | LABEL vendor="Rancher Labs, Inc" \ 20 | com.rancher.version="v0.5-dec16-1" \ 21 | com.rancher.repo="https://github.com/rancher/catalog-dockerfiles" 22 | 23 | EXPOSE 8000 24 | ENV DATABASE_DRIVER=sqlite3 25 | ENV DATABASE_CONFIG=/var/lib/drone/drone.sqlite 26 | 27 | 28 | # Pulled from centurylin/ca-certs source. 
29 | ADD https://raw.githubusercontent.com/CenturyLinkLabs/ca-certs-base-image/master/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 30 | 31 | COPY drone_static /drone_static 32 | COPY ./scripts/*.sh /opt/rancher/scripts/ 33 | RUN chmod +x /opt/rancher/scripts/*.sh 34 | 35 | # default to server though it could also be 'agent' 36 | ENTRYPOINT ["/opt/rancher/scripts/rancher_drone_entrypoint.sh"] 37 | CMD ["server"] 38 | 39 | ADD Dockerfile /opt/rancher/ 40 | -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone/drone_static: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/catalog-dockerfiles/8493c9633b462f06693f19abe579c1b744026dbd/drone/containers/0.5-dec16-1/drone/drone_static -------------------------------------------------------------------------------- /drone/containers/0.5-dec16-1/drone/scripts/rancher_drone_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ 'agent' == "$1" ] || [ 'server' == "$1" ]; then 4 | echo "$0: Starting drone in $1 mode..." 5 | else 6 | echo "$0: Must specify mode of 'agent' or 'server'." >&2 ; exit -1 7 | fi 8 | 9 | while [ ! -e /etc/drone/dronerc ]; do 10 | sleep 1 11 | done 12 | 13 | if [ -n "${DEBUG}" ]; then 14 | echo "Contents of /etc/drone/dronerc..." 15 | cat /etc/drone/dronerc 16 | fi 17 | 18 | source /etc/drone/dronerc 19 | 20 | exec /drone_static $1 21 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/Dockerfile: -------------------------------------------------------------------------------- 1 | #FROM nrvale0/drone-debug:0.5 2 | FROM alpine:3.4 3 | 4 | LABEL vendor="Rancher Labs, Inc." 
\ 5 | com.rancher.version="0.5" \ 6 | com.rancher.repo="https://github.com/rancher/catalog-dockerfiles" 7 | 8 | ENV GIDDYUP_VERSION='v0.14.0' CONFD_VERSION='v0.11.0' 9 | 10 | RUN apk add --update bash && rm -rf /var/cache/apk/* 11 | 12 | ADD ./confd/ /etc/confd/ 13 | 14 | # NOTE(review): URL and destination must be separate arguments (quoting both as
# one string breaks ADD); also closed the unterminated ${CONFD_VERSION} expansion.
ADD https://github.com/cloudnautique/giddyup/releases/download/${GIDDYUP_VERSION}/giddyup /giddyup 15 | ADD https://github.com/rancher/confd/releases/download/${CONFD_VERSION}/confd-${CONFD_VERSION}-amd64 /confd 16 | ADD /scripts/*.sh /opt/rancher/scripts/ 17 | 18 | RUN chmod +x /confd /giddyup /opt/rancher/scripts/* 19 | 20 | ENTRYPOINT ["/opt/rancher/scripts/rancher_drone-config_entrypoint.sh", "server"] 21 | 22 | ADD Dockerfile /opt/rancher/ 23 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/confd/agent/conf.d/dronerc.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="dronerc.tmpl" 3 | dest="/etc/drone/dronerc" 4 | keys = [ 5 | "/self" 6 | ] 7 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/confd/agent/templates/dronerc.tmpl: -------------------------------------------------------------------------------- 1 | {{ if exists "/self/service/metadata/debug_mode" }} 2 | export DRONE_DEBUG='{{ getv "/self/service/metadata/debug_mode" }}' 3 | {{ end }} 4 | 5 | export DRONE_SERVER='http://drone:{{ getv "/self/service/metadata/drone_service_tcp_port" }}' 6 | {{ getv "/self/service/metadata/dronerc_contents" }} 7 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/confd/server/conf.d/dronerc.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="dronerc.tmpl" 3 | dest="/etc/drone/dronerc" 4 | keys = [ 5 | "/self" 6 | ] 7 |
-------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/confd/server/templates/dronerc.tmpl: -------------------------------------------------------------------------------- 1 | {{ if exists "/self/service/metadata/debug_mode" }} 2 | export DRONE_DEBUG='{{ getv "/self/service/metadata/debug_mode" }}' 3 | {{ end }} 4 | 5 | {{ getv "/self/service/metadata/dronerc_contents" }} 6 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-config/scripts/rancher_drone-config_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ 'agent' == "$1" ] || [ 'server' == "$1" ]; then 4 | echo "$0: Starting drone in $1 mode..." 5 | else 6 | echo "$0: Must specify mode of 'agent' or 'server'." >&2 ; exit -1 7 | fi 8 | 9 | confd_cmd='/confd -backend=rancher -prefix=/2015-07-25' 10 | 11 | if [ -n "${DEBUG}" ]; then 12 | extra_confd_opts='-log-level=debug' 13 | ${confd_cmd} ${extra_confd_opts} -noop -onetime 14 | fi 15 | 16 | exec ${confd_cmd} ${extra_confd_opts} -confdir="/etc/confd/$1" 17 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone-debug/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | MAINTAINER Nathan Valentine < nathan@rancher.com | nrvale0@gmail.com > 3 | 4 | ENV DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true 5 | 6 | RUN apt-get update && apt-get dist-upgrade -y && \ 7 | rm -rf /var/cache/apt/archive 8 | RUN apt-get install -y mysql-client nmap python-pip curl wget vim bash && \ 9 | rm -rf /var/cache/apt/archive 10 | RUN pip install httpie 11 | 12 | CMD /bin/bash 13 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone/Dockerfile: 
-------------------------------------------------------------------------------- 1 | # Build the drone executable on a x64 Linux host: 2 | # 3 | # go build --ldflags '-extldflags "-static"' -o drone_static 4 | # 5 | # 6 | # Alternate command for Go 1.4 and older: 7 | # 8 | # go build -a -tags netgo --ldflags '-extldflags "-static"' -o drone_static 9 | # 10 | # 11 | # Build the docker image: 12 | # 13 | # docker build --rm=true -t drone/drone . 14 | 15 | ## Built from cloudnautique/drone fork on github. 16 | 17 | #FROM nrvale0/drone-debug:0.5 18 | FROM alpine:3.4 19 | 20 | LABEL vendor="Rancher Labs, Inc" \ 21 | com.rancher.version="0.5" \ 22 | com.rancher.repo="https://github.com/rancher/catalog-dockerfiles" 23 | 24 | EXPOSE 8000 25 | ADD contrib/docker/etc/nsswitch.conf /etc/ 26 | 27 | # Pulled from centurylin/ca-certs source. 28 | ADD https://raw.githubusercontent.com/CenturyLinkLabs/ca-certs-base-image/master/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 29 | 30 | ENV DATABASE_DRIVER=sqlite3 31 | ENV DATABASE_CONFIG=/var/lib/drone/drone.sqlite 32 | 33 | ADD drone_static /drone_static 34 | ADD ./scripts/*.sh /opt/rancher/scripts/ 35 | RUN chmod +x /opt/rancher/scripts/*.sh 36 | 37 | # default to server though it could also be 'agent' 38 | ENTRYPOINT ["/opt/rancher/scripts/rancher_drone_entrypoint.sh", "server"] 39 | 40 | ADD Dockerfile /opt/rancher/ 41 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone/contrib/docker/etc/nsswitch.conf: -------------------------------------------------------------------------------- 1 | # /etc/nsswitch.conf 2 | # 3 | # Example configuration of GNU Name Service Switch functionality. 4 | # If you have the `glibc-doc-reference' and `info' packages installed, try: 5 | # `info libc "Name Service Switch"' for information about this file. 
6 | 7 | passwd: compat 8 | group: compat 9 | shadow: compat 10 | 11 | hosts: files dns 12 | networks: files 13 | 14 | protocols: files 15 | services: files 16 | ethers: files 17 | rpc: files 18 | 19 | netgroup: nis 20 | -------------------------------------------------------------------------------- /drone/containers/0.5/drone/drone_static: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/catalog-dockerfiles/8493c9633b462f06693f19abe579c1b744026dbd/drone/containers/0.5/drone/drone_static -------------------------------------------------------------------------------- /drone/containers/0.5/drone/scripts/rancher_drone_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ 'agent' == "$1" ] || [ 'server' == "$1" ]; then 4 | echo "$0: Starting drone in $1 mode..." 5 | else 6 | echo "$0: Must specify mode of 'agent' or 'server'." >&2 ; exit -1 7 | fi 8 | 9 | while [ ! -e /etc/drone/dronerc ]; do 10 | sleep 1 11 | done 12 | 13 | if [ -n "${DEBUG}" ]; then 14 | echo "Contents of /etc/drone/dronerc..." 
15 | cat /etc/drone/dronerc 16 | fi 17 | 18 | source /etc/drone/dronerc 19 | 20 | exec /drone_static $1 21 | -------------------------------------------------------------------------------- /elasticsearch/0.2.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | elasticsearch-masters: 2 | metadata: 3 | elasticsearch: 4 | yml: 5 | cluster.name: "logs" 6 | node.name: "$${HOSTNAME}" 7 | node.data: "false" 8 | node.master: "true" 9 | elasticsearch-datanodes: 10 | metadata: 11 | elasticsearch: 12 | yml: 13 | cluster.name: "logs" 14 | node.name: "$${HOSTNAME}" 15 | node.data: "true" 16 | node.master: "false" 17 | http.enabled: "false" 18 | elasticsearch-clients: 19 | metadata: 20 | elasticsearch: 21 | yml: 22 | cluster.name: "logs" 23 | node.name: "$${HOSTNAME}" 24 | node.data: "false" 25 | node.master: "false" 26 | -------------------------------------------------------------------------------- /elasticsearch/0.2.1/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | elasticsearch-masters: 2 | metadata: 3 | elasticsearch: 4 | yml: 5 | cluster.name: "logs" 6 | node.name: "$${HOSTNAME}" 7 | node.data: "false" 8 | node.master: "true" 9 | elasticsearch-datanodes: 10 | metadata: 11 | elasticsearch: 12 | yml: 13 | cluster.name: "logs" 14 | node.name: "$${HOSTNAME}" 15 | node.data: "true" 16 | node.master: "false" 17 | http.enabled: "false" 18 | elasticsearch-clients: 19 | metadata: 20 | elasticsearch: 21 | yml: 22 | cluster.name: "logs" 23 | node.name: "$${HOSTNAME}" 24 | node.data: "false" 25 | node.master: "false" 26 | -------------------------------------------------------------------------------- /elasticsearch/0.3.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | elasticsearch-masters: 2 | metadata: 3 | elasticsearch: 4 | yml: 5 | cluster.name: "logs" 6 | node.name: "$${HOSTNAME}" 7 | node.data: "false" 8 | 
node.master: "true" 9 | elasticsearch-datanodes: 10 | metadata: 11 | elasticsearch: 12 | yml: 13 | cluster.name: "logs" 14 | node.name: "$${HOSTNAME}" 15 | node.data: "true" 16 | node.master: "false" 17 | http.enabled: "false" 18 | elasticsearch-clients: 19 | metadata: 20 | elasticsearch: 21 | yml: 22 | cluster.name: "logs" 23 | node.name: "$${HOSTNAME}" 24 | node.data: "false" 25 | node.master: "false" 26 | -------------------------------------------------------------------------------- /elasticsearch/0.3.1/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | elasticsearch-masters: 2 | metadata: 3 | elasticsearch: 4 | yml: 5 | cluster.name: "logs" 6 | node.name: "$${HOSTNAME}" 7 | node.data: "false" 8 | node.master: "true" 9 | elasticsearch-datanodes: 10 | metadata: 11 | elasticsearch: 12 | yml: 13 | cluster.name: "logs" 14 | node.name: "$${HOSTNAME}" 15 | node.data: "true" 16 | node.master: "false" 17 | http.enabled: "false" 18 | elasticsearch-clients: 19 | metadata: 20 | elasticsearch: 21 | yml: 22 | cluster.name: "logs" 23 | node.name: "$${HOSTNAME}" 24 | node.data: "false" 25 | node.master: "false" 26 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-bootstrap/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM elasticsearch:1.7.1 2 | 3 | ADD ./run.sh /run.sh 4 | 5 | CMD ["/run.sh"] 6 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-bootstrap/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | PLUGIN_TXT=${PLUGIN_TXT:-/usr/share/elasticsearch/plugins.txt} 6 | 7 | if [ -f "$PLUGIN_TXT" ]; then 8 | for plugin in $(<"${PLUGIN_TXT}"); do 9 | /usr/share/elasticsearch/bin/plugin --install $plugin 10 | done 11 | fi 12 | 13 | exec 
"elasticsearch" 14 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:v0.1.0 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME /usr/share/elasticsearch/config 7 | VOLUME /data/confd 8 | 9 | ENTRYPOINT ["/confd"] 10 | CMD [] 11 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/conf.d/elasticsearch.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "elasticsearch.tmpl" 3 | dest = "/usr/share/elasticsearch/config/elasticsearch.yml" 4 | keys = [ 5 | "/elasticsearch/yml/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/conf.d/logging.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "logging.tmpl" 3 | dest = "/usr/share/elasticsearch/config/logging.yml" 4 | keys = [ 5 | "/elasticsearch/log/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/conf.d/plugins.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "plugins.tmpl" 3 | dest = "/data/confd/plugins.txt" 4 | keys = [ 5 | "/elasticsearch/plugins/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/templates/elasticsearch.tmpl: -------------------------------------------------------------------------------- 1 | # Placed by confd. Do not hand edit. 
2 | {{range gets "/elasticsearch/yml/*"}}{{ $data := json .Value}}{{range $key, $value := $data}} 3 | {{$key}}: {{$value}}{{end}}{{end}} 4 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/templates/logging.tmpl: -------------------------------------------------------------------------------- 1 | # Placed by confd. Do not hand edit. 2 | {{if (gt (len (ls "/elasticsearch/log")) 0)}} 3 | {{range gets "/elasticsearch/log/*"}}{{ $data := json .Value}}{{range $key, $value := $data}} 4 | {{$key}}: {{$value}}{{end}}{{end}} 5 | {{else}} 6 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG 7 | es.logger.level: INFO 8 | rootLogger: ${es.logger.level}, console 9 | logger: 10 | # log action execution errors for easier debugging 11 | action: DEBUG 12 | # reduce the logging for aws, too much is logged under the default INFO 13 | com.amazonaws: WARN 14 | 15 | appender: 16 | console: 17 | type: console 18 | layout: 19 | type: consolePattern 20 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 21 | {{end}} 22 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/elasticsearch-config/templates/plugins.tmpl: -------------------------------------------------------------------------------- 1 | {{range $dir := ls "/elasticsearch/plugins"}}{{getv (printf "%s/%s" "/elasticsearch/plugins" $dir)}} 2 | {{end}} 3 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/kopf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.9.4 2 | 3 | # upgrade 4 | RUN apt-get update && \ 5 | apt-get upgrade -y && \ 6 | apt-get install -y --no-install-recommends python-pip curl && \ 7 | rm -rf /var/lib/apt/lists/* && \ 8 | pip install envtpl 9 | 10 | # nginx 11 | ADD nginx.conf.tpl 
/etc/nginx/nginx.conf.tpl 12 | 13 | # run script 14 | ADD ./run.sh ./run.sh 15 | 16 | # kopf 17 | ENV KOPF_VERSION 1.5.7 18 | RUN curl -s -L "https://github.com/lmenezes/elasticsearch-kopf/archive/v${KOPF_VERSION}.tar.gz" | \ 19 | tar xz -C /tmp && mv "/tmp/elasticsearch-kopf-${KOPF_VERSION}" /kopf 20 | 21 | # logs 22 | VOLUME ["/var/log/nginx"] 23 | 24 | # ports 25 | EXPOSE 80 443 26 | 27 | ENTRYPOINT ["/run.sh"] 28 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.1.0/kopf/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | envtpl /etc/nginx/nginx.conf.tpl 6 | 7 | if [ ! -z "${KOPF_BASIC_AUTH_LOGIN}" ]; then 8 | echo "${KOPF_BASIC_AUTH_LOGIN}:${KOPF_BASIC_AUTH_PASSWORD}" > /etc/nginx/kopf.htpasswd 9 | fi 10 | 11 | KOPF_REFRESH_RATE="${KOPF_REFRESH_RATE:-5000}" 12 | KOPF_THEME="${KOPF_THEME:-dark}" 13 | KOPF_WITH_CREDENTIALS="${KOPF_WITH_CREDENTIALS:-false}" 14 | 15 | cat < /kopf/_site/kopf_external_settings.json 16 | { 17 | "elasticsearch_root_path": "/es", 18 | "with_credentials": ${KOPF_WITH_CREDENTIALS}, 19 | "theme": "${KOPF_THEME}", 20 | "refresh_rate": ${KOPF_REFRESH_RATE} 21 | } 22 | EOF 23 | 24 | exec nginx 25 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-bootstrap/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM elasticsearch:1.7.1 2 | 3 | ADD ./run.sh /run.sh 4 | 5 | CMD ["/run.sh"] 6 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-bootstrap/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | PLUGIN_TXT=${PLUGIN_TXT:-/usr/share/elasticsearch/plugins.txt} 6 | 7 | if [ -f "$PLUGIN_TXT" ]; then 8 | for plugin in $(<"${PLUGIN_TXT}"); do 
9 | /usr/share/elasticsearch/bin/plugin --install $plugin 10 | done 11 | fi 12 | 13 | while [ ! -f "/usr/share/elasticsearch/config/elasticsearch.yml" ]; do 14 | sleep 1 15 | done 16 | 17 | exec "elasticsearch" 18 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME /usr/share/elasticsearch/config 7 | VOLUME /data/confd 8 | 9 | ENTRYPOINT ["/confd"] 10 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 11 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/conf.d/elasticsearch.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "elasticsearch.tmpl" 3 | dest = "/usr/share/elasticsearch/config/elasticsearch.yml" 4 | keys = [ 5 | "/self/service", 6 | "/containers", 7 | ] 8 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/conf.d/logging.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "logging.tmpl" 3 | dest = "/usr/share/elasticsearch/config/logging.yml" 4 | keys = [ 5 | "/elasticsearch/log/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/conf.d/plugins.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "plugins.tmpl" 3 | dest = "/data/confd/plugins.txt" 4 | keys = [ 5 | "/elasticsearch/plugins/", 6 | ] 7 | -------------------------------------------------------------------------------- 
/elasticsearch/containers/0.2.0/elasticsearch-conf/templates/elasticsearch.tmpl: -------------------------------------------------------------------------------- 1 | #datanodei Placed by confd. Do not hand edit. 2 | {{range ls "/self/service/metadata/elasticsearch/yml"}} 3 | {{.}}: {{getv (printf "/self/service/metadata/elasticsearch/yml/%s" .)}}{{end}} 4 | 5 | bootstrap.mlockall: true 6 | discovery.zen.ping.multicast.enabled: false 7 | 8 | {{with get "/self/service/name"}}{{if eq "elasticsearch-masters" .Value}} 9 | discovery.zen.ping.unicast.hosts: {{range ls "/self/service/containers"}}{{ $containerName := getv (printf "/self/service/containers/%s" .)}} 10 | - {{getv (printf "/containers/%s/primary_ip" $containerName)}}{{end}} 11 | {{else}} 12 | discovery.zen.ping.unicast.hosts: ["es-masters"] 13 | {{end}}{{end}} 14 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/templates/logging.tmpl: -------------------------------------------------------------------------------- 1 | # Placed by confd. Do not hand edit. 
2 | {{if exists "/elasticsearch/log"}} 3 | {{range gets "/elasticsearch/log/*"}}{{ $data := json .Value}}{{range $key, $value := $data}} 4 | {{$key}}: {{$value}}{{end}}{{end}} 5 | {{else}} 6 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG 7 | es.logger.level: INFO 8 | rootLogger: ${es.logger.level}, console 9 | logger: 10 | # log action execution errors for easier debugging 11 | action: DEBUG 12 | # reduce the logging for aws, too much is logged under the default INFO 13 | com.amazonaws: WARN 14 | 15 | appender: 16 | console: 17 | type: console 18 | layout: 19 | type: consolePattern 20 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 21 | {{end}} 22 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.2.0/elasticsearch-conf/templates/plugins.tmpl: -------------------------------------------------------------------------------- 1 | {{if exists "/elasticsearch/plugins"}}{{range $dir := ls "/elasticsearch/plugins"}}{{getv (printf "%s/%s" "/elasticsearch/plugins" $dir)}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.3.0/elasticsearch-bootstrap/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM elasticsearch:1.7.3 2 | 3 | ADD ./run.sh /run.sh 4 | 5 | CMD ["/run.sh"] 6 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.3.0/elasticsearch-bootstrap/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | PLUGIN_TXT=${PLUGIN_TXT:-/usr/share/elasticsearch/plugins.txt} 6 | 7 | while [ ! 
-f "/usr/share/elasticsearch/config/elasticsearch.yml" ]; do 8 | sleep 1 9 | done 10 | 11 | if [ -f "$PLUGIN_TXT" ]; then 12 | for plugin in $(<"${PLUGIN_TXT}"); do 13 | /usr/share/elasticsearch/bin/plugin --install $plugin 14 | done 15 | fi 16 | 17 | exec "elasticsearch" 18 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | ADD ./run.sh /opt/rancher/bin/ 6 | 7 | VOLUME /usr/share/elasticsearch/config 8 | VOLUME /data/confd 9 | VOLUME /opt/rancher/bin 10 | 11 | ENTRYPOINT ["/confd"] 12 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 13 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/conf.d/elasticsearch.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "elasticsearch.tmpl" 3 | dest = "/usr/share/elasticsearch/config/elasticsearch.yml" 4 | keys = [ 5 | "/self/service", 6 | "/containers", 7 | ] 8 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/conf.d/logging.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "logging.tmpl" 3 | dest = "/usr/share/elasticsearch/config/logging.yml" 4 | keys = [ 5 | "/elasticsearch/log/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/conf.d/plugins.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "plugins.tmpl" 3 | dest = "/data/confd/plugins.txt" 4 | keys = [ 5 | 
"/elasticsearch/plugins/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | PLUGIN_TXT=${PLUGIN_TXT:-/usr/share/elasticsearch/plugins.txt} 6 | 7 | while [ ! -f "/usr/share/elasticsearch/config/elasticsearch.yml" ]; do 8 | sleep 1 9 | done 10 | 11 | if [ -f "$PLUGIN_TXT" ]; then 12 | for plugin in $(<"${PLUGIN_TXT}"); do 13 | /usr/share/elasticsearch/bin/plugin --install $plugin 14 | done 15 | fi 16 | 17 | exec /docker-entrypoint.sh elasticsearch 18 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/templates/elasticsearch.tmpl: -------------------------------------------------------------------------------- 1 | #datanodei Placed by confd. Do not hand edit. 2 | {{range ls "/self/service/metadata/elasticsearch/yml"}} 3 | {{.}}: {{getv (printf "/self/service/metadata/elasticsearch/yml/%s" .)}}{{end}} 4 | 5 | bootstrap.mlockall: true 6 | discovery.zen.ping.multicast.enabled: false 7 | 8 | {{with get "/self/service/name"}}{{if eq "elasticsearch-masters" .Value}} 9 | discovery.zen.ping.unicast.hosts: {{range ls "/self/service/containers"}}{{ $containerName := getv (printf "/self/service/containers/%s" .)}} 10 | - {{getv (printf "/containers/%s/primary_ip" $containerName)}}{{end}} 11 | {{else}} 12 | discovery.zen.ping.unicast.hosts: ["es-masters"] 13 | {{end}}{{end}} 14 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/templates/logging.tmpl: -------------------------------------------------------------------------------- 1 | # Placed by confd. Do not hand edit. 
2 | {{if exists "/elasticsearch/log"}} 3 | {{range gets "/elasticsearch/log/*"}}{{ $data := json .Value}}{{range $key, $value := $data}} 4 | {{$key}}: {{$value}}{{end}}{{end}} 5 | {{else}} 6 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG 7 | es.logger.level: INFO 8 | rootLogger: ${es.logger.level}, console 9 | logger: 10 | # log action execution errors for easier debugging 11 | action: DEBUG 12 | # reduce the logging for aws, too much is logged under the default INFO 13 | com.amazonaws: WARN 14 | 15 | appender: 16 | console: 17 | type: console 18 | layout: 19 | type: consolePattern 20 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 21 | {{end}} 22 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/elasticsearch-conf/templates/plugins.tmpl: -------------------------------------------------------------------------------- 1 | {{if exists "/elasticsearch/plugins"}}{{range $dir := ls "/elasticsearch/plugins"}}{{getv (printf "%s/%s" "/elasticsearch/plugins" $dir)}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/kopf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.9.4 2 | 3 | # upgrade 4 | RUN apt-get update && \ 5 | apt-get upgrade -y && \ 6 | apt-get install -y --no-install-recommends python-pip curl && \ 7 | rm -rf /var/lib/apt/lists/* && \ 8 | pip install envtpl 9 | 10 | # nginx 11 | ADD nginx.conf.tpl /etc/nginx/nginx.conf.tpl 12 | 13 | # run script 14 | ADD ./run.sh ./run.sh 15 | 16 | # kopf 17 | ENV KOPF_VERSION 1.5.7 18 | RUN curl -s -L "https://github.com/lmenezes/elasticsearch-kopf/archive/v${KOPF_VERSION}.tar.gz" | \ 19 | tar xz -C /tmp && mv "/tmp/elasticsearch-kopf-${KOPF_VERSION}" /kopf 20 | 21 | # logs 22 | VOLUME ["/var/log/nginx"] 23 | 24 | # ports 25 | EXPOSE 80 443 26 | 27 | ENTRYPOINT 
["/run.sh"] 28 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.4.0/kopf/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | envtpl --keep-template /etc/nginx/nginx.conf.tpl 6 | 7 | if [ ! -z "${KOPF_BASIC_AUTH_LOGIN}" ]; then 8 | echo "${KOPF_BASIC_AUTH_LOGIN}:${KOPF_BASIC_AUTH_PASSWORD}" > /etc/nginx/kopf.htpasswd 9 | fi 10 | 11 | KOPF_REFRESH_RATE="${KOPF_REFRESH_RATE:-5000}" 12 | KOPF_THEME="${KOPF_THEME:-dark}" 13 | KOPF_WITH_CREDENTIALS="${KOPF_WITH_CREDENTIALS:-false}" 14 | 15 | cat < /kopf/_site/kopf_external_settings.json 16 | { 17 | "elasticsearch_root_path": "/es", 18 | "with_credentials": ${KOPF_WITH_CREDENTIALS}, 19 | "theme": "${KOPF_THEME}", 20 | "refresh_rate": ${KOPF_REFRESH_RATE} 21 | } 22 | EOF 23 | 24 | exec nginx 25 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox 2 | 3 | ADD https://github.com/kelseyhightower/confd/releases/download/v0.11.0/confd-0.11.0-linux-amd64 /confd 4 | RUN chmod +x /confd 5 | 6 | ADD ./conf.d /etc/confd/conf.d 7 | ADD ./templates /etc/confd/templates 8 | ADD ./run.sh /run.sh 9 | ADD ./dockerentry.sh /dockerentry.sh 10 | 11 | VOLUME /data/confd 12 | VOLUME /opt/rancher/bin 13 | VOLUME /usr/share/elasticsearch/config 14 | 15 | ENTRYPOINT ["/dockerentry.sh"] 16 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 17 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/conf.d/elasticsearch.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "elasticsearch.tmpl" 3 | dest = "/usr/share/elasticsearch/config/elasticsearch.yml" 4 | keys = [ 5 | "/self/service", 
6 | "/containers", 7 | ] 8 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/conf.d/logging.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "logging.tmpl" 3 | dest = "/usr/share/elasticsearch/config/logging.yml" 4 | keys = [ 5 | "/elasticsearch/log/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/conf.d/plugins.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "plugins.tmpl" 3 | dest = "/data/confd/plugins.txt" 4 | keys = [ 5 | "/elasticsearch/plugins/", 6 | ] 7 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/dockerentry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cp /run.sh /opt/rancher/bin/ 4 | 5 | exec /confd $@ 6 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | PLUGIN_TXT=${PLUGIN_TXT:-/usr/share/elasticsearch/plugins.txt} 6 | 7 | while [ ! -f "/usr/share/elasticsearch/config/elasticsearch.yml" ]; do 8 | sleep 1 9 | done 10 | 11 | if [ -f "$PLUGIN_TXT" ]; then 12 | for plugin in $(<"${PLUGIN_TXT}"); do 13 | /usr/share/elasticsearch/bin/plugin --install $plugin 14 | done 15 | fi 16 | 17 | exec /docker-entrypoint.sh elasticsearch 18 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/templates/elasticsearch.tmpl: -------------------------------------------------------------------------------- 1 | #datanodei Placed by confd. 
Do not hand edit. 2 | {{range ls "/self/service/metadata/elasticsearch/yml"}} 3 | {{.}}: {{getv (printf "/self/service/metadata/elasticsearch/yml/%s" .)}}{{end}} 4 | 5 | bootstrap.mlockall: true 6 | discovery.zen.ping.multicast.enabled: false 7 | 8 | {{with get "/self/service/name"}}{{if eq "elasticsearch-masters" .Value}} 9 | discovery.zen.ping.unicast.hosts: {{range ls "/self/service/containers"}}{{ $containerName := getv (printf "/self/service/containers/%s" .)}} 10 | - {{getv (printf "/containers/%s/primary_ip" $containerName)}}{{end}} 11 | {{else}} 12 | discovery.zen.ping.unicast.hosts: ["es-masters"] 13 | {{end}}{{end}} 14 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/templates/logging.tmpl: -------------------------------------------------------------------------------- 1 | # Placed by confd. Do not hand edit. 2 | {{if exists "/elasticsearch/log"}} 3 | {{range gets "/elasticsearch/log/*"}}{{ $data := json .Value}}{{range $key, $value := $data}} 4 | {{$key}}: {{$value}}{{end}}{{end}} 5 | {{else}} 6 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG 7 | es.logger.level: INFO 8 | rootLogger: ${es.logger.level}, console 9 | logger: 10 | # log action execution errors for easier debugging 11 | action: DEBUG 12 | # reduce the logging for aws, too much is logged under the default INFO 13 | com.amazonaws: WARN 14 | 15 | appender: 16 | console: 17 | type: console 18 | layout: 19 | type: consolePattern 20 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 21 | {{end}} 22 | -------------------------------------------------------------------------------- /elasticsearch/containers/0.5.0/elasticsearch-conf/templates/plugins.tmpl: -------------------------------------------------------------------------------- 1 | {{if exists "/elasticsearch/plugins"}}{{range $dir := ls "/elasticsearch/plugins"}}{{getv (printf "%s/%s" 
"/elasticsearch/plugins" $dir)}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /etcd/0.1.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd-init: 2 | image: rancher/etcd:v2.2.1 3 | labels: 4 | io.rancher.container.hostname_override: container_name 5 | ports: 6 | - '4001' 7 | - '2380' 8 | - '2379' 9 | - '7001' 10 | command: /opt/rancher/run.sh 11 | -------------------------------------------------------------------------------- /etcd/0.2.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: rancher/etcd:v2.2.1-2 3 | labels: 4 | io.rancher.container.hostname_override: container_name 5 | ports: 6 | - '4001' 7 | - '2380' 8 | - '2379' 9 | - '7001' 10 | command: /opt/rancher/run.sh 11 | -------------------------------------------------------------------------------- /etcd/0.2.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | scale: 3 3 | -------------------------------------------------------------------------------- /etcd/0.3.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: rancher/etcd:v2.3.0 3 | labels: 4 | io.rancher.sidekicks: data 5 | # try not to schedule etcd nodes on the same host 6 | io.rancher.scheduler.affinity:container_label_soft_ne: etcd=node 7 | etcd: node 8 | expose: 9 | - "2379" 10 | - "2380" 11 | environment: 12 | ETCDCTL_ENDPOINT: http://etcd:2379 13 | volumes_from: 14 | - data 15 | # containerize data volume to enable restarts and upgrades 16 | data: 17 | image: busybox 18 | command: /bin/true 19 | net: none 20 | volumes: 21 | - /data 22 | labels: 23 | io.rancher.container.start_once: 'true' 24 | 25 | # Discovery containers are used for bootstrapping a cluster. 26 | # They will shutdown once this process is completed. 
27 | etcd-discovery: 28 | image: rancher/etcd:v2.3.0 29 | command: discovery_node 30 | labels: 31 | io.rancher.container.start_once: 'true' 32 | io.rancher.sidekicks: bootstrap 33 | bootstrap: 34 | image: rancher/etcd:v2.3.0 35 | command: bootstrap ${REPLICAS} 36 | link: container:etcd-discovery 37 | labels: 38 | io.rancher.container.start_once: 'true' 39 | -------------------------------------------------------------------------------- /etcd/0.3.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | retain_ip: true 3 | scale: ${REPLICAS} 4 | -------------------------------------------------------------------------------- /etcd/0.4.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: rancher/etcd:v2.3.3 3 | labels: 4 | io.rancher.sidekicks: data 5 | # try not to schedule etcd nodes on the same host 6 | io.rancher.scheduler.affinity:container_label_soft_ne: etcd=node 7 | etcd: node 8 | expose: 9 | - "2379" 10 | - "2380" 11 | environment: 12 | ETCDCTL_ENDPOINT: http://etcd:2379 13 | volumes_from: 14 | - data 15 | # containerize data volume to enable restarts and upgrades 16 | data: 17 | image: busybox 18 | command: /bin/true 19 | net: none 20 | volumes: 21 | - /data 22 | labels: 23 | io.rancher.container.start_once: 'true' 24 | 25 | # Discovery containers are used for bootstrapping a cluster. 26 | # They will shutdown once this process is completed. 
27 | etcd-discovery: 28 | image: rancher/etcd:v2.3.3 29 | command: discovery_node 30 | labels: 31 | io.rancher.container.start_once: 'true' 32 | io.rancher.sidekicks: bootstrap 33 | bootstrap: 34 | image: rancher/etcd:v2.3.3 35 | command: bootstrap ${REPLICAS} 36 | link: container:etcd-discovery 37 | labels: 38 | io.rancher.container.start_once: 'true' 39 | 40 | -------------------------------------------------------------------------------- /etcd/0.4.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | retain_ip: true 3 | scale: ${REPLICAS} 4 | -------------------------------------------------------------------------------- /etcd/0.5.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: rancher/etcd:v2.3.6 3 | labels: 4 | io.rancher.scheduler.affinity:container_label_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name} 5 | io.rancher.sidekicks: data 6 | environment: 7 | ETCD_DATA_DIR: /data 8 | ETCDCTL_ENDPOINT: http://etcd:2379 9 | links: 10 | - data 11 | - discovery 12 | volumes_from: 13 | - data 14 | # containerize data volume to enable restarts and upgrades 15 | data: 16 | image: busybox 17 | command: /bin/true 18 | net: none 19 | volumes: 20 | - /data 21 | labels: 22 | io.rancher.container.start_once: 'true' 23 | # Discovery containers are used for bootstrapping a cluster. 24 | # They will shutdown once the bootstrap process is completed. 
25 | discovery: 26 | image: rancher/etcd:v2.3.6 27 | command: discovery_node 28 | labels: 29 | io.rancher.container.start_once: 'true' 30 | io.rancher.sidekicks: bootstrap 31 | bootstrap: 32 | image: rancher/etcd:v2.3.6 33 | command: bootstrap 34 | links: 35 | - discovery 36 | labels: 37 | io.rancher.container.start_once: 'true' 38 | environment: 39 | ETCDCTL_ENDPOINT: http://etcd:2379 40 | -------------------------------------------------------------------------------- /etcd/0.5.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | .catalog: 2 | name: "Etcd" 3 | version: "2.3.6-rancher1" 4 | description: | 5 | Distributed highly-available key-value store 6 | minimum_rancher_version: "v0.46.0" 7 | questions: 8 | - variable: "REPLICAS" 9 | description: "Number of Etcd nodes. 3, 5, or 7 are good choices" 10 | label: "Number of Nodes:" 11 | required: true 12 | default: 3 13 | type: "int" 14 | etcd: 15 | retain_ip: true 16 | scale: ${REPLICAS} 17 | health_check: 18 | port: 2379 19 | interval: 7500 20 | unhealthy_threshold: 2 21 | request_line: '/health' 22 | healthy_threshold: 2 23 | response_timeout: 5000 24 | -------------------------------------------------------------------------------- /etcd/0.6.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: llparse/etcd:v2.3.6-2 3 | labels: 4 | io.rancher.scheduler.affinity:container_label_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name} 5 | io.rancher.sidekicks: data 6 | environment: 7 | ETCD_DATA_DIR: /data 8 | ETCDCTL_ENDPOINT: http://etcd:2379 9 | RANCHER_DEBUG: ${DEBUG} 10 | links: 11 | - data 12 | volumes_from: 13 | - data 14 | # containerize data volume to enable restarts and upgrades 15 | data: 16 | image: busybox 17 | entrypoint: /bin/true 18 | net: none 19 | volumes: 20 | - /data 21 | labels: 22 | io.rancher.container.start_once: 'true' 23 | # Discovery containers are used 
for bootstrapping a cluster. 24 | # They will shutdown once the bootstrap process is completed. 25 | discovery: 26 | image: llparse/etcd:v2.3.6-2 27 | command: discovery_node 28 | labels: 29 | io.rancher.container.start_once: 'true' 30 | io.rancher.sidekicks: bootstrap 31 | environment: 32 | RANCHER_DEBUG: ${DEBUG} 33 | bootstrap: 34 | image: llparse/etcd:v2.3.6-2 35 | command: bootstrap 36 | links: 37 | - etcd 38 | - discovery 39 | labels: 40 | io.rancher.container.start_once: 'true' 41 | environment: 42 | ETCDCTL_ENDPOINT: http://etcd:2379 43 | RANCHER_DEBUG: ${DEBUG} 44 | -------------------------------------------------------------------------------- /etcd/0.6.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | .catalog: 2 | name: Etcd 3 | version: 2.3.6-rancher2 4 | description: | 5 | Distributed highly-available key-value store 6 | minimum_rancher_version: v1.1.0-rc2 7 | questions: 8 | - variable: REPLICAS 9 | description: Number of Etcd nodes. 
3, 5, or 7 are good choices 10 | label: Number of Nodes 11 | required: true 12 | default: 3 13 | type: int 14 | - variable: DEBUG 15 | description: Enable or disable verbose logging and other debugging features 16 | label: Debug 17 | type: boolean 18 | default: false 19 | etcd: 20 | scale_policy: 21 | min: 1 22 | max: 3 23 | increment: 1 24 | metadata: 25 | scale_policy: 26 | min: 1 27 | scale: ${REPLICAS} 28 | health_check: 29 | port: 2379 30 | request_line: '/health' 31 | interval: 7500 32 | response_timeout: 5000 33 | healthy_threshold: 2 34 | unhealthy_threshold: 4 35 | -------------------------------------------------------------------------------- /etcd/0.8.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: cloudnautique/etcd:v2.3.6-12 3 | restart: always 4 | labels: 5 | io.rancher.scheduler.affinity:container_label_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name} 6 | io.rancher.sidekicks: data,healthcheck 7 | io.rancher.container.hostname_override: container_name 8 | environment: 9 | ETCD_DATA_DIR: /data 10 | ETCDCTL_ENDPOINT: http://etcd:2379 11 | RANCHER_DEBUG: ${DEBUG} 12 | links: 13 | - data 14 | volumes_from: 15 | - data 16 | healthcheck: 17 | net: container:etcd 18 | entrypoint: /usr/local/bin/giddyup health --check-command /healthcheck.sh 19 | image: cloudnautique/etcd:v2.3.6-12 20 | volumes_from: 21 | - data 22 | # containerize data volume to enable restarts and upgrades 23 | data: 24 | image: busybox 25 | entrypoint: /bin/true 26 | net: none 27 | volumes: 28 | - /data 29 | labels: 30 | io.rancher.container.start_once: 'true' 31 | -------------------------------------------------------------------------------- /etcd/0.8.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | .catalog: 2 | name: Etcd 3 | version: 2.3.6-rancher2 4 | description: | 5 | Distributed highly-available key-value store 6 | 
minimum_rancher_version: v1.1.0-rc2 7 | questions: 8 | - variable: REPLICAS 9 | description: Number of Etcd nodes. 3, 5, or 7 are good choices 10 | label: Number of Nodes 11 | required: true 12 | default: 3 13 | type: int 14 | - variable: DEBUG 15 | description: Enable or disable verbose logging and other debugging features 16 | label: Debug 17 | type: boolean 18 | default: false 19 | etcd: 20 | scale_policy: 21 | min: 1 22 | max: 3 23 | increment: 1 24 | metadata: 25 | scale_policy: 26 | min: 1 27 | scale: ${REPLICAS} 28 | retain_ip: true 29 | health_check: 30 | port: 1620 31 | request_line: '/ping' 32 | interval: 10000 33 | recreate_on_quorum_strategy_config: 34 | quorum: 2 35 | strategy: recreateOnQuorum 36 | response_timeout: 12000 37 | healthy_threshold: 2 38 | unhealthy_threshold: 6 39 | -------------------------------------------------------------------------------- /etcd/0.9.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | etcd: 2 | image: cloudnautique/etcd:v2.3.6-9 3 | labels: 4 | io.rancher.scheduler.affinity:container_label_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name} 5 | io.rancher.sidekicks: data 6 | environment: 7 | ETCD_DATA_DIR: /data 8 | ETCDCTL_ENDPOINT: http://etcd:2379 9 | RANCHER_DEBUG: ${DEBUG} 10 | links: 11 | - data 12 | volumes_from: 13 | - data 14 | # containerize data volume to enable restarts and upgrades 15 | data: 16 | image: busybox 17 | entrypoint: /bin/true 18 | net: none 19 | volumes: 20 | - /data 21 | labels: 22 | io.rancher.container.start_once: 'true' 23 | # Discovery containers are used for bootstrapping a cluster. 24 | # They will shutdown once the bootstrap process is completed. 
25 | discovery: 26 | image: cloudnautique/etcd:v2.3.6-9 27 | command: discovery_node 28 | labels: 29 | io.rancher.container.start_once: 'true' 30 | io.rancher.sidekicks: bootstrap 31 | environment: 32 | RANCHER_DEBUG: ${DEBUG} 33 | bootstrap: 34 | image: cloudnautique/etcd:v2.3.6-9 35 | command: bootstrap 36 | links: 37 | - etcd 38 | - discovery 39 | labels: 40 | io.rancher.container.start_once: 'true' 41 | environment: 42 | ETCDCTL_ENDPOINT: http://etcd:2379 43 | RANCHER_DEBUG: ${DEBUG} 44 | -------------------------------------------------------------------------------- /etcd/0.9.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | .catalog: 2 | name: Etcd 3 | version: 2.3.6-rancher2 4 | description: | 5 | Distributed highly-available key-value store 6 | minimum_rancher_version: v1.1.0-rc2 7 | questions: 8 | - variable: REPLICAS 9 | description: Number of Etcd nodes. 3, 5, or 7 are good choices 10 | label: Number of Nodes 11 | required: true 12 | default: 3 13 | type: int 14 | - variable: DEBUG 15 | description: Enable or disable verbose logging and other debugging features 16 | label: Debug 17 | type: boolean 18 | default: false 19 | etcd: 20 | scale_policy: 21 | min: 1 22 | max: 3 23 | increment: 1 24 | metadata: 25 | scale_policy: 26 | min: 1 27 | scale: ${REPLICAS} 28 | health_check: 29 | port: 2379 30 | request_line: '/health' 31 | interval: 7500 32 | response_timeout: 5000 33 | healthy_threshold: 2 34 | unhealthy_threshold: 4 35 | -------------------------------------------------------------------------------- /etcd/containers/0.1.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN apk add --update bash curl ca-certificates && rm -rf /var/cache/apk/* 4 | ADD ./run.sh /opt/rancher/run.sh 5 | ADD https://github.com/coreos/etcd/releases/download/v2.2.1/etcd-v2.2.1-linux-amd64.tar.gz /etcd-v2.2.1-linux-amd64.tar.gz 6 | RUN tar 
-xzvf /etcd-*.tar.gz -C /tmp && \ 7 | mv /tmp/etcd-*/etcd /etcd && \ 8 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 9 | 10 | VOLUME "/opt/rancher" 11 | 12 | CMD ["/bin/sleep", "5"] 13 | -------------------------------------------------------------------------------- /etcd/containers/0.1.0/etcd/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | set -x 5 | 6 | # Let metadata come up 7 | sleep 10 8 | 9 | IP_ADDRESS=$(curl http://rancher-metadata/2015-07-25/self/container/primary_ip) 10 | CREATE_INDEX=$(curl http://rancher-metadata/2015-07-25/self/container/create_index) 11 | CLUSTER_NAME=$(curl http://rancher-metadata/2015-07-25/self/stack/name) 12 | 13 | exec /etcd -name etcd${CREATE_INDEX} \ 14 | -advertise-client-urls http://${IP_ADDRESS}:2379,http://${IP_ADDRESS}:4001 \ 15 | -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ 16 | -initial-advertise-peer-urls http://${IP_ADDRESS}:2380 \ 17 | -listen-peer-urls http://0.0.0.0:2380 \ 18 | -initial-cluster-token etcd-cluster-1 \ 19 | -initial-cluster etcd${CREATE_INDEX}=http://${IP_ADDRESS}:2380 \ 20 | -initial-cluster-state new 21 | -------------------------------------------------------------------------------- /etcd/containers/0.10.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.13.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.7/etcd-v2.3.7-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD etcdhc /usr/bin/etcdhc 16 | ADD run.sh 
/run.sh 17 | ADD disaster /usr/bin/disaster 18 | 19 | ENTRYPOINT ["/run.sh"] 20 | CMD ["node"] 21 | -------------------------------------------------------------------------------- /etcd/containers/0.10.0/etcd/Dockerfile.hcproxy: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | 3 | RUN apk update && apk add git 4 | RUN go get github.com/urfave/cli 5 | 6 | RUN mkdir -p /go/src/etcdhc 7 | WORKDIR /go/src/etcdhc 8 | ADD hcproxy.go . 9 | 10 | RUN go build 11 | -------------------------------------------------------------------------------- /etcd/containers/0.10.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.7-6} 5 | 6 | docker build -t hcproxy -f Dockerfile.hcproxy . 7 | id=$(docker run -d --entrypoint=/bin/sh --name hcproxy hcproxy sleep 15) 8 | docker cp hcproxy:/go/src/etcdhc/etcdhc . 9 | docker rm -f $id 10 | 11 | docker build -t $ACCT/etcd:$VERSION . 12 | docker push $ACCT/etcd:$VERSION 13 | 14 | rm -f etcdhc 15 | 16 | if [ "$(which gsync)" ]; then 17 | gsync $ACCT/etcd:$VERSION 18 | fi 19 | -------------------------------------------------------------------------------- /etcd/containers/0.10.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATA_DIR=/data 4 | DR_FLAG=${DATA_DIR}/DR 5 | 6 | mkdir -p $DR_FLAG 7 | 8 | echo -e "Disaster flag set.\n\nTriggering disaster recovery...this will automatically restart the container." 
9 | 10 | sleep 1 11 | 12 | # Continuously send SIGTERM 13 | PID=$(pidof etcd) 14 | while kill -0 $PID &> /dev/null; do 15 | kill $PID 16 | sleep 1 17 | done 18 | -------------------------------------------------------------------------------- /etcd/containers/0.11.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | WORKDIR /opt/rancher 4 | ENV PATH $PATH:/opt/rancher 5 | 6 | RUN \ 7 | apk add --update bash ca-certificates && \ 8 | rm -rf /var/cache/apk/* && \ 9 | wget -q -O /opt/rancher/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.13.0/giddyup && \ 10 | chmod +x /opt/rancher/giddyup 11 | 12 | RUN \ 13 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.7/etcd-v2.3.7-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 14 | mv /tmp/etcd-*/etcd /opt/rancher/etcd && \ 15 | mv /tmp/etcd-*/etcdctl /opt/rancher/etcdctl && \ 16 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 17 | 18 | ADD etcdwrapper run.sh disaster delete /opt/rancher/ 19 | 20 | ENTRYPOINT ["/opt/rancher/run.sh"] 21 | CMD ["node"] 22 | -------------------------------------------------------------------------------- /etcd/containers/0.11.0/etcd/Dockerfile.wrapper: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | 3 | RUN apk update && apk add git 4 | RUN go get github.com/urfave/cli 5 | RUN go get github.com/Sirupsen/logrus 6 | 7 | RUN mkdir -p /go/src/etcdwrapper 8 | WORKDIR /go/src/etcdwrapper 9 | ADD wrapper.go . 10 | 11 | RUN go build 12 | -------------------------------------------------------------------------------- /etcd/containers/0.11.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.7-9} 5 | 6 | docker build -t etcdwrapper -f Dockerfile.wrapper . 
7 | id=$(docker run -d --entrypoint=/bin/sh --name etcdwrapper etcdwrapper sleep 15) 8 | docker cp etcdwrapper:/go/src/etcdwrapper/etcdwrapper . 9 | docker rm -f $id 10 | 11 | docker build -t $ACCT/etcd:$VERSION . 12 | docker push $ACCT/etcd:$VERSION 13 | 14 | rm -f etcdwrapper 15 | 16 | if [ "$(which gsync)" ]; then 17 | gsync $ACCT/etcd:$VERSION 18 | fi 19 | -------------------------------------------------------------------------------- /etcd/containers/0.11.0/etcd/delete: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Deleting persistent data..." 4 | 5 | rm -rf /pdata/* 6 | rm -rf /data/* 7 | 8 | PID=$(pidof etcd) 9 | while kill -0 $PID &> /dev/null; do 10 | kill $PID 11 | sleep 1 12 | done 13 | -------------------------------------------------------------------------------- /etcd/containers/0.11.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATA_DIR=/pdata 4 | DR_FLAG=${DATA_DIR}/DR 5 | 6 | cd $DATA_DIR 7 | 8 | echo 'data.current' > $DR_FLAG 9 | echo -e "Disaster flag set.\n\nTriggering disaster recovery...this will automatically restart the container." 
10 | 11 | sleep 5 12 | 13 | # Continuously send SIGTERM 14 | PID=$(pidof etcd) 15 | while kill -0 $PID &> /dev/null; do 16 | kill $PID 17 | sleep 1 18 | done 19 | -------------------------------------------------------------------------------- /etcd/containers/0.12.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | WORKDIR /opt/rancher 4 | ENV PATH $PATH:/opt/rancher 5 | 6 | RUN \ 7 | apk add --update bash ca-certificates && \ 8 | rm -rf /var/cache/apk/* && \ 9 | wget -q -O /opt/rancher/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.13.0/giddyup && \ 10 | chmod +x /opt/rancher/giddyup 11 | 12 | RUN \ 13 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.7/etcd-v2.3.7-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 14 | mv /tmp/etcd-*/etcd /opt/rancher/etcd && \ 15 | mv /tmp/etcd-*/etcdctl /opt/rancher/etcdctl && \ 16 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 17 | 18 | ADD etcdwrapper run.sh disaster delete /opt/rancher/ 19 | 20 | ENTRYPOINT ["/opt/rancher/run.sh"] 21 | CMD ["node"] 22 | -------------------------------------------------------------------------------- /etcd/containers/0.12.0/etcd/Dockerfile.wrapper: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | 3 | RUN apk update && apk add git 4 | RUN go get github.com/urfave/cli 5 | RUN go get github.com/Sirupsen/logrus 6 | 7 | RUN mkdir -p /go/src/etcdwrapper 8 | WORKDIR /go/src/etcdwrapper 9 | ADD wrapper.go . 10 | 11 | RUN go build 12 | -------------------------------------------------------------------------------- /etcd/containers/0.12.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.7-11} 5 | 6 | docker build -t etcdwrapper -f Dockerfile.wrapper . 
7 | id=$(docker run -d --entrypoint=/bin/sh --name etcdwrapper etcdwrapper sleep 15) 8 | docker cp etcdwrapper:/go/src/etcdwrapper/etcdwrapper . 9 | docker rm -f $id 10 | 11 | docker build -t $ACCT/etcd:$VERSION . 12 | docker push $ACCT/etcd:$VERSION 13 | 14 | rm -f etcdwrapper 15 | 16 | if [ "$(which gsync)" ]; then 17 | gsync $ACCT/etcd:$VERSION 18 | fi 19 | -------------------------------------------------------------------------------- /etcd/containers/0.12.0/etcd/delete: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Deleting persistent data..." 4 | 5 | rm -rf /pdata/* 6 | rm -rf /data/* 7 | 8 | PID=$(pidof etcd) 9 | while kill -0 $PID &> /dev/null; do 10 | kill $PID 11 | sleep 1 12 | done 13 | -------------------------------------------------------------------------------- /etcd/containers/0.12.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATA_DIR=/pdata 4 | DR_FLAG=${DATA_DIR}/DR 5 | 6 | cd $DATA_DIR 7 | 8 | echo 'data.current' > $DR_FLAG 9 | echo -e "Disaster flag set.\n\nTriggering disaster recovery...this will automatically restart the container." 
10 | 11 | sleep 5 12 | 13 | # Continuously send SIGTERM 14 | PID=$(pidof etcd) 15 | while kill -0 $PID &> /dev/null; do 16 | kill $PID 17 | sleep 1 18 | done 19 | -------------------------------------------------------------------------------- /etcd/containers/0.13.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | WORKDIR /opt/rancher 4 | ENV PATH $PATH:/opt/rancher 5 | 6 | RUN \ 7 | apk add --update bash ca-certificates && \ 8 | rm -rf /var/cache/apk/* && \ 9 | wget -q -O /opt/rancher/giddyup https://github.com/rancher/giddyup/releases/download/v0.18.0/giddyup && \ 10 | chmod +x /opt/rancher/giddyup 11 | 12 | RUN \ 13 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.7/etcd-v2.3.7-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 14 | mv /tmp/etcd-*/etcd /opt/rancher/etcd && \ 15 | mv /tmp/etcd-*/etcdctl /opt/rancher/etcdctl && \ 16 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 17 | 18 | ADD etcdwrapper run.sh disaster delete /opt/rancher/ 19 | 20 | ENTRYPOINT ["/opt/rancher/run.sh"] 21 | CMD ["node"] 22 | -------------------------------------------------------------------------------- /etcd/containers/0.13.0/etcd/Dockerfile.wrapper: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | 3 | RUN apk update && apk add git 4 | RUN go get github.com/urfave/cli 5 | RUN go get github.com/Sirupsen/logrus 6 | 7 | RUN mkdir -p /go/src/etcdwrapper 8 | WORKDIR /go/src/etcdwrapper 9 | ADD wrapper.go . 10 | 11 | RUN go build 12 | -------------------------------------------------------------------------------- /etcd/containers/0.13.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.7-13} 5 | 6 | docker build -t etcdwrapper -f Dockerfile.wrapper . 
7 | id=$(docker run -d --entrypoint=/bin/sh --name etcdwrapper etcdwrapper sleep 15) 8 | docker cp etcdwrapper:/go/src/etcdwrapper/etcdwrapper . 9 | docker rm -f $id 10 | 11 | docker build -t $ACCT/etcd:$VERSION . 12 | docker push $ACCT/etcd:$VERSION 13 | 14 | rm -f etcdwrapper 15 | 16 | -------------------------------------------------------------------------------- /etcd/containers/0.13.0/etcd/delete: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Deleting persistent data..." 4 | 5 | rm -rf /pdata/* 6 | rm -rf /data/* 7 | 8 | PID=$(pidof etcd) 9 | while kill -0 $PID &> /dev/null; do 10 | kill $PID 11 | sleep 1 12 | done 13 | -------------------------------------------------------------------------------- /etcd/containers/0.13.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATA_DIR=/pdata 4 | DR_FLAG=${DATA_DIR}/DR 5 | 6 | cd $DATA_DIR 7 | 8 | echo 'data.current' > $DR_FLAG 9 | echo -e "Disaster flag set.\n\nTriggering disaster recovery...this will automatically restart the container." 
10 | 11 | sleep 5 12 | 13 | # Continuously send SIGTERM 14 | PID=$(pidof etcd) 15 | while kill -0 $PID &> /dev/null; do 16 | kill $PID 17 | sleep 1 18 | done 19 | -------------------------------------------------------------------------------- /etcd/containers/0.14.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | WORKDIR /opt/rancher 4 | ENV PATH $PATH:/opt/rancher 5 | 6 | RUN \ 7 | apk add --update bash ca-certificates && \ 8 | rm -rf /var/cache/apk/* && \ 9 | wget -q -O /opt/rancher/giddyup https://github.com/rancher/giddyup/releases/download/v0.18.0/giddyup && \ 10 | chmod +x /opt/rancher/giddyup 11 | 12 | RUN \ 13 | wget -q -O - https://github.com/coreos/etcd/releases/download/v3.0.17/etcd-v3.0.17-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 14 | mv /tmp/etcd-*/etcd /opt/rancher/etcd && \ 15 | mv /tmp/etcd-*/etcdctl /opt/rancher/etcdctl && \ 16 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 17 | 18 | ADD etcdwrapper run.sh disaster delete /opt/rancher/ 19 | 20 | ENTRYPOINT ["/opt/rancher/run.sh"] 21 | CMD ["node"] 22 | -------------------------------------------------------------------------------- /etcd/containers/0.14.0/etcd/Dockerfile.wrapper: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | 3 | RUN apk update && apk add git 4 | RUN go get github.com/urfave/cli 5 | RUN go get github.com/Sirupsen/logrus 6 | 7 | RUN mkdir -p /go/src/etcdwrapper 8 | WORKDIR /go/src/etcdwrapper 9 | ADD wrapper.go . 10 | 11 | RUN go build 12 | -------------------------------------------------------------------------------- /etcd/containers/0.14.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v3.0.17-2} 5 | 6 | gofmt -w -d *.go 7 | docker build -t etcdwrapper -f Dockerfile.wrapper . 
8 | id=$(docker run -d --entrypoint=/bin/sh --name etcdwrapper etcdwrapper sleep 15) 9 | docker cp etcdwrapper:/go/src/etcdwrapper/etcdwrapper . 10 | docker rm -f $id 11 | 12 | docker build -t $ACCT/etcd:$VERSION . 13 | docker push $ACCT/etcd:$VERSION 14 | 15 | rm -f etcdwrapper 16 | 17 | -------------------------------------------------------------------------------- /etcd/containers/0.14.0/etcd/delete: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Deleting persistent data..." 4 | 5 | rm -rf /data/* 6 | 7 | PID=$(pidof etcd) 8 | while kill -0 $PID &> /dev/null; do 9 | kill $PID 10 | sleep 1 11 | done 12 | -------------------------------------------------------------------------------- /etcd/containers/0.14.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATA_DIR=/data 4 | DR_FLAG=${DATA_DIR}/disaster 5 | 6 | ETCDCTL_API=3 etcdctl snapshot save $DATA_DIR/snapshot 7 | 8 | if [ $? -ne 0 ] || [ ! -f $DATA_DIR/snapshot ]; then 9 | echo "Error saving snapshot! Aborting." 10 | exit 1 11 | fi 12 | 13 | touch $DR_FLAG 14 | echo -e "Disaster flag set.\n\nTriggering disaster recovery...this will automatically restart the container." 
15 | 16 | sleep 5 17 | 18 | # Continuously send SIGTERM 19 | PID=$(pidof etcd) 20 | while kill -0 $PID &> /dev/null; do 21 | kill $PID 22 | sleep 1 23 | done 24 | -------------------------------------------------------------------------------- /etcd/containers/0.2.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN apk add --update bash curl jq ca-certificates && rm -rf /var/cache/apk/* 4 | ADD ./run.sh /opt/rancher/run.sh 5 | ADD https://github.com/coreos/etcd/releases/download/v2.2.1/etcd-v2.2.1-linux-amd64.tar.gz /etcd-v2.2.1-linux-amd64.tar.gz 6 | RUN tar -xzvf /etcd-*.tar.gz -C /tmp && \ 7 | mv /tmp/etcd-*/etcd /etcd && \ 8 | mv /tmp/etcd-*/etcdctl /etcdctl && \ 9 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 10 | 11 | VOLUME "/opt/rancher" 12 | 13 | CMD ["/bin/sleep", "5"] 14 | -------------------------------------------------------------------------------- /etcd/containers/0.3.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN \ 4 | apk add --update bash curl jq ca-certificates && \ 5 | rm -rf /var/cache/apk/* 6 | RUN \ 7 | curl -L https://github.com/coreos/etcd/releases/download/v2.3.0/etcd-v2.3.0-linux-amd64.tar.gz -o /etcd-v2.3.0-linux-amd64.tar.gz && \ 8 | tar -xzvf /etcd-*.tar.gz -C /tmp && \ 9 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 10 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 11 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 12 | 13 | ADD run.sh /run.sh 14 | 15 | ENTRYPOINT ["/run.sh"] 16 | CMD ["node"] 17 | -------------------------------------------------------------------------------- /etcd/containers/0.4.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN \ 4 | apk add --update bash curl jq ca-certificates && \ 5 | rm -rf /var/cache/apk/* 6 | 7 | RUN \ 8 | curl -L 
https://github.com/coreos/etcd/releases/download/v2.3.3/etcd-v2.3.3-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 9 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 10 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 11 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 12 | 13 | ADD run.sh /run.sh 14 | 15 | ENTRYPOINT ["/run.sh"] 16 | CMD ["node"] 17 | -------------------------------------------------------------------------------- /etcd/containers/0.5.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.11.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.6/etcd-v2.3.6-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD run.sh /run.sh 16 | 17 | ENTRYPOINT ["/run.sh"] 18 | CMD ["node"] 19 | -------------------------------------------------------------------------------- /etcd/containers/0.6.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.11.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.6/etcd-v2.3.6-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD run.sh /run.sh 
16 | 17 | ENTRYPOINT ["/run.sh"] 18 | CMD ["node"] 19 | -------------------------------------------------------------------------------- /etcd/containers/0.6.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.6-2} 5 | 6 | docker build -t $ACCT/etcd:$VERSION . 7 | docker push $ACCT/etcd:$VERSION 8 | 9 | if [ "$(which gsync)" ]; then 10 | gsync $ACCT/etcd:$VERSION 11 | fi 12 | -------------------------------------------------------------------------------- /etcd/containers/0.7.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.11.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.6/etcd-v2.3.6-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD run.sh /run.sh 16 | 17 | ENTRYPOINT ["/run.sh"] 18 | CMD ["node"] 19 | -------------------------------------------------------------------------------- /etcd/containers/0.7.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.6-2} 5 | 6 | docker build -t $ACCT/etcd:$VERSION . 
7 | docker push $ACCT/etcd:$VERSION 8 | 9 | if [ "$(which gsync)" ]; then 10 | gsync $ACCT/etcd:$VERSION 11 | fi 12 | -------------------------------------------------------------------------------- /etcd/containers/0.8.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.13.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.6/etcd-v2.3.6-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD run.sh /run.sh 16 | ADD healthcheck.sh /healthcheck.sh 17 | 18 | ENTRYPOINT ["/run.sh"] 19 | CMD ["node"] 20 | -------------------------------------------------------------------------------- /etcd/containers/0.8.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.6-2} 5 | 6 | docker build -t $ACCT/etcd:$VERSION . 7 | docker push $ACCT/etcd:$VERSION 8 | 9 | if [ "$(which gsync)" ]; then 10 | gsync $ACCT/etcd:$VERSION 11 | fi 12 | -------------------------------------------------------------------------------- /etcd/containers/0.8.0/etcd/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=/usr/local/bin:$PATH 4 | 5 | if ! 
etcdctl cluster-health | grep $(giddyup ip myip)|grep 'got\ healthy' ; then 6 | exit 1 7 | fi 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /etcd/containers/0.9.0/etcd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN \ 4 | apk add --update bash ca-certificates && \ 5 | rm -rf /var/cache/apk/* && \ 6 | wget -q -O /usr/local/bin/giddyup https://github.com/cloudnautique/giddyup/releases/download/v0.13.0/giddyup && \ 7 | chmod +x /usr/local/bin/giddyup 8 | 9 | RUN \ 10 | wget -q -O - https://github.com/coreos/etcd/releases/download/v2.3.6/etcd-v2.3.6-linux-amd64.tar.gz | tar xzf - -C /tmp && \ 11 | mv /tmp/etcd-*/etcd /usr/local/bin/etcd && \ 12 | mv /tmp/etcd-*/etcdctl /usr/local/bin/etcdctl && \ 13 | rm -rf /tmp/etcd-* && rm -f /etcd-*.tar.gz 14 | 15 | ADD run.sh /run.sh 16 | ADD disaster /usr/bin/disaster 17 | 18 | ENTRYPOINT ["/run.sh"] 19 | CMD ["node"] 20 | -------------------------------------------------------------------------------- /etcd/containers/0.9.0/etcd/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ACCT=${ACCT:-llparse} 4 | VERSION=${VERSION:-v2.3.6-4} 5 | 6 | docker build -t $ACCT/etcd:$VERSION . 7 | docker push $ACCT/etcd:$VERSION 8 | 9 | if [ "$(which gsync)" ]; then 10 | gsync $ACCT/etcd:$VERSION 11 | fi 12 | -------------------------------------------------------------------------------- /etcd/containers/0.9.0/etcd/disaster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BACKUP_DIR=${BACKUP_DIR:-/data.backup} 4 | 5 | echo -e "You seem to be having a bad day. Sorry about that.\n\nCreating backup..." 6 | 7 | etcdctl backup --data-dir $ETCD_DATA_DIR --backup-dir $BACKUP_DIR 8 | 9 | echo -e "Complete.\n\nPlease restart (DO NOT DELETE) this container to begin disaster recovery." 
10 | echo -e "Once this is completed, add more hosts to scale your etcd cluster." -------------------------------------------------------------------------------- /galera/0.1.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | galera: 2 | scale: 3 3 | metadata: 4 | mysqld: | 5 | innodb_file_per_table = 1 6 | innodb_autoinc_lock_mode=2 7 | query_cache_size=0 8 | query_cache_type=0 9 | innodb_flush_log_at_trx_commit=0 10 | binlog_format=ROW 11 | default-storage-engine=innodb 12 | wsrep_provider=/usr/lib/galera/libgalera_smm.so 13 | wsrep_provider_options="gcache.size = 2G" 14 | wsrep_sst_method=mysqldump 15 | wsrep_sst_auth=root:password 16 | progress=1 17 | galera-lb: 18 | scale: 1 19 | load_balancer_config: {} 20 | health_check: 21 | port: 42 22 | interval: 2000 23 | unhealthy_threshold: 3 24 | healthy_threshold: 2 25 | response_timeout: 2000 26 | -------------------------------------------------------------------------------- /galera/0.2.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | galera: 2 | scale: 3 3 | metadata: 4 | mysqld: | 5 | innodb_file_per_table = 1 6 | innodb_autoinc_lock_mode=2 7 | query_cache_size=0 8 | query_cache_type=0 9 | innodb_flush_log_at_trx_commit=0 10 | binlog_format=ROW 11 | default-storage-engine=innodb 12 | wsrep_provider=/usr/lib/galera/libgalera_smm.so 13 | wsrep_provider_options="gcache.size = 2G" 14 | wsrep_sst_method=mysqldump 15 | wsrep_sst_auth=root:password 16 | progress=1 17 | galera-lb: 18 | scale: 1 19 | load_balancer_config: {} 20 | health_check: 21 | port: 42 22 | interval: 2000 23 | unhealthy_threshold: 3 24 | healthy_threshold: 2 25 | response_timeout: 2000 26 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.2 2 | 3 | RUN apk 
add --update bash curl jq && rm -rf /var/cache/apk/* 4 | 5 | ADD ./run ./start_galera ./lowest_idx.sh ./common.sh / 6 | 7 | # Confd 8 | ADD ./conf.d /etc/confd/conf.d 9 | ADD ./templates /etc/confd/templates 10 | 11 | ADD https://github.com/rancher/confd/releases/download/0.11.0-dev-rancher/confd-0.11.0-dev-rancher-linux-amd64 /confd 12 | RUN chmod +x /confd 13 | 14 | entrypoint ["/run"] 15 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/common.sh: -------------------------------------------------------------------------------- 1 | wait_for_all_service_containers() 2 | { 3 | META_URL="${1:-http://rancher-metadata/2015-07-25}" 4 | SET_SCALE=$(curl -s -H 'Accept: application/json' ${META_URL}/self/service| jq -r .scale) 5 | while [ "$(curl -s -H 'Accept: application/json' ${META_URL}/self/service|jq '.containers |length')" -lt "${SET_SCALE}" ]; do 6 | sleep 1 7 | done 8 | } 9 | 10 | 11 | random_sleep() 12 | { 13 | SLEEP_TIME=$RANDOM 14 | let "SLEEP_TIME %= 15" 15 | sleep ${SLEEP_TIME} 16 | } 17 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/conf.d/cluster_ips.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="cluster_ips.tmpl" 3 | dest="/opt/rancher/cluster_ips" 4 | keys = [ 5 | "/self", 6 | "/containers", 7 | ] 8 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/conf.d/galera.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="galera.cnf.tmpl" 3 | dest="/etc/mysql/conf.d/001-galera.cnf" 4 | keys = [ 5 | "/self" 6 | ] 7 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/lowest_idx.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | 3 | ### 4 | # Detect if this container has the lowest create ID 5 | ### 6 | 7 | META_URL="http://rancher-metadata/2015-07-25" 8 | 9 | ALLMETA=$(curl -s -H 'Accept: application/json' ${META_URL}) 10 | MY_CREATE_INDEX="$(echo ${ALLMETA} | jq -r .self.container.create_index)" 11 | MY_STACK_NAME="$(echo ${ALLMETA} | jq -r .self.container.stack_name)" 12 | 13 | get_create_index() 14 | { 15 | echo $(echo ${ALLMETA}| jq -r ".containers[]| select(.name==\"${1}\")| .create_index") 16 | } 17 | 18 | SMALLEST="${MY_CREATE_INDEX}" 19 | for container in $(echo ${ALLMETA}| jq -r .self.service.containers[]); do 20 | IDX=$(get_create_index "${container}") 21 | if [ "${IDX}" -lt "${SMALLEST}" ]; then 22 | SMALLEST=${IDX} 23 | fi 24 | done 25 | 26 | if [ "${MY_CREATE_INDEX}" -eq "${SMALLEST}" ]; then 27 | exit 0 28 | fi 29 | 30 | exit 1 31 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cp /start_galera /lowest_idx.sh /common.sh /opt/rancher/ 4 | . /opt/rancher/common.sh 5 | 6 | sleep 10 7 | wait_for_all_service_containers 8 | 9 | exec /confd --backend=rancher --prefix=/2015-07-25 10 | -------------------------------------------------------------------------------- /galera/containers/0.1.0/galera-conf/start_galera: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . /opt/rancher/common.sh 4 | wait_for_all_service_containers 5 | 6 | cd $(dirname $0) 7 | 8 | GALERA_CONF='/etc/mysql/conf.d/001-galera.cnf' 9 | 10 | echo "Waiting for Config..." 11 | while [ ! -f "${GALERA_CONF}" ] && [ ! -f "/opt/rancher/cluster_ips" ]; do 12 | sleep 1 13 | done 14 | echo "Starting galera..." 15 | 16 | if [ "$#" -eq "0" ]; then 17 | bootstrap="false" 18 | 19 | /opt/rancher/lowest_idx.sh 20 | if [ "$?" -eq "0" ] && [ ! 
-f '/opt/rancher/initialized' ]; then 21 | bootstrap="true" 22 | fi 23 | 24 | connect_string="--wsrep_cluster_address=gcomm://$(tr '\n' ',' /dev/null 10 | echo "Building: docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} ." 11 | docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} . 12 | 13 | if [ "${PUSH}" = "true" ]; then 14 | docker push ${DOCKER_NAMESPACE}/$(basename $(pwd)):${TAG} 15 | fi 16 | 17 | popd >/dev/null 18 | done 19 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-base/bootstrap-hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 4 | 5 | start() { 6 | echo "starting setup of: ${1}..." 7 | } 8 | 9 | end(){ 10 | echo "Finished setting up: ${1}..." 11 | } 12 | 13 | # Mapreduce area 14 | start "/tmp/mapred" 15 | hdfs dfs -mkdir -p /tmp/mapred 16 | hdfs dfs -chown mapred:hadoop /tmp/mapred 17 | hdfs dfs -chmod 1750 /tmp/mapred 18 | end "/tmp/mapred" 19 | 20 | start "/tmp/hadoop-yarn" 21 | hdfs dfs -mkdir -p /tmp/hadoop-yarn 22 | hdfs dfs -chown mapred:hadoop /tmp/hadoop-yarn 23 | hdfs dfs -chmod 1775 /tmp/hadoop-yarn 24 | end "/tmp/hadoop-yarn" 25 | 26 | start "/tmp/logs" 27 | hdfs dfs -mkdir -p /tmp/logs 28 | hdfs dfs -chown yarn:hadoop /tmp/hadoop-yarn 29 | hdfs dfs -chmod 1775 /tmp/hadoop-yarn 30 | end "/tmp/logs" 31 | 32 | start "/users/hadoop" 33 | hdfs dfs -mkdir -p /users/hadoop 34 | hdfs dfs -chown hadoop:hadoop /users/hadoop 35 | end "/users/hadoop" 36 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-base/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | dfs.name.dir 6 | /hadoop/dfs/name 7 | 8 | 9 | -------------------------------------------------------------------------------- 
/hadoop/containers/0.1.0/hadoop-base/refreshnodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export JAVA_HOME=${JAVA_HOME} 4 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 5 | 6 | if [ "${1}" = "hdfs" ]; then 7 | hdfs dfsadmin -refreshNodes 8 | elif [ "${1}" = "yarn" ]; then 9 | yarn rmadmin -refreshNodes 10 | else 11 | echo "Need to specify 'hdfs' or 'yarn'" 12 | fi 13 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/conf.d/core-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "core-site.xml.tmpl" 3 | dest = "/etc/hadoop/core-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/stack", 8 | "/services", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/conf.d/hadoop-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-env.sh.tmpl" 3 | dest = "/etc/hadoop/hadoop-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/conf.d/hdfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hdfs-site.xml.tmpl" 3 | dest = 
"/etc/hadoop/hdfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/stack", 8 | "/services", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/conf.d/mapred-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-site.xml.tmpl" 3 | dest = "/etc/hadoop/mapred-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/stack", 8 | "/services", 9 | "/containers", 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/conf.d/yarn-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-site.xml.tmpl" 3 | dest = "/etc/hadoop/yarn-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/templates/core-site.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | fs.defaultFS 23 | hdfs://namenode 24 | 25 | 26 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-config/templates/yarn-site.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | yarn.resourcemanager.hostname 6 | yarn-rm 7 | 8 | 9 | 10 | yarn.nodemanager.local-dirs 11 | /hadoop/yarn/nm-local 12 | 13 | 14 | 15 | yarn.nodemanager.aux-services 16 | mapreduce_shuffle 17 | 18 | 19 | 20 | yarn.nodemanager.resource.memory-mb 21 | 8192 22 | 23 | 24 | 25 | yarn.nodemanager.resource.cpu-vcores 26 | 8 27 | 28 | 29 | 30 | yarn.nodemanager.hostname 31 | {{getv "/self/container/primary_ip"}} 32 | 33 | 34 | 
-------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-followers-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--interval", "10", "--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-followers-config/conf.d/slaves.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "slaves.tmpl" 3 | dest = "/etc/hadoop/slaves" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/services", 8 | "/containers", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-followers-config/templates/slaves.tmpl: -------------------------------------------------------------------------------- 1 | {{range ls "/services/datanode/containers"}}{{ $containerName := getv (printf "/services/datanode/containers/%s" .)}}{{getv (printf "/containers/%s/primary_ip" $containerName)}} 2 | {{end}} 3 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-namenode-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudnautique/hadoop-config:latest 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-namenode-config/conf.d/core-site.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "core-site.xml.tmpl" 3 | dest = "/etc/hadoop/core-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-namenode-config/conf.d/hdfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hdfs-site.xml.tmpl" 3 | dest = "/etc/hadoop/hdfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/container", 8 | ] 9 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-namenode-config/templates/core-site.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | fs.defaultFS 23 | hdfs://{{getv "/self/container/primary_ip"}}/ 24 | 25 | 26 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-yarnrm-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudnautique/hadoop-config:latest 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.1.0/hadoop-yarnrm-config/conf.d/yarn-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-site.xml.tmpl" 3 | dest = "/etc/hadoop/yarn-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | ] 10 | -------------------------------------------------------------------------------- 
/hadoop/containers/0.1.0/hadoop-yarnrm-config/templates/yarn-site.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | yarn.resourcemanager.hostname 6 | {{getv "/self/container/primary_ip"}} 7 | 8 | 9 | 10 | yarn.nodemanager.local-dirs 11 | /hadoop/yarn/nm-local 12 | 13 | 14 | 15 | yarn.nodemanager.aux-services 16 | mapreduce_shuffle 17 | 18 | 19 | 20 | yarn.log-aggregation-enable 21 | true 22 | 23 | 24 | 25 | yarn.nodemanager.resource.memory-mb 26 | 8192 27 | 28 | 29 | 30 | yarn.nodemanager.resource.cpu-vcores 31 | 4 32 | 33 | 34 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $(dirname $0) 4 | DOCKER_NAMESPACE=${1:-""} 5 | PUSH=${2:-"false"} 6 | TAG=${TAG:-"dev"} 7 | 8 | for i in $(ls -d */); do 9 | pushd ./$i >/dev/null 10 | echo "Building: docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} ." 11 | docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} . 12 | 13 | if [ "${PUSH}" = "true" ]; then 14 | docker push ${DOCKER_NAMESPACE}/$(basename $(pwd)):${TAG} 15 | fi 16 | 17 | popd >/dev/null 18 | done 19 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-base/bootstrap-hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 4 | 5 | start() { 6 | echo "starting setup of: ${1}..." 7 | } 8 | 9 | end(){ 10 | echo "Finished setting up: ${1}..." 
11 | } 12 | 13 | create_hdfs_path() #signature path, user, perms, group 14 | { 15 | local path="${1}" 16 | local user="${2}" 17 | local perms="${3}" 18 | local group="${4:-hadoop}" 19 | 20 | start "${path}" 21 | hdfs dfs -mkdir -p "${path}" 22 | hdfs dfs -chown "${user}:${group}" "${path}" 23 | hdfs dfs -chmod "${perms}" "${path}" 24 | end "${path}" 25 | } 26 | 27 | add_hdfs_user() 28 | { 29 | start "/user/${1}" 30 | hdfs dfs -mkdir -p "/tmp/hadoop-${1}" 31 | hdfs dfs -chown "${1}:hadoop" "/tmp/hadoop-${1}" 32 | 33 | hdfs dfs -mkdir -p "/user/${1}" 34 | hdfs dfs -chown "${1}:hadoop" "/user/${1}" 35 | end "/user/${1}" 36 | } 37 | 38 | # Temp area 39 | create_hdfs_path "/tmp/mapred" "mapred" "1750" 40 | create_hdfs_path "/tmp/hadoop-yarn" "mapred" "1775" 41 | create_hdfs_path "/tmp/logs" "yarn" "1775" 42 | 43 | # Users 44 | add_hdfs_user hadoop 45 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-base/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | dfs.name.dir 6 | /hadoop/dfs/name 7 | 8 | 9 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-base/refreshnodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export JAVA_HOME=${JAVA_HOME} 4 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 5 | 6 | if [ "${1}" = "hdfs" ]; then 7 | hdfs dfsadmin -refreshNodes 8 | elif [ "${1}" = "yarn" ]; then 9 | yarn rmadmin -refreshNodes 10 | else 11 | echo "Need to specify 'hdfs' or 'yarn'" 12 | fi 13 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates 
/etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/conf.d/core-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "core-site.xml.tmpl" 3 | dest = "/etc/hadoop/core-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/", 8 | "/services/", 9 | "/containers/" 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/conf.d/hadoop-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-env.sh.tmpl" 3 | dest = "/etc/hadoop/hadoop-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/conf.d/hdfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hdfs-site.xml.tmpl" 3 | dest = "/etc/hadoop/hdfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/conf.d/mapred-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-site.xml.tmpl" 3 | dest = "/etc/hadoop/mapred-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/conf.d/yarn-site.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-site.xml.tmpl" 3 | dest = "/etc/hadoop/yarn-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-config/templates/yarn-site.xml.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | yarn.resourcemanager.hostname 6 | {{getv (printf "/containers/%s/primary_ip" (getv "/services/yarn-resourcemanager/containers/0"))}} 7 | 8 | 9 | 10 | yarn.nodemanager.local-dirs 11 | /hadoop/yarn/nm-local 12 | 13 | 14 | 15 | yarn.nodemanager.aux-services 16 | mapreduce_shuffle 17 | 18 | 19 | 20 | yarn.nodemanager.hostname 21 | {{getv "/self/container/primary_ip"}} 22 | 23 | 24 | {{ if (eq (getv "/self/service/name") "yarn-nodemanager") }} 25 | 26 | yarn.nodemanager.webapp.address 27 | {{getv "/self/container/primary_ip"}} 28 | 29 | {{end}} 30 | 31 | 32 | {{if (gt (len (ls "/self/service/metadata/yarn-site")) 0)}}{{range ls "/self/service/metadata/yarn-site"}} 33 | {{.}} 34 | {{getv (printf "/self/service/metadata/yarn-site/%s" .)}} 35 | 36 | {{end}}{{end}} 37 | 38 | 39 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-followers-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--interval", "10", "--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-followers-config/conf.d/slaves.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "slaves.tmpl" 3 | dest = "/etc/hadoop/slaves" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/services", 8 | "/containers", 9 | ] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.2.0/hadoop-followers-config/templates/slaves.tmpl: -------------------------------------------------------------------------------- 1 | {{range ls "/services/datanode/containers"}}{{ $containerName := getv (printf "/services/datanode/containers/%s" .)}}{{getv (printf "/containers/%s/primary_ip" $containerName)}} 2 | {{end}} 3 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $(dirname $0) 4 | DOCKER_NAMESPACE=${1:-""} 5 | PUSH=${2:-"false"} 6 | TAG=${TAG:-"dev"} 7 | 8 | for i in $(ls -d */); do 9 | pushd ./$i >/dev/null 10 | echo "Building: docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} ." 11 | docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} . 
12 | 13 | if [ "${PUSH}" = "true" ]; then 14 | docker push ${DOCKER_NAMESPACE}/$(basename $(pwd)):${TAG} 15 | fi 16 | 17 | popd >/dev/null 18 | done 19 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/capacity-scheduler.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "capacity-scheduler.xml.tmpl" 3 | dest = "/etc/hadoop/capacity-scheduler.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/configuration.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "configuration.xsl" 3 | dest = "/etc/hadoop/configuration.xsl" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/container-executor.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "container-executor.cfg" 3 | dest = "/etc/hadoop/container-executor.cfg" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/core-site.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "core-site.xml.tmpl" 3 | dest = "/etc/hadoop/core-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/", 8 | "/services/", 9 | "/containers/", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/hadoop-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-env.sh.tmpl" 3 | dest = "/etc/hadoop/hadoop-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/hadoop-metrics.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-metrics.properties" 3 | dest = "/etc/hadoop/hadoop-metrics.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/hadoop-metrics2.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-metrics2.properties" 3 | dest = "/etc/hadoop/hadoop-metrics2.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/hadoop-policy.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-policy.xml" 3 | dest = "/etc/hadoop/hadoop-policy.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/hdfs-site.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "hdfs-site.xml.tmpl" 3 | dest = "/etc/hadoop/hdfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/httpfs-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-env.sh" 3 | dest = "/etc/hadoop/httpfs-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/httpfs-log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-log4j.properties" 3 | dest = "/etc/hadoop/httpfs-log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/httpfs-signature.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-signature.secret" 3 | dest = "/etc/hadoop/httpfs-signature.secret" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/httpfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-site.xml" 3 | dest = "/etc/hadoop/httpfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/kms-acls.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-acls.xml" 3 | dest = "/etc/hadoop/kms-acls.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/kms-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-env.sh" 3 | dest = "/etc/hadoop/kms-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/kms-log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-log4j.properties" 3 | dest = "/etc/hadoop/kms-log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/kms-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-site.xml" 3 | dest = "/etc/hadoop/kms-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "log4j.properties" 3 | dest = "/etc/hadoop/log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/mapred-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-env.sh" 3 | dest = "/etc/hadoop/mapred-env.sh" 4 | owner = "hadoop" 5 | mode 
= "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/mapred-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-site.xml.tmpl" 3 | dest = "/etc/hadoop/mapred-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/yarn-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-env.sh" 3 | dest = "/etc/hadoop/yarn-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/conf.d/yarn-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-site.xml.tmpl" 3 | dest = "/etc/hadoop/yarn-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/templates/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/templates/httpfs-signature.secret: 
-------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-config/templates/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-followers-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--interval", "10", "--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-followers-config/conf.d/slaves.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "slaves.tmpl" 3 | dest = "/etc/hadoop/slaves" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/services", 8 | "/containers", 9 | "/hosts", 10 | ] 11 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.0/hadoop-followers-config/templates/slaves.tmpl: -------------------------------------------------------------------------------- 1 | {{range ls "/services/node-config/containers"}}{{ $containerName := getv (printf "/services/node-config/containers/%s" .)}}{{getv (printf "/hosts/%s/agent_ip" (getv (printf "/containers/%s/hostname" $containerName)))}} 2 | {{end}} 3 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $(dirname $0) 4 | 
DOCKER_NAMESPACE=${1:-""} 5 | PUSH=${2:-"false"} 6 | TAG=${TAG:-"dev"} 7 | 8 | for i in $(ls -d */); do 9 | pushd ./$i >/dev/null 10 | echo "Building: docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} ." 11 | docker build --rm -t $DOCKER_NAMESPACE/$(basename $(pwd)):${TAG} . 12 | 13 | if [ "${PUSH}" = "true" ]; then 14 | docker push ${DOCKER_NAMESPACE}/$(basename $(pwd)):${TAG} 15 | fi 16 | 17 | popd >/dev/null 18 | done 19 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-base/bootstrap-hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 4 | 5 | start() { 6 | echo "starting setup of: ${1}..." 7 | } 8 | 9 | end(){ 10 | echo "Finished setting up: ${1}..." 11 | } 12 | 13 | create_hdfs_path() #signature path, user, perms, group 14 | { 15 | local path="${1}" 16 | local user="${2}" 17 | local perms="${3}" 18 | local group="${4:-hadoop}" 19 | 20 | start "${path}" 21 | hdfs dfs -mkdir -p "${path}" 22 | hdfs dfs -chown "${user}:${group}" "${path}" 23 | hdfs dfs -chmod "${perms}" "${path}" 24 | end "${path}" 25 | } 26 | 27 | add_hdfs_user() 28 | { 29 | start "/user/${1}" 30 | hdfs dfs -mkdir -p "/tmp/hadoop-${1}" 31 | hdfs dfs -chown "${1}:hadoop" "/tmp/hadoop-${1}" 32 | 33 | hdfs dfs -mkdir -p "/user/${1}" 34 | hdfs dfs -chown "${1}:hadoop" "/user/${1}" 35 | end "/user/${1}" 36 | } 37 | 38 | # Temp area 39 | create_hdfs_path "/tmp/mapred" "mapred" "1750" 40 | create_hdfs_path "/tmp/hadoop-yarn" "mapred" "1775" 41 | create_hdfs_path "/tmp/logs" "yarn" "1775" 42 | 43 | # Users 44 | add_hdfs_user hadoop 45 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-base/bootstrap-local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export 
PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 4 | 5 | start() { 6 | echo "starting setup of: ${1}..." 7 | } 8 | 9 | end(){ 10 | echo "Finished setting up: ${1}..." 11 | } 12 | 13 | create_local_path() #signature path, user, perms, group 14 | { 15 | local path="${1}" 16 | local user="${2}" 17 | local perms="${3}" 18 | local group="${4:-hadoop}" 19 | 20 | start "${path}" 21 | mkdir -p "${path}" 22 | chown "${user}:${group}" "${path}" 23 | chmod "${perms}" "${path}" 24 | end "${path}" 25 | } 26 | 27 | # Temp area 28 | create_local_path "/tmp/hadoop-mapred" "mapred" "1750" 29 | create_local_path "/tmp/hadoop-yarn" "yarn" "1775" 30 | create_local_path "/tmp/logs" "yarn" "1775" 31 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-base/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | dfs.name.dir 6 | /hadoop/dfs/name 7 | 8 | 9 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-base/refreshnodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export JAVA_HOME=${JAVA_HOME} 4 | export PATH=/usr/local/hadoop-${HADOOP_VERSION}/bin:$PATH 5 | 6 | if [ "${1}" = "hdfs" ]; then 7 | su -c "JAVA_HOME=${JAVA_HOME} /usr/local/hadoop-${HADOOP_VERSION}/bin/hdfs dfsadmin -refreshNodes" hdfs 8 | elif [ "${1}" = "yarn" ]; then 9 | su -c "JAVA_HOME=${JAVA_HOME} /usr/local/hadoop-${HADOOP_VERSION}/bin/yarn rmadmin -refreshNodes" yarn 10 | else 11 | echo "Need to specify 'hdfs' or 'yarn'" 12 | fi 13 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 
| 6 | VOLUME ["/etc/hadoop"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/capacity-scheduler.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "capacity-scheduler.xml.tmpl" 3 | dest = "/etc/hadoop/capacity-scheduler.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/configuration.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "configuration.xsl" 3 | dest = "/etc/hadoop/configuration.xsl" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/container-executor.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "container-executor.cfg" 3 | dest = "/etc/hadoop/container-executor.cfg" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/core-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "core-site.xml.tmpl" 3 | dest = "/etc/hadoop/core-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self/", 8 | "/services/", 9 | "/containers/", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/hadoop-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-env.sh.tmpl" 3 | 
dest = "/etc/hadoop/hadoop-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | ] 8 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/hadoop-metrics.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-metrics.properties" 3 | dest = "/etc/hadoop/hadoop-metrics.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/hadoop-metrics2.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-metrics2.properties" 3 | dest = "/etc/hadoop/hadoop-metrics2.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/hadoop-policy.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hadoop-policy.xml" 3 | dest = "/etc/hadoop/hadoop-policy.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/hdfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hdfs-site.xml.tmpl" 3 | dest = "/etc/hadoop/hdfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/httpfs-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-env.sh" 3 | dest = "/etc/hadoop/httpfs-env.sh" 4 | 
owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/httpfs-log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-log4j.properties" 3 | dest = "/etc/hadoop/httpfs-log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/httpfs-signature.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-signature.secret" 3 | dest = "/etc/hadoop/httpfs-signature.secret" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/httpfs-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "httpfs-site.xml" 3 | dest = "/etc/hadoop/httpfs-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/kms-acls.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-acls.xml" 3 | dest = "/etc/hadoop/kms-acls.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/kms-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-env.sh" 3 | dest = "/etc/hadoop/kms-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- 
/hadoop/containers/0.3.5/hadoop-config/conf.d/kms-log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-log4j.properties" 3 | dest = "/etc/hadoop/kms-log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/kms-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "kms-site.xml" 3 | dest = "/etc/hadoop/kms-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/log4j.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "log4j.properties" 3 | dest = "/etc/hadoop/log4j.properties" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/mapred-env.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-env.sh" 3 | dest = "/etc/hadoop/mapred-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/mapred-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "mapred-site.xml.tmpl" 3 | dest = "/etc/hadoop/mapred-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/yarn-env.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-env.sh" 3 | dest = "/etc/hadoop/yarn-env.sh" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [] 7 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/conf.d/yarn-site.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "yarn-site.xml.tmpl" 3 | dest = "/etc/hadoop/yarn-site.xml" 4 | owner = "hadoop" 5 | mode = "0644" 6 | keys = [ 7 | "/self", 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/templates/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/templates/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-config/templates/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-followers-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
rancher/hadoop-base:v0.3.5 2 | 3 | ADD https://github.com/rancher/confd/releases/download/0.11.0-dev-rancher/confd-0.11.0-dev-rancher-linux-amd64 /confd 4 | RUN chmod +x /confd 5 | 6 | ADD ./conf.d /etc/confd/conf.d 7 | ADD ./templates /etc/confd/templates 8 | 9 | VOLUME ["/etc/hadoop"] 10 | 11 | ENTRYPOINT ["/confd"] 12 | CMD ["--interval", "10", "--backend", "rancher", "--prefix", "/2015-07-25"] 13 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-followers-config/conf.d/slaves.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "slaves.tmpl" 3 | dest = "/etc/hadoop/slaves" 4 | owner = "hadoop" 5 | mode = "0644" 6 | reload_cmd = "/refreshnodes.sh ${NODETYPE}" 7 | keys = [ 8 | "/services", 9 | "/containers", 10 | "/hosts", 11 | ] 12 | -------------------------------------------------------------------------------- /hadoop/containers/0.3.5/hadoop-followers-config/templates/slaves.tmpl: -------------------------------------------------------------------------------- 1 | {{range ls "/services/datanode/containers"}}{{ $containerName := getv (printf "/services/datanode/containers/%s" .)}}{{getv (printf "/containers/%s/primary_ip" $containerName)}} 2 | {{end}} 3 | -------------------------------------------------------------------------------- /jenkins/0.1.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | jenkins-primary: 2 | image: "jenkins:1.625.1" 3 | ports: 4 | - "8080:8080" 5 | labels: 6 | io.rancher.sidekicks: jenkins-plugins,jenkins-datavolume 7 | io.rancher.container.hostname_override: container_name 8 | volumes_from: 9 | - jenkins-plugins 10 | - jenkins-datavolume 11 | entrypoint: /usr/share/jenkins/rancher/jenkins.sh 12 | jenkins-plugins: 13 | image: rancher/jenkins-plugins:v0.1.0 14 | jenkins-datavolume: 15 | image: "jenkins:1.625.1" 16 | labels: 17 | 
io.rancher.container.start_once: true 18 | entrypoint: /bin/true 19 | -------------------------------------------------------------------------------- /jenkins/0.1.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | jenkins-primary: 2 | metadata: 3 | plugins: | 4 | credentials 5 | greenballs 6 | git 7 | junit 8 | git-client 9 | github-api 10 | github-oauth 11 | github 12 | plain-credentials 13 | scm-api 14 | ssh-credentials 15 | ssh-slaves 16 | swarm 17 | -------------------------------------------------------------------------------- /jenkins/0.1.1/docker-compose.yml: -------------------------------------------------------------------------------- 1 | jenkins-primary: 2 | image: "jenkins:1.625.1" 3 | ports: 4 | - "8080:8080" 5 | labels: 6 | io.rancher.sidekicks: jenkins-plugins,jenkins-datavolume 7 | io.rancher.container.hostname_override: container_name 8 | volumes_from: 9 | - jenkins-plugins 10 | - jenkins-datavolume 11 | entrypoint: /usr/share/jenkins/rancher/jenkins.sh 12 | jenkins-plugins: 13 | image: rancher/jenkins-plugins:v0.1.1 14 | jenkins-datavolume: 15 | image: "jenkins:1.625.1" 16 | labels: 17 | io.rancher.container.start_once: true 18 | entrypoint: /bin/true 19 | -------------------------------------------------------------------------------- /jenkins/0.1.1/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | jenkins-primary: 2 | metadata: 3 | plugins: | 4 | credentials 5 | greenballs 6 | git 7 | junit 8 | git-client 9 | github-api 10 | github-oauth 11 | github 12 | plain-credentials 13 | scm-api 14 | ssh-credentials 15 | ssh-slaves 16 | swarm 17 | -------------------------------------------------------------------------------- /jenkins/containers/0.1.0/plugins/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | 
#!/bin/bash

# Wait until confd has rendered the plugin list before installing plugins.
# The previous one-shot `if [ ! -f … ]; then sleep 1; else install` check
# silently skipped plugin installation whenever the file was not yet
# present at container start (this matches the fixed 0.1.1 behavior).
while [ ! -f /usr/share/jenkins/rancher/plugins.txt ]; do
    sleep 1
done

# Install the plugins listed by the jenkins-plugins sidekick.
/usr/local/bin/plugins.sh /usr/share/jenkins/rancher/plugins.txt

# Hand PID 1 to Jenkins under tini.
exec /bin/tini -- /usr/local/bin/jenkins.sh
swarm-clients:
  image: "rancher/jenkins-swarm:v0.1.0"
  user: "root"
  labels:
    # Quoted: label values must be strings; a bare `true` is parsed as a
    # YAML boolean (the 0.2.0 file already quotes "true").
    io.rancher.scheduler.global: 'true'
    io.rancher.scheduler.affinity:host_label_soft: ci=worker
  external_links:
    - "jenkins-primary/jenkins-primary:jenkins-primary"
  environment:
    JENKINS_PASS: "${jenkins_pass}"
    JENKINS_USER: "${jenkins_user}"
    SWARM_EXECUTORS: "${swarm_executors}"
  volumes:
    - '/var/run/docker.sock:/var/run/docker.sock'
    - '/usr/bin/docker:/usr/bin/docker'
swarm-clients: 2 | image: "rancher/jenkins-swarm:v0.2.0" 3 | user: "root" 4 | labels: 5 | io.rancher.scheduler.global: "true" 6 | #io.rancher.scheduler.affinity:host_label_soft: ci=worker 7 | external_links: 8 | - "jenkins-ci/jenkins-primary:jenkins-primary" 9 | environment: 10 | JENKINS_PASS: "${jenkins_pass}" 11 | JENKINS_USER: "${jenkins_user}" 12 | SWARM_EXECUTORS: "${swarm_executors}" 13 | volumes: 14 | - '/var/run/docker.sock:/var/run/docker.sock' 15 | - '/var/jenkins_home/workspace:/var/jenkins_home/workspace' 16 | - '/tmp:/tmp' 17 | -------------------------------------------------------------------------------- /jenkins_swarm_clients/container/0.1.0/jenkins-swarm/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jenkins:1.625.1 2 | 3 | USER root 4 | 5 | RUN apt-get update && apt-get install -y libapparmor-dev 6 | 7 | ENV SWARM_CLIENT_VERSION 2.0 8 | ADD http://maven.jenkins-ci.org/content/repositories/releases/org/jenkins-ci/plugins/swarm-client/${SWARM_CLIENT_VERSION}/swarm-client-${SWARM_CLIENT_VERSION}-jar-with-dependencies.jar /usr/share/jenkins/swarm-client-${SWARM_CLIENT_VERSION}.jar 9 | RUN chmod 644 /usr/share/jenkins/swarm-client-${SWARM_CLIENT_VERSION}.jar 10 | 11 | ADD ./run.sh /run.sh 12 | 13 | USER jenkins 14 | WORKDIR /var/jenkins_home 15 | 16 | ENTRYPOINT ["/run.sh"] 17 | -------------------------------------------------------------------------------- /jenkins_swarm_clients/container/0.1.0/jenkins-swarm/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SWARM_ARGS="" 4 | if [ -n "${JENKINS_USER}" ]; then 5 | SWARM_ARGS="${SWARM_ARGS} -username ${JENKINS_USER}" 6 | fi 7 | 8 | if [ -n "${JENKINS_PASS}" ]; then 9 | SWARM_ARGS="${SWARM_ARGS} -passwordEnvVariable JENKINS_PASS" 10 | fi 11 | 12 | if [ -n "${SWARM_EXECUTORS}" ]; then 13 | SWARM_ARGS="${SWARM_ARGS} -executors ${SWARM_EXECUTORS}" 14 | fi 15 | 16 | exec java -jar 
#!/bin/bash

# Assemble optional flags for the Jenkins swarm client from the environment
# (JENKINS_USER / JENKINS_PASS / SWARM_EXECUTORS are injected by the
# docker-compose service definition).
SWARM_ARGS=""
if [ -n "${JENKINS_USER}" ]; then
    SWARM_ARGS="${SWARM_ARGS} -username ${JENKINS_USER}"
fi

if [ -n "${JENKINS_PASS}" ]; then
    # The password is passed by environment-variable NAME, not value, so it
    # never appears in the process argument list.
    SWARM_ARGS="${SWARM_ARGS} -passwordEnvVariable JENKINS_PASS"
fi

if [ -n "${SWARM_EXECUTORS}" ]; then
    SWARM_ARGS="${SWARM_ARGS} -executors ${SWARM_EXECUTORS}"
fi

# SWARM_CLIENT_VERSION is baked in by the Dockerfile (ENV SWARM_CLIENT_VERSION 2.0);
# "jenkins-primary" resolves via the compose external_links entry.
# NOTE(review): ${SWARM_ARGS} is intentionally unquoted so each flag is
# word-split into a separate argument.
exec java -jar /usr/share/jenkins/swarm-client-${SWARM_CLIENT_VERSION}.jar -fsroot /var/jenkins_home ${SWARM_ARGS} -master http://jenkins-primary:${JENKINS_PORT:-8080}
# Kibana 4 stack: load balancer -> nginx reverse proxy (config rendered by a
# confd sidekick from env vars) -> kibana4 -> external elasticsearch stack.
kibana-vip:
  # Rancher load balancer exposing the nginx proxy on port 80.
  ports:
    - 80:80
  restart: always
  tty: true
  image: rancher/load-balancer-service
  links:
    - nginx-proxy:kibana4
  stdin_open: true
nginx-proxy-conf:
  # Sidekick that renders the nginx config via confd's env backend; the
  # NGINX_WEB_* variables define the kibana upstream (port 5601) and vhost.
  image: rancher/nginx-conf:v0.1.0
  command: -backend=env
  environment:
    NGINX_WEB_KIBANA_UPSTREAMS_0: '{"IP": "kibana", "PORT": "5601"}'
    NGINX_WEB_KIBANA_SERVERNAME: 'kibana'
  labels:
    io.rancher.container.hostname_override: container_name
nginx-proxy:
  image: rancher/nginx:v1.9.4-2
  links:
    # The kibana4 service is reachable inside the proxy as hostname "kibana".
    - kibana4:kibana
  volumes_from:
    - nginx-proxy-conf
  labels:
    io.rancher.container.hostname_override: container_name
    io.rancher.sidekicks: nginx-proxy-conf
kibana4:
  restart: always
  external_links:
    # Elasticsearch client service from the "es" stack.
    - es/elasticsearch-clients:elasticsearch
  tty: true
  image: kibana:4.1.1
  stdin_open: true
  labels:
    io.rancher.container.hostname_override: container_name
#!/bin/bash

# Block until the confd sidekick has written the nginx configuration, then
# hand control to the requested command (the Dockerfile CMD runs nginx in
# the foreground).
until [ -f "/etc/nginx/conf.d/nginx.conf" ]; do
    sleep 1
done

exec "$@"
#!/bin/sh

# Brief delay so container networking/links are up before logspout starts.
sleep 2
# Quote "$@" so each argument is forwarded intact; the previous unquoted $@
# re-split any argument containing whitespace.
exec /bin/logspout "$@"
/logstash/0.1.0/README.md: -------------------------------------------------------------------------------- 1 | ## Logstash Compose File 2 | 3 | ---- 4 | 5 | This will create a logstash pipeline. 6 | 7 | [INGRES] 8 | 9 | [REDIS] 10 | 11 | [Parsers] -------------------------------------------------------------------------------- /logstash/0.2.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | logstash-indexer: 2 | metadata: 3 | logstash: 4 | inputs: | 5 | redis { 6 | host => "redis" 7 | port => "6379" 8 | data_type => "list" 9 | key => "logstash" 10 | } 11 | filters: | 12 | if [docker.name] == "/rancher-server" { 13 | json { 14 | source => "message" 15 | } 16 | 17 | kv {} 18 | 19 | if [@message] { 20 | mutate { 21 | replace => { "message" => "%{@message}" } 22 | } 23 | } 24 | } 25 | outputs: | 26 | elasticsearch { 27 | host => "elasticsearch" 28 | protocol => "http" 29 | index => "logstash-demo-%{+YYYY.MM.dd}" 30 | } 31 | logstash-collector: 32 | metadata: 33 | logstash: 34 | inputs: | 35 | udp { 36 | port => 5000 37 | codec => "json" 38 | } 39 | outputs: | 40 | redis { 41 | host => "redis" 42 | port => "6379" 43 | data_type => "list" 44 | key => "logstash" 45 | } 46 | -------------------------------------------------------------------------------- /logstash/containers/0.1.0/logstash-config/.dockerignore: -------------------------------------------------------------------------------- 1 | README.md 2 | -------------------------------------------------------------------------------- /logstash/containers/0.1.0/logstash-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:v0.1.0 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | VOLUME /etc/logstash 6 | VOLUME /opt/logstash/patterns 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD [] 10 | -------------------------------------------------------------------------------- 
/logstash/containers/0.1.0/logstash-config/conf.d/logstashconfig.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "logstash.conf.tmpl" 3 | dest = "/etc/logstash/logstash.conf" 4 | keys = [ 5 | "/logstash/config/inputs", 6 | "/logstash/config/outputs", 7 | "/logstash/config/filters", 8 | ] 9 | -------------------------------------------------------------------------------- /logstash/containers/0.1.0/logstash-config/conf.d/patterns.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "patterns.tmpl" 3 | dest = "/opt/logstash/patterns/extra" 4 | keys = [ 5 | "/logstash/patterns/", 6 | ] 7 | -------------------------------------------------------------------------------- /logstash/containers/0.1.0/logstash-config/templates/patterns.tmpl: -------------------------------------------------------------------------------- 1 | {{if (gt (len (ls "/logstash/patterns")) 0)}}{{range ls "/logstash/patterns"}}{{getv (printf "/logstash/patterns/%s" .)}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /logstash/containers/0.2.0/logstash-config/.dockerignore: -------------------------------------------------------------------------------- 1 | README.md 2 | -------------------------------------------------------------------------------- /logstash/containers/0.2.0/logstash-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | VOLUME /etc/logstash 6 | VOLUME /opt/logstash/patterns 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /logstash/containers/0.2.0/logstash-config/conf.d/logstashconfig.toml: 
{{/* Render each extra grok pattern from service metadata, one per line.
     Fix: this 0.2.0 template still read the 0.1.0 key "/logstash/patterns",
     while its patterns.toml watches "/self/service/metadata/logstash/patterns/",
     so it could never see any data with the rancher backend. */}}{{if (gt (len (ls "/self/service/metadata/logstash/patterns")) 0)}}{{range ls "/self/service/metadata/logstash/patterns"}}{{getv (printf "/self/service/metadata/logstash/patterns/%s" .)}}
{{end}}{{end}}
{{/* Emit the spark-defaults block from service metadata when present.
     Fix: the getv key was misspelled "meatadata", so rendering failed for
     every service that actually defined spark-defaults (the guarding
     `exists` check used the correct spelling). */}}{{if exists "/self/service/metadata/spark-defaults"}}
{{getv "/self/service/metadata/spark-defaults"}}
{{end}}
#!/bin/bash

# Helpers for resolving Spark master and ZooKeeper addresses from the
# Rancher metadata service. Callers must export METADATA_URL first
# (start_spark.sh does this before sourcing).

export MASTERS=
export ZK=

# Print a container's primary IP looked up from the metadata service.
# NOTE(review): ${container} is interpolated into the jq program WITHOUT
# added quotes; this only works because get_master_string iterates jq
# output without -r, so each name still carries its surrounding double
# quotes when it reaches this function. Fragile — confirm before reusing
# with bare (unquoted) container names.
get_container_ip()
{
    local container=${1}
    echo $(curl -s -H 'Accept: application/json' "${METADATA_URL}" | jq -r ".containers[] | select(.name==${container}) | .primary_ip")
}

# Build the multi-master URL from the spark-master service containers,
# e.g. spark://10.0.0.1:7077,10.0.0.2:7077 (scheme only on the first entry).
get_master_string()
{
    # No -r on purpose: names stay quoted for get_container_ip (see above).
    for container in $(curl -s -H 'Accept: application/json' "${METADATA_URL}"|jq '.services[] | select(.name=="spark-master") | .containers[]'); do
        if [ -z "$MASTERS" ]; then
            MASTERS="spark://$(get_container_ip ${container}):7077"
        else
            MASTERS="${MASTERS},$(get_container_ip ${container}):7077"
        fi
    done
    echo "${MASTERS}"
}

# Build the ZooKeeper connect string (name:2181,name:2181,...) from the
# zookeeper service's container names; here -r IS used, so the names are
# emitted unquoted and used directly as hostnames.
get_zookeeper_string()
{
    for container in $(curl -s -H 'Accept: application/json' "${METADATA_URL}"|jq -r '.services[] | select(.name=="zookeeper") | .containers[]'); do
        if [ -z "$ZK" ]; then
            ZK="${container}:2181"
        else
            ZK="${ZK},${container}:2181"
        fi
    done

    echo "${ZK}"
}
7 | chown -R spark:spark /spark/work 8 | fi 9 | 10 | echo "work dir setup" 11 | -------------------------------------------------------------------------------- /utils/containers/confd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | ADD ./confd-0.10.0-linux-amd64 /confd 4 | -------------------------------------------------------------------------------- /utils/containers/confd/Dockerfile.rancher: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | ADD ./confd-0.11.0-dev-rancher-linux-amd64 /confd 4 | -------------------------------------------------------------------------------- /utils/containers/confd/confd-0.10.0-linux-amd64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/catalog-dockerfiles/8493c9633b462f06693f19abe579c1b744026dbd/utils/containers/confd/confd-0.10.0-linux-amd64 -------------------------------------------------------------------------------- /utils/containers/confd/confd-0.11.0-dev-rancher-linux-amd64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/catalog-dockerfiles/8493c9633b462f06693f19abe579c1b744026dbd/utils/containers/confd/confd-0.11.0-dev-rancher-linux-amd64 -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:v0.1.0 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME /etc/nginx/conf.d 7 | VOLUME /etc/nginx/access 8 | 9 | ENTRYPOINT ["/confd"] 10 | CMD [] 11 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.1.0/conf.d/htpasswd.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src="kibana.htpasswd.tmpl" 3 | dest="/etc/nginx/access/kibana.htpasswd" 4 | keys = [ 5 | "/nginx/web/" 6 | ] 7 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.1.0/conf.d/nginx.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="nginx.conf.tmpl" 3 | dest="/etc/nginx/conf.d/nginx.conf" 4 | keys = [ 5 | "/nginx/web/" 6 | ] 7 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.1.0/templates/kibana.htpasswd.tmpl: -------------------------------------------------------------------------------- 1 | {{ if (gt (len (ls "/nginx/web/kibana/users")) 0)}}{{range gets "/nginx/web/kibana/users/*"}}{{$data := json .Value}}{{$data.username}}:{{$data.password}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.1.0/templates/nginx.conf.tmpl: -------------------------------------------------------------------------------- 1 | {{range $dir := lsdir "/nginx/web"}} 2 | upstream {{base $dir}} { {{range gets (printf "/nginx/web/%s/upstreams/*" $dir)}} 3 | server {{$data := json .Value}}{{$data.IP}}:{{$data.PORT}};{{end}} 4 | } 5 | 6 | server { 7 | listen 80; 8 | server_name {{getv (printf "/nginx/web/%s/servername" $dir)}}; 9 | 10 | {{if (gt (len (ls (printf "/nginx/web/%s/users" $dir))) 0)}} 11 | auth_basic "Access restricted"; 12 | auth_basic_user_file /etc/nginx/access/kibana.htpasswd; 13 | {{end}} 14 | 15 | # Do not pass Auth headers along. 
16 | proxy_set_header Authorization ""; 17 | 18 | location / { 19 | proxy_pass http://{{base $dir}}; 20 | } 21 | } 22 | {{end}} 23 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.2.0/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME /etc/nginx/conf.d 7 | VOLUME /etc/nginx/access 8 | 9 | ENTRYPOINT ["/confd"] 10 | CMD [] 11 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.2.0/conf.d/htpasswd.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="htpasswd.tmpl" 3 | dest="/etc/nginx/access/htpasswd" 4 | keys = [ 5 | "/self/service/metadata/nginx" 6 | ] 7 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.2.0/conf.d/nginx.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="nginx.conf.tmpl" 3 | dest="/etc/nginx/conf.d/nginx.conf" 4 | keys = [ 5 | "/self/service/metadata" 6 | ] 7 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.2.0/templates/htpasswd.tmpl: -------------------------------------------------------------------------------- 1 | {{if exists "/self/service/metadata/nginx/users/0/username"}}{{range ls "/self/service/metadata/nginx/users"}}{{getv (printf "/self/service/metadata/nginx/users/%s/username" .)}}:{{getv (printf "/self/service/metadata/nginx/users/%s/password" .)}} 2 | {{end}}{{end}} 3 | -------------------------------------------------------------------------------- /utils/containers/nginx-conf/0.2.0/templates/nginx.conf.tmpl: 
-------------------------------------------------------------------------------- 1 | {{$serverName := getv "/self/service/metadata/nginx/conf/servername"}}upstream {{$serverName}} { 2 | server 127.0.0.1:{{getv "/self/service/metadata/nginx/conf/upstream_port"}}; 3 | } 4 | 5 | server { 6 | listen 80; 7 | server_name {{$serverName}}; 8 | 9 | {{if exists "/self/service/metadata/nginx/users/0/username"}} 10 | auth_basic "Access restricted"; 11 | auth_basic_user_file /etc/nginx/access/htpasswd; 12 | {{end}} 13 | 14 | # Do not pass Auth headers along. 15 | proxy_set_header Authorization ""; 16 | 17 | location / { 18 | proxy_pass http://{{$serverName}}; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /zookeeper/0.1.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | zookeeper-config: 2 | image: rancher/zookeeper-config:v0.1.0 3 | volumes_from: 4 | - zookeeper 5 | net: "container:zookeeper" 6 | zookeeper: 7 | labels: 8 | io.rancher.sidekicks: zookeeper-config 9 | image: rancher/zookeeper:3.4.6-1 10 | -------------------------------------------------------------------------------- /zookeeper/0.1.0/rancher-compose.yml: -------------------------------------------------------------------------------- 1 | zookeeper: 2 | scale: 3 3 | -------------------------------------------------------------------------------- /zookeeper/0.2.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | zookeeper-conf: 2 | image: rancher/zookeeper-config:v0.2.0 3 | volumes_from: 4 | - zookeeper 5 | net: "container:zookeeper" 6 | zookeeper: 7 | image: rancher/zookeeper:3.4.6-2 8 | labels: 9 | io.rancher.sidekicks: zookeeper-conf 10 | volumes: 11 | - /opt/rancher 12 | -------------------------------------------------------------------------------- /zookeeper/0.2.0/rancher-compose.yml: 
-------------------------------------------------------------------------------- 1 | zookeeper: 2 | scale: 3 3 | -------------------------------------------------------------------------------- /zookeeper/0.3.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | zookeeper: 2 | image: rancher/zookeeper:3.4.8 3 | environment: 4 | ZK_ENSEMBLE_SIZE: ${ZK_ENSEMBLE_SIZE} 5 | ZK_TICK_TIME: ${ZK_TICK_TIME} 6 | ZK_INIT_LIMIT: ${ZK_INIT_LIMIT} 7 | ZK_SYNC_LIMIT: ${ZK_SYNC_LIMIT} 8 | ZK_MAX_CLIENT_CXNS: ${ZK_MAX_CLIENT_CXNS} 9 | JVMFLAGS: "-Xms${ZK_HEAP_SIZE}m -Xmx${ZK_HEAP_SIZE}m" 10 | labels: 11 | io.rancher.sidekicks: data 12 | volumes_from: 13 | - data 14 | data: 15 | image: rancher/zookeeper:3.4.8 16 | entrypoint: /bin/true 17 | labels: 18 | io.rancher.container.start_once: 'true' 19 | net: none 20 | volumes: 21 | - /data 22 | - /log -------------------------------------------------------------------------------- /zookeeper/0.4.0/docker-compose.yml: -------------------------------------------------------------------------------- 1 | zookeeper: 2 | image: rancher/zookeeper:3.4.8 3 | environment: 4 | ZK_ENSEMBLE_SIZE: ${ZK_ENSEMBLE_SIZE} 5 | ZK_TICK_TIME: ${ZK_TICK_TIME} 6 | ZK_INIT_LIMIT: ${ZK_INIT_LIMIT} 7 | ZK_SYNC_LIMIT: ${ZK_SYNC_LIMIT} 8 | ZK_MAX_CLIENT_CXNS: ${ZK_MAX_CLIENT_CXNS} 9 | JVMFLAGS: "-Xms${ZK_HEAP_SIZE}m -Xmx${ZK_HEAP_SIZE}m" 10 | labels: 11 | io.rancher.sidekicks: data 12 | volumes_from: 13 | - data 14 | data: 15 | image: rancher/zookeeper:3.4.8 16 | entrypoint: /bin/true 17 | labels: 18 | io.rancher.container.start_once: 'true' 19 | net: none 20 | volumes: 21 | - /data -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates 
/etc/confd/templates 5 | 6 | VOLUME ["/var/lib/zookeeper", "/opt/zookeeper/conf/"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper-config/conf.d/myid.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | prefix = "/self/container" 3 | src = "myid.tmpl" 4 | dest = "/var/lib/zookeeper/myid" 5 | owner = "root" 6 | mode = "0644" 7 | keys = [ 8 | "/create_index", 9 | ] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper-config/conf.d/zoo.cfg.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "zoo.cfg.tmpl" 3 | dest = "/opt/zookeeper/conf/zoo.cfg" 4 | owner = "root" 5 | mode = "0644" 6 | keys = [ 7 | "/self/service/containers", 8 | "/containers", 9 | ] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper-config/templates/myid.tmpl: -------------------------------------------------------------------------------- 1 | {{getv "/create_index"}} 2 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper-config/templates/zoo.cfg.tmpl: -------------------------------------------------------------------------------- 1 | tickTime=2000 2 | initLimit=10 3 | syncLimit=5 4 | dataDir=/var/lib/zookeeper 5 | clientPort=2181 6 | autopurge.snapRetainCount=3 7 | autopurge.purgeInterval=1 8 | {{range ls "/self/service/containers"}}{{ $containerName := getv (printf "/self/service/containers/%s" .)}} 9 | server.{{getv (printf "/containers/%s/create_index" $containerName)}}={{getv (printf "/containers/%s/primary_ip" $containerName)}}:2888:3888{{end}} 10 | 
-------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y --no-install-recommends openjdk-7-jre-headless 5 | 6 | ADD entry.sh /entry.sh 7 | 8 | ADD http://mirror.metrocast.net/apache/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz /opt/ 9 | RUN cd /opt && \ 10 | tar -zxvf zookeeper-3.4.6.tar.gz && \ 11 | mv zookeeper-3.4.6 zookeeper && \ 12 | rm -rf ./zookeeper-*tar.gz && \ 13 | mkdir -p /var/lib/zookeeper 14 | 15 | WORKDIR /opt/zookeeper 16 | EXPOSE 2181 2888 3888 17 | VOLUME ["/var/lib/zookeeper", "/opt/zookeeper/conf", "/tmp/zookeeper"] 18 | 19 | ENTRYPOINT ["/entry.sh"] 20 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | ## Zookeeper Container 2 | 3 | ---- 4 | This container runs a stock ZooKeeper 3.4.6 instance. 5 | 6 | It leverages confd to populate the config files. 7 | To use the container, it is initialized like so: 8 | 9 | ``` 10 | docker run -d --net=host --name=zookeeper \ 11 | -e SERVICES_ZOOKEEPER_MYID= \ 12 | -e SERVICES_ZOOKEEPER_HOST_1='{"Id": "1", "Ip": ""}' \ 13 | -e SERVICES_ZOOKEEPER_HOST_2='{"Id": "", "Ip": ""}' \ 14 | rancher/zookeeper 15 | ``` 16 | 17 | The container will set up ports on 2181,2888,3888 on the host. 18 | 19 | Each node in the zookeeper cluster will need an entry, and the ID/IP pairs must be the same for all hosts. 20 | 21 | The network is set to `host` so that ZK can bind to the instance's IP.
22 | -------------------------------------------------------------------------------- /zookeeper/containers/0.1.0/zookeeper/entry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | while [ ! -f "/var/lib/zookeeper/myid" ]; do 6 | sleep 1 7 | done 8 | 9 | exec /opt/zookeeper/bin/zkServer.sh start-foreground 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/confd-base:0.11.0-dev-rancher 2 | 3 | ADD ./conf.d /etc/confd/conf.d 4 | ADD ./templates /etc/confd/templates 5 | 6 | VOLUME ["/var/lib/zookeeper", "/opt/zookeeper/conf/", "/opt/rancher"] 7 | 8 | ENTRYPOINT ["/confd"] 9 | CMD ["--interval", "30", "--backend", "rancher", "--prefix", "/2015-07-25"] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/conf.d/myid.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | prefix = "/self/container" 3 | src = "myid.tmpl" 4 | dest = "/var/lib/zookeeper/myid" 5 | owner = "root" 6 | mode = "0644" 7 | keys = [ 8 | "/create_index", 9 | ] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/conf.d/startup.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src="startup.tmpl" 3 | dest="/opt/rancher/startup.meta" 4 | keys = [ 5 | "/self/service" 6 | ] 7 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/conf.d/zoo.cfg.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "zoo.cfg.tmpl" 3 | dest = "/opt/zookeeper/conf/zoo.cfg" 4 | 
owner = "root" 5 | mode = "0644" 6 | keys = [ 7 | "/self/service/containers", 8 | "/containers", 9 | ] 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/templates/myid.tmpl: -------------------------------------------------------------------------------- 1 | {{getv "/create_index"}} 2 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/templates/startup.tmpl: -------------------------------------------------------------------------------- 1 | {{getv "/self/service/scale"}} 2 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper-config/templates/zoo.cfg.tmpl: -------------------------------------------------------------------------------- 1 | tickTime=2000 2 | initLimit=10 3 | syncLimit=5 4 | dataDir=/var/lib/zookeeper 5 | clientPort=2181 6 | autopurge.snapRetainCount=3 7 | autopurge.purgeInterval=1 8 | {{range ls "/self/service/containers"}}{{ $containerName := getv (printf "/self/service/containers/%s" .)}} 9 | server.{{getv (printf "/containers/%s/create_index" $containerName)}}={{getv (printf "/containers/%s/primary_ip" $containerName)}}:2888:3888{{end}} 10 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y --no-install-recommends openjdk-7-jre-headless 5 | 6 | ADD http://mirror.metrocast.net/apache/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz /opt/ 7 | RUN cd /opt && \ 8 | tar -zxvf zookeeper-3.4.6.tar.gz && \ 9 | mv zookeeper-3.4.6 zookeeper && \ 10 | rm -rf ./zookeeper-*tar.gz && \ 11 | mkdir -p /var/lib/zookeeper 12 | 13 | ADD entry.sh /entry.sh 14 | 15 | WORKDIR /opt/zookeeper 16 | EXPOSE 2181 
2888 3888 17 | VOLUME ["/var/lib/zookeeper", "/opt/zookeeper/conf", "/tmp/zookeeper"] 18 | 19 | ENTRYPOINT ["/entry.sh"] 20 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | ## Zookeeper Container 2 | 3 | ---- 4 | This container runs a stock ZooKeeper 3.4.6 instance. 5 | 6 | It leverages confd to populate the config files. 7 | To use the container, it is initialized like so: 8 | 9 | ``` 10 | docker run -d --net=host --name=zookeeper \ 11 | -e SERVICES_ZOOKEEPER_MYID= \ 12 | -e SERVICES_ZOOKEEPER_HOST_1='{"Id": "1", "Ip": ""}' \ 13 | -e SERVICES_ZOOKEEPER_HOST_2='{"Id": "", "Ip": ""}' \ 14 | rancher/zookeeper 15 | ``` 16 | 17 | The container will set up ports on 2181,2888,3888 on the host. 18 | 19 | Each node in the zookeeper cluster will need an entry, and the ID/IP pairs must be the same for all hosts. 20 | 21 | The network is set to `host` so that ZK can bind to the instance's IP. 22 | -------------------------------------------------------------------------------- /zookeeper/containers/0.2.0/zookeeper/entry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | run_zk() 6 | { 7 | exec /opt/zookeeper/bin/zkServer.sh start-foreground 8 | } 9 | 10 | while [ ! -f "/var/lib/zookeeper/myid" ]; do 11 | sleep 1 12 | done 13 | 14 | if [ ! -f "/opt/rancher/startup.meta" ]; then 15 | sleep 1 16 | else 17 | while [ "$(grep ^server /opt/zookeeper/conf/zoo.cfg|wc -l)" -lt "$(