├── awscli └── Dockerfile ├── backup-volume-container ├── Dockerfile └── run.sh ├── brooklyn └── Dockerfile ├── ddclient ├── Dockerfile ├── README.md ├── ddclient ├── ddclient.conf └── entrypoint.sh ├── debian-jessie └── Dockerfile ├── debian-wheezy └── Dockerfile ├── haproxy-confd ├── Dockerfile ├── README.md ├── confd │ ├── conf.d │ │ └── haproxy.toml │ ├── confd.toml │ └── templates │ │ └── haproxy.tmpl └── entrypoint.sh ├── journald-forwarder ├── Dockerfile ├── README.md └── entrypoint.sh ├── jre-8-oracle └── Dockerfile ├── jupyter-hub └── Dockerfile ├── kibana ├── Dockerfile └── entrypoint.sh ├── logstash ├── Dockerfile ├── entrypoint.sh └── patterns │ └── haproxy ├── mesos ├── Dockerfile └── entrypoint.sh ├── netcat ├── Dockerfile └── entrypoint.sh ├── openjdk-7-jdk └── Dockerfile ├── openjdk-7-jre └── Dockerfile ├── s3curl └── Dockerfile ├── tomcat7 ├── Dockerfile ├── README.md ├── logrotate ├── run.sh └── server.xml ├── xwiki-tomcat7 ├── Dockerfile ├── README.md ├── hibernate.cfg.xml └── xwiki.properties └── zookeeper ├── Dockerfile ├── entrypoint.sh └── zoo.cfg /awscli/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13 2 | 3 | MAINTAINER yaronr 4 | 5 | RUN apk add --no-cache \ 6 | python3 \ 7 | py3-pip \ 8 | && pip3 install --upgrade pip \ 9 | && pip3 install awscli \ 10 | && rm -rf /var/cache/apk/* 11 | -------------------------------------------------------------------------------- /backup-volume-container/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stackbrew/debian:wheezy 2 | MAINTAINER yaronr 3 | 4 | ENV AWS_ACCESS_KEY_ID foobar_aws_key_id 5 | ENV AWS_SECRET_ACCESS_KEY foobar_aws_access_key 6 | ENV INOTIFYWAIT_EXCLUDE 'matchnothing^' 7 | 8 | RUN (echo "deb http://http.debian.net/debian/ wheezy main contrib non-free" > /etc/apt/sources.list && echo "deb http://http.debian.net/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list && echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list) && \ 9 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 10 | 11 | RUN apt-get update && \ 12 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends ntpdate inotify-tools python-paramiko python-gobject-2 python-boto duplicity && \ 13 | apt-get clean && \ 14 | rm -rf /var/lib/apt/lists/* 15 | 16 | VOLUME /var/backup 17 | 18 | ADD ./run.sh /run.sh 19 | ENTRYPOINT ["/run.sh"] 20 | -------------------------------------------------------------------------------- /backup-volume-container/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! $# -eq 2 ] 4 | then 5 | echo "Invalid / incorrect / missing arguments supplied." 6 | echo "run " 7 | echo 8 | echo "example:" 9 | echo "run.sh s3://s3.amazonaws.com/my_bucker/my_directory 60" 10 | echo 11 | echo "This script will first try to restore backup from the given url, and then start backing up to that URL continuously, after every change + quiet period." 
12 | exit 1 13 | fi 14 | 15 | if [[ ${AWS_ACCESS_KEY_ID} = "foobar_aws_key_id" || ${AWS_SECRET_ACCESS_KEY} = "foobar_aws_access_key" ]] ; then 16 | echo "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables MUST be set" 17 | exit 1 18 | fi 19 | 20 | echo "Using $1 as S3 URL" 21 | echo "Using $2 as required quiet (file system inactivity) period before executing backup" 22 | echo 23 | echo "Updating time data to prevent problems with S3 time mismatch" 24 | 25 | inotifywait_events="modify,attrib,move,create,delete" 26 | 27 | cd /var/backup 28 | 29 | # start by restoring the last backup: 30 | # This could fail if there's nothing to restore. 31 | 32 | duplicity $DUPLICITY_OPTIONS --no-encryption $1 . 33 | 34 | # Now, start waiting for file system events on this path. 35 | # After an event, wait for a quiet period of N seconds before doing a backup 36 | 37 | while inotifywait -r -e $inotifywait_events --exclude $INOTIFYWAIT_EXCLUDE . ; do 38 | echo "Change detected." 39 | while inotifywait -r -t $2 -e $inotifywait_events --exclude $INOTIFYWAIT_EXCLUDE . ; do 40 | echo "waiting for quiet period.." 41 | done 42 | 43 | echo "starting backup" 44 | duplicity $DUPLICITY_OPTIONS --no-encryption --allow-source-mismatch --full-if-older-than 7D . $1 45 | echo "starting cleanup" 46 | duplicity remove-all-but-n-full 3 $DUPLICITY_OPTIONS --force --no-encryption --allow-source-mismatch $1 47 | duplicity cleanup $DUPLICITY_OPTIONS --force --no-encryption $1 48 | done 49 | -------------------------------------------------------------------------------- /brooklyn/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | # 18 | FROM yaronr/openjdk-7-jdk 19 | MAINTAINER yaronr 20 | 21 | ENV BROOKLYN_VERSION 0.7.0-M1 22 | 23 | # install utilities 24 | RUN apt-get -y install tar && groupadd brooklyn && useradd brooklyn -s /bin/bash -m -g brooklyn -G brooklyn && adduser brooklyn sudo 25 | 26 | USER brooklyn 27 | WORKDIR /home/brooklyn/ 28 | RUN wget -qO brooklyn-dist-${BROOKLYN_VERSION}-dist.tar.gz http://search.maven.org/remotecontent?filepath=io/brooklyn/brooklyn-dist/${BROOKLYN_VERSION}/brooklyn-dist-${BROOKLYN_VERSION}-dist.tar.gz && \ 29 | tar -zxf brooklyn-dist-${BROOKLYN_VERSION}-dist.tar.gz && \ 30 | rm brooklyn-dist-${BROOKLYN_VERSION}-dist.tar.gz && \ 31 | mkdir -p .brooklyn && \ 32 | wget -qO .brooklyn/brooklyn.properties http://brooklyncentral.github.io/use/guide/quickstart/brooklyn.properties && \ 33 | sed -i.bak 's/^# brooklyn.webconsole.security.provider = brooklyn.rest.security.provider.AnyoneSecurityProvider/brooklyn.webconsole.security.provider = brooklyn.rest.security.provider.AnyoneSecurityProvider/' .brooklyn/brooklyn.properties && \ 34 | wget -qO .brooklyn/catalog.xml http://brooklyncentral.github.io/use/guide/quickstart/catalog.xml 35 | 36 | # expose the brooklyn port 37 | EXPOSE 8081 38 | 39 | # launch brooklyn 40 | CMD brooklyn-$BROOKLYN_VERSION/bin/brooklyn launch 41 | -------------------------------------------------------------------------------- /ddclient/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stackbrew/debian:wheezy 2 | MAINTAINER yaronr 3 | 4 | RUN (echo "deb http://http.debian.net/debian/ wheezy main contrib non-free" > /etc/apt/sources.list && echo "deb http://http.debian.net/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list && echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list) && \ 5 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 6 | 7 | RUN apt-get update && \ 8 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends ssh libio-socket-ssl-perl ddclient && \ 9 | apt-get clean && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | # Add configuration files 13 | ADD ddclient /etc/default/ddclient 14 | ADD ddclient.conf /etc/ddclient.conf 15 | ADD entrypoint.sh /opt/entrypoint.sh 16 | RUN chmod +x /opt/entrypoint.sh 17 | 18 | ENTRYPOINT ["/opt/entrypoint.sh"] -------------------------------------------------------------------------------- /ddclient/README.md: -------------------------------------------------------------------------------- 1 | **Dynamic DNS client 2 | ** 3 | 4 | Use this to register a server in a DNS that supports dynamic DNS. 5 | 6 | Usage: 7 | `docker run --rm yaronr/ddclient:latest {dynamic_dns_server} {your_domain} {password} {dynamic_dns_protocol} {host} {sleep_interval_sec}` 8 | 9 | {sleep_interval_sec} default = 3600, -1 means execute once and exit, with the appropriate exit code. Any other number / default - This process will exit, if the DNS registration is not successful (or it is terminated / interrupted). Otherwise it will continuously update the DNS. 10 | 11 | All other params are required. 12 | 13 | **Example: 14 | ** 15 | 16 | `docker run --rm yaronr/ddclient:latest dynamicdns.park-your-domain.com mydomain.com pass1234 namecheap my-dns-server-name 17 | ` 18 | 19 | I created this for two reasons: 20 | 21 | 1) I have a VPN client (yaronr/softether) that runs as a service, on a random hosts in my cluster. 
Using this, I can always refer to my VPN as 'my-vpn.my-domain.com' - regardless of where the VPN docker instance will actually run. 22 | 23 | 2) As it happens, I use CoreOS. Instead of manually doing `export FLEETCTL_TUNNEL=...` every time my cluster moves or one of my nodes die, I use the DDNS name in my export. 24 | 25 | ` 26 | ExecStartPre=/bin/bash -cx ' \ 27 | echo "Updating ddns" \ 28 | /usr/bin/docker run \ 29 | --rm \ 30 | yaronr/ddclient:latest dynamicdns.park-your-domain.com multicloud.me pass1234 namecheap vpn-${CLUSTER_NAME} -1' 31 | ` 32 | 33 | **Note: 34 | ** 35 | Some DNS providers require that a DNS 'A' record of the type you're trying to update, will pre-exist, for the update to succeed. To overcome this, just set a fake A record to 127.0.0.1 for the server you plan to update using ddclient (in the example above, vpn-${CLUSTER_NAME}) 36 | 37 | **Documentation** 38 | 39 | [ddclient documentation](https://sourceforge.net/p/ddclient/wiki/Home/) 40 | 41 | Supported protocols: 42 | 43 | * dnspark 44 | * dslreports 45 | * dyndns1 46 | * dyndns2 47 | * easydns 48 | * namecheap 49 | * zoneedit1 50 | * Changeip 51 | * googledomains 52 | * duckdns 53 | * nsupdate 54 | 55 | See the [protocols page](https://sourceforge.net/p/ddclient/wiki/protocols/) for more details. 56 | -------------------------------------------------------------------------------- /ddclient/ddclient: -------------------------------------------------------------------------------- 1 | # Configuration for ddclient scripts 2 | # generated from debconf on Tue Dec 9 19:52:12 EST 2008 3 | # 4 | # /etc/default/ddclient 5 | # Set to “true” if ddclient should be run every time a 6 | # new ppp connection is established. This might be useful, 7 | # if you are using dial-on-demand 8 | run_ipup=”false” 9 | 10 | # Set to “true” if ddclient should run in daemon mode 11 | # For Systemd (CoreOS), we use a non-daemon loop process 12 | run_daemon=”false” 13 | 14 | # Set the time interval between the updates of the dynamic DNS name in seconds. 15 | # This option only takes effect if the ddclient runs in daemon mode. 16 | daemon_interval=”300″ 17 | -------------------------------------------------------------------------------- /ddclient/ddclient.conf: -------------------------------------------------------------------------------- 1 | # Configuration file for ddclient generated by debconf 2 | # 3 | # /etc/ddclient.conf 4 | 5 | daemon=0 6 | #no daemon 7 | #pid=/var/run/ddclient.pid 8 | 9 | #ssl=yes 10 | use=web, web=dynamicdns.park-your-domain.com/getip 11 | 12 | server={dynamic_dns_server} 13 | login={domain} 14 | password='{password}' 15 | protocol={protocol} 16 | {host} -------------------------------------------------------------------------------- /ddclient/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ! $# -gt 4 ] 4 | then 5 | echo 6 | echo "Invalid / incorrect / missing arguments supplied." 7 | echo "Expected parameters:" 8 | echo 9 | echo "entrypoint.sh {dynamic_dns_server} {your_domain} {password} {dynamic_dns_protocol} {host} {sleep_interval_sec}" 10 | echo 11 | echo "Example:" 12 | echo "entrypoint.sh dynamicdns.park-your-domain.com mydomain.com 12345 namecheap www" 13 | echo 14 | echo "{sleep_interval_sec} default = 3600, -1 means execute once and exit" 15 | echo "All other params are required." 16 | exit 1 17 | fi 18 | 19 | sleeptime=${6:-3600} 20 | ddclient_flags='-verbose -noquiet' 21 | 22 | on_die() 23 | { 24 | echo "Stopping..." 
25 | exit 0 26 | } 27 | 28 | trap 'on_die' TERM SIGINT 29 | 30 | sed -i 's/{dynamic_dns_server}/'$1'/' /etc/ddclient.conf && 31 | sed -i 's/{domain}/'$2'/' /etc/ddclient.conf && 32 | sed -i 's/{password}/'$3'/' /etc/ddclient.conf && 33 | sed -i 's/{protocol}/'$4'/' /etc/ddclient.conf && 34 | sed -i 's/{host}/'$5'/' /etc/ddclient.conf && 35 | 36 | if [ "$sleeptime" -eq -1 ] ; then 37 | ddclient -daemon=0 $ddclient_flags | grep -i -q 'success'; 38 | else 39 | while timeout 10 ddclient $ddclient_flags | grep -i -q 'success'; do 40 | sleep $sleeptime 41 | done 42 | fi 43 | 44 | -------------------------------------------------------------------------------- /debian-jessie/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | MAINTAINER yaronr 3 | 4 | RUN (echo "deb http://http.debian.net/debian/ jessie main contrib non-free" > /etc/apt/sources.list) && \ 5 | (echo "deb http://http.debian.net/debian jessie-backports main" > /etc/apt/sources.list.d/backports.list) && \ 6 | (echo "deb http://http.debian.net/debian/ jessie-updates main contrib non-free" >> /etc/apt/sources.list) && \ 7 | (echo "deb http://security.debian.org/ jessie/updates main contrib non-free" >> /etc/apt/sources.list) && \ 8 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 9 | 10 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ 11 | DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq --no-install-recommends && \ 12 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends wget zip nano curl && \ 13 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends --reinstall procps && \ 14 | DEBIAN_FRONTEND=noninteractive apt-get clean && \ 15 | rm -rf /var/lib/apt/lists/* 16 | 17 | -------------------------------------------------------------------------------- /debian-wheezy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stackbrew/debian:wheezy 2 | MAINTAINER yaronr 3 | 4 | RUN (echo "deb http://cdn.debian.net/debian wheezy-backports main" > /etc/apt/sources.list.d/backports.list) && \ 5 | (echo "deb http://http.debian.net/debian/ wheezy main contrib non-free" > /etc/apt/sources.list) && \ 6 | (echo "deb http://http.debian.net/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list) && \ 7 | (echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list) && \ 8 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 9 | 10 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ 11 | DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq --no-install-recommends && \ 12 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends wget zip unzip nano curl && \ 13 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends --reinstall procps && \ 14 | DEBIAN_FRONTEND=noninteractive apt-get clean && \ 15 | rm -rf /var/lib/apt/lists/* 16 | -------------------------------------------------------------------------------- /haproxy-confd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM yaronr/debian-wheezy 2 | 3 | MAINTAINER yaronr 4 | 5 | ENV ETCD_NODE 172.17.42.1:4001 6 | ENV confd_ver 0.7.1 7 | 8 | ENTRYPOINT ["/entrypoint.sh"] 9 | 10 | RUN (echo "deb http://cdn.debian.net/debian wheezy-backports main" > /etc/apt/sources.list.d/backports.list) && \ 11 | DEBIAN_FRONTEND=noninteractive apt-get update -y && \ 12 | 
DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends \ 13 | ca-certificates \ 14 | libssl1.0.0 \ 15 | software-properties-common python-software-properties \ 16 | haproxy -t wheezy-backports && \ 17 | apt-get remove --purge -y software-properties-common python-software-properties && \ 18 | apt-get clean && \ 19 | rm -rf /var/cache/apt/* /var/lib/apt/lists/* && \ 20 | sed -i 's/^ENABLED=.*/ENABLED=1/' /etc/default/haproxy 21 | 22 | RUN wget --progress=bar:force --retry-connrefused -t 5 https://github.com/kelseyhightower/confd/releases/download/v${confd_ver}/confd-${confd_ver}-linux-amd64 -O /bin/confd && \ 23 | chmod +x /bin/confd 24 | 25 | ADD entrypoint.sh /entrypoint.sh 26 | ADD confd /etc/confd 27 | 28 | # Expose ports. 29 | EXPOSE 8080 30 | EXPOSE 8090 31 | -------------------------------------------------------------------------------- /haproxy-confd/README.md: -------------------------------------------------------------------------------- 1 | haproxy combined with confd for HTTP load balancing 2 | 3 | ## Usage 4 | 5 | I apologise but I don't have the time to properly document this right now. 6 | What's important to know: 7 | 8 | * HAProxy 1.5.x backed by Confd 1.7.0 beta 1 9 | * Uses zero-downtime reconfiguration (e.g - instead of harpy reload, which will drop all connections, will gradually transfer new connections to the new config) 10 | * Added support for url rexeg (not reggae, damn you spell checker) for routing, in addition to the usual hostname pattern 11 | * Added validation for existence of keys in backing kv store, to prevent failures 12 | 13 | 14 | Create the paths allowing confd to find the services: 15 | ```bash 16 | etcdctl mkdir "/haproxy-discover/services" 17 | etcdctl mkdir "/haproxy-discover/tcp-services" 18 | ``` 19 | 20 | Depending on your needs, create one or more services or tcp-services. 21 | For instance, to create an http service named *myapp* linked to the domain *example.org* and load balancing on servers *1.2.3.4:80* (we'll call it *nodeA*) and *2.3.4.5:80* (called *nodeB*), run these commands: 22 | ```bash 23 | etcdctl set "/haproxy-discover/services/myapp/domain" "example.org" 24 | etcdctl set "/haproxy-discover/services/myapp/upstreams/nodeA" "1.2.3.4:80" 25 | etcdctl set "/haproxy-discover/services/myapp/upstreams/nodeB" "2.3.4.5:80" 26 | ``` 27 | 28 | 29 | Start the container making sure to expose port 80 on the host machine 30 | 31 | ```bash 32 | docker run -e ETCD_NODE=172.17.42.1:4001 -p 1000:1000 -p 80:8080 yaronr/haproxy-confd 33 | ``` 34 | 35 | 36 | To *add an upstream node*, let's say *nodeB2*, *2.3.4.5:90*, you just have to run this, and the configuration should safely be updated ! 37 | ```bash 38 | etcdctl set "/haproxy-discover/services/myapp/upstreams/nodeB2" "2.3.4.5:90" 39 | ``` 40 | 41 | To *remove an upstream server*, let's say ... *nodeB2* (added by mistake ?), just run 42 | ```bash 43 | etcdctl rm "/haproxy-discover/services/myapp/upstreams/nodeB2" 44 | ``` 45 | 46 | To *remove a service*, and so a directory, you must type 47 | ```bash 48 | etcdctl rmdir "/haproxy-discover/services/myapp" 49 | ``` 50 | 51 | The commands for a tcp-service are the same but with *tcp-services* instead of *services* 52 | 53 | 54 | Have fun ! 
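For instance, a sketch of a tcp-service definition, inferred from `confd/templates/haproxy.tmpl` below (the value of the `ports` key is used verbatim as the HAProxy `bind` argument; the service name, port and addresses here are illustrative):

```bash
etcdctl set "/haproxy-discover/tcp-services/mydb/ports" "*:3306"
etcdctl set "/haproxy-discover/tcp-services/mydb/upstreams/nodeA" "1.2.3.4:3306"
etcdctl set "/haproxy-discover/tcp-services/mydb/upstreams/nodeB" "2.3.4.5:3306"
```

Remember to publish the chosen listen port when starting the container (e.g. add `-p 3306:3306` to the `docker run` command above).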
-------------------------------------------------------------------------------- /haproxy-confd/confd/conf.d/haproxy.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | prefix = "haproxy-discover" 3 | keys = [ 4 | "services", 5 | "tcp-services" 6 | ] 7 | 8 | owner = "haproxy" 9 | mode = "0644" 10 | src = "haproxy.tmpl" 11 | dest = "/etc/haproxy/haproxy.cfg" 12 | 13 | check_cmd = "/usr/sbin/haproxy -c -f {{ .src }}" 14 | reload_cmd = "haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D -sf $(cat /var/run/haproxy.pid)" 15 | 16 | -------------------------------------------------------------------------------- /haproxy-confd/confd/confd.toml: -------------------------------------------------------------------------------- 1 | interval = 10 2 | debug = false 3 | verbose = false 4 | quiet = true 5 | -------------------------------------------------------------------------------- /haproxy-confd/confd/templates/haproxy.tmpl: -------------------------------------------------------------------------------- 1 | global 2 | daemon 3 | log /dev/log local0 4 | log /dev/log local1 notice 5 | pidfile /var/run/haproxy.pid 6 | nbproc 2 #no of processes 7 | maxconn 128000 8 | user haproxy 9 | group haproxy 10 | 11 | defaults 12 | log global 13 | option log-health-checks 14 | # make sure log-format is on a single line 15 | log-format {"type":"haproxy","timestamp":%Ts,"http_status":%ST,"http_request":"%r","remote_addr":"%ci","bytes_read":%B,"upstream_addr":"%si","backend_name":"%b","retries":%rc,"bytes_uploaded":%U,"upstream_response_time":"%Tr","upstream_connect_time":"%Tc","session_duration":"%Tt","termination_state":"%ts"} 16 | mode http 17 | option httplog 18 | option dontlognull 19 | option forwardfor 20 | option http-keep-alive 21 | option http-tunnel 22 | timeout connect 5000 23 | timeout client 50000 24 | timeout server 50000 25 | errorfile 400 /etc/haproxy/errors/400.http 26 | errorfile 403 /etc/haproxy/errors/403.http 27 | errorfile 408 /etc/haproxy/errors/408.http 28 | errorfile 500 /etc/haproxy/errors/500.http 29 | errorfile 502 /etc/haproxy/errors/502.http 30 | errorfile 503 /etc/haproxy/errors/503.http 31 | errorfile 504 /etc/haproxy/errors/504.http 32 | 33 | frontend http-in 34 | bind *:8080 35 | 36 | {{range $service := ls "/services"}} 37 | 38 | {{$key := printf "/services/%s/domain" $service}} {{if exists $key}} 39 | acl host_{{$service}} hdr(host) -i {{getv $key}} 40 | {{end}} 41 | {{$key := printf "/services/%s/url_reg" $service}} {{if exists $key}} 42 | acl host_{{$service}} url_reg -i {{getv $key}} 43 | {{end}} 44 | acl {{$service}}_health path_beg /ping 45 | {{end}} 46 | 47 | {{range $service := ls "/services"}} 48 | use_backend {{$service}} if host_{{$service}} 49 | use_backend {{$service}} if {{$service}}_health 50 | {{end}} 51 | 52 | {{range $service := ls "/services"}} 53 | backend {{$service}} 54 | balance leastconn 55 | {{range $upstream := ls (printf "/services/%s/upstreams" $service)}} 56 | server {{$upstream}} {{printf "/services/%s/upstreams/%s" $service $upstream | getv}} check 57 | {{end}} 58 | {{end}} 59 | 60 | 61 | {{range $name := ls "/tcp-services"}} 62 | listen {{$name}} 63 | mode tcp 64 | bind {{printf "/tcp-services/%s/ports" $name | getv}} 65 | {{range $upstream := ls (printf "/tcp-services/%s/upstreams" $name)}} 66 | server {{$upstream}} {{printf "/tcp-services/%s/upstreams/%s" $name $upstream | getv}} check 67 | {{end}} 68 | {{end}} 69 | 70 | listen MyStats 71 | mode http 72 | bind 0.0.0.0:8090 73 | 
stats enable 74 | stats uri / 75 | stats refresh 5s 76 | stats show-node 77 | stats show-legends 78 | 79 | # if authentication is wanted 80 | acl auth_ok http_auth(stats-auth) 81 | http-request auth unless auth_ok 82 | 83 | userlist stats-auth 84 | user admin insecure-password p4ss 85 | -------------------------------------------------------------------------------- /haproxy-confd/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$ETCD_NODE" ] 4 | then 5 | echo "Missing ETCD_NODE env var" 6 | exit -1 7 | fi 8 | 9 | set -eo pipefail 10 | 11 | #confd will start haproxy, since conf will be different than existing (which is null) 12 | 13 | echo "[haproxy-confd] booting container. ETCD: $ETCD_NODE" 14 | 15 | function config_fail() 16 | { 17 | echo "Failed to start due to config error" 18 | exit -1 19 | } 20 | 21 | # Loop until confd has updated the haproxy config 22 | n=0 23 | until confd -onetime -node "$ETCD_NODE"; do 24 | if [ "$n" -eq "4" ]; then config_fail; fi 25 | echo "[haproxy-confd] waiting for confd to refresh haproxy.cfg" 26 | n=$((n+1)) 27 | sleep $n 28 | done 29 | 30 | echo "[haproxy-confd] Initial HAProxy config created. Starting confd" 31 | 32 | confd -node "$ETCD_NODE" 33 | -------------------------------------------------------------------------------- /journald-forwarder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/netcat 2 | MAINTAINER yaronr 3 | 4 | #Ignore /etc/hosts. Resolve this host via DNS 5 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf 6 | 7 | ADD entrypoint.sh /entrypoint.sh 8 | RUN chmod a+x /entrypoint.sh 9 | 10 | ENTRYPOINT ["./entrypoint.sh"] 11 | -------------------------------------------------------------------------------- /journald-forwarder/README.md: -------------------------------------------------------------------------------- 1 | ## Journald (and anything else, really) forwarder 2 | 3 | 4 | Usage: 5 | 6 | ExecStart=/bin/bash -cx ' \ 7 | set -eo pipefail; \ 8 | bridge_ip="$(ifconfig docker0 | grep \'inet \' | awk \'{ print $2}\')"; \ 9 | /usr/bin/journalctl -o short -f | /usr/bin/docker run \ 10 | --name=yaronr/journald-forwarder \ 11 | --rm \ 12 | -ti \ 13 | --net=host \ 14 | -e MULTICLOUD_DNS=$bridge_ip \ 15 | -e MULTICLOUD_DNS_SEARCH=${CLUSTER_NAME}.multicloud.local \ 16 | ${DOCKER_IMAGE} logstash 8080 ' 17 | 18 | 19 | Optional: 20 | 21 | -e MULTICLOUD_DNS= 22 | -e MULTICLOUD_DNS_SEARCH= 23 | 24 | The above would override the resolv.conf of the container (even if --net=host) with the values you specify. 
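To verify what the override produces, something along these lines should work (assuming the image is tagged `yaronr/journald-forwarder`; the entrypoint simply rewrites `/etc/resolv.conf` and then execs the given command):

    docker run --rm \
        -e MULTICLOUD_DNS=172.17.42.1 \
        -e MULTICLOUD_DNS_SEARCH=mycluster.multicloud.local \
        yaronr/journald-forwarder cat /etc/resolv.conf
    # expected output:
    # nameserver 172.17.42.1
    # search mycluster.multicloud.local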
-------------------------------------------------------------------------------- /journald-forwarder/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -n $MULTICLOUD_DNS && -n MULTICLOUD_DNS_SEARCH ]] ; then 4 | echo "Replacing /etc/resolv.conf with:" 5 | echo "nameserver $MULTICLOUD_DNS" 6 | echo "search $MULTICLOUD_DNS_SEARCH" 7 | echo "nameserver $MULTICLOUD_DNS" > /etc/resolv.conf 8 | echo "search $MULTICLOUD_DNS_SEARCH" >> /etc/resolv.conf 9 | fi; 10 | 11 | exec "$@" 12 | -------------------------------------------------------------------------------- /jre-8-oracle/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/debian-jessie 2 | MAINTAINER yaronr 3 | 4 | ENV JAVA_VERSION_MAJOR 8 5 | ENV JAVA_VERSION_MINOR 45 6 | ENV JAVA_VERSION_BUILD 14 7 | ENV JAVA_PACKAGE server-jre 8 | 9 | # Download and unarchive Java 10 | RUN curl -kLOH "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie"\ 11 | http://download.oracle.com/otn-pub/java/jdk/${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-b${JAVA_VERSION_BUILD}/${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar.gz &&\ 12 | gunzip ${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar.gz &&\ 13 | tar -xf ${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar -C /opt &&\ 14 | rm ${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar &&\ 15 | ln -s /opt/jdk1.${JAVA_VERSION_MAJOR}.0_${JAVA_VERSION_MINOR} /opt/jdk &&\ 16 | rm -rf /opt/jdk/*src.zip \ 17 | /opt/jdk/man \ 18 | /opt/jdk/lib/missioncontrol \ 19 | /opt/jdk/lib/visualvm \ 20 | /opt/jdk/lib/*javafx* \ 21 | /opt/jdk/jre/lib/plugin.jar \ 22 | /opt/jdk/jre/lib/ext/jfxrt.jar \ 23 | /opt/jdk/jre/bin/javaws \ 24 | /opt/jdk/jre/lib/javaws.jar \ 25 | /opt/jdk/jre/lib/desktop \ 26 | /opt/jdk/jre/plugin \ 27 | /opt/jdk/jre/lib/deploy* \ 28 | /opt/jdk/jre/lib/*javafx* \ 29 | /opt/jdk/jre/lib/*jfx* \ 30 | /opt/jdk/jre/lib/amd64/libdecora_sse.so \ 31 | /opt/jdk/jre/lib/amd64/libprism_*.so \ 32 | /opt/jdk/jre/lib/amd64/libfxplugins.so \ 33 | /opt/jdk/jre/lib/amd64/libglass.so \ 34 | /opt/jdk/jre/lib/amd64/libgstreamer-lite.so \ 35 | /opt/jdk/jre/lib/amd64/libjavafx*.so \ 36 | /opt/jdk/jre/lib/amd64/libjfx*.so 37 | 38 | # Set environment 39 | ENV JAVA_HOME /opt/jdk 40 | ENV PATH ${PATH}:${JAVA_HOME}/bin 41 | -------------------------------------------------------------------------------- /jupyter-hub/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jupyterhub/jupyterhub:latest 2 | LABEL maintainer="Yaron Rosenbaum" 3 | 4 | # Install pip3 install notebook 5 | RUN pip3 install --no-cache-dir notebook jupyterlab ray 6 | 7 | # add default users and passwords 8 | RUN useradd -ms /bin/bash yaron 9 | RUN echo 'yaron:iamyaron' | chpasswd 10 | 11 | RUN useradd -ms /bin/bash mike 12 | RUN echo 'yaron:iammike' | chpasswd 13 | -------------------------------------------------------------------------------- /kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/jre-8-oracle 2 | MAINTAINER yaronr 3 | 4 | ENV version 4.1.1-linux-x64 5 | ENV elasticsearch_server_url elasticsearch 6 | ENV elasticsearch_server_port 9200 7 | 8 | RUN wget --no-check-certificate --progress=bar:force --retry-connrefused -t 5 
https://download.elasticsearch.org/kibana/kibana/kibana-${version}.tar.gz -O /tmp/kibana.tar.gz && \ 9 | (cd /tmp && tar zxf kibana.tar.gz && mv kibana-* /opt/kibana && \ 10 | rm kibana.tar.gz) 11 | 12 | ADD entrypoint.sh /entrypoint.sh 13 | RUN chmod a+x /entrypoint.sh 14 | 15 | EXPOSE 5601 16 | ENTRYPOINT ["/entrypoint.sh"] 17 | 18 | #Ignore /etc/hosts 19 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf -------------------------------------------------------------------------------- /kibana/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | sed "s/^elasticsearch_url:.*/elasticsearch_url: http:\/\/${elasticsearch_server_url}:${elasticsearch_server_port}/" -i /opt/kibana/config/kibana.yml 6 | 7 | /opt/kibana/bin/kibana 8 | -------------------------------------------------------------------------------- /logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/jre-8-oracle 2 | MAINTAINER yaronr 3 | 4 | ENV version 1.4.2 5 | 6 | RUN wget --progress=bar:force --no-check-certificate --retry-connrefused -t 5 https://download.elasticsearch.org/logstash/logstash/logstash-${version}.tar.gz -O /tmp/logstash.tar.gz && \ 7 | (cd /tmp && tar zxf logstash.tar.gz && mv logstash-${version} /opt/logstash && \ 8 | rm logstash.tar.gz) && \ 9 | (cd /opt/logstash && \ 10 | /opt/logstash/bin/plugin install contrib) 11 | 12 | ADD entrypoint.sh /entrypoint.sh 13 | RUN chmod a+x /entrypoint.sh 14 | 15 | #sed is too complicated... really. give me a break 16 | ADD /patterns/haproxy /opt/logstash/patterns/haproxy 17 | 18 | EXPOSE 514 514/udp 19 | ENTRYPOINT ["/entrypoint.sh"] 20 | 21 | #Ignore /etc/hosts 22 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf 23 | -------------------------------------------------------------------------------- /logstash/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /opt/logstash/bin/logstash "$@" 4 | 5 | -------------------------------------------------------------------------------- /logstash/patterns/haproxy: -------------------------------------------------------------------------------- 1 | ## These patterns were tested w/ haproxy-1.4.15 2 | 3 | ## Documentation of the haproxy log formats can be found at the following links: 4 | ## http://code.google.com/p/haproxy-docs/wiki/HTTPLogFormat 5 | ## http://code.google.com/p/haproxy-docs/wiki/TCPLogFormat 6 | 7 | HAPROXYTIME (?!<[0-9])%{HOUR:haproxy_hour}:%{MINUTE:haproxy_minute}(?::%{SECOND:haproxy_second})(?![0-9]) 8 | HAPROXYDATE %{MONTHDAY:haproxy_monthday}/%{MONTH:haproxy_month}/%{YEAR:haproxy_year}:%{HAPROXYTIME:haproxy_time}.%{INT:haproxy_milliseconds} 9 | 10 | # Override these default patterns to parse out what is captured in your haproxy.cfg 11 | # HAPROXYCAPTUREDREQUESTHEADERS %{DATA:captured_request_headers} 12 | HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:captured_response_headers} 13 | 14 | # Example: 15 | # These haproxy config lines will add data to the logs that are captured 16 | # by the patterns below. Place them in your custom patterns directory to 17 | # override the defaults. 
18 | # 19 | # capture request header Host len 40 20 | # capture request header X-Forwarded-For len 50 21 | # capture request header Accept-Language len 50 22 | # capture request header Referer len 200 23 | # capture request header User-Agent len 200 24 | # 25 | # capture response header Content-Type len 30 26 | # capture response header Content-Encoding len 10 27 | # capture response header Cache-Control len 200 28 | # capture response header Last-Modified len 200 29 | # 30 | HAPROXYCAPTUREDREQUESTHEADERS %{DATA:request_header_host}\|%{DATA:request_header_x_forwarded_for}\|%{DATA:request_header_accept_language}\|%{DATA:request_header_referer}\|%{DATA:request_header_user_agent} 31 | # HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:response_header_content_type}\|%{DATA:response_header_content_encoding}\|%{DATA:response_header_cache_control}\|%{DATA:response_header_last_modified} 32 | 33 | # parse a haproxy 'httplog' line 34 | HAPROXYHTTP %{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{DATA:captured_request_cookie} %{DATA:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} (\{%{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:http_verb} (%{URIPROTO:http_proto}://)?(?:%{USER:http_user}(?::[^@]*)?@)?(?:%{URIHOST:http_host})?(?:%{URIPATHPARAM:http_request})?( HTTP/%{NUMBER:http_version})?))?" 35 | 36 | # parse a haproxy 'tcplog' line 37 | HAPROXYTCP %{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} 38 | -------------------------------------------------------------------------------- /mesos/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM yaronr/debian-jessie 2 | MAINTAINER yaronr 3 | 4 | # Setup 5 | # Mesos fetcher uses unzip to extract staged zip files 6 | # for lsb, see http://affy.blogspot.co.il/2014/11/is-using-lsbrelease-cs-good-idea-inside.html 7 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF && \ 8 | DISTRO=debian && \ 9 | CODENAME=wheezy && \ 10 | echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | tee /etc/apt/sources.list.d/mesosphere.list && \ 11 | DEBIAN_FRONTEND=noninteractive apt-get -y update && \ 12 | apt-get -y install -yq --no-install-recommends mesos marathon chronos unzip && \ 13 | apt-get clean && \ 14 | rm -rf /var/lib/apt/lists/* && \ 15 | rm /etc/mesos/zk && \ 16 | rm /etc/mesos-master/quorum 17 | 18 | #Ignore /etc/hosts. 
Resolve this host via DNS 19 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf 20 | 21 | ADD entrypoint.sh /entrypoint.sh 22 | RUN chmod a+x /entrypoint.sh 23 | 24 | ENTRYPOINT ["./entrypoint.sh"] 25 | -------------------------------------------------------------------------------- /mesos/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -n $MULTICLOUD_DNS && -n MULTICLOUD_DNS_SEARCH ]] ; then 4 | echo "Replacing /etc/resolv.conf with:" 5 | echo "nameserver $MULTICLOUD_DNS" 6 | echo "search $MULTICLOUD_DNS_SEARCH" 7 | echo "nameserver $MULTICLOUD_DNS" > /etc/resolv.conf 8 | echo "search $MULTICLOUD_DNS_SEARCH" >> /etc/resolv.conf 9 | fi; 10 | 11 | echo "That was a good nap. Now to work..." 12 | exec "$@" 13 | -------------------------------------------------------------------------------- /netcat/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/debian-wheezy 2 | MAINTAINER yaronr 3 | 4 | RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && \ 5 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends netcat && \ 6 | apt-get clean && \ 7 | rm -rf /var/cache/apt/* /var/lib/apt/lists/* 8 | 9 | ADD entrypoint.sh /entrypoint.sh 10 | RUN chmod a+x /entrypoint.sh 11 | 12 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /netcat/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | echo "That was a good nap. Now to work..." 6 | netcat "$@" 7 | -------------------------------------------------------------------------------- /openjdk-7-jdk/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stackbrew/debian:wheezy 2 | MAINTAINER yaronr 3 | 4 | RUN (echo "deb http://http.debian.net/debian/ wheezy main contrib non-free" > /etc/apt/sources.list && echo "deb http://http.debian.net/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list && echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list) && \ 5 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 6 | 7 | RUN apt-get update && \ 8 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends openjdk-7-jre-headless wget zip nano && \ 9 | apt-get clean && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | ENV JAVA_HOME /usr/lib/jvm/java-7-openjdk-amd64 13 | -------------------------------------------------------------------------------- /openjdk-7-jre/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stackbrew/debian:wheezy 2 | MAINTAINER yaronr 3 | 4 | RUN (echo "deb http://http.debian.net/debian/ wheezy main contrib non-free" > /etc/apt/sources.list && echo "deb http://http.debian.net/debian/ wheezy-updates main contrib non-free" >> /etc/apt/sources.list && echo "deb http://security.debian.org/ wheezy/updates main contrib non-free" >> /etc/apt/sources.list) && \ 5 | echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup 6 | 7 | RUN apt-get update && \ 8 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends openjdk-7-jre-headless wget zip && \ 9 | apt-get clean && \ 10 | rm -rf /var/lib/apt/lists/* 11 | -------------------------------------------------------------------------------- /s3curl/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | MAINTAINER yaronr 4 | 5 | RUN apt-get update && \ 6 | apt-get install -y perl curl wget unzip libdigest-hmac-perl && \ 7 | apt-get clean && \ 8 | rm -rf /var/lib/apt/lists/* 9 | 10 | # install s3-curl 11 | RUN wget http://s3.amazonaws.com/doc/s3-example-code/s3-curl.zip && \ 12 | unzip s3-curl && \ 13 | mv /s3-curl/s3curl.pl /usr/local/bin/s3curl.pl && \ 14 | chmod +x /usr/local/bin/s3curl.pl && \ 15 | rm s3-curl.zip && \ 16 | rm -rf s3-curl 17 | 18 | RUN s3curl.pl --id=$AWS_ACCESS_KEY_ID --key=$AWS_ACCESS_KEY_SECRET -- $S3_FILE_URL -vv -O 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /tomcat7/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM yaronr/openjdk-7-jre 2 | MAINTAINER yaronr 3 | 4 | ENV TOMCATVER 7.0.57 5 | ENV MYSQL_CONNECTOR_JAVA_VER 5.1.34 6 | ENV TOMCAT_HOME /opt/tomcat 7 | 8 | RUN (wget --progress=bar:force --retry-connrefused -t 5 -O /tmp/tomcat7.tar.gz http://apache.mivzakim.net/tomcat/tomcat-7/v${TOMCATVER}/bin/apache-tomcat-${TOMCATVER}.tar.gz && \ 9 | cd /opt && \ 10 | tar zxf /tmp/tomcat7.tar.gz && \ 11 | mv /opt/apache-tomcat* ${TOMCAT_HOME} && \ 12 | rm /tmp/tomcat7.tar.gz) && \ 13 | rm -rf ${TOMCAT_HOME}/webapps/ && \ 14 | mkdir ${TOMCAT_HOME}/webapps && \ 15 | wget --progress=bar:force --retry-connrefused -t 5 -O /tmp/mysql-connector-java.tar.gz http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-${MYSQL_CONNECTOR_JAVA_VER}.tar.gz && \ 16 | tar zxf /tmp/mysql-connector-java.tar.gz -C /tmp && \ 17 | rm /tmp/mysql-connector-java.tar.gz && \ 18 | mv /tmp/mysql-connector-java-${MYSQL_CONNECTOR_JAVA_VER}/mysql-connector-java-${MYSQL_CONNECTOR_JAVA_VER}-bin.jar /opt/tomcat/lib/ && \ 19 | rm -rf /tmp/mysql-connector-java-${MYSQL_CONNECTOR_JAVA_VER} 20 | 21 | # 'Host manager' and 'manager' examples etc tomcat apps are are removed for security hardening 22 | 23 | ADD ./run.sh /usr/local/bin/run 24 | 25 | # RUN mv /etc/cron.daily/logrotate /etc/cron.hourly/logrotate 26 | ADD logrotate /etc/logrotate.d/tomcat7 27 | RUN chmod 644 /etc/logrotate.d/tomcat7 28 | 29 | # User limits 30 | RUN sed -i.bak '/\# End of file/ i\\# Following 2 lines added by Dockerfile' /etc/security/limits.conf && \ 31 | sed -i.bak '/\# End of file/ i\\* hard nofile 65536' /etc/security/limits.conf && \ 32 | sed -i.bak '/\# End of file/ i\\* soft nofile 65536\n' /etc/security/limits.conf 33 | 34 | EXPOSE 8080 35 | 36 | CMD ["/bin/bash", "-e", "/usr/local/bin/run"] 37 | 38 | ADD ./server.xml ${TOMCAT_HOME}/conf/ 39 | 40 | # 41 | #ADD yourfile.war ${TOMCAT_HOME}/webapps/ROOT.war 42 | -------------------------------------------------------------------------------- /tomcat7/README.md: -------------------------------------------------------------------------------- 1 | 2 | To deploy a specific .war file to ROOT, see the commented out section of the `Dockerfile`. 
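A minimal child image for that (the `.war` name is illustrative; `${TOMCAT_HOME}` resolves to `/opt/tomcat` and is inherited from the base image):

```Dockerfile
FROM yaronr/tomcat7
# deploy your application as the ROOT webapp
ADD ./yourfile.war ${TOMCAT_HOME}/webapps/ROOT.war
```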
3 | 4 | The following environment variables can be passed to the docker image: 5 | 6 | `ADMIN_USER` (default: admin) 7 | 8 | `ADMIN_PASS` (default: tomcat) 9 | 10 | `MAX_UPLOAD_SIZE` (default: 52428800) 11 | 12 | `CATALINA_OPTS` (default: "-Xms128m -Xmx1024m -XX:PermSize=128m -XX:MaxPermSize=256m") -------------------------------------------------------------------------------- /tomcat7/logrotate: -------------------------------------------------------------------------------- 1 | compress 2 | daily 3 | rotate 8 4 | missingok 5 | dateext 6 | 7 | 8 | /var/log/tomcat7/*.*{ 9 | olddir /var/log/tomcat7/old 10 | copytruncate 11 | size 500m 12 | } 13 | -------------------------------------------------------------------------------- /tomcat7/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ADMIN_USER=${ADMIN_USER:-admin} 3 | ADMIN_PASS=${ADMIN_PASS:-tomcat} 4 | MAX_UPLOAD_SIZE=${MAX_UPLOAD_SIZE:-52428800} 5 | CATALINA_OPTS=${CATALINA_OPTS:-"-Xms128m -Xmx1024m -XX:PermSize=128m -XX:MaxPermSize=256m"} 6 | 7 | export CATALINA_OPTS="${CATALINA_OPTS}" 8 | 9 | cat << EOF > /opt/tomcat/conf/tomcat-users.xml 10 | 11 | 12 | 13 | 14 | EOF 15 | 16 | if [ -f "/opt/tomcat/webapps/manager/WEB-INF/web.xml" ] 17 | then 18 | sed -i "s#.*max-file-size.*#\t${MAX_UPLOAD_SIZE}#g" /opt/tomcat/webapps/manager/WEB-INF/web.xml 19 | sed -i "s#.*max-request-size.*#\t${MAX_UPLOAD_SIZE}#g" /opt/tomcat/webapps/manager/WEB-INF/web.xml 20 | fi 21 | 22 | /bin/bash -e /opt/tomcat/bin/catalina.sh run 23 | -------------------------------------------------------------------------------- /tomcat7/server.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | 15 | 16 | 17 | 18 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 38 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /xwiki-tomcat7/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM yaronr/tomcat7 2 | MAINTAINER yaronr 3 | 4 | ENV XWIKI_VER 6.2.3 5 | ENV MYSQL_CONN_VER 5.1.33 6 | ENV TOMCAT_HOME /opt/tomcat 7 | 8 | #Unfortunately, this also installs Mysql client, server, tomcat, and pearl... 
9 | #RUN wget -q "http://maven.xwiki.org/public.gpg" -O- | apt-key add - && \ 10 | # wget "http://maven.xwiki.org/stable/xwiki-stable.list" -P /etc/apt/sources.list.d/ && \ 11 | # apt-get update && \ 12 | # apt-get install -y --no-install-recommends xwiki-enterprise-common && \ 13 | # apt-get clean 14 | 15 | RUN wget --progress=bar:force --retry-connrefused -t 5 -O /tmp/xwiki.war "http://download.forge.ow2.org/xwiki/xwiki-enterprise-web-${XWIKI_VER}.war" && \ 16 | unzip -q /tmp/xwiki.war -d "${TOMCAT_HOME}/webapps/xwiki" && \ 17 | rm /tmp/xwiki.war && \ 18 | wget --progress=bar:force --retry-connrefused -t 5 -P /tmp "http://central.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_CONN_VER}/mysql-connector-java-${MYSQL_CONN_VER}.jar" && \ 19 | mv /tmp/mysql-connector-java*.jar ${TOMCAT_HOME}/webapps/xwiki/WEB-INF/lib 20 | 21 | ADD ./hibernate.cfg.xml ${TOMCAT_HOME}/webapps/xwiki/WEB-INF/ 22 | ADD ./xwiki.properties ${TOMCAT_HOME}/webapps/xwiki/WEB-INF/ 23 | 24 | # OK I don't have the time right now to handle the following as -e envs through a start / configure script: 25 | #ENV connection.url 26 | #ENV connection.username 27 | #ENV connection.password 28 | #ENV connection.driver_class 29 | #ENV dialect 30 | #ENV dbcp.ps.maxActive 31 | 32 | -------------------------------------------------------------------------------- /xwiki-tomcat7/README.md: -------------------------------------------------------------------------------- 1 | 2 | # xWiki-Tomcat7 3 | 4 | ### This is a xWiki-Tomcat7 docker setup that's opinionated towards an external MySQL server. 5 | 6 | The following environment variables can be passed to the docker image: 7 | 8 | `MAX_UPLOAD_SIZE` (default: 52428800) 9 | 10 | `CATALINA_OPTS` mandatory, and must include the following -D params: `hibernate.connection.url=jdbc:mysql:///, hibernate.connection.username=.., hibernate.connection.password=..` 11 | 12 | 13 | 14 | State in xWiki is stored at the Database (mainly), and file system (extensions, etc). 15 | 16 | For xWiki to really be ephemeral, you need to figure out a way to persist and sync the file-system state between runs / instances. 17 | 18 | environment.permanentDirectory defines the folder where file-system 'state' is stored. 19 | 20 | environment.permanentDirectory has been set to /var/local/xwiki/ 21 | You may choose to -v mount that directory and map it to a docker volume container or the host. 22 | 23 | To connect to different databases - change the hibernate.cfg.xml. -Dhibernate.* CATALINA_OPTS are equivalent to hibernate.cfg 24 | In the future, the DB connection parameters could be set via -e env vars, but that requires tinkering with the startup script and 'sed', and I don't have time for that now. 
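To persist that file-system state across container instances, mount the permanent directory as a volume; a sketch (the host path is illustrative), extending the example below:

> docker run --name=${CONTAINER_NAME} -p 8080:8080 -v /srv/xwiki-data:/var/local/xwiki -e CATALINA_OPTS="-Xms128m -Xmx1024m -XX:PermSize=128m -XX:MaxPermSize=256m -Dhibernate.connection.url=jdbc:mysql://my-db:3306/xwiki -Dhibernate.connection.username=xwiki -Dhibernate.connection.password=p4ssw0rd" yaronr/xwiki-tomcat7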
25 | 26 | ### Example: 27 | > docker run --name=${CONTAINER_NAME} -p 8080:8080 -e CATALINA_OPTS="-Xms128m -Xmx1024m -XX:PermSize=128m -XX:MaxPermSize=256m -Dhibernate.connection.url=jdbc:mysql://my-db:3306/xwiki -Dhibernate.connection.username=xwiki -Dhibernate.connection.password=p4ssw0rd" yaronr/xwiki-tomcat7 -------------------------------------------------------------------------------- /xwiki-tomcat7/hibernate.cfg.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 22 | 23 | 26 | 27 | 28 | 29 | 36 | 37 | 38 | 39 | false 40 | true 41 | 42 | 2 43 | 2 44 | 45 | 46 | false 47 | 48 | 50 | false 51 | 50 52 | 5 53 | 30000 54 | 1 55 | 1 56 | 120000 57 | 20 58 | com.xpn.xwiki.store.DBCPConnectionProvider 59 | 60 | 70 | 71 | 82 | 83 | 84 | 98 | 99 | 106 | com.mysql.jdbc.Driver 107 | org.hibernate.dialect.MySQL5InnoDBDialect 108 | 20 109 | 110 | 111 | 112 | 113 | 114 | 127 | 128 | 148 | 149 | 171 | 172 | 184 | 185 | 198 | 199 | 200 | 201 | -------------------------------------------------------------------------------- /xwiki-tomcat7/xwiki.properties: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------- 2 | # See the NOTICE file distributed with this work for additional 3 | # information regarding copyright ownership. 4 | # 5 | # This is free software; you can redistribute it and/or modify it 6 | # under the terms of the GNU Lesser General Public License as 7 | # published by the Free Software Foundation; either version 2.1 of 8 | # the License, or (at your option) any later version. 9 | # 10 | # This software is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 | # Lesser General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public 16 | # License along with this software; if not, write to the Free 17 | # Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 18 | # 02110-1301 USA, or see the FSF site: http://www.fsf.org. 19 | # --------------------------------------------------------------------------- 20 | 21 | # This is the new XWiki configuration file. In the future it'll replace the old 22 | # xwiki.cfg file. However right now it's only used by some XWiki components. 23 | # As time progresses more and more component will get their configurations from 24 | # this file. 25 | 26 | #------------------------------------------------------------------------------------- 27 | # Core 28 | #------------------------------------------------------------------------------------- 29 | 30 | #-# [Since 1.8RC2] 31 | #-# Specifies the default syntax to use when creating new documents. 32 | #-# Default value is xwiki/2.1. 33 | # core.defaultDocumentSyntax = xwiki/2.1 34 | 35 | #-# [Since 2.4M1] 36 | #-# Indicate if the rendering cache is enabled. 37 | #-# Default value is false. 38 | # core.renderingcache.enabled=true 39 | 40 | #-# [Since 2.4M1] 41 | #-# A list of Java regex patterns matching full documents reference. 42 | # core.renderingcache.documents=wiki:Space\.Page 43 | # core.renderingcache.documents=wiki:Space\..* 44 | # core.renderingcache.documents=Space\.PageOnWhateverWiki 45 | 46 | #-# [Since 2.4M1] 47 | #-# The time (in seconds) after which data should be removed from the cache when not used. 48 | #-# Default value is 300 (5 min). 
49 | # core.renderingcache.duration=300 50 | 51 | #-# [Since 2.4M1] 52 | #-# The size of the rendering cache. Not that it's not the number of cached documents but the number of cached results. 53 | #-# (For a single document several cache entries are created, because each action, language and request query string 54 | #-# produces a unique rendering result) 55 | #-# Default value is 100. 56 | # core.renderingcache.size=100 57 | 58 | #------------------------------------------------------------------------------------- 59 | # Environment 60 | #------------------------------------------------------------------------------------- 61 | 62 | #-# [Since 3.5M1, replaces the container.persistentDirectory property] 63 | #-# The directory used to store persistent data (data that should persist across server restarts). This is an 64 | #-# important directory containing important data and thus it should never be deleted (it should be backed-up along 65 | #-# with the database). 66 | #-# For example this is where the Extension Manager stores downloaded extensions if the extension.localRepository 67 | #-# property isn't configured. 68 | #-# 69 | #-# You can set: 70 | #-# * an absolute path (recommended) 71 | #-# * a relative path (not recommended at all)but in this case the directory will be relative to where the XWiki server 72 | #-# is started and thus the user under which XWiki is started will need write permissions for the current directory 73 | #-# 74 | #-# Note if the system property xwiki.data.dir is set then this property is not used. 75 | #-# If neither the system property nor this configuration value here are set then the Servlet container's temporary 76 | #-# directory is used; This is absolutely not recommended since that directory could be wiped out at any time and you 77 | #-# should specify a value. 78 | environment.permanentDirectory=/var/local/xwiki/ 79 | 80 | #------------------------------------------------------------------------------------- 81 | # Rendering 82 | #------------------------------------------------------------------------------------- 83 | 84 | #-# [Since 1.8RC2] 85 | #-# Specifies how links labels are displayed when the user doesn't specify the label explicitly. 86 | #-# Valid values: 87 | #-# %w: wiki name 88 | #-# %s: space name 89 | #-# %p: page name 90 | #-# %P: page name with spaces between camel case words, i.e. "My Page" if the page name is "MyPage" 91 | #-# %t: page title 92 | #-# 93 | #-# Note that if the page title is empty or not defined then it defaults to %p. This is also the case 94 | #-# if the title cannot be retrieved for the document. 95 | #-# 96 | #-# The default is "%p". Some examples: "%s.%p", "%w:%s.%p". 97 | # rendering.linkLabelFormat = %p 98 | 99 | #-# [Since 2.0M3] 100 | #-# Overrides default macro categories (Each macro has a default category already defined, for example 101 | #-# "presentation" for the Table of Contents Macro). 102 | #-# 103 | #-# Ex: To redefine the macro category for the TOC macro so that it'd be in the "My Category" category + 104 | #-# redefine the category for the Script Macro to be "My Other Category", you'd use: 105 | # rendering.macroCategories = toc:My Category 106 | # rendering.macroCategories = script:My Other Category 107 | 108 | #-# [Since 2.5M2] 109 | #-# Specify whether the image dimensions should be extracted from the image parameters and included in the image URL 110 | #-# or not. 
When image dimensions are included in the image URL the image can be resized on the server side before being 111 | #-# downloaded, improving thus the page loading speed. 112 | #-# 113 | #-# Default value is true. 114 | # rendering.imageDimensionsIncludedInImageURL = true 115 | 116 | #-# [Since 2.5M2] 117 | #-# One way to improve page load speed is to resize images on the server side just before rendering the page. The 118 | #-# rendering module can use the image width provided by the user to scale the image (See 119 | #-# rendering.includeImageDimensionsInImageURL configuration parameter). When the user doesn't specify the image width 120 | #-# the rendering module can limit the width of the image based on this configuration parameter. 121 | #-# 122 | #-# The default value is -1 which means image width is not limited by default. Use a value greater than 0 to limit the 123 | #-# image width (pixels). Note that the aspect ratio is kept even when both the width and the height of the image are 124 | #-# limited. 125 | # rendering.imageWidthLimit = 1024 126 | # rendering.imageWidthLimit = -1 127 | 128 | #-# [Since 2.5M2] 129 | #-# See rendering.imageWidthLimit 130 | # rendering.imageHeightLimit = 768 131 | # rendering.imageHeightLimit = -1 132 | 133 | #-# [Since 2.5M2] 134 | #-# InterWiki definitions in the format alias=URL 135 | #-# See http://en.wikipedia.org/wiki/Interwiki_links for a definition of an InterWiki link 136 | # Some examples: 137 | # rendering.interWikiDefinitions = wikipedia = http://en.wikipedia.org/wiki/ 138 | # rendering.interWikiDefinitions = definition = http://www.yourdictionary.com/ 139 | 140 | #------------------------------------------------------------------------------------- 141 | # Rendering Transformations 142 | #------------------------------------------------------------------------------------- 143 | 144 | #-# [Since 2.6RC1] 145 | #-# Controls what transformations will be executed when rendering content. 146 | #-# A transformation modifies the parsed content. For example the Icon transformation replaces some characters with 147 | #-# icons, a WikiWord transformation will automatically create links when it finds wiki words, etc. 148 | #-# Note that the Macro transformation is a special transformation that replaces macro markers by the result of the 149 | #-# macro execution. If you don't list it, macros won't get executed. 150 | #-# The default value is: macro, icon 151 | # rendering.transformations = macro, icon 152 | 153 | #-# [Since 2.6RC1] 154 | #-# Icon Transformation Configuration 155 | #-# Defines mappings between suite of characters and the icon to display when those characters are found. 
156 | #-# The format is: rendering.transformation.icon.mappings = = 157 | #-# The following mappings are already predefined and you don't need to redefine them unless you wish to override them 158 | #-# rendering.transformation.icon.mappings = :) = emoticon_smile 159 | #-# rendering.transformation.icon.mappings = :( = emoticon_unhappy 160 | #-# rendering.transformation.icon.mappings = :P = emoticon_tongue 161 | #-# rendering.transformation.icon.mappings = :D = emoticon_grin 162 | #-# rendering.transformation.icon.mappings = ;) = emoticon_wink 163 | #-# rendering.transformation.icon.mappings = (y) = thumb_up 164 | #-# rendering.transformation.icon.mappings = (n) = thumb_down 165 | #-# rendering.transformation.icon.mappings = (i) = information 166 | #-# rendering.transformation.icon.mappings = (/) = accept 167 | #-# rendering.transformation.icon.mappings = (x) = cancel 168 | #-# rendering.transformation.icon.mappings = (!) = error 169 | #-# rendering.transformation.icon.mappings = (+) = add 170 | #-# rendering.transformation.icon.mappings = (-) = delete 171 | #-# rendering.transformation.icon.mappings = (?) = help 172 | #-# rendering.transformation.icon.mappings = (on) = lightbulb 173 | #-# rendering.transformation.icon.mappings = (off) = lightbulb_off 174 | #-# rendering.transformation.icon.mappings = (*) = star 175 | 176 | #------------------------------------------------------------------------------------- 177 | # LinkChecker Transformation 178 | #------------------------------------------------------------------------------------- 179 | 180 | #-# [Since 3.3M2] 181 | #-# Defines the time (in ms) after which an external link should be checked again for validity. 182 | #-# the default configuration is: 183 | # rendering.transformation.linkchecker.timeout = 3600000 184 | 185 | #-# [Since 5.3RC1] 186 | #-# List of document references that are excluded from link checking, specified using regexes. 187 | #-# the default configuration is: 188 | # rendering.transformation.linkchecker.excludedReferencePatterns = .*:XWiki\.ExternalLinksJSON 189 | 190 | #------------------------------------------------------------------------------------- 191 | # Rendering Macros 192 | #------------------------------------------------------------------------------------- 193 | 194 | #-# Velocity Macro 195 | 196 | #-# [Since 2.0M1] 197 | #-# Defines which Velocity Filter to use by default. This offers the possibility to filter the Velocity macro content 198 | #-# before and after the Velocity Engine execution. 199 | #-# The following filters are available: 200 | #-# - indent (the default): Remove all first whites spaces of lines to support source code indentation without 201 | #-# generating whitespaces in the resulting XDOM. 202 | #-# - none: Doesn't change the content 203 | #-# - html: Removes all leading and trailing white spaces and new lines. If you need a space you'll need to use 204 | #-# \$sp and if you need a new line you'll need to use \$nl 205 | #-# rendering.macro.velocity.filter = indent 206 | 207 | #------------------------------------------------------------------------------------- 208 | # Cache 209 | #------------------------------------------------------------------------------------- 210 | 211 | #-# [Since 1.7M1] 212 | #-# The standard cache component implementation to use (can be local or distributed depending on the implementation). 213 | #-# The default standard cache implementation is Infinispan. 
214 | # cache.defaultCache=infinispan 215 | 216 | #-# [Since 1.7M1] 217 | #-# The local cache implementation to use. 218 | #-# The default local cache implementation is Infinispan. 219 | # cache.defaultLocalCache=infinispan/local 220 | 221 | #------------------------------------------------------------------------------------- 222 | # Settings for the OpenOffice server instance consumed by the OfficeImporter component 223 | #------------------------------------------------------------------------------------- 224 | 225 | #-# [Since 1.9M2] 226 | #-# Type of the openoffice server instance used by officeimporter component. 227 | #-# 0 - Internally managed server instance. (Default) 228 | #-# 1 - Externally managed (local) server instance. 229 | # openoffice.serverType=0 230 | 231 | #-# [Since 1.9M2] 232 | #-# Port number used for connecting to the openoffice server instance. 233 | #-# Default port is 8100 234 | # openoffice.serverPort=8100 235 | 236 | #-# [Since 1.9M2] 237 | #-# If the openoffice server should be started / connected upon XE start. 238 | #-# Default value is false 239 | # openoffice.autoStart=false 240 | 241 | #-# [Since 1.8RC3] 242 | #-# Path to openoffice installation (serverType:0 only). 243 | #-# If no path is provided, a default value will be calculated based on the operating environment. 244 | # openoffice.homePath=/opt/openoffice.org3/ 245 | 246 | #-# [Since 1.8RC3] 247 | #-# Path to openoffice execution profile (serverType:0 only). 248 | #-# If no path is provided, a default value will be calculated based on the operating environment. 249 | # openoffice.profilePath=/home/user/.openoffice.org/3 250 | 251 | #-# [Since 1.8RC3] 252 | #-# Maximum number of simultaneous conversion tasks to be handled by a single openoffice process (serverType:0 only). 253 | #-# Default value is 50 254 | # openoffice.maxTasksPerProcess=50 255 | 256 | #-# [Since 1.8RC3] 257 | #-# Timeout for conversion tasks (in milliseconds) (serverType:0 only). 258 | #-# Default value is 30 seconds 259 | # openoffice.taskExecutionTimeout=30000 260 | 261 | #------------------------------------------------------------------------------------- 262 | # Velocity 263 | #------------------------------------------------------------------------------------- 264 | 265 | #-# [Since 2.0M1] 266 | #-# Velocity Tools that will be available from your scripts. The format is 267 | #-# velocity.tools = = 268 | #-# Default values (no need to add them) 269 | #-# velocity.tools = listtool = org.apache.velocity.tools.generic.ListTool 270 | #-# velocity.tools = numbertool = org.apache.velocity.tools.generic.NumberTool 271 | #-# velocity.tools = datetool = org.apache.velocity.tools.generic.ComparisonDateTool 272 | #-# velocity.tools = mathtool = org.apache.velocity.tools.generic.MathTool 273 | #-# velocity.tools = sorttool = org.apache.velocity.tools.generic.SortTool 274 | #-# velocity.tools = escapetool = org.apache.velocity.tools.generic.EscapeTool 275 | #-# velocity.tools = regextool = org.xwiki.velocity.tools.RegexTool 276 | 277 | #-# [Since 2.0M1] 278 | #-# Velocity configuration properties. 
The format is 279 | #-# velocity.properties = = 280 | #-# Default values (no need to add them) 281 | #-# velocity.properties = resource.loader = webapp 282 | #-# velocity.properties = directive.set.null.allowed = true 283 | #-# velocity.properties = webapp.resource.loader.class = org.apache.velocity.tools.view.servlet.WebappLoader 284 | #-# velocity.properties = velocimacro.messages.on = false 285 | #-# velocity.properties = resource.manager.logwhenfound = false 286 | #-# velocity.properties = velocimacro.permissions.allow.inline.local.scope = true 287 | #-# velocity.properties = runtime.introspector.uberspect = org.xwiki.velocity.introspection.ChainingUberspector 288 | #-# velocity.properties = runtime.introspector.uberspect.chainClasses = org.xwiki.velocity.introspection.SecureUberspector\,org.xwiki.velocity.introspection.DeprecatedCheckUberspector\,org.xwiki.velocity.introspection.MethodArgumentsUberspector 289 | 290 | #------------------------------------------------------------------------------------- 291 | # Groovy 292 | #------------------------------------------------------------------------------------- 293 | 294 | #-# [Since 4.1M1] 295 | #-# Allows to specify Compiler customization for the Groovy execution engine. 296 | #-# There's no customizers defined by default. Available customizer ids: 297 | #-# - timedinterrupt: interrupt script execution if it takes longer than a given time (default to 1 minute) 298 | #-# - secure: runs Groovy in a security sandbox 299 | #-# groovy.compilationCustomizers= 300 | 301 | #-# Timed Interrupt Customizer 302 | 303 | #-# [Since 4.1M1] 304 | #-# Default execution time for a script before a timeout occurs, in seconds. 305 | #-# groovy.customizer.timedInterrupt.timeout=60 306 | 307 | #------------------------------------------------------------------------------------- 308 | # Events distribution 309 | #------------------------------------------------------------------------------------- 310 | 311 | #-# [Since 2.0M3] 312 | #-# Indicate if the network distribution module is enabled or not. 313 | #-# By default remote events are disabled. 314 | # observation.remote.enabled = false 315 | 316 | #-# [Since 2.0M3] 317 | #-# The list of events communication channels to start when the application starts. 318 | #-# By default no channel is configured. 319 | #-# 320 | #-# The default remote event distribution implementation is using JGroups and you'll need to either use embedded JGroups configuration 321 | #-# files or drop your custom configuration in the WEB-INF/observation/remote/jgroups/ directory. 322 | #-# There's a README file in that directory with more information. 323 | #-# Example: observation.remote.channels = public, cluster 324 | 325 | #-# [Since 2.0M4] 326 | #-# The implementation of network adapter to use. 327 | #-# The default is jgroups. 328 | #-# 329 | #-# By default only jgroups is provided. To add one implements NetworkAdaptor component interface. The identifier provided in the configuration is matched with the component role hint. 330 | #-# Example: observation.remote.networkadapter = jgroups 331 | 332 | #------------------------------------------------------------------------------------- 333 | # Cryptographic services 334 | #------------------------------------------------------------------------------------- 335 | 336 | #-# [Since 2.5M1] 337 | #-# Which cipher should be used for encrypting text with a password. 
338 | #-# 339 | #-# Options are: 340 | #-# CAST5PasswordCiphertext (Uses CAST-5 cipher engine with a 128 bit key) 341 | #-# AESPasswordCiphertext (Uses AES cipher engine with a 128 bit key) 342 | #-# 343 | #-# NOTE: Encrypted text can still be decrypted even if the cipher or key function has changed. 344 | #-# 345 | #crypto.passwd.passwordCiphertext = CAST5PasswordCiphertext 346 | 347 | #-# [Since 2.5M1] 348 | #-# Which key derivation function to use. 349 | #-# Since the easiest attack on password-encrypted text is to guess passwords, this function ensures that verification 350 | #-# of a password takes a long time for the computer and is inherently difficult to parallelize. 351 | #-# 352 | #-# Options are: 353 | #-# ScryptMemoryHardKeyDerivationFunction (Uses the scrypt key function which forces password guessers to expend 354 | #-# a configurable amount of processor time and memory to validate guesses 355 | #-# Scrypt is conjectured to be 260 times the strength of PBKDF2 356 | #-# Function definition available here: http://www.tarsnap.com/scrypt.html) 357 | #-# PBKDF2KeyDerivationFunction (Uses password based key derivation function 2 (PBKDF2) developed by RSA labs as part 358 | #-# of the PKCS#5 standard. This function uses a configurable amount of processor time 359 | #-# but an insignificant amount of memory. 360 | #-# Function definition available here: http://www.apps.ietf.org/rfc/rfc2898.html#sec-5.2) 361 | #-# 362 | #crypto.passwd.keyDerivationFunctionClassForEncryption = ScryptMemoryHardKeyDerivationFunction 363 | 364 | #-# [Since 2.5M1] 365 | #-# Define the properties for initializing the key derivation functions for encryption. 366 | #-# 367 | #-# millisecondsOfProcessorTimeToSpend is used to test run the key function and decide how many iterations it should 368 | #-# use. Remember this amount of time will be required to convert the password to 369 | #-# the decryption key every time the text needs to be decrypted. 370 | #-# numberOfKilobytesOfMemoryToUse will be ignored unless a memory hard function such as scrypt is chosen in which 371 | #-# case it will be used to define how much memory should be required to derive the 372 | #-# decryption key from the password. 373 | #-# 374 | #-# CAUTION: If numberOfKilobytesOfMemoryToUse is set too large, the computer may be able to encrypt a piece of text 375 | #-# when it has lots of free memory available, then be unable to decrypt that text when less memory is 376 | #-# available. Unless you are very paranoid, 1 megabyte (1024) is plenty of strength. 377 | #-# 378 | #crypto.passwd.keyDerivationFunctionPropertiesForEncryption = millisecondsOfProcessorTimeToSpend = 200 379 | #crypto.passwd.keyDerivationFunctionPropertiesForEncryption = numberOfKilobytesOfMemoryToUse = 1024 380 | 381 | #-# [Since 2.5M1] 382 | #-# Which key derivation function to use for protecting (hashing) passwords. 383 | #-# Options include: 384 | #-# ScryptMemoryHardKeyDerivationFunction (See above for more information) 385 | #-# PBKDF2KeyDerivationFunction (See above for more information) 386 | #-# 387 | #crypto.passwd.keyDerivationFunctionClassForPasswordVerification = ScryptMemoryHardKeyDerivationFunction 388 | 389 | #-# [Since 2.5M1] 390 | #-# Properties to use when initializing key derivation functions for password protection. 391 | #-# 392 | #-# millisecondsOfProcessorTimeToSpend (See above for description.) 393 | #-# numberOfKilobytesOfMemoryToUse (See above for description.)
394 | #-# derivedKeyLength is the length, in bytes, of the derived (output) key. In a password verification 395 | #-# context, this is only valid for decreasing the chance of a collision. 396 | #-# 397 | #-# CAUTION: If numberOfKilobytesOfMemoryToUse is set too large, the computer may be able to protect a password 398 | #-# when it has lots of free memory available, then be unable to validate that password when less memory is 399 | #-# available. Unless you are very paranoid, 1 megabyte (1024) is plenty of strength. 400 | #-# 401 | #crypto.passwd.keyDerivationFunctionPropertiesForPasswordVerification = millisecondsOfProcessorTimeToSpend = 200 402 | #crypto.passwd.keyDerivationFunctionPropertiesForPasswordVerification = numberOfKilobytesOfMemoryToUse = 1024 403 | #crypto.passwd.keyDerivationFunctionPropertiesForPasswordVerification = derivedKeyLength = 32 404 | 405 | #------------------------------------------------------------------------------------- 406 | # CSRF token component 407 | #------------------------------------------------------------------------------------- 408 | 409 | #-# [Since 2.5M2] 410 | #-# Controls whether the secret token validation mechanism should be used (to prevent CSRF attacks). 411 | #-# 412 | #-# If enabled, all actions requiring "comment", "edit", "delete", "admin" or "programming" rights 413 | #-# will check that the parameter "form_token" with the value of a random secret token is present 414 | #-# in the request. 415 | #-# 416 | #-# Valid values: 417 | #-# true : Enabled 418 | #-# false: Disabled 419 | #-# 420 | #-# Default value is true 421 | # csrf.enabled = true 422 | 423 | #------------------------------------------------------------------------------------- 424 | # Extension Manager 425 | #------------------------------------------------------------------------------------- 426 | 427 | #-# [Since 2.5] 428 | #-# Repositories to use when searching and downloading extensions. 429 | #-# 430 | #-# The format is <id>:<type>:<url> where 431 | #-# * id can be anything as long as there is only one 432 | #-# * type is the type of the repository (maven, xwiki, etc.) 433 | #-# * url is the URL of the root of the repository 434 | #-# 435 | #-# [Since 4.3] It's also possible to associate various properties to each repository.
436 | #-# Here are the standard properties: 437 | #-# * user: the user to use to authenticate to the repository 438 | #-# * password: the password to use to authenticate to the repository 439 | #-# 440 | #-# Here is an example: 441 | # extension.repositories=privatemavenid:maven:http://host.com/private/maven/ 442 | # extension.repositories.privatemavenid.auth.user=someuser 443 | # extension.repositories.privatemavenid.auth.password=thepassword 444 | #-# 445 | #-# Here's an example to add your local Maven Repository 446 | # extension.repositories=local:maven:file://${sys:user.home}/.m2/repository 447 | #-# 448 | #-# And an example to add the XWiki Maven Snapshot Repository 449 | # extension.repositories=maven-xwiki-snapshot:maven:http://nexus.xwiki.org/nexus/content/groups/public-snapshots 450 | #-# 451 | #-# When not set the following is taken: 452 | # extension.repositories=maven-xwiki:maven:http://nexus.xwiki.org/nexus/content/groups/public 453 | # extension.repositories=extensions.xwiki.org:xwiki:http://extensions.xwiki.org/xwiki/rest/ 454 | #-# 455 | #-# To not have any repository enabled (including disabling default repositories) you can explicitly make this list empty: 456 | # extension.repositories= 457 | 458 | #-# [Since 2.5] 459 | #-# The directory where extensions are stored after being downloaded. 460 | #-# 461 | #-# The default is extension/repository in whatever is the general persistent directory. 462 | #-# See container.persistentDirectory. 463 | # extension.localRepository=extension/repository 464 | 465 | #-# [Since 3.4] 466 | #-# The user agent to use when communication with external services (generally repositories). 467 | #-# 468 | #-# The default is: 469 | # extension.userAgent=XWikiExtensionManager 470 | 471 | #------------------------------------------------------------------------------------- 472 | # Solr Search 473 | #------------------------------------------------------------------------------------- 474 | 475 | #-# [Since 4.5M1] 476 | #-# The Solr server type. Currently accepted values are "embedded" (default) and "remote". 477 | # solr.type=embedded 478 | 479 | #-# [Since 4.5M1] 480 | #-# The location where the embedded Solr instance stores its configuration and the indexed data. 481 | #-# The default is the subfolder "solr" inside the folder defined by the property "environment.permanentDirectory". 482 | # solr.embedded.home=/var/local/xwiki/solr 483 | 484 | #-# [Since 4.5M1] 485 | #-# The URL to use to connect to the remote solr server. 486 | #-# The default value assumes that the remote Solr server is started in a different process on the same machine, using the default port. 487 | # solr.remote.url=http://localhost:8983/solr 488 | 489 | #-# [Since 5.1M1] 490 | #-# Elements to index are not sent to the Solr server one by one but in batch to improve performances. 491 | #-# It's possible to configure this behavior with the following properties: 492 | #-# 493 | #-# The maximum number of elements sent at the same time to the Solr server 494 | #-# The default is 50. 495 | # solr.indexer.batch.size=50 496 | #-# The maximum number of characters in the batch of elements to send to the Solr server. 497 | #-# The default is 10000. 498 | # solr.indexer.batch.maxLength=10000 499 | 500 | #-# [Since 5.1M1] 501 | #-# The maximum number of elements in the background queue of elements to index/delete 502 | #-# The default is 10000. 
503 | # solr.indexer.queue.capacity=100000 504 | 505 | #-# [Since 6.1M2] 506 | #-# Indicating if a synchronization between SOLR index and XWiki database should be run at startup. 507 | #-# Synchronization can be started from search administration. 508 | #-# The default is true. 509 | # solr.synchronizeAtStartup=false 510 | 511 | #------------------------------------------------------------------------------------- 512 | # Security 513 | #------------------------------------------------------------------------------------- 514 | 515 | #-# [Since 5.0M2] 516 | #-# Define the authorization policies by choosing another implementation of the AuthorizationSettler. This component 517 | #-# is solely responsible for settling access decisions based on user, target entity and available security rules. 518 | #-# The identifier provided here is matched with the component role hint. 519 | #-# 520 | #-# The default is: 521 | # security.authorization.settler=default 522 | 523 | #------------------------------------------------------------------------------------- 524 | # URL 525 | #------------------------------------------------------------------------------------- 526 | 527 | #-# IMPORTANT: The URL module is a feature still in development and as such should be considered experimental at the 528 | #-# moment. The configuration parameters below are used only in some part of the code at the moment. The idea is to 529 | #-# progressively refactor more and more till only the new properties are used. For the moment you should continue to 530 | #-# use the following old properties located in xwiki.cfg: 531 | #-# xwiki.virtual.usepath 532 | #-# xwiki.virtual.usepath.servletpath 533 | 534 | #-# [Since 5.1M1] 535 | #-# The id of the URL format to use. This allows to plug in different implementations and thus allows to completely 536 | #-# control the format of XWiki URLs. 537 | #-# 538 | #-# The default is: 539 | # url.format=standard 540 | 541 | #-# [Since 5.1M1] 542 | #-# Defines where the wiki part is defined in a URL pointing to a subwiki 543 | #-# If true then the wiki part is located in the URL path (a.k.a path-based), for example: 544 | #-# http://server/xwiki/wiki/mywiki/view/Space/Page 545 | #-# If false then the wiki part is located in the URL host domain (a.k.a domain-based), for example: 546 | #-# http://mywiki.domain/xwiki/bin/view/Space/Page 547 | #-# 548 | #-# The default is: 549 | # url.standard.multiwiki.isPathBased=true 550 | 551 | #-# [Since 5.1M1] 552 | #-# For path-based setups, this property defines the path segment before the one identifying the subwiki in the URL. 553 | #-# For example if set to "thewiki", then the following URL will point to a subwiki named "mywiki": 554 | #-# http://server/xwiki/thewiki/mywiki/view/Space/Page 555 | #-# Note that the mapping in web.xml has to be modified accordingly if you don't use the default value: 556 | #-# 557 | #-# action 558 | #-# /wiki/* 559 | #-# 560 | #-# 561 | #-# The default is: 562 | # url.standard.multiwiki.wikiPathPrefix=wiki 563 | 564 | #-# [Since 5.2M1] 565 | #-# Defines the URL path prefix used for Entity URLs, i.e. URLs pointing to a Document, Space, Object, etc. 566 | #-# For example this is the "bin" part in the following URL: 567 | #-# http://server/xwiki/bin/view/space/page 568 | #-# Note that this replaces the old xwiki.defaultservletpath property in the old xwiki.cfg file. 
569 | #-# 570 | #-# The default is: 571 | # url.standard.getEntityPathPrefix=bin 572 | 573 | #-# [Since 5.3M1] 574 | #-# The action to take when a subwiki is not found (i.e. there's no wiki descriptor for it). Valid values are: 575 | #-# - redirect_to_main_wiki: default to displaying the main wiki 576 | #-# - display_error: redirect to a vm to display an error 577 | #-# 578 | #-# The default is: 579 | # url.standard.multiwiki.notFoundBehavior=redirect_to_main_wiki 580 | 581 | #------------------------------------------------------------------------------------- 582 | # Attachment 583 | #------------------------------------------------------------------------------------- 584 | 585 | #-# [Since 5.2M2] 586 | #-# Define the kinds of attachments that can be displayed inline. You can either choose to do it through a whitelist (only 587 | #-# the mimetypes defined in this list would be displayed inline) or a blacklist (every mimetype that is not in this list 588 | #-# would be displayed inline if possible). 589 | #-# 590 | #-# By default we use the following whitelist (comma-separated list of values). 591 | # attachment.download.whitelist=audio/basic,audio/L24,audio/mp4,audio/mpeg,audio/ogg,audio/vorbis,audio/vnd.rn-realaudio,audio/vnd.wave,audio/webm,image/gif,image/jpeg,image/pjpeg,image/png,image/svg+xml,image/tiff,text/csv,text/plain,text/xml,text/rtf,video/mpeg,video/ogg,video/quicktime,video/webm,video/x-matroska,video/x-ms-wmv,video/x-flv 592 | #-# 593 | #-# If you prefer to use a blacklist instead, you can define the forbidden types here, as a comma-separated list of values. 594 | #-# We advise you to forbid at least the following mimetypes: text/html, text/javascript 595 | # attachment.download.blacklist=text/html,text/javascript 596 | 597 | #------------------------------------------------------------------------------------- 598 | # Active Installs 599 | #------------------------------------------------------------------------------------- 600 | 601 | #-# [Since 5.2M2] 602 | #-# The URL where the Active Installs module should connect in order to send a ping of activity. This feature 603 | #-# regularly sends the following information to xwiki.org about the current instance: 604 | #-# - its unique id 605 | #-# - the id and versions of all installed extensions 606 | #-# The goal is to count the number of active installs of XWiki out there and provide statistics on xwiki.org. 607 | #-# 608 | #-# The default is: 609 | # activeinstalls.pingURL=http://extensions.xwiki.org/activeinstalls 610 | 611 | #------------------------------------------------------------------------------------- 612 | # Wikis 613 | #------------------------------------------------------------------------------------- 614 | 615 | #-# [Since 5.4.4] 616 | #-# Add a default suffix to the alias of a new wiki in the wiki creation wizard, only when the path mode is used 617 | #-# (see url.standard.multiwiki.isPathBased). If this value is empty, XWiki will try to compute it automatically from 618 | #-# the request URL. 619 | #-# 620 | #-# e.g. if wiki.alias.suffix is "xwiki.org" and the wiki name is "playground" 621 | #-# then the computed alias will be: "playground.xwiki.org".
622 | #-# 623 | #-# The default is: 624 | # wiki.alias.suffix= 625 | 626 | #------------------------------------------------------------------------------------- 627 | # Store 628 | #------------------------------------------------------------------------------------- 629 | 630 | #-# [Since 6.1M2] 631 | #-# If active, the Filesystem Attachment Store will automatically clear empty directories on startup; 632 | #-# in some cases this may create undue load on the server and may need to be disabled. To do that, 633 | #-# set the following to false. 634 | #-# Note that if you disable this feature, empty directories will accumulate and you are responsible 635 | #-# for cleaning them up. 636 | # store.fsattach.cleanOnStartup=true 637 | 638 | #------------------------------------------------------------------------------------- 639 | # Mail 640 | #------------------------------------------------------------------------------------- 641 | 642 | #-# [Since 6.1M2] 643 | #-# SMTP host when sending emails, defaults to "localhost". 644 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "smtp_server" property name. 645 | # mail.sender.host = localhost 646 | 647 | #-# [Since 6.1M2] 648 | #-# SMTP port when sending emails, defaults to 25. 649 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "smtp_port" property name. 650 | # mail.sender.port = 25 651 | 652 | #-# [Since 6.1M2] 653 | #-# From email address to use. Not defined by default and needs to be set up when calling the mail API. 654 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "admin_email" property name. 655 | # mail.sender.from = john@doe.com 656 | 657 | #-# [Since 6.1M2] 658 | #-# Username to authenticate on the SMTP server, if needed. By default no authentication is performed. 659 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "smtp_server_username" 660 | #-# property name. 661 | # mail.sender.username = someuser 662 | 663 | #-# [Since 6.1M2] 664 | #-# Password to authenticate on the SMTP server, if needed. By default no authentication is performed. 665 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "smtp_server_password" 666 | #-# property name. 667 | # mail.sender.password = somepassword 668 | 669 | #-# [Since 6.1M2] 670 | #-# Additional Java Mail properties to use when sending emails, if needed. 671 | #-# This configuration property can be overridden in XWikiPreferences objects, by using the "javamail_extra_props" 672 | #-# property name.
673 | #-# By default the following properties are set automatically: 674 | #-# mail.transport.protocol = smtp 675 | #-# mail.smtp.host = 676 | #-# mail.smtp.port = 677 | #-# mail.smtp.user = 678 | #-# mail.smtp.from = 679 | #-# Example: 680 | # mail.sender.properties = mail.smtp.starttls.enable = true 681 | # mail.sender.properties = mail.smtp.socks.host = someserver 682 | 683 | 684 | -------------------------------------------------------------------------------- /zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM multicloud/jre-8-oracle 2 | MAINTAINER yaronr 3 | 4 | ENV ZOOKEEPER_VER 3.4.7 5 | 6 | RUN wget -q -O - http://apache.mirrors.pair.com/zookeeper/zookeeper-${ZOOKEEPER_VER}/zookeeper-${ZOOKEEPER_VER}.tar.gz | tar -xzf - -C /opt && \ 7 | mv /opt/zookeeper-${ZOOKEEPER_VER} /opt/zookeeper && \ 8 | rm /opt/zookeeper/conf/zoo_sample.cfg && \ 9 | mkdir -p /tmp/zookeeper 10 | 11 | EXPOSE 2181 2888 3888 12 | 13 | WORKDIR /opt/zookeeper 14 | 15 | VOLUME ["/opt/zookeeper/conf", "/tmp/zookeeper"] 16 | ADD ./zoo.cfg /opt/zookeeper/conf/ 17 | 18 | #resolve hosts from DNS first 19 | RUN sed 's/^\(hosts:[\ ]*\)\(files\)\ \(dns\)$/\1\3 \2/' -i /etc/nsswitch.conf 20 | 21 | ADD entrypoint.sh /entrypoint.sh 22 | RUN chmod a+x /entrypoint.sh 23 | 24 | ENTRYPOINT ["/entrypoint.sh", "/opt/zookeeper/bin/zkServer.sh"] 25 | CMD ["start-foreground"] 26 | -------------------------------------------------------------------------------- /zookeeper/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "taking a nap, to allow other zookeeper instances to start and update in DNS" 4 | sleep 5 5 | 6 | if [[ -n $ZOOKEEPER_ID ]] ; then 7 | echo "Setting zookeeper ID to: $ZOOKEEPER_ID" 8 | echo "$ZOOKEEPER_ID" > /tmp/zookeeper/myid 9 | else 10 | echo "ZOOKEEPER_ID must be specified" 11 | exit 1 12 | fi 13 | 14 | exec "$@" 15 | -------------------------------------------------------------------------------- /zookeeper/zoo.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | 4 | # The number of ticks that the initial 5 | # synchronization phase can take 6 | initLimit=10 7 | 8 | # The number of ticks that can pass between 9 | # sending a request and getting an acknowledgement 10 | syncLimit=5 11 | 12 | # the directory where the snapshot is stored. 13 | # do not use /tmp for storage, /tmp here is just 14 | # example sakes. 15 | dataDir=/tmp/zookeeper 16 | 17 | # the port at which the clients will connect 18 | clientPort=2181 19 | # the maximum number of client connections. 20 | # increase this if you need to handle more clients 21 | #maxClientCnxns=60 22 | 23 | # 24 | # Be sure to read the maintenance section of the 25 | # administrator guide before turning on autopurge. 26 | # 27 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 28 | # 29 | # The number of snapshots to retain in dataDir 30 | autopurge.snapRetainCount=3 31 | 32 | # Purge task interval in hours 33 | # Set to "0" to disable auto purge feature 34 | autopurge.purgeInterval=1 35 | 36 | server.1=zoo1:2888:3888 37 | server.2=zoo2:2888:3888 38 | server.3=zoo3:2888:3888 39 | --------------------------------------------------------------------------------
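Example usage for the zookeeper image above: a minimal sketch of running a three-node ensemble that matches zoo.cfg (server.1=zoo1 ... server.3=zoo3) and entrypoint.sh (which requires ZOOKEEPER_ID). The image tag local/zookeeper, the Docker network name zk-net, and the published host ports are assumptions, not part of this repository; the multicloud/jre-8-oracle base image must already exist locally (it can be built from the jre-8-oracle directory).

# build the image (tag is an assumption)
docker build -t local/zookeeper ./zookeeper

# user-defined network so the containers can resolve each other by name
docker network create zk-net

for i in 1 2 3; do
  # container names must match the zoo1/zoo2/zoo3 host names used in zoo.cfg;
  # entrypoint.sh writes ZOOKEEPER_ID to /tmp/zookeeper/myid before starting the server
  docker run -d --name "zoo$i" --network zk-net \
    -e ZOOKEEPER_ID="$i" \
    -p "218$i:2181" \
    local/zookeeper
done

# quick health check against the first node using ZooKeeper's "ruok" four-letter command
echo ruok | nc localhost 2181   # expect "imok" (assumes nc is installed on the host)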