├── Dockerfile
├── LICENSE
├── README.md
├── doc
│   ├── kibana-screenshot.png
│   ├── launch.sh
│   └── teardown.sh
├── dockergen.sh
├── fluentd.sh
└── templates
    └── fluentd.conf.tmpl
/Dockerfile:
--------------------------------------------------------------------------------
FROM phusion/baseimage
MAINTAINER Brian Prodoehl

# Set correct environment variables.
ENV HOME /root

# Use baseimage-docker's init system.
CMD ["/sbin/my_init"]

RUN apt-get update && apt-get -y upgrade

# install default Ruby
RUN apt-get install -y curl build-essential ruby ruby-dev wget

# install RVM, Ruby, and Bundler
#RUN \curl -k -L https://get.rvm.io | bash -s stable
#RUN /bin/bash -l -c "rvm requirements"
#RUN /bin/bash -l -c "rvm install 2.0"
#RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"

RUN gem install fluentd --no-ri --no-rdoc

# install the fluentd Elasticsearch output plugin
RUN apt-get install -y libcurl4-openssl-dev
RUN gem install fluent-plugin-elasticsearch --no-ri --no-rdoc

# register fluentd as a runit service
RUN mkdir /etc/service/fluentd
ADD fluentd.sh /etc/service/fluentd/run

RUN mkdir /app
WORKDIR /app
ADD . /app

# install docker-gen, which regenerates the fluentd config as containers come and go
RUN wget https://github.com/jwilder/docker-gen/releases/download/0.3.2/docker-gen-linux-amd64-0.3.2.tar.gz
RUN tar xvzf docker-gen-linux-amd64-0.3.2.tar.gz

# register docker-gen as a runit service
RUN mkdir /etc/service/dockergen
ADD dockergen.sh /etc/service/dockergen/run

# Clean up APT when done.
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2014 Brian Prodoehl

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
docker-log-collector
==============

A Docker container that collects the Docker logs of all running containers
using Fluentd. By default, it passes everything along to an Elasticsearch
container linked in as es1.
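
If you don't already have an Elasticsearch container to link in, the same image
used by the sample script later in this README will do; a minimal sketch (any
Elasticsearch container named ```elasticsearch``` works just as well):

```
docker run -d --name elasticsearch balsamiq/docker-elasticsearch
```

Then start the collector: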

```
docker run -td \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/docker/containers:/var/lib/docker/containers \
  --link elasticsearch:es1 \
  --name collector bprodoehl/log-collector
```

This is based on the very excellent article at
http://jasonwilder.com/blog/2014/03/17/docker-log-management-using-fluentd/,
with a pile of real-world fixes added on top so that it all actually works: time
formats are parsed correctly, the logs can be read with Kibana, and so on.


Watch the logs from all your local containers with Kibana
===================

You can easily watch and analyze the logs from all of your containers by combining this container with Kibana. As you can see in the screenshot below, each line of output from each locally-running container shows up in Kibana, with the ```_type``` field set to the container name and the ```log``` field containing the message.

![Kibana screenshot](doc/kibana-screenshot.png)

This sample script will launch the containers necessary to watch the logs from all of your locally-running Docker containers, and will open Kibana in your default browser. The script, along with an accompanying script to tear everything down, is also in the ```doc``` folder.

```bash
#!/bin/bash

### Pull the necessary containers
docker pull balsamiq/docker-elasticsearch
docker pull bprodoehl/kibana
docker pull bprodoehl/log-collector

### Launch them
# Launch Elasticsearch
docker run -d \
  --name elasticsearch \
  --hostname elasticsearch \
  balsamiq/docker-elasticsearch

# Launch Kibana
docker run -d \
  -e KIBANA_SECURE=false \
  --link elasticsearch:es \
  --name kibana \
  --hostname kibana \
  bprodoehl/kibana

# Launch the log collector
docker run -d \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/docker/containers:/var/lib/docker/containers \
  --link elasticsearch:es1 \
  --name collector \
  --hostname collector \
  bprodoehl/log-collector

### Open up Kibana in your default browser
OPEN_CMD=open
for cmd in xdg-open gnome-open sensible-browser open;
do
  which $cmd &> /dev/null
  if [ 0 == $? ]; then
    OPEN_CMD=$cmd
    break
  fi
done

echo Opening Kibana in default browser with $OPEN_CMD
KIBANA_IP=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' kibana`
$OPEN_CMD "http://$KIBANA_IP/index.html#/dashboard/file/logstash.json"
```
--------------------------------------------------------------------------------
/doc/kibana-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bprodoehl/docker-log-collector/da2c7d96b25bb7196a337d0787f92327553b6b90/doc/kibana-screenshot.png
--------------------------------------------------------------------------------
/doc/launch.sh:
--------------------------------------------------------------------------------
#!/bin/bash

### Pull the necessary containers
docker pull balsamiq/docker-elasticsearch
docker pull bprodoehl/kibana
docker pull bprodoehl/log-collector

### Launch them
# Launch Elasticsearch
docker run -d \
  --name elasticsearch \
  --hostname elasticsearch \
  balsamiq/docker-elasticsearch

# Launch Kibana
docker run -d \
  -e KIBANA_SECURE=false \
  --link elasticsearch:es \
  --name kibana \
  --hostname kibana \
  bprodoehl/kibana

# Launch the log collector
docker run -d \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/docker/containers:/var/lib/docker/containers \
  --link elasticsearch:es1 \
  --name collector \
  --hostname collector \
  bprodoehl/log-collector

### Open up Kibana in your default browser
OPEN_CMD=open
for cmd in xdg-open gnome-open sensible-browser open;
do
  which $cmd &> /dev/null
  if [ 0 == $? ]; then
    OPEN_CMD=$cmd
    break
  fi
done

echo Giving things a few seconds to spin up...
sleep 10

echo Opening Kibana in default browser
KIBANA_IP=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' kibana`
$OPEN_CMD "http://$KIBANA_IP/index.html#/dashboard/file/logstash.json"

sleep 3
echo All done!
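
# Optional sanity check (not part of the original script): after a minute or so,
# Elasticsearch should contain logstash-* entries whose _type is a container
# name, e.g. "kibana". Uncomment to try it against the containers started above.
#ES_IP=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' elasticsearch`
#curl -s "http://$ES_IP:9200/logstash-*/_search?q=_type:kibana&size=1"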

--------------------------------------------------------------------------------
/doc/teardown.sh:
--------------------------------------------------------------------------------
#!/bin/bash

docker stop collector
docker stop kibana
docker stop elasticsearch

docker rm collector
docker rm kibana
docker rm elasticsearch
--------------------------------------------------------------------------------
/dockergen.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Watch the Docker socket and regenerate /etc/fluent.conf from the template
# whenever containers start or stop, then restart the fluentd runit service.
exec /app/docker-gen -watch -notify "sv force-restart fluentd" /app/templates/fluentd.conf.tmpl /etc/fluent.conf
--------------------------------------------------------------------------------
/fluentd.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Run fluentd against the generated config, logging verbosely to a file.
exec /usr/local/bin/fluentd -c /etc/fluent.conf -vv >>/var/log/fluentd.log 2>&1
--------------------------------------------------------------------------------
/templates/fluentd.conf.tmpl:
--------------------------------------------------------------------------------
## File input
## read docker logs with tag=docker.container

{{range $key, $value := .}}
<source>
  type tail
  format json
  time_key time
  time_format %Y-%m-%dT%T.%LZ
  path /var/lib/docker/containers/{{ $value.ID }}/{{ $value.ID }}-json.log
  pos_file /var/lib/docker/containers/{{ $value.ID }}/{{ $value.ID }}-json.log.pos
  tag docker.container.{{ $value.Name }}
  # tag docker.container.{{printf "%.*s" 12 $value.ID}}
  rotate_wait 5
  read_from_head true
</source>
{{end}}

### wide matches won't stack up, so if you enable this, you'll lose elasticsearch output
# <match docker.**>
#   type stdout
# </match>

{{range $key, $value := .}}
<match docker.container.{{ $value.Name }}>
  type elasticsearch
  host es1
  port 9200
  index_name fluentd
  type_name {{ $value.Name }}
  logstash_format true
  buffer_type memory
  flush_interval 3
  retry_limit 17
  retry_wait 1.0
  num_threads 1
</match>
{{end}}
--------------------------------------------------------------------------------
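
The template above is rendered by docker-gen into /etc/fluent.conf inside the collector (see ```dockergen.sh```), and fluentd's output lands in /var/log/fluentd.log (see ```fluentd.sh```). A quick way to peek at both on a running collector, sketched under the assumption that the container is named ```collector``` as in the README and that your Docker has ```docker exec```:

```
docker exec collector cat /etc/fluent.conf
docker exec collector tail -n 50 /var/log/fluentd.log
```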