├── LICENSE.txt ├── README.md ├── batch.go ├── batcher.go ├── cloudwatch.go ├── docker ├── Dockerfile ├── build.sh └── modules.go ├── ec2info.go ├── factbook.yaml ├── make.sh ├── rendercontext.go └── uploader.go /LICENSE.txt: -------------------------------------------------------------------------------- 1 | OSI MIT License 2 | 3 | Copyright (c) 2016 Medidata Solutions, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | Copyright (C) 2015 Glider Labs, LLC 12 | 13 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 14 | 15 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | logspout-cloudwatch 2 | ================ 3 | A logspout adapter that pushes logs to the AWS Cloudwatch Logs service. 4 | 5 | ---------------- 6 | Overview 7 | ---------------- 8 | This software is a plugin for [logspout][1], which is a container-based application that collects Docker logs from the *other* containers run on a given Docker host. This plugin then sends those log messages on to Amazon's [Cloudwatch Logs][2] web service. 9 | 10 | 11 | ---------------- 12 | Features 13 | ---------------- 14 | 15 | * Runs as a single Docker container with access to the docker socket file -- no setup is required within the logged containers. Auto-detects Region when run in EC2, and uses IAM Role credentials when available. 16 | 17 | * By default, names each Cloudwatch Log Stream after its container name, and groups the streams by host name. But also... 18 | 19 | * Provides flexible, dynamic control of stream and group names, based on [templates][3]. Can assign names based on container [labels][4] or environment variables. Defines host-wide defaults while allowing per-container overrides. 
20 | 21 | * Batches messages by stream, but periodically flushes all batches to AWS, based on a configurable timeout. 22 | 23 | 24 | ---------------- 25 | Installation 26 | ---------------- 27 | The software runs in a container, so just run `docker pull mdsol/logspout`. 28 | 29 | 30 | ---------------- 31 | Workstation Usage / Outside EC2 32 | ---------------- 33 | 34 | First, make sure you're not running any containers that might be logging sensitive information -- that is, logs that you *don't* want showing up in your [Cloudwatch console][5]. 35 | 36 | 1. To test the plugin, run a container that just prints out the date every few seconds: 37 | 38 | docker run --name=echo3 -d ubuntu bash -c \ 39 | 'while true; do echo "Hi, the date is $(date)" ; sleep 3 ; done' 40 | 41 | Notice that the container is run _without_ the `-t` option. Logspout will not process output from containers with a TTY attached. 42 | 43 | 2. Now run the logspout container with a route URL of `cloudwatch://us-east-1?DEBUG=1` (substitute your desired AWS region). The plugin needs AWS credentials to push data to the service, so if your credentials are set up in the [usual way][6] (at `~/.aws/credentials`), you can run: 44 | 45 | docker run -h $(hostname) -v ~/.aws/credentials:/root/.aws/credentials \ 46 | --volume=/var/run/docker.sock:/tmp/docker.sock --name=logspout \ 47 | --rm -it mdsol/logspout 'cloudwatch://us-east-1?DEBUG=1&NOEC2' 48 | 49 | 50 | Notice the `-h $(hostname -f)` parameter; you probably want the logging container name to share hostnames with the Docker host, because the default behavior is to group logs by hostname. The `DEBUG=1` route option allows you to make sure each batch of logs gets submitted to AWS without errors. The `NOEC2` option tells the plugin not to look for the EC2 Metadata service. 51 | 52 | 3. Navigate to the [Cloudwatch console][5], click on `Logs`, then look for a Log Group named after your workstation's hostname. 
You should see a Log Stream within it named `echo3`, which should be receiving your container's output messages every four seconds. 53 | 54 | 55 | ---------------- 56 | Production Usage / Inside EC2 57 | ---------------- 58 | 59 | 1. Logspout needs the following policy permissions to create and write log streams and groups. Make sure your EC2 instance has a Role that includes the following: 60 | 61 | "Statement": [{ 62 | "Action": [ 63 | "logs:CreateLogGroup", 64 | "logs:CreateLogStream", 65 | "logs:DescribeLogGroups", 66 | "logs:DescribeLogStreams", 67 | "logs:PutLogEvents" 68 | ], 69 | "Effect": "Allow", 70 | "Resource": "*" 71 | }] 72 | 73 | 2. Now run the logspout container with a route URL of `cloudwatch://auto`. The AWS Region and the IAM Role credentials will be read from the EC2 Metadata Service. 74 | 75 | docker run -h $(hostname) -dt --name=logspout \ 76 | --volume=/var/run/docker.sock:/tmp/docker.sock \ 77 | mdsol/logspout 'cloudwatch://auto' 78 | 79 | The `-d` and `-t` flags are optional, depending on whether you want to background the process, or run it under some supervisory daemon. But if you *do* omit the `-t` flag, you can use the environment variable `LOGSPOUT=ignore` to prevent Logspout from attempting to post its own output to AWS. 80 | 81 | 82 | ---------------- 83 | Customizing the Group and Stream Names 84 | ---------------- 85 | 86 | The first time a message is received from a given container, its Log Group and Log Stream names are computed. When planning how to group your logs, make sure the combination of these two will be unique, because if more than one container tries to write to a given stream simultaneously, errors will occur. 87 | 88 | By default, each Log Stream is named after its associated container, and each stream's Log Group is the hostname of the container running Logspout. 
These two values can be overridden by setting the Environment variables `LOGSPOUT_GROUP` and `LOGSPOUT_STREAM` on the Logspout container, or on any individual log-producing container (container-specific values take precedence). In this way, precomputed values can be set for each container. 89 | 90 | Furthermore, when the Log Group and Log Stream names are computed, these Environment-based values are passed through Go's standard [template engine][3], and provided with the following render context: 91 | 92 | 93 | type RenderContext struct { 94 | Host string // container host name 95 | Env map[string]string // container ENV 96 | Labels map[string]string // container Labels 97 | Name string // container Name 98 | ID string // container ID 99 | LoggerHost string // hostname of logging container (os.Hostname) 100 | InstanceID string // EC2 Instance ID 101 | Region string // EC2 region 102 | } 103 | 104 | So you may use the `{{}}` template-syntax to build complex Log Group and Log Stream names from container Labels, or from other Env vars. Here are some examples: 105 | 106 | # Prefix the default stream name with the EC2 Instance ID: 107 | LOGSPOUT_STREAM={{.InstanceID}}-{{.Name}} 108 | 109 | # Group streams by application and workflow stage (dev, prod, etc.), 110 | # where these values are set as container environment vars: 111 | LOGSPOUT_GROUP={{.Env.APP_NAME}}-{{.Env.STAGE_NAME}} 112 | 113 | # Or use container Labels to do the same thing: 114 | LOGSPOUT_GROUP={{.Labels.APP_NAME}}-{{.Labels.STAGE_NAME}} 115 | 116 | # If the labels contain the period (.) 
character, you can do this: 117 | LOGSPOUT_GROUP={{.Lbl "com.mycompany.loggroup"}} 118 | LOGSPOUT_STREAM={{.Lbl "com.mycompany.logstream"}} 119 | 120 | Complex settings like this are most easily applied to contaners by putting them into a separate "environment file", and passing its path to docker at runtime: `docker run --env-file /path/to/file [...]` 121 | 122 | 123 | ---------------- 124 | Further Configuration 125 | ---------------- 126 | 127 | * Adding the route option `NOEC2`, as in `cloudwatch://[region]?NOEC2` causes the adapter to skip its usual check for the EC2 Metadata service, for faster startup time when running outside EC2. 128 | 129 | * Adding the route option `DELAY=8`, as in `cloudwatch://[region]?DELAY=8` causes the adapter to push all logs to AWS every 8 seconds instead of the default of 4 seconds. If you run this adapter at scale, you may need to tune this value to avoid overloading your request rate limit on the Cloudwatch Logs API. 130 | 131 | 132 | ---------------- 133 | Contribution / Development 134 | ---------------- 135 | This software was created by Benton Roberts _(broberts@mdsol.com)_ 136 | 137 | By default, the Docker image builds from the Go source on GitHub, not from local disk, as per the instructions for [Logspout custom builds][7]. 
138 | 139 | 140 | 141 | [1]: https://github.com/gliderlabs/logspout 142 | [2]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html 143 | [3]: https://golang.org/pkg/text/template/ 144 | [4]: https://docs.docker.com/engine/userguide/labels-custom-metadata/ 145 | [5]: https://console.aws.amazon.com/cloudwatch/home?#logs 146 | [6]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html 147 | [7]: https://github.com/gliderlabs/logspout/tree/master/custom 148 | -------------------------------------------------------------------------------- /batch.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import "time" 4 | 5 | // CloudwatchMessage is a simple JSON input to Cloudwatch. 6 | type CloudwatchMessage struct { 7 | Message string `json:"message"` 8 | Group string `json:"group"` 9 | Stream string `json:"stream"` 10 | Time time.Time `json:"time"` 11 | Container string `json:"container"` 12 | } 13 | 14 | type CloudwatchBatch struct { 15 | Msgs []CloudwatchMessage 16 | Size int64 17 | } 18 | 19 | // Rules for creating Cloudwatch Log batches, from https://goo.gl/TrIN8c 20 | const MAX_BATCH_COUNT = 10000 // messages 21 | const MAX_BATCH_SIZE = 1048576 // bytes 22 | const MSG_OVERHEAD = 26 // bytes 23 | 24 | func msgSize(msg CloudwatchMessage) int64 { 25 | return int64((len(msg.Message) * 8) + MSG_OVERHEAD) 26 | } 27 | 28 | func NewCloudwatchBatch() *CloudwatchBatch { 29 | return &CloudwatchBatch{ 30 | Msgs: []CloudwatchMessage{}, 31 | Size: 0, 32 | } 33 | } 34 | 35 | func (b *CloudwatchBatch) Append(msg CloudwatchMessage) { 36 | b.Msgs = append(b.Msgs, msg) 37 | b.Size = b.Size + msgSize(msg) 38 | } 39 | -------------------------------------------------------------------------------- /batcher.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "strconv" 7 | "time" 
8 | 9 | "github.com/gliderlabs/logspout/router" 10 | ) 11 | 12 | const DEFAULT_DELAY = 4 //seconds 13 | 14 | // CloudwatchBatcher receieves Cloudwatch messages on its input channel, 15 | // stores them in CloudwatchBatches until enough data is ready to send, then 16 | // sends each CloudwatchMessageBatch on its output channel. 17 | type CloudwatchBatcher struct { 18 | Input chan CloudwatchMessage 19 | output chan CloudwatchBatch 20 | route *router.Route 21 | timer chan bool 22 | // maintain a batch for each container, indexed by its name 23 | batches map[string]*CloudwatchBatch 24 | } 25 | 26 | // constructor for CloudwatchBatcher - requires the adapter 27 | func NewCloudwatchBatcher(adapter *CloudwatchAdapter) *CloudwatchBatcher { 28 | batcher := CloudwatchBatcher{ 29 | Input: make(chan CloudwatchMessage), 30 | output: NewCloudwatchUploader(adapter).Input, 31 | batches: map[string]*CloudwatchBatch{}, 32 | timer: make(chan bool), 33 | route: adapter.Route, 34 | } 35 | go batcher.Start() 36 | return &batcher 37 | } 38 | 39 | // Main loop for the Batcher - just sorts each messages into a batch, but 40 | // submits the batch first and replaces it if the message is too big. 41 | func (b *CloudwatchBatcher) Start() { 42 | go b.RunTimer() 43 | for { // run forever, and... 
44 | select { // either batch up a message, or respond to the timer 45 | case msg := <-b.Input: // a message - put it into its slice 46 | if len(msg.Message) == 0 { // empty messages are not allowed 47 | break 48 | } 49 | // get or create the correct slice of messages for this message 50 | if _, exists := b.batches[msg.Container]; !exists { 51 | b.batches[msg.Container] = NewCloudwatchBatch() 52 | } 53 | // if Msg is too long for the current batch, submit the batch 54 | if (b.batches[msg.Container].Size+msgSize(msg)) > MAX_BATCH_SIZE || 55 | len(b.batches[msg.Container].Msgs) >= MAX_BATCH_COUNT { 56 | b.output <- *b.batches[msg.Container] 57 | b.batches[msg.Container] = NewCloudwatchBatch() 58 | } 59 | thisBatch := b.batches[msg.Container] 60 | thisBatch.Append(msg) 61 | case <-b.timer: // submit and delete all existing batches 62 | for container, batch := range b.batches { 63 | b.output <- *batch 64 | delete(b.batches, container) 65 | } 66 | } 67 | } 68 | } 69 | 70 | func (b *CloudwatchBatcher) RunTimer() { 71 | delayText := strconv.Itoa(DEFAULT_DELAY) 72 | if routeDelay, isSet := b.route.Options[`DELAY`]; isSet { 73 | delayText = routeDelay 74 | } 75 | if envDelay := os.Getenv(`DELAY`); envDelay != "" { 76 | delayText = envDelay 77 | } 78 | delay, err := strconv.Atoi(delayText) 79 | if err != nil { 80 | log.Printf("WARNING: ERROR parsing DELAY %s, using default of %d\n", 81 | delayText, DEFAULT_DELAY) 82 | delay = DEFAULT_DELAY 83 | } 84 | for { 85 | time.Sleep(time.Duration(delay) * time.Second) 86 | b.timer <- true 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /cloudwatch.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "strings" 7 | "time" 8 | 9 | "github.com/fsouza/go-dockerclient" 10 | "github.com/gliderlabs/logspout/router" 11 | ) 12 | 13 | func init() { 14 | 
router.AdapterFactories.Register(NewCloudwatchAdapter, "cloudwatch") 15 | } 16 | 17 | // CloudwatchAdapter is an adapter that streams JSON to AWS CloudwatchLogs. 18 | // It mostly just checkes ENV vars and other container info to determine 19 | // the LogGroup and LogStream for each message, then sends each message 20 | // on to a CloudwatchBatcher, which batches messages for upload to AWS. 21 | type CloudwatchAdapter struct { 22 | Route *router.Route 23 | OsHost string 24 | Ec2Region string 25 | Ec2Instance string 26 | 27 | client *docker.Client 28 | batcher *CloudwatchBatcher // batches up messages by log group and stream 29 | groupnames map[string]string // maps container names to log groups 30 | streamnames map[string]string // maps container names to log streams 31 | } 32 | 33 | // NewCloudwatchAdapter creates a CloudwatchAdapter for the current region. 34 | func NewCloudwatchAdapter(route *router.Route) (router.LogAdapter, error) { 35 | dockerHost := `unix:///var/run/docker.sock` 36 | if envVal := os.Getenv(`DOCKER_HOST`); envVal != "" { 37 | dockerHost = envVal 38 | } 39 | client, err := docker.NewClient(dockerHost) 40 | if err != nil { 41 | return nil, err 42 | } 43 | hostname, err := os.Hostname() 44 | if err != nil { 45 | return nil, err 46 | } 47 | ec2info, err := NewEC2Info(route) // get info from EC2 48 | if err != nil { 49 | return nil, err 50 | } 51 | adapter := CloudwatchAdapter{ 52 | Route: route, 53 | OsHost: hostname, 54 | Ec2Instance: ec2info.InstanceID, 55 | Ec2Region: ec2info.Region, 56 | client: client, 57 | groupnames: map[string]string{}, 58 | streamnames: map[string]string{}, 59 | } 60 | adapter.batcher = NewCloudwatchBatcher(&adapter) 61 | return &adapter, nil 62 | } 63 | 64 | // Stream implements the router.LogAdapter interface. 
65 | func (a *CloudwatchAdapter) Stream(logstream chan *router.Message) { 66 | for m := range logstream { 67 | // determine the log group name and log stream name 68 | var groupName, streamName string 69 | // first, check the in-memory cache so this work is done per-container 70 | if cachedGroup, isCached := a.groupnames[m.Container.ID]; isCached { 71 | groupName = cachedGroup 72 | } 73 | if cachedStream, isCached := a.streamnames[m.Container.ID]; isCached { 74 | streamName = cachedStream 75 | } 76 | if (streamName == "") || (groupName == "") { 77 | // make a render context with the required info 78 | containerData, err := a.client.InspectContainer(m.Container.ID) 79 | if err != nil { 80 | log.Println("cloudwatch: error inspecting container:", err) 81 | continue 82 | } 83 | context := RenderContext{ 84 | Env: parseEnv(m.Container.Config.Env), 85 | Labels: containerData.Config.Labels, 86 | Name: strings.TrimPrefix(m.Container.Name, `/`), 87 | ID: m.Container.ID, 88 | Host: m.Container.Config.Hostname, 89 | LoggerHost: a.OsHost, 90 | InstanceID: a.Ec2Instance, 91 | Region: a.Ec2Region, 92 | } 93 | groupName = a.renderEnvValue(`LOGSPOUT_GROUP`, &context, a.OsHost) 94 | streamName = a.renderEnvValue(`LOGSPOUT_STREAM`, &context, context.Name) 95 | a.groupnames[m.Container.ID] = groupName // cache the group name 96 | a.streamnames[m.Container.ID] = streamName // and the stream name 97 | } 98 | a.batcher.Input <- CloudwatchMessage{ 99 | Message: m.Data, 100 | Group: groupName, 101 | Stream: streamName, 102 | Time: time.Now(), 103 | Container: m.Container.ID, 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gliderlabs/logspout:master 2 | -------------------------------------------------------------------------------- /docker/build.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | apk add --update go build-base git mercurial ca-certificates 4 | mkdir -p /go/src/github.com/gliderlabs 5 | cp -r /src /go/src/github.com/gliderlabs/logspout 6 | cd /go/src/github.com/gliderlabs/logspout 7 | export GOPATH=/go 8 | go get 9 | go build -ldflags "-X main.Version=$1" -o /bin/logspout 10 | apk del go git mercurial build-base 11 | rm -rf /go /var/cache/apk/* /root/.glide 12 | 13 | # backwards compatibility 14 | ln -fs /tmp/docker.sock /var/run/docker.sock 15 | -------------------------------------------------------------------------------- /docker/modules.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | _ "github.com/gliderlabs/logspout/adapters/syslog" 5 | _ "github.com/gliderlabs/logspout/httpstream" 6 | _ "github.com/gliderlabs/logspout/routesapi" 7 | _ "github.com/gliderlabs/logspout/healthcheck" 8 | _ "github.com/mdsol/logspout-cloudwatch" 9 | ) 10 | -------------------------------------------------------------------------------- /ec2info.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | 8 | "github.com/aws/aws-sdk-go/aws/ec2metadata" 9 | "github.com/aws/aws-sdk-go/aws/session" 10 | "github.com/gliderlabs/logspout/router" 11 | ) 12 | 13 | type EC2Info struct { 14 | InstanceID string 15 | Region string 16 | } 17 | 18 | func NewEC2Info(route *router.Route) (EC2Info, error) { 19 | _, skip_ec2 := route.Options[`NOEC2`] 20 | if skip_ec2 || (os.Getenv(`NOEC2`) != "") { 21 | return EC2Info{}, nil 22 | } 23 | // get my instance ID 24 | mySession := session.New() 25 | metadataSvc := ec2metadata.New(mySession) 26 | if !metadataSvc.Available() { 27 | log.Println("cloudwatch: WARNING EC2 Metadata service not available") 28 | return EC2Info{}, nil 29 | } 30 | instance_id, err 
:= metadataSvc.GetMetadata(`instance-id`) 31 | if err != nil { 32 | return EC2Info{}, fmt.Errorf("ERROR getting instance ID: %s", err) 33 | } 34 | region, err := metadataSvc.Region() 35 | if err != nil { 36 | return EC2Info{}, fmt.Errorf("ERROR getting EC2 region: %s", err) 37 | } 38 | return EC2Info{ 39 | InstanceID: instance_id, 40 | Region: region, 41 | }, nil 42 | } 43 | -------------------------------------------------------------------------------- /factbook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: backstage.io/v1alpha1 # This is a constant, just leave it 3 | kind: Component 4 | spec: 5 | type: other # web_backend == only API, web_frontend == only UI, web_fullstack == both, mobile == mobile app 6 | lifecycle: production # values: experimental, production or deprecated. 7 | owner: devtools@mdsol.com 8 | metadata: 9 | json_schema: "https://github.com/mdsol/platform-standards/tree/master/schemas/v1alpha1.schema.json" 10 | name: Logspout Cloudwatch 11 | description: A logspout adapter that pushes logs to the AWS Cloudwatch Logs service. 12 | aliases: [] # Other names for this service 13 | security: 14 | authentication: ["none"] # List all the authentication methods used in our service. See the json schema for options. 15 | network_accessiblity: ["private"] # `private` == internal vpn only, `public` == directly accesible outside the vpn 16 | data_types: [] # List the type of data used in your service. 17 | teams: 18 | - name: "Devtools" 19 | number: 34 20 | email: devtools@mdsol.com 21 | people: 22 | - role: "project manager" 23 | email: mcatenacci@mdsol.com 24 | - role: "engineer" 25 | email: broberts@mdsol.com 26 | channels: # List all the places were we can find you. 
Mainly slack channels 27 | - url: https://mdsol.slack.com/messages/devtools-public-forum 28 | automated_messaging: false 29 | role: slack 30 | -------------------------------------------------------------------------------- /make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # builds the mdsol/logspout-cloudwatch docker image 3 | set -xe 4 | 5 | docker build --no-cache --tag=mdsol/logspout -f docker/Dockerfile docker 6 | -------------------------------------------------------------------------------- /rendercontext.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strings" 9 | "text/template" 10 | ) 11 | 12 | type RenderContext struct { 13 | Host string // container host name 14 | Env map[string]string // container ENV 15 | Labels map[string]string // container Labels 16 | Name string // container Name 17 | ID string // container ID 18 | LoggerHost string // hostname of logging container (os.Hostname) 19 | InstanceID string // EC2 Instance ID 20 | Region string // EC2 region 21 | } 22 | 23 | // renders a label value based on a given key 24 | func (r *RenderContext) Lbl(key string) (string, error) { 25 | if val, exists := r.Labels[key]; exists { 26 | return val, nil 27 | } 28 | return "", fmt.Errorf("ERROR reading container label %s", key) 29 | } 30 | 31 | // HELPER FUNCTIONS 32 | 33 | // Searches the OS environment, then the route options, then the render context 34 | // Env for a given key, then uses the value (or the provided default value) 35 | // as template text, which is then rendered in the given context. 36 | // The rendered result is returned - or the default value on any errors. 
37 | func (a *CloudwatchAdapter) renderEnvValue( 38 | envKey string, context *RenderContext, defaultVal string) string { 39 | finalVal := defaultVal 40 | if logspoutEnvVal := os.Getenv(envKey); logspoutEnvVal != "" { 41 | finalVal = logspoutEnvVal // use $envKey, if set 42 | } 43 | if routeOptionsVal, exists := a.Route.Options[envKey]; exists { 44 | finalVal = routeOptionsVal 45 | } 46 | if containerEnvVal, exists := context.Env[envKey]; exists { 47 | finalVal = containerEnvVal // or, $envKey from container! 48 | } 49 | template, err := template.New("template").Parse(finalVal) 50 | if err != nil { 51 | log.Println("cloudwatch: error parsing template", finalVal, ":", err) 52 | return defaultVal 53 | } else { // render the templates in the generated context 54 | var renderedValue bytes.Buffer 55 | err = template.Execute(&renderedValue, context) 56 | if err != nil { 57 | log.Printf("cloudwatch: error rendering template %s : %s\n", 58 | finalVal, err) 59 | return defaultVal 60 | } 61 | finalVal = renderedValue.String() 62 | } 63 | return finalVal 64 | } 65 | 66 | func parseEnv(envLines []string) map[string]string { 67 | env := map[string]string{} 68 | for _, line := range envLines { 69 | fields := strings.Split(line, `=`) 70 | if len(fields) > 1 { 71 | env[fields[0]] = strings.Join(fields[1:], `=`) 72 | } 73 | } 74 | return env 75 | } 76 | -------------------------------------------------------------------------------- /uploader.go: -------------------------------------------------------------------------------- 1 | package cloudwatch 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strings" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/session" 12 | "github.com/aws/aws-sdk-go/service/cloudwatchlogs" 13 | ) 14 | 15 | // CloudwatchUploader receieves CloudwatchBatches on its input channel, 16 | // and sends them on to the AWS Cloudwatch Logs endpoint. 
17 | type CloudwatchUploader struct { 18 | Input chan CloudwatchBatch 19 | svc *cloudwatchlogs.CloudWatchLogs 20 | tokens map[string]string 21 | debugSet bool 22 | } 23 | 24 | func NewCloudwatchUploader(adapter *CloudwatchAdapter) *CloudwatchUploader { 25 | region := adapter.Route.Address 26 | if (region == "auto") || (region == "") { 27 | if adapter.Ec2Region == "" { 28 | log.Println("cloudwatch: ERROR - could not get region from EC2") 29 | } else { 30 | region = adapter.Ec2Region 31 | } 32 | } 33 | debugSet := false 34 | _, debugOption := adapter.Route.Options[`DEBUG`] 35 | if debugOption || (os.Getenv(`DEBUG`) != "") { 36 | debugSet = true 37 | log.Println("cloudwatch: Creating AWS Cloudwatch client for region", 38 | region) 39 | } 40 | uploader := CloudwatchUploader{ 41 | Input: make(chan CloudwatchBatch), 42 | tokens: map[string]string{}, 43 | debugSet: debugSet, 44 | svc: cloudwatchlogs.New(session.New(), 45 | &aws.Config{Region: aws.String(region)}), 46 | } 47 | go uploader.Start() 48 | return &uploader 49 | } 50 | 51 | // Main loop for the Uploader - POSTs each batch to AWS Cloudwatch Logs, 52 | // while keeping track of the unique sequence token for each log stream. 
53 | func (u *CloudwatchUploader) Start() { 54 | for batch := range u.Input { 55 | msg := batch.Msgs[0] 56 | u.log("Submitting batch for %s-%s (length %d, size %v)", 57 | msg.Group, msg.Stream, len(batch.Msgs), batch.Size) 58 | 59 | // fetch and cache the upload sequence token 60 | var token *string 61 | if cachedToken, isCached := u.tokens[msg.Container]; isCached { 62 | token = &cachedToken 63 | u.log("Got token from cache: %s", *token) 64 | } else { 65 | u.log("Fetching token from AWS...") 66 | awsToken, err := u.getSequenceToken(msg) 67 | if err != nil { 68 | u.log("ERROR:", err) 69 | continue 70 | } 71 | if awsToken != nil { 72 | u.tokens[msg.Container] = *(awsToken) 73 | u.log("Got token from AWS:", *awsToken) 74 | token = awsToken 75 | } 76 | } 77 | 78 | // generate the array of InputLogEvent from the batch's contents 79 | events := []*cloudwatchlogs.InputLogEvent{} 80 | for _, msg := range batch.Msgs { 81 | event := cloudwatchlogs.InputLogEvent{ 82 | Message: aws.String(msg.Message), 83 | Timestamp: aws.Int64(msg.Time.UnixNano() / 1000000), 84 | } 85 | events = append(events, &event) 86 | } 87 | params := &cloudwatchlogs.PutLogEventsInput{ 88 | LogEvents: events, 89 | LogGroupName: aws.String(msg.Group), 90 | LogStreamName: aws.String(msg.Stream), 91 | SequenceToken: token, 92 | } 93 | 94 | u.log("POSTing PutLogEvents to %s-%s with %d messages, %d bytes", 95 | msg.Group, msg.Stream, len(batch.Msgs), batch.Size) 96 | resp, err := u.svc.PutLogEvents(params) 97 | if err != nil { 98 | u.log(err.Error()) 99 | continue 100 | } 101 | u.log("Got 200 response") 102 | if resp.NextSequenceToken != nil { 103 | u.log("Caching new sequence token for %s-%s: %s", 104 | msg.Group, msg.Stream, *resp.NextSequenceToken) 105 | u.tokens[msg.Container] = *resp.NextSequenceToken 106 | } 107 | } 108 | } 109 | 110 | // AWS CLIENT METHODS 111 | 112 | // returns the next sequence token for the log stream associated 113 | // with the given message's group and stream. 
Creates the stream as needed. 114 | func (u *CloudwatchUploader) getSequenceToken(msg CloudwatchMessage) (*string, 115 | error) { 116 | group, stream := msg.Group, msg.Stream 117 | groupExists, err := u.groupExists(group) 118 | if err != nil { 119 | return nil, err 120 | } 121 | if !groupExists { 122 | err = u.createGroup(group) 123 | if err != nil { 124 | return nil, err 125 | } 126 | } 127 | params := &cloudwatchlogs.DescribeLogStreamsInput{ 128 | LogGroupName: aws.String(group), 129 | LogStreamNamePrefix: aws.String(stream), 130 | } 131 | u.log("Describing stream %s-%s...", group, stream) 132 | resp, err := u.svc.DescribeLogStreams(params) 133 | if err != nil { 134 | return nil, err 135 | } 136 | if count := len(resp.LogStreams); count > 1 { // too many matching streams! 137 | return nil, errors.New(fmt.Sprintf( 138 | "%d streams match group %s, stream %s!", count, group, stream)) 139 | } 140 | if len(resp.LogStreams) == 0 { // no matching streams - create one and retry 141 | if err = u.createStream(group, stream); err != nil { 142 | return nil, err 143 | } 144 | token, err := u.getSequenceToken(msg) 145 | return token, err 146 | } 147 | return resp.LogStreams[0].UploadSequenceToken, nil 148 | } 149 | 150 | func (u *CloudwatchUploader) groupExists(group string) (bool, error) { 151 | u.log("Checking for group: %s...", group) 152 | resp, err := u.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{ 153 | LogGroupNamePrefix: aws.String(group), 154 | }) 155 | if err != nil { 156 | return false, err 157 | } 158 | for _, matchedGroup := range resp.LogGroups { 159 | if *matchedGroup.LogGroupName == group { 160 | return true, nil 161 | } 162 | } 163 | return false, nil 164 | } 165 | 166 | func (u *CloudwatchUploader) createGroup(group string) error { 167 | u.log("Creating group: %s...", group) 168 | params := &cloudwatchlogs.CreateLogGroupInput{ 169 | LogGroupName: aws.String(group), 170 | } 171 | if _, err := u.svc.CreateLogGroup(params); err != nil { 172 | 
return err 173 | } 174 | return nil 175 | } 176 | 177 | func (u *CloudwatchUploader) createStream(group, stream string) error { 178 | u.log("Creating stream for group %s, stream %s...", group, stream) 179 | params := &cloudwatchlogs.CreateLogStreamInput{ 180 | LogGroupName: aws.String(group), 181 | LogStreamName: aws.String(stream), 182 | } 183 | if _, err := u.svc.CreateLogStream(params); err != nil { 184 | return err 185 | } 186 | return nil 187 | } 188 | 189 | // HELPER METHODS 190 | 191 | func (u *CloudwatchUploader) log(format string, args ...interface{}) { 192 | if u.debugSet { 193 | msg := fmt.Sprintf(format, args...) 194 | msg = fmt.Sprintf("cloudwatch: %s", msg) 195 | if !strings.HasSuffix(msg, "\n") { 196 | msg = fmt.Sprintf("%s\n", msg) 197 | } 198 | log.Print(msg) 199 | } 200 | } 201 | --------------------------------------------------------------------------------