├── VERSION
├── requirements.txt
├── setup.cfg
├── .gitignore
├── .travis.yml
├── sample_templates
│   ├── README.md
│   ├── sqs.yml.j2
│   ├── asg.yml.j2
│   ├── ec2.yml.j2
│   ├── billing.yml.j2
│   ├── dynamodb.yml.j2
│   ├── ebs.yml.j2
│   ├── cloudfront.yml.j2
│   ├── kinesisapp.yml.js
│   ├── kinesis.yml.js
│   ├── elb.yml.j2
│   ├── rds.yml.j2
│   ├── elasticache.yml.j2
│   └── emr.yml.j2
├── config.yaml.example
├── Makefile
├── setup.py
├── CONTRIBUTING.rst
├── test_plumbum.py
├── README.rst
├── test_leadbutt.py
├── leadbutt.py
├── plumbum.py
└── LICENSE
/VERSION:
--------------------------------------------------------------------------------
1 | 0.11.0
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | boto==2.48.0
2 | PyYAML==3.12
3 | docopt==0.6.2
4 | Jinja2==2.10
5 | retrying==1.3.3
6 |
7 | # tests
8 | mock==2.0.0
9 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal = 1
3 |
4 | [flake8]
5 | # 79 is preferred, but don't have a cow until you go over 100
6 | max-line-length = 100
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 |
3 | *.swp
4 |
5 |
6 | # Private User Data
7 | .env
8 | config.yaml
9 |
10 |
11 | # created during testing
12 | .tox
13 |
14 |
15 | # created during packaging
16 | *.egg-info
17 | build/
18 | dist/
19 | MANIFEST
20 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "2.7"
4 | - "3.4"
5 | - "3.5"
6 | - "3.6"
7 | install: pip install -r requirements.txt
8 | script: make test
9 |
10 | # HACK needed for bad Boto config on TravisCI https://github.com/travis-ci/travis-ci/issues/7940
11 | sudo: false
12 |
--------------------------------------------------------------------------------
/sample_templates/README.md:
--------------------------------------------------------------------------------
1 | Here are some sample jinja2 templates you can use with `plumbum` to generate
2 | configuration files for `leadbutt`.
3 |
4 |
5 | Usage
6 | -----
7 |
8 | ### Create a config file for getting metrics from ec2:
9 |
10 | plumbum sample_templates/ec2.yml.j2 ec2 > ec2.yml
11 |
12 | ### Using that config file
13 |
14 | leadbutt -c ec2.yml | nc -q0 graphite.local 2003
15 |
16 |
17 | For more, see `plumbum --help`
18 |
--------------------------------------------------------------------------------
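
For the curious, the `plumbum sample_templates/ec2.yml.j2 ec2` step above boils down to a short bit of Python. The sketch below is a simplified equivalent (it assumes boto credentials are configured and uses `us-east-1`; the real `plumbum` also validates the region, applies `-f` filters, and supports the other namespaces):

    import os.path

    import boto.ec2
    import jinja2

    region = 'us-east-1'
    # what plumbum's list_ec2() does: fetch the instances for the region
    resources = boto.ec2.connect_to_region(region).get_only_instances()

    # every template is rendered with `filters`, `region`, and `resources`
    loader = jinja2.FileSystemLoader(os.path.abspath('sample_templates'))
    template = jinja2.Environment(loader=loader).get_template('ec2.yml.j2')
    print(template.render(filters={}, region=region, resources=resources))
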
/sample_templates/sqs.yml.j2:
--------------------------------------------------------------------------------
1 | # Sample config.yaml
2 | #
3 | Auth:
4 | region: "{{ region }}"
5 | Metrics:
6 | {%- for sqs in resources %}
7 | - Namespace: "AWS/SQS"
8 | MetricName: "NumberOfMessagesReceived"
9 | Statistics:
10 | - "Sum"
11 | Unit: "Count"
12 | Dimensions:
13 | QueueName: {{ sqs.name }}
14 | {%- endfor %}
15 | Options:
16 | Count: 5
17 | Period: 5
18 | Formatter: 'cloudwatch.%(Namespace)s.{{ region }}.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s'
19 |
--------------------------------------------------------------------------------
/sample_templates/asg.yml.j2:
--------------------------------------------------------------------------------
1 | # Sample config.yaml
2 | #
3 | Auth:
4 | region: "{{ region }}"
5 | Metrics:
6 | {% for asg in resources %}
7 | - Namespace: "AWS/EC2"
8 | MetricName: "CPUUtilization"
9 | Statistics:
10 | - "Maximum"
11 | - "Average"
12 | Unit: "Percent"
13 | Dimensions:
14 | AutoScalingGroupName: {{ asg.name }}
15 | {% endfor %}
16 | Options:
17 | Count: 3
18 | Period: 5
19 | Formatter: 'cloudwatch.aws.asg.{{ region }}.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s'
20 |
--------------------------------------------------------------------------------
/sample_templates/ec2.yml.j2:
--------------------------------------------------------------------------------
1 | # Sample config.yaml
2 | #
3 | Auth:
4 | region: "{{ region }}"
5 | Metrics:
6 | {%- for instance in resources %}
7 | - Namespace: "AWS/EC2"
8 | MetricName: "CPUUtilization"
9 | Statistics:
10 | - "Maximum"
11 | - "Average"
12 | Unit: "Percent"
13 | Dimensions:
14 | InstanceId: "{{ instance.id }}"
15 | Options:
16 | {#- I'm assuming my tag names are safe to use as metric names here #}
17 | Formatter: 'cloudwatch.%(Namespace)s.{{ instance.tags['Name'] }}.%(MetricName)s.%(statistic)s.%(Unit)s'
18 | Period: 5
19 | {% endfor %}
20 |
--------------------------------------------------------------------------------
/sample_templates/billing.yml.j2:
--------------------------------------------------------------------------------
1 | {#- config template for billing -#}
2 |
3 | Auth:
4 | region: "{{ region }}"
5 |
6 | Metrics:
7 | {%- for service in resources %}
8 | - Namespace: "AWS/Billing"
9 | MetricName: "{{ service.name }}"
10 | Statistics:
11 | - "Maximum"
12 | Unit: None
13 | Dimensions:
14 |   {%- if 'ServiceName' in service.dimensions %}
15 | ServiceName: "{{ service.dimensions.get('ServiceName')[0] }}"
16 | {%- endif %}
17 | Currency: "{{ service.dimensions.get('Currency')[0] }}"
18 | Options:
19 |     {%- if 'ServiceName' in service.dimensions %}
20 | Formatter: 'cloudwatch.%(Namespace)s.{{ service.dimensions.get('ServiceName')[0] }}.%(MetricName)s.%(statistic)s.%(Unit)s'
21 | {%- else %}
22 | Formatter: 'cloudwatch.%(Namespace)s.%(MetricName)s.%(statistic)s.%(Unit)s'
23 | {%- endif %}
24 | Period: 240
25 | {% endfor %}
26 |
--------------------------------------------------------------------------------
/sample_templates/dynamodb.yml.j2:
--------------------------------------------------------------------------------
1 | {%- set metrics = {'ConsumedReadCapacityUnits': {'stat': ['Minimum', 'Maximum', 'Sum', 'Average'], 'unit': 'Count'},
2 | 'ConsumedWriteCapacityUnits': {'stat': ['Minimum', 'Maximum', 'Sum', 'Average'], 'unit': 'Count'},
3 | } -%}
4 |
5 | # If connecting to a different region other than default, set region
6 | Auth:
7 | region: "{{ region }}"
8 | Metrics:
9 | {%- for table_name in resources %}
10 | {%- for metric in metrics %}
11 | - Namespace: "AWS/DynamoDB"
12 | MetricName: "{{ metric }}"
13 | Statistics: {{ metrics[metric]['stat'] }}
14 | Unit: "{{ metrics[metric]['unit'] }}"
15 | Dimensions:
16 | TableName: "{{ table_name }}"
17 | Options:
18 | Formatter: 'cloudwatch.%(Namespace)s.{{ table_name }}.%(MetricName)s.%(statistic)s.%(Unit)s'
19 | Period: 1
20 | {% endfor %}
21 | {% endfor %}
22 |
23 |
--------------------------------------------------------------------------------
/sample_templates/ebs.yml.j2:
--------------------------------------------------------------------------------
1 | {%- set metrics = {
2 | 'VolumeReadOps': {'stat': 'Sum', 'unit': 'Count'},
3 | 'VolumeWriteOps': {'stat': 'Sum', 'unit': 'Count'},
4 | 'VolumeQueueLength': {'stat': 'Sum', 'unit': 'Count'},
5 | 'BurstBalance': {'stat': 'Average', 'unit': 'Percent'},
6 | }
7 | -%}
8 |
9 | Auth:
10 | region: "{{ region }}"
11 |
12 | Metrics:
13 | {%- for instance in resources %}
14 | {%- for metric in metrics %}
15 | - Namespace: "AWS/EBS"
16 | MetricName: "{{ metric }}"
17 | Statistics:
18 | - "{{ metrics[metric]['stat'] }}"
19 | Unit: "{{ metrics[metric]['unit'] }}"
20 | Dimensions:
21 | VolumeId: "{{ instance.id }}"
22 | Options:
23 | Formatter: 'cloudwatch.%(Namespace)s.{{ instance.attach_data.instance_id }}.%(MetricName)s.%(statistic)s.%(Unit)s'
24 | Period: 1
25 | {%- endfor %}
26 | {%- endfor %}
27 |
--------------------------------------------------------------------------------
/sample_templates/cloudfront.yml.j2:
--------------------------------------------------------------------------------
1 | {#- config template for CloudFront -#}
2 | {#- http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cf-metricscollected.html -#}
3 |
4 | {%- set metrics = {
5 | 'Requests': {'stat': 'Sum'},
6 | 'BytesDownloaded': {'stat': 'Sum'},
7 | 'BytesUploaded': {'stat': 'Sum'},
8 | 'TotalErrorRate': {'stat': 'Average'},
9 | '4xxErrorRate': {'stat': 'Average'},
10 | '5xxErrorRate': {'stat': 'Average'},
11 | }
12 | -%}
13 |
14 | Auth:
15 | region: "{{ region }}"
16 |
17 | Metrics:
18 | {%- for distribution in resources %}
19 | {%- for metric in metrics %}
20 | - Namespace: "AWS/CloudFront"
21 | MetricName: "{{ metric }}"
22 | Statistics:
23 | - "{{ metrics[metric]['stat'] }}"
24 | Dimensions:
25 | DistributionId: "{{ distribution.id }}"
26 | Region: "Global"
27 | Options:
28 | Count: 60
29 | Period: 1440
30 | Formatter: 'cloudwatch.%(Namespace)s.{{ distribution.id }}.%(MetricName)s.%(statistic)s.%(Unit)s'
31 | {% endfor %}
32 | {% endfor %}
33 |
--------------------------------------------------------------------------------
/config.yaml.example:
--------------------------------------------------------------------------------
1 | # Sample config.yaml
2 |
3 | # If connecting to a different region other than default, set region
4 | Auth:
5 | region: "us-west-2"
6 | Metrics:
7 | - Namespace: "AWS/ELB"
8 | MetricName: "RequestCount"
9 | Statistics: "Sum"
10 | Unit: "Count"
11 | Dimensions:
12 | # You can have multiple dimensions, but boto will only return the last one
13 | LoadBalancerName: "my-load-balancer"
14 | # You can list additional metrics in one file. Just be careful about rate limits.
15 | - Namespace: "AWS/EC2"
16 | MetricName: "CPUUtilization"
17 | # You can have multiple statistics too
18 | Statistics:
19 | - "Maximum"
20 | - "Average"
21 | Unit: "Percent"
22 | Dimensions:
23 | InstanceId: "i-r0b0t"
24 | # OPTIONAL: custom options just for this metric
25 | Options:
26 | # Set this to customize your Graphite metric names
27 | Formatter: 'cloudwatch.%(Namespace)s.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s'
28 | # EC2 defaults to 5 minute reports
29 | Period: 5
30 | # OPTIONAL: set defaults for all metrics in this file
31 | Options:
32 | Count: 10
33 |
--------------------------------------------------------------------------------
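
The `Options` blocks in the example above are merged rather than replaced: leadbutt starts from its hard-coded defaults, applies the file-level `Options`, then each metric's own `Options`, and finally any CLI flags. A minimal sketch of checking that resolution from the repository root (assuming the requirements are installed):

    import yaml

    import leadbutt

    with open('config.yaml.example') as fp:
        config = yaml.safe_load(fp)

    file_options = config.get('Options')        # {'Count': 10}
    metric = config['Metrics'][1]                # the AWS/EC2 CPUUtilization entry
    options = leadbutt.get_options(file_options, metric.get('Options'), None)
    print(options['Count'], options['Period'])   # -> 10 5
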
/Makefile:
--------------------------------------------------------------------------------
1 | VERSION = $(shell cat VERSION)
2 | ifeq ($(shell uname), Darwin)
3 | # Get this with: `brew install gnu-sed`
4 | SED = gsed
5 | else
6 | SED = sed
7 | endif
8 |
9 | help: ## Shows this help
10 | @echo "$$(grep -h '#\{2\}' $(MAKEFILE_LIST) | sed 's/: #\{2\} / /' | column -t -s ' ')"
11 |
12 |
13 | clean: ## Remove temporary files
14 | find . -name "*.pyc" -delete
15 | find . -name ".DS_Store" -delete
16 | rm -rf *.egg
17 | rm -rf *.egg-info
18 | rm -rf __pycache__
19 | rm -rf build
20 | rm -rf dist
21 |
22 | test: ## Run test suite
23 | python -m unittest discover
24 |
25 | .PHONY: version
26 | version:
27 | @$(SED) -i -r /version/s/[0-9.]+/$(VERSION)/ setup.py
28 | @$(SED) -i -r /__version__/s/[0-9.]+/$(VERSION)/ leadbutt.py
29 |
30 | # Release instructions
31 | # 1. bump VERSION file
32 | # 2. run `make release`
33 | # 3. `git push --tags origin master`
34 | # 4. update release notes
35 | release: clean version
36 | @-git commit -am "bump version to v$(VERSION)"
37 | @-git tag $(VERSION)
38 | @-pip install wheel > /dev/null
39 | python setup.py sdist bdist_wheel upload
40 |
41 | # makes it easier to test setup.py's entry points
42 | install: ## Install this package locally
43 | -pip uninstall cloudwatch-to-graphite --yes
44 | pip install .
45 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 |
4 | setup(
5 | name='cloudwatch-to-graphite',
6 | description='Helper for pushing AWS CloudWatch metrics to Graphite',
7 | version='0.11.0',
8 | author='Chris Chang',
9 | author_email='c@crccheck.com',
10 | url='https://github.com/crccheck/cloudwatch-to-graphite',
11 | py_modules=['leadbutt', 'plumbum'],
12 | entry_points={
13 | 'console_scripts': [
14 | 'leadbutt = leadbutt:main',
15 | 'plumbum = plumbum:main',
16 | ],
17 | },
18 | install_requires=[
19 | 'boto',
20 | 'PyYAML',
21 | 'docopt',
22 | 'Jinja2',
23 | 'retrying',
24 | ],
25 | license='Apache License, Version 2.0',
26 | long_description=open('README.rst').read(),
27 | classifiers=[
28 | 'Development Status :: 4 - Beta',
29 | 'Environment :: Web Environment',
30 | 'Intended Audience :: Developers',
31 | 'License :: OSI Approved :: Apache Software License',
32 | 'Operating System :: OS Independent',
33 | 'Programming Language :: Python :: 2',
34 | 'Programming Language :: Python :: 2.7',
35 | 'Programming Language :: Python :: 3',
36 | 'Programming Language :: Python :: 3.4',
37 | 'Programming Language :: Python :: 3.5',
38 | 'Programming Language :: Python :: 3.6',
39 | ],
40 | )
41 |
--------------------------------------------------------------------------------
/sample_templates/kinesisapp.yml.js:
--------------------------------------------------------------------------------
1 | {%- set metrics = {'DataBytesProcessed': {'stat': 'Average', 'unit': 'Bytes'},
2 | 'KinesisDataFetcher.getRecords.Success': {'stat': 'Average', 'unit': 'Count'},
3 | 'KinesisDataFetcher.getRecords.Time': {'stat': 'Average', 'unit': 'Milliseconds'},
4 | 'MillisBehindLatest': {'stat': 'Average', 'unit': 'Milliseconds'},
5 | 'RecordsProcessed': {'stat': 'Average', 'unit': 'Count'},
6 | 'Success': {'stat': 'Average', 'unit': 'Count'},
7 | 'Time': {'stat': 'Average', 'unit': 'Milliseconds'},
8 | 'UpdateLease.Success': {'stat': 'Average', 'unit': 'Count'},
9 | 'UpdateLease.Time': {'stat': 'Average', 'unit': 'Milliseconds'}
10 | } -%}
11 |
12 | # If connecting to a different region other than default, set region
13 | Auth:
14 | region: "{{ region }}"
15 | Metrics:
16 | {%- for stream_name, shards in resources.items() %}
17 | {%- for shard in shards %}
18 | {%- for metric in metrics %}
19 | - Namespace: "kinesis-application-name"
20 | MetricName: "{{ metric }}"
21 | Statistics: "{{ metrics[metric]['stat'] }}"
22 | Unit: "{{ metrics[metric]['unit'] }}"
23 | Dimensions:
24 | ShardId: "{{ shard }}"
25 | Operation: "ProcessTask"
26 | Options:
27 | Formatter: 'cloudwatch.{{ stream_name}}.%(Namespace)s.%(MetricName)s.{{ shard }}.%(statistic)s.%(Unit)s'
28 | Period: 5
29 | {% endfor %}
30 | {% endfor %}
31 | {% endfor %}
32 |
--------------------------------------------------------------------------------
/sample_templates/kinesis.yml.js:
--------------------------------------------------------------------------------
1 | # This is an example template file for getting metrics for a kinesis stream.
2 | # If you want an example of a kinesis application look at kinesisapp.yml.js.
3 | # Both can be run the same way
4 | {%- set metrics = {'IncomingBytes': {'stat': 'Average', 'unit': 'Bytes'},
5 | 'IncomingRecords': {'stat': 'Average', 'unit': 'Count'},
6 | 'PutRecord.Bytes': {'stat': 'Average', 'unit': 'Bytes'},
7 | 'PutRecord.Latency': {'stat': 'Average', 'unit': 'Milliseconds'},
8 | 'PutRecord.Success': {'stat': 'Average', 'unit': 'Count'},
9 | 'GetRecords.Bytes': {'stat': 'Average', 'unit': 'Bytes'},
10 | 'GetRecords.IteratorAge': {'stat': 'Average', 'unit': 'Milliseconds'},
11 | 'GetRecords.IteratorAgeMilliseconds': {'stat': 'Average', 'unit': 'Milliseconds'},
12 | 'GetRecords.Latency': {'stat': 'Average', 'unit': 'Milliseconds'},
13 | 'GetRecords.Success': {'stat': 'Average', 'unit': 'Count'}
14 | } -%}
15 |
16 | # If connecting to a different region other than default, set region
17 | Auth:
18 | region: "{{ region }}"
19 | Metrics:
20 | {%- for stream_name in resources %}
21 | {%- for metric in metrics %}
22 | - Namespace: "AWS/Kinesis"
23 | MetricName: "{{ metric }}"
24 | Statistics: "{{ metrics[metric]['stat'] }}"
25 | Unit: "{{ metrics[metric]['unit'] }}"
26 | Dimensions:
27 | StreamName: "{{ stream_name }}"
28 | Options:
29 | Formatter: 'cloudwatch.%(Namespace)s.{{ stream_name}}.%(MetricName)s.%(statistic)s.%(Unit)s'
30 | Period: 5
31 | {% endfor %}
32 | {% endfor %}
33 |
--------------------------------------------------------------------------------
/sample_templates/elb.yml.j2:
--------------------------------------------------------------------------------
1 | {#- config template for ELB -#}
2 |
3 | {%- set metrics = {
4 | 'BackendConnectionErrors': {'stat': 'Average', 'unit': 'Count'},
5 | 'HTTPCode_Backend_2XX': {'stat': 'Sum', 'unit': 'Count'},
6 | 'HTTPCode_Backend_3XX': {'stat': 'Sum', 'unit': 'Count'},
7 | 'HTTPCode_Backend_4XX': {'stat': 'Sum', 'unit': 'Count'},
8 | 'HTTPCode_Backend_5XX': {'stat': 'Sum', 'unit': 'Count'},
9 | 'HealthyHostCount': {'stat': 'Average', 'unit': 'Count'},
10 | 'Latency': {'stat': 'Average', 'unit': 'Seconds'},
11 | 'RequestCount': {'stat': 'Sum', 'unit': 'Count'},
12 | 'UnHealthyHostCount': {'stat': 'Average', 'unit': 'Count'},
13 | 'HTTPCode_ELB_4XX': {'stat': 'Sum', 'unit': 'Count'},
14 | 'HTTPCode_ELB_5XX': {'stat': 'Sum', 'unit': 'Count'},
15 | 'SurgeQueueLength': {'stat': 'Maximum', 'unit': 'Count'},
16 | 'SpilloverCount': {'stat': 'Sum', 'unit': 'Count'},
17 | }
18 | -%}
19 |
20 | Auth:
21 | region: "{{ region }}"
22 |
23 | Metrics:
24 | {%- for elb in resources %}
25 | {%- for metric in metrics %}
26 | - Namespace: "AWS/ELB"
27 | MetricName: "{{ metric }}"
28 | Statistics:
29 | - "{{ metrics[metric]['stat'] }}"
30 | Unit: "{{ metrics[metric]['unit'] }}"
31 | Dimensions:
32 | LoadBalancerName: "{{ elb.name }}"
33 | Options:
34 | Formatter: 'cloudwatch.%(Namespace)s.{{ elb.name }}.%(MetricName)s.%(statistic)s.%(Unit)s'
35 | {%- endfor %}
36 | {%- endfor %}
37 |
38 |
--------------------------------------------------------------------------------
/sample_templates/rds.yml.j2:
--------------------------------------------------------------------------------
1 | {#- config template for RDS -#}
2 |
3 | {%- set metrics = {
4 | 'BinLogDiskUsage': {'stat': 'Average', 'unit': 'Bytes'},
5 | 'CPUCreditUsage': {'stat': 'Sum', 'unit': 'Count'},
6 | 'CPUCreditBalance': {'stat': 'Sum', 'unit': 'Count'},
7 | 'CPUUtilization': {'stat': 'Average', 'unit': 'Percent'},
8 | 'DatabaseConnections': {'stat': 'Average', 'unit': 'Count'},
9 | 'DiskQueueDepth': {'stat': 'Average', 'unit': 'Count'},
10 | 'FreeableMemory': {'stat': 'Average', 'unit': 'Bytes'},
11 | 'FreeStorageSpace': {'stat': 'Average', 'unit': 'Bytes'},
12 | 'ReplicaLag': {'stat': 'Average', 'unit': 'Seconds'},
13 | 'SwapUsage': {'stat': 'Average', 'unit': 'Bytes'},
14 | 'ReadIOPS': {'stat': 'Average', 'unit': 'Count/Second'},
15 | 'WriteIOPS': {'stat': 'Average', 'unit': 'Count/Second'},
16 | 'ReadLatency': {'stat': 'Average', 'unit': 'Seconds'},
17 | 'WriteLatency': {'stat': 'Average', 'unit': 'Seconds'},
18 | 'ReadThroughput': {'stat': 'Average', 'unit': 'Bytes/Second'},
19 | 'WriteThroughput': {'stat': 'Average', 'unit': 'Bytes/Second'},
20 | 'NetworkReceiveThroughput': {'stat': 'Average', 'unit': 'Bytes/Second'},
21 | 'NetworkTransmitThroughput': {'stat': 'Average', 'unit': 'Bytes/Second'},
22 | }
23 | -%}
24 |
25 | Auth:
26 | region: "{{ region }}"
27 |
28 | Metrics:
29 | {%- for rds in resources %}
30 | {%- for metric in metrics %}
31 | - Namespace: "AWS/RDS"
32 | MetricName: "{{ metric }}"
33 | Statistics:
34 | - "{{ metrics[metric]['stat'] }}"
35 | Unit: "{{ metrics[metric]['unit'] }}"
36 | Dimensions:
37 | DBInstanceIdentifier: "{{ rds.id }}"
38 | Options:
39 | Formatter: 'cloudwatch.%(Namespace)s.{{ rds.id }}.%(MetricName)s.%(statistic)s.%(Unit)s'
40 | {%- endfor %}
41 | {%- endfor %}
42 |
--------------------------------------------------------------------------------
/sample_templates/elasticache.yml.j2:
--------------------------------------------------------------------------------
1 | {#- config template for elasticache -#}
2 |
3 | {%- set metrics = {'BytesUsedForCache': {'stat': 'Average', 'unit': 'Bytes'},
4 | 'CPUUtilization': {'stat': 'Average', 'unit': 'Percent'},
5 | 'CacheHits': {'stat': 'Sum', 'unit': 'Count'},
6 | 'CacheMisses': {'stat': 'Sum', 'unit': 'Count'},
7 | 'CurrConnections': {'stat': 'Sum', 'unit': 'Count'},
8 | 'CurrItems': {'stat': 'Sum', 'unit': 'Count'},
9 | 'Evictions': {'stat': 'Sum', 'unit': 'Count'},
10 | 'FreeableMemory': {'stat': 'Average', 'unit': 'Bytes'},
11 | 'GetTypeCmds': {'stat': 'Sum', 'unit': 'Count'},
12 | 'HashBasedCmds': {'stat': 'Average', 'unit': 'Count'},
13 | 'KeyBasedCmds': {'stat': 'Average', 'unit': 'Count'},
14 | 'NetworkBytesIn': {'stat': 'Sum', 'unit': 'Bytes'},
15 | 'NetworkBytesOut': {'stat': 'Sum', 'unit': 'Bytes'},
16 | 'NewConnections': {'stat': 'Sum', 'unit': 'Count'},
17 | 'Reclaimed': {'stat': 'Sum', 'unit': 'Count'},
18 | 'ReplicationLag': {'stat': 'Average', 'unit': 'Seconds'},
19 | 'SetTypeCmds': {'stat': 'Sum', 'unit': 'Count'},
20 | 'StringBasedCmds': {'stat': 'Sum', 'unit': 'Count'},
21 | 'SwapUsage': {'stat': 'Average', 'unit': 'Bytes'}
22 | } -%}
23 |
24 | Auth:
25 | region: "{{ region }}"
26 |
27 | Metrics:
28 | {%- for cluster in resources %}
29 | {%- for metric in metrics %}
30 | - Namespace: "AWS/ElastiCache"
31 | MetricName: "{{ metric }}"
32 | Statistics:
33 | - "{{ metrics[metric]['stat'] }}"
34 | Unit: "{{ metrics[metric]['unit'] }}"
35 | Dimensions:
36 | CacheClusterId: "{{ cluster }}"
37 | CacheNodeId: "0001"
38 | Options:
39 | Formatter: 'cloudwatch.%(Namespace)s.{{ cluster }}.%(MetricName)s.%(statistic)s.%(Unit)s'
40 | Period: 5
41 | {%- endfor %}
42 | {%- endfor %}
43 |
44 |
--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | Contributing to Cloudwatch-to-Graphite
2 | ======================================
3 |
4 | First off, thanks for taking the time to contribute!
5 |
6 | These guidelines are a living document and open to change via a pull
7 | request.
8 |
9 | How Can I Contribute?
10 | ---------------------
11 |
12 | Coding style
13 | ~~~~~~~~~~~~
14 |
15 | It's a Python project, so there is no reason not to follow
16 | `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`__. There are a few exceptions:
17 |
18 | * line length < 100 (not 80) if at all possible
19 | * one-line docstrings are acceptable, but feel free to step up to full docstrings with parameters named and return values specified.
20 |
21 | Open Issues
22 | ~~~~~~~~~~~
23 |
24 | The project uses GitHub's built-in issue tracker. Check there if there
25 | is something that matches your skills and interests.
26 |
27 | Developing
28 | ~~~~~~~~~~
29 |
30 | 1. Optional: create a virtual environment:
31 | ``virtualenv cw2g && . cw2g/bin/activate``
32 | 2. Install requirements: ``pip install -r requirements.txt``
33 | 3. Run the test suite: ``make test``
34 | 4. Verify the tests pass over all supported Python versions: ``tox``
35 |
36 | Pull requests
37 | ~~~~~~~~~~~~~
38 |
39 | Standard-issue GitHub project flow, summarized:
40 |
41 | 1. Fork the repo
42 | 2. Create a branch
43 | 3. Check in your changes
44 | 4. Create a pull request
45 | 5. An existing contributor will do a code review, and as applicable ask for
46 | changes, mark the pull request with a :+1:, or merge it in, bump the
47 | version number, and cut/tag a release.
48 |
49 | Reporting Bugs
50 | ~~~~~~~~~~~~~~
51 |
52 | This section guides you through submitting a bug report for cloudwatch-to-graphite.
53 | Following these guidelines helps maintainers and the community
54 | understand your report, reproduce the behavior, and find related
55 | reports.
56 |
57 | Before creating bug reports, please check the issue tracker, as you
58 | might find out that you don't need to create one. When you are creating
59 | a bug report, please include as many details as possible.
60 |
61 | Code of Conduct
62 | ~~~~~~~~~~~~~~~
63 |
64 | http://contributor-covenant.org/version/1/3/0/
65 |
--------------------------------------------------------------------------------
/sample_templates/emr.yml.j2:
--------------------------------------------------------------------------------
1 | # This is an example template file for getting metrics for EMR.
2 |
3 | {%- set metrics= { 'AppsCompleted': {'stat': 'Average', 'unit': 'Count'},
4 | 'AppsFailed': {'stat': 'Average', 'unit': 'Count'},
5 | 'AppsKilled': {'stat': 'Average', 'unit': 'Count'},
6 | 'AppsPending': {'stat': 'Average', 'unit': 'Count'},
7 | 'AppsRunning': {'stat': 'Average', 'unit': 'Count'},
8 | 'AppsSubmitted': {'stat': 'Average', 'unit': 'Count'},
9 | 'CapacityRemainingGB': {'stat': 'Average', 'unit': 'Count'},
10 | 'ContainerAllocated': {'stat': 'Average', 'unit': 'Count'},
11 | 'ContainerPending': {'stat': 'Average', 'unit': 'Count'},
12 | 'ContainerReserved': {'stat': 'Average', 'unit': 'Count'},
13 | 'CoreNodesPending': {'stat': 'Average', 'unit': 'Count'},
14 | 'CoreNodesRunning': {'stat': 'Average', 'unit': 'Count'},
15 | 'CorruptBlocks': {'stat': 'Average', 'unit': 'Count'},
16 | 'HDFSCountRead': {'stat': 'Average', 'unit': 'Count'},
17 | 'HDFSCountWritten': {'stat': 'Average', 'unit': 'Count'},
18 | 'IsIdle': {'stat': 'Average', 'unit': 'None'},
19 | 'MRActiveNodes': {'stat': 'Average', 'unit': 'Count'},
20 | 'MRDecommissionedNodes': {'stat': 'Average', 'unit': 'Count'},
21 | 'MRLostNodes': {'stat': 'Average', 'unit': 'Count'},
22 | 'MRRebootedNodes': {'stat': 'Average', 'unit': 'Count'},
23 | 'MRTotalNodes': {'stat': 'Average', 'unit': 'Count'},
24 | 'MRUnhealthyNodes': {'stat': 'Average', 'unit': 'Count'},
25 | 'MissingBlocks': {'stat': 'Average', 'unit': 'Count'},
26 | 'PendingDeletionBlocks': {'stat': 'Average', 'unit': 'Count'},
27 | 'S3CountRead': {'stat': 'Average', 'unit': 'Count'},
28 | 'S3CountWritten': {'stat': 'Average', 'unit': 'Count'},
29 | 'TotalLoad': {'stat': 'Average', 'unit': 'Count'},
30 | 'UnderReplicatedBlocks': {'stat': 'Average', 'unit': 'Count'},
31 | 'MemoryAllocatedMB': {'stat': 'Average', 'unit': 'Count'},
32 | 'MemoryAvailableMB': {'stat': 'Average', 'unit': 'Count'},
33 | 'MemoryReservedMB': {'stat': 'Average', 'unit': 'Count'},
34 | 'MemoryTotalMB': {'stat': 'Average', 'unit': 'Count'},
35 | 'LiveDataNodes': {'stat': 'Average', 'unit': 'Percent'},
36 | 'HDFSUtilization': {'stat': 'Average', 'unit': 'Percent'}
37 | } -%}
38 |
39 | Auth:
40 | region: "{{ region }}"
41 | Metrics:
42 | {%- for emr in resources %}
43 | {%- for metric in metrics %}
44 | - Namespace: "AWS/ElasticMapReduce"
45 | MetricName: "{{ metric }}"
46 | Statistics: "{{ metrics[metric]['stat'] }}"
47 | Unit: "{{ metrics[metric]['unit'] }}"
48 | Dimensions:
49 | JobFlowId: {{ emr.id }}
50 | {% endfor %}
51 | {% endfor %}
52 |
53 | Options:
54 | Count: 1
55 | Period: 5
56 | Formatter: 'cloudwatch.%(Namespace)s.{{ region }}.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s'
57 |
--------------------------------------------------------------------------------
/test_plumbum.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """
3 | Tests for Cloudwatch to Graphite (plumbum)
4 |
5 | My class names are funny because I name them after the function they cover.
6 | """
7 | from __future__ import unicode_literals
8 |
9 | import unittest
10 | import mock
11 |
12 | import plumbum
13 |
14 |
15 | class GetCLIOptionsTests(unittest.TestCase): # flake8: noqa
16 |
17 | def test_all_args(self):
18 | args = [
19 | '-r', 'non-legal-region',
20 | '-f', 'instance-type=c3.large',
21 | 'foo.yaml.j2',
22 | 'ec2',
23 | ]
24 | templ, ns, region, filter_by, token = plumbum.interpret_options(args)
25 |
26 | self.assertEqual(region, 'non-legal-region')
27 | self.assertEqual(ns, 'ec2')
28 | self.assertEqual(templ, 'foo.yaml.j2')
29 | self.assertEqual(filter_by, {u'instance-type': u'c3.large'})
30 |
31 | def test_namespace_can_use_cloudwatch_syntax(self):
32 | args = [
33 | 'foo.yaml.j2',
34 | 'AWS/EC2',
35 | ]
36 | templ, ns, region, filter_by, token = plumbum.interpret_options(args)
37 | self.assertEqual(templ, 'foo.yaml.j2')
38 | self.assertEqual(ns, 'ec2')
39 |
40 | @mock.patch('plumbum.sys.exit')
41 | def test_no_template(self, mock_exit):
42 | """
43 |         Test that if the namespace is not passed,
44 | we get the correct failure/exit.
45 | """
46 | args = [
47 | '-f', 'instance-type=c3.large',
48 | 'foo.yaml.j2',
49 | ]
50 | templ, ns, region, filter_by, token = plumbum.interpret_options(args)
51 | self.assertEqual(ns, None)
52 | self.assertEqual(region, plumbum.DEFAULT_REGION)
53 | self.assertEqual(filter_by, {u'instance-type': u'c3.large'})
54 |
55 |
56 | class FilterTests(unittest.TestCase):
57 |
58 | # define 2 mock ec2 instances to test filters with
59 |     instances = [mock.Mock(
60 | root_device_type=u'ebs',
61 | id=u'i-12345678',
62 | private_ip_address='10.4.3.2',
63 | ), mock.Mock(
64 | root_device_type=u'ebs',
65 | id=u'i-87654321',
66 | private_ip_address='10.5.4.3',
67 | )]
68 |
69 | # verify that you get the instance back from the filter
70 | def test_filter_hit(self):
71 | filter_args = {'root_device_type': 'ebs', 'private_ip_address': '10.4.3.2'}
72 | filtered_instances = plumbum.lookup(self.instances, filter_by=filter_args)
73 | self.assertEqual(1, len(filtered_instances))
74 | self.assertEqual(self.instances[0].id, filtered_instances[0].id)
75 |
76 | # verify that you *do not* get the instance back from the filter
77 | def test_filter_miss(self):
78 | filter_args = {'root_device_type': 'instance-store'}
79 | filtered_instances = plumbum.lookup(self.instances, filter_by=filter_args)
80 | self.assertEqual(len(filtered_instances), 0)
81 |
82 |
83 | class ListXXXTests(unittest.TestCase):
84 | @mock.patch('boto.elasticache.connect_to_region')
85 | def test_list_elasticache_trivial_case(self, mock_boto):
86 | clusters = plumbum.list_elasticache('moo', None)
87 | self.assertEqual(clusters, [])
88 |
89 | clusters = plumbum.list_elasticache('moo', {})
90 | self.assertEqual(clusters, [])
91 |
92 | @mock.patch('boto.dynamodb.connect_to_region')
93 | def test_list_dynamodb_trivial_case(self, mock_boto):
94 | mock_boto.return_value.list_tables.return_value = []
95 | tables = plumbum.list_dynamodb('moo', None)
96 | self.assertEqual(tables, [])
97 |
98 | tables = plumbum.list_dynamodb('moo', {})
99 | self.assertEqual(tables, [])
100 |
101 |
102 | if __name__ == '__main__':
103 | unittest.main()
104 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Cloudwatch-to-Graphite
2 | ======================
3 |
4 | .. image:: https://travis-ci.org/crccheck/cloudwatch-to-graphite.svg
5 | :target: https://travis-ci.org/crccheck/cloudwatch-to-graphite
6 |
7 | Cloudwatch-to-Graphite (leadbutt) is a small utility that pulls metrics from
8 | AWS CloudWatch and feeds them to Graphite.
9 |
10 |
11 | Installation
12 | ------------
13 |
14 | Install using pip::
15 |
16 | pip install cloudwatch-to-graphite
17 |
18 | Configuring ``boto``
19 | ~~~~~~~~~~~~~~~~~~~~
20 |
21 | Cloudwatch-to-Graphite uses `boto`_, so make sure to follow its `configuration
22 | instructions`_. The easiest way to do this is to set up the
23 | ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables.
24 |
25 | .. _configuration instructions: http://boto.readthedocs.org/en/latest/boto_config_tut.html
26 |
27 |
28 | Usage
29 | -----
30 |
31 | Configuration Files
32 | ~~~~~~~~~~~~~~~~~~~
33 |
34 | If you have a simple setup, the easiest way to get started is to set up a
35 | config.yaml. You can copy the included config.yaml.example. Then just run::
36 |
37 | leadbutt
38 |
39 | If you have several configs you want to switch between, you can specify a
40 | custom configuration file::
41 |
42 | leadbutt --config-file=production.yaml -n 20
43 |
44 | You can even generate configs on the fly and send them in via stdin by setting
45 | the config file to '-'::
46 |
47 | generate_config_from_inventory | leadbutt --config-file=-
48 |
49 | There's a helper to generate configuration files called ``plumbum``. Use it like::
50 |
51 | plumbum [-r REGION] [-f FILTER] [--token TOKEN] template namespace
52 |
53 | Namespace is the CloudWatch namespace for the resources of interest; for example ``AWS/RDS``.
54 | The template is a Jinja2 template. You can add arbitrary replacement tokens, e.g. ``{{ replace_me }}``, and then
55 | pass in values on the CLI via ``--token``. For example, if you called::
56 |
57 | plumbum --token replace_me='hello, world' sample_templates/rds.yml.j2 AWS/RDS
58 |
59 | You would get all instances of ``{{ replace_me }}`` in the template replaced with ``hello, world``.
60 |
61 | Filters
62 | ~~~~~~~
63 |
64 | You can pass simple ``key=value`` filters in to ``plumbum``; be aware of the limitations:
65 |
66 | * the filters run client-side against whatever the AWS API has returned; if you have a lot of objects of a given type, expect the API request to take a while.
67 | * they work only against object attributes and tags returned by the API. For example, RDS and ELB objects can be tagged, but fetching those tags requires a per-object subrequest that ``plumbum`` does not make, so you can only filter on the object attributes.
68 |
69 | Example: ``plumbum -f Name=my-dev-instance sample_templates/ec2.yml.j2 ec2``
70 |
71 |
72 | Sending Data to Graphite
73 | ~~~~~~~~~~~~~~~~~~~~~~~~
74 |
75 | If your graphite server is at graphite.local, you can send metrics by chaining
76 | with netcat::
77 |
78 | leadbutt | nc -q0 graphite.local 2003
79 |
80 | Or if you want to use UDP::
81 |
82 | leadbutt | nc -uw0 graphite.local 2003
83 |
84 | If you need to namespace your metrics for a hosted Graphite provider, you could
85 | provide a custom formatter, but the easiest way is to just run the output
86 | through awk::
87 |
88 | leadbutt | \
89 | awk -v namespace="$HOSTEDGRAPHITE_APIKEY" '{print namespace"."$0}' | \
90 | nc -uw0 my-graphite-provider.xxx 2003
91 |
92 | Customizing Your Graphite Metric Names
93 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
94 |
95 | Set the ``Formatter`` option to control the template used to generate Graphite
96 | metric names. I wasn't sure what the default should be, so I copied
97 | `cloudwatch2graphite`_'s. Here's what it looks like::
98 |
99 | cloudwatch.%(Namespace)s.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s
100 |
101 | TitleCased variables come directly from the YAML configuration, while lowercase
102 | variables are derived:
103 |
104 | * **statistic** -- the current statistic since ``Statistics`` can be a list
105 | * **dimension** -- the dimension value, e.g. "i-r0b0t" or "my-load-balancer"
106 |
107 | The format string is Python's `%-style formatting <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting>`_.
108 |
109 | config.yaml
110 | -----------
111 |
112 | The metrics to pull are defined in a YAML configuration file. See the included
113 | config.yaml.example for an idea of what you can do.
114 |
115 |
116 | Developing
117 | ----------
118 |
119 | See `Contributing <CONTRIBUTING.rst>`__.
120 |
121 | Useful References
122 | -----------------
123 |
124 | * `CloudWatch Reference <http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/>`_
125 | * `boto CloudWatch docs <http://boto.readthedocs.org/en/latest/ref/cloudwatch.html>`_
126 |
127 |
128 | Prior Art
129 | ---------
130 |
131 | Cloudwatch-to-Graphite was inspired by edasque's `cloudwatch2graphite`_. I was
132 | looking to expand it, but I wanted to use `boto`_.
133 |
134 | .. _cloudwatch2graphite: https://github.com/edasque/cloudwatch2graphite
135 | .. _boto: https://boto.readthedocs.org/en/latest/
136 |
--------------------------------------------------------------------------------
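
To make the ``Formatter`` section of the README concrete, here is a small worked example that mirrors what ``leadbutt.output_results()`` does with the default format string; the values are borrowed from config.yaml.example, and the lowercase keys are the ones leadbutt derives at runtime:

    # TitleCased keys come straight from the YAML config; lowercase ones are derived.
    context = {
        'Namespace': 'AWS/ELB',
        'MetricName': 'RequestCount',
        'Unit': 'Count',
        'dimension': 'my-load-balancer',   # first value from Dimensions
        'statistic': 'Sum',                # current entry from Statistics
    }
    formatter = 'cloudwatch.%(Namespace)s.%(dimension)s.%(MetricName)s.%(statistic)s.%(Unit)s'
    print((formatter % context).replace('/', '.').lower())
    # -> cloudwatch.aws.elb.my-load-balancer.requestcount.sum.count
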
/test_leadbutt.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """
3 | Tests for Cloudwatch to Graphite (leadbutt)
4 |
5 | WISHLIST: suppress chatty stderr and stdout in tests
6 | """
7 | from __future__ import unicode_literals
8 |
9 | from subprocess import call
10 | import datetime
11 | import os
12 | import unittest
13 |
14 | import mock
15 |
16 | import leadbutt
17 |
18 |
19 | class get_configTest(unittest.TestCase):
20 | def test_example_config_loads(self):
21 | config = leadbutt.get_config('config.yaml.example')
22 | self.assertIn('Metrics', config)
23 |
24 | @mock.patch('sys.stdin')
25 | def test_config_can_be_stdin(self, mock_stdin):
26 | # simulate reading stdin
27 | mock_stdin.read.side_effect = ['test: "123"\n', '']
28 | # mock_stdin.name = 'oops'
29 | config = leadbutt.get_config('-')
30 | self.assertIn('test', config)
31 |
32 | @mock.patch('sys.stderr')
33 | @mock.patch('sys.stdin')
34 | def test_config_handles_malformed_yaml(self, mock_stdin, mock_stderr):
35 | mock_stdin.read.side_effect = ['-\nmalformed yaml', '']
36 | mock_stdin.name = 'oops'
37 | with self.assertRaises(SystemExit) as e:
38 | leadbutt.get_config('-')
39 | self.assertEqual(e.exception.code, 1)
40 | self.assertTrue(mock_stderr.write.called)
41 |
42 | @mock.patch('sys.stderr')
43 | def test_config_handles_missing_file(self, mock_stderr):
44 | with self.assertRaises(SystemExit) as e:
45 | leadbutt.get_config('whatever_the_default_config_is')
46 | self.assertEqual(e.exception.code, 2)
47 | self.assertTrue(mock_stderr.write.called)
48 |
49 |
50 | class get_optionsTest(unittest.TestCase):
51 | def test_get_options_returns_right_option(self):
52 | # only have the defaults
53 | options = leadbutt.get_options(None, None, None)
54 | self.assertEqual(options['Period'], 1)
55 | self.assertEqual(options['Count'], 5)
56 |
57 | # config options were specified
58 | config_options = {
59 | 'Period': 2,
60 | }
61 | options = leadbutt.get_options(config_options, None, None)
62 | self.assertEqual(options['Period'], 2)
63 | self.assertEqual(options['Count'], 5)
64 |
65 | # local_options were specified
66 | local_options = {
67 | 'Period': 3,
68 | }
69 | options = leadbutt.get_options(config_options, local_options, None)
70 | self.assertEqual(options['Period'], 3)
71 | self.assertEqual(options['Count'], 5)
72 |
73 | # cli_options were specified
74 | cli_options = {
75 | 'Period': 4,
76 | 'Count': 10,
77 | }
78 | options = leadbutt.get_options(config_options, local_options, cli_options)
79 | self.assertEqual(options['Period'], 4)
80 | self.assertEqual(options['Count'], 10)
81 |
82 |
83 | class output_resultsTest(unittest.TestCase):
84 | @mock.patch('sys.stdout')
85 | def test_default_formatter_used(self, mock_sysout):
86 | mock_results = [{
87 | 'Timestamp': datetime.datetime.utcnow(),
88 | 'Unit': 'Count',
89 | 'Sum': 1337.0,
90 | }]
91 | metric = {
92 | 'Namespace': 'AWS/Foo',
93 | 'MetricName': 'RequestCount',
94 | 'Statistics': 'Sum',
95 | 'Unit': 'Count',
96 | 'Dimensions': {'Krang': 'X'},
97 | }
98 | options = leadbutt.get_options(None, metric.get('Options'), None)
99 | leadbutt.output_results(mock_results, metric, options)
100 | self.assertTrue(mock_sysout.write.called)
101 | out = mock_sysout.write.call_args[0][0]
102 | name, value, timestamp = out.split()
103 | # assert default formatter was used
104 | self.assertEqual(name, 'cloudwatch.aws.foo.x.requestcount.sum.count')
105 | self.assertEqual(value, '1337.0')
106 |
107 | @mock.patch('sys.stdout')
108 | def test_custom_formatter_used(self, mock_sysout):
109 | mock_results = [{
110 | 'Timestamp': datetime.datetime.utcnow(),
111 | 'Unit': 'Count',
112 | 'Sum': 1337.0,
113 | }]
114 | metric = {
115 | 'Namespace': 'AWS/Foo',
116 | 'MetricName': 'RequestCount',
117 | 'Statistics': 'Sum',
118 | 'Unit': 'Count',
119 | 'Dimensions': {'Krang': 'X'},
120 | 'Options': {'Formatter': 'tmnt.%(dimension)s'}
121 | }
122 | options = leadbutt.get_options(None, metric.get('Options'), None)
123 | leadbutt.output_results(mock_results, metric, options)
124 | self.assertTrue(mock_sysout.write.called)
125 | out = mock_sysout.write.call_args[0][0]
126 | name, value, timestamp = out.split()
127 | # assert custom formatter was used
128 | self.assertEqual(name, 'tmnt.x')
129 | self.assertEqual(value, '1337.0')
130 |
131 | @mock.patch('sys.stdout')
132 | def test_multiple_statistics_get_multiple_lines(self, mock_sysout):
133 | mock_results = [{
134 | 'Timestamp': datetime.datetime.utcnow(),
135 | 'Maximum': 9001.0,
136 | 'Average': 1337.0,
137 | 'Unit': 'Count',
138 | }]
139 | metric = {
140 | 'Namespace': 'AWS/Foo',
141 | 'MetricName': 'RequestCount',
142 | 'Statistics': ['Maximum', 'Average'],
143 | 'Unit': 'Count',
144 | 'Dimensions': {'Krang': 'X'},
145 | }
146 | options = leadbutt.get_options(None, metric.get('Options'), None)
147 | leadbutt.output_results(mock_results, metric, options)
148 |
149 | self.assertEqual(
150 | mock_sysout.write.call_count, len(metric['Statistics']))
151 |
152 |
153 | class leadbuttTest(unittest.TestCase):
154 | @mock.patch('boto.ec2.cloudwatch.connect_to_region')
155 | @mock.patch('leadbutt.get_config')
156 | def test_can_get_auth_from_config(self, mock_get_config, mock_connect):
157 | mock_get_config.return_value = {
158 | 'Metrics': [],
159 | 'Auth': {
160 | 'aws_access_key_id': 'foo',
161 | 'aws_secret_access_key': 'bar',
162 | }
163 | }
164 | leadbutt.leadbutt('dummy_config_file', {'Count': 1, 'Period': 5})
165 | self.assertTrue(mock_connect.called)
166 | args, kwargs = mock_connect.call_args
167 | self.assertEqual(kwargs['aws_access_key_id'], 'foo')
168 | self.assertEqual(kwargs['aws_secret_access_key'], 'bar')
169 |
170 |
171 | @unittest.skipUnless('TOX_TEST_ENTRYPOINT' in os.environ,
172 | 'This is only applicable if leadbutt is installed')
173 | class mainTest(unittest.TestCase):
174 | def test_entry_point(self):
175 | # assert this does not raise an exception
176 | call(['leadbutt', '--help'])
177 |
178 |
179 | if __name__ == '__main__':
180 | unittest.main()
181 |
--------------------------------------------------------------------------------
/leadbutt.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """
3 | Usage:
4 | leadbutt [options]
5 |
6 | Options:
7 | -h --help Show this screen.
8 | -c FILE --config-file=FILE Path to a YAML configuration file [default: config.yaml].
9 | -i INTERVAL Interval, in ms, to wait between metric requests. Doubles as the backoff multiplier. [default: 50]
10 | -m MAX_INTERVAL The maximum interval time to back off to, in ms [default: 4000]
11 | -p INT --period INT Period length, in minutes [default: 1]
12 | -n INT Number of data points to try to get [default: 5]
13 | -v Verbose
14 | --version Show version.
15 | """
16 | from __future__ import unicode_literals
17 |
18 | from calendar import timegm
19 | import datetime
20 | import os.path
21 | import sys
22 | import time
23 |
24 | from docopt import docopt
25 | import boto.ec2.cloudwatch
26 | from retrying import retry
27 | import yaml
28 |
29 |
30 | # emulate six.text_type based on https://docs.python.org/3/howto/pyporting.html#str-unicode
31 | if sys.version_info[0] >= 3:
32 | text_type = str
33 | else:
34 | text_type = unicode
35 |
36 | __version__ = '0.11.0'
37 |
38 |
39 | # configuration
40 |
41 | DEFAULT_REGION = 'us-east-1'
42 |
43 | DEFAULT_OPTIONS = {
44 | 'Period': 1, # 1 minute
45 | 'Count': 5, # 5 periods
46 | 'Formatter': ('cloudwatch.%(Namespace)s.%(dimension)s.%(MetricName)s'
47 | '.%(statistic)s.%(Unit)s')
48 | }
49 |
50 |
51 | def get_config(config_file):
52 | """Get configuration from a file."""
53 | def load(fp):
54 | try:
55 | return yaml.safe_load(fp)
56 | except yaml.YAMLError as e:
57 | sys.stderr.write(text_type(e))
58 | sys.exit(1) # TODO document exit codes
59 |
60 | if config_file == '-':
61 | return load(sys.stdin)
62 | if not os.path.exists(config_file):
63 | sys.stderr.write('ERROR: Must either run next to config.yaml or'
64 | ' specify a config file.\n' + __doc__)
65 | sys.exit(2)
66 | with open(config_file) as fp:
67 | return load(fp)
68 |
69 |
70 | def get_options(config_options, local_options, cli_options):
71 | """
72 | Figure out what options to use based on the four places it can come from.
73 |
74 | Order of precedence:
75 | * cli_options specified by the user at the command line
76 | * local_options specified in the config file for the metric
77 | * config_options specified in the config file at the base
78 | * DEFAULT_OPTIONS hard coded defaults
79 | """
80 | options = DEFAULT_OPTIONS.copy()
81 | if config_options is not None:
82 | options.update(config_options)
83 | if local_options is not None:
84 | options.update(local_options)
85 | if cli_options is not None:
86 | options.update(cli_options)
87 | return options
88 |
89 |
90 | def output_results(results, metric, options):
91 | """
92 | Output the results to stdout.
93 |
94 |     TODO: add AMQP support for efficiency
95 | """
96 | formatter = options['Formatter']
97 | context = metric.copy() # XXX might need to sanitize this
98 | try:
99 | context['dimension'] = list(metric['Dimensions'].values())[0]
100 | except AttributeError:
101 | context['dimension'] = ''
102 | for result in results:
103 | stat_keys = metric['Statistics']
104 | if not isinstance(stat_keys, list):
105 | stat_keys = [stat_keys]
106 | for statistic in stat_keys:
107 | context['statistic'] = statistic
108 | # get and then sanitize metric name, first copy the unit name from the
109 | # result to the context to keep the default format happy
110 | context['Unit'] = result['Unit']
111 | metric_name = (formatter % context).replace('/', '.').lower()
112 | line = '{0} {1} {2}\n'.format(
113 | metric_name,
114 | result[statistic],
115 | timegm(result['Timestamp'].timetuple()),
116 | )
117 | sys.stdout.write(line)
118 |
119 |
120 | def leadbutt(config_file, cli_options, verbose=False, **kwargs):
121 |
122 |     # This function is defined here so that the decorator can use CLI options passed in from main().
123 |     # We re-use the interval to sleep at the bottom of the loop that calls get_metric_statistics.
124 | @retry(wait_exponential_multiplier=kwargs.get('interval', None),
125 | wait_exponential_max=kwargs.get('max_interval', None),
126 | # give up at the point the next cron of this script probably runs; Period is minutes; some_max_delay needs ms
127 | stop_max_delay=cli_options['Count'] * cli_options['Period'] * 60 * 1000)
128 | def get_metric_statistics(**kwargs):
129 | """
130 | A thin wrapper around boto.cloudwatch.connection.get_metric_statistics, for the
131 | purpose of adding the @retry decorator
132 | :param kwargs:
133 | :return:
134 | """
135 | connection = kwargs.pop('connection')
136 | return connection.get_metric_statistics(**kwargs)
137 |
138 | config = get_config(config_file)
139 | config_options = config.get('Options')
140 | auth_options = config.get('Auth', {})
141 |
142 | region = auth_options.get('region', DEFAULT_REGION)
143 | connect_args = {
144 | 'debug': 2 if verbose else 0,
145 | }
146 | if 'aws_access_key_id' in auth_options:
147 | connect_args['aws_access_key_id'] = auth_options['aws_access_key_id']
148 | if 'aws_secret_access_key' in auth_options:
149 | connect_args['aws_secret_access_key'] = auth_options['aws_secret_access_key']
150 | conn = boto.ec2.cloudwatch.connect_to_region(region, **connect_args)
151 | for metric in config['Metrics']:
152 | options = get_options(
153 | config_options, metric.get('Options'), cli_options)
154 | period_local = options['Period'] * 60
155 | count_local = options['Count']
156 | end_time = datetime.datetime.utcnow()
157 | start_time = end_time - datetime.timedelta(
158 | seconds=period_local * count_local)
159 |         # if 'Unit' is in the config, request only that; else get all units
160 | unit = metric.get('Unit')
161 | metric_names = metric['MetricName']
162 | if not isinstance(metric_names, list):
163 | metric_names = [metric_names]
164 | for metric_name in metric_names:
165 | # we need a copy of the metric dict with the MetricName swapped out
166 | this_metric = metric.copy()
167 | this_metric['MetricName'] = metric_name
168 | results = get_metric_statistics(
169 | connection=conn,
170 | period=period_local,
171 | start_time=start_time,
172 | end_time=end_time,
173 | metric_name=metric_name,
174 | namespace=metric['Namespace'],
175 | statistics=metric['Statistics'],
176 | dimensions=metric['Dimensions'],
177 | unit=unit
178 | )
179 | output_results(results, this_metric, options)
180 | time.sleep(kwargs.get('interval', 0) / 1000.0)
181 |
182 |
183 | def main(*args, **kwargs):
184 | options = docopt(__doc__, version=__version__)
185 | # help: http://boto.readthedocs.org/en/latest/ref/cloudwatch.html#boto.ec2.cloudwatch.CloudWatchConnection.get_metric_statistics
186 | config_file = options.pop('--config-file')
187 | period = int(options.pop('--period'))
188 | count = int(options.pop('-n'))
189 | verbose = options.pop('-v')
190 | cli_options = {}
191 | if period is not None:
192 | cli_options['Period'] = period
193 | if count is not None:
194 | cli_options['Count'] = count
195 | leadbutt(config_file, cli_options, verbose,
196 | interval=float(options.pop('-i')),
197 | max_interval=float(options.pop('-m'))
198 | )
199 |
200 |
201 | if __name__ == '__main__':
202 | main()
203 |
--------------------------------------------------------------------------------
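
The functions above can be exercised without touching AWS, which is handy for checking what the Graphite lines will look like. A minimal sketch with a fabricated CloudWatch result (one ``metric-name value unix-timestamp`` line is written per statistic):

    import datetime

    import leadbutt

    metric = {
        'Namespace': 'AWS/ELB',
        'MetricName': 'RequestCount',
        'Statistics': 'Sum',
        'Unit': 'Count',
        'Dimensions': {'LoadBalancerName': 'my-load-balancer'},
    }
    # shaped like a datapoint from boto's get_metric_statistics()
    results = [{'Timestamp': datetime.datetime.utcnow(), 'Unit': 'Count', 'Sum': 42.0}]
    options = leadbutt.get_options(None, metric.get('Options'), None)
    leadbutt.output_results(results, metric, options)
    # -> cloudwatch.aws.elb.my-load-balancer.requestcount.sum.count 42.0 <unix timestamp>
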
/plumbum.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """
3 | Usage:
4 |     plumbum [-r REGION] [-f FILTER]... [--token TOKEN]... template namespace
5 |
6 | Options:
7 |     template              path to the Jinja2 template
8 |     namespace             AWS namespace; currently supports: asg, billing, cloudfront,
9 |                           dynamodb, ebs, ec2, elasticache, elb, emr, kinesisapp, rds, sqs
10 |     -r, --region REGION   AWS region [default: us-east-1]
11 |     -f, --filter FILTER   key=value filter on object attributes or tags (repeatable)
12 |     --token TOKEN         key=value pair made available to the template (repeatable)
13 |
14 | Examples:
15 |
16 |     plumbum elb.yaml.j2 elb
17 |     plumbum -r us-west-2 -f environment=production ec2.yaml.j2 ec2
18 |
19 | Outputs to stdout.
20 |
21 | About Templates:
22 |
23 | Templates are used to generate config.yml files based on running resources.
24 | They're written in jinja2, and have these variables available:
25 |
26 | filters A dictionary of the filters that were passed in
27 | region The region the resource is located in
28 | resources A list of the resources as boto objects
29 | """
30 | from __future__ import unicode_literals
31 |
32 | import argparse
33 | import sys
34 |
35 | import boto
36 | import boto.dynamodb
37 | import boto.ec2
38 | import boto.emr
39 | import boto.ec2.elb
40 | import boto.ec2.cloudwatch
41 | import boto.rds
42 | import boto.elasticache
43 | import boto.ec2.autoscale
44 | import boto.kinesis
45 | import boto.sqs
46 | import jinja2
47 | import os.path
48 |
49 | from leadbutt import __version__
50 |
51 | # DEFAULT_NAMESPACE = 'ec2' # TODO
52 | DEFAULT_REGION = 'us-east-1'
53 |
54 |
55 | class CliArgsException(Exception):
56 | pass
57 |
58 |
59 | def get_property_func(key):
60 | """
61 | Get the accessor function for an instance to look for `key`.
62 |
63 | Look for it as an attribute, and if that does not work, look to see if it
64 | is a tag.
65 | """
66 | def get_it(obj):
67 | try:
68 | return getattr(obj, key)
69 | except AttributeError:
70 | return obj.tags.get(key)
71 | return get_it
72 |
73 |
74 | def filter_key(filter_args):
75 | def filter_instance(instance):
76 | return all([value == get_property_func(key)(instance)
77 | for key, value in filter_args.items()])
78 | return filter_instance
79 |
80 |
81 | def lookup(instances, filter_by=None):
82 | if filter_by is not None:
83 | return list(filter(filter_key(filter_by), instances))
84 | return instances
85 |
86 |
87 | def interpret_options(args=sys.argv[1:]):
88 |
89 | parser = argparse.ArgumentParser()
90 | parser.add_argument('--version', action='version', version=__version__)
91 | parser.add_argument("-r", "--region", help="AWS region", default=DEFAULT_REGION)
92 | parser.add_argument("-f", "--filter", action='append', default=[],
93 | help="filter to apply to AWS objects in key=value form, can be used multiple times")
94 | parser.add_argument('--token', action='append', help='a key=value pair to use when populating templates')
95 | parser.add_argument("template", type=str, help="the template to interpret")
96 | parser.add_argument("namespace", type=str, help="AWS namespace")
97 |
98 | args = parser.parse_args(args=args)
99 |
100 | # filters are passed in as list of key=values pairs, we need a dictionary to pass to lookup()
101 | filters = dict([x.split('=', 1) for x in args.filter])
102 |
103 | # Support 'ec2' (human friendly) and 'AWS/EC2' (how CloudWatch natively calls these things)
104 | if args.namespace is not None: # Just making test pass, argparse will catch this missing.
105 | namespace = args.namespace.rsplit('/', 2)[-1].lower()
106 | else:
107 | namespace = None
108 | return args.template, namespace, args.region, filters, args.token
109 |
110 |
111 | def list_billing(region, filter_by_kwargs):
112 | """List available billing metrics"""
113 | conn = boto.ec2.cloudwatch.connect_to_region(region)
114 | metrics = conn.list_metrics(metric_name='EstimatedCharges')
115 | # Filtering is based on metric Dimensions. Only really valuable one is
116 | # ServiceName.
117 | if filter_by_kwargs:
118 |         filter_key = list(filter_by_kwargs.keys())[0]
119 |         filter_value = list(filter_by_kwargs.values())[0]
120 | if filter_value:
121 | filtered_metrics = [x for x in metrics if x.dimensions.get(filter_key) and x.dimensions.get(filter_key)[0] == filter_value]
122 | else:
123 | # ServiceName=''
124 | filtered_metrics = [x for x in metrics if not x.dimensions.get(filter_key)]
125 | else:
126 | filtered_metrics = metrics
127 | return filtered_metrics
128 |
129 |
130 | def list_cloudfront(region, filter_by_kwargs):
131 |     """List CloudFront distributions."""
132 | conn = boto.connect_cloudfront()
133 | instances = conn.get_all_distributions()
134 | return lookup(instances, filter_by=filter_by_kwargs)
135 |
136 |
137 | def list_ec2(region, filter_by_kwargs):
138 | """List running ec2 instances."""
139 | conn = boto.ec2.connect_to_region(region)
140 | instances = conn.get_only_instances()
141 | return lookup(instances, filter_by=filter_by_kwargs)
142 |
143 | def list_ebs(region, filter_by_kwargs):
144 |     """List EBS volumes."""
145 | conn = boto.ec2.connect_to_region(region)
146 | instances = conn.get_all_volumes()
147 | return lookup(instances, filter_by=filter_by_kwargs)
148 |
149 |
150 | def list_elb(region, filter_by_kwargs):
151 | """List all load balancers."""
152 | conn = boto.ec2.elb.connect_to_region(region)
153 | instances = conn.get_all_load_balancers()
154 | return lookup(instances, filter_by=filter_by_kwargs)
155 |
156 |
157 | def list_rds(region, filter_by_kwargs):
158 | """List all RDS thingys."""
159 | conn = boto.rds.connect_to_region(region)
160 | instances = conn.get_all_dbinstances()
161 | return lookup(instances, filter_by=filter_by_kwargs)
162 |
163 |
164 | def list_elasticache(region, filter_by_kwargs):
165 | """List all ElastiCache Clusters."""
166 | conn = boto.elasticache.connect_to_region(region)
167 | req = conn.describe_cache_clusters()
168 | data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
169 | if filter_by_kwargs:
170 |         clusters = [x['CacheClusterId'] for x in data if x[list(filter_by_kwargs.keys())[0]] == list(filter_by_kwargs.values())[0]]
171 | else:
172 | clusters = [x['CacheClusterId'] for x in data]
173 | return clusters
174 |
175 |
176 | def list_autoscaling_group(region, filter_by_kwargs):
177 | """List all Auto Scaling Groups."""
178 | conn = boto.ec2.autoscale.connect_to_region(region)
179 | groups = conn.get_all_groups()
180 | return lookup(groups, filter_by=filter_by_kwargs)
181 |
182 |
183 | def list_sqs(region, filter_by_kwargs):
184 | """List all SQS Queues."""
185 | conn = boto.sqs.connect_to_region(region)
186 | queues = conn.get_all_queues()
187 | return lookup(queues, filter_by=filter_by_kwargs)
188 |
189 |
190 | def list_kinesis_applications(region, filter_by_kwargs):
191 | """List all the kinesis applications along with the shards for each stream"""
192 | conn = boto.kinesis.connect_to_region(region)
193 | streams = conn.list_streams()['StreamNames']
194 | kinesis_streams = {}
195 | for stream_name in streams:
196 | shard_ids = []
197 | shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
198 | for shard in shards:
199 | shard_ids.append(shard['ShardId'])
200 | kinesis_streams[stream_name] = shard_ids
201 | return kinesis_streams
202 |
203 |
204 | def list_dynamodb(region, filter_by_kwargs):
205 | """List all DynamoDB tables."""
206 | conn = boto.dynamodb.connect_to_region(region)
207 | tables = conn.list_tables()
208 | return lookup(tables, filter_by=filter_by_kwargs)
209 |
210 |
211 | def list_emr(region, filter_by_kwargs):
212 | conn = boto.emr.connect_to_region(region)
213 | q_list = conn.list_clusters(cluster_states=['WAITING', 'RUNNING'])
214 | queues = q_list.clusters
215 | return lookup(queues, filter_by=filter_by_kwargs)
216 |
217 | list_resources = {
218 | 'cloudfront': list_cloudfront,
219 | 'ec2': list_ec2,
220 | 'ebs': list_ebs,
221 | 'elb': list_elb,
222 | 'rds': list_rds,
223 | 'elasticache': list_elasticache,
224 | 'asg': list_autoscaling_group,
225 | 'sqs': list_sqs,
226 | 'kinesisapp': list_kinesis_applications,
227 | 'dynamodb': list_dynamodb,
228 | 'billing': list_billing,
229 | 'emr': list_emr,
230 | }
231 |
232 |
233 | def main():
234 |
235 | template, namespace, region, filters, tokens = interpret_options()
236 |
237 | # get the template first so this can fail before making a network request
238 | fs_path = os.path.abspath(os.path.dirname(template))
239 | loader = jinja2.FileSystemLoader(fs_path)
240 | jinja2_env = jinja2.Environment(loader=loader)
241 | template = jinja2_env.get_template(os.path.basename(template))
242 |
243 |     # ensure a valid region is set
244 | if region not in [r.name for r in boto.ec2.regions()]:
245 |         raise ValueError("Invalid region: {0}".format(region))
246 |
247 | # should I be using ARNs?
248 | try:
249 | resources = list_resources[namespace](region, filters)
250 | except KeyError:
251 | print('ERROR: AWS namespace "{}" not supported or does not exist'
252 | .format(namespace))
253 | sys.exit(1)
254 |
255 | # base tokens
256 | template_tokens = {
257 | 'filters': filters,
258 | 'region': region, # Use for Auth config section if needed
259 | 'resources': resources,
260 | }
261 | # add tokens passed as cli args:
262 | if tokens is not None:
263 | for token_pair in tokens:
264 | if token_pair.count('=') != 1:
265 |                 raise CliArgsException("token pair '{0}' is invalid; it must contain exactly one '=' character.".format(token_pair))
266 | (key, value) = token_pair.split('=')
267 | template_tokens[key] = value
268 |
269 | print(template.render(template_tokens))
270 |
271 |
272 | if __name__ == '__main__':
273 | main()
274 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and
10 | distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by the
13 | copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all other
16 | entities that control, are controlled by, or are under common control with
17 | that entity. For the purposes of this definition, "control" means (i) the
18 | power, direct or indirect, to cause the direction or management of such
19 | entity, whether by contract or otherwise, or (ii) ownership of
20 | fifty percent (50%) or more of the outstanding shares, or (iii) beneficial
21 | ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity exercising
24 | permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation source,
28 | and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical transformation
31 | or translation of a Source form, including but not limited to compiled
32 | object code, generated documentation, and conversions to
33 | other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or Object
36 | form, made available under the License, as indicated by a copyright notice
37 | that is included in or attached to the work (an example is provided in the
38 | Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object form,
41 | that is based on (or derived from) the Work and for which the editorial
42 | revisions, annotations, elaborations, or other modifications represent,
43 | as a whole, an original work of authorship. For the purposes of this
44 | License, Derivative Works shall not include works that remain separable
45 | from, or merely link (or bind by name) to the interfaces of, the Work and
46 | Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including the original
49 | version of the Work and any modifications or additions to that Work or
50 | Derivative Works thereof, that is intentionally submitted to Licensor for
51 | inclusion in the Work by the copyright owner or by an individual or
52 | Legal Entity authorized to submit on behalf of the copyright owner.
53 | For the purposes of this definition, "submitted" means any form of
54 | electronic, verbal, or written communication sent to the Licensor or its
55 | representatives, including but not limited to communication on electronic
56 | mailing lists, source code control systems, and issue tracking systems
57 | that are managed by, or on behalf of, the Licensor for the purpose of
58 | discussing and improving the Work, but excluding communication that is
59 | conspicuously marked or otherwise designated in writing by the copyright
60 | owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity on
63 | behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License.
67 |
68 | Subject to the terms and conditions of this License, each Contributor
69 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
70 | royalty-free, irrevocable copyright license to reproduce, prepare
71 | Derivative Works of, publicly display, publicly perform, sublicense,
72 | and distribute the Work and such Derivative Works in
73 | Source or Object form.
74 |
75 | 3. Grant of Patent License.
76 |
77 | Subject to the terms and conditions of this License, each Contributor
78 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
79 | royalty-free, irrevocable (except as stated in this section) patent
80 | license to make, have made, use, offer to sell, sell, import, and
81 | otherwise transfer the Work, where such license applies only to those
82 | patent claims licensable by such Contributor that are necessarily
83 | infringed by their Contribution(s) alone or by combination of their
84 | Contribution(s) with the Work to which such Contribution(s) was submitted.
85 | If You institute patent litigation against any entity (including a
86 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a
87 | Contribution incorporated within the Work constitutes direct or
88 | contributory patent infringement, then any patent licenses granted to
89 | You under this License for that Work shall terminate as of the date such
90 | litigation is filed.
91 |
92 | 4. Redistribution.
93 |
94 | You may reproduce and distribute copies of the Work or Derivative Works
95 | thereof in any medium, with or without modifications, and in Source or
96 | Object form, provided that You meet the following conditions:
97 |
98 | 1. You must give any other recipients of the Work or Derivative Works a
99 | copy of this License; and
100 |
101 | 2. You must cause any modified files to carry prominent notices stating
102 | that You changed the files; and
103 |
104 | 3. You must retain, in the Source form of any Derivative Works that You
105 | distribute, all copyright, patent, trademark, and attribution notices from
106 | the Source form of the Work, excluding those notices that do not pertain
107 | to any part of the Derivative Works; and
108 |
109 | 4. If the Work includes a "NOTICE" text file as part of its distribution,
110 | then any Derivative Works that You distribute must include a readable copy
111 | of the attribution notices contained within such NOTICE file, excluding
112 | those notices that do not pertain to any part of the Derivative Works,
113 | in at least one of the following places: within a NOTICE text file
114 | distributed as part of the Derivative Works; within the Source form or
115 | documentation, if provided along with the Derivative Works; or, within a
116 | display generated by the Derivative Works, if and wherever such
117 | third-party notices normally appear. The contents of the NOTICE file are
118 | for informational purposes only and do not modify the License.
119 | You may add Your own attribution notices within Derivative Works that You
120 | distribute, alongside or as an addendum to the NOTICE text from the Work,
121 | provided that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and may
125 | provide additional or different license terms and conditions for use,
126 | reproduction, or distribution of Your modifications, or for any such
127 | Derivative Works as a whole, provided Your use, reproduction, and
128 | distribution of the Work otherwise complies with the conditions
129 | stated in this License.
130 |
131 | 5. Submission of Contributions.
132 |
133 | Unless You explicitly state otherwise, any Contribution intentionally
134 | submitted for inclusion in the Work by You to the Licensor shall be under
135 | the terms and conditions of this License, without any additional
136 | terms or conditions. Notwithstanding the above, nothing herein shall
137 | supersede or modify the terms of any separate license agreement you may
138 | have executed with Licensor regarding such Contributions.
139 |
140 | 6. Trademarks.
141 |
142 | This License does not grant permission to use the trade names, trademarks,
143 | service marks, or product names of the Licensor, except as required for
144 | reasonable and customary use in describing the origin of the Work and
145 | reproducing the content of the NOTICE file.
146 |
147 | 7. Disclaimer of Warranty.
148 |
149 | Unless required by applicable law or agreed to in writing, Licensor
150 | provides the Work (and each Contributor provides its Contributions)
151 | on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
152 | either express or implied, including, without limitation, any warranties
153 | or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS
154 | FOR A PARTICULAR PURPOSE. You are solely responsible for determining the
155 | appropriateness of using or redistributing the Work and assume any risks
156 | associated with Your exercise of permissions under this License.
157 |
158 | 8. Limitation of Liability.
159 |
160 | In no event and under no legal theory, whether in tort
161 | (including negligence), contract, or otherwise, unless required by
162 | applicable law (such as deliberate and grossly negligent acts) or agreed
163 | to in writing, shall any Contributor be liable to You for damages,
164 | including any direct, indirect, special, incidental, or consequential
165 | damages of any character arising as a result of this License or out of
166 | the use or inability to use the Work (including but not limited to damages
167 | for loss of goodwill, work stoppage, computer failure or malfunction,
168 | or any and all other commercial damages or losses), even if such
169 | Contributor has been advised of the possibility of such damages.
170 |
171 | 9. Accepting Warranty or Additional Liability.
172 |
173 | While redistributing the Work or Derivative Works thereof, You may choose
174 | to offer, and charge a fee for, acceptance of support, warranty,
175 | indemnity, or other liability obligations and/or rights consistent with
176 | this License. However, in accepting such obligations, You may act only
177 | on Your own behalf and on Your sole responsibility, not on behalf of any
178 | other Contributor, and only if You agree to indemnify, defend, and hold
179 | each Contributor harmless for any liability incurred by, or claims
180 | asserted against, such Contributor by reason of your accepting any such
181 | warranty or additional liability.
182 |
183 | END OF TERMS AND CONDITIONS
184 |
185 | APPENDIX: How to apply the Apache License to your work
186 |
187 | To apply the Apache License to your work, attach the following boilerplate
188 | notice, with the fields enclosed by brackets "[]" replaced with your own
189 | identifying information. (Don't include the brackets!) The text should be
190 | enclosed in the appropriate comment syntax for the file format. We also
191 | recommend that a file or class name and description of purpose be included
192 | on the same "printed page" as the copyright notice for easier
193 | identification within third-party archives.
194 |
195 | Copyright 2015 Chris Chang
196 |
197 | Licensed under the Apache License, Version 2.0 (the "License");
198 | you may not use this file except in compliance with the License.
199 | You may obtain a copy of the License at
200 |
201 | http://www.apache.org/licenses/LICENSE-2.0
202 |
203 | Unless required by applicable law or agreed to in writing, software
204 | distributed under the License is distributed on an "AS IS" BASIS,
205 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
206 | or implied. See the License for the specific language governing
207 | permissions and limitations under the License.
208 |
--------------------------------------------------------------------------------