"""WSGI entry point for metadataproxy.

Gunicorn (see run-server.sh) imports ``app`` from this module as
``metadataproxy:app``; running this file directly starts Flask's built-in
development server instead.
"""
from metadataproxy import app


if __name__ == '__main__':
    # Development-only server. HOST/PORT/DEBUG are populated from the
    # environment by metadataproxy.settings.
    app.run(
        host=app.config['HOST'],
        port=app.config['PORT'],
        debug=app.config['DEBUG']
    )
from flask import Flask

from metadataproxy import settings

# Flask application object; gunicorn serves this as ``metadataproxy:app``.
app = Flask(__name__)
app.config.from_object(settings)
app.debug = app.config['DEBUG']

if app.config['PATCH_ECS_ALLOWED_HOSTS']:
    # Support aws-vault's --ecs-server option: botocore only talks to a
    # fixed set of container-metadata hosts, so add the configured host to
    # that allow-list. NOTE(review): this appends a single string and
    # reaches into botocore's underscore-prefixed attribute, so it may
    # break across botocore upgrades — confirm on dependency bumps.
    from botocore.utils import ContainerMetadataFetcher  # NOQA
    ContainerMetadataFetcher._ALLOWED_HOSTS.append(app.config['PATCH_ECS_ALLOWED_HOSTS'])

if app.config['MOCK_API']:
    # Importing a routes module registers its @app.route handlers as a
    # side effect; exactly one of the two modules is loaded per process.
    from metadataproxy.routes import mock  # NOQA
else:
    from metadataproxy.routes import proxy  # NOQA
#!/bin/sh -e
# Launch metadataproxy under gunicorn. Every knob is a plain environment
# variable with a sensible default so the container can run unconfigured.

# ${VAR:-default} treats unset and empty identically, matching the old
# [ "z$VAR" = "z" ] tests this replaces.
GUNICORN_CONFIG="${GUNICORN_CONFIG:-/etc/gunicorn/gunicorn.conf}"
HOST="${HOST:-0.0.0.0}"
PORT="${PORT:-8000}"
WORKERS="${WORKERS:-1}"

# Only the exact string "True" enables debug logging.
# NOTE(review): stricter than settings.bool_env, which accepts any
# non-empty value other than False/false/0 — confirm this is intended.
if [ "$DEBUG" = "True" ]; then
    LEVEL="debug"
else
    LEVEL="warning"
fi

# Flush python stdout/stderr immediately so logs appear as they happen.
export PYTHONUNBUFFERED="true"

# Fixes vs. the previous version:
#  - every expansion is quoted so values containing whitespace do not
#    word-split the command line;
#  - exec replaces this shell with gunicorn, so gunicorn becomes PID 1 in
#    the container and receives SIGTERM directly on `docker stop`.
exec /usr/local/bin/gunicorn metadataproxy:app \
    -c "$GUNICORN_CONFIG" \
    --log-level "$LEVEL" \
    --workers="$WORKERS" \
    -k gevent \
    -b "$HOST:$PORT" \
    --access-logfile - \
    --error-logfile - \
    --log-file -
#!/bin/bash
# Publish the image built earlier in the Travis run
# (tagged $TRAVIS_REPO_SLUG:$TRAVIS_COMMIT) to Docker Hub.
# Tagged builds push the release tag; master pushes "latest"; PRs push nothing.

push_image() {
    # $1: the tag to publish under $REPO.
    docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
    export TAG="$1"
    echo "TAG is $TAG"
    docker tag "$TRAVIS_REPO_SLUG:$TRAVIS_COMMIT" "$REPO:$TAG"
    # BUG FIX: previously this pushed $TRAVIS_REPO_SLUG:$TAG, a name that
    # was never tagged — it only worked because .travis.yml sets REPO to
    # the repo slug. Push the name we actually tagged.
    docker push "$REPO:$TAG"
}

if [ "$TRAVIS_PULL_REQUEST" == "false" -a -n "$TRAVIS_TAG" ]
then
    push_image "$TRAVIS_TAG"
elif [ "$TRAVIS_PULL_REQUEST" == "false" -a "$TRAVIS_BRANCH" == "master" ]
then
    push_image "latest"
else
    echo 'Ignoring PR branch for docker push.'
fi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup, find_packages


def _requirement_lines(path):
    """Return the non-blank, non-comment lines of a pip requirements file."""
    with open(path) as handle:
        return [line for line in handle.read().splitlines()
                if line and not line.startswith('#')]


# Install-time requirements: runtime deps plus the wsgi-server deps.
reqs = _requirement_lines('requirements.txt') + _requirement_lines('requirements_wsgi.txt')

setup(
    name="metadataproxy",
    version="2.2.0",
    packages=find_packages(exclude=["test*"]),
    include_package_data=True,
    zip_safe=False,
    install_requires=reqs,
    author="Ryan Lane",
    author_email="rlane@lyft.com",
    description=("A proxy for AWS's metadata service that gives out"
                 " scoped IAM credentials from STS"),
    license="apache2",
    url="https://github.com/lyft/metadataproxy"
)
19 | # License: Apache2 20 | # Upstream url: https://github.com/docker/docker-py/ 21 | docker-py==1.10.6 22 | 23 | # The modular source code checker: pep8, pyflakes and co 24 | # License: MIT 25 | # Upstream url: http://bitbucket.org/tarek/flake8 26 | flake8==3.5.0 27 | 28 | # Measures code coverage and emits coverage reports 29 | # Licence: BSD 30 | # Upstream url: https://pypi.python.org/pypi/coverage 31 | coverage==4.5.1 32 | 33 | # tool to check your Python code against some of the style conventions 34 | # License: Expat License 35 | # Upstream url: https://github.com/jcrocholl/pep8.git 36 | pep8==1.7.1 37 | 38 | # nose makes testing easier 39 | # License: GNU Library or Lesser General Public License (LGPL) 40 | # Upstream url: http://readthedocs.org/docs/nose 41 | nose==1.3.7 42 | 43 | # http requests made easy 44 | # License: Apache2 45 | # Upstream url: https://github.com/kennethreitz/requests 46 | requests==2.22.0 47 | 48 | # makes in memory caching easy 49 | # License: MIT 50 | # Upstream url: https://github.com/tkem/cachetools 51 | cachetools==3.1.1 52 | 53 | # Json Formatter for the standard python logger 54 | # Licence: BSD 55 | # Upstream url: https://github.com/madzak/python-json-logger 56 | python-json-logger==0.1.11 57 | -------------------------------------------------------------------------------- /config/logging.conf: -------------------------------------------------------------------------------- 1 | version: 1 2 | 3 | # gunicorn configures python logging *after* its own logger have been 4 | # created. Disabling existing loggers (the default) will overwrite (ie delete) 5 | # gunicorn loggers. access and errors logs will no longer work. 
6 | disable_existing_loggers: false 7 | 8 | root: 9 | level: INFO 10 | handlers: 11 | - console_stdout 12 | 13 | loggers: 14 | gunicorn.error: 15 | # Don't log debug messages given that it pollutes the log with useless 16 | # "Closing Connection" messages when the root logger level is set to 17 | # DEBUG (eg in development). 18 | level: INFO 19 | propagate: True 20 | handlers: 21 | - console_stderr 22 | gunicorn.access: 23 | level: WARN 24 | # Don't propagate messages given that we'll log access log messages once 25 | # to the access.log file. When a logger doesn't propagate make sure to 26 | # set relevant handlers. 27 | propagate: False 28 | # reduce logging noise from various libraries 29 | requests.packages.urllib3.connectionpool: 30 | level: WARN 31 | propagate: True 32 | urllib3.connectionpool: 33 | level: WARN 34 | propagate: True 35 | boto: 36 | level: WARN 37 | propagate: True 38 | boto3: 39 | level: WARN 40 | propagate: True 41 | botocore: 42 | level: WARN 43 | propagate: True 44 | 45 | handlers: 46 | console_stdout: 47 | class: logging.StreamHandler 48 | formatter: json 49 | stream: 'ext://sys.stdout' 50 | console_stderr: 51 | class: logging.StreamHandler 52 | formatter: json 53 | stream: 'ext://sys.stderr' 54 | # access_file handler is commented out to keep it from being instantiated. 55 | # uncomment and add to gunicorn.access handlers to enable gunicorn access logs. 
56 | # 57 | # access_file: 58 | # class: logging.FileHandler 59 | # filename: '/var/log/metadataproxy-web-access.log' 60 | 61 | formatters: 62 | json: 63 | class: 'pythonjsonlogger.jsonlogger.JsonFormatter' 64 | format: 'ts=%(asctime)s name=%(name)s lvlname=%(levelname)s msg=%(message)s' 65 | -------------------------------------------------------------------------------- /metadataproxy/routes/proxy.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import requests 4 | from flask import Response 5 | from flask import request 6 | from flask import stream_with_context 7 | from flask import jsonify 8 | 9 | from metadataproxy import app 10 | from metadataproxy import roles 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def _supports_iam(version): 16 | '''Check the meta-data version for IAM support 17 | 18 | API versions before 2012-01-12 don't support the iam/ subtree. 19 | This function works because: 20 | >>> '1.0' < '2007-01-19' < '2014-11-05' < 'latest' 21 | True 22 | ''' 23 | return version >= '2012-01-12' 24 | 25 | 26 | @app.route('//meta-data/iam/info', strict_slashes=False) 27 | @app.route('//meta-data/iam/info/') 28 | def iam_role_info(api_version, junk=None): 29 | if not _supports_iam(api_version): 30 | return passthrough(request.path) 31 | 32 | role_params_from_ip = roles.get_role_params_from_ip(request.remote_addr) 33 | if role_params_from_ip['name']: 34 | log.debug('Providing IAM role info for {0}'.format(role_params_from_ip['name'])) 35 | return jsonify(roles.get_role_info_from_params(role_params_from_ip)) 36 | else: 37 | log.error('Role name not found; returning 404.') 38 | return '', 404 39 | 40 | 41 | @app.route('//meta-data/iam/security-credentials/') 42 | def iam_role_name(api_version): 43 | if not _supports_iam(api_version): 44 | return passthrough(request.path) 45 | 46 | role_params_from_ip = roles.get_role_params_from_ip(request.remote_addr) 47 | if 
@app.route('/<path:url>')
@app.route('/')
def passthrough(url=''):
    """Proxy any non-credential request through to the real metadata service.

    Streams the upstream response body back to the caller and preserves the
    upstream status code, so errors from the metadata service are visible
    to the container as-is.
    """
    log.debug('Did not match credentials request url; passing through.')
    req = requests.get(
        '{0}/{1}'.format(app.config['METADATA_URL'], url),
        stream=True
    )
    return Response(
        stream_with_context(req.iter_content()),
        # .get() instead of [] — if upstream ever omits Content-Type the
        # old indexed lookup raised KeyError and turned the proxied
        # request into a 500; None lets Flask fall back to its default.
        content_type=req.headers.get('content-type'),
        status=req.status_code
    )
8 | 9 | ## 2.0.0 10 | 11 | * Though we don't expect this to be a breaking change, the default renewal time for IAM credentials has been changed from 5 minutes to 15 minutes, for better compatibility with aws-sdk-java. This time can be changed via the `ROLE_EXPIRATION_THRESHOLD` setting. 12 | 13 | ## 1.11.0 14 | 15 | * Added PyYAML, python-json-logger, and blinker dependencies 16 | * Included a default gunicorn config and logging config 17 | * All logs are now sent to stdout by default, which should make flask logs available and written into the log file now 18 | 19 | ## 1.10.0 20 | 21 | * Support assuming roles with a Path 22 | 23 | ## 1.9.1 24 | 25 | * Docker packaging issue fix 26 | 27 | ## 1.9.0 28 | 29 | * Split envvars correctly, when vars are `KEY`, rather than `KEY=VAL`, rather than throwing an exception 30 | 31 | ## 1.8.0 32 | 33 | * Added support for finding mesos containers 34 | 35 | ## 1.7.0 36 | 37 | * Update mock URI for returning availability-zone. Fix for incorrect mocking of ``/latest/meta-data/placement/availability-zone`` 38 | 39 | ## 1.6.0 40 | 41 | * When proxying requests, also return the status code of the proxied request. 42 | 43 | ## 1.5.2 44 | 45 | * Prevent possibility of race condition during docker inspect 46 | 47 | ## 1.5.1 48 | 49 | * Fix 500 error when retrieving role session name from Docker label 50 | 51 | ## 1.5.0 52 | 53 | * New support retrieving container IP from Rancher labels 54 | 55 | ## 1.4.0 56 | 57 | * Add IAM\_EXTERNAL\_ID variable: if found value will be populated into ExternalId parameter when making AssumeRole call. 58 | * add ROLE\_SESSION\_KEY variable: if found will use value to look up key from Docker container labels or environment variable to set RoleSessionName when making AssumeRole call. See documentation for details. 59 | * Reduce number of calls to Docker API when retrieving credentials. 
60 | * Bump WSGI dependency versions 61 | 62 | ## 1.3.2 63 | 64 | * Packaging fixes for travis releases to docker hub 65 | 66 | ## 1.3.1 67 | 68 | * Fix for k8s network lookup stacktrace 69 | 70 | ## 1.3.0 71 | 72 | * Fix for reformatting IAM\_ROLE when it matches ARN format 73 | * Add logging for when the expected role does not match the available role 74 | * Export PYTHONUNBUFFERED in run-server.sh so logs come out as they are made available vs when python decides it's time 75 | * Send log-file to stdout as well in run-server.sh 76 | 77 | ## 1.2.6 78 | 79 | * In run-server.sh, sent stdout and stderr to stdout 80 | * In run-server.sh, make the workers configurable 81 | * In run-server.sh, use better bash syntax 82 | 83 | ## 1.2.5 84 | 85 | * Add more package data to setup.py for sdist packing fix 86 | 87 | ## 1.2.4 88 | 89 | * Add package data to setup.py for sdist packing fix 90 | 91 | ## 1.2.3 92 | 93 | * Attempt to fix sdist packaging 94 | 95 | ## 1.2.2 96 | 97 | * Attempt to fix sdist packaging 98 | 99 | ## 1.2.1 100 | 101 | * Travis docker fix (packaging change) 102 | 103 | ## 1.2.0 104 | 105 | * Look for container IP address in container's networks datastructure 106 | 107 | ## 1.1.4 108 | 109 | * Upgrade docker-py to fix auth parsing issue 110 | 111 | ## 1.1.3 112 | 113 | * Bump in release to fix pypi release process 114 | 115 | ## 1.1.2 116 | 117 | * Bump in release to be able to publish to pypi 118 | 119 | ## 1.1.1 120 | 121 | * Security release. [Ross Vandegrift](https://github.com/rvandegrift/) discovered a flaw in the proxy functionality when used in passthrough mode that would expose the host's IAM role credentials when extra paths were added to the end of the security-credentials end-point. metadataproxy will now properly capture any call to iam/security-credentials/ and return the scoped credentials, rather than the host's credentials. 122 | 123 | ## 1.1.0 124 | 125 | * Added support for cross-account role assumption. 
import json
from os import getenv


def bool_env(var_name, default=False):
    """Read *var_name* from the environment as a boolean.

    Example:
        Bash:
            $ export SOME_VAL=True
        settings.py:
            SOME_VAL = bool_env('SOME_VAL', False)

    Arguments:
        var_name: The name of the environment variable.
        default: Value used when *var_name* is not set in the environment.

    Coercion rules: the strings 'False', 'false' and '0' yield False, as
    does anything already falsy (e.g. the empty string); every other
    non-empty value yields True.
    """
    raw = getenv(var_name, default)
    # Environment values are strings, and non-empty strings are always
    # truthy, so the common "disabled" spellings must be special-cased.
    if raw in ('False', 'false', '0'):
        return False
    return bool(raw)


def float_env(var_name, default=0.0):
    """Read *var_name* as a float (same arguments as bool_env).

    Raises ValueError when the value cannot be coerced to a float.
    """
    return float(getenv(var_name, default))


def int_env(var_name, default=0):
    """Read *var_name* as an integer (same arguments as bool_env).

    Raises ValueError when the value cannot be coerced to an integer.
    """
    return int(getenv(var_name, default))


def str_env(var_name, default=''):
    """Read *var_name* as a string (same arguments as bool_env)."""
    return getenv(var_name, default)


PORT = int_env('PORT', 45001)
HOST = str_env('HOST', '0.0.0.0')
DEBUG = bool_env('DEBUG', False)

# Docker daemon endpoint; defaults to the local unix socket.
DOCKER_URL = str_env('DOCKER_URL', 'unix://var/run/docker.sock')
# Real metadata service to proxy through to; default is AWS's well-known
# link-local address.
METADATA_URL = str_env('METADATA_URL', 'http://169.254.169.254')
# True: serve mocked data for every endpoint. False: proxy everything
# except the IAM endpoints through to the real metadata service.
MOCK_API = bool_env('MOCK_API', False)
# Instance id reported by the mock API.
MOCKED_INSTANCE_ID = str_env('MOCKED_INSTANCE_ID', 'mockedid')

# Fallback role when a container has no IAM_ROLE set; empty means the
# container gets no IAM credentials.
DEFAULT_ROLE = str_env('DEFAULT_ROLE')
# Account used when IAM_ROLE carries no account info. If empty,
# metadataproxy falls back to resolving role ARNs via IAM:GET_ROLE when
# the IAM_ROLE name is not already an ARN.
DEFAULT_ACCOUNT_ID = str_env('DEFAULT_ACCOUNT_ID')
# Friendly-name -> account-id map, e.g.
#   AWS_ACCOUNT_MAP={'my-account-name':'12345'}
# so IAM_ROLE=myrole@my-account-name resolves to role "myrole" in
# account 12345.
AWS_ACCOUNT_MAP = json.loads(str_env('AWS_ACCOUNT_MAP', '{}'))
# Region used to pick the regional STS endpoint.
AWS_REGION = str_env('AWS_REGION')
# Minutes before expiry at which new credentials are fetched. Default was
# 5 historically; 15 plays better with aws-sdk-java.
ROLE_EXPIRATION_THRESHOLD = int_env('ROLE_EXPIRATION_THRESHOLD', 15)
# Optional JSON file mapping IP addresses to role names, for setups where
# docker networking is disabled and IPs are managed externally.
ROLE_MAPPING_FILE = str_env('ROLE_MAPPING_FILE')
# Reverse-resolve caller IPs to hostnames to match containers by name
# (useful with /etc/hosts- or DNS-managed container hostnames).
ROLE_REVERSE_LOOKUP = bool_env('ROLE_REVERSE_LOOKUP', False)
# Restrict reverse-lookup matching to hostnames matching this pattern.
HOSTNAME_MATCH_REGEX = str_env('HOSTNAME_MATCH_REGEX', '^.*$')
# Key for the role session name, prefixed "Labels:" or "Env:" to select
# container labels vs environment variables.
ROLE_SESSION_KEY = str_env('ROLE_SESSION_KEY')
# Optionally consult the mesos state API to find containers.
MESOS_STATE_LOOKUP = bool_env('MESOS_STATE_LOOKUP', False)
MESOS_STATE_URL = str_env('MESOS_STATE_URL', 'http://localhost:5051/state')
MESOS_STATE_TIMEOUT = int_env('MESOS_STATE_TIMEOUT', 2)

# Host to add to botocore's ContainerMetadataFetcher allow-list, to
# support aws-vault's --ecs-server option (e.g. docker-for-mac's host URL).
PATCH_ECS_ALLOWED_HOSTS = str_env('PATCH_ECS_ALLOWED_HOSTS')
Gunicorn 38 | # will automatically escape any quotes in the value: 39 | # "my-user-agent-\"-with-quote" 40 | # 41 | access_log_format = '%(t)s "%(r)s" %(s)s %({Content-Length}i)s %(b)s %(D)s "%({X-Forwarded-For}i)s" "%(a)s" %(u)s "%({X-Request-Id}i)s" "%({Accept}i)s"' 42 | log_conf_file = os.environ.get('LOGGING_CONF_FILE', '/etc/metadataproxy/logging.conf') 43 | with open(log_conf_file, "r") as fd: 44 | import os.path 45 | import yaml 46 | 47 | logconfig_dict = yaml.safe_load(os.path.expandvars(fd.read())) 48 | 49 | worker_class = 'gevent' 50 | 51 | keepalive = 0 52 | try: 53 | os.mkdir('/run/gunicorn') 54 | except OSError: 55 | pass 56 | # Put gunicorn healthchecks onto tmpfs so we don't block on EBS-only hosts 57 | worker_tmp_dir = "/run/gunicorn" 58 | 59 | if os.environ.get('RELOAD', 'false').lower() == 'true': 60 | reload = True 61 | 62 | reload_extra_files = filter(None, os.environ.get('RELOAD_EXTRA_FILES', '').split(',')) 63 | 64 | 65 | # Gunicorn hooks provide the ability to add extra functionality at 66 | # specific points in the lifecycle of a request or the server. The 67 | # Blinker library is used as a signaling mechanism, so integrating 68 | # with your application requires importing this library then 69 | # connecting your code to the appropriate signal. Signal handlers 70 | # accept a single sender positional argument and a set of kwargs that 71 | # matches the respective gunicorn callback arguments. Examples of 72 | # this can be found in this file for the default behavior used for all 73 | # applications. 74 | # 75 | # Custom signal handlers for your service should be added to a 76 | # gunicorn_hooks module. 77 | # 78 | # Warning: there is no guarantee for the order in which signals get 79 | # processed so if you require this functionality then all 80 | # order-dependent logic should be wrapped into a single callback. 
class ServerSignals(object):
    """Expose every gunicorn server hook as a blinker signal.

    Walks the gunicorn settings dict, creates one blinker signal per hook
    setting (named ``gunicorn_hook.<hook>``) and replaces the hook's default
    callback in this config module's globals with a thin wrapper that sends
    the signal.  Application code subscribes via ``server_hooks.<hook>.connect``.
    """

    def __init__(self, settings, delete=True):
        # settings: the dict returned by gunicorn.config.make_settings();
        # hook-style Setting objects carry their default callback in .default.
        self._settings = settings
        self.allocate(delete)
        self.wrap_hooks()

    def allocate(self, delete):
        """Create one blinker signal per hook setting and attach it to self."""
        for setting in itervalues(self._settings):
            if (isinstance(setting, Setting) and
                    callable(getattr(setting, 'default', ''))):
                # disable inspection of callback arguments
                cls = setting.__class__
                cls.validator = staticmethod(validate_callable(-1))
                hook = setting.default.__code__.co_name
                # delete signal if it already exists, so we don't add
                # a connect per execfile of gunicorn.conf on HUP
                signal_name = 'gunicorn_hook.{}'.format(hook)
                if delete:
                    # blinker.signal is a bound method; __self__ is the shared
                    # registry dict of named signals.
                    blinker.signal.__self__.pop(signal_name, None)
                # create signal and attach it
                setattr(self, hook, blinker.signal(signal_name))

    @staticmethod
    def make_signal(func):
        """Wrap gunicorn hook *func* so calling it sends the matching signal.

        The wrapper converts the hook's positional arguments to kwargs (using
        the hook's own parameter names) before sending, and only sends when
        the signal has receivers.
        """
        s = blinker.signal('gunicorn_hook.{}'.format(func.__code__.co_name))
        arg_names = func.__code__.co_varnames

        def func_wrapper(*args, **kwargs):
            if s.receivers:
                # turn all arguments into kwargs for signal send
                s_kwargs = dict(zip_longest(arg_names, args))
                s_kwargs.update(kwargs)
                s.send('gunicorn', **s_kwargs)
        return func_wrapper

    def wrap_hooks(self):
        """Install the signal-sending wrappers as this module's hook callbacks.

        gunicorn reads hook callbacks by name from this config module's
        namespace, so assigning into globals() is what actually registers them.
        """
        namespace = globals()
        for setting in itervalues(self._settings):
            if (isinstance(setting, Setting) and
                    callable(getattr(setting, 'default', ''))):
                fn = setting.default
                namespace[fn.__code__.co_name] = self.make_signal(fn)

    @staticmethod
    def any_sender(func):
        """Decorator that drops the blinker `sender` positional argument."""
        def func_wrapper(sender, *args, **kwargs):
            return func(*args, **kwargs)
        return func_wrapper


# setup our gunicorn server hook signals
# NOTE(review): `global` at module level is a no-op; presumably kept to signal
# intent under gunicorn's execfile-style config loading — confirm.
global server_hooks
server_hooks = ServerSignals(make_settings())


class GeventWorkerTimeoutException(Exception):
    """
    By default, gevent timeouts throw an exception that extends from BaseException and is not
    handled properly by gunicorn (no 500 returned to client, no access log). Extending from Exception rather
    than BaseException fixes that. Defining a custom exception also helps with readability in the log.

    """
    pass


def get_maxfd():
    """Return the hard RLIMIT_NOFILE limit, capped at MAXFD when unlimited."""
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    return maxfd


@server_hooks.pre_request.connect
@server_hooks.any_sender
def set_request_timeout(worker, req):
    """Arm a gevent timeout on each request so slow requests get killed."""
    if GEVENT_WORKER_AVAILABLE and isinstance(worker, gunicorn.workers.ggevent.GeventWorker):
        # timeout will be caught as an unhandled exception, gunicorn will return and log a 500
        timeout_sec = GEVENT_REQUEST_TIMEOUT_MS / float(1000)
        req._timeout = gunicorn.workers.ggevent.gevent.Timeout(
            seconds=timeout_sec,
            exception=GeventWorkerTimeoutException('{} seconds'.format(timeout_sec))
        )
        req._timeout.start()


@server_hooks.post_request.connect
@server_hooks.any_sender
def cancel_request_timeout(worker, req, environ, resp):
    """Disarm the per-request gevent timeout once the request completes."""
    if GEVENT_WORKER_AVAILABLE and getattr(req, '_timeout', None):
        req._timeout.cancel()
UUID4 implementation is backported from 184 | # Python 3.5 and avoids calling into libuuid. See also: 185 | # https://bugs.python.org/issue25515 186 | req.headers.append(('X-REQUEST-ID', str(UUID(bytes=os.urandom(16), version=4)))) 187 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # metadataproxy 2 | 3 | The metadataproxy is used to allow containers to acquire IAM roles. By metadata we mean [EC2 instance meta data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) which is normally available to EC2 instances. This proxy exposes the meta data to containers inside or outside of EC2 hosts, allowing you to provide scoped IAM roles to individual containers, rather than giving them the full IAM permissions of an IAM role or IAM user. 4 | 5 | ## Installation 6 | 7 | From inside of the repo run the following commands: 8 | 9 | ```bash 10 | mkdir -p /srv/metadataproxy 11 | cd /srv/metadataproxy 12 | virtualenv venv 13 | source venv/bin/activate 14 | pip install metadataproxy 15 | deactivate 16 | ``` 17 | 18 | ## Configuration 19 | 20 | ### Modes of operation 21 | 22 | See [the settings file](https://github.com/lyft/metadataproxy/blob/master/metadataproxy/settings.py) 23 | for specific configuration options. 24 | 25 | The metadataproxy has two basic modes of operation: 26 | 27 | 1. Running in AWS where it simply proxies most routes to the real metadata 28 | service. 29 | 2. Running outside of AWS where it mocks out most routes. 30 | 31 | To enable mocking, use the environment variable: 32 | 33 | ``` 34 | export MOCK_API=true 35 | ``` 36 | 37 | ### AWS credentials 38 | 39 | metadataproxy relies on boto configuration for its AWS credentials. If metadata 40 | IAM credentials are available, it will use this. 
Otherwise, you'll need to use 41 | .aws/credentials, .boto, or environment variables to specify the IAM 42 | credentials before the service is started. 43 | 44 | ### Role assumption 45 | 46 | For IAM routes, the metadataproxy will use STS to assume roles for containers. 47 | To do so it takes the incoming IP address of metadata requests and finds the 48 | running docker container associated with the IP address. It uses the value of 49 | the container's `IAM_ROLE` environment variable as the role it will assume. It 50 | then assumes the role and gives back STS credentials in the metadata response. 51 | 52 | STS-attained credentials are cached and automatically rotated as they expire. 53 | 54 | #### Container-specific roles 55 | 56 | To specify the role of a container, simply launch it with the `IAM_ROLE` 57 | environment variable set to the IAM role you wish the container to run with. 58 | 59 | If the trust policy for the role requires an ExternalId, you can set this 60 | using the `IAM_EXTERNAL_ID` environment variable. This is most frequently 61 | used with cross-account role access scenarios. For more information on 62 | when you should use an External ID for your roles, see: 63 | 64 | http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html 65 | 66 | ```shell 67 | docker run -e IAM_ROLE=my-role ubuntu:14.04 68 | docker run -e IAM_ROLE=their-role@another-account -e IAM_EXTERNAL_ID=random-unique-string ubuntu:14.04 69 | ``` 70 | 71 | #### Configurable Behavior 72 | 73 | There are a number of environment variables that can be set to tune 74 | metadata proxy's behavior. They can either be exported by the start 75 | script, or set via docker environment variables. 76 | 77 | | Variable | Type | Default | Description | 78 | | -------- | ---- | ------- | ----------- | 79 | | **DEFAULT\_ROLE** | String | | Role to use if IAM\_ROLE is not set in a container's environment. If unset the container will get no IAM credentials. 
AWS Region for the STS endpoint, allowing you to call a region-based endpoint instead of the global one.
[AWS STS region endpoints.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_region-endpoints) | 89 | | ROLE\_EXPIRATION\_THRESHOLD | Integer | 15 | The threshold before credentials expire in minutes at which metadataproxy will attempt to load new credentials. | 90 | | ROLE\_MAPPING\_FILE | Path String | | A json file that has a dict mapping of IP addresses to role names. Can be used if docker networking has been disabled and you are managing IP addressing for containers through another process. | 91 | | ROLE\_REVERSE\_LOOKUP | Boolean | False | Enable performing a reverse lookup of incoming IP addresses to match containers by hostname. Useful if you've disabled networking in docker, but set hostnames for containers in /etc/hosts or DNS. | 92 | | HOSTNAME\_MATCH\_REGEX | Regex String | `^.*$` | Limit reverse lookup container matching to hostnames that match the specified pattern. | 93 | | PATCH_ECS_ALLOWED_HOSTS | String | | Patch botocore's allowed hosts for ContainerMetadataFetcher to support aws-vault's --ecs-server option. This will inject the provided host into the allowed addresses botocore will allow for the AWS_CONTAINER_CREDENTIALS_FULL_URI environment. | 94 | 95 | #### Default Roles 96 | 97 | When no role is matched, `metadataproxy` will use the role specified in the 98 | `DEFAULT_ROLE` `metadataproxy` environment variable. If no DEFAULT\_ROLE is 99 | specified as a fallback, then your docker container without an `IAM_ROLE` 100 | environment variable will fail to retrieve credentials. 
101 | 102 | #### Role Formats 103 | 104 | The following are all supported formats for specifying roles: 105 | 106 | - By Role: 107 | 108 | ```shell 109 | IAM_ROLE=my-role 110 | ``` 111 | 112 | - By Role@AccountId 113 | 114 | ```shell 115 | IAM_ROLE=my-role@012345678910 116 | ``` 117 | 118 | - By ARN: 119 | 120 | ```shell 121 | IAM_ROLE=arn:aws:iam::012345678910:role/my-role 122 | ``` 123 | 124 | ### Role structure 125 | 126 | A useful way to deploy this metadataproxy is with a two-tier role 127 | structure: 128 | 129 | 1. The first tier is the EC2 service role for the instances running 130 | your containers. Call it `DockerHostRole`. Your instances must 131 | be launched with a policy that assigns this role. 132 | 133 | 2. The second tier is the role that each container will use. These 134 | roles must trust your own account ("Role for Cross-Account 135 | Access" in AWS terms). Call it `ContainerRole1`. 136 | 137 | 3. metadataproxy needs to query and assume the container role. So 138 | the `DockerHostRole` policy must permit this for each container 139 | role. For example: 140 | ``` 141 | "Statement": [ { 142 | "Effect": "Allow", 143 | "Action": [ 144 | "iam:GetRole", 145 | "sts:AssumeRole" 146 | ], 147 | "Resource": [ 148 | "arn:aws:iam::012345678901:role/ContainerRole1", 149 | "arn:aws:iam::012345678901:role/ContainerRole2" 150 | ] 151 | } ] 152 | ``` 153 | 154 | 4. Now customize `ContainerRole1` & friends as you like 155 | 156 | Note: The `ContainerRole1` role should have a trust relationship that allows it to be assumed by the `user` which is associated to the host machine running the `sts:AssumeRole` command. 
An example trust relationship for `ContainerRole1` may look like:
\ 222 | --in-interface docker0 \ 223 | --jump DROP 224 | ``` 225 | 226 | ## Run metadataproxy without docker 227 | 228 | In the following we assume \_my\_config\_ is a bash file with exports for all of 229 | the necessary settings discussed in the configuration section. 230 | 231 | ``` 232 | source my_config 233 | cd /srv/metadataproxy 234 | source venv/bin/activate 235 | gunicorn metadataproxy:app --workers=2 -k gevent 236 | ``` 237 | 238 | ## Run metadataproxy with docker 239 | 240 | For production purposes, you'll want to kick up a container to run. 241 | You can build one with the included Dockerfile. To run, do something like: 242 | ```bash 243 | docker run --net=host \ 244 | -v /var/run/docker.sock:/var/run/docker.sock \ 245 | lyft/metadataproxy 246 | ``` 247 | 248 | ### gunicorn settings 249 | 250 | The following environment variables can be set to configure gunicorn (defaults 251 | are set in the examples): 252 | 253 | ``` 254 | # Change the IP address the gunicorn worker is listening on. You likely want to 255 | # leave this as the default 256 | HOST=0.0.0.0 257 | 258 | # Change the port the gunicorn worker is listening on. 259 | PORT=8000 260 | 261 | # Change the number of worker processes gunicorn will run with. The default is 262 | # 1, which is likely enough since metadataproxy is using gevent and its work is 263 | # completely IO bound. Increasing the number of workers will likely make your 264 | # in-memory cache less efficient 265 | WORKERS=1 266 | 267 | # Enable debug mode (you should not do this in production as it will leak IAM 268 | # credentials into your logs) 269 | DEBUG=False 270 | ``` 271 | 272 | ## Contributing 273 | 274 | ### Code of conduct 275 | 276 | This project is governed by [Lyft's code of 277 | conduct](https://github.com/lyft/code-of-conduct). 278 | All contributors and participants agree to abide by its terms. 
service](https://oss.lyft.com/cla) and follow the instructions to sign the CLA.
@log_exec_time
def find_container(ip):
    """Return the docker-inspect dict for the container that owns *ip*.

    Resolution order: cached container id -> primary-IP match -> per-network
    IP match -> Rancher 1.2+ ip label -> reverse-DNS hostname match ->
    (optionally) Mesos state API lookup.  Returns None when nothing matches.
    Successful matches are memoized in the module-level CONTAINER_MAPPING dict.
    """
    pattern = re.compile(app.config['HOSTNAME_MATCH_REGEX'])
    client = docker_client()
    # Try looking at the container mapping cache first
    container_id = CONTAINER_MAPPING.get(ip)
    if container_id:
        log.info('Container id for IP {0} in cache'.format(ip))
        try:
            with PrintingBlockTimer('Container inspect'):
                container = client.inspect_container(container_id)
            # Only return a cached container if it is running.
            if container['State']['Running']:
                return container
            else:
                # Stale cache entry: evict it and fall through to a full scan.
                log.error('Container id {0} is no longer running'.format(ip))
                if ip in CONTAINER_MAPPING:
                    del CONTAINER_MAPPING[ip]
        except docker.errors.NotFound:
            msg = 'Container id {0} no longer mapped to {1}'
            log.error(msg.format(container_id, ip))
            if ip in CONTAINER_MAPPING:
                del CONTAINER_MAPPING[ip]

    _fqdn = None
    with PrintingBlockTimer('Reverse DNS'):
        if app.config['ROLE_REVERSE_LOOKUP']:
            try:
                _fqdn = socket.gethostbyaddr(ip)[0]
            except socket.error as e:
                log.error('gethostbyaddr failed: {0}'.format(e.args))
                pass

    with PrintingBlockTimer('Container fetch'):
        _ids = [c['Id'] for c in client.containers()]

    for _id in _ids:
        try:
            with PrintingBlockTimer('Container inspect'):
                c = client.inspect_container(_id)
        except docker.errors.NotFound:
            # Container went away between the list and the inspect call.
            log.error('Container id {0} not found'.format(_id))
            continue
        # Try matching container to caller by IP address
        _ip = c['NetworkSettings']['IPAddress']
        if ip == _ip:
            msg = 'Container id {0} mapped to {1} by IP match'
            log.debug(msg.format(_id, ip))
            CONTAINER_MAPPING[ip] = _id
            return c
        # Try matching container to caller by sub network IP address
        _networks = c['NetworkSettings']['Networks']
        if _networks:
            for _network in _networks:
                if _networks[_network]['IPAddress'] == ip:
                    msg = 'Container id {0} mapped to {1} by sub-network IP match'
                    log.debug(msg.format(_id, ip))
                    CONTAINER_MAPPING[ip] = _id
                    return c
        # Not Found? Let's see if we are running under rancher 1.2+,
        # which uses a label to store the IP
        try:
            _labels = c.get('Config', {}).get('Labels', {})
        except (KeyError, ValueError):
            _labels = {}
        try:
            if _labels.get('io.rancher.container.ip'):
                # Label value is CIDR-like ("a.b.c.d/nn"); keep the address part.
                _ip = _labels.get('io.rancher.container.ip').split("/")[0]
        except docker.errors.NotFound:
            log.error('Container: {0} Label container.ip not found'.format(_id))
        if ip == _ip:
            msg = 'Container id {0} mapped to {1} by Rancher IP match'
            log.debug(msg.format(_id, ip))
            CONTAINER_MAPPING[ip] = _id
            return c
        # Try matching container to caller by hostname match
        if app.config['ROLE_REVERSE_LOOKUP']:
            hostname = c['Config']['Hostname']
            domain = c['Config']['Domainname']
            fqdn = '{0}.{1}'.format(hostname, domain)
            # Default pattern matches _fqdn == fqdn
            # NOTE(review): _fqdn can still be None here when gethostbyaddr
            # failed above, in which case re.match raises TypeError — confirm
            # whether that path is reachable in practice.
            _groups = re.match(pattern, _fqdn).groups()
            groups = re.match(pattern, fqdn).groups()
            if _groups and groups:
                if groups[0] == _groups[0]:
                    msg = 'Container id {0} mapped to {1} by FQDN match'
                    log.debug(msg.format(_id, ip))
                    CONTAINER_MAPPING[ip] = _id
                    return c

    # Try to find the container over the mesos state api and use the labels attached to it
    # as a replacement for docker env and labels
    if app.config['MESOS_STATE_LOOKUP']:
        mesos_container = find_mesos_container(ip)
        if mesos_container is not None:
            return mesos_container

    log.error('No container found for ip {0}'.format(ip))
    return None
def split_envvar(envvar):
    """Split a ``key=val`` string into a two-item ``[key, val]`` list.

    When the string has no ``=val`` part, ``val`` comes back as None.
    Only the first ``=`` splits, so ``a=b=c`` yields ``['a', 'b=c']``.
    """
    key, sep, value = envvar.partition('=')
    return [key, value if sep else None]
params['external_id'] = val 259 | if not role_name: 260 | msg = "Couldn't find IAM_ROLE variable. Returning DEFAULT_ROLE: {0}" 261 | log.debug(msg.format(app.config['DEFAULT_ROLE'])) 262 | role_name = app.config['DEFAULT_ROLE'] 263 | 264 | # Optionally, look up role session name from environment or labels 265 | if app.config['ROLE_SESSION_KEY']: 266 | skey = app.config['ROLE_SESSION_KEY'] 267 | sval = None 268 | if skey.startswith('Env:'): 269 | skey = skey[4:] 270 | for e in env: 271 | key, val = split_envvar(e) 272 | if skey == key: 273 | sval = val 274 | elif skey.startswith('Labels:'): 275 | skey = skey[7:] 276 | if container['Config']['Labels'] and skey in container['Config']['Labels']: 277 | sval = container['Config']['Labels'][skey] 278 | if sval and len(sval) > 1: 279 | # The docs on RoleSessionName are slightly contradictory, and state: 280 | # > The regex used to validate this parameter is a string of characters consisting 281 | # > of upper- and lower-case alphanumeric characters with no spaces. You can also 282 | # > include underscores or any of the following characters: =,.@- 283 | # > Type: String 284 | # > Length Constraints: Minimum length of 2. Maximum length of 64. 285 | # > Pattern: [\w+=,.@-]* 286 | # We replace any invalid chars with underscore, and trim to 64. 
287 | params['session_name'] = re.sub(r'[^\w+=,.@-]', '_', sval)[:64] 288 | if role_name: 289 | role_parts = role_name.split('@') 290 | params['name'] = role_parts[0] 291 | if len(role_parts) > 1: 292 | params['account_id'] = role_parts[1] 293 | 294 | if requested_role and requested_role != params['name']: 295 | raise UnexpectedRoleError 296 | 297 | return params 298 | 299 | 300 | @log_exec_time 301 | def get_role_info_from_params(role_params): 302 | if not role_params['name']: 303 | return {} 304 | try: 305 | role = get_assumed_role(role_params) 306 | except GetRoleError: 307 | return {} 308 | time_format = "%Y-%m-%dT%H:%M:%SZ" 309 | expiration = role['Credentials']['Expiration'] 310 | updated = expiration - datetime.timedelta(minutes=60) 311 | return { 312 | 'Code': 'Success', 313 | 'LastUpdated': updated.strftime(time_format), 314 | 'InstanceProfileArn': role['AssumedRoleUser']['Arn'], 315 | 'InstanceProfileId': role['AssumedRoleUser']['AssumedRoleId'] 316 | } 317 | 318 | 319 | def get_role_arn(role_params): 320 | if role_params['account_id']: 321 | # Try to map the name to an account ID. If it isn't found, assume an ID was passed 322 | # in and use it as-is. 323 | role_params['account_id'] = app.config['AWS_ACCOUNT_MAP'].get( 324 | role_params['account_id'], 325 | role_params['account_id'] 326 | ) 327 | else: 328 | if app.config['DEFAULT_ACCOUNT_ID']: 329 | role_params['account_id'] = app.config['DEFAULT_ACCOUNT_ID'] 330 | # No default account id defined. Get the ARN by looking up the role 331 | # name. This is a backwards compat use-case for when we didn't require 332 | # the default account id. 
@log_exec_time
def get_assumed_role(role_params):
    """Return the sts.assume_role response for *role_params*, with caching.

    Cached credentials are reused until they are within
    ROLE_EXPIRATION_THRESHOLD minutes of expiry; past that point a fresh
    AssumeRole call is made and the cache entry replaced.
    """
    arn = get_role_arn(role_params)
    if arn in ROLES:
        # ROLES is a plain module-level dict serving as the per-process cache.
        assumed_role = ROLES[arn]
        expiration = assumed_role['Credentials']['Expiration']
        now = datetime.datetime.now(dateutil.tz.tzutc())
        expire_check = now + datetime.timedelta(minutes=app.config['ROLE_EXPIRATION_THRESHOLD'])
        if expire_check < expiration:
            # Still comfortably within its lifetime; serve from cache.
            return assumed_role
    with PrintingBlockTimer('sts.assume_role'):
        sts = sts_client()
        # Fall back to a fixed session name when none was derived from the
        # container (see ROLE_SESSION_KEY handling in get_role_params_from_ip).
        session_name = role_params['session_name'] or 'devproxyauth'
        kwargs = {'RoleArn': arn, 'RoleSessionName': session_name}
        if role_params['external_id']:
            kwargs['ExternalId'] = role_params['external_id']
        assumed_role = sts.assume_role(**kwargs)
        ROLES[arn] = assumed_role
    return assumed_role
credentials['SecretAccessKey'], 384 | 'Token': credentials['SessionToken'], 385 | 'Expiration': expiration.strftime(time_format) 386 | } 387 | 388 | 389 | class GetRoleError(Exception): 390 | pass 391 | 392 | 393 | class UnexpectedRoleError(Exception): 394 | pass 395 | -------------------------------------------------------------------------------- /metadataproxy/routes/mock.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import dateutil 3 | import logging 4 | 5 | from flask import request 6 | from flask import redirect 7 | from flask import url_for 8 | from flask import jsonify 9 | 10 | from metadataproxy import app 11 | from metadataproxy import roles 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | 16 | @app.route( 17 | '/' 18 | ) 19 | def root_noslash(api_version): 20 | return redirect( 21 | url_for('root_slash', api_version=api_version), 22 | code=301 23 | ) 24 | 25 | 26 | @app.route( 27 | '//' 28 | ) 29 | def root_slash(api_version): 30 | return 'meta-data', 200 31 | 32 | 33 | @app.route( 34 | '//meta-data' 35 | ) 36 | def get_meta_data_noslash(api_version): 37 | return redirect( 38 | url_for('get_meta_data_slash', api_version=api_version), 39 | code=301 40 | ) 41 | 42 | 43 | @app.route('//meta-data/') 44 | def get_meta_data_slash(api_version): 45 | meta_data_list = [ 46 | 'ami-id', 'ami-launch-index', 'ami-manifest-path', 47 | 'block-device-mapping/', 'hostname', 'iam/', 'instance-action', 48 | 'instance-id', 'instance-type', 'local-hostname', 'local-ipv4', 49 | 'mac', 'metrics/', 'network/', 'placement/', 'profile', 50 | 'public-hostname', 'public-ipv4', 'public-keys/', 'reservation-id', 51 | 'security-groups', 'services/' 52 | ] 53 | return '\n'.join(meta_data_list), 200 54 | 55 | 56 | @app.route('//meta-data/ami-id') 57 | def get_ami_id(api_version): 58 | return 'ami-mockedami', 200 59 | 60 | 61 | @app.route('//meta-data/ami-launch-index') 62 | def get_ami_launch_index(api_version): 63 | 
return '0', 200 64 | 65 | 66 | @app.route('//meta-data/ami-manifest-path') 67 | def get_ami_manifest_path(api_version): 68 | return '(unknown)', 200 69 | 70 | 71 | @app.route('//meta-data/block-device-mapping') 72 | def get_block_device_mapping_noslash(api_version): 73 | return redirect( 74 | url_for('get_block_device_mapping_slash', api_version=api_version), 75 | code=301 76 | ) 77 | 78 | 79 | @app.route('//meta-data/block-device-mapping/') 80 | def get_block_device_mapping_slash(api_version): 81 | return 'ami\nroot', 200 82 | 83 | 84 | @app.route('//meta-data/block-device-mapping/ami') 85 | def get_block_device_mapping_ami(api_version): 86 | return '/dev/sda1', 200 87 | 88 | 89 | @app.route('//meta-data/block-device-mapping/root') 90 | def get_block_device_mapping_root(api_version): 91 | return '/dev/sda1', 200 92 | 93 | 94 | @app.route('//meta-data/hostname') 95 | def get_hostname(api_version): 96 | return 'mocked.internal', 200 97 | 98 | 99 | @app.route('//meta-data/iam') 100 | def get_iam_noslash(api_version): 101 | return redirect( 102 | url_for('get_iam_slash', api_version=api_version), 103 | 301 104 | ) 105 | 106 | 107 | @app.route('//meta-data/iam/') 108 | def get_iam_slash(api_version): 109 | return 'info\nsecurity-credentials/', 200 110 | 111 | 112 | @app.route('//meta-data/iam/info', strict_slashes=False) 113 | @app.route('//meta-data/iam/info/') 114 | def get_iam_info(api_version, junk=None): 115 | role_params_from_ip = roles.get_role_params_from_ip(request.remote_addr) 116 | if role_params_from_ip['name']: 117 | log.debug('Providing IAM role info for {0}'.format(role_params_from_ip['name'])) 118 | return jsonify(roles.get_role_info_from_params(role_params_from_ip)) 119 | else: 120 | log.error('Role name not found; returning 404.') 121 | return '', 404 122 | 123 | 124 | @app.route('//meta-data/iam/security-credentials') 125 | def get_security_credentials_noslash(api_version): 126 | return redirect( 127 | url_for('get_security_credentials_slash', 
api_version=api_version), 128 | code=301 129 | ) 130 | 131 | 132 | @app.route('//meta-data/iam/security-credentials/') 133 | def get_security_credentials_slash(api_version): 134 | role_params = roles.get_role_params_from_ip(request.remote_addr) 135 | if not role_params['name']: 136 | return '', 404 137 | return role_params['name'], 200 138 | 139 | 140 | @app.route( 141 | '//meta-data/iam/security-credentials/', 142 | methods=['GET'], 143 | strict_slashes=False 144 | ) 145 | @app.route( 146 | '//meta-data/iam/security-credentials//', 147 | methods=['GET'] 148 | ) 149 | def get_role_credentials(api_version, requested_role, junk=None): 150 | try: 151 | role_params = roles.get_role_params_from_ip( 152 | request.remote_addr, 153 | requested_role=requested_role 154 | ) 155 | except roles.UnexpectedRoleError: 156 | return '', 403 157 | 158 | try: 159 | assumed_role = roles.get_assumed_role_credentials( 160 | role_params=role_params, 161 | api_version=api_version 162 | ) 163 | except roles.GetRoleError as e: 164 | return '', e.args[0][0] 165 | return jsonify(assumed_role) 166 | 167 | 168 | @app.route('//meta-data/instance-action') 169 | def get_instance_action(api_version): 170 | return 'none', 200 171 | 172 | 173 | @app.route('//meta-data/instance-id') 174 | def get_instance_id(api_version): 175 | return 'i-{0}'.format(app.config['MOCKED_INSTANCE_ID']), 200 176 | 177 | 178 | @app.route('//meta-data/instance-type') 179 | def get_instance_type(api_version): 180 | return 't2.medium', 200 181 | 182 | 183 | @app.route('//meta-data/mac') 184 | def get_mac(api_version): 185 | return 'AE-30-76-CE-38-62', 200 186 | 187 | 188 | @app.route('//meta-data/metrics') 189 | def get_metrics_noslash(api_version): 190 | return redirect( 191 | url_for('get_metrics_slash', api_version=api_version), 192 | code=301 193 | ) 194 | 195 | 196 | @app.route('//meta-data/metrics/') 197 | def get_metrics_slash(api_version): 198 | return 'vhostmd', 200 199 | 200 | 201 | 
# Mocked network-interface metadata for the single fake MAC address.
# Route rules restore the `<api_version>` URL variable consumed by every
# view (stripped to '//' in the extracted dump).
@app.route('/<api_version>/meta-data/metrics/vhostmd')
def get_metrics_vhostmd(api_version):
    return '', 200


@app.route('/<api_version>/meta-data/network')
def get_network_noslash(api_version):
    return redirect(
        url_for('get_network_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/network/')
def get_network_slash(api_version):
    return 'interfaces/', 200


@app.route('/<api_version>/meta-data/network/interfaces')
def get_network_interfaces_noslash(api_version):
    return redirect(
        url_for('get_network_interfaces_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/network/interfaces/')
def get_network_interfaces_slash(api_version):
    return 'macs/', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs')
def get_network_interfaces_macs_noslash(api_version):
    return redirect(
        url_for('get_network_interfaces_macs_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/network/interfaces/macs/')
def get_network_interfaces_macs_slash(api_version):
    return 'AE:30:76:CE:38:62/', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62')
def get_network_interfaces_macaddr_noslash(api_version):
    return redirect(
        url_for(
            'get_network_interfaces_macaddr_slash',
            api_version=api_version
        ),
        code=301
    )


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/')
def get_network_interfaces_macaddr_slash(api_version):
    """Directory listing of per-interface metadata keys."""
    info = [
        'device-number', 'interface-id', 'ipv4-associations/',
        'local-hostname', 'local-ipv4s', 'mac', 'owner-id', 'public-hostname',
        'public-ipv4s', 'security-group-ids', 'security-groups', 'subnet-id',
        'subnet-ipv4-cidr-block', 'vpc-id', 'vpc-ipv4-cidr-block'
    ]
    return '\n'.join(info), 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/device-number')
def get_macaddr_device_number(api_version):
    return '0', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/interface-id')
def get_macaddr_interface_id(api_version):
    return 'eni-1234', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/ipv4-associations')
def get_macaddr_ipv4_associations_noslash(api_version):
    return redirect(
        url_for(
            'get_macaddr_ipv4_associations_slash',
            api_version=api_version
        ),
        code=301
    )


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/ipv4-associations/')
def get_macaddr_ipv4_associations_slash(api_version):
    return '127.255.0.1', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/local-hostname')
def get_macaddr_local_hostname(api_version):
    return 'mocked.internal', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/local-ipv4s')
def get_macaddr_local_ipv4s(api_version):
    return 'mocked.internal', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/mac')
def get_macaddr_mac(api_version):
    return 'AE:30:76:CE:38:62', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/owner-id')
def get_macaddr_owner_id(api_version):
    return '12345', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/public-hostname')
def get_macaddr_public_hostname(api_version):
    return 'mocked.internal', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/public-ipv4s')
def get_macaddr_public_ipv4s(api_version):
    return '127.255.0.1', 200
# Mocked VPC/subnet attributes for the fake MAC, plus placement/profile and
# public-address endpoints. Route rules restore the `<api_version>` URL
# variable consumed by every view (stripped to '//' in the extracted dump).
@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/security-group-ids')
def get_macaddr_security_group_ids(api_version):
    return 'sg-1234', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/security-groups')
def get_macaddr_security_groups(api_version):
    return 'default', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/subnet-id')
def get_macaddr_subnet_id(api_version):
    return 'subnet-1234', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/subnet-ipv4-cidr-block')
def get_macaddr_subnet_ipv4_cidr_block(api_version):
    return '127.255.0.0/20', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/vpc-id')
def get_macaddr_vpc_id(api_version):
    return 'vpc-1234', 200


@app.route('/<api_version>/meta-data/network/interfaces/macs/AE:30:76:CE:38:62/vpc-ipv4-cidr-block')
def get_macaddr_vpc_ipv4_cidr_block(api_version):
    return '127.255.0.0/16', 200


@app.route('/<api_version>/meta-data/placement')
def get_placement_noslash(api_version):
    return redirect(
        url_for('get_placement_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/placement/')
def get_placement_slash(api_version):
    return 'availability-zone', 200


@app.route('/<api_version>/meta-data/placement/availability-zone')
def get_placement_az(api_version):
    return 'us-east-1a', 200


@app.route('/<api_version>/meta-data/profile')
def get_profile(api_version):
    return 'default-hvm', 200


@app.route('/<api_version>/meta-data/public-hostname')
def get_public_hostname(api_version):
    return 'mocked.internal', 200


@app.route('/<api_version>/meta-data/public-ipv4')
def get_public_ipv4s(api_version):
    return '127.255.0.1', 200
# Remaining meta-data keys plus the /dynamic document endpoints. Route rules
# restore the `<api_version>` URL variable consumed by every view (stripped
# to '//' in the extracted dump).
@app.route('/<api_version>/meta-data/public-keys')
def get_public_keys_noslash(api_version):
    return redirect(
        url_for('get_public_keys_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/public-keys/')
def get_public_keys_slash(api_version):
    return '0=boot', 200


@app.route('/<api_version>/meta-data/reservation-id')
def get_reservation_id(api_version):
    return 'r-1234', 200


@app.route('/<api_version>/meta-data/security-groups')
def get_security_groups(api_version):
    return 'default', 200


@app.route('/<api_version>/meta-data/services')
def get_services_noslash(api_version):
    return redirect(
        url_for('get_services_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/meta-data/services/')
def get_services_slash(api_version):
    return 'domain', 200


@app.route('/<api_version>/meta-data/services/domain')
def get_services_domain(api_version):
    return 'amazonaws.com', 200


@app.route('/<api_version>/dynamic')
def get_dynamic_noslash(api_version):
    return redirect(
        url_for('get_dynamic_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/dynamic/')
def get_dynamic_slash(api_version):
    return 'instance-identity/\nfws/\n', 200


@app.route('/<api_version>/dynamic/instance-identity')
def get_instance_identity_noslash(api_version):
    return redirect(
        url_for('get_instance_identity_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/dynamic/instance-identity/')
def get_instance_identity_slash(api_version):
    return 'document\npkcs7\nsignature\ndsa2048', 200


@app.route('/<api_version>/dynamic/instance-identity/document')
def get_instance_identity_document(api_version):
    """Return a mocked instance-identity document as JSON."""
    time_format = "%Y-%m-%dT%H:%M:%SZ"
    now = datetime.datetime.now(dateutil.tz.tzutc())
    ret = {
        'privateIp': '127.255.0.1',
        'devpayProductCodes': None,
        'availabilityZone': 'us-east-1a',
        'version': '2010-08-31',
        'accountId': '1234',
        'instanceId': 'i-{0}'.format(app.config['MOCKED_INSTANCE_ID']),
        'billingProducts': None,
        'instanceType': 't2.medium',
        # This may be a terrible mock for this...
        'pendingTime': now.strftime(time_format),
        'imageId': 'ami-1234',
        'kernelId': None,
        'ramdiskId': None,
        'architecture': 'x86_64',
        'region': 'us-east-1'
    }
    return jsonify(ret)


@app.route('/<api_version>/dynamic/instance-identity/pkcs7')
def get_instance_identity_pkcs7(api_version):
    # TODO: determine a reasonable mock for this
    return 'mocked', 200


@app.route('/<api_version>/dynamic/instance-identity/signature')
def get_instance_identity_signature(api_version):
    # TODO: determine a reasonable mock for this
    return 'mocked', 200


@app.route('/<api_version>/dynamic/instance-identity/dsa2048')
def get_instance_identity_dsa2048(api_version):
    # TODO: determine a reasonable mock for this
    return 'mocked', 200


@app.route('/<api_version>/dynamic/fws')
def get_fws_noslash(api_version):
    return redirect(
        url_for('get_fws_slash', api_version=api_version),
        code=301
    )


@app.route('/<api_version>/dynamic/fws/')
def get_fws_slash(api_version):
    return 'instance-monitoring\n', 200


@app.route('/<api_version>/dynamic/fws/instance-monitoring')
def get_instance_monitoring(api_version):
    return 'enabled', 200