├── tests ├── backends │ ├── __init__.py │ ├── bitbucket_test.py │ ├── github_test.py │ ├── dbapi_test.py │ └── cloud_test.py ├── identity_test.py ├── __init__.py ├── test_id_rsa ├── sftpd.py ├── keystore_test.py ├── conftest.py ├── masterkey_test.py └── remote_test.py ├── docs ├── contribute.rst ├── geofront │ ├── team.rst │ ├── remote.rst │ ├── server.rst │ ├── identity.rst │ ├── keystore.rst │ ├── regen.rst │ ├── version.rst │ ├── masterkey.rst │ ├── backends │ │ ├── cloud.rst │ │ ├── dbapi.rst │ │ ├── github.rst │ │ ├── oauth.rst │ │ ├── stash.rst │ │ └── bitbucket.rst │ └── backends.rst ├── cli.rst ├── geofront.rst ├── index.rst ├── api.rst ├── install.rst ├── Makefile ├── make.bat ├── conf.py ├── changes.rst └── config.rst ├── .ackrc ├── geofront ├── __init__.py ├── backends │ ├── __init__.py │ ├── bitbucket.py │ ├── github.py │ ├── dbapi.py │ ├── stash.py │ └── oauth.py ├── version.py ├── identity.py ├── regen.py ├── keystore.py └── team.py ├── .coveragerc ├── .gitignore ├── runtests.sh ├── mypy.ini ├── tox.ini ├── CONTRIBUTING.rst ├── .travis.yml ├── example.cfg.py ├── README.rst └── setup.py /tests/backends/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/contribute.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../CONTRIBUTING.rst 3 | -------------------------------------------------------------------------------- /docs/geofront/team.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.team 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/remote.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.remote 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/server.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.server 3 | :members: -------------------------------------------------------------------------------- /.ackrc: -------------------------------------------------------------------------------- 1 | --ignore-dir=build 2 | --ignore-dir=dist 3 | --ignore-dir=docs/_build 4 | -------------------------------------------------------------------------------- /docs/geofront/identity.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.identity 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/keystore.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.keystore 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/regen.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.regen 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/version.rst: -------------------------------------------------------------------------------- 1 | 2 | .. 
automodule:: geofront.version 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/masterkey.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.masterkey 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/backends/cloud.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.cloud 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/backends/dbapi.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.dbapi 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/backends/github.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.github 3 | :members: -------------------------------------------------------------------------------- /docs/geofront/backends/oauth.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.oauth 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/backends/stash.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.stash 3 | :members: 4 | -------------------------------------------------------------------------------- /docs/geofront/backends/bitbucket.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends.bitbucket 3 | :members: 4 | -------------------------------------------------------------------------------- /geofront/__init__.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront` --- Simple SSH key management service 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | """ 5 | -------------------------------------------------------------------------------- /geofront/backends/__init__.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.backends` --- Backend implementations 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | """ 5 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | exclude_lines = 3 | pragma: no cover 4 | def __repr__ 5 | raise NotImplementedError 6 | if logger.isEnabledFor 7 | if app.debug: 8 | if __name__ == .__main__.: 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.egg 3 | *.egg-info 4 | *.cfg.py 5 | .*.swo 6 | .*.swp 7 | .cache 8 | .coverage 9 | .env 10 | .idea 11 | .mypy_cache 12 | .swp 13 | .Sync* 14 | .tox 15 | build 16 | dist 17 | docs/_build 18 | venv 19 | -------------------------------------------------------------------------------- /docs/cli.rst: -------------------------------------------------------------------------------- 1 | CLI 2 | === 3 | 4 | .. seealso:: 5 | 6 | :doc:`config` 7 | 8 | .. 
autoprogram:: geofront.server:main_parser() 9 | :prog: geofront-server 10 | 11 | .. autoprogram:: geofront.regen:main_parser() 12 | :prog: geofront-key-regen 13 | -------------------------------------------------------------------------------- /docs/geofront/backends.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront.backends 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | backends/bitbucket 8 | backends/cloud 9 | backends/dbapi 10 | backends/github 11 | backends/oauth 12 | backends/stash 13 | -------------------------------------------------------------------------------- /docs/geofront.rst: -------------------------------------------------------------------------------- 1 | 2 | .. automodule:: geofront 3 | 4 | .. toctree:: 5 | :maxdepth: 3 6 | 7 | geofront/backends 8 | geofront/identity 9 | geofront/keystore 10 | geofront/masterkey 11 | geofront/regen 12 | geofront/remote 13 | geofront/server 14 | geofront/team 15 | geofront/version 16 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | 4 | User's guide 5 | ------------ 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | install 11 | contribute 12 | changes 13 | 14 | 15 | References 16 | ---------- 17 | 18 | .. toctree:: 19 | :maxdepth: 3 20 | 21 | api 22 | cli 23 | config 24 | geofront 25 | 26 | 27 | Indices and tables 28 | ------------------ 29 | 30 | * :ref:`genindex` 31 | * :ref:`modindex` 32 | * :ref:`search` 33 | 34 | -------------------------------------------------------------------------------- /geofront/version.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.version` --- Version data 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | """ 5 | 6 | #: (:class:`~typing.Tuple`\ [:class:`int`, :class:`int`, :class:`int`]) 7 | #: The triple of version numbers e.g. ``(1, 2, 3)``. 8 | VERSION_INFO = (0, 5, 0) 9 | 10 | #: (:class:`str`) The version string e.g. ``'1.2.3'``. 11 | VERSION = '{}.{}.{}'.format(*VERSION_INFO) 12 | 13 | 14 | if __name__ == '__main__': 15 | print(VERSION) 16 | -------------------------------------------------------------------------------- /runtests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [[ $VIRTUAL_ENV = "" ]]; then 3 | echo You seem not using virtualenv. Try pyvenv command. 4 | exit 1 5 | fi 6 | if [[ ! $(which pip | grep $VIRTUAL_ENV) ]]; then 7 | pushd /tmp 8 | if [[ $(which wget) ]]; then 9 | wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py 10 | else 11 | if [[ $(which curl) ]]; then 12 | curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py 13 | else 14 | echo "You need wget or curl at least." 
15 | exit 1 16 | fi 17 | fi 18 | python get-pip.py 19 | popd 20 | fi 21 | pip install -f https://github.com/spoqa/sftpserver/releases -e .[tests] flake8 22 | py.test --cov geofront tests 23 | flake8 24 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.5 3 | check_untyped_defs = true 4 | follow_imports = silent 5 | show_none_errors = true 6 | strict_optional = true 7 | warn_unused_ignores = true 8 | 9 | [mypy-tests.*] 10 | check_untyped_defs = false 11 | 12 | [mypy-flask.*] 13 | ignore_missing_imports = true 14 | 15 | [mypy-iso8601.*] 16 | ignore_missing_imports = true 17 | 18 | [mypy-libcloud.*] 19 | ignore_missing_imports = true 20 | 21 | [mypy-oauthlib.*] 22 | ignore_missing_imports = true 23 | 24 | [mypy-paramiko.*] 25 | ignore_missing_imports = true 26 | 27 | [mypy-pytest.*] 28 | ignore_missing_imports = true 29 | 30 | [mypy-sftpserver.*] 31 | ignore_missing_imports = true 32 | 33 | [mypy-typeguard.*] 34 | ignore_missing_imports = true 35 | 36 | [mypy-waitress.*] 37 | ignore_missing_imports = true 38 | -------------------------------------------------------------------------------- /tests/identity_test.py: -------------------------------------------------------------------------------- 1 | from geofront.identity import Identity 2 | from geofront.team import Team 3 | 4 | 5 | class DummyTeamA(Team): 6 | 7 | pass 8 | 9 | 10 | class DummyTeamB(Team): 11 | 12 | pass 13 | 14 | 15 | def test_identity_eq(): 16 | assert Identity(DummyTeamA, 1) == Identity(DummyTeamA, 1) 17 | assert not (Identity(DummyTeamA, 1) == Identity(DummyTeamA, 2)) 18 | assert not (Identity(DummyTeamA, 1) == Identity(DummyTeamB, 1)) 19 | assert not (Identity(DummyTeamA, 1) == Identity(DummyTeamB, 2)) 20 | 21 | 22 | def test_identity_ne(): 23 | assert not (Identity(DummyTeamA, 1) != Identity(DummyTeamA, 1)) 24 | assert Identity(DummyTeamA, 1) != Identity(DummyTeamA, 2) 25 | assert Identity(DummyTeamA, 1) != Identity(DummyTeamB, 1) 26 | assert Identity(DummyTeamA, 1) != Identity(DummyTeamB, 2) 27 | 28 | 29 | def test_identity_hash(): 30 | assert hash(Identity(DummyTeamA, 1)) == hash(Identity(DummyTeamA, 1)) 31 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py33, py34, py35, py36 3 | minversion = 1.8.0 4 | 5 | [testenv] 6 | deps = 7 | py33: pytest >= 3.2.5, < 3.3.0 8 | {py34,py35,py36}: pytest >= 3.2.5, < 4.0.0 9 | sftpserver >= 0.3 10 | iso8601 >= 0.1.11 11 | redis 12 | pytest-cov 13 | py33: asyncio >= 3.4.3 14 | pep8 >= 1.7.0 15 | pytest-flake8 >= 0.9.1, < 1.0.0 16 | flake8 >= 3.5.0, < 4.0.0 17 | flake8-import-order >= 0.16, < 1.0 18 | flake8-import-order-spoqa >= 1.2.0, < 2.0.0 19 | {py34,py35,py36}: mypy >= 0.550 20 | psycopg2 21 | PyMySQL 22 | https://cdn.mysql.com/Downloads/Connector-Python/mysql-connector-python-2.1.7.tar.gz 23 | commands = 24 | pytest {posargs:--cov geofront --durations=5} 25 | {py34,py35,py36}: mypy -p geofront 26 | {py34,py35,py36}: mypy -p tests 27 | 28 | [pytest] 29 | addopts = --ff --flake8 30 | testpaths = tests/ geofront/ 31 | 32 | [flake8] 33 | exclude = .env, .tox, docs, *.cfg.py 34 | import-order-style = spoqa 35 | application-import-names = geofront, tests 36 | -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | from geofront.server import app 4 | 5 | 6 | @app.errorhandler(400) 7 | def bad_request_handler_for_testing(exception: Exception): 8 | """Custom error handler of :http:statuscode:`400` for unit testing 9 | to know how it's going in the application. 10 | 11 | """ 12 | traceback.print_exc() 13 | return ( 14 | traceback.format_exc(), 15 | 400, 16 | {'Content-Type': 'text/plain; charset=utf-8'} 17 | ) 18 | 19 | 20 | @app.errorhandler(500) 21 | def server_error_handler_for_testing(exception: Exception): 22 | """Custom error handler of :http:statuscode:`500` for unit testing 23 | to know how it's going in the application. 24 | 25 | """ 26 | traceback.print_exc() 27 | return ( 28 | traceback.format_exc(), 29 | 500, 30 | {'Content-Type': 'text/plain; charset=utf-8'} 31 | ) 32 | 33 | 34 | app.config['TESTING'] = True 35 | 36 | # Set app.secret_key for functional testing of web app. 37 | app.secret_key = 'test' 38 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | HTTP API 2 | ======== 3 | 4 | Server version 5 | -------------- 6 | 7 | The release policy of Geofront follows `Semantic Versioning`_, and the HTTP 8 | API which this docs covers also does the same. You can treat what you could 9 | do on Geofront 1.2.3: 10 | 11 | - might be broken on Geofront 2.0.0; 12 | - shouldn't be broken 1.3.0; 13 | - must not be broken on Geofront 1.2.4. 14 | 15 | Also broken things on Geofront 1.2.3 might be fixed on Geofront 1.2.4. 16 | 17 | So how does the server tell its version through HTTP API? It provides two 18 | headers that are equivalent: 19 | 20 | :mailheader:`Server` 21 | Which is a standard compliant header. The form follows also the standard 22 | e.g. ``Geofront/1.2.3``. 23 | 24 | :mailheader:`X-Geofront-Version` 25 | Which is a non-standard extended header. The form consists of only the 26 | version number e.g. ``1.2.3``. 27 | 28 | These headers even are provided when the response is error: 29 | 30 | .. code-block:: http 31 | 32 | HTTP/1.0 404 Not Found 33 | Content-Length: 9 34 | Content-Type: text/plain 35 | Date: Tue, 01 Apr 2014 17:46:36 GMT 36 | Server: Geofront/0.9.0 37 | X-Geofront-Version: 0.9.0 38 | 39 | Not Found 40 | 41 | .. _Semantic Versioning: http://semver.org/ 42 | 43 | 44 | Endpoints 45 | --------- 46 | 47 | .. autoflask:: geofront.server:app 48 | :undoc-static: 49 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | How to contribute 2 | ================= 3 | 4 | License agreement 5 | ----------------- 6 | 7 | All contributed codes have to be free software licensed under the terms of 8 | the `GNU Affero General Public License Version 3`__ or any later version. 9 | We treat all pull requests imply agreement of it, but if a significant 10 | amount of code is involved, it is safest to mention in the pull request 11 | comments that you agree to let the patch be used under the GNU Affero General 12 | Public License Version 3 or any later version as part of the Geofront code. 13 | 14 | __ http://www.gnu.org/licenses/agpl-3.0.html 15 | 16 | 17 | Coding style 18 | ------------ 19 | 20 | - Follow `PEP 8`_ except you can limit all lines to 21 | a maximum of 80 characters (not 79). 
22 | - Order ``import``\ s in lexicographical order. 23 | - Prefer relative ``import``\ s. 24 | - All functions, classes, methods, attributes, and modules 25 | should have the docstring. 26 | 27 | 28 | .. _PEP 8: http://www.python.org/dev/peps/pep-0008/ 29 | 30 | 31 | Tests 32 | ----- 33 | 34 | - All code patches should contain one or more unit tests of 35 | the feature to add or regression tests of the bug to fix. 36 | - You can run the test suite using ``runtests.sh`` script. It installs 37 | librearies for testing as well if not installed. 38 | - Or you can simply run ``py.test`` command if you have all dependencies 39 | for testing. 40 | - Some tests would be skipped unless you give additional options. You can 41 | see the list of available options in *custom options* section of 42 | ``py.test --help``. 43 | - All commits will be tested by `Travis CI`__. 44 | 45 | __ https://travis-ci.org/spoqa/geofront 46 | -------------------------------------------------------------------------------- /tests/test_id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAu/nw6UPbECiHc176k9086z6djqVAjibVFt5NKIra6qh3tV4g 3 | pHvvg0qQL+eqaAqaU4/aqNzdR8oWYUr/4x+/3o2fDWXbbyyGRjgkGVXSjctHIO45 4 | Ypq1gNrPF9tNWJ94WKVK2CWG4QALkDOX5b28gbZ+f5ojjUPDVYpoIxulFQDVal+F 5 | h/ZKna+EgX+Of/gQHXp39o+Qwt8XHjjlvMGX3tqziaUNJrFcTHlxMRqhfQ0thN26 6 | jsBNQjDxBmK+SmQseyHif+LnQbinP4Dl2/LT4k0zcbWFxAXKoQXQmvzOqRHVuS4h 7 | 8caBeW+sWPatJ5ou0Z9MHlytErn4RoCq6Uh/gQIDAQABAoIBAD2WRjgm4z8ICFjk 8 | ZXTMp4jrmvOBcQUh8+7qJp2B4PfA+neRv2AkuvRpGZSYclxkHOwD1cA9nMJIk8yM 9 | pgnj/RkNjW72UE68Z8HZ4vnqBHIwh71iT6rexVjSV8eZMl424Vcp0LvAj9BoE2HL 10 | hZjf8UMOZ6Om/L7r1LPSBQx7ojqe242jjYvaahOt4EOUklq3+gEG6LCqsBVG9sQg 11 | /P6HE/c/fe6fuqf5p0gBiO3V1zuIR8sa1KzViSA971ppRP3abbhFPqoIieUwPzn2 12 | 9xKwsDldq31kFmvlcszTTqrDBe0KjcbM3U+/e4Fdh6bOxnl3D/rezHoo2NwWyz+6 13 | ddVaiAECgYEA3OaQSy+U0DC4nmrNEdFCIEqkymrNE94A2qcSnkWSq4UtvKysd7PT 14 | b9bbplNStZ1swdqw1HulxwyI6jPcTy6JBwShoq3trMvgkwqjD8xFxUT7jRLwCedN 15 | wtCWe2GEKYV2LGENLCq4mtP8c5y7EMrvYcHwpeP6Zj3RFvqAx23e7VkCgYEA2dgi 16 | 1pW16gsCfPZmrz4KAXT4R8fPEa/xjNV7ue1n7kchdPpgPyIfvqhA4ZMEhczwHl3X 17 | Dy/6Nvmsa0DfVdfzzqKH/N85qhUB/laOqIuT145GJXmscuWLBtVSZCB2KDDDyGwI 18 | HLulnJ8MGoshglIDlSVArxZESpaJB/GC39wflmkCgYAHcT0rS08YNIwDylVeZE52 19 | CA3FRBPkt3YP59EYw/9NCA+ia67j2YsmGXcNA4Gl3MftQBD1F7VeH239dgHWKRs0 20 | wVMwx+taBwRp79HOlnNDUi+ncRvlOYXFKoHnZkGtpHF2zU5ETqwpizrWKlGjHzMx 21 | d/dDn4qwJyUEQSxVi74XAQKBgFheRduYRYAYUT0R0xu5ZeOraF/t3MDHB4xiuVr1 22 | xBgzgI3sVwilNg1oQVGACvCf7rPSyz+c7bpJU7j6St6OyJcE9XcNsl2dQf7DbAFR 23 | BXsGeswO7HZ7SQdPl6mIRYYOKg1uTWgico7zMby+vawYyt1AjFm6BZ/gKtwvCzD2 24 | IVYpAoGAVf0uMfTwx0w+F2nyYnlRel0xhzvXOunLJ3AiF9hQNVaLuA7CbHSyzON5 25 | Lu0aPurPUolSDAxTTSc3lmLANnGte0C1HvcMRxm8I22FMS0VIZ7FDFwTdFF0B1eA 26 | aRLnG1bLITJy9iORrizQYu7N4zD55H9HF9fAgPMjrEkBaTfKfmU= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: false 3 | python: 4 | - 3.3 5 | - 3.4 6 | - 3.5 7 | - 3.6 8 | - pypy3 9 | matrix: 10 | allow_failures: 11 | - python: pypy3 12 | env: 13 | global: 14 | # GitHub 15 | - secure: "jZVzNDRii4P1ay6FIlzMbuIVE2u4o4trSwWD09G0MVLv1PLbv05a4ToDjwYv7b8hcOeTIUAUHtTNaaIwuPf256OUhpmRoROy/XkdShZGh5pw4RrTH2Pgo3hxjRN011AzcUmVoiw68hYHSN9yzVZHhtVrKKwXP2H96Aae6BTmCPA=" 16 | # S3 17 | - secure: 
"F8AKUR2WrOt2POoFL4I1h2KEvffYGc+RgjzYnmLUNsFCfsL+tZSa3enxCPJQs7UZLr23PIhKDiPgASGscQJbH7yiqQ5EHnrdiPQVAphj7qtrKHJkqGy3UeSsrOI/PhWTO0fmE0bs+9BWl86Lg+M5R0oLHdapOOFhu+pwXyoC8rc=" 18 | - secure: "GTRvF2s/RcnmUINh6LlHGBZedDP8KyaD15u/Cy4fgq+9pqqbhGNCcBlrlvpw/Vlf1iDLqG9nm5LRFLcbv6SmdEeucU2vQlC9g2c4MZJGEq+yMMbG/LKnpGGSXH08zMIAzk4hjNWGXRdT3N5HswLAnc2YlHj6tk2Y5dlufgQGEJw=" 19 | services: 20 | - redis-server 21 | install: 22 | - pip install psycopg2 PyMySQL http://cdn.mysql.com/Downloads/Connector-Python/mysql-connector-python-2.0.4.zip 23 | - pip install tox-travis codecov 24 | - pip freeze || true 25 | before_script: 26 | - createdb -U postgres -E utf8 -T postgres geofront_test 27 | - mysql -e 'CREATE DATABASE geofront_test;' 28 | script: 29 | - python -mgeofront.version 30 | - | 31 | port_min="$(expr '(' "${TRAVIS_JOB_NUMBER#[0-9][0-9]*.}" - 1 ')' '*' 180 + 12220)" 32 | port_max="$(expr "${TRAVIS_JOB_NUMBER#[0-9][0-9]*.}" '*' 180 + 12220 - 1)" 33 | tox -- \ 34 | --flake8 \ 35 | --cov geofront \ 36 | --durations=20 \ 37 | --sshd-port-min="$port_min" \ 38 | --sshd-port-max="$port_max" \ 39 | --sshd-state-timeout=60 \ 40 | --redis-host=localhost \ 41 | --postgresql-database=geofront_test \ 42 | --postgresql-user=postgres \ 43 | --mysql-database=geofront_test \ 44 | --mysql-host=localhost \ 45 | --mysql-user=root \ 46 | --github-access-token="$GITHUB_ACCESS_TOKEN" \ 47 | --github-org-login="$GITHUB_ORG_LOGIN" \ 48 | --github-team-slugs="$GITHUB_TEAM_SLUGS" \ 49 | --aws-access-key="$AWS_ACCESS_KEY" \ 50 | --aws-secret-key="$AWS_SECRET_KEY" \ 51 | --aws-s3-bucket="$AWS_S3_BUCKET" \ 52 | -vv 53 | after_success: 54 | - codecov 55 | -------------------------------------------------------------------------------- /tests/backends/bitbucket_test.py: -------------------------------------------------------------------------------- 1 | import typing 2 | 3 | from pytest import fixture, skip 4 | 5 | from geofront.backends.bitbucket import BitbucketTeam 6 | from geofront.backends.oauth import request 7 | from geofront.identity import Identity 8 | 9 | 10 | @fixture(scope='session') 11 | def fx_bitbucket_access_token(request) -> str: 12 | try: 13 | token = request.config.getoption('--bitbucket-access-token') 14 | except ValueError: 15 | token = None 16 | if not token: 17 | skip('--bitbucket-access-token is not set; skipped') 18 | return token 19 | 20 | 21 | @fixture 22 | def fx_bitbucket_team_username(request) -> str: 23 | try: 24 | org_login = request.config.getoption('--bitbucket-team-username') 25 | except ValueError: 26 | org_login = None 27 | if not org_login: 28 | skip('--bitbucket-team-username is not provided; skipped') 29 | return org_login 30 | 31 | 32 | @fixture 33 | def fx_bitbucket_group_slugs(request) -> typing.AbstractSet[str]: 34 | try: 35 | slugs = request.config.getoption('--bitbucket-group-slugs') 36 | except ValueError: 37 | slugs = None 38 | if not slugs: 39 | skip('--bitbucket-group-slugs is not provided; skipped') 40 | return {slug.strip() for slug in slugs.split()} 41 | 42 | 43 | @fixture(scope='session') 44 | def fx_bitbucket_identity(fx_bitbucket_access_token: str) -> Identity: 45 | resp = request( 46 | fx_bitbucket_access_token, 47 | 'https://api.bitbucket.org/2.0/user', 48 | 'GET' 49 | ) 50 | return Identity(BitbucketTeam, resp['username'], fx_bitbucket_access_token) 51 | 52 | 53 | def test_request(fx_bitbucket_access_token: str, 54 | fx_bitbucket_identity: Identity): 55 | result = request( 56 | fx_bitbucket_access_token, 57 | 'https://api.bitbucket.org/2.0/user', 58 | 'GET' 59 | ) 60 | 
assert result['type'] == 'user' 61 | result2 = request( 62 | fx_bitbucket_identity, 63 | 'https://api.bitbucket.org/2.0/user', 64 | 'GET' 65 | ) 66 | assert result == result2 67 | 68 | 69 | def test_authorize(fx_bitbucket_identity: Identity, 70 | fx_bitbucket_team_username: str): 71 | team = BitbucketTeam('', '', fx_bitbucket_team_username) 72 | assert team.authorize(fx_bitbucket_identity) 73 | 74 | 75 | def test_list_groups(fx_bitbucket_identity: Identity, 76 | fx_bitbucket_team_username: str, 77 | fx_bitbucket_group_slugs: typing.AbstractSet[str]): 78 | org = BitbucketTeam('', '', fx_bitbucket_team_username) 79 | groups = org.list_groups(fx_bitbucket_identity) 80 | assert groups == fx_bitbucket_group_slugs 81 | -------------------------------------------------------------------------------- /tests/sftpd.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | try: 3 | import selectors 4 | except ImportError: 5 | from asyncio import selectors # type: ignore 6 | import socket 7 | import threading 8 | import time 9 | 10 | from paramiko.common import AUTH_FAILED, AUTH_SUCCESSFUL, OPEN_SUCCEEDED 11 | from paramiko.rsakey import RSAKey 12 | from paramiko.server import ServerInterface 13 | from paramiko.sftp_server import SFTPServer 14 | from paramiko.transport import Transport 15 | from sftpserver.stub_sftp import StubSFTPServer 16 | 17 | from geofront.keystore import parse_openssh_pubkey 18 | 19 | 20 | class StubServer(ServerInterface): 21 | 22 | def __init__(self, path, users={'user'}): 23 | self.path = path 24 | self.users = frozenset(users) 25 | 26 | @property 27 | def authorized_keys(self): 28 | list_file = os.path.join(self.path, '.ssh', 'authorized_keys') 29 | with open(list_file) as f: 30 | for line in f.readlines(): 31 | yield parse_openssh_pubkey(line.strip()) 32 | 33 | def get_allowed_auths(self, username): 34 | return 'publickey' 35 | 36 | def check_auth_password(self, username, password): 37 | return AUTH_FAILED 38 | 39 | def check_auth_publickey(self, username, key): 40 | if username in self.users: 41 | for authorized_key in self.authorized_keys: 42 | if authorized_key == key: 43 | return AUTH_SUCCESSFUL 44 | return AUTH_FAILED 45 | 46 | def check_channel_request(self, kind, chanid): 47 | return OPEN_SUCCEEDED 48 | 49 | 50 | def start_server(path: str, host: str, port: int, terminated: threading.Event): 51 | server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 52 | server_socket.settimeout(1) 53 | server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) 54 | server_socket.bind((host, port)) 55 | server_socket.listen(1) 56 | stub_cls = type('StubSFTPServer', (StubSFTPServer,), {'ROOT': path}) 57 | host_key = RSAKey.generate(1024) 58 | 59 | def accept(server_socket, mask): 60 | conn, addr = server_socket.accept() 61 | transport = Transport(conn) 62 | transport.add_server_key(host_key) 63 | transport.set_subsystem_handler('sftp', SFTPServer, stub_cls) 64 | server = StubServer(path) 65 | transport.start_server(server=server) 66 | while not terminated.is_set(): 67 | channel = transport.accept(1) 68 | if channel is not None and not terminated.is_set(): 69 | while transport.is_active() and not terminated.is_set(): 70 | terminated.wait(1) 71 | break 72 | 73 | sel = selectors.DefaultSelector() 74 | sel.register(server_socket, selectors.EVENT_READ, accept) 75 | last_used = time.time() 76 | while not terminated.is_set() and last_used + 10 > time.time(): 77 | events = sel.select(1) 78 | for key, mask in events: 79 
| key.data(key.fileobj, mask) 80 | last_used = time.time() 81 | -------------------------------------------------------------------------------- /geofront/identity.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.identity` --- Member identification 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | """ 5 | import collections.abc 6 | from typing import TYPE_CHECKING, Hashable, Type, Union, cast 7 | 8 | if TYPE_CHECKING: 9 | from .team import Team # noqa: F401 10 | 11 | __all__ = 'Identity', 12 | 13 | 14 | class Identity(collections.abc.Hashable): 15 | """Hashable value object which purposes to identify the owner of 16 | each public key in the store. 17 | 18 | :param team_type: a sbclass of :class:`~.team.Team` 19 | :type team_type: :class:`~typing.Type`\ [:class:`~.team.Team`] 20 | :param identifier: any hashable identifier for the owner. 21 | it's interpreted by ``team_type`` 22 | :type identifier: :class:`~typing.Hashable` 23 | :param access_token: an optional access token which may used by key store 24 | 25 | """ 26 | 27 | #: (:class:`~typing.Type`\ [:class:`~.team.Team`]) A subclass of 28 | #: :class:`~.team.Team`. 29 | team_type = None # type: Type[Team] 30 | 31 | #: (:class:`~typing.Hashable`) Any hashable identifier for 32 | #: the owner. It's interpreted by :attr:`team_type`. 33 | identifier = None # type: Union[Hashable, str, int] 34 | 35 | #: An optional access token which may be used by key store. 36 | #: 37 | #: .. note:: 38 | #: 39 | #: The attribute is ignored by :token:`==`, and :token:`!=` 40 | #: operators, and :func:`hash()` function. 41 | access_token = None 42 | 43 | def __init__(self, 44 | team_type: Type['Team'], 45 | identifier: Union[Hashable, str, int], # workaround mypy bug 46 | access_token=None) -> None: 47 | if not isinstance(team_type, type): 48 | raise TypeError('team_type must be a type, not ' + repr(team_type)) 49 | from .team import Team # noqa: F811 50 | if not issubclass(team_type, Team): 51 | raise TypeError('team_type must be a subclass of {0.__module__}.' 
52 | '{0.__qualname__}'.format(Team)) 53 | elif not callable(getattr(identifier, '__hash__')): 54 | raise TypeError('identifier must be hashable, not ' + 55 | repr(identifier)) 56 | self.team_type = cast(Type[Team], team_type) 57 | self.identifier = identifier # type: Union[Hashable, str, int] 58 | self.access_token = access_token 59 | 60 | def __eq__(self, other) -> bool: 61 | return (isinstance(other, type(self)) and 62 | self.team_type is other.team_type and 63 | self.identifier == other.identifier) 64 | 65 | def __ne__(self, other) -> bool: 66 | return not self == other 67 | 68 | def __hash__(self) -> int: 69 | return hash((self.team_type, self.identifier)) 70 | 71 | def __repr__(self) -> str: 72 | fmt = ('{0.__module__}.{0.__qualname__}' 73 | '({1.__module__}.{1.__qualname__}, {2!r}, access_token={3!r})') 74 | return fmt.format( 75 | type(self), 76 | self.team_type, 77 | self.identifier, 78 | self.access_token 79 | ) 80 | -------------------------------------------------------------------------------- /tests/backends/github_test.py: -------------------------------------------------------------------------------- 1 | import typing 2 | 3 | from pytest import fixture, skip, yield_fixture 4 | 5 | from ..keystore_test import assert_keystore_compliance 6 | from geofront.backends.github import GitHubKeyStore, GitHubOrganization 7 | from geofront.backends.oauth import request 8 | from geofront.identity import Identity 9 | 10 | 11 | @fixture(scope='session') 12 | def fx_github_access_token(request) -> str: 13 | try: 14 | token = request.config.getoption('--github-access-token') 15 | except ValueError: 16 | token = None 17 | if not token: 18 | skip('--github-access-token is not set; skipped') 19 | return token 20 | 21 | 22 | @fixture 23 | def fx_github_org_login(request) -> str: 24 | try: 25 | org_login = request.config.getoption('--github-org-login') 26 | except ValueError: 27 | org_login = None 28 | if not org_login: 29 | skip('--github-org-login is not provided; skipped') 30 | return org_login 31 | 32 | 33 | @fixture 34 | def fx_github_team_slugs(request) -> typing.AbstractSet[str]: 35 | try: 36 | slugs = request.config.getoption('--github-team-slugs') 37 | except ValueError: 38 | slugs = None 39 | if not slugs: 40 | skip('--github-team-slugs is not provided; skipped') 41 | return {slug.strip() for slug in slugs.split()} 42 | 43 | 44 | @fixture(scope='session') 45 | def fx_github_identity(fx_github_access_token: str) -> Identity: 46 | resp = request( 47 | fx_github_access_token, 48 | 'https://api.github.com/user', 49 | 'GET' 50 | ) 51 | return Identity(GitHubOrganization, resp['login'], fx_github_access_token) 52 | 53 | 54 | def test_request(fx_github_access_token: str, fx_github_identity: Identity): 55 | result = request( 56 | fx_github_access_token, 57 | 'https://api.github.com/user', 58 | 'GET' 59 | ) 60 | assert result['type'] == 'User' 61 | result2 = request( 62 | fx_github_identity, 63 | 'https://api.github.com/user', 64 | 'GET' 65 | ) 66 | assert result == result2 67 | 68 | 69 | def test_authorize(fx_github_identity: Identity, fx_github_org_login: str): 70 | org = GitHubOrganization('', '', fx_github_org_login) 71 | assert org.authorize(fx_github_identity) 72 | 73 | 74 | def test_list_groups(fx_github_identity: Identity, fx_github_org_login: str, 75 | fx_github_team_slugs: typing.AbstractSet[str]): 76 | org = GitHubOrganization('', '', fx_github_org_login) 77 | groups = org.list_groups(fx_github_identity) 78 | assert groups == fx_github_team_slugs 79 | 80 | 81 | def 
cleanup_ssh_keys(identity: Identity): 82 | keys = request(identity, GitHubKeyStore.list_url, 'GET') 83 | for key in keys: 84 | url = GitHubKeyStore.deregister_url.format(**key) 85 | request(identity, url, 'DELETE') 86 | 87 | 88 | @yield_fixture 89 | def fx_github_keystore(fx_github_identity: Identity): 90 | cleanup_ssh_keys(fx_github_identity) 91 | yield GitHubKeyStore() 92 | cleanup_ssh_keys(fx_github_identity) 93 | 94 | 95 | def test_github_keystore(fx_github_identity: Identity, 96 | fx_github_keystore: GitHubKeyStore): 97 | assert_keystore_compliance(fx_github_keystore, fx_github_identity) 98 | -------------------------------------------------------------------------------- /tests/backends/dbapi_test.py: -------------------------------------------------------------------------------- 1 | from pytest import fail, fixture, skip, yield_fixture 2 | from werkzeug.utils import import_string 3 | 4 | from ..keystore_test import assert_keystore_compliance 5 | from ..server_test import DummyTeam 6 | from geofront.backends.dbapi import DatabaseKeyStore 7 | from geofront.identity import Identity 8 | 9 | 10 | DRIVERS = { 11 | 'sqlite3': 'pysqlite', # https://docs.python.org/3/library/sqlite3.html 12 | 'psycopg2': 'psycopg2', # http://initd.org/psycopg/ 13 | 'pymysql': 'PyMySQL', # http://www.pymysql.org/ 14 | 'mysql.connector': 'mysql-connector-python', 15 | # http://dev.mysql.com/doc/connector-python/en/ 16 | } 17 | 18 | 19 | @yield_fixture(scope='function', params=list(DRIVERS.keys())) 20 | def fx_db_module(request, tmpdir): 21 | import_name = request.param 22 | package = DRIVERS[import_name] 23 | try: 24 | db_module = import_string(import_name) 25 | except ImportError: 26 | skip(package + ' is not installed; skipped') 27 | args = () 28 | kwargs = {} 29 | getoption = request.config.getoption 30 | if import_name == 'sqlite3': 31 | args = str(tmpdir.join('geofront_test.db')), 32 | elif package == 'psycopg2': 33 | try: 34 | pgdatabase = getoption('--postgresql-database') 35 | except ValueError: 36 | pgdatabase = None 37 | if pgdatabase is None: 38 | skip('--postgresql-database is not provided; skipped') 39 | kwargs['database'] = pgdatabase 40 | for option in 'host', 'port', 'user', 'password': 41 | try: 42 | kwargs[option] = getoption('--postgresql-' + option) 43 | except ValueError: 44 | continue 45 | elif 'mysql' in import_name: 46 | try: 47 | mysql_db = getoption('--mysql-database') 48 | except ValueError: 49 | mysql_db = None 50 | if mysql_db is None: 51 | skip('--mysql-database is not provided; skipped') 52 | kwargs['database'] = mysql_db 53 | for option in 'host', 'port', 'user', 'passwd': 54 | try: 55 | kwargs[option] = getoption('--mysql-' + option) 56 | except ValueError: 57 | continue 58 | if kwargs[option] is None: 59 | del kwargs[option] 60 | else: 61 | fail('arguments to {}.connect() are not ready'.format(import_name)) 62 | kwargs = {k: v for k, v in kwargs.items() if v is not None} 63 | yield db_module, args, kwargs 64 | if 'sqlite' not in import_name.lower(): 65 | connection = db_module.connect(*args, **kwargs) 66 | try: 67 | cursor = connection.cursor() 68 | try: 69 | cursor.execute('DROP TABLE geofront_public_key') 70 | finally: 71 | cursor.close() 72 | finally: 73 | connection.close() 74 | 75 | 76 | @fixture 77 | def fx_db_key_store(fx_db_module): 78 | mod, args, kwargs = fx_db_module 79 | return DatabaseKeyStore(mod, *args, **kwargs) 80 | 81 | 82 | def test_db_key_store(fx_db_key_store): 83 | identity = Identity(DummyTeam, 'abcd') 84 | assert_keystore_compliance(fx_db_key_store, 
identity) 85 | identity2 = Identity(DummyTeam, 'efg') 86 | assert_keystore_compliance(fx_db_key_store, identity2) 87 | -------------------------------------------------------------------------------- /example.cfg.py: -------------------------------------------------------------------------------- 1 | # This is a configuration example. See docs/config.rst as well. 2 | 3 | # Scenario: Your team is using GitHub, and the organization login is @YOUR_TEAM. 4 | # All members already registered their public keys to their GitHub accounts, 5 | # and are using git through ssh public key authorization. 6 | 7 | # First of all, you have to decide how to authorize team members. 8 | # Geofront provides a built-in authorization method for GitHub organizations. 9 | # It requires a pair of client keys (id and secret) for OAuth authentication. 10 | # You can create one from: 11 | # 12 | # https://github.com/organizations/YOUR_TEAM/settings/applications/new 13 | # 14 | # Then import GitHubOrganization class, and configure a pair of client keys 15 | # and your organization login name (@YOUR_TEAM in here). 16 | from geofront.backends.github import GitHubOrganization 17 | 18 | TEAM = GitHubOrganization( 19 | client_id='0123456789abcdef0123', 20 | client_secret='0123456789abcdef0123456789abcdef01234567', 21 | org_login='YOUR_TEAM' 22 | ) 23 | 24 | # Your colleagues have already registered their public keys to GitHub, 25 | # so you don't need additional storage for public keys. We'd use GitHub 26 | # as your public key store. 27 | from geofront.backends.github import GitHubKeyStore 28 | 29 | KEY_STORE = GitHubKeyStore() 30 | 31 | # Unlike public keys, the master key ideally ought to be accessible by 32 | # only Geofront. Assume you use Amazon Web Services. So you'll store 33 | # the master key to the your private S3 bucket named your_team_master_key. 34 | # You can find proper provider via below link 35 | # https://libcloud.readthedocs.io/en/latest/storage/supported_providers.html#supported-methods-storage 36 | from geofront.backends.cloud import CloudMasterKeyStore 37 | from libcloud.storage.types import Provider 38 | from libcloud.storage.providers import get_driver 39 | 40 | driver_cls = get_driver(Provider.S3) 41 | driver = driver_cls('aws access key', 'aws secret key') 42 | container = driver.get_container(container_name='your_team_master_key') 43 | MASTER_KEY_STORE = CloudMasterKeyStore(driver, container, 'id_rsa') 44 | 45 | # You have to let Geofront know what to manage remote servers. 46 | # Although the list can be hard-coded in the configuration file, 47 | # but you'll get the list dynamically from EC2 API. Assume our all 48 | # AMIs are Amazon Linux, so the usernames are always ec2-user. 49 | # If you're using Ubuntu AMIs it should be ubuntu instead. 50 | from geofront.backends.cloud import CloudRemoteSet 51 | from libcloud.compute.types import Provider 52 | from libcloud.compute.providers import get_driver 53 | 54 | driver_cls = get_driver(Provider.EC2) 55 | driver = driver_cls('aws access id', 'aws secret key', region='uest-east-1') 56 | REMOTE_SET = CloudRemoteSet(driver, user='ec2-user') 57 | 58 | # Suppose your team is divided by several subgroups, and these subgroups are 59 | # represented in teams of the GitHub organization. So you can control 60 | # who can access each remote by specifying allowed groups to its metadata. 61 | # CloudRemoteSet which is used for above REMOTE_SET exposes each EC2 instance's 62 | # metadata as it has. 
We suppose every EC2 instance has Allowed-Groups 63 | # metadata key and its value is space-separated list of group slugs. 64 | # The following settings will allow only members who belong to corresponding 65 | # groups to access. 66 | from geofront.remote import GroupMetadataPermissionPolicy 67 | 68 | PERMISSION_POLICY = GroupMetadataPermissionPolicy('Allowed-Groups') 69 | 70 | # Geofront provisions access tokens (or you can think them as sessions) 71 | # for Geofront clients. Assume you already have a Redis server running 72 | # on the same host. We'd store tokens to the db 0 on that Redis server 73 | # in the example. 74 | from werkzeug.contrib.cache import RedisCache 75 | 76 | TOKEN_STORE = RedisCache(host='localhost', db=0) 77 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Geofront 2 | ======== 3 | 4 | .. image:: https://badges.gitter.im/spoqa/geofront.svg 5 | :alt: Join the chat at https://gitter.im/spoqa/geofront 6 | :target: https://gitter.im/spoqa/geofront?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge 7 | 8 | .. image:: https://badge.fury.io/py/Geofront.svg? 9 | :target: https://pypi.python.org/pypi/Geofront 10 | :alt: Latest PyPI version 11 | 12 | .. image:: https://readthedocs.org/projects/geofront/badge/ 13 | :target: https://geofront.readthedocs.io/ 14 | :alt: Read the Docs 15 | 16 | .. image:: https://travis-ci.org/spoqa/geofront.svg?branch=master 17 | :target: https://travis-ci.org/spoqa/geofront 18 | 19 | .. image:: https://codecov.io/gh/spoqa/geofront/branch/master/graph/badge.svg 20 | :target: https://codecov.io/gh/spoqa/geofront 21 | 22 | Geofront is a simple SSH key management server. It helps to maintain servers 23 | to SSH, and ``authorized_keys`` list for them. `Read the docs`__ for more 24 | details. 25 | 26 | __ https://geofront.readthedocs.io/ 27 | 28 | 29 | Situations 30 | ---------- 31 | 32 | - If the team maintains ``authorized_keys`` list of all servers owned 33 | by the team: 34 | 35 | - When someone joins or leaves the team, all lists have to be updated. 36 | - *Who* do update the list? 37 | 38 | - If the team maintains shared private keys to SSH servers: 39 | 40 | - These keys have to be expired when someone leaves the team. 41 | - There should be a shared storage for the keys. (Dropbox? srsly?) 42 | - Everyone might need to add ``-i`` option to use team's own key. 43 | 44 | - The above ways are both hard to scale servers. Imagine your team 45 | has more than 10 servers. 46 | 47 | 48 | Idea 49 | ---- 50 | 51 | 1. Geofront has its own *master key*. The private key is never shared. 52 | The master key is periodically and automatically regened. 53 | 2. Every server has a simple ``authorized_keys`` list, which authorizes 54 | only the master key. 55 | 3. Every member registers their own public key to Geofront. 56 | The registration can be omitted if the key storage is GitHub, Bitbucket, 57 | etc. 58 | 4. A member requests to SSH a server, then Geofront *temporarily* 59 | (about 30 seconds, or a minute) adds their public key to ``authorized_keys`` 60 | of the requested server. 
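The temporary grant in step 4 can be pictured with a rough sketch like the
following. It is **not** Geofront's actual implementation; the function, the
shell commands, and the fixed 30-second window are purely illustrative:

.. code-block:: python

   import time

   import paramiko


   def grant_temporarily(host: str, user: str, public_key_line: str,
                         master_key_path: str, seconds: int = 30) -> None:
       """Append a member's public key to a remote ``authorized_keys``
       file using the master key, wait a short while, then strip it out
       again so that only the master key remains authorized.
       """
       client = paramiko.SSHClient()
       client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
       client.connect(host, username=user, key_filename=master_key_path)
       try:
           # Grant: the member's key becomes valid on this server only now.
           _, out, _ = client.exec_command(
               "echo '{}' >> ~/.ssh/authorized_keys".format(public_key_line))
           out.channel.recv_exit_status()
           time.sleep(seconds)
           # Revoke: drop the temporary key, leaving only the master key.
           _, out, _ = client.exec_command(
               "grep -v -F '{}' ~/.ssh/authorized_keys > /tmp/ak && "
               "mv /tmp/ak ~/.ssh/authorized_keys".format(public_key_line))
           out.channel.recv_exit_status()
       finally:
           client.close()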
61 | 62 | 63 | Prerequisites 64 | ------------- 65 | 66 | - Linux, BSD, Mac 67 | - Python 3.3+ 68 | - Third-party packages (automatically installed together) 69 | 70 | - Paramiko_ 2.0.1+ (which requires cryptography_) 71 | - Werkzeug_ 0.11+ 72 | - Flask_ 0.10.1+ 73 | - Flask-Sockets_ 0.2.1+ 74 | - gevent_ 1.1.2+ 75 | - OAuthLib_ 1.1.1+ 76 | - Apache Libcloud_ 1.1.0+ 77 | - singledispatch_ (only if Python is older than 3.4) 78 | - typing_ (only if Python is older than 3.5) 79 | - typeguard_ 2.1.1+ 80 | 81 | .. _Paramiko: http://www.paramiko.org/ 82 | .. _cryptography: https://cryptography.io/ 83 | .. _Werkzeug: http://werkzeug.pocoo.org/ 84 | .. _Flask: http://flask.pocoo.org/ 85 | .. _Flask-Sockets: https://github.com/kennethreitz/flask-sockets 86 | .. _gevent: http://www.gevent.org/ 87 | .. _OAuthLib: https://github.com/idan/oauthlib 88 | .. _Libcloud: http://libcloud.apache.org/ 89 | .. _singledispatch: https://pypi.python.org/pypi/singledispatch 90 | .. _typing: https://pypi.python.org/pypi/typing 91 | .. _typeguard: https://github.com/agronholm/typeguard 92 | 93 | 94 | Author and license 95 | ------------------ 96 | 97 | Geofront is written by `Hong Minhee`__, maintained by Spoqa_, and licensed 98 | under AGPL3_ or later. You can find the source code from GitHub__: 99 | 100 | .. code-block:: console 101 | 102 | $ git clone git://github.com/spoqa/geofront.git 103 | 104 | 105 | __ https://hongminhee.org/ 106 | .. _Spoqa: http://www.spoqa.com/ 107 | .. _AGPL3: http://www.gnu.org/licenses/agpl-3.0.html 108 | __ https://github.com/spoqa/geofront 109 | 110 | 111 | Missing features 112 | ---------------- 113 | 114 | - Google Apps backend [`#3`_] 115 | - Fabric_ integration 116 | - PuTTY_ integration 117 | 118 | (Contributions would be appreciated!) 119 | 120 | .. _Fabric: http://www.fabfile.org/ 121 | .. _PuTTY: http://www.chiark.greenend.org.uk/~sgtatham/putty/ 122 | .. 
_#3: https://github.com/spoqa/geofront/issues/3 123 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from __future__ import with_statement 2 | 3 | import operator 4 | import os 5 | import sys 6 | 7 | from setuptools import setup, find_packages 8 | 9 | from geofront.version import VERSION 10 | 11 | 12 | def readme(): 13 | with open('README.rst') as f: 14 | return f.read() 15 | 16 | 17 | install_requires = [ 18 | 'setuptools', 19 | 'typeguard >= 2.1.1, < 3.0.0', 20 | 'cryptography >= 1.4', 21 | 'gevent >= 1.1.2', 22 | # indirect dependency thorugh paramiko; just for version constraint (>=1.4) 23 | 'paramiko >= 2.2.0', 24 | 'Werkzeug >= 0.11, < 0.14', 25 | 'oauthlib[rsa, signedtoken] >= 1.1.1, < 2.0.0', 26 | 'Flask >= 0.10.1', 27 | 'Flask-Sockets >= 0.2.1', 28 | 'apache-libcloud >= 1.1.0', 29 | ] 30 | 31 | supported_pyversions = [(3, 3), (3, 4), (3, 5), (3, 6)] 32 | 33 | pyversion_requires = { 34 | ('<', (3, 4)): ['singledispatch'], 35 | ('<', (3, 5)): ['typing'], 36 | } 37 | 38 | tests_require = [ 39 | 'pytest >= 2.5.0', 40 | 'sftpserver >= 0.3', 41 | 'iso8601 >= 0.1.10', 42 | 'redis', 43 | 'pytest-cov' 44 | ] 45 | 46 | docs_require = [ 47 | 'Sphinx >= 1.2', 48 | 'sphinxcontrib-httpdomain >= 1.2.1', 49 | 'sphinxcontrib-autoprogram' 50 | ] 51 | 52 | if sys.version_info < (3, 4): 53 | tests_require.append('asyncio >= 0.4.1') 54 | 55 | extras_require = { 56 | 'tests': tests_require, 57 | 'docs': docs_require, 58 | } 59 | 60 | # The current wheel version (0.24.0) doesn't seem to cover all comparison 61 | # operators of PEP 426 except for ==, so we need to expand all other operators 62 | # to multiple equals e.g. <=2.7 to ==2.6, ==2.7. 63 | operators = { 64 | '==': operator.eq, '!=': operator.ne, '<': operator.lt, '<=': operator.le, 65 | '>': operator.gt, '>=': operator.ge 66 | } 67 | for (op, ver), packages in pyversion_requires.items(): 68 | for pyversion in supported_pyversions: 69 | if operators[op](pyversion, ver): 70 | extras_require.setdefault( 71 | ':python_version==' + repr('.'.join(map(str, pyversion))), 72 | [] 73 | ).extend(packages) 74 | # FIXME: Shitty hack... The current version of setuptools and pip 75 | # doesn't support PEP 426, so we need to manually inject 76 | # conditional requirements into install_requires. 77 | # Note that injection must not be done for bdist_wheel since 78 | # wheel statically captures all install_requires and then 79 | # freezes them into JSON. 
80 | if 'bdist_wheel' not in sys.argv: 81 | install_requires.extend(packages) 82 | 83 | # Install requirements for documentation if it's run by ReadTheDocs.org 84 | if os.environ.get('READTHEDOCS'): 85 | install_requires.extend(docs_require) 86 | 87 | 88 | setup( 89 | name='Geofront', 90 | version=VERSION, 91 | description='Simple SSH key management service', 92 | long_description=readme(), 93 | url='https://github.com/spoqa/geofront', 94 | author='Hong Minhee', 95 | author_email='hongminhee' '@' 'member.fsf.org', 96 | maintainer='Spoqa', 97 | maintainer_email='dev' '@' 'spoqa.com', 98 | license='AGPLv3 or later', 99 | packages=find_packages(exclude=['tests']), 100 | python_requires='>=3.3.0', 101 | install_requires=install_requires, 102 | tests_require=tests_require, 103 | extras_require=extras_require, 104 | entry_points=''' 105 | [console_scripts] 106 | geofront-server = geofront.server:main 107 | geofront-key-regen = geofront.regen:main 108 | ''', 109 | classifiers=[ 110 | 'Development Status :: 4 - Beta', 111 | 'Environment :: Web Environment', 112 | 'Intended Audience :: Developers', 113 | 'Intended Audience :: System Administrators', 114 | 'License :: OSI Approved ' 115 | ':: GNU Affero General Public License v3 or later (AGPLv3+)', 116 | 'Operating System :: POSIX', 117 | 'Programming Language :: Python :: 3', 118 | 'Programming Language :: Python :: 3.3', 119 | 'Programming Language :: Python :: 3.4', 120 | 'Programming Language :: Python :: 3.5', 121 | 'Programming Language :: Python :: 3.6', 122 | 'Programming Language :: Python :: Implementation :: CPython', 123 | 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', 124 | 'Topic :: System :: Systems Administration :: Authentication/Directory' 125 | ] 126 | ) 127 | -------------------------------------------------------------------------------- /geofront/regen.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.regen` --- Regen master key 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | .. versionadded:: 0.2.0 5 | 6 | """ 7 | import argparse 8 | import logging 9 | import os.path 10 | from typing import Mapping, Optional, Tuple, Type 11 | 12 | from paramiko.pkey import PKey 13 | from paramiko.rsakey import RSAKey 14 | from typeguard import typechecked 15 | 16 | from .keystore import get_key_fingerprint 17 | from .masterkey import (EmptyStoreError, KeyGenerationError, MasterKeyStore, 18 | generate_key, renew_master_key) 19 | from .remote import RemoteSet 20 | from .version import VERSION 21 | 22 | __all__ = 'main', 'main_parser', 'get_regen_options', 'regenerate' 23 | 24 | 25 | @typechecked 26 | def main_parser( 27 | parser: argparse.ArgumentParser=None 28 | ) -> argparse.ArgumentParser: # pragma: no cover 29 | """Create an :class:`~argparse.ArgumentParser` object for 30 | :program:`geofront-key-regen` CLI program. It also is used for 31 | documentation through `sphinxcontrib-autoprogram`__. 
32 | 33 | :return: a properly configured :class:`~argparse.ArgumentParser` 34 | :rtype: :class:`argparse.ArgumentParser` 35 | 36 | __ https://pythonhosted.org/sphinxcontrib-autoprogram/ 37 | 38 | """ 39 | parser = parser or argparse.ArgumentParser( 40 | description='Regen the Geofront master key' 41 | ) 42 | parser.add_argument('config', 43 | metavar='FILE', 44 | help='geofront configuration file (Python script)') 45 | parser.add_argument('--create-master-key', 46 | action='store_true', 47 | help='create a new master key if no master key yet') 48 | parser.add_argument('-d', '--debug', 49 | action='store_true', 50 | help='debug mode') 51 | parser.add_argument('-v', '--version', 52 | action='version', 53 | version='%(prog)s ' + VERSION) 54 | return parser 55 | 56 | 57 | @typechecked 58 | def regenerate(master_key_store: MasterKeyStore, 59 | remote_set: RemoteSet, 60 | key_type: Type[PKey]=RSAKey, 61 | bits: Optional[int]=None, 62 | *, 63 | create_if_empty: bool, 64 | renew_unless_empty: bool) -> None: 65 | """Regenerate or create the master key.""" 66 | logger = logging.getLogger(__name__ + '.regenerate') 67 | try: 68 | key = master_key_store.load() 69 | except EmptyStoreError: 70 | if create_if_empty: 71 | logger.warn('no master key; create one...') 72 | key = generate_key(key_type, bits) 73 | master_key_store.save(key) 74 | logger.info('created new master key: %s', get_key_fingerprint(key)) 75 | else: 76 | raise RegenError('no master key; try --create-master-key option ' 77 | 'if you want to create one') 78 | else: 79 | if renew_unless_empty: 80 | renew_master_key(frozenset(remote_set.values()), 81 | master_key_store, 82 | key_type, bits) 83 | 84 | 85 | class RegenError(Exception): 86 | """Error raised by :func:`regenerate()`.""" 87 | 88 | 89 | def get_regen_options(config: Mapping[str, object]) -> Tuple[Type[PKey], 90 | Optional[int]]: 91 | key_type = config.get('MASTER_KEY_TYPE', RSAKey) 92 | if not isinstance(key_type, type): 93 | raise RegenOptionError('MASTER_KEY_TYPE configuration must be a type, ' 94 | 'not ' + repr(key_type)) 95 | elif not issubclass(key_type, PKey): 96 | raise RegenOptionError( 97 | 'MASTER_KEY_TYPE configuration must be a subclass of ' 98 | '{0.__module__}.{0.__qualname__}, but {1.__module__}.' 
99 | '{1.__qualname__} is not'.format(PKey, key_type) 100 | ) 101 | bits = config['MASTER_KEY_BITS'] 102 | if bits is not None and not isinstance(bits, int): 103 | raise RegenOptionError('MASTER_KEY_BITS configuration must be an ' 104 | 'integer, not ' + repr(bits)) 105 | return RSAKey, bits 106 | 107 | 108 | class RegenOptionError(RegenError): 109 | """Error raised by :func:`get_regen_options()`.""" 110 | 111 | 112 | def main(): # pragma: no cover 113 | """The main function of :program:`geofront-key-regen` CLI program.""" 114 | from .server import app, get_master_key_store, get_remote_set 115 | parser = main_parser() 116 | args = parser.parse_args() 117 | try: 118 | app.config.from_pyfile(os.path.abspath(args.config), silent=False) 119 | except FileNotFoundError: 120 | parser.error('unable to load configuration file: ' + args.config) 121 | logger = logging.getLogger('geofront.masterkey') 122 | handler = logging.StreamHandler() 123 | level = logging.DEBUG if args.debug else logging.INFO 124 | handler.setLevel(level) 125 | logger.addHandler(handler) 126 | logger.setLevel(level) 127 | try: 128 | regenerate( 129 | get_master_key_store(), 130 | get_remote_set(), 131 | *get_regen_options(app.config), 132 | create_if_empty=args.create_master_key, 133 | renew_unless_empty=True 134 | ) 135 | except KeyGenerationError as e: 136 | parser.error(str(e)) 137 | except RegenError as e: 138 | parser.error(str(e)) 139 | -------------------------------------------------------------------------------- /tests/keystore_test.py: -------------------------------------------------------------------------------- 1 | import collections.abc 2 | 3 | from paramiko.dsskey import DSSKey 4 | from paramiko.ecdsakey import ECDSAKey 5 | from paramiko.ed25519key import Ed25519Key 6 | from paramiko.rsakey import RSAKey 7 | from pytest import fixture, raises 8 | 9 | from geofront.keystore import (DuplicatePublicKeyError, KeyTypeError, 10 | format_openssh_pubkey, 11 | get_key_fingerprint, parse_openssh_pubkey) 12 | 13 | 14 | @fixture 15 | def fx_id_rsa_pub(): 16 | return ( 17 | 'AAAAB3NzaC1yc2EAAAABIwAAAQEA0ql70Tsi8ToDGm+gkkRGv12Eb15QSgdVQeIFbasK+' 18 | 'yHNITAOVHtbM3nlUTIxFh7sSga7UmEjCya0ljU0GJ+zvnFOxKvRypBoUY38W8XkR3f2IJ' 19 | 'QwbWE7/t4Vs4DViramrZr/wnQtRstLZRncIj307ApQuB18uedbtreGdg+cd75/KfTvDc3' 20 | 'L17ZYlgdmJ+tTdzTi5mYbiPmtn631Qm8/OCBazwUSfidRlG1SN97QJdV5ZFLNN+3BRR7R' 21 | 'IRzYZ/2KEJqiOI5nqi3TEiPeq49/LJElu4tdJ8icXT7COrGllnhBbpZdxRM26hhVXv62v' 22 | 'OTQwXm1fumg0PgMACP2S1WVNw==' 23 | ) 24 | 25 | 26 | def test_parse_openssh_pubkey_rsa(fx_id_rsa_pub): 27 | pkey = parse_openssh_pubkey('ssh-rsa ' + fx_id_rsa_pub) 28 | assert isinstance(pkey, RSAKey) 29 | assert pkey.get_name() == 'ssh-rsa' 30 | assert pkey.get_base64() == fx_id_rsa_pub 31 | pkey = parse_openssh_pubkey('ssh-rsa ' + fx_id_rsa_pub + ' comment') 32 | assert isinstance(pkey, RSAKey) 33 | assert pkey.get_name() == 'ssh-rsa' 34 | assert pkey.get_base64() == fx_id_rsa_pub 35 | 36 | 37 | def test_parse_openssh_pubkey_dsa(): 38 | id_dsa_pub = ( 39 | 'AAAAB3NzaC1kc3MAAACBALTeFi9rlCkORWTj2sznDx2p/nUDFGZY0j9ynIioho0vlNfgj' 40 | '4U9/3SCq4JjhXhH7OB6h0NyUSNEVe9bbe7mHFTpQWwy1bmXEBaJALv1IqIBme1ZJcdUbe' 41 | 'ZM3PCLmbPTE7sjgUwk98hT3TI8CI5hLkJmsV1nFckEONgIG9IPjnmnAAAAFQCb72U4lNY' 42 | '2DsZ+e2TaxTtT8i996QAAAIEAlO7/8Vypf5bgAkeHGJ15cfiuR1X/gkSUj+sAhJYJ7pyB' 43 | 'h7vnJbBPztgxVvuHxELFcCufFyps7sibUq4MifqBPrVwLiK4PiNNcK8M2hjDJmWrqo/Bw' 44 | 'LRXkc1LWWxLr/PCBVeqAe2OTFEtu4ZLaqlex+WI2Ezgn4pItAH9lIACBlcAAACAa5GI36' 45 | 
'nWqU89z07Pdh7q8gZHR9KXHMS3T6dGxkOhLb+XSATV14+udjqtrULs552d+d7Pdq+0KBm' 46 | '+6lC/YRn6ETsJ2AJzWxlG+sJ/eTFEWw9Q2uTWOBRbAqL2VJG5DG+K+lhgRRNNKHMtUF1j' 47 | '1MeJb71HT7amaOcE+dNEgKS0xi4=' 48 | ) 49 | pkey = parse_openssh_pubkey('ssh-dss ' + id_dsa_pub) 50 | assert isinstance(pkey, DSSKey) 51 | assert pkey.get_name() == 'ssh-dss' 52 | assert pkey.get_base64() == id_dsa_pub 53 | pkey = parse_openssh_pubkey('ssh-dss ' + id_dsa_pub + ' comment') 54 | assert isinstance(pkey, DSSKey) 55 | assert pkey.get_name() == 'ssh-dss' 56 | assert pkey.get_base64() == id_dsa_pub 57 | 58 | 59 | def test_parse_openssh_pubkey_ecdsa(): 60 | id_ecdsa_pub = ( 61 | 'AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAA' 62 | 'ABBBDs0y6X8UquYBtTvDjbK+RZIAWduMbfWfUmh2MRtWpo2Zq' 63 | 'EyQiyeTRDJ/41A5heiONtm7QhUJoBF5VBUjsxiIFk=' 64 | ) 65 | pkey = parse_openssh_pubkey('ecdsa-sha2-nistp256 ' + id_ecdsa_pub) 66 | assert isinstance(pkey, ECDSAKey) 67 | assert pkey.get_name() == 'ecdsa-sha2-nistp256' 68 | assert pkey.get_base64() == id_ecdsa_pub 69 | pkey = parse_openssh_pubkey('ecdsa-sha2-nistp256 ' + id_ecdsa_pub + ' cmt') 70 | assert isinstance(pkey, ECDSAKey) 71 | assert pkey.get_name() == 'ecdsa-sha2-nistp256' 72 | assert pkey.get_base64() == id_ecdsa_pub 73 | 74 | 75 | def test_parse_openssh_pubkey_ed25519(): 76 | id_ed25519_pub = ('AAAAC3NzaC1lZDI1NTE5AAAAIBtfC/x6Bm' 77 | 'h0Y2BHGSSdRyMBpX2m3C7Fw3qSNWrzK3GP') 78 | pkey = parse_openssh_pubkey('ssh-ed25519 ' + id_ed25519_pub) 79 | assert isinstance(pkey, Ed25519Key) 80 | assert pkey.get_name() == 'ssh-ed25519' 81 | assert pkey.get_base64() == id_ed25519_pub 82 | 83 | 84 | def test_parse_openssh_unsupported(): 85 | with raises(KeyTypeError): 86 | parse_openssh_pubkey( 87 | 'ssh-unsupported ' 88 | 'AAAAC3NzaC1lZDI1NTE5AAAAIBtfC/x6Bm' 89 | 'h0Y2BHGSSdRyMBpX2m3C7Fw3qSNWrzK3GP ' 90 | 'key-type-error-test' 91 | ) 92 | 93 | 94 | def test_format_openssh_pubkey(): 95 | rsakey = RSAKey.generate(1024) 96 | assert parse_openssh_pubkey(format_openssh_pubkey(rsakey)) == rsakey 97 | dsskey = DSSKey.generate(1024) 98 | assert parse_openssh_pubkey(format_openssh_pubkey(dsskey)) == dsskey 99 | 100 | 101 | def test_get_key_fingerprint(fx_id_rsa_pub): 102 | pkey = parse_openssh_pubkey('ssh-rsa ' + fx_id_rsa_pub) 103 | assert (get_key_fingerprint(pkey) == 104 | 'f5:6e:03:1c:cd:2c:84:64:d7:94:18:8b:79:60:11:df') 105 | assert (get_key_fingerprint(pkey, '-') == 106 | 'f5-6e-03-1c-cd-2c-84-64-d7-94-18-8b-79-60-11-df') 107 | assert get_key_fingerprint(pkey, '') == 'f56e031ccd2c8464d794188b796011df' 108 | 109 | 110 | def assert_keystore_compliance(keystore, identity): 111 | """Test basic behaviors of a KeyStore implementation.""" 112 | # "List registered public keys of the given ``identity``." 113 | keys = keystore.list_keys(identity) 114 | assert isinstance(keys, collections.abc.Set) 115 | assert not keys 116 | # "Register the given ``public_key`` to the ``identity``." 117 | key = RSAKey.generate(1024) 118 | keystore.register(identity, key) 119 | keys = keystore.list_keys(identity) 120 | assert isinstance(keys, collections.abc.Set) 121 | assert keys == {key} 122 | # ":raise geofront.keystore.DuplicatePublicKeyError: 123 | # when the ``public_key`` is already in use" 124 | with raises(DuplicatePublicKeyError): 125 | keystore.register(identity, key) 126 | # "Remove the given ``public_key`` of the ``identity``." 
127 |     keystore.deregister(identity, key)
128 |     keys = keystore.list_keys(identity)
129 |     assert isinstance(keys, collections.abc.Set)
130 |     assert not keys
131 |     # "It silently does nothing if there isn't the given ``public_key``
132 |     # in the store."
133 |     keystore.deregister(identity, key)
134 |
--------------------------------------------------------------------------------
/geofront/backends/bitbucket.py:
--------------------------------------------------------------------------------
1 | """:mod:`geofront.backends.bitbucket` --- Bitbucket Cloud team
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | .. versionadded:: 0.4.0
5 |
6 | Provides the team implementation for `Bitbucket Cloud`_
7 | (also known simply as Bitbucket).
8 |
9 | In order to use Bitbucket's API you need to add an OAuth consumer.
10 | You can find the form at :menuselection:`Bitbucket settings -->
11 | Access Management --> OAuth --> OAuth consumers --> Add consumer`.
12 | The OAuth consumer has to be set up as follows:
13 |
14 | :guilabel:`Callback`
15 |     It has to be the root url of the Geofront server.
16 |
17 | :guilabel:`Permissions`
18 |     The following permissions are required:
19 |
20 |     :guilabel:`Account`
21 |         :guilabel:`Read`.
22 |
23 |         It's used for identifying the authenticated Bitbucket user.
24 |
25 |     :guilabel:`Team membership`
26 |         :guilabel:`Read`.
27 |
28 |         It's used for determining whether the authenticated Bitbucket user
29 |         belongs to the Bitbucket team.
30 |
31 |     Permissions other than the above are unnecessary.
32 |
33 | .. note::
34 |
35 |    Not to be confused with `Bitbucket Server`_ (which was Stash).
36 |    For Bitbucket Server, use the :mod:`geofront.backends.stash` module instead.
37 |
38 | .. note::
39 |
40 |    Unfortunately, Atlassian deprecated the existing SSH keys endpoint
41 |    from their HTTP RESTful API.  Unlike :mod:`geofront.backends.github` or
42 |    :mod:`geofront.backends.stash`, Bitbucket Cloud cannot be used for
43 |    storing/loading public keys, but can be used only for authentication and
44 |    authorization.  You need to use other key store implementations instead
45 |    e.g. :class:`~.cloud.CloudKeyStore` or :class:`~.dbapi.DatabaseKeyStore`.
46 |
47 | .. _Bitbucket Server: https://bitbucket.org/product/server
48 | .. _Bitbucket Cloud: https://bitbucket.org/
49 |
50 | """
51 | import collections.abc
52 | import logging
53 |
54 | from typeguard import typechecked
55 |
56 | from ..identity import Identity
57 | from .oauth import OAuth2Team, request
58 |
59 | __all__ = 'BitbucketTeam',
60 |
61 |
62 | class BitbucketTeam(OAuth2Team):
63 |     """Authenticate team membership through Bitbucket Cloud,
64 |     and authorize access to the Bitbucket Cloud key store.
65 |
66 |     Note that the group identifiers the :meth:`list_groups()` method returns
67 |     are Bitbucket team group *slugs*.  You can find the list of your available
68 |     group slugs in the team using the Bitbucket API:
69 |
70 |     .. code-block:: console
71 |
72 |        $ curl -u YourUsername \
73 |              https://api.bitbucket.org/1.0/groups/YourTeamUsername/
74 |        [
75 |          {
76 |            "name": "Administrators",
77 |            "permission": "read",
78 |            "auto_add": false,
79 |            "slug": "administrators",
80 |            ...
81 |          },
82 |          {
83 |            "name": "Developers",
84 |            "permission": "read",
85 |            "auto_add": false,
86 |            "slug": "developers",
87 |            ...
88 |          },
89 |        ]
90 |
91 |     :param consumer_key: bitbucket oauth consumer key
92 |     :type consumer_key: :class:`str`
93 |     :param consumer_secret: bitbucket oauth consumer secret
94 |     :type consumer_secret: :class:`str`
95 |     :param team_username: bitbucket team account name.  for example ``'spoqa'``
96 |                           in https://bitbucket.org/spoqa
97 |     :type team_username: :class:`str`
98 |
99 |     """
100 |
101 |     authorize_url = 'https://bitbucket.org/site/oauth2/authorize'
102 |     authorize_scope = 'account:write team'
103 |     access_token_url = 'https://bitbucket.org/site/oauth2/access_token'
104 |     user_url = 'https://api.bitbucket.org/2.0/user'
105 |     teams_list_url = 'https://api.bitbucket.org/2.0/teams?role=member'
106 |     groups_list_url = \
107 |         'https://api.bitbucket.org/1.0/groups/{team.team_username}'
108 |     unauthorized_identity_message_format = (
109 |         '@{identity.identifier} user is not a member of '
110 |         '@{team.team_username} team'
111 |     )
112 |
113 |     @typechecked
114 |     def __init__(self,
115 |                  consumer_key: str,
116 |                  consumer_secret: str,
117 |                  team_username: str) -> None:
118 |         super().__init__(consumer_key, consumer_secret)
119 |         self.team_username = team_username.lower()
120 |
121 |     def determine_identity(self, access_token: str) -> Identity:
122 |         user_data = request(access_token, self.user_url)
123 |         return Identity(type(self), user_data['username'], access_token)
124 |
125 |     def authorize(self, identity: Identity) -> bool:
126 |         logger = logging.getLogger(__name__ + '.BitbucketTeam.authorize')
127 |         if not issubclass(identity.team_type, type(self)):
128 |             return False
129 |         url = self.teams_list_url
130 |         while url:
131 |             logger.debug('requesting %s...', url)
132 |             try:
133 |                 response = request(identity, url)
134 |             except IOError as e:
135 |                 logger.debug(str(e), exc_info=True)
136 |                 return False
137 |             if isinstance(response, collections.abc.Mapping) and \
138 |                     'error' in response:
139 |                 logger.debug('error response: %r', response)
140 |                 return False
141 |             logger.debug('successful response: %r', response)
142 |             for team in response['values']:
143 |                 if team['username'].lower() == self.team_username:
144 |                     return True
145 |             url = response.get('next')
146 |         return False
147 |
148 |     def list_groups(self, identity: Identity):
149 |         if not issubclass(identity.team_type, type(self)):
150 |             return frozenset()
151 |         list_url = self.groups_list_url.format(team=self)
152 |         try:
153 |             response = request(identity, list_url)
154 |         except IOError:
155 |             return frozenset()
156 |         if isinstance(response, collections.abc.Mapping) and \
157 |                 'error' in response:
158 |             return frozenset()
159 |         return frozenset(t['slug'] for t in response)
--------------------------------------------------------------------------------
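# --- Illustrative sketch, not a file from the repository ---------------------
# How BitbucketTeam might be paired with a separate key store in a Geofront
# configuration file: Bitbucket Cloud cannot store public keys (see the note
# in the module docstring above), so a DB-API based key store is used here.
# The consumer key/secret and the SQLite path are placeholders.
import sqlite3

from geofront.backends.bitbucket import BitbucketTeam
from geofront.backends.dbapi import DatabaseKeyStore

TEAM = BitbucketTeam(
    consumer_key='...',        # OAuth consumer key from the Bitbucket settings
    consumer_secret='...',     # OAuth consumer secret
    team_username='spoqa'      # Bitbucket team account name
)
KEY_STORE = DatabaseKeyStore(sqlite3, 'geofront.db')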
/docs/install.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | You can easily install the Geofront server using pip:
5 |
6 | .. code-block:: console
7 |
8 |    $ pip3 install Geofront
9 |
10 |
11 | Running server
12 | --------------
13 |
14 | .. currentmodule:: config
15 |
16 | The Geofront server requires a configuration file.  The configuration file is
17 | a typical Python script.  The server is sensitive to the values of some
18 | uppercase variables like :data:`TEAM`, :data:`KEY_STORE`, and :data:`MASTER_KEY_BITS`.
19 | The filename of the configuration file is not important, but we recommend
20 | using the :file:`.cfg.py` suffix.  You can also find an example configuration
21 | in the Geofront repository: :file:`example.cfg.py`.
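
For illustration only, a minimal configuration sketch could look like the
following (the GitHub backend is just an example choice, and the client id
and secret are placeholders):

.. code-block:: python

   from geofront.backends.github import GitHubKeyStore, GitHubOrganization

   TEAM = GitHubOrganization(
       client_id='...',
       client_secret='...',
       org_login='your-org'
   )
   KEY_STORE = GitHubKeyStore()
   MASTER_KEY_BITS = 2048
   # MASTER_KEY_STORE has to be configured as well; see the configuration
   # reference for the available master key store backends.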
22 |
23 | .. seealso::
24 |
25 |    :doc:`config`
26 |       The reference manual for Geofront server configuration.
27 |
28 | If a configuration file is ready, you can run the server right now.  Suppose
29 | the configuration file is :file:`geofront.cfg.py`.
30 |
31 | The :program:`geofront-server` command provides several options like
32 | :option:`--host `, and requires a configuration
33 | filename as its argument.
34 |
35 | .. code-block:: console
36 |
37 |    $ geofront-server -p 8080 geofront.cfg.py
38 |
39 | It might be terminated with the following error message:
40 |
41 | .. code-block:: console
42 |
43 |    $ geofront-server -p 8080 geofront.cfg.py
44 |    usage: geofront-server [...] FILE
45 |    geofront-server: error: no master key;
46 |    try --create-master-key option if you want to create one
47 |
48 | It means the :data:`MASTER_KEY_STORE` you configured has no master key yet.
49 | The :option:`--create-master-key ` option
50 | creates a new master key if there's no master key yet, and then stores it into
51 | the configured :data:`MASTER_KEY_STORE`.
52 |
53 | .. code-block:: console
54 |
55 |    $ geofront-server -p 8080 --create-master-key geofront.cfg.py
56 |    no master key; create one...
57 |    created new master key: 2b:d5:64:fd:27:f9:7a:6a:12:7d:88:76:a7:54:bd:6a
58 |    serving on http://0.0.0.0:8080
59 |
60 | If it successfully starts serving, it will show you the bound host and port.
61 |
62 |
63 | Reverse proxy
64 | -------------
65 |
66 | Application servers typically run behind a reverse proxy like Nginx_.
67 | Here's an example configuration for a Geofront server behind an Nginx reverse proxy:
68 |
69 | .. code-block:: nginx
70 |
71 |    # Redirect all HTTP requests to HTTPS.
72 |    # We highly recommend exposing the Geofront server only through HTTPS.
73 |    server {
74 |        listen 80;
75 |        server_name geofront-example.org;
76 |        rewrite ^(.*)$ https://geofront-example.org$1;
77 |    }
78 |
79 |    # Forward all requests to https://geofront-example.org to internal
80 |    # http://127.0.0.1:8080.
81 |    server {
82 |        listen 443 ssl;
83 |        server_name geofront-example.org;
84 |        access_log /var/log/nginx/geofront/access.log;
85 |        error_log /var/log/nginx/geofront/error.log;
86 |
87 |        ssl on;
88 |        ssl_certificate /path/to/ssl_cert_chain.pem;
89 |        ssl_certificate_key /path/to/ssl_cert.pem;
90 |
91 |        # HSTS: https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security
92 |        add_header Strict-Transport-Security "max-age=31536000";
93 |
94 |        location / {
95 |            proxy_pass http://127.0.0.1:8080;
96 |            proxy_set_header Host $host;
97 |            proxy_set_header X-Real-IP $remote_addr;
98 |            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
99 |        }
100 |    }
101 |
102 | .. _Nginx: http://nginx.org/
103 |
104 |
105 | Using :program:`geofront-cli`
106 | -----------------------------
107 |
108 | Every team member who wants to use Geofront has to install a Geofront client.
109 | `geofront-cli`_ is the reference implementation of the Geofront client.
110 | It can be installed using :program:`pip`:
111 |
112 | .. code-block:: console
113 |
114 |    $ pip install geofront-cli
115 |
116 | To set up which Geofront server to use, run the :program:`geofront-cli start` command.
117 | It will show a prompt:
118 |
119 | .. code-block:: console
120 |
121 |    $ geofront-cli start
122 |    Geofront server URL:
123 |
124 | Type the server URL, and then it will open an authentication page in your
125 | default web browser:
126 |
127 | .. code-block:: console
128 |
129 |    $ geofront-cli start
130 |    Geofront server URL: https://geofront-example.org/
131 |    Continue to authenticate in your web browser...
132 |    Press return to continue
133 |
134 | That's it.  The setup process is required only the first time.  You can show
135 | the list of available remotes using :program:`geofront-cli remotes`:
136 |
137 | .. code-block:: console
138 |
139 |    $ geofront-cli remotes
140 |    web-1
141 |    web-2
142 |    ...
143 |
144 | For more details on :program:`geofront-cli`, read its :file:`README.rst`,
145 | or use the :option:`geofront-cli --help` option.
146 |
147 | .. _geofront-cli: https://github.com/spoqa/geofront-cli
148 |
149 |
150 | Remote colonization
151 | -------------------
152 |
153 | Until a remote server authorizes the master key, you can't access the remote
154 | using :program:`geofront-cli`.  So the master key needs to be added to the
155 | remote's :file:`authorized_keys` list.  Geofront calls this *colonization*.
156 | You can colonize a remote using the :program:`geofront-cli colonize` command.
157 | Naturally, the following command has to be run by someone who already has access to the remote:
158 |
159 | .. code-block:: console
160 |
161 |    $ geofront-cli remotes
162 |    web-1
163 |    web-2
164 |    ...
165 |    $ geofront-cli colonize web-1
166 |
167 | You can think of :program:`geofront-cli colonize` as :program:`ssh-copy-id`
168 | for Geofront.  Once colonized, a remote is accessible by every team member
169 | unless you configure a more fine-grained ACL.
170 | (See also :data:`~config.PERMISSION_POLICY` if you're interested in ACLs.)
171 |
172 |
173 | SSH through Geofront
174 | --------------------
175 |
176 | Once a remote is colonized, any team member can :program:`ssh` to it through
177 | Geofront.  Use the :program:`geofront-cli ssh` command:
178 |
179 | .. code-block:: console
180 |
181 |    $ geofront-cli ssh web-1
182 |    Last login: Sat May 3 16:32:15 2014 from hong-minhees-macbook-pro.local
183 |    $
--------------------------------------------------------------------------------
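# --- Illustrative sketch, not a file from the repository ---------------------
# A quick demonstration of the public key helpers defined in
# geofront/keystore.py below, mirroring how the test suite uses them;
# a freshly generated key stands in for a real user key.
from paramiko.rsakey import RSAKey

from geofront.keystore import (format_openssh_pubkey, get_key_fingerprint,
                               parse_openssh_pubkey)

key = RSAKey.generate(1024)
line = format_openssh_pubkey(key)         # 'ssh-rsa AAAA... ' OpenSSH format
assert parse_openssh_pubkey(line) == key  # round-trips through the parser
print(get_key_fingerprint(key))           # e.g. 'f5:6e:03:...:11:df'
print(get_key_fingerprint(key, ''))       # the same digest without separators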
/geofront/keystore.py:
--------------------------------------------------------------------------------
1 | """:mod:`geofront.keystore` --- Public key store
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | """
5 | import base64
6 | from typing import TYPE_CHECKING, AbstractSet
7 |
8 | from paramiko.dsskey import DSSKey
9 | from paramiko.ecdsakey import ECDSAKey
10 | from paramiko.ed25519key import Ed25519Key
11 | from paramiko.pkey import PKey
12 | from paramiko.rsakey import RSAKey
13 | from typeguard import typechecked
14 |
15 | from .identity import Identity
16 |
17 | if TYPE_CHECKING:
18 |     from typing import Mapping, Type  # noqa: F401
19 |
20 | __all__ = ('KEY_TYPES', 'AuthorizationError', 'DuplicatePublicKeyError',
21 |            'KeyStore', 'KeyStoreError', 'KeyTypeError',
22 |            'format_openssh_pubkey', 'get_key_fingerprint',
23 |            'parse_openssh_pubkey')
24 |
25 |
26 | #: (:class:`~typing.Mapping`\ [:class:`str`,
27 | #: :class:`~typing.Type`\ [:class:`~paramiko.pkey.PKey`]])
28 | #: The mapping of supported key types.
29 | #:
30 | #: .. versionadded:: 0.4.0
31 | #:    Added ``ecdsa-sha2-nistp256``, ``ecdsa-sha2-nistp384``, and
32 | #:    ``ecdsa-sha2-nistp521`` (:class:`~paramiko.ecdsakey.ECDSAKey`) support.
33 | #:
34 | #: .. versionadded:: 0.4.1
35 | #:    Added ``ssh-ed25519`` (:class:`~paramiko.ed25519key.Ed25519Key`) support.
36 | KEY_TYPES = {
37 |     'ssh-rsa': RSAKey,
38 |     'ssh-dss': DSSKey,
39 |     'ssh-ed25519': Ed25519Key,
40 |     'ecdsa-sha2-nistp256': ECDSAKey,
41 |     'ecdsa-sha2-nistp384': ECDSAKey,
42 |     'ecdsa-sha2-nistp521': ECDSAKey,
43 | }  # type: Mapping[str, Type[PKey]]
44 |
45 |
46 | @typechecked
47 | def parse_openssh_pubkey(line: str) -> PKey:
48 |     """Parse an OpenSSH public key line, used by :file:`authorized_keys`,
49 |     :file:`id_rsa.pub`, etc.
50 |
51 |     :param line: a line of public key
52 |     :type line: :class:`str`
53 |     :return: the parsed public key
54 |     :rtype: :class:`paramiko.pkey.PKey`
55 |     :raise ValueError: when the given ``line`` is in an invalid format
56 |     :raise KeyTypeError: when it's an unsupported key type
57 |
58 |     .. versionchanged:: 0.4.0
59 |        Added ``ecdsa-sha2-nistp256``, ``ecdsa-sha2-nistp384``, and
60 |        ``ecdsa-sha2-nistp521`` (:class:`~paramiko.ecdsakey.ECDSAKey`)
61 |        support.
62 |
63 |     """
64 |     keytype, b64, *_ = line.split()
65 |     try:
66 |         cls = KEY_TYPES[keytype]
67 |     except KeyError:
68 |         raise KeyTypeError('unsupported key type: ' + repr(keytype))
69 |     return cls(data=base64.b64decode(b64))
70 |
71 |
72 | @typechecked
73 | def format_openssh_pubkey(key: PKey) -> str:
74 |     """Format the given ``key`` to an OpenSSH public key line, used by
75 |     :file:`authorized_keys`, :file:`id_rsa.pub`, etc.
76 |
77 |     :param key: the key object to format
78 |     :type key: :class:`paramiko.pkey.PKey`
79 |     :return: a formatted openssh public key line
80 |     :rtype: :class:`str`
81 |
82 |     """
83 |     return '{} {} '.format(key.get_name(), key.get_base64())
84 |
85 |
86 | @typechecked
87 | def get_key_fingerprint(key: PKey, glue: str=':') -> str:
88 |     """Get the hexadecimal fingerprint string of the ``key``.
89 |
90 |     :param key: the key to get fingerprint
91 |     :type key: :class:`paramiko.pkey.PKey`
92 |     :param glue: glue character to be placed between bytes.
93 |                  ``':'`` by default
94 |     :type glue: :class:`str`
95 |     :return: the fingerprint string
96 |     :rtype: :class:`str`
97 |
98 |     """
99 |     return glue.join(map('{:02x}'.format, key.get_fingerprint()))
100 |
101 |
102 | class KeyStore:
103 |     """The key store backend interface.  Every key store has to guarantee
104 |     that public keys are unique for all identities i.e. the same public key
105 |     can't be registered by more than one identity.
106 |
107 |     """
108 |
109 |     @typechecked
110 |     def register(self, identity: Identity, public_key: PKey) -> None:
111 |         """Register the given ``public_key`` to the ``identity``.
112 |
113 |         :param identity: the owner identity
114 |         :type identity: :class:`~.identity.Identity`
115 |         :param public_key: the public key to register
116 |         :type public_key: :class:`paramiko.pkey.PKey`
117 |         :raise geofront.keystore.AuthorizationError:
118 |             when the given ``identity`` has no required permission
119 |             to the key store
120 |         :raise geofront.keystore.DuplicatePublicKeyError:
121 |             when the ``public_key`` is already in use
122 |
123 |
124 |         """
125 |         raise NotImplementedError('register() has to be implemented')
126 |
127 |     @typechecked
128 |     def list_keys(self, identity: Identity) -> AbstractSet[PKey]:
129 |         """List registered public keys of the given ``identity``.
130 |
131 |         :param identity: the owner of keys to list
132 |         :type identity: :class:`~.identity.Identity`
133 |         :return: the set of :class:`paramiko.pkey.PKey`
134 |             owned by the ``identity``
135 |         :rtype: :class:`~typing.AbstractSet`
136 |         :raise geofront.keystore.AuthorizationError:
137 |             when the given ``identity`` has no required permission
138 |             to the key store
139 |
140 |         """
141 |         raise NotImplementedError('list_keys() has to be implemented')
142 |
143 |     @typechecked
144 |     def deregister(self, identity: Identity, public_key: PKey) -> None:
145 |         """Remove the given ``public_key`` of the ``identity``.
146 |         It silently does nothing if there isn't the given ``public_key``
147 |         in the store.
148 |
149 |         :param identity: the owner identity
150 |         :type identity: :class:`~.identity.Identity`
151 |         :param public_key: the public key to remove
152 |         :type public_key: :class:`paramiko.pkey.PKey`
153 |         :raise geofront.keystore.AuthorizationError:
154 |             when the given ``identity`` has no required permission
155 |             to the key store
156 |
157 |         """
158 |         raise NotImplementedError('deregister() has to be implemented')
159 |
160 |
161 | class KeyStoreError(Exception):
162 |     """Exceptions related to :class:`KeyStore` are instances of this."""
163 |
164 |
165 | class AuthorizationError(KeyStoreError):
166 |     """Authorization exception raised when the given identity has
167 |     no required permission to the key store.
168 |
169 |     """
170 |
171 |
172 | class DuplicatePublicKeyError(KeyStoreError):
173 |     """Exception raised when the given public key is already registered."""
174 |
175 |
176 | class KeyTypeError(ValueError):
177 |     """Unsupported public key types raise this type of error."""
--------------------------------------------------------------------------------
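# --- Illustrative sketch, not a file from the repository ---------------------
# A minimal in-memory KeyStore that satisfies the interface documented above:
# register() enforces global uniqueness of public keys and raises
# DuplicatePublicKeyError, while deregister() silently ignores missing keys.
# Suitable only for tests or experiments; real deployments would use e.g.
# DatabaseKeyStore or GitHubKeyStore.
from typing import AbstractSet, Dict, Set, Tuple

from paramiko.pkey import PKey

from geofront.identity import Identity
from geofront.keystore import DuplicatePublicKeyError, KeyStore


class InMemoryKeyStore(KeyStore):

    def __init__(self) -> None:
        # Keyed the same way DatabaseKeyStore identifies owners: by the
        # identity's team type and identifier.
        self.keys = {}  # type: Dict[Tuple[type, object], Set[PKey]]

    def _owner(self, identity: Identity) -> Tuple[type, object]:
        return identity.team_type, identity.identifier

    def register(self, identity: Identity, public_key: PKey) -> None:
        if any(public_key in keys for keys in self.keys.values()):
            raise DuplicatePublicKeyError('public key is already in use')
        self.keys.setdefault(self._owner(identity), set()).add(public_key)

    def list_keys(self, identity: Identity) -> AbstractSet[PKey]:
        return frozenset(self.keys.get(self._owner(identity), frozenset()))

    def deregister(self, identity: Identity, public_key: PKey) -> None:
        self.keys.get(self._owner(identity), set()).discard(public_key)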
/geofront/backends/github.py:
--------------------------------------------------------------------------------
1 | """:mod:`geofront.backends.github` --- GitHub organization and key store
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | """
5 | import collections.abc
6 | import json
7 | import logging
8 | import typing
9 | import urllib.error
10 | import urllib.request
11 |
12 | from paramiko.pkey import PKey
13 | from typeguard import typechecked
14 | from werkzeug.http import parse_options_header
15 |
16 | from ..identity import Identity
17 | from ..keystore import (DuplicatePublicKeyError, KeyStore,
18 |                         format_openssh_pubkey, get_key_fingerprint,
19 |                         parse_openssh_pubkey)
20 | from ..team import GroupSet
21 | from .oauth import OAuth2Team, request
22 |
23 |
24 | __all__ = 'GitHubKeyStore', 'GitHubOrganization'
25 |
26 |
27 | class GitHubOrganization(OAuth2Team):
28 |     """Authenticate team membership through GitHub, and authorize access
29 |     to the GitHub key store.
30 |
31 |     Note that the group identifiers the :meth:`list_groups()` method returns
32 |     are GitHub team *slugs*.  You can find what team slugs there are in
33 |     the organization using the GitHub API:
34 |
35 |     .. code-block:: console
36 |
37 |        $ curl -u YourUserLogin https://api.github.com/orgs/YourOrgLogin/teams
38 |        Enter host password for user 'YourUserLogin':
39 |        [
40 |          {
41 |            "name": "Owners",
42 |            "id": 111111,
43 |            "slug": "owners",
44 |            "permission": "admin",
45 |            "url": "https://api.github.com/teams/111111",
46 |            ...
47 |          },
48 |          {
49 |            "name": "Programmers",
50 |            "id": 222222,
51 |            "slug": "programmers",
52 |            "permission": "pull",
53 |            "url": "https://api.github.com/teams/222222",
54 |            ...
55 | } 56 | ] 57 | 58 | In the above example, ``owners`` and ``programmers`` are team slugs. 59 | 60 | :param client_id: github api client id 61 | :type client_id: :class:`str` 62 | :param client_secret: github api client secret 63 | :type client_secret: :class:`str` 64 | :param org_login: github org account name. for example ``'spoqa'`` 65 | in https://github.com/spoqa 66 | :type org_login: :class:`str` 67 | 68 | """ 69 | 70 | authorize_url = 'https://github.com/login/oauth/authorize' 71 | authorize_scope = 'read:org,admin:public_key' 72 | access_token_url = 'https://github.com/login/oauth/access_token' 73 | user_url = 'https://api.github.com/user' 74 | orgs_list_url = 'https://api.github.com/user/orgs' 75 | teams_list_url = 'https://api.github.com/user/teams' 76 | unauthorized_identity_message_format = ( 77 | '@{identity.identifier} user is not a member of ' 78 | '@{team.org_login} organization' 79 | ) 80 | 81 | @typechecked 82 | def __init__(self, 83 | client_id: str, 84 | client_secret: str, 85 | org_login: str) -> None: 86 | super().__init__(client_id, client_secret) 87 | self.org_login = org_login 88 | 89 | def determine_identity(self, access_token: str) -> Identity: 90 | user_data = request(access_token, self.user_url) 91 | return Identity(type(self), user_data['login'], access_token) 92 | 93 | def authorize(self, identity: Identity) -> bool: 94 | if not issubclass(identity.team_type, type(self)): 95 | return False 96 | try: 97 | response = request(identity, self.orgs_list_url) 98 | except IOError: 99 | return False 100 | if isinstance(response, collections.abc.Mapping) and \ 101 | 'error' in response: 102 | return False 103 | return any(o['login'] == self.org_login for o in response) 104 | 105 | def list_groups(self, identity: Identity) -> GroupSet: 106 | if not issubclass(identity.team_type, type(self)): 107 | return frozenset() 108 | try: 109 | response = request(identity, self.teams_list_url) 110 | except IOError: 111 | return frozenset() 112 | if isinstance(response, collections.abc.Mapping) and \ 113 | 'error' in response: 114 | return frozenset() 115 | return frozenset(t['slug'] 116 | for t in response 117 | if t['organization']['login'] == self.org_login) 118 | 119 | 120 | class GitHubKeyStore(KeyStore): 121 | """Use GitHub account's public keys as key store.""" 122 | 123 | list_url = 'https://api.github.com/user/keys' 124 | deregister_url = list_url + '/{id}' 125 | logger = logging.getLogger(__name__ + '.GitHubKeyStore') 126 | 127 | @typechecked 128 | def register(self, identity: Identity, public_key: PKey) -> None: 129 | logger = self.logger.getChild('register') 130 | title = get_key_fingerprint(public_key) 131 | data = json.dumps({ 132 | 'title': title, 133 | 'key': format_openssh_pubkey(public_key) 134 | }) 135 | try: 136 | request(identity, self.list_url, 'POST', data=data.encode()) 137 | except urllib.error.HTTPError as e: 138 | if e.code != 422: 139 | raise 140 | content_type = e.headers.get('Content-Type') 141 | mimetype, options = parse_options_header(content_type) 142 | if mimetype != 'application/json': 143 | raise 144 | charset = options.get('charset', 'utf-8') 145 | content_body = e.read().decode(charset) 146 | logger.debug('response body:\n%s', content_body) 147 | response = json.loads(content_body) 148 | for error in response.get('errors', []): 149 | if not isinstance(error, dict): 150 | continue 151 | elif error.get('field') != 'key': 152 | continue 153 | message = error.get('message', '').strip().lower() 154 | if message != 'key is already in use': 155 | 
continue 156 | raise DuplicatePublicKeyError(message) 157 | raise 158 | 159 | @typechecked 160 | def _list_keys(self, identity: Identity) -> typing.Iterable[PKey]: 161 | logger = self.logger.getChild('list_keys') 162 | keys = request(identity, self.list_url) 163 | for key in keys: 164 | try: 165 | yield parse_openssh_pubkey(key['key']), key 166 | except Exception as e: 167 | logger.exception(str(e)) 168 | continue 169 | 170 | @typechecked 171 | def list_keys(self, identity: Identity) -> typing.AbstractSet[PKey]: 172 | return frozenset(pkey for pkey, _ in self._list_keys(identity)) 173 | 174 | @typechecked 175 | def deregister(self, identity: Identity, public_key: PKey) -> None: 176 | for pkey, key in self._list_keys(identity): 177 | if pkey == public_key: 178 | request(identity, self.deregister_url.format(**key), 'DELETE') 179 | break 180 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Geofront.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Geofront.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Geofront" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Geofront" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 
11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | goto end 41 | ) 42 | 43 | if "%1" == "clean" ( 44 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 45 | del /q /s %BUILDDIR%\* 46 | goto end 47 | ) 48 | 49 | 50 | %SPHINXBUILD% 2> nul 51 | if errorlevel 9009 ( 52 | echo. 53 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 54 | echo.installed, then set the SPHINXBUILD environment variable to point 55 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 56 | echo.may add the Sphinx directory to PATH. 57 | echo. 58 | echo.If you don't have Sphinx installed, grab it from 59 | echo.http://sphinx-doc.org/ 60 | exit /b 1 61 | ) 62 | 63 | if "%1" == "html" ( 64 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 68 | goto end 69 | ) 70 | 71 | if "%1" == "dirhtml" ( 72 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 76 | goto end 77 | ) 78 | 79 | if "%1" == "singlehtml" ( 80 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 84 | goto end 85 | ) 86 | 87 | if "%1" == "pickle" ( 88 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can process the pickle files. 92 | goto end 93 | ) 94 | 95 | if "%1" == "json" ( 96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 97 | if errorlevel 1 exit /b 1 98 | echo. 99 | echo.Build finished; now you can process the JSON files. 100 | goto end 101 | ) 102 | 103 | if "%1" == "htmlhelp" ( 104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 105 | if errorlevel 1 exit /b 1 106 | echo. 107 | echo.Build finished; now you can run HTML Help Workshop with the ^ 108 | .hhp project file in %BUILDDIR%/htmlhelp. 109 | goto end 110 | ) 111 | 112 | if "%1" == "qthelp" ( 113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 114 | if errorlevel 1 exit /b 1 115 | echo. 
116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 117 | .qhcp project file in %BUILDDIR%/qthelp, like this: 118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Geofront.qhcp 119 | echo.To view the help file: 120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Geofront.ghc 121 | goto end 122 | ) 123 | 124 | if "%1" == "devhelp" ( 125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished. 129 | goto end 130 | ) 131 | 132 | if "%1" == "epub" ( 133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 137 | goto end 138 | ) 139 | 140 | if "%1" == "latex" ( 141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 145 | goto end 146 | ) 147 | 148 | if "%1" == "latexpdf" ( 149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 150 | cd %BUILDDIR%/latex 151 | make all-pdf 152 | cd %BUILDDIR%/.. 153 | echo. 154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 155 | goto end 156 | ) 157 | 158 | if "%1" == "latexpdfja" ( 159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 160 | cd %BUILDDIR%/latex 161 | make all-pdf-ja 162 | cd %BUILDDIR%/.. 163 | echo. 164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 165 | goto end 166 | ) 167 | 168 | if "%1" == "text" ( 169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 170 | if errorlevel 1 exit /b 1 171 | echo. 172 | echo.Build finished. The text files are in %BUILDDIR%/text. 173 | goto end 174 | ) 175 | 176 | if "%1" == "man" ( 177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 178 | if errorlevel 1 exit /b 1 179 | echo. 180 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 181 | goto end 182 | ) 183 | 184 | if "%1" == "texinfo" ( 185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 186 | if errorlevel 1 exit /b 1 187 | echo. 188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 189 | goto end 190 | ) 191 | 192 | if "%1" == "gettext" ( 193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 194 | if errorlevel 1 exit /b 1 195 | echo. 196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 197 | goto end 198 | ) 199 | 200 | if "%1" == "changes" ( 201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 202 | if errorlevel 1 exit /b 1 203 | echo. 204 | echo.The overview file is in %BUILDDIR%/changes. 205 | goto end 206 | ) 207 | 208 | if "%1" == "linkcheck" ( 209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 210 | if errorlevel 1 exit /b 1 211 | echo. 212 | echo.Link check complete; look for any errors in the above output ^ 213 | or in %BUILDDIR%/linkcheck/output.txt. 214 | goto end 215 | ) 216 | 217 | if "%1" == "doctest" ( 218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 219 | if errorlevel 1 exit /b 1 220 | echo. 221 | echo.Testing of doctests in the sources finished, look at the ^ 222 | results in %BUILDDIR%/doctest/output.txt. 223 | goto end 224 | ) 225 | 226 | if "%1" == "xml" ( 227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 228 | if errorlevel 1 exit /b 1 229 | echo. 230 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 
231 |     goto end
232 | )
233 |
234 | if "%1" == "pseudoxml" (
235 |     %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
236 |     if errorlevel 1 exit /b 1
237 |     echo.
238 |     echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
239 |     goto end
240 | )
241 |
242 | :end
243 |
--------------------------------------------------------------------------------
/geofront/backends/dbapi.py:
--------------------------------------------------------------------------------
1 | """:mod:`geofront.backends.dbapi` --- Key store using DB-API 2.0
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | .. seealso::
5 |
6 |    :pep:`249` --- Python Database API Specification v2.0
7 |
8 | .. versionadded:: 0.2.0
9 |
10 | """
11 | import base64
12 | import contextlib
13 | import types
14 | from typing import AbstractSet, Mapping, Tuple, Type, Union, cast
15 |
16 | from paramiko.pkey import PKey
17 | from typeguard import typechecked
18 |
19 | from ..identity import Identity
20 | from ..keystore import (KEY_TYPES, DuplicatePublicKeyError, KeyStore,
21 |                         KeyTypeError, get_key_fingerprint)
22 |
23 | __all__ = 'DatabaseKeyStore',
24 |
25 |
26 | class DatabaseKeyStore(KeyStore):
27 |     """Store public keys into a database through DB-API 2.0.  It takes
28 |     a module that implements DB-API 2.0, and arguments/keywords to
29 |     its ``connect()`` method.  For example, the following code stores
30 |     public keys into an SQLite 3 database::
31 |
32 |         import sqlite3
33 |         DatabaseKeyStore(sqlite3, 'geofront.db')
34 |
35 |     The following code stores public keys into a PostgreSQL database
36 |     through psycopg2_::
37 |
38 |         import psycopg2
39 |         DatabaseKeyStore(psycopg2, database='geofront', user='postgres')
40 |
41 |     It will create a table named ``geofront_public_key`` in the database.
42 |
43 |     :param db_module: :pep:`249` DB-API 2.0 compliant module
44 |     :type db_module: :class:`types.ModuleType`
45 |     :param \*args: arguments to ``db_module.connect()`` function
46 |     :param \*\*kwargs: keyword arguments to ``db_module.connect()`` function
47 |
48 |     .. _psycopg2: http://initd.org/psycopg/
49 |
50 |     """
51 |
52 |     @typechecked
53 |     def __init__(self, db_module: types.ModuleType, *args, **kwargs) -> None:
54 |         module_name = db_module.__name__
55 |         if not callable(getattr(db_module, 'connect', None)):
56 |             raise TypeError('db_module must be DB-API 2.0 compliant, but {} '
57 |                             'lacks connect() function'.format(module_name))
58 |         elif not isinstance(getattr(db_module, 'IntegrityError', None),
59 |                             type):
60 |             raise TypeError('db_module must be DB-API 2.0 compliant, but {} '
61 |                             'lacks IntegrityError'.format(module_name))
62 |         integrity_error = db_module.IntegrityError  # type: ignore
63 |         if not issubclass(integrity_error, Exception):
64 |             raise TypeError(
65 |                 'db_module must be DB-API 2.0 compliant, but {}.'
66 | 'IntegrityError is not an exception class'.format(module_name) 67 | ) 68 | self.db_module = db_module 69 | self.integrity_error = cast(Type[Exception], integrity_error) 70 | self.connection_args = args 71 | self.connection_kwargs = kwargs 72 | 73 | @contextlib.contextmanager 74 | def _connect(self): 75 | connection = self.db_module.connect(*self.connection_args, 76 | **self.connection_kwargs) 77 | cursor = connection.cursor() 78 | try: 79 | cursor.execute(''' 80 | CREATE TABLE IF NOT EXISTS geofront_public_key ( 81 | key_type VARCHAR(64) NOT NULL, 82 | key_fingerprint VARCHAR(32) NOT NULL, 83 | key_base64 VARCHAR(2048) NOT NULL, 84 | team_type VARCHAR(128) NOT NULL, 85 | identifier VARCHAR(128) NOT NULL, 86 | PRIMARY KEY (key_type, key_fingerprint) 87 | ) 88 | ''') 89 | connection.commit() 90 | finally: 91 | cursor.close() 92 | yield connection 93 | connection.close() 94 | 95 | def _execute(self, cursor, sql: str, params: Tuple[str, ...]) -> None: 96 | """To support various paramstyles. See the following specification: 97 | 98 | http://legacy.python.org/dev/peps/pep-0249/#paramstyle 99 | 100 | """ 101 | final_params = cast(Union[Tuple[str, ...], Mapping[str, str]], params) 102 | paramstyle = self.db_module.paramstyle # type: ignore 103 | if paramstyle == 'format': 104 | sql = sql.replace('?', '%s') 105 | elif paramstyle != 'qmark': 106 | if paramstyle == 'numeric': 107 | fmt = ':{}' 108 | i = 1 109 | else: 110 | if paramstyle == 'named': 111 | fmt = ':p{}' 112 | else: # pyformat 113 | fmt = '%(p{})s' 114 | final_params = \ 115 | {'p' + str(i): val for i, val in enumerate(params)} 116 | i = 0 117 | while '?' in sql: 118 | sql = sql.replace('?', fmt.format(i), 1) 119 | i += 1 120 | cursor.execute(sql, final_params) 121 | 122 | def _get_key_params(self, public_key: PKey) -> Tuple[str, str]: 123 | return public_key.get_name(), get_key_fingerprint(public_key, '') 124 | 125 | def _get_identity_params(self, identity: Identity) -> Tuple[str, str]: 126 | return ('{0.__module__}.{0.__qualname__}'.format(identity.team_type), 127 | str(identity.identifier)) 128 | 129 | def _get_key_class(self, keytype: str) -> type: 130 | try: 131 | return KEY_TYPES[keytype] 132 | except KeyError: 133 | raise KeyTypeError('unsupported key type: ' + repr(keytype)) 134 | 135 | @typechecked 136 | def register(self, identity: Identity, public_key: PKey) -> None: 137 | with self._connect() as connection: 138 | cursor = connection.cursor() 139 | try: 140 | params = (self._get_key_params(public_key) + 141 | (public_key.get_base64(),) + 142 | self._get_identity_params(identity)) 143 | self._execute(cursor, ''' 144 | INSERT INTO geofront_public_key ( 145 | key_type, key_fingerprint, key_base64, 146 | team_type, identifier 147 | ) VALUES (?, ?, ?, ?, ?) 148 | ''', params) 149 | connection.commit() 150 | except self.integrity_error as e: 151 | raise DuplicatePublicKeyError(str(e)) 152 | finally: 153 | cursor.close() 154 | 155 | @typechecked 156 | def list_keys(self, identity: Identity) -> AbstractSet[PKey]: 157 | with self._connect() as connection: 158 | cursor = connection.cursor() 159 | try: 160 | self._execute(cursor, ''' 161 | SELECT key_type, key_base64 162 | FROM geofront_public_key 163 | WHERE team_type = ? AND identifier = ? 
164 | ''', self._get_identity_params(identity)) 165 | return frozenset( 166 | self._get_key_class(keytype)(data=base64.b64decode(b64)) 167 | for keytype, b64 in cursor.fetchall() 168 | ) 169 | finally: 170 | cursor.close() 171 | 172 | @typechecked 173 | def deregister(self, identity: Identity, public_key: PKey) -> None: 174 | with self._connect() as connection: 175 | cursor = connection.cursor() 176 | try: 177 | params = (self._get_key_params(public_key) + 178 | self._get_identity_params(identity)) 179 | self._execute(cursor, ''' 180 | DELETE FROM geofront_public_key 181 | WHERE key_type = ? AND key_fingerprint = ? AND 182 | team_type = ? AND identifier = ? 183 | ''', params) 184 | connection.commit() 185 | finally: 186 | cursor.close() 187 | -------------------------------------------------------------------------------- /tests/backends/cloud_test.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import io 3 | import os 4 | 5 | from libcloud.compute.base import KeyPair, Node 6 | from libcloud.compute.drivers.dummy import DummyNodeDriver 7 | from libcloud.compute.types import KeyPairDoesNotExistError 8 | from libcloud.storage.drivers import dummy 9 | from libcloud.storage.drivers.dummy import DummyStorageDriver 10 | from libcloud.storage.providers import get_driver 11 | from libcloud.storage.types import ObjectDoesNotExistError, Provider 12 | from paramiko.rsakey import RSAKey 13 | from pytest import raises, skip 14 | 15 | from ..keystore_test import assert_keystore_compliance 16 | from ..server_test import DummyTeam, MemoryMasterKeyStore 17 | from geofront.backends.cloud import (CloudKeyStore, CloudMasterKeyStore, 18 | CloudMasterPublicKeyStore, CloudRemoteSet, 19 | get_metadata, supports_metadata) 20 | from geofront.identity import Identity 21 | from geofront.keystore import (format_openssh_pubkey, get_key_fingerprint, 22 | parse_openssh_pubkey) 23 | from geofront.masterkey import EmptyStoreError, read_private_key_file 24 | from geofront.remote import Remote 25 | 26 | 27 | @supports_metadata.register(DummyNodeDriver) 28 | def dummy_supports_metadata(driver: DummyNodeDriver): 29 | return True 30 | 31 | 32 | @get_metadata.register(DummyNodeDriver) 33 | def dummy_get_metadata(driver: DummyNodeDriver, node: Node): 34 | return {'dummy': 'test'} 35 | 36 | 37 | def test_cloud_remote_set(): 38 | driver = DummyNodeDriver('') 39 | set_ = CloudRemoteSet(driver) 40 | assert len(set_) == 2 41 | assert set_['dummy-1'] == Remote('ec2-user', '127.0.0.1') 42 | assert set_['dummy-1'].metadata == {'dummy': 'test'} 43 | assert set_['dummy-2'] == Remote('ec2-user', '127.0.0.1') 44 | assert set_['dummy-2'].metadata == {'dummy': 'test'} 45 | id_set = CloudRemoteSet(driver, alias_namer=lambda n: 'id-' + n.id) 46 | assert frozenset(id_set) == {'id-1', 'id-2'} 47 | assert id_set['id-1'] == set_['dummy-1'] 48 | assert id_set['id-2'] == set_['dummy-2'] 49 | 50 | 51 | def test_cloud_master_key_store(): 52 | driver = DummyStorageDriver('', '') 53 | container = driver.create_container('geofront-test') 54 | s = CloudMasterKeyStore(driver, container, 'test_id_rsa') 55 | with raises(EmptyStoreError): 56 | s.load() 57 | key = RSAKey.generate(1024) 58 | s.save(key) 59 | driver.get_object(container.name, 'test_id_rsa') # assert object exists 60 | # Mocking implementation 61 | with io.StringIO() as mock: 62 | key.write_private_key(mock) 63 | mock.seek(0) 64 | dummy.DummyFileObject = lambda *a, **k: mock 65 | stored_key = s.load() 66 | assert isinstance(stored_key, 
RSAKey) 67 | assert stored_key.get_base64() == stored_key.get_base64() 68 | 69 | 70 | def test_cloud_master_key_store_s3(request, tmpdir): 71 | try: 72 | access_key = request.config.getoption('--aws-access-key') 73 | secret_key = request.config.getoption('--aws-secret-key') 74 | bucket_name = request.config.getoption('--aws-s3-bucket') 75 | except ValueError: 76 | access_key = secret_key = bucket_name = None 77 | if access_key is None or secret_key is None or bucket_name is None: 78 | skip( 79 | '--aws-access-key/--aws-secret-key/--aws-s3-bucket are not ' 80 | 'provided; skipped' 81 | ) 82 | driver_cls = get_driver(Provider.S3) 83 | driver = driver_cls(access_key, secret_key) 84 | container = driver.get_container(container_name=bucket_name) 85 | tmpname = ''.join(map('{:02x}'.format, os.urandom(16))) 86 | s = CloudMasterKeyStore(driver, container, tmpname) 87 | key = RSAKey.generate(1024) 88 | # load() -- when not exists 89 | with raises(EmptyStoreError): 90 | s.load() 91 | try: 92 | # save() 93 | s.save(key) 94 | obj = driver.get_object(container.name, tmpname) 95 | dest = tmpdir / tmpname 96 | obj.download(str(dest)) 97 | saved = read_private_key_file(dest.open()) 98 | assert isinstance(saved, RSAKey) 99 | assert saved.get_base64() == key.get_base64() 100 | # load() -- when exists 101 | loaded = s.load() 102 | assert isinstance(loaded, RSAKey) 103 | assert loaded.get_base64() == key.get_base64() 104 | finally: 105 | try: 106 | o = driver.get_object(container.name, tmpname) 107 | except ObjectDoesNotExistError: 108 | pass 109 | else: 110 | o.delete() 111 | 112 | 113 | class KeyPairSupportedDummyNodeDriver(DummyNodeDriver): 114 | 115 | def __init__(self, *args, **kwargs): 116 | super().__init__(*args, **kwargs) 117 | self.key_pairs = {} 118 | 119 | def get_key_pair(self, name): 120 | try: 121 | key_material = self.key_pairs[name] 122 | except KeyError: 123 | raise KeyPairDoesNotExistError(name, self) 124 | return KeyPair(name, 125 | key_material, 126 | get_key_fingerprint(parse_openssh_pubkey(key_material)), 127 | self) 128 | 129 | def list_key_pairs(self): 130 | return [self.get_key_pair(name) for name in self.key_pairs] 131 | 132 | def import_key_pair_from_string(self, name, key_material): 133 | self.key_pairs[name] = key_material 134 | 135 | def delete_key_pair(self, key_pair): 136 | del self.key_pairs[key_pair.name] 137 | 138 | def import_key_pair_from_file(self, name, key_file_path): 139 | with open(key_file_path) as f: 140 | self.import_key_pair_from_string(name, f.read()) 141 | 142 | def create_key_pair(self, name): 143 | self.import_key_pair_from_string( 144 | name, 145 | format_openssh_pubkey(RSAKey.generate(1024)) 146 | ) 147 | 148 | 149 | def test_cloud_key_store(): 150 | driver = KeyPairSupportedDummyNodeDriver('') 151 | keystore = CloudKeyStore(driver) 152 | identity = Identity(DummyTeam, 'abcd') 153 | assert_keystore_compliance(keystore, identity) 154 | identity2 = Identity(DummyTeam, 'efg') 155 | assert_keystore_compliance(keystore, identity2) 156 | 157 | 158 | def test_cloud_key_store_get_key_name_pattern(): 159 | for i in range(100): 160 | CloudKeyStore._sample_keys = None 161 | # repeat for better reproducibility 162 | 163 | driver = KeyPairSupportedDummyNodeDriver('') 164 | keystore = CloudKeyStore(driver) 165 | identity = Identity(DummyTeam, 'abcd') 166 | pattern = keystore._get_key_name_pattern(identity) 167 | print('Cached CloudKeyStore._sample_keys:', CloudKeyStore._sample_keys) 168 | print('Cached CloudKeyStore._sample_keys (as names):', 169 | 
          tuple(keystore._get_key_name(identity, k)
170 |               for k in CloudKeyStore._sample_keys))
171 |         print('Generated pattern:', pattern.pattern)
172 |
173 |         def random_fp():
174 |             return ':'.join(
175 |                 map('{:02x}'.format, hashlib.md5(os.urandom(100)).digest())
176 |             )
177 |         actual = {
178 |             'tests.server_test.DummyTeam abcd ' + random_fp()
179 |             for _ in range(5)
180 |         }
181 |         result = filter(pattern.match, actual | {
182 |             'tests.server_test.DummyTeam defg ' + random_fp(),
183 |             'tests.server_test.OtherTeam abcd ' + random_fp(),
184 |             'tests.server_test.DummyTeam abcd ',
185 |             'junk'
186 |         })
187 |         result = frozenset(result)
188 |         assert result == actual
189 |
190 |
191 | def test_cloud_master_public_key_store():
192 |     driver = KeyPairSupportedDummyNodeDriver('')
193 |     actual_store = MemoryMasterKeyStore()
194 |     store = CloudMasterPublicKeyStore(driver,
195 |                                       'geofront-masterkey',
196 |                                       actual_store)
197 |     for _ in range(2):
198 |         master_key = RSAKey.generate(1024)
199 |         store.save(master_key)
200 |         assert actual_store.load() == store.load() == master_key
201 |         assert parse_openssh_pubkey(
202 |             driver.get_key_pair('geofront-masterkey').public_key
203 |         ) == master_key
204 |
--------------------------------------------------------------------------------
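# --- Illustrative sketch, not a file from the repository ---------------------
# How the cloud backends exercised by the tests above might be wired together
# in a configuration file.  Assumptions: Apache Libcloud EC2/S3 drivers, an
# existing S3 bucket named 'geofront-master-key', placeholder credentials,
# and a REMOTE_SET configuration name (TEAM is omitted here).
from libcloud.compute.providers import get_driver as compute_driver
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.storage.providers import get_driver as storage_driver
from libcloud.storage.types import Provider as StorageProvider

from geofront.backends.cloud import (CloudKeyStore, CloudMasterKeyStore,
                                     CloudRemoteSet)

compute = compute_driver(ComputeProvider.EC2)('access key id', 'secret key')
storage = storage_driver(StorageProvider.S3)('access key id', 'secret key')

REMOTE_SET = CloudRemoteSet(compute)
KEY_STORE = CloudKeyStore(compute)
MASTER_KEY_STORE = CloudMasterKeyStore(
    storage,
    storage.get_container('geofront-master-key'),
    'id_rsa'
)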
/geofront/team.py:
--------------------------------------------------------------------------------
1 | """:mod:`geofront.team` --- Team authentication
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | Geofront doesn't force you to manage team members by yourself.
5 | Instead it hides how to manage team members, and offers :class:`Team`,
6 | the layering interface for implementing a custom team data provider
7 | e.g. :class:`~.backends.github.GitHubOrganization`.
8 |
9 | It is theoretically possible to implement a straightforward RDBMS-backed
10 | team provider, but we rather recommend adapting your existing team data
11 | instead e.g. `GitHub organization`__, `Google Apps organization`__,
12 | `Bitbucket team`__.
13 |
14 | __ https://github.com/blog/674-introducing-organizations
15 | __ https://support.google.com/a/answer/182433?hl=en
16 | __ http://blog.bitbucket.org/2012/05/30/bitbucket-teams/
17 |
18 | """
19 | from typing import AbstractSet, Hashable, Mapping
20 |
21 | from typeguard import typechecked
22 |
23 | from .identity import Identity
24 |
25 | __all__ = ('AuthenticationContinuation', 'AuthenticationError',
26 |            'GroupSet', 'Team')
27 |
28 |
29 | #: The type to represent a set of groups.  Group sets consist of group
30 | #: identifiers.  Group identifiers are usually strings, but can be anything
31 | #: hashable.
32 | #:
33 | #: Alias of :class:`~typing.AbstractSet`\ [:class:`~typing.Hashable`].
34 | #:
35 | #: .. versionadded:: 0.4.0
36 | GroupSet = AbstractSet[Hashable]
37 |
38 |
39 | class AuthenticationContinuation:
40 |     """The continuation value for the process between
41 |     :meth:`Team.request_authentication()` and :meth:`Team.authenticate()`.
42 |
43 |     It is created by the :meth:`Team.request_authentication()` method,
44 |     and holds the following two attributes:
45 |
46 |     .. attribute:: next_url
47 |
48 |        (:class:`str`) The url to direct the authenticator to.
49 |
50 |     .. attribute:: state
51 |
52 |        The arbitrary value to be passed to :meth:`Team.authenticate()`
53 |        method's ``state`` parameter.
54 |
55 |        It can be used for passing an arbitrary nonce, a request token, etc.
56 |
57 |        It has to be picklable.
58 |
59 |     .. versionadded:: 0.3.0
60 |
61 |     """
62 |
63 |     @typechecked
64 |     def __init__(self, next_url: str, state) -> None:
65 |         self.next_url = next_url
66 |         self.state = state
67 |
68 |     def __repr__(self):
69 |         return '{0.__module__}.{0.__qualname__}({1!r}, {2!r})'.format(
70 |             type(self), self.next_url, self.state
71 |         )
72 |
73 |
74 | class Team:
75 |     """Backend interface for team membership authentication.
76 |
77 |     The authorization process consists of three steps (and therefore every
78 |     backend subclass has to implement these three methods):
79 |
80 |     1. :meth:`request_authentication()` makes the url to interact with
81 |        the owner of the identity to authenticate, i.e. the url of the login
82 |        web page of the backend service.
83 |     2. :meth:`authenticate()` finalizes authentication of the identity,
84 |        and then returns an :class:`~.identity.Identity`.
85 |     3. :meth:`authorize()` tests whether the given :class:`~.identity.Identity`
86 |        belongs to the team.  It might be a redundant step for several
87 |        backends, but is a necessary step for some backends that distinguish
88 |        identity authentication from team membership authorization.
89 |        For example, any Gmail user can prove they own their Gmail
90 |        account, but only particular users can prove their account
91 |        belongs to the configured Google Apps organization.
92 |
93 |     """
94 |
95 |     @typechecked
96 |     def request_authentication(
97 |         self, redirect_url: str
98 |     ) -> AuthenticationContinuation:
99 |         """First step of the authentication process, preparing the "sign in"
100 |         interaction with the owner.  It typically returns a url to
101 |         the login web page.
102 |
103 |         :param redirect_url: a url that the owner's browser has to redirect to
104 |                              after the "sign in" interaction finishes
105 |         :type redirect_url: :class:`str`
106 |         :return: a url to the web page to interact with the owner
107 |                  in their browser
108 |         :rtype: :class:`AuthenticationContinuation`
109 |
110 |         .. versionchanged:: 0.3.0
111 |            The ``auth_nonce`` parameter was removed.  Instead, the method now
112 |            returns an :class:`AuthenticationContinuation` value so that state
113 |            more general than a simple ``auth_nonce`` can be shared between
114 |            :meth:`request_authentication()` and :meth:`authenticate()`.
115 |            If an arbitrary nonce is needed, the :meth:`request_authentication()`
116 |            method has to generate one by itself.
117 |
118 |         """
119 |         raise NotImplementedError('request_authentication() method has to '
120 |                                   'be implemented')
121 |
122 |     @typechecked
123 |     def authenticate(
124 |         self,
125 |         state,
126 |         requested_redirect_url: str,
127 |         wsgi_environ: Mapping[str, object]
128 |     ) -> Identity:
129 |         """Second step of the authentication process, creating a verification
130 |         token for the identity.  The token is used by the :meth:`authorize()`
131 |         method, and by the key store as well (if available).
132 |
133 |         :param state: :attr:`AuthenticationContinuation.state` value
134 |                       returned by the :meth:`request_authentication()` method
135 |         :param requested_redirect_url: a url that was passed to
136 |                                        :meth:`request_authentication()`'s
137 |                                        ``redirect_url`` parameter
138 |         :type requested_redirect_url: :class:`str`
139 |         :param wsgi_environ: forwarded wsgi environ dictionary
140 |         :type wsgi_environ: :class:`~typing.Mapping`\ [:class:`str`,
141 |                             :class:`object`]
142 |         :return: an identity which contains a verification token
143 |         :rtype: :class:`~.identity.Identity`
144 |         :raise geofront.team.AuthenticationError:
145 |             when something goes wrong, e.g. network errors,
146 |             or the user failed to verify their ownership
147 |
148 |         .. versionchanged:: 0.3.0
149 |            The ``auth_nonce`` parameter was replaced by the more general
150 |            ``state`` parameter.  The new parameter no longer has type
151 |            constraints, so that it can be any value even if it's not a :class:`str`.
152 |
153 |         """
154 |         raise NotImplementedError('authenticate() method has to '
155 |                                   'be implemented')
156 |
157 |     @typechecked
158 |     def authorize(self, identity: Identity) -> bool:
159 |         """The last step of the authentication process.
160 |         Test whether the given ``identity`` belongs to the team.
161 |
162 |         Note that it can be called every time the owner communicates with
163 |         the Geofront server, outside of the authentication process.
164 |
165 |         :param identity: the identity to authorize
166 |         :type identity: :class:`~.identity.Identity`
167 |         :return: :const:`True` only if the ``identity`` is a member of the team
168 |         :rtype: :class:`bool`
169 |
170 |         """
171 |         raise NotImplementedError('authorize() method has to be implemented')
172 |
173 |     @typechecked
174 |     def list_groups(self, identity: Identity) -> GroupSet:
175 |         """List all the groups that the given ``identity`` belongs to.
176 |         Any hashable value can be an element to represent a group e.g.::
177 |
178 |             {1, 4, 9}
179 |
180 |         Or::
181 |
182 |             {'owners', 'programmers'}
183 |
184 |         Whatever values the set consists of, they will be referred to by
185 |         :class:`~.remote.Remote` objects.
186 |
187 |         Some team implementations might not have a concept like groups.
188 |         It's okay to always return an empty set then.
189 |
190 |         :param identity: the identity whose groups to list
191 |         :type identity: :class:`~.identity.Identity`
192 |         :return: the set of groups associated with the ``identity``
193 |         :rtype: :const:`GroupSet`
194 |
195 |         .. versionadded:: 0.2.0
196 |
197 |         """
198 |         raise NotImplementedError('list_groups() method has to be implemented')
199 |
200 |
201 | class AuthenticationError(Exception):
202 |     """Authentication exception raised when the authentication process
203 |     has trouble, including network problems.
204 |
205 |     .. todo:: Exception hierarchy is needed.
206 |
207 |     """
--------------------------------------------------------------------------------
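# --- Illustrative sketch, not a file from the repository ---------------------
# A deliberately trivial Team backend showing how the three steps documented
# above fit together.  It trusts a hypothetical REMOTE_USER value set by the
# web server in the WSGI environ; real backends are e.g. GitHubOrganization
# or BitbucketTeam.
from typing import Mapping

from geofront.identity import Identity
from geofront.team import (AuthenticationContinuation, AuthenticationError,
                           GroupSet, Team)


class EveryoneTeam(Team):

    def request_authentication(
        self, redirect_url: str
    ) -> AuthenticationContinuation:
        # Step 1: there is no external login page, so send the owner's
        # browser straight back to the redirect url; no state is needed.
        return AuthenticationContinuation(redirect_url, None)

    def authenticate(
        self,
        state,
        requested_redirect_url: str,
        wsgi_environ: Mapping[str, object]
    ) -> Identity:
        # Step 2: derive the identity from the forwarded WSGI environ.
        user = wsgi_environ.get('REMOTE_USER')
        if not user:
            raise AuthenticationError('REMOTE_USER is not set')
        return Identity(type(self), user)

    def authorize(self, identity: Identity) -> bool:
        # Step 3: every authenticated identity of this team type is a member.
        return issubclass(identity.team_type, type(self))

    def list_groups(self, identity: Identity) -> GroupSet:
        # This backend has no concept of groups.
        return frozenset()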
network errors, 146 | the user failed to verify their ownership 147 | 148 | .. versionchanged:: 0.3.0 149 | The ``auth_nonce`` parameter was replaced by more general ``state`` 150 | parameter. The new parameter has no longer type constraints 151 | so that it can be any value even if it's not a :class:`str`. 152 | 153 | """ 154 | raise NotImplementedError('authenticate() method has to ' 155 | 'be implemented') 156 | 157 | @typechecked 158 | def authorize(self, identity: Identity) -> bool: 159 | """The last step of authentication process. 160 | Test whether the given ``identity`` belongs to the team. 161 | 162 | Note that it can be called every time the owner communicates with 163 | Geofront server, out of authentication process. 164 | 165 | :param identity: the identity to authorize 166 | :type identity: :class:`~.identity.Identity` 167 | :return: :const:`True` only if the ``identity`` is a member of the team 168 | :rtype: :class:`bool` 169 | 170 | """ 171 | raise NotImplementedError('authorize() method has to be implemented') 172 | 173 | @typechecked 174 | def list_groups(self, identity: Identity) -> GroupSet: 175 | """List the all groups that the given ``identity`` belongs to. 176 | Any hashable value can be an element to represent a group e.g.:: 177 | 178 | {1, 4, 9} 179 | 180 | Or:: 181 | 182 | {'owners', 'programmers'} 183 | 184 | Whatever value the set consists of these would be referred by 185 | :class:`~.remote.Remote` objects. 186 | 187 | Some team implementations might not have a concept like groups. 188 | It's okay to return always an empty set then. 189 | 190 | :param identity: the identity to list his/her groups 191 | :type identity: :class:`~.identity.Identity` 192 | :return: the set of groups associated with the ``identity`` 193 | :rtype: :const:`GroupSet` 194 | 195 | .. versionadded:: 0.2.0 196 | 197 | """ 198 | raise NotImplementedError('list_groups() method has to be implemented') 199 | 200 | 201 | class AuthenticationError(Exception): 202 | """Authentication exception which rise when the authentication process 203 | has trouble including network problems. 204 | 205 | .. todo:: Exception hierarchy is needed. 206 | 207 | """ 208 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import threading 4 | 5 | from paramiko.pkey import PKey 6 | from paramiko.rsakey import RSAKey 7 | from paramiko.sftp_client import SFTPClient 8 | from paramiko.transport import Transport 9 | from pytest import fixture, yield_fixture 10 | 11 | from .sftpd import start_server 12 | from geofront import server 13 | from geofront.keystore import format_openssh_pubkey 14 | 15 | 16 | # By default it's a minute, but a minute is enough to make the test suite 17 | # very slow. For faster unit testing we shorten this constant. 
18 | server.AUTHORIZATION_TIMEOUT = datetime.timedelta(seconds=5) 19 | 20 | 21 | def env_default(env): 22 | return {'default': os.environ[env]} if env in os.environ else {} 23 | 24 | 25 | def pytest_addoption(parser): 26 | parser.addoption('--sshd-port-min', 27 | metavar='PORT', 28 | type=int, 29 | default=12220, 30 | help='the minimum unused port number [%default(s)]') 31 | parser.addoption('--sshd-port-max', 32 | metavar='PORT', 33 | type=int, 34 | default=12399, 35 | help='the maximum unused port number [%default(s)]') 36 | parser.addoption('--sshd-state-timeout', 37 | metavar='SECONDS', 38 | type=int, 39 | default=30, 40 | help='the maximum seconds to wait for satisfying ' 41 | 'condition of sshd server state [%default(s)]') 42 | parser.addoption('--redis-host', 43 | metavar='HOSTNAME', 44 | help='redis host', 45 | **env_default('REDIS_HOST')) 46 | parser.addoption('--redis-port', 47 | metavar='PORT', 48 | type=int, 49 | default=6379, 50 | help='redis port [%default(s)]') 51 | parser.addoption('--redis-password', 52 | metavar='PASSWORD', 53 | default=None, 54 | help='redis password') 55 | parser.addoption('--redis-db', 56 | metavar='DB', 57 | type=int, 58 | default=1, 59 | help='redis db number [%(default)s]') 60 | parser.addoption('--postgresql-host', 61 | metavar='HOSTNAME', 62 | help='postgresql database server host [%(default)s]', 63 | **env_default('PGHOST')) 64 | parser.addoption('--postgresql-port', 65 | metavar='PORT', 66 | type=int, 67 | help='postgresql database server port [%(default)s]', 68 | **env_default('PGPORT')) 69 | parser.addoption('--postgresql-user', 70 | metavar='USER', 71 | help='postgresql user [%(default)s]', 72 | **env_default('PGUSER')) 73 | parser.addoption('--postgresql-password', 74 | metavar='PASSWORD', 75 | help='postgresql user password [%(default)s]', 76 | **env_default('PGPASSWORD')) 77 | parser.addoption('--postgresql-database', 78 | metavar='DBNAME', 79 | help='postgresql database name [%(default)s]', 80 | **env_default('PGDATABASE')) 81 | parser.addoption('--mysql-host', 82 | metavar='HOSTNAME', 83 | help='mysql database server host [%(default)s]', 84 | **env_default('MYSQL_HOST')) 85 | parser.addoption('--mysql-port', 86 | metavar='PORT', 87 | type=int, 88 | help='mysql database server port [%(default)s]', 89 | **env_default('MYSQL_PORT')) 90 | parser.addoption('--mysql-user', 91 | metavar='USER', 92 | help='mysql user [%(default)s]', 93 | **env_default('MYSQL_USER')) 94 | parser.addoption('--mysql-passwd', 95 | metavar='PASSWD', 96 | help='mysql user password [%(default)s]', 97 | **env_default('MYSQL_PASSWD')) 98 | parser.addoption('--mysql-database', 99 | metavar='DATABASE', 100 | help='mysql database name [%(default)s]', 101 | **env_default('MYSQL_DATABASE')) 102 | parser.addoption('--github-access-token', 103 | metavar='TOKEN', 104 | help='github access token for key store test (caution: ' 105 | 'it will remove all ssh keys of the account)', 106 | **env_default('GITHUB_ACCESS_TOKEN')) 107 | parser.addoption('--github-org-login', 108 | metavar='LOGIN', 109 | help='github org login for team test', 110 | **env_default('GITHUB_ORG_LOGIN')) 111 | parser.addoption('--github-team-slugs', 112 | metavar='SLUGS', 113 | help='space-separated github team slugs for group ' 114 | 'listing test', 115 | **env_default('GITHUB_TEAM_SLUGS')) 116 | parser.addoption('--aws-access-key', 117 | metavar='KEY', 118 | help='aws access key for master key store test', 119 | **env_default('AWS_ACCESS_KEY')) 120 | parser.addoption('--aws-secret-key', 121 | metavar='SECRET', 
122 | help='aws secret key for master key store test', 123 | **env_default('AWS_SECRET_KEY')) 124 | parser.addoption('--aws-s3-bucket', 125 | metavar='BUCKET', 126 | help='aws s3 bucket to be used for master key store test', 127 | **env_default('AWS_S3_BUCKET')) 128 | parser.addoption('--bitbucket-access-token', 129 | metavar='TOKEN', 130 | help='bitbucket access token for key store test ' 131 | '(caution: it will remove all ssh keys of the ' 132 | 'account)', 133 | **env_default('BITBUCKET_ACCESS_TOKEN')) 134 | parser.addoption('--bitbucket-team-username', 135 | metavar='USERNAME', 136 | help='bitbucket team username for team test', 137 | **env_default('BITBUCKET_TEAM_USERNAME')) 138 | parser.addoption('--bitbucket-group-slugs', 139 | metavar='SLUGS', 140 | help='space-separated bitbucket group slugs for group ' 141 | 'listing test', 142 | **env_default('BITBUCKET_GROUP_SLUGS')) 143 | 144 | 145 | def pytest_assertrepr_compare(op, left, right): 146 | if op == '==' and isinstance(left, PKey) and isinstance(right, PKey): 147 | left_key = format_openssh_pubkey(left) 148 | right_key = format_openssh_pubkey(right) 149 | return [ 150 | '{!r} == {!r}'.format(left, right), 151 | ' {} != {}'.format(left_key, right_key) 152 | ] 153 | 154 | 155 | used_port = 0 156 | 157 | 158 | @yield_fixture 159 | def fx_sftpd(request, tmpdir): 160 | global used_port 161 | getopt = request.config.getoption 162 | port_min = max(used_port + 1, getopt('--sshd-port-min')) 163 | port_max = min(port_min + 2, getopt('--sshd-port-max')) 164 | used_port = port_max 165 | servers = {} 166 | for port in range(port_min, port_max + 1): 167 | path = tmpdir.mkdir(str(port)) 168 | terminated = threading.Event() 169 | thread = threading.Thread( 170 | target=start_server, 171 | args=(str(path), '127.0.0.1', port, terminated) 172 | ) 173 | servers[port] = thread, path, terminated 174 | yield servers 175 | for port, (th, _, ev) in servers.items(): 176 | ev.set() 177 | for port, (th, _, ev) in servers.items(): 178 | if th.is_alive(): 179 | th.join(10) 180 | assert not th.is_alive(), '{!r} (for port #{}) is still alive'.format( 181 | th, port 182 | ) 183 | 184 | 185 | @fixture 186 | def fx_authorized_keys(): 187 | return [RSAKey.generate(1024) for _ in range(5)] 188 | 189 | 190 | @yield_fixture 191 | def fx_authorized_sftp(fx_sftpd, fx_authorized_keys): 192 | port, (thread, path, ev) = fx_sftpd.popitem() 193 | thread.start() 194 | key = RSAKey.generate(1024) 195 | dot_ssh = path.mkdir('.ssh') 196 | with dot_ssh.join('authorized_keys').open('w') as f: 197 | print(format_openssh_pubkey(key), file=f) 198 | for authorized_key in fx_authorized_keys: 199 | print(format_openssh_pubkey(authorized_key), file=f) 200 | transport = Transport(('127.0.0.1', port)) 201 | transport.connect(username='user', pkey=key) 202 | sftp_client = SFTPClient.from_transport(transport) 203 | yield sftp_client, path, [key] + fx_authorized_keys 204 | sftp_client.close() 205 | transport.close() 206 | 207 | 208 | @fixture 209 | def fx_master_key(): 210 | return RSAKey.generate(1024) 211 | 212 | 213 | @fixture 214 | def fx_authorized_servers(fx_sftpd, fx_master_key): 215 | for port, (thread, path, ev) in fx_sftpd.items(): 216 | with path.mkdir('.ssh').join('authorized_keys').open('w') as f: 217 | f.write(format_openssh_pubkey(fx_master_key)) 218 | thread.start() 219 | return fx_sftpd 220 | -------------------------------------------------------------------------------- /tests/masterkey_test.py: 
-------------------------------------------------------------------------------- 1 | import datetime 2 | import os.path 3 | import time 4 | from typing import Type 5 | 6 | from paramiko.ecdsakey import ECDSAKey 7 | from paramiko.pkey import PKey 8 | from paramiko.rsakey import RSAKey 9 | from pytest import mark, raises 10 | 11 | from geofront.keystore import parse_openssh_pubkey 12 | from geofront.masterkey import (EmptyStoreError, FileSystemMasterKeyStore, 13 | KeyGenerationError, PeriodicalRenewal, 14 | TwoPhaseRenewal, 15 | generate_key, read_private_key_file, 16 | renew_master_key) 17 | from geofront.remote import Remote 18 | 19 | 20 | def test_fs_master_key_store_load(): 21 | path = os.path.join(os.path.dirname(__file__), 'test_id_rsa') 22 | s = FileSystemMasterKeyStore(path) 23 | key = s.load() 24 | assert isinstance(key, RSAKey) 25 | assert key.get_base64() == ( 26 | 'AAAAB3NzaC1yc2EAAAADAQABAAABAQC7+fDpQ9sQKIdzXvqT3TzrPp2OpUCOJtUW3k0oi' 27 | 'trqqHe1XiCke++DSpAv56poCppTj9qo3N1HyhZhSv/jH7/ejZ8NZdtvLIZGOCQZVdKNy0' 28 | 'cg7jlimrWA2s8X201Yn3hYpUrYJYbhAAuQM5flvbyBtn5/miONQ8NVimgjG6UVANVqX4W' 29 | 'H9kqdr4SBf45/+BAdenf2j5DC3xceOOW8wZfe2rOJpQ0msVxMeXExGqF9DS2E3bqOwE1C' 30 | 'MPEGYr5KZCx7IeJ/4udBuKc/gOXb8tPiTTNxtYXEBcqhBdCa/M6pEdW5LiHxxoF5b6xY9' 31 | 'q0nmi7Rn0weXK0SufhGgKrpSH+B' 32 | ) 33 | 34 | 35 | def test_fs_master_key_store_save(tmpdir): 36 | path = tmpdir.join('id_rsa') 37 | s = FileSystemMasterKeyStore(str(path)) 38 | with raises(EmptyStoreError): 39 | s.load() 40 | key = RSAKey.generate(1024) 41 | s.save(key) 42 | stored_key = s.load() 43 | assert isinstance(stored_key, RSAKey) 44 | assert stored_key.get_base64() == stored_key.get_base64() 45 | 46 | 47 | def test_read_private_key_file(): 48 | path = os.path.join(os.path.dirname(__file__), 'test_id_rsa') 49 | with open(path) as f: 50 | key = read_private_key_file(f) 51 | assert isinstance(key, RSAKey) 52 | assert key.get_base64() == ( 53 | 'AAAAB3NzaC1yc2EAAAADAQABAAABAQC7+fDpQ9sQKIdzXvqT3TzrPp2OpUCOJtUW3k0oi' 54 | 'trqqHe1XiCke++DSpAv56poCppTj9qo3N1HyhZhSv/jH7/ejZ8NZdtvLIZGOCQZVdKNy0' 55 | 'cg7jlimrWA2s8X201Yn3hYpUrYJYbhAAuQM5flvbyBtn5/miONQ8NVimgjG6UVANVqX4W' 56 | 'H9kqdr4SBf45/+BAdenf2j5DC3xceOOW8wZfe2rOJpQ0msVxMeXExGqF9DS2E3bqOwE1C' 57 | 'MPEGYr5KZCx7IeJ/4udBuKc/gOXb8tPiTTNxtYXEBcqhBdCa/M6pEdW5LiHxxoF5b6xY9' 58 | 'q0nmi7Rn0weXK0SufhGgKrpSH+B' 59 | ) 60 | 61 | 62 | def authorized_key_set(path): 63 | dotssh = path.join('.ssh') 64 | if not dotssh.isdir(): 65 | dotssh = path.mkdir('.ssh') 66 | with dotssh.join('authorized_keys').open() as f: 67 | return {parse_openssh_pubkey(line.strip()) for line in f} 68 | 69 | 70 | def test_two_phase_renewal(fx_authorized_servers, fx_master_key): 71 | remote_set = { 72 | Remote('user', '127.0.0.1', port) 73 | for port in fx_authorized_servers 74 | } 75 | old_key = fx_master_key 76 | new_key = RSAKey.generate(1024) 77 | for t, path, ev in fx_authorized_servers.values(): 78 | assert authorized_key_set(path) == {old_key} 79 | with TwoPhaseRenewal(remote_set, old_key, new_key): 80 | for t, path, ev in fx_authorized_servers.values(): 81 | assert authorized_key_set(path) == {old_key, new_key} 82 | for t, path, ev in fx_authorized_servers.values(): 83 | assert authorized_key_set(path) == {new_key} 84 | 85 | 86 | def test_two_phase_renewal_stop(fx_authorized_servers, fx_master_key): 87 | remote_set = { 88 | Remote('user', '127.0.0.1', port) 89 | for port in fx_authorized_servers 90 | } 91 | old_key = fx_master_key 92 | new_key = RSAKey.generate(1024) 93 | for t, path, ev in 
fx_authorized_servers.values(): 94 | assert authorized_key_set(path) == {old_key} 95 | SomeException = type('SomeException', (Exception,), {}) 96 | with raises(SomeException): 97 | with TwoPhaseRenewal(remote_set, old_key, new_key): 98 | for t, path, ev in fx_authorized_servers.values(): 99 | assert authorized_key_set(path) == {old_key, new_key} 100 | raise SomeException('something went wrong') 101 | for t, path, ev in fx_authorized_servers.values(): 102 | assert old_key in authorized_key_set(path) 103 | 104 | 105 | @mark.parametrize('key_type, bits', [ 106 | (RSAKey, None), 107 | (RSAKey, 1024), 108 | (RSAKey, 2048), 109 | (ECDSAKey, None), 110 | (ECDSAKey, 256), 111 | (ECDSAKey, 384), 112 | ]) 113 | def test_renew_master_key(fx_authorized_servers, fx_master_key, tmpdir, 114 | key_type: Type[PKey], bits: int): 115 | remote_set = { 116 | Remote('user', '127.0.0.1', port) 117 | for port in fx_authorized_servers 118 | } 119 | store = FileSystemMasterKeyStore(str(tmpdir.join('id_rsa'))) 120 | store.save(fx_master_key) 121 | for t, path, ev in fx_authorized_servers.values(): 122 | assert authorized_key_set(path) == {fx_master_key} 123 | new_key = renew_master_key(remote_set, store, key_type, bits) 124 | assert new_key.get_bits() == bits or bits is None 125 | assert isinstance(new_key, key_type) 126 | assert new_key != fx_master_key 127 | assert store.load() == new_key 128 | for t, path, ev in fx_authorized_servers.values(): 129 | assert authorized_key_set(path) == {new_key} 130 | 131 | 132 | class FailureTestMasterKeyStore(FileSystemMasterKeyStore): 133 | 134 | def save(self, master_key: PKey): 135 | try: 136 | self.load() 137 | except EmptyStoreError: 138 | super().save(master_key) 139 | else: 140 | raise RenewalFailure() 141 | 142 | 143 | class RenewalFailure(Exception): 144 | 145 | pass 146 | 147 | 148 | def test_renew_master_key_fail(fx_authorized_servers, fx_master_key, tmpdir): 149 | remote_set = { 150 | Remote('user', '127.0.0.1', port) 151 | for port in fx_authorized_servers 152 | } 153 | store = FailureTestMasterKeyStore(str(tmpdir.join('id_rsa'))) 154 | store.save(fx_master_key) 155 | for t, path, ev in fx_authorized_servers.values(): 156 | assert authorized_key_set(path) == {fx_master_key} 157 | with raises(RenewalFailure): 158 | renew_master_key(remote_set, store) 159 | assert store.load() == fx_master_key 160 | for t, path, ev in fx_authorized_servers.values(): 161 | assert fx_master_key in authorized_key_set(path) 162 | 163 | 164 | def wait_for(seconds: int, condition): 165 | for _ in range(seconds * 2): 166 | if condition(): 167 | break 168 | time.sleep(0.5) 169 | else: 170 | raise TimeoutError( 171 | 'failed to satisfy condition during {0} seconds'.format(seconds) 172 | ) 173 | 174 | 175 | def test_periodical_renewal(request, fx_authorized_servers, fx_master_key, 176 | tmpdir): 177 | timeout = request.config.getoption('--sshd-state-timeout') 178 | remote_set = { 179 | Remote('user', '127.0.0.1', port) 180 | for port in fx_authorized_servers 181 | } 182 | store = FileSystemMasterKeyStore(str(tmpdir.join('id_rsa'))) 183 | store.save(fx_master_key) 184 | for t, path, ev in fx_authorized_servers.values(): 185 | assert authorized_key_set(path) == {fx_master_key} 186 | p = PeriodicalRenewal(remote_set, store, datetime.timedelta(seconds=3)) 187 | assert store.load() == fx_master_key 188 | for t, path, ev in fx_authorized_servers.values(): 189 | assert fx_master_key in authorized_key_set(path) 190 | wait_for(timeout, lambda: store.load() != fx_master_key) 191 | second_key = 
store.load() 192 | assert second_key != fx_master_key 193 | for t, path, ev in fx_authorized_servers.values(): 194 | key_set = authorized_key_set(path) 195 | assert second_key in key_set 196 | wait_for(timeout, lambda: store.load() != second_key) 197 | third_key = store.load() 198 | assert third_key != fx_master_key 199 | assert third_key != second_key 200 | for t, path, ev in fx_authorized_servers.values(): 201 | key_set = authorized_key_set(path) 202 | assert third_key in key_set 203 | p.terminate() 204 | last_key = store.load() 205 | time.sleep(10) 206 | assert store.load() == last_key 207 | for t, path, ev in fx_authorized_servers.values(): 208 | assert authorized_key_set(path) == {last_key} 209 | 210 | 211 | def test_generate_key(): 212 | default_default = generate_key() 213 | assert isinstance(default_default, RSAKey) 214 | assert default_default.get_bits() == 1024 215 | rsa_default = generate_key(RSAKey) 216 | assert rsa_default.get_bits() == 1024 217 | assert isinstance(rsa_default, RSAKey) 218 | rsa_2048 = generate_key(RSAKey, 2048) 219 | assert isinstance(rsa_2048, RSAKey) 220 | assert rsa_2048.get_bits() == 2048 221 | ecdsa_default = generate_key(ECDSAKey) 222 | assert isinstance(ecdsa_default, ECDSAKey) 223 | assert ecdsa_default.get_bits() == 256 224 | ecdsa_256 = generate_key(ECDSAKey, 256) 225 | assert isinstance(ecdsa_256, ECDSAKey) 226 | assert ecdsa_256.get_bits() == 256 227 | ecdsa_384 = generate_key(ECDSAKey, 384) 228 | assert isinstance(ecdsa_384, ECDSAKey) 229 | assert ecdsa_384.get_bits() == 384 230 | ecdsa_521 = generate_key(ECDSAKey, 521) 231 | assert isinstance(ecdsa_521, ECDSAKey) 232 | assert ecdsa_521.get_bits() == 521 233 | with raises(KeyGenerationError): 234 | generate_key(RSAKey, 256) 235 | with raises(KeyGenerationError): 236 | generate_key(ECDSAKey, 1024) 237 | -------------------------------------------------------------------------------- /geofront/backends/stash.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.backends.stash` --- Bitbucket Server team and key store 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | .. versionadded:: 0.3.0 5 | 6 | Provides implementations of team and key store for Atlassian's 7 | `Bitbucket Server`_ (which was Stash). 8 | 9 | .. note:: 10 | 11 | Not to be confused with `Bitbucket Cloud`_. `As from September 22, 12 | Atlassian Stash becomes Bitbucket Server.`__ For Bitbucket Cloud, 13 | use :mod:`geofront.backends.bitbucket` module instead. 14 | 15 | .. _Bitbucket Server: https://bitbucket.org/product/server 16 | .. 
_Bitbucket Cloud: https://bitbucket.org/ 17 | __ https://twitter.com/Atlassian/status/646357289939664896 18 | 19 | """ 20 | import io 21 | import json 22 | import logging 23 | from typing import AbstractSet, Iterator, Mapping, Sequence, cast 24 | import urllib.error 25 | import urllib.request 26 | 27 | from oauthlib.oauth1 import SIGNATURE_RSA, Client 28 | from paramiko.pkey import PKey 29 | from typeguard import typechecked 30 | from werkzeug.datastructures import ImmutableMultiDict 31 | from werkzeug.urls import url_decode_stream, url_encode 32 | from werkzeug.wrappers import Request 33 | 34 | from ..identity import Identity 35 | from ..keystore import (DuplicatePublicKeyError, KeyStore, 36 | format_openssh_pubkey, parse_openssh_pubkey) 37 | from ..team import AuthenticationContinuation, AuthenticationError, Team 38 | 39 | __all__ = 'StashKeyStore', 'StashTeam' 40 | 41 | 42 | class StashTeam(Team): 43 | """Authenticate team membership through Bitbucket Server (which was 44 | Stash), and authorize to access Bitbucket Server key store. 45 | 46 | :param server_url: the base url of the bitbucket server (stash server) 47 | :type server_url: :class:`str` 48 | :param consumer_key: the consumer key (client id) 49 | :type consumer_key: :class:`str` 50 | 51 | """ 52 | 53 | AUTHORIZE_URL = '{0.server_url}/plugins/servlet/oauth/authorize' 54 | REQUEST_TOKEN_URL = '{0.server_url}/plugins/servlet/oauth/request-token' 55 | ACCESS_TOKEN_URL = '{0.server_url}/plugins/servlet/oauth/access-token' 56 | USER_URL = '{0.server_url}/plugins/servlet/applinks/whoami' 57 | USER_PROFILE_URL = '{0.server_url}/users/{1}' 58 | 59 | @typechecked 60 | def __init__(self, 61 | server_url: str, 62 | consumer_key: str, 63 | rsa_key: str) -> None: 64 | self.server_url = server_url.rstrip('/') 65 | self.consumer_key = consumer_key 66 | self.rsa_key = rsa_key 67 | 68 | def create_client(self, **kwargs): 69 | return Client( 70 | self.consumer_key, 71 | signature_method=SIGNATURE_RSA, 72 | rsa_key=self.rsa_key, 73 | **kwargs 74 | ) 75 | 76 | @typechecked 77 | def request(self, method: str, url: str, body=None, headers=None, 78 | **client_options): 79 | client = self.create_client(**client_options) 80 | url, headers, body = client.sign(url, method, body, headers) 81 | request = urllib.request.Request(url, body, headers, method=method) 82 | try: 83 | return urllib.request.urlopen(request) 84 | except urllib.error.HTTPError as e: 85 | logger = logging.getLogger(__name__ + '.StashTeam.request') 86 | logger.exception( 87 | '[%s %s] %s\nrequest headers: %r\nrequest body: %r\n' 88 | 'client_options: %r\nresponse status: %r\n' 89 | 'response headers: %r\nresponse body: %r', 90 | method, url, e, headers, body, client_options, 91 | e.code, dict(e.headers), e.read() 92 | ) 93 | raise 94 | 95 | @typechecked 96 | def request_authentication( 97 | self, redirect_url: str 98 | ) -> AuthenticationContinuation: 99 | response = self.request('POST', self.REQUEST_TOKEN_URL.format(self)) 100 | request_token = url_decode_stream(response) 101 | response.close() 102 | return AuthenticationContinuation( 103 | self.AUTHORIZE_URL.format(self) + '?' 
+ url_encode({ 104 | 'oauth_token': request_token['oauth_token'], 105 | 'oauth_callback': redirect_url 106 | }), 107 | (request_token['oauth_token'], request_token['oauth_token_secret']) 108 | ) 109 | 110 | @typechecked 111 | def authenticate(self, 112 | state, 113 | requested_redirect_url: str, 114 | wsgi_environ: Mapping[str, object]) -> Identity: 115 | logger = logging.getLogger(__name__ + '.StashTeam.authenticate') 116 | logger.debug('state = %r', state) 117 | try: 118 | oauth_token, oauth_token_secret = state 119 | except ValueError: 120 | raise AuthenticationError() 121 | req = Request(wsgi_environ, populate_request=False, shallow=True) 122 | args = cast(ImmutableMultiDict, req.args) 123 | logger.debug('req.args = %r', args) 124 | if args.get('oauth_token') != oauth_token: 125 | raise AuthenticationError() 126 | response = self.request( 127 | 'POST', self.ACCESS_TOKEN_URL.format(self), 128 | resource_owner_key=oauth_token, 129 | resource_owner_secret=oauth_token_secret 130 | ) 131 | access_token = url_decode_stream(response) 132 | logger.debug('access_token = %r', access_token) 133 | response.close() 134 | response = self.request( 135 | 'GET', self.USER_URL.format(self), 136 | resource_owner_key=access_token['oauth_token'], 137 | resource_owner_secret=access_token['oauth_token_secret'] 138 | ) 139 | whoami = response.read().decode('utf-8') 140 | return Identity( 141 | type(self), 142 | self.USER_PROFILE_URL.format(self, whoami), 143 | (access_token['oauth_token'], access_token['oauth_token_secret']) 144 | ) 145 | 146 | def authorize(self, identity: Identity) -> bool: 147 | if not issubclass(identity.team_type, type(self)): 148 | return False 149 | return cast(str, identity.identifier).startswith(self.server_url) 150 | 151 | def list_groups(self, identity: Identity): 152 | return frozenset() 153 | 154 | 155 | class StashKeyStore(KeyStore): 156 | """Use Bitbucket Server (Stash) account's public keys as key store.""" 157 | 158 | REGISTER_URL = '{0.server_url}/rest/ssh/1.0/keys' 159 | LIST_URL = '{0.server_url}/rest/ssh/1.0/keys?start={1}' 160 | DEREGISTER_URL = '{0.server_url}/rest/ssh/1.0/keys/{1}' 161 | 162 | @typechecked 163 | def __init__(self, team: StashTeam) -> None: 164 | self.team = team 165 | 166 | def request(self, identity, *args, **kwargs): 167 | token, token_secret = identity.access_token 168 | return self.team.request( 169 | *args, 170 | resource_owner_key=token, 171 | resource_owner_secret=token_secret, 172 | **kwargs 173 | ) 174 | 175 | @typechecked 176 | def request_list( 177 | self, identity: Identity 178 | ) -> Iterator[Sequence[Mapping[str, object]]]: 179 | team = self.team 180 | if not (isinstance(team, identity.team_type) and 181 | cast(str, identity.identifier).startswith(team.server_url)): 182 | return 183 | start = 0 184 | while True: 185 | response = self.request( 186 | identity, 187 | 'GET', 188 | self.LIST_URL.format(self.team, start) 189 | ) 190 | assert response.code == 200 191 | payload = json.load(io.TextIOWrapper(response, encoding='utf-8')) 192 | response.close() 193 | yield from payload['values'] 194 | if payload['isLastPage']: 195 | break 196 | start = payload['nextPageStart'] 197 | 198 | @typechecked 199 | def register(self, identity: Identity, public_key: PKey) -> None: 200 | team = self.team 201 | if not (isinstance(team, identity.team_type) and 202 | cast(str, identity.identifier).startswith(team.server_url)): 203 | return 204 | data = json.dumps({ 205 | 'text': format_openssh_pubkey(public_key) 206 | }) 207 | try: 208 | self.request( 209 | 
identity, 'POST', self.REGISTER_URL.format(self.team), data, 210 | headers={'Content-Type': 'application/json'} 211 | ) 212 | except urllib.error.HTTPError as e: 213 | if e.code == 409: 214 | errors = json.loads(e.read().decode('utf-8'))['errors'] 215 | raise DuplicatePublicKeyError(errors[0]['message']) 216 | raise 217 | 218 | @typechecked 219 | def list_keys(self, identity: Identity) -> AbstractSet[PKey]: 220 | logger = logging.getLogger(__name__ + '.StashKeyStore.list_keys') 221 | keys = self.request_list(identity) 222 | result = set() 223 | for key in keys: 224 | try: 225 | pubkey = parse_openssh_pubkey(key['text']) 226 | except Exception as e: 227 | logger.exception(str(e)) 228 | continue 229 | result.add(pubkey) 230 | return result 231 | 232 | @typechecked 233 | def deregister(self, identity: Identity, public_key: PKey) -> None: 234 | keys = self.request_list(identity) 235 | for key in keys: 236 | if parse_openssh_pubkey(key['text']) == public_key: 237 | response = self.request( 238 | identity, 239 | 'DELETE', 240 | self.DEREGISTER_URL.format(self.team, key['id']) 241 | ) 242 | assert response.code == 204 243 | break 244 | -------------------------------------------------------------------------------- /geofront/backends/oauth.py: -------------------------------------------------------------------------------- 1 | """:mod:`geofront.backends.oauth` --- Team backend bases for OAuth 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | .. versionadded:: 0.4.0 5 | 6 | """ 7 | import contextlib 8 | import http.client 9 | import io 10 | import json 11 | import logging 12 | import os 13 | import shutil 14 | from typing import IO, TYPE_CHECKING, Mapping, cast 15 | import urllib.error 16 | import urllib.request 17 | 18 | from typeguard import typechecked 19 | from werkzeug.datastructures import ImmutableMultiDict 20 | from werkzeug.http import parse_options_header 21 | from werkzeug.urls import url_decode_stream, url_encode 22 | from werkzeug.wrappers import Request 23 | 24 | from ..identity import Identity 25 | from ..team import AuthenticationContinuation, AuthenticationError, Team 26 | 27 | __all__ = 'OAuth2Team', 'request' 28 | 29 | 30 | def request(access_token, url: str, method: str='GET', data: bytes=None): 31 | """Make a request to GitHub API, and then return the parsed JSON result. 32 | 33 | :param access_token: api access token string, 34 | or :class:`~geofront.identity.Identity` instance 35 | :type access_token: :class:`str`, :class:`~geofront.identity.Identity` 36 | :param url: the api url to request 37 | :type url: :class:`str` 38 | :param method: an optional http method. 
``'GET'`` by default 39 | :type method: :class:`str` 40 | :param data: an optional content body 41 | :type data: :class:`bytes` 42 | 43 | """ 44 | logger = logging.getLogger(__name__ + '.request') 45 | if isinstance(access_token, Identity): 46 | access_token = access_token.access_token 47 | logger.debug('access_token: %r', access_token) 48 | req = urllib.request.Request( 49 | url, 50 | headers={ 51 | 'Authorization': 'Bearer ' + access_token, 52 | 'Accept': 'application/json' 53 | }, 54 | method=method, 55 | data=data 56 | ) 57 | try: 58 | with contextlib.closing(urllib.request.urlopen(req)) as response: 59 | assert isinstance(response, http.client.HTTPResponse), \ 60 | 'isinstance(response, {0.__module__}.{0.__qualname__})'.format( 61 | type(response)) 62 | headers = getattr(response, 'headers') # workaround mypy 63 | content_type = headers.get('Content-Type') 64 | mimetype, options = parse_options_header(content_type) 65 | assert mimetype == 'application/json' or method == 'DELETE', \ 66 | 'Content-Type of {} is not application/json but {}'.format( 67 | url, 68 | content_type 69 | ) 70 | charset = options.get('charset', 'utf-8') 71 | io_wrapper = io.TextIOWrapper(cast(IO[bytes], response), 72 | encoding=charset) 73 | if logger.isEnabledFor(logging.DEBUG): 74 | read = io_wrapper.read() 75 | if not TYPE_CHECKING: 76 | logger.debug( 77 | 'HTTP/%d.%d %d %s\n%s\n\n%s', 78 | response.version // 10, 79 | response.version % 10, 80 | response.code, 81 | response.reason, 82 | '\n'.join( 83 | '{}: {}'.format(k, v) 84 | for k, v in response.headers.items() 85 | ), 86 | read 87 | ) 88 | if method == 'DELETE': 89 | return 90 | return json.loads(read) 91 | else: 92 | if method == 'DELETE': 93 | io_wrapper.read() 94 | return 95 | return json.load(io_wrapper) 96 | except urllib.error.HTTPError as e: 97 | if logger.isEnabledFor(logging.DEBUG): 98 | f = io.BytesIO() 99 | shutil.copyfileobj(e, f) 100 | if not TYPE_CHECKING: 101 | logger.debug( 102 | 'HTTP/%d.%d %d %s\n%s\n\n%r', 103 | e.version // 10, 104 | e.version % 10, 105 | e.code, 106 | e.reason, 107 | '\n'.join( 108 | '{}: {}'.format(k, v) 109 | for k, v in e.headers.items() 110 | ), 111 | f.getvalue() 112 | ) 113 | f.seek(0) 114 | logger.debug(str(e), exc_info=True) 115 | make_error = urllib.error.HTTPError # workaround mypy 116 | restored = make_error( # type: ignore 117 | e.geturl(), e.code, e.reason, e.headers, f 118 | ) 119 | raise restored from e 120 | else: 121 | raise 122 | 123 | 124 | class OAuth2Team(Team): 125 | """Base implementation of :class:`~geofront.team.Team` for OAuth 2. 126 | Every subclass has to implement the following attributes and methods: 127 | 128 | - :attr:`authorize_url` attribute 129 | - :attr:`access_token_url` attribute 130 | - :attr:`scope` attribute 131 | - :meth:`determine_identity()` method 132 | - :meth:`~geofront.team.Team.authorize()` method 133 | 134 | """ 135 | 136 | #: (:class:`str`) The OAuth 2 authorization url. 137 | #: 138 | #: .. note:: 139 | #: 140 | #: Concrete subclass has to implement this method. 141 | authorize_url = NotImplemented 142 | 143 | #: (:class:`str`) The scope string for OAuth 2 authorization. 144 | #: 145 | #: .. note:: 146 | #: 147 | #: Concrete subclass has to implement this method. 148 | authorize_scope = NotImplemented 149 | 150 | #: (:class:`str`) The url to issue an OAuth 2 access token. 151 | #: 152 | #: .. note:: 153 | #: 154 | #: Concrete subclass has to implement this method. 
155 | access_token_url = NotImplemented 156 | 157 | #: (:class:`str`) The message template which is used when the authenticated 158 | #: identity is unauthorized. There's a predefined default message, but 159 | #: it can be overridden by subclass. The two keywords are available: 160 | #: 161 | #: ``identity`` 162 | #: (:class:`~geofront.identity.Identity`) The authenticated identity. 163 | #: 164 | #: ``team`` 165 | #: (:class:`OAuth2Team`) The actual team object. 166 | unauthorized_identity_message_format = \ 167 | 'identity {identity} is unauthorized' 168 | 169 | logger = logging.getLogger(__name__ + '.OAuth2Team') 170 | 171 | @typechecked 172 | def __init__(self, client_id: str, client_secret: str) -> None: 173 | self.client_id = client_id 174 | self.client_secret = client_secret 175 | 176 | def determine_identity(self, access_token: str) -> Identity: 177 | """Determine :class:`~geofront.identity.Identity` from the given 178 | access token. 179 | 180 | .. note:: 181 | 182 | Concrete subclass has to implement this method. 183 | 184 | """ 185 | raise NotImplementedError( 186 | 'determine_identity() method has to be implemented' 187 | ) 188 | 189 | @typechecked 190 | def request_authentication( 191 | self, redirect_url: str 192 | ) -> AuthenticationContinuation: 193 | auth_nonce = ''.join(map('{:02x}'.format, os.urandom(16))) 194 | query = url_encode({ 195 | 'client_id': self.client_id, 196 | 'redirect_uri': redirect_url, 197 | 'scope': self.authorize_scope, 198 | 'state': auth_nonce, 199 | 'response_type': 'code', 200 | }) 201 | authorize_url = '{}?{}'.format(self.authorize_url, query) 202 | return AuthenticationContinuation(authorize_url, auth_nonce) 203 | 204 | @typechecked 205 | def authenticate( 206 | self, 207 | state, 208 | requested_redirect_url: str, 209 | wsgi_environ: Mapping[str, object] 210 | ) -> Identity: 211 | logger = self.logger.getChild('authenticate') 212 | req = Request(wsgi_environ, populate_request=False, shallow=True) 213 | args = cast(ImmutableMultiDict, req.args) 214 | try: 215 | code = args['code'] 216 | if args['state'] != state: 217 | raise AuthenticationError() 218 | except KeyError: 219 | raise AuthenticationError() 220 | data = url_encode({ 221 | 'client_id': self.client_id, 222 | 'client_secret': self.client_secret, 223 | 'code': code, 224 | 'redirect_uri': requested_redirect_url, 225 | 'grant_type': 'authorization_code', 226 | }).encode() 227 | try: 228 | response = urllib.request.urlopen(self.access_token_url, data) 229 | except urllib.error.HTTPError as e: 230 | logger.debug('Response of POST %s (with/ %r): %s\n%s', 231 | self.access_token_url, data, e.code, e.read()) 232 | raise 233 | assert isinstance(response, http.client.HTTPResponse), \ 234 | 'isinstance(response, {0.__module__}.{0.__qualname__})'.format( 235 | type(response)) 236 | headers = getattr(response, 'headers') # workaround mypy 237 | content_type = headers['Content-Type'] 238 | mimetype, options = parse_options_header(content_type) 239 | if mimetype == 'application/x-www-form-urlencoded': 240 | token_data = url_decode_stream(response) 241 | elif mimetype == 'application/json': 242 | charset = options.get('charset', 'utf-8') 243 | token_data = json.load( 244 | io.TextIOWrapper(cast(IO[bytes], response), encoding=charset) 245 | ) 246 | else: 247 | response.close() 248 | raise AuthenticationError( 249 | '{} sent unsupported content type: {}'.format( 250 | self.access_token_url, 251 | content_type 252 | ) 253 | ) 254 | response.close() 255 | identity = 
self.determine_identity(token_data['access_token']) 256 | if self.authorize(identity): 257 | return identity 258 | raise AuthenticationError( 259 | self.unauthorized_identity_message_format.format( 260 | identity=identity, team=self 261 | ) 262 | ) 263 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Geofront documentation build configuration file, created by 5 | # sphinx-quickstart on Sun Mar 30 18:52:45 2014. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | import os.path 19 | 20 | # If extensions (or modules to document with autodoc) are in another directory, 21 | # add these directories to sys.path here. If the directory is relative to the 22 | # documentation root, use os.path.abspath to make it absolute, like shown here. 23 | sys.path.insert( 24 | 0, 25 | os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) 26 | ) 27 | 28 | from geofront.version import VERSION 29 | 30 | # -- General configuration ------------------------------------------------ 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | needs_sphinx = '1.2' 34 | 35 | # Add any Sphinx extension module names here, as strings. They can be 36 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 | # ones. 38 | extensions = [ 39 | 'sphinx.ext.autodoc', 40 | 'sphinx.ext.intersphinx', 41 | 'sphinx.ext.todo', 42 | 'sphinx.ext.extlinks', 43 | 'sphinxcontrib.httpdomain', 44 | 'sphinxcontrib.autohttp.flask', 45 | 'sphinxcontrib.autoprogram' 46 | ] 47 | 48 | # Add any paths that contain templates here, relative to this directory. 49 | templates_path = ['_templates'] 50 | 51 | # The suffix of source filenames. 52 | source_suffix = '.rst' 53 | 54 | # The encoding of source files. 55 | #source_encoding = 'utf-8-sig' 56 | 57 | # The master toctree document. 58 | master_doc = 'index' 59 | 60 | # General information about the project. 61 | project = 'Geofront' 62 | copyright = '2014\N{EN DASH}2016, Hong Minhee' 63 | 64 | # The version info for the project you're documenting, acts as replacement for 65 | # |version| and |release|, also used in various other places throughout the 66 | # built documents. 67 | # 68 | # The short X.Y version. 69 | version = VERSION 70 | # The full version, including alpha/beta/rc tags. 71 | release = VERSION 72 | 73 | # The language for content autogenerated by Sphinx. Refer to documentation 74 | # for a list of supported languages. 75 | #language = None 76 | 77 | # There are two options for replacing |today|: either, you set today to some 78 | # non-false value, then it is used: 79 | #today = '' 80 | # Else, today_fmt is used as the format for a strftime call. 81 | #today_fmt = '%B %d, %Y' 82 | 83 | # List of patterns, relative to source directory, that match files and 84 | # directories to ignore when looking for source files. 85 | exclude_patterns = ['_build'] 86 | 87 | # The reST default role (used for this markup: `text`) to use for all 88 | # documents. 89 | #default_role = None 90 | 91 | # If true, '()' will be appended to :func: etc. 
cross-reference text. 92 | #add_function_parentheses = True 93 | 94 | # If true, the current module name will be prepended to all description 95 | # unit titles (such as .. function::). 96 | #add_module_names = True 97 | 98 | # If true, sectionauthor and moduleauthor directives will be shown in the 99 | # output. They are ignored by default. 100 | #show_authors = False 101 | 102 | # The name of the Pygments (syntax highlighting) style to use. 103 | pygments_style = 'sphinx' 104 | 105 | # A list of ignored prefixes for module index sorting. 106 | #modindex_common_prefix = [] 107 | 108 | # If true, keep warnings as "system message" paragraphs in the built documents. 109 | #keep_warnings = False 110 | 111 | 112 | # -- Options for HTML output ---------------------------------------------- 113 | 114 | # The theme to use for HTML and HTML Help pages. See the documentation for 115 | # a list of builtin themes. 116 | html_theme = 'default' 117 | 118 | # Theme options are theme-specific and customize the look and feel of a theme 119 | # further. For a list of options available for each theme, see the 120 | # documentation. 121 | #html_theme_options = {} 122 | 123 | # Add any paths that contain custom themes here, relative to this directory. 124 | #html_theme_path = [] 125 | 126 | # The name for this set of Sphinx documents. If None, it defaults to 127 | # " v documentation". 128 | html_title = '{} v{}'.format(project, release) 129 | 130 | # A shorter title for the navigation bar. Default is the same as html_title. 131 | #html_short_title = None 132 | 133 | # The name of an image file (relative to this directory) to place at the top 134 | # of the sidebar. 135 | #html_logo = None 136 | 137 | # The name of an image file (within the static path) to use as favicon of the 138 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 139 | # pixels large. 140 | #html_favicon = None 141 | 142 | # Add any paths that contain custom static files (such as style sheets) here, 143 | # relative to this directory. They are copied after the builtin static files, 144 | # so a file named "default.css" will overwrite the builtin "default.css". 145 | html_static_path = ['_static'] 146 | 147 | # Add any extra paths that contain custom files (such as robots.txt or 148 | # .htaccess) here, relative to this directory. These files are copied 149 | # directly to the root of the documentation. 150 | #html_extra_path = [] 151 | 152 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 153 | # using the given strftime format. 154 | #html_last_updated_fmt = '%b %d, %Y' 155 | 156 | # If true, SmartyPants will be used to convert quotes and dashes to 157 | # typographically correct entities. 158 | #html_use_smartypants = True 159 | 160 | # Custom sidebar templates, maps document names to template names. 161 | #html_sidebars = {} 162 | 163 | # Additional templates that should be rendered to pages, maps page names to 164 | # template names. 165 | #html_additional_pages = {} 166 | 167 | # If false, no module index is generated. 168 | #html_domain_indices = True 169 | 170 | # If false, no index is generated. 171 | #html_use_index = True 172 | 173 | # If true, the index is split into individual pages for each letter. 174 | #html_split_index = False 175 | 176 | # If true, links to the reST sources are added to the pages. 177 | #html_show_sourcelink = True 178 | 179 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
180 | #html_show_sphinx = True 181 | 182 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 183 | #html_show_copyright = True 184 | 185 | # If true, an OpenSearch description file will be output, and all pages will 186 | # contain a tag referring to it. The value of this option must be the 187 | # base URL from which the finished HTML is served. 188 | #html_use_opensearch = '' 189 | 190 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 191 | #html_file_suffix = None 192 | 193 | # Output file base name for HTML help builder. 194 | htmlhelp_basename = 'Geofrontdoc' 195 | 196 | 197 | # -- Options for LaTeX output --------------------------------------------- 198 | 199 | latex_elements = { 200 | # The paper size ('letterpaper' or 'a4paper'). 201 | #'papersize': 'letterpaper', 202 | 203 | # The font size ('10pt', '11pt' or '12pt'). 204 | #'pointsize': '10pt', 205 | 206 | # Additional stuff for the LaTeX preamble. 207 | #'preamble': '', 208 | } 209 | 210 | # Grouping the document tree into LaTeX files. List of tuples 211 | # (source start file, target name, title, 212 | # author, documentclass [howto, manual, or own class]). 213 | latex_documents = [ 214 | ('index', 'Geofront.tex', 'Geofront Documentation', 215 | 'Hong Minhee', 'manual'), 216 | ] 217 | 218 | # The name of an image file (relative to this directory) to place at the top of 219 | # the title page. 220 | #latex_logo = None 221 | 222 | # For "manual" documents, if this is true, then toplevel headings are parts, 223 | # not chapters. 224 | #latex_use_parts = False 225 | 226 | # If true, show page references after internal links. 227 | #latex_show_pagerefs = False 228 | 229 | # If true, show URL addresses after external links. 230 | #latex_show_urls = False 231 | 232 | # Documents to append as an appendix to all manuals. 233 | #latex_appendices = [] 234 | 235 | # If false, no module index is generated. 236 | #latex_domain_indices = True 237 | 238 | 239 | # -- Options for manual page output --------------------------------------- 240 | 241 | # One entry per manual page. List of tuples 242 | # (source start file, name, description, authors, manual section). 243 | man_pages = [ 244 | ('index', 'geofront', 'Geofront Documentation', 245 | ['Hong Minhee'], 1) 246 | ] 247 | 248 | # If true, show URL addresses after external links. 249 | #man_show_urls = False 250 | 251 | 252 | # -- Options for Texinfo output ------------------------------------------- 253 | 254 | # Grouping the document tree into Texinfo files. List of tuples 255 | # (source start file, target name, title, author, 256 | # dir menu entry, description, category) 257 | texinfo_documents = [ 258 | ('index', 'Geofront', 'Geofront Documentation', 259 | 'Hong Minhee', 'Geofront', 'One line description of project.', 260 | 'Miscellaneous'), 261 | ] 262 | 263 | # Documents to append as an appendix to all manuals. 264 | #texinfo_appendices = [] 265 | 266 | # If false, no module index is generated. 267 | #texinfo_domain_indices = True 268 | 269 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 270 | #texinfo_show_urls = 'footnote' 271 | 272 | # If true, do not generate a @detailmenu in the "Top" node's menu. 273 | #texinfo_no_detailmenu = False 274 | 275 | 276 | # Example configuration for intersphinx: refer to the Python standard library. 
277 | intersphinx_mapping = { 278 | 'python': ('https://docs.python.org/3/', None), 279 | 'paramiko': ('http://docs.paramiko.org/en/2.0/', None), 280 | 'werkzeug': ('http://werkzeug.pocoo.org/docs/', None), 281 | 'flask': ('http://flask.pocoo.org/docs/', None), 282 | 'libcloud': ('https://libcloud.readthedocs.io/en/latest/', None), 283 | 'waitress': ('http://docs.pylonsproject.org/projects/waitress/en/latest/', 284 | None), 285 | } 286 | 287 | 288 | extlinks = { 289 | 'issue': ('https://github.com/spoqa/geofront/issues/%s', '#'), 290 | 'pr': ('https://github.com/spoqa/geofront/pull/%s', '#'), 291 | 'branch': ('https://github.com/spoqa/geofront/compare/%s', ''), 292 | 'commit': ('https://github.com/spoqa/geofront/commit/%s', '') 293 | } 294 | -------------------------------------------------------------------------------- /docs/changes.rst: -------------------------------------------------------------------------------- 1 | Geofront Changelog 2 | ================== 3 | 4 | Version 0.5.0 5 | ------------- 6 | 7 | To be released. 8 | 9 | - Geofront now supports access to servers inside remote private networks 10 | via SSH tunneling over an HTTP WebSocket. [:issue:`21`] 11 | 12 | - Known issue: the WebSocket endpoints do not work in the debug mode. 13 | 14 | - Removed ``--trusted-proxy`` option from the :program:`geofront-server` 15 | command. 16 | - Geofront becomes to require gevent 1.1.2 or higher. 17 | - Geofront becomes to require Flask-Sockets 0.2.1 or higher. 18 | - Geofront becomes not to require waitress any more. 19 | 20 | 21 | Version 0.4.1 22 | ------------- 23 | 24 | To be released. 25 | 26 | - Geofront now supports Ed25519 key. 27 | Under the hood, :func:`geofront.keystore.parse_openssh_pubkey()` function 28 | becomes to be possible to parse Ed25519 public keys. 29 | - Added ``addresser`` and ``filter`` options to :class:`CloudRemoteSet 30 | ` constructor, which allow 31 | customization of server addressing and filtering schemes. The default 32 | is still to use public IPs, so you would wish to give lambda functions that 33 | return private IPs from the :class:`libcloud.compute.base.Node` object 34 | for use with VPNs or SSH tunneling. 35 | - Fixed :exc:`TypeError` 36 | (``__init__() got an unexpected keyword argument 'filename'``) 37 | from :meth:`FileSystemMasterKeyStore.load() 38 | ` method. 39 | [:issue:`7`] 40 | - Since Werkzeug 0.14 changed the way to handle exceptions and it's no more 41 | compatible with the current use of Geofront, Geofront explicitly scoped 42 | the version of Werzkeug to be less than 0.14. 43 | 44 | 45 | Version 0.4.0 46 | ------------- 47 | 48 | Released on March 30, 2017. 49 | 50 | - Geofront now supports ECDSA key. 51 | Under the hood, :func:`geofront.keystore.parse_openssh_pubkey()` function 52 | becomes to be possible to parse ECDSA public keys. [:issue:`14`] 53 | - Geofront now supports Atlassian Bitbucket Cloud. 54 | See also :mod:`geofront.backends.bitbucket` module. [:issue:`4`] 55 | - Geofront now can generate other than RSA master key (e.g. ECDSA). 56 | The new configuration :data:`~config.MASTER_KEY_TYPE` is added to 57 | choose the type of the master key Geofront will generate. 58 | It's :class:`~paramiko.rsakey.RSAKey` by default. 59 | - Added :mod:`geofront.backends.oauth` module which provides 60 | base team implementations for OAuth. 
61 | - Added ``key_type`` optional parameter to 62 | :func:`~geofront.masterkey.renew_master_key()` function, 63 | :class:`~geofront.masterkey.PeriodicalRenewal` class constructor, and 64 | :func:`~geofront.regen.regenerate()` function. 65 | - Since ``key_type`` now can be other than :class:`~paramiko.rsakey.RSAKey`, 66 | the ``bits`` optional parameter of 67 | :func:`~geofront.masterkey.renew_master_key()` function, 68 | :class:`~geofront.masterkey.PeriodicalRenewal` class constructor, and 69 | :func:`~geofront.regen.regenerate()` function now have the default value 70 | :const:`None` instead of 2048. :const:`None` automatically choose 71 | the appropriate bits of the ``key_type``. 72 | - Added :func:`~geofront.masterkey.generate_key()` function and 73 | :exc:`~geofront.masterkey.KeyGenerationError` that it raises. 74 | - Added ``alias_namer`` option to :class:`CloudRemoteSet 75 | ` constructor. 76 | - Added :const:`geofront.team.GroupSet` type which is alias of 77 | :class:`~typing.AbstractSet`\ [:class:`~typing.Hashable`]. 78 | - Now master key can be found without token through 79 | :http:get:`/masterkey/` API. The server root :http:get:`/` also became 80 | to contain a :http:header:`Link` header to it and ``"master_key_url"`` field. 81 | - Deprecated :http:get:`/tokens/(token_id:token_id)/masterkey/`. 82 | Use :http:get:`/masterkey/` instead. 83 | The existing url redirects to the new url :http:get:`/masterkey/` with 84 | :http:statuscode:`301`. 85 | - Fixed bug that :meth:`CloudKeyStore.list_keys() 86 | <~geofront.backends.cloud.CloudKeyStore>` sometimes returned an empty set 87 | even if there were stored keys. 88 | - Geofront becomes to require Paramiko 2.0.1 or higher. 89 | - Geofront becomes to require Werkzeug 0.11 or higher. 90 | - Geofront becomes to require Flask 0.10.1 or higher. 91 | - Geofront becomes to require Apache Libcloud 1.1.0 or higher. 92 | - Geofront becomes to require OAuthLib 1.1.1 or higher. 93 | - Geofront becomes to require Waitress 1.0.2 or higher. 94 | - Goefront becomes to require typeguard_ 2.1.1 or higher. 95 | - :mod:`geofront.util` is gone now. Geofront instead became to require 96 | :mod:`typing` and typeguard_. 97 | 98 | .. _typeguard: https://github.com/agronholm/typeguard 99 | 100 | 101 | Version 0.3.2 102 | ------------- 103 | 104 | Released on March 7, 2016. 105 | 106 | - Added :class:`~geofront.remote.RemoteSetUnion` to make union view of 107 | multiple remote sets. 108 | - Fixed :exc:`AttributeError` on :meth:`StashKeyStore.register() 109 | ` or 110 | :meth:`StashKeyStore.deregister() 111 | ` being called. 112 | 113 | 114 | Version 0.3.1 115 | ------------- 116 | 117 | Released on January 19, 2016. 118 | 119 | - Added :class:`~geofront.remote.RemoteSetFilter` to dynamically filter 120 | set of remotes. 121 | - Fixed a regression bug introduced since 0.3.0 122 | (:commit:`9db44659c423ed33a89de712fb645186b7c722cc`) that 123 | :class:`~geofront.backends.github.GitHubOrganization` fails to authenticate. 124 | [:issue:`12`] 125 | 126 | 127 | Version 0.3.0 128 | ------------- 129 | 130 | Released on January 15, 2016. 131 | 132 | - Geofront becomes to require Paramiko 1.15.0 or higher. 133 | - Added save check for :class:`~geofront.remote.AuthorizedKeyList`. 
134 | [:issue:`5`] 135 | - :meth:`Team.request_authentication() 136 | ` method becomes to no more take 137 | ``auth_nonce`` and return :class:`~geofront.team.AuthenticationContinuation` 138 | value instead of simple url :class:`str`, so that arbitrary value more 139 | general than simple nonce :class:`str` can be shared between 140 | :meth:`~geofront.team.Team.request_authentication()` and 141 | :meth:`Team.authenticate() `. If arbitrary 142 | nonce is needed, :meth:`~geofront.team.Team.request_authentication()` 143 | method has to generate one by itself. 144 | - Geofront now supports Atlassian Bitbucket Server (which was Stash). 145 | See also :mod:`geofront.backends.stash` module. 146 | - :class:`~geofront.masterkey.TwoPhaseRenewal` became to raise 147 | :exc:`ConnectionError` with attempted remote address instead of 148 | :exc:`socket.gaierror` which is hard to troubleshoot. 149 | - Fixed signature mismatch errors of 150 | :class:`~geofront.backends.cloud.CloudMasterKeyStore` when it's used with 151 | AWS S3. 152 | 153 | 154 | Version 0.2.2 155 | ------------- 156 | 157 | Released on July 8, 2014. 158 | 159 | - Became to depend on apache-libcloud 0.15.0 or later. 160 | - Added HSTS_ support: 161 | 162 | - Added :data:`~config.ENABLE_HSTS` configuration. 163 | - Added :option:`--force-https ` option 164 | to :program:`geofront-server` command. 165 | 166 | - Fixed a bug of :meth:`KeyPairDoesNotExistError.save() 167 | ` method that 168 | leaks :exc:`~libcloud.common.types.MalformedResponseError` raised by 169 | :class:`~libcloud.compute.drivers.ec2.EC2NodeDriver` which ought to 170 | raise proper :exc:`libcloud.compute.types.KeyPairDoesNotExistError`. 171 | 172 | .. _HSTS: https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security 173 | 174 | 175 | Version 0.2.1 176 | ------------- 177 | 178 | Released on June 16, 2014. 179 | 180 | - Fixed an authentication bug of :class:`~geofront.masterkey.TwoPhaseRenewal` 181 | raised due to not specify login username. 182 | - More detailed message logging of exceptions that rise during master key 183 | renewal. 184 | 185 | 186 | Version 0.2.0 187 | ------------- 188 | 189 | Released on May 3, 2014. 190 | 191 | - Added :meth:`~geofront.team.Team.list_groups()` method to 192 | :class:`~geofront.team.Team` interface. 193 | - Added :meth:`~geofront.backends.github.GitHubOrganization.list_groups()` 194 | method to :class:`~geofront.backends.github.GitHubOrganization` class. 195 | - Removed an unnecessary dependency to enum34_ on Python 3.3. 196 | - Added :mod:`geofront.backends.cloud` module. 197 | 198 | - ``geofront.masterkey.CloudMasterKeyStore`` is moved to 199 | :class:`geofront.backends.cloud.CloudMasterKeyStore`. 200 | - ``geofront.remote.CloudRemoteSet`` is moved to 201 | :class:`geofront.backends.cloud.CloudRemoteSet`. 202 | 203 | - :class:`~geofront.remote.Remote` now has 204 | :attr:`~geofront.remote.Remote.metadata` attribute. 205 | - :class:`~geofront.backends.cloud.CloudRemoteSet` fills 206 | :attr:`~geofront.remote.Remote.metadata` of the resulted 207 | :class:`~geofront.remote.Remote` objects if the given driver supports. 208 | - Now depends on singledispatch_ if Python is older than 3.4. 209 | - Added :class:`~geofront.remote.PermissionPolicy` interface. 210 | - Added :class:`~geofront.remote.DefaultPermissionPolicy` class. 211 | - Added :class:`~geofront.remote.GroupMetadataPermissionPolicity` class. 212 | - Added new ``PERMISSION_POLICY`` configuration. 213 | - Added :mod:`geofront.backends.dbapi` module. 
214 | - Added :program:`geofront-key-regen` command. 215 | - HTTP APIs became more RESTful. Now it has the root endpoint which provides 216 | the link to create a new token, and the token API provides several 217 | links to subresources as well. 218 | - Added new ``MASTER_KEY_BITS`` configuration. 219 | - Added new ``bits`` optional parameters to :func:`renew_master_key() 220 | `, :class:`PeriodicalRenewal 221 | `, and :func:`regenerate() 222 | `. 223 | - Added :class:`~geofront.backends.cloud.CloudKeyStore`. [:issue:`2`] 224 | - Added :class:`~geofront.backends.cloud.CloudMasterPublicKeyStore`. 225 | [:issue:`2`] 226 | 227 | .. _enum34: https://pypi.python.org/pypi/enum34 228 | .. _singledispatch: https://pypi.python.org/pypi/singledispatch 229 | 230 | 231 | Version 0.1.1 232 | ------------- 233 | 234 | Released on April 22, 2014. 235 | 236 | - Fixed :exc:`TypeError` that rises when :class:`CloudMasterKeyStore 237 | ` is used with AWS S3 driver. 238 | - Added ``--trusted-proxy`` option to :program:`geofront-server` command. It's 239 | useful when the server is run behind a reverse proxy. 240 | - Added token no-op API: :http:get:`/tokens/(token_id:token_id)/`. 241 | 242 | 243 | Version 0.1.0 244 | ------------- 245 | 246 | First alpha release. Released on April 21, 2014. 247 | -------------------------------------------------------------------------------- /docs/config.rst: -------------------------------------------------------------------------------- 1 | .. module:: config 2 | 3 | Configuration 4 | ============= 5 | 6 | The :program:`geofront-server` command takes a configuration file as required 7 | argument. The configuration is an ordinary Python script that defines 8 | the following required and optional variables. Note that all names have to 9 | be uppercase. 10 | 11 | .. data:: TEAM 12 | 13 | (:class:`geofront.team.Team`) The backend implementation for team 14 | authentication. For example, in order to authorize members of GitHub 15 | organization use :class:`~geofront.backends.github.GitHubOrganization` 16 | implementation:: 17 | 18 | from geofront.backends.github import GitHubOrganization 19 | 20 | TEAM = GitHubOrganization( 21 | client_id='GitHub OAuth app client id goes here', 22 | client_secret='GitHub OAuth app client secret goes here', 23 | org_login='your_org_name' # in https://github.com/your_org_name 24 | ) 25 | 26 | Or you can implement your own backend by subclassing 27 | :class:`~geofront.team.Team`. 28 | 29 | .. seealso:: 30 | 31 | Module :mod:`geofront.team` --- Team authentication 32 | The interface for team authentication. 33 | 34 | Class :class:`geofront.backends.github.GitHubOrganization` 35 | The :class:`~geofront.team.Team` implementation for GitHub 36 | organizations. 37 | 38 | Class :class:`geofront.backends.bitbucket.BitbucketTeam` 39 | The :class:`~geofront.team.Team` implementation for Bitbucket Cloud 40 | teams. 41 | 42 | Class :class:`geofront.backends.stash.StashTeam` 43 | The :class:`~geofront.team.Team` implementation for Atlassian's 44 | Bitbucket Server (which was Stash). 45 | 46 | .. data:: REMOTE_SET 47 | 48 | (:class:`~geofront.remote.RemoteSet`) The set of remote servers to be managed 49 | by Geofront. It can be anything only if it's an mapping object. 
50 | For example, you can hard-code it by using a Python :class:`dict` data 51 | structure:: 52 | 53 | from geofront.remote import Remote 54 | 55 | REMOTE_SET = { 56 | 'web-1': Remote('ubuntu', '192.168.0.5'), 57 | 'web-2': Remote('ubuntu', '192.168.0.6'), 58 | 'web-3': Remote('ubuntu', '192.168.0.7'), 59 | 'worker-1': Remote('ubuntu', '192.168.0.25'), 60 | 'worker-2': Remote('ubuntu', '192.168.0.26'), 61 | 'db-1': Remote('ubuntu', '192.168.0.50'), 62 | 'db-2': Remote('ubuntu', '192.168.0.51'), 63 | } 64 | 65 | Every key has to be a string, and every value has to be an instance of 66 | :class:`~geofront.remote.Remote`. :class:`~geofront.remote.Remote` consists 67 | of a user, a hostname, and the port to SSH. For example, if you've 68 | :program:`ssh`-ed to a remote server by the following command: 69 | 70 | .. code-block:: console 71 | 72 | $ ssh -p 2222 ubuntu@192.168.0.50 73 | 74 | A :class:`~geofront.remote.Remote` object for it should be:: 75 | 76 | Remote('ubuntu', '192.168.0.50', 2222) 77 | 78 | You can add more dynamism by providing a custom :class:`dict`-like mapping 79 | object. :class:`collections.abc.Mapping` can help you implement one. 80 | For example, :class:`~geofront.backends.cloud.CloudRemoteSet` is a subtype of 81 | :class:`~collections.abc.Mapping`, and it dynamically loads the list 82 | of available instance nodes in the cloud, e.g. EC2_ of AWS_. Thanks to 83 | Apache Libcloud_, it can work with more than 20 cloud providers like 84 | AWS_, Azure_, or Rackspace_. :: 85 | 86 | 87 | from geofront.backends.cloud import CloudRemoteSet 88 | from libcloud.compute.types import Provider 89 | from libcloud.compute.providers import get_driver 90 | 91 | driver_cls = get_driver(Provider.EC2) 92 | driver = driver_cls('access id', 'secret key', region='us-east-1') 93 | REMOTE_SET = CloudRemoteSet(driver) 94 | 95 | .. seealso:: 96 | 97 | Class :class:`geofront.remote.Remote` 98 | Value type that represents a remote server to :program:`ssh`. 99 | 100 | Class :class:`geofront.backends.cloud.CloudRemoteSet` 101 | The Libcloud_-backed dynamic remote set. 102 | 103 | Module :mod:`collections.abc` --- Abstract Base Classes for Containers 104 | This module provides abstract base classes that can be used to 105 | test whether a class provides a particular interface; for 106 | example, whether it is hashable or whether it is a mapping. 107 | 108 | .. _EC2: http://aws.amazon.com/ec2/ 109 | .. _AWS: http://aws.amazon.com/ 110 | .. _Libcloud: https://libcloud.apache.org/ 111 | .. _Azure: http://azure.microsoft.com/ 112 | .. _Rackspace: http://www.rackspace.com/ 113 | 114 | .. data:: TOKEN_STORE 115 | 116 | (:class:`werkzeug.contrib.cache.BaseCache`) The store to save access tokens. 117 | It uses Werkzeug's cache interface, and Werkzeug provides several 118 | built-in implementations as well, e.g.: 119 | 120 | - :class:`~werkzeug.contrib.cache.MemcachedCache` 121 | - :class:`~werkzeug.contrib.cache.RedisCache` 122 | - :class:`~werkzeug.contrib.cache.FileSystemCache` 123 | 124 | For example, in order to store access tokens into Redis:: 125 | 126 | from werkzeug.contrib.cache import RedisCache 127 | 128 | TOKEN_STORE = RedisCache(host='localhost', db=0) 129 | 130 | Of course you can implement your own backend by subclassing 131 | :class:`~werkzeug.contrib.cache.BaseCache`.
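For instance, here is a minimal sketch of such a custom backend. The ``DictTokenStore`` name is made up for illustration; it ignores the ``timeout`` argument and keeps tokens in a single process, so it is only suitable for experiments, not for real deployments::

    from werkzeug.contrib.cache import BaseCache


    class DictTokenStore(BaseCache):
        """An illustrative in-memory token store backed by a plain dict."""

        def __init__(self, default_timeout=300):
            super().__init__(default_timeout)
            self._store = {}

        def get(self, key):
            # BaseCache.get() returns None for missing keys; do the same.
            return self._store.get(key)

        def set(self, key, value, timeout=None):
            # Expiration is deliberately ignored in this sketch.
            self._store[key] = value
            return True

        def delete(self, key):
            return self._store.pop(key, None) is not None


    TOKEN_STORE = DictTokenStore()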
132 | 133 | Although it's a required configuration, when :option:`-d`/:option:`--debug` 134 | is 135 | enabled, :class:`~werkzeug.contrib.cache.SimpleCache` (whose contents are all lost 136 | once the :program:`geofront-server` process terminates) is used by default. 137 | 138 | .. seealso:: 139 | 140 | Cache__ --- Werkzeug 141 | Cache backend interface and implementations provided by Werkzeug. 142 | 143 | __ http://werkzeug.pocoo.org/docs/contrib/cache/ 144 | 145 | .. data:: KEY_STORE 146 | 147 | (:class:`geofront.keystore.KeyStore`) The store to save *public keys* 148 | for each team member. (Not the *master key*; don't be confused with 149 | :data:`MASTER_KEY_STORE`.) 150 | 151 | If :data:`TEAM` is a :class:`~geofront.backends.github.GitHubOrganization` 152 | object, :data:`KEY_STORE` can also be 153 | :class:`~geofront.backends.github.GitHubKeyStore`. It's an adapter class 154 | for GitHub's per-account public key list. :: 155 | 156 | from geofront.backends.github import GitHubKeyStore 157 | 158 | KEY_STORE = GitHubKeyStore() 159 | 160 | You can also store public keys in a database like SQLite, PostgreSQL, 161 | or MySQL through :class:`~geofront.backends.dbapi.DatabaseKeyStore`:: 162 | 163 | import sqlite3 164 | from geofront.backends.dbapi import DatabaseKeyStore 165 | 166 | KEY_STORE = DatabaseKeyStore(sqlite3, 167 | '/var/lib/geofront/public_keys.db') 168 | 169 | Some cloud providers like Amazon EC2 and Rackspace (Next Gen) support a 170 | *key pair service*. :class:`~geofront.backends.cloud.CloudKeyStore` 171 | helps you use the service as a public key store:: 172 | 173 | from geofront.backends.cloud import CloudKeyStore 174 | from libcloud.compute.types import Provider 175 | from libcloud.compute.providers import get_driver 176 | 177 | driver_cls = get_driver(Provider.EC2) 178 | driver = driver_cls('api key', 'api secret key', region='us-east-1') 179 | KEY_STORE = CloudKeyStore(driver) 180 | 181 | .. versionadded:: 0.2.0 182 | Added :class:`~geofront.backends.dbapi.DatabaseKeyStore` class. 183 | Added :class:`~geofront.backends.cloud.CloudKeyStore` class. 184 | 185 | .. versionadded:: 0.3.0 186 | Added :class:`~geofront.backends.stash.StashKeyStore` class. 187 | 188 | .. data:: MASTER_KEY_STORE 189 | 190 | (:class:`geofront.masterkey.MasterKeyStore`) The store to save 191 | the *master key*. (Not *public keys*; don't be confused with 192 | :data:`KEY_STORE`.) 193 | 194 | The master key store should be secure, yet at the same time the key 195 | should be hard to lose. Geofront provides some built-in implementations: 196 | 197 | :class:`~geofront.masterkey.FileSystemMasterKeyStore` 198 | As the name suggests, it stores the master key in the file system. 199 | You can set the path to save the key. Although it's not that secure, 200 | it might help you to try out Geofront. 201 | 202 | :class:`~geofront.backends.cloud.CloudMasterKeyStore` 203 | It stores the master key in cloud object storage like S3_ of AWS_. 204 | It supports more than 20 cloud providers through the efforts of Libcloud_. 205 | 206 | :: 207 | 208 | from geofront.masterkey import FileSystemMasterKeyStore 209 | 210 | MASTER_KEY_STORE = FileSystemMasterKeyStore('/var/lib/geofront/id_rsa') 211 | 212 | .. _S3: http://aws.amazon.com/s3/ 213 | 214 | .. data:: PERMISSION_POLICY 215 | 216 | (:class:`~geofront.remote.PermissionPolicy`) The permission policy that 217 | determines which remotes are visible to each team member, and which of them 218 | they are allowed to SSH into.
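A policy object implements two methods, ``filter(remotes, identity, groups)`` and ``permit(remote, identity, groups)``; the bundled :file:`tests/remote_test.py` (reproduced later in this document) exercises exactly these signatures. If the built-in policies described below don't fit, a custom policy can be sketched roughly like this. ``RestrictedMetadataPolicy`` and the ``'restricted'``/``'admin'`` names are hypothetical, made up for illustration::

    from geofront.remote import PermissionPolicy


    class RestrictedMetadataPolicy(PermissionPolicy):
        """Hypothetical policy: remotes whose metadata marks them as
        'restricted' are visible only to members of the 'admin' group."""

        def filter(self, remotes, identity, groups):
            # Keep only the remotes this member is permitted to see.
            return {alias: remote for alias, remote in remotes.items()
                    if self.permit(remote, identity, groups)}

        def permit(self, remote, identity, groups):
            # Guard against remotes that carry no metadata mapping.
            metadata = remote.metadata or {}
            return 'admin' in groups or not metadata.get('restricted')


    PERMISSION_POLICY = RestrictedMetadataPolicy()

The built-in policies described next cover the common cases, so a custom class is only needed when they fall short.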
219 | 220 | The default is :class:`~geofront.remote.DefaultPermissionPolicy`, 221 | and it allows everyone in the team to view, and to SSH into, 222 | all available remotes. 223 | 224 | If your remote set has metadata for ACL, i.e. identifiers of the groups 225 | to allow, you can utilize it through 226 | :class:`~geofront.remote.GroupMetadataPermissionPolicy`. 227 | 228 | If you need subtler and more complex ACL rules, you can implement 229 | your own policy by subclassing the :class:`~geofront.remote.PermissionPolicy` 230 | interface, as sketched above. 231 | 232 | .. versionadded:: 0.2.0 233 | 234 | .. data:: MASTER_KEY_TYPE 235 | 236 | (:class:`~typing.Type`\ [:class:`~paramiko.pkey.PKey`]) The type of 237 | the master key that will be generated. It has to be a subclass of 238 | :class:`paramiko.pkey.PKey`: 239 | 240 | RSA 241 | :class:`paramiko.rsakey.RSAKey` 242 | ECDSA 243 | :class:`paramiko.ecdsakey.ECDSAKey` 244 | DSA (DSS) 245 | :class:`paramiko.dsskey.DSSKey` 246 | 247 | :class:`~paramiko.rsakey.RSAKey` by default. 248 | 249 | .. versionadded:: 0.4.0 250 | 251 | .. data:: MASTER_KEY_BITS 252 | 253 | (:class:`~typing.Optional`\ [:class:`int`]) The number of bits 254 | the generated master key should have. 255 | :const:`None` by default. 256 | 257 | .. versionchanged:: 0.4.0 258 | Since the appropriate :data:`MASTER_KEY_BITS` depends on its 259 | :data:`MASTER_KEY_TYPE`, the default value of :data:`MASTER_KEY_BITS` 260 | became :const:`None` (from 2048). 261 | 262 | :const:`None` means to follow :data:`MASTER_KEY_TYPE`'s own default 263 | (appropriate) bits. 264 | 265 | .. versionadded:: 0.2.0 266 | 267 | .. data:: MASTER_KEY_RENEWAL 268 | 269 | (:class:`datetime.timedelta`) The interval of master key renewal. 270 | :const:`None` means never. For example, if you want to renew the master 271 | key every week:: 272 | 273 | import datetime 274 | 275 | MASTER_KEY_RENEWAL = datetime.timedelta(days=7) 276 | 277 | A day by default. 278 | 279 | .. data:: TOKEN_EXPIRE 280 | 281 | (:class:`datetime.timedelta`) The time after which each access token expires. 282 | The shorter it is, the more secure, but team members have to authenticate 283 | more frequently; too short a time would interrupt them. 284 | 285 | A week by default. 286 | 287 | .. data:: ENABLE_HSTS 288 | 289 | (:class:`bool`) Enable HSTS_ (HTTP strict transport security). 290 | 291 | :const:`False` by default. 292 | 293 | .. versionadded:: 0.2.2 294 | 295 | .. _HSTS: https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security 296 | 297 | 298 | Example 299 | ------- 300 | 301 | .. 
include:: ../example.cfg.py 302 | :code: 303 | -------------------------------------------------------------------------------- /tests/remote_test.py: -------------------------------------------------------------------------------- 1 | import collections.abc 2 | import datetime 3 | import time 4 | 5 | from paramiko.rsakey import RSAKey 6 | from pytest import mark, raises 7 | 8 | from geofront.identity import Identity 9 | from geofront.keystore import format_openssh_pubkey, parse_openssh_pubkey 10 | from geofront.remote import (AuthorizedKeyList, DefaultPermissionPolicy, 11 | GroupMetadataPermissionPolicy, Remote, 12 | RemoteSetFilter, RemoteSetUnion, authorize) 13 | from geofront.team import Team 14 | 15 | 16 | @mark.parametrize(('b', 'equal'), [ 17 | (Remote('a', '192.168.0.1', 22), True), 18 | (Remote('a', '192.168.0.1', 2222), False), 19 | (Remote('b', '192.168.0.1', 22), False), 20 | (Remote('b', '192.168.0.1', 2222), False), 21 | (Remote('a', '192.168.0.2', 22), False), 22 | (Remote('b', '192.168.0.2', 22), False), 23 | (Remote('a', '192.168.0.2', 2222), False), 24 | (Remote('b', '192.168.0.2', 2222), False), 25 | (Remote('a', '192.168.0.1', 22, {'a': 1}), True), 26 | (Remote('a', '192.168.0.1', 2222, {'a': 1}), False), 27 | (Remote('b', '192.168.0.1', 22, {'a': 1}), False), 28 | (Remote('b', '192.168.0.1', 2222, {'a': 1}), False), 29 | (Remote('a', '192.168.0.2', 22, {'a': 1}), False), 30 | (Remote('b', '192.168.0.2', 22, {'a': 1}), False), 31 | (Remote('a', '192.168.0.2', 2222, {'a': 1}), False), 32 | (Remote('b', '192.168.0.2', 2222, {'a': 1}), False) 33 | ]) 34 | def test_remote(b, equal): 35 | a = Remote('a', '192.168.0.1') 36 | assert (a == b) is equal 37 | assert (a != b) is (not equal) 38 | assert (hash(a) == hash(b)) is equal 39 | 40 | 41 | def get_next_line(fo): 42 | line = '' 43 | while not line: 44 | line = fo.readline() 45 | if not line: 46 | return line 47 | line = line.strip() 48 | return line 49 | 50 | 51 | def test_authorized_keys_list_iter(fx_authorized_sftp): 52 | sftp_client, path, keys = fx_authorized_sftp 53 | key_list = AuthorizedKeyList(sftp_client) 54 | it = iter(key_list) 55 | assert next(it) == keys[0] 56 | assert next(it) == keys[1] 57 | assert next(it) == keys[2] 58 | assert next(it) == keys[3] 59 | assert next(it) == keys[4] 60 | assert next(it) == keys[5] 61 | with raises(StopIteration): 62 | next(it) 63 | # It's lazily evaluated; changes should reflect 64 | with path.join('.ssh', 'authorized_keys').open('w') as f: 65 | f.write(format_openssh_pubkey(keys[0])) 66 | it = iter(key_list) 67 | assert next(it) == keys[0] 68 | with raises(StopIteration): 69 | next(it) 70 | 71 | 72 | def test_authorized_keys_list_len(fx_authorized_sftp): 73 | sftp_client, path, keys = fx_authorized_sftp 74 | key_list = AuthorizedKeyList(sftp_client) 75 | assert len(key_list) == 6 76 | # It's lazily evaluated; changes should reflect 77 | with path.join('.ssh', 'authorized_keys').open('w') as f: 78 | f.write(format_openssh_pubkey(keys[0])) 79 | assert len(key_list) == 1 80 | 81 | 82 | def test_authorized_keys_list_getitem(fx_authorized_sftp): 83 | sftp_client, path, keys = fx_authorized_sftp 84 | key_list = AuthorizedKeyList(sftp_client) 85 | for i in range(-6, 6): 86 | assert key_list[i] == keys[i] 87 | assert key_list[i:] == keys[i:] 88 | assert key_list[:i] == keys[:i] 89 | assert key_list[i:i + 3] == keys[i:i + 3] 90 | with raises(IndexError): 91 | assert key_list[-7] 92 | with raises(IndexError): 93 | assert key_list[6] 94 | with raises(TypeError): 95 | key_list['key'] 96 | # 
It's lazily evaluated; changes should reflect 97 | with path.join('.ssh', 'authorized_keys').open('w') as f: 98 | f.write(format_openssh_pubkey(keys[0])) 99 | assert key_list[0] == key_list[-1] == keys[0] 100 | with raises(IndexError): 101 | key_list[1] 102 | with raises(IndexError): 103 | key_list[-2] 104 | 105 | 106 | def test_authorized_keys_list_setitem(fx_authorized_sftp): 107 | sftp_client, path, keys = fx_authorized_sftp 108 | key_list = AuthorizedKeyList(sftp_client) 109 | # Slice assignment 110 | key_list[3:] = [] 111 | with path.join('.ssh', 'authorized_keys').open() as f: 112 | for i in range(3): 113 | assert parse_openssh_pubkey(get_next_line(f)) == keys[i] 114 | assert not get_next_line(f) 115 | # Positive index 116 | key_list[2] = keys[3] 117 | with path.join('.ssh', 'authorized_keys').open() as f: 118 | assert parse_openssh_pubkey(get_next_line(f)) == keys[0] 119 | assert parse_openssh_pubkey(get_next_line(f)) == keys[1] 120 | assert parse_openssh_pubkey(get_next_line(f)) == keys[3] 121 | assert not get_next_line(f) 122 | # Negative index 123 | key_list[-1] = keys[4] 124 | with path.join('.ssh', 'authorized_keys').open() as f: 125 | assert parse_openssh_pubkey(get_next_line(f)) == keys[0] 126 | assert parse_openssh_pubkey(get_next_line(f)) == keys[1] 127 | assert parse_openssh_pubkey(get_next_line(f)) == keys[4] 128 | assert not get_next_line(f) 129 | 130 | 131 | def test_authorized_keys_list_insert(fx_authorized_sftp): 132 | sftp_client, path, keys = fx_authorized_sftp 133 | key_list = AuthorizedKeyList(sftp_client) 134 | new_key = RSAKey.generate(1024) 135 | key_list.insert(2, new_key) 136 | with path.join('.ssh', 'authorized_keys').open() as f: 137 | assert parse_openssh_pubkey(get_next_line(f)) == keys[0] 138 | assert parse_openssh_pubkey(get_next_line(f)) == keys[1] 139 | assert parse_openssh_pubkey(get_next_line(f)) == new_key 140 | for i in range(2, 6): 141 | assert parse_openssh_pubkey(get_next_line(f)) == keys[i] 142 | assert not get_next_line(f) 143 | 144 | 145 | def test_authorized_keys_list_extend(fx_authorized_sftp): 146 | sftp_client, path, keys = fx_authorized_sftp 147 | key_list = AuthorizedKeyList(sftp_client) 148 | new_keys = [RSAKey.generate(1024) for _ in range(3)] 149 | key_list.extend(new_keys) 150 | with path.join('.ssh', 'authorized_keys').open() as f: 151 | for i in range(6): 152 | assert parse_openssh_pubkey(get_next_line(f)) == keys[i] 153 | for i in range(3): 154 | assert parse_openssh_pubkey(get_next_line(f)) == new_keys[i] 155 | assert not get_next_line(f) 156 | 157 | 158 | def test_authorized_keys_list_delitem(fx_authorized_sftp): 159 | sftp_client, path, keys = fx_authorized_sftp 160 | key_list = AuthorizedKeyList(sftp_client) 161 | # Slice deletion 162 | del key_list[3:] 163 | with path.join('.ssh', 'authorized_keys').open() as f: 164 | for i in range(3): 165 | assert parse_openssh_pubkey(get_next_line(f)) == keys[i] 166 | assert not get_next_line(f) 167 | # Positive index 168 | del key_list[2] 169 | with path.join('.ssh', 'authorized_keys').open() as f: 170 | assert parse_openssh_pubkey(get_next_line(f)) == keys[0] 171 | assert parse_openssh_pubkey(get_next_line(f)) == keys[1] 172 | assert not get_next_line(f) 173 | # Negative index 174 | del key_list[-1] 175 | with path.join('.ssh', 'authorized_keys').open() as f: 176 | assert parse_openssh_pubkey(get_next_line(f)) == keys[0] 177 | assert not get_next_line(f) 178 | 179 | 180 | def test_authorize(fx_sftpd): 181 | port, (thread, path, ev) = fx_sftpd.popitem() 182 | thread.start() 183 | 
master_key = RSAKey.generate(1024) 184 | public_keys = {RSAKey.generate(1024), RSAKey.generate(1024)} 185 | authorized_keys_path = path.mkdir('.ssh').join('authorized_keys') 186 | with authorized_keys_path.open('w') as f: 187 | print(format_openssh_pubkey(master_key), file=f) 188 | expires_at = authorize( 189 | public_keys, 190 | master_key, 191 | Remote('user', '127.0.0.1', port), 192 | timeout=datetime.timedelta(seconds=5) 193 | ) 194 | with authorized_keys_path.open() as f: 195 | saved_keys = frozenset(parse_openssh_pubkey(line) 196 | for line in f if line.strip()) 197 | assert saved_keys == (public_keys | {master_key}) 198 | while datetime.datetime.now(datetime.timezone.utc) <= expires_at: 199 | time.sleep(1) 200 | time.sleep(1) 201 | with authorized_keys_path.open() as f: 202 | saved_keys = map(parse_openssh_pubkey, f) 203 | assert frozenset(saved_keys) == {master_key} 204 | 205 | 206 | class DummyTeam(Team): 207 | 208 | pass 209 | 210 | 211 | def test_default_permission_policy(): 212 | remotes = { 213 | 'a': Remote('a', 'localhost'), 214 | 'b': Remote('b', 'localhost') 215 | } 216 | identity = Identity(DummyTeam, 'a') 217 | p = DefaultPermissionPolicy() 218 | assert p.filter(remotes, identity, {'x'}) == remotes 219 | for remote in remotes.values(): 220 | assert p.permit(remote, identity, {'x'}) 221 | 222 | 223 | @mark.parametrize(('key', 'separator'), [ 224 | ('role', None), 225 | ('role', ','), 226 | ('role', '/'), 227 | ('groups', None) 228 | ]) 229 | def test_group_metadata_permission_policy(key, separator): 230 | sep = separator or ' ' 231 | remotes = { 232 | 'web-1': Remote( 233 | 'ubuntu', '192.168.0.5', 234 | metadata={key: sep.join(['web', 'a']), 'other': 'ignore'} 235 | ), 236 | 'web-2': Remote( 237 | 'ubuntu', '192.168.0.6', 238 | metadata={key: sep.join(['web', 'b']), 'other': 'ignore'} 239 | ), 240 | 'web-3': Remote( 241 | 'ubuntu', '192.168.0.7', 242 | metadata={key: sep.join(['web', 'c']), 'other': 'ignore'} 243 | ), 244 | 'worker-1': Remote( 245 | 'ubuntu', '192.168.0.25', 246 | metadata={key: sep.join(['worker', 'a']), 'other': 'ignore'} 247 | ), 248 | 'worker-2': Remote( 249 | 'ubuntu', '192.168.0.26', 250 | metadata={key: sep.join(['worker', 'b']), 'other': 'ignore'} 251 | ), 252 | 'db-1': Remote( 253 | 'ubuntu', '192.168.0.50', 254 | metadata={key: sep.join(['db', 'a']), 'other': 'ignore'} 255 | ), 256 | 'db-2': Remote( 257 | 'ubuntu', '192.168.0.51', 258 | metadata={key: sep.join(['db', 'b']), 'other': 'ignore'} 259 | ) 260 | } 261 | 262 | def subset(*keys): 263 | return {a: r for a, r in remotes.items() if a in keys} 264 | p = GroupMetadataPermissionPolicy(key, separator) 265 | identity = Identity(DummyTeam, 1) 266 | assert (p.filter(remotes, identity, {'web', 'a'}) == 267 | subset('web-1', 'web-2', 'web-3', 'worker-1', 'db-1')) 268 | assert (p.filter(remotes, identity, {'db', 'c'}) == 269 | subset('web-3', 'worker-3', 'db-1', 'db-2')) 270 | assert p.permit(remotes['db-1'], identity, {'web', 'a'}) 271 | assert not p.permit(remotes['db-1'], identity, {'web', 'b'}) 272 | assert p.permit(remotes['db-1'], identity, {'db', 'a'}) 273 | assert p.permit(remotes['db-1'], identity, {'db', 'b'}) 274 | 275 | 276 | def test_remote_set_filter(): 277 | dict_ = { 278 | 'inc-a': Remote('a', 'example.com'), 279 | 'inc-b': Remote('b', 'example.com'), 280 | 'exc-c': Remote('c', 'example.com'), 281 | 'exc-d': Remote('d', 'example.com'), 282 | 'inc-e': Remote('e', 'example.com', 10022), 283 | 'exc-f': Remote('f', 'example.com', 10022), 284 | } 285 | filtered = RemoteSetFilter( 286 | 
lambda a, r: a.startswith('inc-') and r.port == 22, 287 | dict_ 288 | ) 289 | assert isinstance(filtered, collections.abc.Mapping) 290 | assert set(filtered) == set(filtered.keys()) == {'inc-a', 'inc-b'} 291 | assert len(filtered) == 2 292 | assert filtered['inc-a'] == filtered.get('inc-a') == dict_['inc-a'] 293 | assert filtered['inc-b'] == filtered.get('inc-b') == dict_['inc-b'] 294 | with raises(KeyError): 295 | filtered['exc-c'] 296 | assert filtered.get('exc-c') is None 297 | with raises(KeyError): 298 | filtered['exc-d'] 299 | assert filtered.get('exc-d') is None 300 | with raises(KeyError): 301 | filtered['inc-e'] 302 | assert filtered.get('inc-e') is None 303 | with raises(KeyError): 304 | filtered['exc-f'] 305 | assert filtered.get('exc-f') is None 306 | assert set(filtered.items()) == { 307 | ('inc-a', dict_['inc-a']), 308 | ('inc-b', dict_['inc-b']), 309 | } 310 | assert set(filtered.values()) == {dict_['inc-a'], dict_['inc-b']} 311 | # 312 | # test lazy evaluation 313 | del dict_['inc-b'] 314 | dict_['inc-g'] = g = Remote('g', 'sample.com') 315 | assert set(filtered) == set(filtered.keys()) == {'inc-a', 'inc-g'} 316 | assert len(filtered) == 2 317 | assert filtered['inc-a'] == filtered.get('inc-a') == dict_['inc-a'] 318 | with raises(KeyError): 319 | filtered['exc-c'] 320 | assert filtered.get('exc-c') is None 321 | with raises(KeyError): 322 | filtered['exc-d'] 323 | assert filtered.get('exc-d') is None 324 | with raises(KeyError): 325 | filtered['inc-e'] 326 | assert filtered.get('inc-e') is None 327 | with raises(KeyError): 328 | filtered['exc-f'] 329 | assert filtered.get('exc-f') is None 330 | assert filtered['inc-g'] == filtered.get('inc-g') == g 331 | assert set(filtered.items()) == {('inc-a', dict_['inc-a']), ('inc-g', g)} 332 | 333 | 334 | def test_remote_set_union(): 335 | a = { 336 | 'web-1': Remote('ubuntu', '192.168.0.5'), 337 | 'web-2': Remote('ubuntu', '192.168.0.6'), 338 | 'web-3': Remote('ubuntu', '192.168.0.7'), 339 | 'worker-1': Remote('ubuntu', '192.168.0.8'), 340 | } 341 | b = { 342 | 'worker-1': Remote('ubuntu', '192.168.0.25'), 343 | 'worker-2': Remote('ubuntu', '192.168.0.26'), 344 | 'db-1': Remote('ubuntu', '192.168.0.27'), 345 | 'db-2': Remote('ubuntu', '192.168.0.28'), 346 | 'db-3': Remote('ubuntu', '192.168.0.29'), 347 | } 348 | c = { 349 | 'web-1': Remote('ubuntu', '192.168.0.49'), 350 | 'db-1': Remote('ubuntu', '192.168.0.50'), 351 | 'db-2': Remote('ubuntu', '192.168.0.51'), 352 | } 353 | union = RemoteSetUnion(a, b, c) 354 | assert isinstance(union, collections.abc.Mapping) 355 | assert set(union) == set(union.keys()) == { 356 | 'web-1', 'web-2', 'web-3', 'worker-1', 'worker-2', 357 | 'db-1', 'db-2', 'db-3' 358 | } 359 | assert len(union) == 8 360 | assert union['web-1'] == union.get('web-1') == c['web-1'] 361 | assert union['web-2'] == union.get('web-2') == a['web-2'] 362 | assert union['web-3'] == union.get('web-3') == a['web-3'] 363 | assert union['worker-1'] == union.get('worker-1') == b['worker-1'] 364 | assert union['worker-2'] == union.get('worker-2') == b['worker-2'] 365 | assert union['db-1'] == union.get('db-1') == c['db-1'] 366 | assert union['db-2'] == union.get('db-2') == c['db-2'] 367 | assert union['db-3'] == union.get('db-3') == b['db-3'] 368 | assert set(union.items()) == { 369 | ('web-1', c['web-1']), 370 | ('web-2', a['web-2']), 371 | ('web-3', a['web-3']), 372 | ('worker-1', b['worker-1']), 373 | ('worker-2', b['worker-2']), 374 | ('db-1', c['db-1']), 375 | ('db-2', c['db-2']), 376 | ('db-3', b['db-3']), 377 | } 378 | assert 
set(union.values()) == { 379 | c['web-1'], a['web-2'], a['web-3'], 380 | b['worker-1'], b['worker-2'], 381 | c['db-1'], c['db-2'], b['db-3'], 382 | } 383 | # 384 | # test lazy evaluation 385 | del c['web-1'] 386 | assert isinstance(union, collections.abc.Mapping) 387 | assert set(union) == set(union.keys()) == { 388 | 'web-1', 'web-2', 'web-3', 'worker-1', 'worker-2', 389 | 'db-1', 'db-2', 'db-3' 390 | } 391 | assert len(union) == 8 392 | assert union['web-1'] == union.get('web-1') == a['web-1'] 393 | assert union['web-2'] == union.get('web-2') == a['web-2'] 394 | assert union['web-3'] == union.get('web-3') == a['web-3'] 395 | assert union['worker-1'] == union.get('worker-1') == b['worker-1'] 396 | assert union['worker-2'] == union.get('worker-2') == b['worker-2'] 397 | assert union['db-1'] == union.get('db-1') == c['db-1'] 398 | assert union['db-2'] == union.get('db-2') == c['db-2'] 399 | assert union['db-3'] == union.get('db-3') == b['db-3'] 400 | assert set(union.items()) == { 401 | ('web-1', a['web-1']), 402 | ('web-2', a['web-2']), 403 | ('web-3', a['web-3']), 404 | ('worker-1', b['worker-1']), 405 | ('worker-2', b['worker-2']), 406 | ('db-1', c['db-1']), 407 | ('db-2', c['db-2']), 408 | ('db-3', b['db-3']), 409 | } 410 | assert set(union.values()) == { 411 | a['web-1'], a['web-2'], a['web-3'], 412 | b['worker-1'], b['worker-2'], 413 | c['db-1'], c['db-2'], b['db-3'], 414 | } 415 | --------------------------------------------------------------------------------