├── .github
│   ├── linters
│   │   └── .flake8
│   └── workflows
│       ├── linter.yml
│       └── ci.yml
├── MANIFEST.in
├── docs
│   ├── Gemfile
│   ├── _includes
│   │   └── menu-bar.html
│   ├── assets
│   │   └── css
│   │       └── style.scss
│   ├── _config.yml
│   ├── tutorials.md
│   ├── tutorials
│   │   ├── firefox-nightly-doh-proxy.md
│   │   ├── simple-setup.md
│   │   └── nginx-dohhttpproxy-unbound-centos7.md
│   ├── _layouts
│   │   └── default.html
│   └── index.md
├── .gitignore
├── .coveragerc
├── Makefile
├── CODE_OF_CONDUCT.md
├── setup.cfg
├── dohproxy
│   ├── __init__.py
│   ├── constants.py
│   ├── client.py
│   ├── stub.py
│   ├── httpproxy.py
│   ├── server_protocol.py
│   ├── client_protocol.py
│   ├── integration.py
│   ├── proxy.py
│   └── utils.py
├── LICENSE
├── CONTRIBUTING.md
├── setup.py
├── test
│   ├── test_knownservers.py
│   ├── test_protocol.py
│   ├── test_httpproxy.py
│   └── test_utils.py
├── CHANGELOG.md
└── README.md
/.github/linters/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | extend-ignore = E203
4 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | include CHANGELOG.md
3 | include LICENSE
4 |
5 |
--------------------------------------------------------------------------------
/docs/Gemfile:
--------------------------------------------------------------------------------
1 | source "https://rubygems.org"
2 |
3 | gem "jekyll"
4 | gem "github-pages", group: :jekyll_plugins
5 | gem 'jekyll-relative-links'
6 |
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv
2 | __pycache__
3 | _site
4 | .coverage
5 | .cache
6 | .eggs
7 | htmlcov
8 | build
9 | dist
10 | *.egg-info
11 | Gemfile.lock
12 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [report]
2 | fail_under = 18
3 | exclude_lines =
4 | pragma: no cover
5 | if __name__ == .__main__.:
6 |
7 | [run]
8 | branch = True
9 | source =
10 | dohproxy
11 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | doc:
2 | echo "---\nlayout: default\n---" > docs/index.md
3 | cat README.md >> docs/index.md
4 | echo "## Tutorials\n\nCheck the [tutorial page](tutorials.md)" >> docs/index.md
5 | cat CHANGELOG.md >> docs/index.md
6 |
--------------------------------------------------------------------------------
/docs/_includes/menu-bar.html:
--------------------------------------------------------------------------------
1 |
7 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 | Facebook has adopted a Code of Conduct that we expect project participants to adhere to. Please [read the full text](https://code.facebook.com/codeofconduct) so that you can understand what actions will and will not be tolerated.
3 |
4 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [aliases]
2 | test=pytest
3 |
4 | [tool:pytest]
5 | addopts = --verbose --cov dohproxy --cov-fail-under 35 --cov-report html --cov-report term
6 |
7 | [flake8]
8 | ignore = E121,E123,E126,E226,E24,E704,W503,W504
9 | max-complexity = -1
10 | max-line-length = 79
11 |
--------------------------------------------------------------------------------
/dohproxy/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | __version__ = "0.0.9"
11 |
--------------------------------------------------------------------------------
/docs/assets/css/style.scss:
--------------------------------------------------------------------------------
1 | ---
2 | ---
3 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
4 |
5 | @import "{{ site.theme }}";
6 |
7 | #menu_links {
8 | }
9 |
10 | #menu_links a{
11 | color: #F2F2F2;
12 | text-decoration: underline;
13 | font-weight: bold;
14 | padding: 5px;
15 | }
16 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: DOH Proxy
3 | title: DOH Proxy
4 | description: A DNS-over-HTTPS Proxy
5 | repository: facebookexperimental/doh-proxy
6 | theme: jekyll-theme-slate
7 | timezone: America/Los_Angeles
8 |
9 | sass:
10 | style: compressed
11 | markdown: kramdown
12 |
13 | plugins:
14 | - jekyll-relative-links
15 |
16 | relative_links:
17 | enabled: true
18 | collections: false
19 |
20 |
--------------------------------------------------------------------------------
/dohproxy/constants.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | DOH_URI = "/dns-query"
11 | DOH_MEDIA_TYPE = "application/dns-message"
12 | DOH_DNS_PARAM = "dns"
13 | DOH_H2_NPN_PROTOCOLS = ["h2"]
14 | DOH_CIPHERS = "ECDHE+AESGCM"
15 |
--------------------------------------------------------------------------------
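The constants above define the DOH endpoint path, media type, and the name of the GET query parameter. As a rough illustration of how they combine into a DOH GET URL (a sketch only, independent of the project's own `utils.build_query_params` helper; `dns.example.com` is a placeholder host):

```python
import base64

import dns.message

from dohproxy import constants

# Build a wire-format DNS query for example.com / AAAA.
dnsq = dns.message.make_query(qname="example.com", rdtype="AAAA")
dnsq.id = 0  # a fixed ID keeps identical queries HTTP-cache friendly

# The GET form carries the query as unpadded base64url in the "dns" parameter.
encoded = base64.urlsafe_b64encode(dnsq.to_wire()).rstrip(b"=").decode()
url = "https://dns.example.com{}?{}={}".format(
    constants.DOH_URI, constants.DOH_DNS_PARAM, encoded
)
print(url)
```

In the project itself, this construction happens in `StubServerProtocol._make_get_path` via `utils.build_query_params` (see `dohproxy/client_protocol.py` below).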
/docs/tutorials.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | ---
4 |
5 | # Tutorials
6 |
7 | A list of tutorials/how-tos to configure `doh-proxy`, either on the client side or server side.
8 |
9 | [Pull requests](https://github.com/facebookexperimental/doh-proxy/pulls) with new tutorials are welcome.
10 |
11 | * [A simple setup](tutorials/simple-setup.md): Explains how to run both the proxy and stub.
12 | * [doh-httpproxy with NGINX on CentOS7](tutorials/nginx-dohhttpproxy-unbound-centos7.md): A comprehensive tutorial on setting up `doh-httpproxy` behind an `NGINX` reverse proxy on `CentOS7` and using a local `unbound` recursive resolver.
13 | * [DOH with Firefox Nightly](tutorials/firefox-nightly-doh-proxy.md): Explains how to make Firefox Nightly use DNS over HTTPS.
14 |
15 |
--------------------------------------------------------------------------------
/docs/tutorials/firefox-nightly-doh-proxy.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | ---
4 |
5 | # DNS-over-HTTPS with Firefox Nightly
6 |
7 | [Firefox Nightly](https://www.mozilla.org/en-US/firefox/channel/desktop/) has started implementing a DOH client, known as [Trusted Recursive Resolver](https://bugzilla.mozilla.org/show_bug.cgi?id=1434852) (TRR).
8 |
9 | Assuming there is a DOH server accessible at `https://dns.example.com/dns-query` (or see [how to set up your own](nginx-dohhttpproxy-unbound-centos7.md)), it is now possible to start using DNS over HTTPS in Firefox by following these steps:
10 |
11 | * go to `about:config`
12 | * search for `network.trr`
13 | * change `network.trr.uri` to `https://dns.example.com/dns-query`
14 | * change `network.trr.mode` to `2` (use DOH, but fallback to native resolver)
15 | * optionally, change `network.trr.bootstrapAddress` to the IP of the DOH server to avoid any bootstrapping issue. If you don't, Firefox will use the native resolver to get the DOH server IP.
16 |
17 | [@bagder](https://twitter.com/bagder) has a gist with more details on those [network.trr parameters](https://gist.github.com/bagder/5e29101079e9ac78920ba2fc718aceec).
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD License
2 |
3 | For DOH Proxy software
4 |
5 | Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
6 |
7 | Redistribution and use in source and binary forms, with or without modification,
8 | are permitted provided that the following conditions are met:
9 |
10 | * Redistributions of source code must retain the above copyright notice, this
11 | list of conditions and the following disclaimer.
12 |
13 | * Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 |
17 | * Neither the name Facebook nor the names of its contributors may be used to
18 | endorse or promote products derived from this software without specific
19 | prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
22 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
25 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DOH Proxy
2 | We want to make contributing to this project as easy and transparent as
3 | possible.
4 |
5 | ## Code of Conduct
6 | The code of conduct is described in [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md).
7 |
8 | ## Our Development Process
9 | DOH Proxy is developed on GitHub; contributions can be made via pull requests.
10 |
11 | ## Pull Requests
12 | We actively welcome your pull requests.
13 |
14 | 1. Fork the repo and create your branch from `master`.
15 | 2. If you've added code that should be tested, add tests.
16 | 3. If you've changed APIs, update the documentation.
17 | 4. Ensure the test suite passes.
18 | 5. Make sure your code lints.
19 | 6. If you haven't already, complete the Contributor License Agreement ("CLA").
20 |
21 | ## Contributor License Agreement ("CLA")
22 | In order to accept your pull request, we need you to submit a CLA. You only need
23 | to do this once to work on any of Facebook's open source projects.
24 |
25 | Complete your CLA here:
26 |
27 | ## Issues
28 | We use GitHub issues to track public bugs. Please ensure your description is
29 | clear and has sufficient instructions to be able to reproduce the issue.
30 |
31 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
32 | disclosure of security bugs. In those cases, please go through the process
33 | outlined on that page and do not file a public issue.
34 |
35 | ## Coding Style
36 | * 4 spaces for indentation rather than tabs
37 | * 80 character line length
38 |
39 | ## License
40 | By contributing to DOH Proxy, you agree that your contributions will be licensed
41 | under the LICENSE file in the root directory of this source tree.
42 |
--------------------------------------------------------------------------------
/.github/workflows/linter.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###########################
3 | ###########################
4 | ## Linter GitHub Actions ##
5 | ###########################
6 | ###########################
7 | name: Lint Code Base
8 |
9 | #
10 | # Documentation:
11 | # https://help.github.com/en/articles/workflow-syntax-for-github-actions
12 | #
13 |
14 | #############################
15 | # Start the job on all push #
16 | #############################
17 | on:
18 | push:
19 | pull_request:
20 | branches: [master]
21 |
22 | ###############
23 | # Set the Job #
24 | ###############
25 | jobs:
26 | build:
27 | # Name the Job
28 | name: Lint Code Base
29 | # Set the agent to run on
30 | runs-on: ubuntu-latest
31 |
32 | ##################
33 | # Load all steps #
34 | ##################
35 | steps:
36 | ##########################
37 | # Checkout the code base #
38 | ##########################
39 | - name: Checkout Code
40 | uses: actions/checkout@v2
41 | with:
42 | # Full git history is needed to get a proper list of changed files within `super-linter`
43 | fetch-depth: 0
44 |
45 | ################################
46 | # Run Linter against code base #
47 | ################################
48 | - name: Lint Code Base
49 | uses: github/super-linter@v3
50 | env:
51 | VALIDATE_ALL_CODEBASE: false
52 | # only lint Python files in source and test directories
53 | FILTER_REGEX_INCLUDE: (dohproxy|test)/.*.py
54 | # Code base currently not pylint compliant
55 | VALIDATE_PYTHON_PYLINT: false
56 | DEFAULT_BRANCH: master
57 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
58 |
--------------------------------------------------------------------------------
/dohproxy/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 |
11 | import dns.message
12 | from dohproxy import client_protocol, utils
13 |
14 |
15 | class Client(client_protocol.StubServerProtocol):
16 | def on_answer(self, addr, msg):
17 | try:
18 | print(dns.message.from_wire(msg))
19 | except Exception:
20 | self.logger.exception(msg)
21 |
22 |
23 | def parse_args():
24 | parser = utils.client_parser_base()
25 | parser.add_argument(
26 | "--qname",
27 | default="example.com",
28 | help="Name to query for. Default [%(default)s]",
29 | )
30 | parser.add_argument(
31 | "--qtype", default="AAAA", help="Type of query. Default [%(default)s]",
32 | )
33 | parser.add_argument(
34 | "--dnssec", action="store_true", help="Enable DNSSEC validation."
35 | )
36 | return parser.parse_args()
37 |
38 |
39 | def build_query(args):
40 | dnsq = dns.message.make_query(
41 | qname=args.qname, rdtype=args.qtype, want_dnssec=args.dnssec,
42 | )
43 | dnsq.id = 0  # a fixed query ID keeps DOH responses HTTP-cache friendly
44 | return dnsq
45 |
46 |
47 | def main_sync(args):
48 | logger = utils.configure_logger("doh-client", level=args.level)
49 | client = Client(args=args, logger=logger)
50 |
51 | loop = asyncio.get_event_loop()
52 | loop.run_until_complete(client.make_request(None, build_query(args)))
53 |
54 |
55 | def main():
56 | args = parse_args()
57 | main_sync(args)
58 |
59 |
60 | if __name__ == "__main__":
61 | main()
62 |
--------------------------------------------------------------------------------
/docs/tutorials/simple-setup.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | ---
4 |
5 | In this example, we will assume the following setup:
6 |
7 | * A digital certificate for `dns.example.com`
8 | * cert file at `/etc/certs/dns.example.com/fullchain.pem`
9 | * key file at `/etc/certs/dns.example.com/privkey.pem`
10 | * A DNS resolver that listens on ::1 port 53.
11 | * A `server` that will be running `doh-proxy`; this is the host to which the traffic
12 | will be sent encrypted and which will perform the DNS requests on our behalf.
13 | In this example, the server is running at `fdbe:7d77:b04f:a2ca::1/64`
14 | * A `client` that will run the `doh-stub`. We will configure our DNS queries to
15 | be sent to the stub, which in turn will forward them encrypted to our DOH server.
16 |
17 | This document focuses on the `doh-proxy` tools' arguments and where they
18 | should be run. The specifics of configuring a DNS recursive resolver or reverse
19 | proxy are outside the scope of this document and are already extensively
20 | covered on the Internet.
21 |
22 | # Simple setup
23 |
24 | ## Running the proxy
25 |
26 | On the `server`, we run the `doh-proxy` as root:
27 |
28 | ```shell
29 | $ sudo doh-proxy \
30 | --certfile /etc/certs/dns.example.com/fullchain.pem \
31 | --keyfile /etc/certs/dns.example.com/privkey.pem \
32 | --upstream-resolver ::1
33 | ```
34 |
35 | ## Running the client stub
36 |
37 | On the `client`:
38 | ```shell
39 | $ sudo doh-stub \
40 | --domain dns.example.com \
41 | --remote-address fdbe:7d77:b04f:a2ca::1 \
42 | --listen-address ::1
43 | ```
44 |
45 | You can test it by running a `dig` on the `client`:
46 | ```shell
47 | $ dig @::1 example.com
48 | ```
49 |
50 | To start using it, update `/etc/resolv.conf` and change `nameserver` to be:
51 | ```
52 | nameserver ::1
53 | ```
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #######################
3 | #######################
4 | ## CI GitHub Actions ##
5 | #######################
6 | #######################
7 | name: Test Code Base
8 |
9 | #
10 | # Documentation:
11 | # https://help.github.com/en/articles/workflow-syntax-for-github-actions
12 | #
13 |
14 | #############################
15 | # Start the job on all push #
16 | #############################
17 | on:
18 | push:
19 | pull_request:
20 | branches: [master]
21 |
22 | ###############
23 | # Set the Job #
24 | ###############
25 | jobs:
26 | build:
27 | # Name the Job
28 | name: Test Code Base
29 | # Set the agent to run on
30 | runs-on: ubuntu-latest
31 | strategy:
32 | matrix:
33 | python-version: ['3.6', '3.7', '3.8']
34 |
35 | ##################
36 | # Load all steps #
37 | ##################
38 | steps:
39 | ##########################
40 | # Checkout the code base #
41 | ##########################
42 | - name: Checkout Code
43 | uses: actions/checkout@v2
44 | with:
45 | # Full git history is needed to get a proper list of changed files within `super-linter`
46 | fetch-depth: 0
47 |
48 | ##############################
49 | # Install Python Environments #
50 | ##############################
51 | - name: Install Python
52 | uses: actions/setup-python@v2
53 | with:
54 | python-version: ${{ matrix.python-version }}
55 | ###############################
56 | # Run Test against code base #
57 | ###############################
58 | - name: Install Dependencies
59 | run: |
60 | python setup.py develop
61 | pip install git+https://github.com/URenko/aioh2#egg=aioh2
62 | - name: Test Code Base
63 | run: python setup.py test
64 | ####################
65 | # Install binaries #
66 | ####################
67 | - name: Install Binaries
68 | run: python setup.py install
69 | - name: Test dependencies
70 | run: |
71 | doh-proxy -h
72 | doh-httpproxy -h
73 | doh-stub -h
74 | doh-client -h
75 | - name: Build Wheel
76 | run: |
77 | pip install wheel
78 | python setup.py bdist_wheel
79 |
--------------------------------------------------------------------------------
/dohproxy/stub.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 |
11 | from dohproxy import client_protocol, utils
12 |
13 | # CLIENT_STORE['client'] is shared by all handlers.
14 | CLIENT_STORE = {"client": None}
15 |
16 |
17 | def parse_args():
18 | parser = utils.client_parser_base()
19 | parser.add_argument(
20 | "--listen-port",
21 | default=53,
22 | help="The port the stub should listen on. Default: [%(default)s]",
23 | )
24 | parser.add_argument(
25 | "--listen-address",
26 | default=["::1"],
27 | nargs="+",
28 | help="A list of addresses the proxy should listen on. "
29 | '"all" for all detected interfaces and addresses (netifaces '
30 | "required). Default: [%(default)s]",
31 | )
32 |
33 | return parser.parse_args()
34 |
35 |
36 | def main():
37 | args = parse_args()
38 | logger = utils.configure_logger("doh-stub", args.level)
39 | loop = asyncio.get_event_loop()
40 |
41 | if "all" in args.listen_address:
42 | listen_addresses = utils.get_system_addresses()
43 | else:
44 | listen_addresses = args.listen_address
45 |
46 | transports = []
47 | for address in listen_addresses:
48 | logger.info("Starting UDP server: {}".format(address))
49 | # One protocol instance will be created to serve all client requests
50 | # for this UDP listen address
51 | cls = client_protocol.StubServerProtocolUDP
52 | listen = loop.create_datagram_endpoint(
53 | lambda: cls(args, logger=logger, client_store=CLIENT_STORE),
54 | local_addr=(address, args.listen_port),
55 | )
56 | transport, proto = loop.run_until_complete(listen)
57 | transports.append(transport)
58 | loop.run_until_complete(proto.get_client())
59 |
60 | logger.info("Starting TCP server: {}".format(address))
61 | cls = client_protocol.StubServerProtocolTCP
62 | listen_tcp = loop.create_server(
63 | lambda: cls(args, logger=logger, client_store=CLIENT_STORE),
64 | host=address,
65 | port=args.listen_port,
66 | )
67 | server_tcp = loop.run_until_complete(listen_tcp)
68 | transports.append(server_tcp)
69 |
70 | try:
71 | loop.run_forever()
72 | except KeyboardInterrupt:
73 | pass
74 |
75 | for transport in transports:
76 | transport.close()
77 | loop.close()
78 |
79 |
80 | if __name__ == "__main__":
81 | main()
82 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import re
10 | from codecs import open
11 | from os import path
12 |
13 | from setuptools import setup
14 |
15 | here = path.abspath(path.dirname(__file__))
16 |
17 | # Get the long description from the README file
18 | with open(path.join(here, "README.md"), encoding="utf-8") as f:
19 | long_description = f.read()
20 |
21 |
22 | def read(*parts):
23 | with open(path.join(here, *parts), "r") as fp:
24 | return fp.read()
25 |
26 |
27 | def find_version(*file_paths):
28 | version_file = read(*file_paths)
29 | version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.M)
30 | if version_match:
31 | return version_match.group(1)
32 | raise RuntimeError("Unable to find version string.")
33 |
34 |
35 | setup(
36 | name="doh-proxy",
37 | version=find_version("dohproxy", "__init__.py"),
38 | description="A client and proxy implementation of "
39 | "https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-13",
40 | long_description=long_description,
41 | long_description_content_type="text/markdown",
42 | url="https://github.com/facebookexperimental/doh-proxy",
43 | author="Manu Bretelle",
44 | author_email="chantra@fb.com",
45 | license="BSD",
46 | classifiers=[
47 | "Development Status :: 3 - Alpha",
48 | "Intended Audience :: Developers",
49 | "License :: OSI Approved :: BSD License",
50 | "Programming Language :: Python :: 3.5",
51 | "Programming Language :: Python :: 3.6",
52 | "Topic :: Internet :: Name Service (DNS)",
53 | "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
54 | "Topic :: Security :: Cryptography",
55 | "Topic :: Utilities",
56 | ],
57 | keywords="doh proxy dns https",
58 | packages=["dohproxy"],
59 | setup_requires=["flake8", "pytest-runner"],
60 | extras_require={"integration_tests": ["colour-runner"]},
61 | install_requires=[
62 | "aiohttp < 4.0.0",
63 | "dnspython",
64 | "aiohttp_remotes >= 0.1.2",
65 | ],
66 | tests_require=[
67 | "asynctest",
68 | "pytest",
69 | "pytest-aiohttp",
70 | "pytest-cov",
71 | "unittest-data-provider",
72 | ],
73 | entry_points={
74 | "console_scripts": [
75 | "doh-client = dohproxy.client:main",
76 | "doh-proxy = dohproxy.proxy:main",
77 | "doh-httpproxy = dohproxy.httpproxy:main",
78 | "doh-stub = dohproxy.stub:main",
79 | ],
80 | },
81 | )
82 |
--------------------------------------------------------------------------------
/docs/_layouts/default.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 | {% seo %}
13 |
14 |
15 |
16 |
17 |
18 |
34 |
35 |
36 |
37 |
40 |
41 |
42 |
43 |
52 |
53 | {% if site.google_analytics %}
54 |
62 | {% endif %}
63 |
64 |
65 |
--------------------------------------------------------------------------------
/test/test_knownservers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | import asyncio
11 | import os
12 | import unittest
13 |
14 | import asynctest
15 | import dns
16 | import dns.message
17 | from dohproxy import client_protocol, utils
18 |
19 | TEST_TIMEOUT = 3.0
20 | TRAVIS_TIMEOUT = 15.0
21 |
22 | METHOD_GET = 1
23 | METHOD_POST = 2
24 | METHOD_BOTH = 3
25 |
26 |
27 | def known_servers():
28 | """
29 | List of servers taken from
30 | https://github.com/curl/curl/wiki/DNS-over-HTTPS#publicly-available-servers
31 | """
32 | return [
33 | # Name, Domain, endpoint
34 | ("Google", "dns.google", "/dns-query", METHOD_BOTH),
35 | ("Cloudflare", "cloudflare-dns.com", "/dns-query", METHOD_BOTH),
36 | ("CleanBrowsing", "doh.cleanbrowsing.org", "/doh/family-filter/", METHOD_BOTH),
37 | # Currently off
38 | # ('@chantra', 'dns.dnsoverhttps.net', '/dns-query', METHOD_BOTH),
39 | ("@jedisct1", "doh.crypto.sx", "/dns-query", METHOD_GET),
40 | # Timeout
41 | # ('SecureDNS.eu', 'doh.securedns.eu', '/dns-query', METHOD_BOTH),
42 | ("BlahDNS.com JP", "doh-jp.blahdns.com", "/dns-query", METHOD_BOTH),
43 | ("BlahDNS.com DE", "doh-de.blahdns.com", "/dns-query", METHOD_BOTH),
44 | ("NekomimiRouter.com", "dns.dns-over-https.com", "/dns-query", METHOD_BOTH),
45 | ]
46 |
47 |
48 | def build_query(qname, qtype):
49 | dnsq = dns.message.make_query(qname=qname, rdtype=qtype,)
50 | dnsq.id = 0
51 | return dnsq
52 |
53 |
54 | class Client(client_protocol.StubServerProtocol):
55 | result = None
56 |
57 | def on_answer(self, addr, msg):
58 | self.result = dns.message.from_wire(msg)
59 |
60 |
61 | class TestKnownServers(asynctest.TestCase):
62 | def setUp(self):
63 | super().setUp()
64 | # ALPN requires >=openssl-1.0.2
65 | # NPN requires >=openssl-1.0.1
66 | self.test_timeout = TEST_TIMEOUT
67 | if os.getenv("TRAVIS"):
68 | self.test_timeout = TRAVIS_TIMEOUT
69 | for fn in ["set_alpn_protocols"]:
70 | patcher = unittest.mock.patch("ssl.SSLContext.{0}".format(fn))
71 | patcher.start()
72 | self.addCleanup(patcher.stop)
73 |
74 | async def _test_servers(self, post=False):
75 | for name, domain, uri, methods in known_servers():
76 | if post and not methods & METHOD_POST:
77 | continue
78 | if not post and not methods & METHOD_GET:
79 | continue
80 | with self.subTest(name):
81 | arglist = [
82 | "--domain",
83 | domain,
84 | "--uri",
85 | uri,
86 | ]
87 | if post:
88 | arglist.append("--post")
89 | parser = utils.client_parser_base()
90 | args = parser.parse_args(arglist)
91 | logger = utils.configure_logger("doh-integrationtest")
92 | c = Client(args=args, logger=logger)
93 | fut = c.make_request(None, build_query(qname=domain, qtype="A"))
94 | try:
95 | await asyncio.wait_for(fut, self.test_timeout)
96 | except asyncio.TimeoutError:
97 | raise unittest.SkipTest("%s timed out" % name)
98 | self.assertEqual(1, len(c.result.question))
99 | self.assertGreater(len(c.result.answer), 0)
100 |
101 | async def test_servers_get(self):
102 | await self._test_servers(post=False)
103 |
104 | async def test_servers_post(self):
105 | await self._test_servers(post=True)
106 |
--------------------------------------------------------------------------------
/test/test_protocol.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 | import struct
11 | import unittest
12 | from unittest.mock import patch
13 |
14 | import dns
15 | import dns.message
16 | from dohproxy.server_protocol import DNSClientProtocolTCP
17 |
18 |
19 | class TCPTestCase(unittest.TestCase):
20 | def setUp(self):
21 | self.dnsq = dns.message.make_query("www.example.com", dns.rdatatype.ANY)
22 | self.dnsr = dns.message.make_response(self.dnsq)
23 | self.response = self.dnsr.to_wire()
24 |
25 | @patch.object(DNSClientProtocolTCP, "receive_helper")
26 | def test_single_valid(self, m_rcv):
27 | data = struct.pack("!H", len(self.response)) + self.response
28 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
29 | client_tcp.data_received(data)
30 | m_rcv.assert_called_with(self.dnsr)
31 |
32 | @patch.object(DNSClientProtocolTCP, "receive_helper")
33 | def test_two_valid(self, m_rcv):
34 | data = struct.pack("!H", len(self.response)) + self.response
35 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
36 | client_tcp.data_received(data + data)
37 | m_rcv.assert_called_with(self.dnsr)
38 | self.assertEqual(m_rcv.call_count, 2)
39 |
40 | @patch.object(DNSClientProtocolTCP, "receive_helper")
41 | def test_partial_valid(self, m_rcv):
42 | data = struct.pack("!H", len(self.response)) + self.response
43 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
44 | client_tcp.data_received(data[0:5])
45 | m_rcv.assert_not_called()
46 | client_tcp.data_received(data[5:])
47 | m_rcv.assert_called_with(self.dnsr)
48 |
49 | @patch.object(DNSClientProtocolTCP, "receive_helper")
50 | def test_len_byte(self, m_rcv):
51 | data = struct.pack("!H", len(self.response)) + self.response
52 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
53 | client_tcp.data_received(data[0:1])
54 | m_rcv.assert_not_called()
55 | client_tcp.data_received(data[1:])
56 | m_rcv.assert_called_with(self.dnsr)
57 |
58 | @patch.object(DNSClientProtocolTCP, "receive_helper")
59 | def test_complex(self, m_rcv):
60 | data = struct.pack("!H", len(self.response)) + self.response
61 | length = len(data)
62 | data = data * 3
63 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
64 | client_tcp.data_received(data[0 : length - 3])
65 | client_tcp.data_received(data[length - 3 : length + 1])
66 | m_rcv.assert_called_with(self.dnsr)
67 | m_rcv.reset_mock()
68 | client_tcp.data_received(data[length + 1 : 2 * length])
69 | m_rcv.assert_called_with(self.dnsr)
70 | m_rcv.reset_mock()
71 | client_tcp.data_received(data[2 * length :])
72 | m_rcv.assert_called_with(self.dnsr)
73 |
74 | @patch.object(DNSClientProtocolTCP, "receive_helper")
75 | def test_single_long(self, m_rcv):
76 | data = struct.pack("!H", len(self.response) - 3) + self.response
77 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
78 | with self.assertRaises(dns.exception.FormError):
79 | client_tcp.data_received(data)
80 |
81 | @patch.object(DNSClientProtocolTCP, "receive_helper")
82 | def test_single_short(self, m_rcv):
83 | data = struct.pack("!H", len(self.response) + 3) + self.response
84 | client_tcp = DNSClientProtocolTCP(self.dnsq, [], "10.0.0.0")
85 | client_tcp.data_received(data)
86 | m_rcv.assert_not_called()
87 |
88 | def test_cancelled_future(self):
89 | """Ensures that cancelled futures are handled appropriately."""
90 | data = struct.pack("!H", len(self.response)) + self.response
91 |
92 | mock_future = unittest.mock.MagicMock(asyncio.Future)
93 | client_tcp = DNSClientProtocolTCP(self.dnsq, mock_future, "10.0.0.0")
94 | client_tcp.time_stamp = 1000000
95 |
96 | # If the future is cancelled, set_result raises InvalidStateError.
97 | mock_future.set_result.side_effect = asyncio.InvalidStateError(
98 | "CANCELLED: "
99 | )
100 | client_tcp.data_received(data)
101 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [Unreleased]
4 |
5 | ### Fixed
6 | - fix unittests pattern matching. GH #78
7 | - force aiohttp < 4.0.0. GH #78
8 | - close transport on Timeout. GH #79
9 | - don't decode body twice. GH #83 @tiran
10 | - handle get\_extra\_info('peername') being None. GH #89
11 | - add legal link to web site. GH #90
12 | - define flake8 defaults. GH #91 @rfinnie
13 | - set `Accept` header in client queries. GH #95
14 |
15 | ### Changes
16 | - improve logging. GH #87
17 | - support multiple --listen-address. GH #85 @rfinnie
18 | - Add support for ECS. GH #88 @rfinnie
19 |
20 | ## [0.0.9] - 2019-07-04
21 |
22 | ### Fixed
23 | - fix copyright headers. GH #51
24 | - fix flake8 error
25 | - loglevel (--level) was ignored in DNSClient. GH #58
26 | - Do not set_result when coroutine is already cancelled. GH #59
27 | - Remove NPN support. GH #64
28 | - Properly close UDP transport after an exception occurred. GH #66
29 |
30 | ## [0.0.8] - 2018-08-14
31 |
32 | ### Changes
33 | - [doc] don't use `sudo` when not required. @jpmens
34 | - version bump to get markdown rendering on pypi.
35 |
36 | ## [0.0.7] - 2018-08-13
37 |
38 | ### Fixed
39 | - Handle dns message with empty question section GH #21
40 | - Make https://pypi.org/project/doh-proxy/ display description using markdown syntax.
41 |
42 | ### Changes
43 | - separate server side protocol classes from client side ones
44 | - Support for [draft-13](https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-13). @bagder
45 | - DNSClientProtocol is now an async friendly class which will retry over TCP on timeout and/or TC bit set. @newEZ
46 | - Both `doh-httpproxy` and `doh-proxy` now use the new DNSClient @newEZ and @chantra
47 |
48 | ### Added
49 | - support listening from multiple IPs for proxy services.
50 | - Added support for TLS in `doh-httpproxy` @lucasvasconcelos
51 | - Pass optional `cafile` to `doh-stub` to be able to connect to service using custom CA @fim
52 |
53 | ## [0.0.6] - 2018-02-20
54 |
55 | ### Added
56 | - custom upstream port option GH #16
57 | - display version with --version
58 |
59 | ### Fixed
60 | - set :scheme pseudo-header correctly. GH #17
61 |
62 | ## [0.0.5] - 2018-02-05
63 |
64 | ### Added
65 | - Unittest coverage of httpproxy.py
66 |
67 | ### Changes
68 | - @jedisct1 change DOH_BODY_PARAM to `dns` to match draft-ietf-doh-dns-over-https-03
69 | - removed .well-known from default URI GH #15
70 |
71 | ### Fixed
72 | - support POST in doh-httpproxy. GH #12
73 |
74 |
75 | ## [0.0.4] - 2018-01-27
76 |
77 | ### Fixed
78 | - Create new connection on TooManyStreamsError to work around GH decentfox/aioh2#16
79 |
80 | ### Changes
81 | - ensure only 1 client is initialized at a time.
82 |
83 | ## [0.0.3] - 2018-01-17
84 |
85 | ### Fixed
86 | - proxy: handle empty ct parameter
87 |
88 | ### Changed
89 | - proxies and stub will listen to ::1 by default.
90 | - proxies better handle malformed DNS messages
91 |
92 | ## [0.0.2] - 2018-01-16
93 | ### Added
94 | - Travis CI
95 | - Support multiple query over the same HTTP2 connection.
96 | - started adding some unittests to utils.py
97 | - `dohproxy/httpproxy.py`, an HTTP1 proxy to run as a reverse proxy backend.
98 |
99 | ### Changed
100 | - code refactor between stub and client
101 | - use logging modules instead of rogue prints
102 | - stub and client now use the same StubServerProtocol as the base to perform
103 | queries.
104 | - proxy: use logging module instead of print
105 | - doc: improved documentation and provide example setups.
106 |
107 | ### Removed
108 | - dependency on hyper package
109 |
110 | ### Fixed
111 | - doh-proxy: properly import dohproxy.protocol
112 | - doh-client: properly set entry_point
113 |
114 |
115 | ## 0.0.1 - 2018-01-11
116 | ### Added
117 | - Proxy script `dohproxy/proxy.py`
118 | - Stub script `dohproxy/stub.py`
119 | - Test client script `dohproxy/client.py`
120 | - setuptools' setup.py
121 | - doc
122 | - CHANGELOG.md and README.md
123 |
124 | [Unreleased]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.9...HEAD
125 | [0.0.9]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.8...v0.0.9
126 | [0.0.8]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.7...v0.0.8
127 | [0.0.7]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.6...v0.0.7
128 | [0.0.6]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.5...v0.0.6
129 | [0.0.5]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.4...v0.0.5
130 | [0.0.4]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.3...v0.0.4
131 | [0.0.3]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.2...v0.0.3
132 | [0.0.2]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.1...v0.0.2
133 |
--------------------------------------------------------------------------------
/dohproxy/httpproxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 | import time
11 | from argparse import ArgumentParser, Namespace
12 |
13 | import aiohttp.web
14 | import aiohttp_remotes
15 | import dns.message
16 | import dns.rcode
17 | from dohproxy import constants, utils
18 | from dohproxy.server_protocol import (
19 | DNSClient,
20 | DOHDNSException,
21 | DOHParamsException,
22 | )
23 | from multidict import CIMultiDict
24 |
25 |
26 | def parse_args(args=None):
27 | parser = utils.proxy_parser_base(port=80, secure=False)
28 | parser.add_argument(
29 | "--trusted",
30 | nargs="*",
31 | default=["::1", "127.0.0.1"],
32 | help="Trusted reverse proxy list separated by space %(default)s. \
33 | If you do not want to add a trusted reverse proxy, \
34 | just specify this flag with empty parameters.",
35 | )
36 | return parser, parser.parse_args(args=args)
37 |
38 |
39 | async def doh1handler(request):
40 | path, params = utils.extract_path_params(request.rel_url.path_qs)
41 |
42 | if request.method in ["GET", "HEAD"]:
43 | try:
44 | ct, body = utils.extract_ct_body(params)
45 | except DOHParamsException as e:
46 | return aiohttp.web.Response(status=400, body=e.body())
47 | elif request.method == "POST":
48 | body = await request.content.read()
49 | ct = request.headers.get("content-type")
50 | else:
51 | return aiohttp.web.Response(status=501, body=b"Not Implemented")
52 | if ct != constants.DOH_MEDIA_TYPE:
53 | return aiohttp.web.Response(status=415, body=b"Unsupported content type")
54 |
55 | # Do actual DNS Query
56 | try:
57 | dnsq = utils.dns_query_from_body(body, debug=request.app.debug)
58 | except DOHDNSException as e:
59 | return aiohttp.web.Response(status=400, body=e.body())
60 |
61 | clientip = utils.get_client_ip(request.transport)
62 | request.app.logger.info(
63 | "[HTTPS] {} (Original IP: {}) {}".format(
64 | clientip, request.remote, utils.dnsquery2log(dnsq)
65 | )
66 | )
67 | return await request.app.resolve(request, dnsq)
68 |
69 |
70 | class DOHApplication(aiohttp.web.Application):
71 | def set_upstream_resolver(self, upstream_resolver, upstream_port):
72 | self.upstream_resolver = upstream_resolver
73 | self.upstream_port = upstream_port
74 |
75 | def set_ecs(self, ecs):
76 | self.ecs = ecs
77 |
78 | async def resolve(self, request, dnsq):
79 | self.time_stamp = time.time()
80 | clientip = request.remote
81 | dnsclient = DNSClient(
82 | self.upstream_resolver, self.upstream_port, logger=self.logger
83 | )
84 | dnsr = await dnsclient.query(dnsq, clientip, ecs=self.ecs)
85 |
86 | if dnsr is None:
87 | return self.on_answer(request, dnsq=dnsq)
88 | else:
89 | return self.on_answer(request, dnsr=dnsr)
90 |
91 | def on_answer(self, request, dnsr=None, dnsq=None):
92 | headers = CIMultiDict()
93 |
94 | if dnsr is None:
95 | dnsr = dns.message.make_response(dnsq)
96 | dnsr.set_rcode(dns.rcode.SERVFAIL)
97 | elif len(dnsr.answer):
98 | ttl = min(r.ttl for r in dnsr.answer)
99 | headers["cache-control"] = "max-age={}".format(ttl)
100 |
101 | clientip = utils.get_client_ip(request.transport)
102 | interval = int((time.time() - self.time_stamp) * 1000)
103 | self.logger.info(
104 | "[HTTPS] {} (Original IP: {}) {} {}ms".format(
105 | clientip, request.remote, utils.dnsans2log(dnsr), interval
106 | )
107 | )
108 | if request.method == "HEAD":
109 | body = b""
110 | else:
111 | body = dnsr.to_wire()
112 |
113 | return aiohttp.web.Response(
114 | status=200,
115 | body=body,
116 | content_type=constants.DOH_MEDIA_TYPE,
117 | headers=headers,
118 | )
119 |
120 |
121 | def setup_ssl(parser: ArgumentParser, options: Namespace):
122 | """ Setup the SSL Context """
123 | ssl_context = None
124 |
125 | # If SSL is wanted, both certfile and keyfile must
126 | # be passed
127 | if bool(options.certfile) ^ bool(options.keyfile):
128 | parser.error("To use SSL both --certfile and --keyfile must be passed")
129 | elif options.certfile and options.keyfile:
130 | ssl_context = utils.create_ssl_context(options)
131 |
132 | return ssl_context
133 |
134 |
135 | def get_app(args):
136 | logger = utils.configure_logger("doh-httpproxy", args.level)
137 | app = DOHApplication(logger=logger, debug=args.debug)
138 | app.set_upstream_resolver(args.upstream_resolver, args.upstream_port)
139 | app.set_ecs(args.ecs)
140 | app.router.add_get(args.uri, doh1handler)
141 | app.router.add_post(args.uri, doh1handler)
142 |
143 | # Get trusted reverse proxies and format it for aiohttp_remotes setup
144 | if len(args.trusted) == 0:
145 | x_forwarded_handling = aiohttp_remotes.XForwardedRelaxed()
146 | else:
147 | x_forwarded_handling = aiohttp_remotes.XForwardedStrict([args.trusted])
148 |
149 | asyncio.ensure_future(aiohttp_remotes.setup(app, x_forwarded_handling))
150 | return app
151 |
152 |
153 | def main():
154 | parser, args = parse_args()
155 | app = get_app(args)
156 |
157 | ssl_context = setup_ssl(parser, args)
158 | aiohttp.web.run_app(
159 | app, host=args.listen_address, port=args.port, ssl_context=ssl_context
160 | )
161 |
162 |
163 | if __name__ == "__main__":
164 | main()
165 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DNS Over HTTPS Proxy
2 |
3 | 
4 | [](https://github.com/marketplace/actions/super-linter)
5 | [](https://badge.fury.io/py/doh-proxy)
6 |
7 | A set of Python 3 scripts that support proxying DNS over HTTPS as specified
8 | in the [IETF Draft draft-ietf-doh-dns-over-https](https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-13).
9 |
10 | DOH provides a way to run encrypted DNS over HTTPS, a protocol which can freely
11 | traverse firewalls when other encrypted mechanisms may be blocked.
12 |
13 | The project comes with a set of 4 tools:
14 |
15 | * [doh-proxy](#doh-proxy): A service that receives DOH queries over HTTP2 and forwards them
16 | to a recursive resolver.
17 | * [doh-httpproxy](#doh-httpproxy): Like `doh-proxy` but uses HTTP instead of HTTP2.
18 | The main intent is to run this behind a reverse proxy.
19 | * [doh-stub](#doh-stub): A service that listens for DNS queries and forwards them to a DOH server.
20 | * [doh-client](#doh-client): A tool to perform a test DNS query against DOH server.
21 |
22 | See the `CONTRIBUTING` file for how to help out.
23 |
24 | DOH Proxy was created during [IETF Hackathon 100](https://www.ietf.org/how/runningcode/hackathons/100-hackathon/) as a proof-of-concept and is not used at Facebook.
25 |
26 | You are welcome to use it, but be aware that support is limited and best-effort.
27 |
28 | ## Installing
29 |
30 | To install an already packaged version directly from PyPi:
31 |
32 | ```shell
33 | $ pip3 install doh-proxy
34 | ```
35 |
36 | ## Usage
37 |
38 | ### doh-proxy
39 |
40 | `doh-proxy` is a stand-alone server answering DOH requests. The proxy does not do
41 | DNS recursion itself; rather, it forwards the query to a full-featured DNS
42 | recursive server or DNS caching server.
43 |
44 | By running `doh-proxy`, you can get an end-to-end DOH solution with minimal
45 | setup.
46 |
47 | ```shell
48 | $ sudo doh-proxy \
49 | --upstream-resolver=::1 \
50 | --certfile=./fullchain.pem \
51 | --keyfile=./privkey.pem
52 | ```
53 |
54 | ### doh-httpproxy
55 |
56 | `doh-httpproxy` is designed to run behind a reverse proxy. In this setup,
57 | a reverse proxy such as [NGINX](https://nginx.org/) handles the
58 | HTTPS/HTTP2 requests from the DOH clients and forwards them to
59 | `doh-httpproxy` backends.
60 |
61 | While this setup requires more upfront configuration, it allows running DOH proxy
62 | unprivileged and on multiple cores.
63 |
64 |
65 | ```shell
66 | $ doh-httpproxy \
67 | --upstream-resolver=::1 \
68 | --port 8080 \
69 | --listen-address ::1
70 | ```
71 |
72 | `doh-httpproxy` now also supports TLS, which you can enable by passing the
73 | `--certfile` and `--keyfile` arguments (just like `doh-proxy`).
74 |
75 | ### doh-stub
76 |
77 | `doh-stub` is the piece of software that you would run on the clients. By
78 | providing a local DNS server, `doh-stub` will forward the DNS requests it
79 | receives to a DOH server using an encrypted link.
80 |
81 | You can start a stub resolver with:
82 |
83 | ```shell
84 | $ doh-stub \
85 | --listen-port 5553 \
86 | --listen-address ::1 \
87 | --domain foo.bar \
88 | --remote-address ::1
89 | ```
90 |
91 | and query it.
92 |
93 | ```shell
94 | $ dig @::1 -p 5553 example.com
95 | ```
96 |
97 | ### doh-client
98 |
99 | `doh-client` is a simple test CLI that can be used to quickly send a request to
100 | a DOH server and dump the returned answer.
101 |
102 | ```shell
103 | $ doh-client \
104 | --domain dns.dnsoverhttps.net \
105 | --qname sigfail.verteiltesysteme.net \
106 | --dnssec
107 | id 37762
108 | opcode QUERY
109 | rcode SERVFAIL
110 | flags QR RD RA
111 | edns 0
112 | eflags DO
113 | payload 4096
114 | ;QUESTION
115 | sigfail.verteiltesysteme.net. IN AAAA
116 | ;ANSWER
117 | ;AUTHORITY
118 | ;ADDITIONAL
119 |
120 | $ doh-client \
121 | --domain dns.dnsoverhttps.net \
122 | --qname sigok.verteiltesysteme.net \
123 | --dnssec
124 | id 49772
125 | opcode QUERY
126 | rcode NOERROR
127 | flags QR RD RA AD
128 | edns 0
129 | eflags DO
130 | payload 4096
131 | ;QUESTION
132 | sigok.verteiltesysteme.net. IN AAAA
133 | ;ANSWER
134 | sigok.verteiltesysteme.net. 60 IN AAAA 2001:638:501:8efc::139
135 | sigok.verteiltesysteme.net. 60 IN RRSIG AAAA 5 3 60 20180130030002 20171031030002 30665 verteiltesysteme.net. O7QgNZFBu3fULvBXwM39apv5nMehh51f mLOVEsC8qZUyxIbxo4eDLQt0JvPoPpFH 5TbWdlm/jxq5x2/Kjw7yUdpohhiNmdoD Op7Y+RyHbf676FoC5Zko9uOAB7Pp8ERz qiT0QPt1ec12bM0XKQigfp+2Hy9wUuSN QmAzXS2s75k=
136 | ;AUTHORITY
137 | ;ADDITIONAL
138 | ```
139 |
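The same query can also be issued programmatically; the following is a minimal sketch modelled on `dohproxy/client.py`, with `dns.example.com` as a placeholder for a real DOH endpoint:

```python
import asyncio

import dns.message
from dohproxy import client_protocol, utils


class PrintingClient(client_protocol.StubServerProtocol):
    def on_answer(self, addr, msg):
        # msg is the wire-format answer returned by the DOH server.
        print(dns.message.from_wire(msg))


def main():
    parser = utils.client_parser_base()
    # Placeholder endpoint; point --domain at a real DOH server.
    args = parser.parse_args(["--domain", "dns.example.com"])
    logger = utils.configure_logger("doh-client-example", level=args.level)
    client = PrintingClient(args=args, logger=logger)

    dnsq = dns.message.make_query(qname="example.com", rdtype="AAAA")
    dnsq.id = 0

    loop = asyncio.get_event_loop()
    loop.run_until_complete(client.make_request(None, dnsq))


if __name__ == "__main__":
    main()
```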
140 | ## Development
141 |
142 |
143 | ### Requirements
144 |
145 | * python >= 3.5
146 | * aiohttp
147 | * aioh2
148 | * dnspython
149 |
150 | ### Building
151 |
152 | DOH Proxy uses Python's setuptools to manage dependencies and builds.
153 |
154 | To install its dependencies:
155 |
156 | ```shell
157 | $ python3 setup.py develop
158 | # Due to GH #63
159 | $ pip install git+https://github.com/URenko/aioh2#egg=aioh2
160 | ```
161 |
162 | To build:
163 | ```shell
164 | $ python3 setup.py build
165 | ```
166 |
167 | To run unittests:
168 | ```shell
169 | $ python3 setup.py test
170 | ```
171 |
172 | To run the linter:
173 |
174 | DOH Proxy uses the GitHub Action [Super-Linter](https://github.com/marketplace/actions/super-linter) to lint the code. To validate your code before pushing, you can run Super-Linter locally with the following command line from within the repository:
175 |
176 | ```shell
177 | docker run -e RUN_LOCAL=true -e VALIDATE_PYTHON_PYLINT=false \
178 | -e FILTER_REGEX_INCLUDE='(dohproxy|test)/.*.py' \
179 | -v $(pwd):/tmp/lint \
180 | --rm github/super-linter:v3
181 | ```
182 |
183 | From within the root of the repository, you can test the proxy, stub and client respectively
184 | by using the following commands:
185 |
186 | ```shell
187 | $ sudo PYTHONPATH=. ./dohproxy/proxy.py ...
188 | ```
189 |
190 | ```shell
191 | $ PYTHONPATH=. ./dohproxy/httpproxy.py ...
192 | ```
193 |
194 |
195 | ```shell
196 | $ PYTHONPATH=. ./dohproxy/stub.py ...
197 | ```
198 |
199 | ```shell
200 | $ PYTHONPATH=. ./dohproxy/client.py ...
201 | ```
202 |
203 | ## License
204 | DOH Proxy is BSD-licensed.
205 |
--------------------------------------------------------------------------------
/dohproxy/server_protocol.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 | import struct
11 | import time
12 |
13 | import dns.edns
14 | import dns.entropy
15 | import dns.message
16 | from dohproxy import utils
17 |
18 |
19 | class DOHException(Exception):
20 | def body(self):
21 | return self.args[0]
22 |
23 |
24 | class DOHParamsException(DOHException):
25 | pass
26 |
27 |
28 | class DOHDNSException(DOHException):
29 | pass
30 |
31 |
32 | class DNSClient:
33 |
34 | DEFAULT_TIMEOUT = 10
35 |
36 | def __init__(self, upstream_resolver, upstream_port, logger=None):
37 | self.loop = asyncio.get_event_loop()
38 | self.upstream_resolver = upstream_resolver
39 | self.upstream_port = upstream_port
40 | if logger is None:
41 | logger = utils.configure_logger("DNSClient", "DEBUG")
42 | self.logger = logger
43 | self.transport = None
44 |
45 | async def query(self, dnsq, clientip, timeout=DEFAULT_TIMEOUT, ecs=False):
46 | # (Potentially) modified copy of dnsq
47 | dnsq_mod = dns.message.from_wire(dnsq.to_wire())
48 | we_set_ecs = False
49 | if ecs:
50 | we_set_ecs = utils.set_dns_ecs(dnsq_mod, clientip)
51 |
52 | dnsr = await self.query_udp(dnsq_mod, clientip, timeout=timeout)
53 | if dnsr is None or (dnsr.flags & dns.flags.TC):
54 | dnsr = await self.query_tcp(dnsq_mod, clientip, timeout=timeout)
55 |
56 | if dnsr is not None and we_set_ecs:
57 | for option in dnsr.options:
58 | if isinstance(option, dns.edns.ECSOption):
59 | dnsr.options.remove(option)
60 | dnsr.edns = dnsq.edns
61 |
62 | return dnsr
63 |
64 | async def query_udp(self, dnsq, clientip, timeout=DEFAULT_TIMEOUT):
65 | qid = dnsq.id
66 | fut = asyncio.Future()
67 | transport, _ = await self.loop.create_datagram_endpoint(
68 | lambda: DNSClientProtocolUDP(dnsq, fut, clientip, logger=self.logger),
69 | remote_addr=(self.upstream_resolver, self.upstream_port),
70 | )
71 | return await self._try_query(fut, qid, timeout, transport)
72 |
73 | async def query_tcp(self, dnsq, clientip, timeout=DEFAULT_TIMEOUT):
74 | qid = dnsq.id
75 | fut = asyncio.Future()
76 | start_time = time.time()
77 | try:
78 | transport, _ = await asyncio.wait_for(
79 | self.loop.create_connection(
80 | lambda: DNSClientProtocolTCP(
81 | dnsq, fut, clientip, logger=self.logger
82 | ),
83 | self.upstream_resolver,
84 | self.upstream_port,
85 | ),
86 | timeout,
87 | )
88 | except asyncio.TimeoutError:
89 | self.logger.debug(
90 | "Timeout connecting to upstream resolver {}:{}".format(
91 | self.upstream_resolver, self.upstream_port
92 | )
93 | )
94 | return None
95 |
96 | end_time = time.time()
97 | return await self._try_query(
98 | fut, qid, timeout - (end_time - start_time), transport
99 | )
100 |
101 | async def _try_query(self, fut, qid, timeout, transport):
102 | try:
103 | await asyncio.wait_for(fut, timeout)
104 | dnsr = fut.result()
105 | dnsr.id = qid
106 | except asyncio.TimeoutError:
107 | self.logger.debug("Request timed out")
108 | if transport:
109 | transport.close()
110 | dnsr = None
111 | return dnsr
112 |
113 |
114 | class DNSClientProtocol(asyncio.Protocol):
115 | def __init__(self, dnsq, fut, clientip, logger=None):
116 | self.transport = None
117 | self.dnsq = dnsq
118 | self.fut = fut
119 | self.clientip = clientip
120 | if logger is None:
121 | logger = utils.configure_logger("DNSClientProtocol", "DEBUG")
122 | self.logger = logger
123 |
124 | def connection_lost(self, exc):
125 | pass
126 |
127 | def connection_made(self, transport):
128 | raise NotImplementedError()
129 |
130 | def data_received(self, data):
131 | raise NotImplementedError()
132 |
133 | def datagram_received(self, data, addr):
134 | raise NotImplementedError()
135 |
136 | def error_received(self, exc):
137 | raise NotImplementedError()
138 |
139 | def eof_received(self):
140 | raise NotImplementedError()
141 |
142 | def send_helper(self, transport):
143 | self.transport = transport
144 | self.dnsq.id = dns.entropy.random_16()
145 | self.logger.info(
146 | "[DNS] {} {}".format(self.clientip, utils.dnsquery2log(self.dnsq))
147 | )
148 | self.time_stamp = time.time()
149 |
150 | def receive_helper(self, dnsr):
151 | interval = int((time.time() - self.time_stamp) * 1000)
152 | log_message = "[DNS] {} {} {}ms".format(
153 | self.clientip, utils.dnsans2log(dnsr), interval
154 | )
155 |
156 | if not self.fut.cancelled():
157 | self.logger.info(log_message)
158 | self.fut.set_result(dnsr)
159 | else:
160 | self.logger.info(log_message + "(CANCELLED)")
161 |
162 |
163 | class DNSClientProtocolUDP(DNSClientProtocol):
164 | def connection_made(self, transport):
165 | self.send_helper(transport)
166 | self.transport.sendto(self.dnsq.to_wire())
167 |
168 | def datagram_received(self, data, addr):
169 | dnsr = dns.message.from_wire(data)
170 | self.receive_helper(dnsr)
171 | self.transport.close()
172 |
173 | def error_received(self, exc):
174 | self.transport.close()
175 | self.logger.exception("Error received: " + str(exc))
176 |
177 |
178 | class DNSClientProtocolTCP(DNSClientProtocol):
179 | def __init__(self, dnsq, fut, clientip, logger=None):
180 | super().__init__(dnsq, fut, clientip, logger=logger)
181 | self.buffer = bytes()
182 |
183 | def connection_made(self, transport):
184 | self.send_helper(transport)
185 | msg = self.dnsq.to_wire()
186 | tcpmsg = struct.pack("!H", len(msg)) + msg
187 | self.transport.write(tcpmsg)
188 |
189 | def data_received(self, data):
190 | self.buffer = utils.handle_dns_tcp_data(self.buffer + data, self.receive_helper)
191 |
192 | def eof_received(self):
193 | if len(self.buffer) > 0:
194 | self.logger.debug("Discard incomplete message")
195 | self.transport.close()
196 |
--------------------------------------------------------------------------------
/dohproxy/client_protocol.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | import asyncio
11 | import struct
12 | import urllib.parse
13 |
14 | import aioh2
15 | import dns.message
16 | import priority
17 | from dohproxy import constants, utils
18 |
19 |
20 | class StubServerProtocol:
21 | def __init__(self, args, logger=None, client_store=None):
22 | self.logger = logger
23 | self.args = args
24 | self._lock = asyncio.Lock()
25 | if logger is None:
26 | self.logger = utils.configure_logger("StubServerProtocol")
27 |
28 | # The client is wrapped in a mutable dictionary, so it may be shared
29 | # across multiple contexts if passed from higher in the chain.
30 | if client_store is None:
31 | self.client_store = {"client": None}
32 | else:
33 | self.client_store = client_store
34 |
35 | async def get_client(self, force_new=False):
36 | if force_new:
37 | self.client_store["client"] = None
38 | if self.client_store["client"] is not None:
39 | if self.client_store["client"]._conn is not None:
40 | return self.client_store["client"]
41 |
42 | # Open client connection
43 | self.logger.debug("Opening connection to {}".format(self.args.domain))
44 | sslctx = utils.create_custom_ssl_context(
45 | insecure=self.args.insecure, cafile=self.args.cafile
46 | )
47 | remote_addr = (
48 | self.args.remote_address if self.args.remote_address else self.args.domain
49 | )
50 | client = await aioh2.open_connection(
51 | remote_addr,
52 | self.args.port,
53 | functional_timeout=0.1,
54 | ssl=sslctx,
55 | server_hostname=self.args.domain,
56 | )
57 | rtt = await client.wait_functional()
58 | if rtt:
59 | self.logger.debug("Round-trip time: %.1fms" % (rtt * 1000))
60 |
61 | self.client_store["client"] = client
62 | return client
63 |
64 | def connection_made(self, transport):
65 | pass
66 |
67 | def connection_lost(self, exc):
68 | pass
69 |
70 | def on_answer(self, addr, msg):
71 | pass
72 |
73 | def on_message_received(self, stream_id, msg):
74 | """
75 |         Takes a wire-format message returned from a DOH server and converts
76 | it to a Python DNS message.
77 | """
78 | return dns.message.from_wire(msg)
79 |
80 | async def on_start_request(self, client, headers, end_stream):
81 | return await client.start_request(headers, end_stream=end_stream)
82 |
83 | async def on_send_data(self, client, stream_id, body):
84 | return await client.send_data(stream_id, body, end_stream=True)
85 |
86 | def on_recv_response(self, stream_id, headers):
87 | self.logger.debug("Response headers: {}".format(headers))
88 |
89 | def _make_get_path(self, content):
90 | params = utils.build_query_params(content)
91 | self.logger.debug("Query parameters: {}".format(params))
92 | params_str = urllib.parse.urlencode(params)
93 | if self.args.debug:
94 | url = utils.make_url(self.args.domain, self.args.uri)
95 | self.logger.debug("Sending {}?{}".format(url, params_str))
96 | return self.args.uri + "?" + params_str
97 |
98 | async def make_request(self, addr, dnsq):
99 |
100 | # FIXME: maybe aioh2 should allow registering for the connection_lost event
101 | # so we can find out when the connection gets disconnected.
102 | async with self._lock:
103 | client = await self.get_client()
104 |
105 | path = self.args.uri
106 | qid = dnsq.id
107 | dnsq.id = 0
108 | body = b""
109 |
110 | headers = [
111 | (":authority", self.args.domain),
112 | (":method", "POST" if self.args.post else "GET"),
113 | (":scheme", "https"),
114 | ("Accept", constants.DOH_MEDIA_TYPE),
115 | ]
116 | if self.args.post:
117 | headers.append(("content-type", constants.DOH_MEDIA_TYPE))
118 | body = dnsq.to_wire()
119 | else:
120 | path = self._make_get_path(dnsq.to_wire())
121 |
122 | headers.insert(0, (":path", path))
123 | headers.extend([("content-length", str(len(body)))])
124 | # Start request with headers
125 | # FIXME: Find a better way to close old streams. See GH#11
126 | try:
127 | stream_id = await self.on_start_request(client, headers, not body)
128 | except priority.priority.TooManyStreamsError:
129 | client = await self.get_client(force_new=True)
130 | stream_id = await self.on_start_request(client, headers, not body)
131 | self.logger.debug(
132 | "Stream ID: {} / Total streams: {}".format(stream_id, len(client._streams))
133 | )
134 | # Send the DNS query as the whole request body
135 | if body:
136 | await self.on_send_data(client, stream_id, body)
137 |
138 | # Receive response headers
139 | headers = await client.recv_response(stream_id)
140 | self.on_recv_response(stream_id, headers)
141 | # FIXME: handle errors by answering with SERVFAIL
142 |
143 | # Read all response body
144 | resp = await client.read_stream(stream_id, -1)
145 | dnsr = self.on_message_received(stream_id, resp)
146 |
147 | dnsr.id = qid
148 | self.on_answer(addr, dnsr.to_wire())
149 |
150 | # Read response trailers
151 | trailers = await client.recv_trailers(stream_id)
152 | self.logger.debug("Response trailers: {}".format(trailers))
153 |
154 |
155 | class StubServerProtocolUDP(StubServerProtocol):
156 | def connection_made(self, transport):
157 | self.transport = transport
158 |
159 | def datagram_received(self, data, addr):
160 | dnsq = dns.message.from_wire(data)
161 | asyncio.ensure_future(self.make_request(addr, dnsq))
162 |
163 | def on_answer(self, addr, msg):
164 | self.transport.sendto(msg, addr)
165 |
166 |
167 | class StubServerProtocolTCP(StubServerProtocol):
168 | def connection_made(self, transport):
169 | self.transport = transport
170 | self.addr = transport.get_extra_info("peername")
171 | self.buffer = b""
172 |
173 | def data_received(self, data):
174 | self.buffer = utils.handle_dns_tcp_data(self.buffer + data, self.receive_helper)
175 |
176 | def receive_helper(self, dnsq):
177 | asyncio.ensure_future(self.make_request(self.addr, dnsq))
178 |
179 | def on_answer(self, addr, msg):
180 | self.transport.write(struct.pack("!H", len(msg)) + msg)
181 |
182 | def eof_received(self):
183 | self.transport.close()
184 |
--------------------------------------------------------------------------------
/docs/tutorials/nginx-dohhttpproxy-unbound-centos7.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | ---
4 |
5 | # DNS over HTTPS with NGINX/DOH-PROXY/Unbound on CentOS7
6 |
7 | This tutorial will cover installing a working `doh-proxy` setup fronted by [NGINX](http://nginx.org/).
8 |
9 | We assume that we are setting up a DoH server for the domain `dns.example.com` and that the A/AAAA DNS records are already set to point to the server that is going to be configured.
10 |
11 |
12 | Port 443 is also assumed to be opened.
13 |
14 | ## Setting up the environment
15 |
16 |
17 | ### Basic tooling
18 |
19 | Let's get this out of the way first and install some packages that will be needed during this tutorial:
20 |
21 | ```bash
22 | yum -y install git bind-utils certbot-nginx
23 | ```
24 |
25 | `git` will be used to install `doh-proxy` directly from the GitHub repository.
26 | 
27 | `bind-utils` installs `dig`, which we will use to perform DNS queries.
28 | 
29 | `certbot-nginx` will be used to get and install a certificate from [Let's Encrypt](https://letsencrypt.org/).
30 |
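As a quick sanity check before going any further, you can use `dig` (installed above) to confirm that the DNS records for your DoH hostname are in place. Replace `dns.example.com` with your own domain:

```bash
dig +short A dns.example.com
dig +short AAAA dns.example.com
```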
31 | ### Python 3.6
32 | To run `doh-proxy` we need at least Python 3.5 installed. We are going to use the packages provided by the [IUS Community Project](https://ius.io/) to get a working `python3.6` set up in no time.
33 |
34 | ```bash
35 | yum -y install https://centos7.iuscommunity.org/ius-release.rpm
36 | yum -y install python36u python36u-pip python36u-devel
37 | ```
38 |
39 | ## Setting up doh-proxy
40 |
41 | ### Installing doh-proxy
42 |
43 | `doh-proxy` is packaged and uploaded to [PyPI](https://pypi.python.org/pypi/doh-proxy), so the simplest way to install it is to run:
44 |
45 | ```bash
46 | pip3.6 install doh-proxy
47 | ```
48 |
49 | If you like living on the edge, you can install from master using:
50 |
51 | ```bash
52 | pip3.6 install git+https://github.com/facebookexperimental/doh-proxy.git
53 | ```
54 |
55 | ### Create a dedicated user
56 |
57 | `doh-proxy` will be running as its own user, so we need to create that user first.
58 |
59 | ```bash
60 | adduser -r doh-proxy \
61 | -d /var/lib/doh-proxy \
62 | -c 'DOH Proxy server' \
63 | -s /sbin/nologin \
64 | -U
65 | mkdir /var/lib/doh-proxy \
66 | && chown doh-proxy: /var/lib/doh-proxy \
67 | && chmod 700 /var/lib/doh-proxy
68 | ```
69 |
70 |
71 | ### Create a doh-proxy unit file
72 |
73 | The `systemd` unit file below will ensure that `doh-proxy` is started at boot.
74 |
75 | ```bash
76 | cat << EOF > /etc/systemd/system/doh-httpproxy\@.service
77 | [Unit]
78 | Description=DOH HTTP Proxy on %I
79 | After=syslog.target network.target
80 | Before=nginx.target
81 |
82 | [Service]
83 | Type=simple
84 | ExecStart=/bin/doh-httpproxy --upstream-resolver ::1 --level DEBUG --listen-address=::1 --port %I
85 | Restart=always
86 | User=doh-proxy
87 | Group=doh-proxy
88 |
89 | [Install]
90 | WantedBy=multi-user.target
91 | EOF
92 |
93 | systemctl daemon-reload
94 | ```
95 |
96 |
97 | Now, we will set up `systemd` with two `doh-httpproxy` instances, one on port 8080 and one on port 8081,
98 | and start them.
99 |
100 | ```bash
101 | for i in 8080 8081
102 | do
103 | # We can't link on CentOS7 due to https://github.com/systemd/systemd/issues/3010
104 | # ln -s /etc/systemd/system/doh-httpproxy\@.service \
105 | cp /etc/systemd/system/doh-httpproxy\@.service \
106 | /etc/systemd/system/doh-httpproxy\@${i}.service
107 | systemctl enable doh-httpproxy@${i}
108 | systemctl start doh-httpproxy@${i}
109 | done
110 | ```
111 |
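You can check that both instances came up correctly with, for example:

```bash
systemctl status doh-httpproxy@8080 doh-httpproxy@8081
journalctl -u doh-httpproxy@8080 --since "5 minutes ago"
```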
112 | That should be it for `doh-proxy`... but we still need a recursive nameserver to perform the actual DNS queries
113 | and handle all the recursion logic. In this example we will be using [unbound](https://www.unbound.net/).
114 |
115 | ## Setting up unbound
116 | 
117 | Setting up an instance of `unbound` that listens on `127.0.0.1` and `::1` is pretty straightforward.
118 |
119 | Basically, we just need to install the package, enable it and start it.
120 |
121 | ```bash
122 | yum -y install unbound
123 | systemctl enable unbound
124 | systemctl start unbound
125 | ```
126 |
127 | You can confirm that it works by running:
128 | ```bash
129 | dig @::1 example.com
130 | ```
131 | and getting an `A` record.
132 |
133 | Finally, we are left with the last bit: configuring `NGINX` to serve HTTP2 and use our `doh-proxy` backends.
134 |
135 | ## Setting up NGINX
136 |
137 | First, we need to install `NGINX`:
138 |
139 | ```bash
140 | yum -y install nginx
141 | systemctl enable nginx
142 | systemctl start nginx
143 | ```
144 |
145 | Now that we have `NGINX` running, we can use `certbot-nginx` to get a certificate from Let's Encrypt.
146 |
147 | ```bash
148 | certbot --nginx -d dns.example.com
149 | ```
150 |
151 | At this stage, we have a working HTTPS server. If you were to open `https://dns.example.com`, you would get the default `NGINX` page.
152 |
153 | We now need to:
154 | * configure HTTP2
155 | * configure NGINX to use our `doh-proxy` backends.
156 |
157 |
158 | ### Configure NGINX
159 |
160 | First, we will enable `HTTP2` and tell NGINX to use our `dohproxy` backends.
161 |
162 | Open `/etc/nginx/nginx.conf` and look for the lines:
163 | ```
164 | listen [::]:443 ssl ipv6only=on; # managed by Certbot
165 | listen 443 ssl; # managed by Certbot
166 |
167 | ```
168 |
169 | and replace them with:
170 |
171 | ```
172 | listen [::]:443 ssl http2 ipv6only=on; # managed by Certbot
173 | listen 443 ssl http2; # managed by Certbot
174 | ```
175 |
176 | We will configure `nginx` to only let `HEAD`, `GET` and `POST` requests go
177 | through:
178 |
179 | ```
180 | if ( $request_method !~ ^(GET|POST|HEAD)$ ) {
181 | return 501;
182 | }
183 | ```
184 | Now, we will configure anything that reaches `/dns-query` to be forwarded to our backends:
185 |
186 | Find the block:
187 | ```
188 | location / {
189 | }
190 | ```
191 |
192 | and replace it with:
193 |
194 | ```
195 | location /dns-query {
196 | proxy_set_header Host $http_host;
197 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
198 | proxy_redirect off;
199 | proxy_buffering off;
200 | proxy_pass http://dohproxy_backend;
201 | }
202 | ```
203 |
204 | Finally, we need to configure our `doh-proxy` backends. Add:
205 |
206 | ```
207 | upstream dohproxy_backend {
208 | server [::1]:8080;
209 | server [::1]:8081;
210 | }
211 | ```
212 |
213 | right before the block:
214 |
215 | ```
216 | server {
217 | server_name dns.example.com; # managed by Certbot
218 |
219 | ```
220 |
221 | At this stage, you just need to restart NGINX:
222 |
223 | ```bash
224 | systemctl restart nginx
225 | ```
226 | and you should be good to go... unless you use `SELinux`, in which case a quick fix is:
227 |
228 | ```bash
229 | setsebool -P httpd_can_network_connect=true
230 | ```
231 |
232 | in order to allow NGINX to connect to our proxies.
233 |
234 |
235 | ## Testing
236 |
237 | You can use `doh-stub` to test that everything is working fine. From the server you are configuring `doh-proxy` on, in one terminal run:
238 |
239 | ```bash
240 | doh-stub --listen-port 5353 --domain dns.example.com --remote-address ::1
241 | ```
242 |
243 | This will spin up a `doh-stub` that will listen on port 5353 and connect to our new `doh-proxy` on IP `::1`.
244 |
245 | You can now query DNS on the doh server using:
246 |
247 | ```bash
248 | dig @::1 -p 5353 example.com
249 | ```
250 |
251 | and this should show the `A` record of example.com.
252 |
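As an additional check, you can also query the DoH endpoint directly with `doh-client`, which is installed alongside `doh-proxy` (the exact answer will depend on your resolver):

```bash
doh-client --domain dns.example.com --qname example.com
```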
253 | At this stage, you should have a working end-to-end NGINX/doh-proxy/unbound setup.
254 |
255 |
256 | Now, all you have to do is configure an application to use your `doh-proxy`, or set your whole system to use DoH by running [the client doh-stub](simple-setup.md#running-the-client-stub).
257 |
--------------------------------------------------------------------------------
/dohproxy/integration.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | import argparse
11 | import asyncio
12 | import inspect
13 | import sys
14 | import unittest
15 | from functools import wraps
16 | from unittest.mock import patch
17 |
18 | import colour_runner.runner
19 | import dns.message
20 | from dohproxy import client_protocol, constants, utils
21 | from pygments import highlight
22 |
23 |
24 | def async_test(f):
25 | def wrapper(*args, **kwargs):
26 | if inspect.iscoroutinefunction(f):
27 | future = f(*args, **kwargs)
28 | else:
29 | coroutine = asyncio.coroutine(f)
30 | future = coroutine(*args, **kwargs)
31 | asyncio.get_event_loop().run_until_complete(future)
32 |
33 | return wrapper
34 |
35 |
36 | def save_to_store(f):
37 | @wraps(f)
38 | def call(*args, **kw):
39 | args[0]._result_store[f.__name__] = (
40 | args[1:],
41 | kw,
42 | )
43 | return f(*args, **kw)
44 |
45 | return call
46 |
47 |
48 | def extract_from_headers(headers, key):
49 | for k, v in headers:
50 | if k == key:
51 | return v
52 |
53 |
54 | def modify_headers(headers, key, value):
55 | new_headers = []
56 | for k, v in headers:
57 | if k == key:
58 | new_headers.append((key, value))
59 | else:
60 | new_headers.append((k, v))
61 | return new_headers
62 |
63 |
64 | class DOHTextTestResult(colour_runner.runner.ColourTextTestResult):
65 | def __init__(self, *args, **kwargs):
66 | super().__init__(*args, **kwargs)
67 | self.colours["fail"] = self._terminal.bold_yellow
68 |
69 | def formatErr(self, err):
70 | return "{}\n{}".format(err[1].__class__.__name__, err[1].args[0])
71 |
72 | def addSuccess(self, test):
73 | unittest.result.TestResult.addSuccess(self, test)
74 | self.printResult(".", "OK", "success")
75 |
76 | def addError(self, test, err):
77 | self.errors.append((test, self.formatErr(err)))
78 | self.printResult("F", "ERROR", "error")
79 |
80 | def addFailure(self, test, err):
81 | self.failures.append((test, self.formatErr(err)))
82 | self.printResult("F", "WARNING", "fail")
83 |
84 | def printErrorList(self, flavour, errors):
85 | colour = self.colours[flavour.lower()]
86 | for test, err in errors:
87 | self.stream.writeln(self.separator1)
88 | title = "%s: %s" % (flavour, self.getLongDescription(test))
89 | self.stream.writeln(colour(title))
90 | print(type(err))
91 | self.stream.writeln(self.separator2)
92 | self.stream.writeln(highlight(err, self.lexer, self.formatter))
93 |
94 |
95 | class DOHTestRunner(unittest.runner.TextTestRunner):
96 | """ A test runner that uses colour in its output and customized result reporting """
97 |
98 | resultclass = DOHTextTestResult
99 |
100 |
101 | class Client(client_protocol.StubServerProtocol):
102 | def __init__(self, *args, **kwargs):
103 | super().__init__(*args, **kwargs)
104 | self._result_store = {}
105 |
106 | @save_to_store
107 | def on_answer(self, addr, msg):
108 | pass
109 |
110 | @save_to_store
111 | def on_message_received(self, stream_id, msg):
112 | return super().on_message_received(stream_id, msg)
113 |
114 | @save_to_store
115 | def on_recv_response(self, stream_id, headers):
116 | return super().on_recv_response(stream_id, headers)
117 |
118 | def from_store(self, name):
119 | return self._result_store[name]
120 |
121 | def _on_post_bad_body(self, client, stream_id, body):
122 | return super().on_send_data(client, stream_id, b"badcontent")
123 |
124 | def _on_start_request_bad_body(self, client, headers, end_stream):
125 | headers = modify_headers(headers, "content-length", str(len(b"badcontent")))
126 | return super().on_start_request(client, headers, end_stream)
127 |
128 | def _on_start_request_bad_method(self, client, headers, end_stream):
129 | headers = modify_headers(headers, ":method", "FOO")
130 | return super().on_start_request(client, headers, end_stream)
131 |
132 | def _on_start_request_head_method(self, client, headers, end_stream):
133 | headers = modify_headers(headers, ":method", "HEAD")
134 | return super().on_start_request(client, headers, end_stream)
135 |
136 | def _on_start_request_bad_dns(self, client, headers, end_stream):
137 | headers = modify_headers(headers, ":path", self._make_get_path(b"badcontent"))
138 | return super().on_start_request(client, headers, end_stream)
139 |
140 |
141 | class ServerIntegrationBaseClassTest(object):
142 | ARGS = None
143 |
144 | def setUp(self):
145 | self.logger = utils.configure_logger("doh-integration", level=self.ARGS.level)
146 | self.logger.propagate = self.ARGS.propagate_logs
147 | self.client = Client(args=self.ARGS, logger=self.logger)
148 |
149 | @async_test
150 | async def test_identical_id_on_reply(self):
151 | """
152 |         The response from a DNS query should contain the same ID as what was
153 | sent.
154 | """
155 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
156 | orig_qid = q.id
157 | await self.client.make_request(None, q)
158 | addr, msg = self.client.from_store("on_answer")[0]
159 | r = dns.message.from_wire(msg)
160 | self.assertEqual(orig_qid, r.id, "Query and Response IDs are not matching.")
161 |
162 | @async_test
163 | async def test_id_is_zero(self):
164 | """
165 |         The response from a DNS query should contain the same ID as what was
166 | sent and should support ID=0.
167 | """
168 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
169 | await self.client.make_request(None, q)
170 | msg = self.client.from_store("on_message_received")[0][1]
171 | r = dns.message.from_wire(msg)
172 | if r.id != 0:
173 | self.fail("Response ID was not set to 0: %d" % r.id)
174 |
175 | @async_test
176 | async def test_not_implemented_method(self):
177 | """
178 | Test that when a method which is not implemented is sent to the server,
179 | we get a 501 back.
180 | """
181 | with patch(
182 | "__main__.Client.on_start_request", new=Client._on_start_request_bad_method
183 | ):
184 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
185 | with self.assertRaises(Exception):
186 | await self.client.make_request(None, q)
187 | headers = self.client.from_store("on_recv_response")[0][1]
188 | self.assertEqual(extract_from_headers(headers, ":status"), "501")
189 |
190 | @async_test
191 | async def test_bad_data(self):
192 | """
193 | Test that the server returns a 400 when the data payload is not a valid
194 | DNS packet.
195 | """
196 | if self.ARGS.post:
197 | with patch(
198 | "__main__.Client.on_send_data", new=Client._on_post_bad_body
199 | ), patch(
200 | "__main__.Client.on_start_request",
201 | new=Client._on_start_request_bad_body,
202 | ):
203 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
204 | with self.assertRaises(dns.name.BadLabelType):
205 | await self.client.make_request(None, q)
206 | else:
207 | with patch(
208 | "__main__.Client.on_start_request", new=Client._on_start_request_bad_dns
209 | ):
210 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
211 |
212 | with self.assertRaises(dns.name.BadLabelType):
213 | await self.client.make_request(None, q)
214 | headers = self.client.from_store("on_recv_response")[0][1]
215 | self.assertEqual(extract_from_headers(headers, ":status"), "400")
216 |
217 |
218 | class ServerIntegrationPostTest(ServerIntegrationBaseClassTest, unittest.TestCase):
219 | def setUp(self):
220 | ServerIntegrationBaseClassTest.ARGS.post = True
221 | super().setUp()
222 |
223 |
224 | class ServerIntegrationGetTest(ServerIntegrationBaseClassTest, unittest.TestCase):
225 | def setUp(self):
226 | ServerIntegrationBaseClassTest.ARGS.post = False
227 | super().setUp()
228 |
229 | @async_test
230 | async def test_head_method(self):
231 | """
232 | Test that when a HEAD method is sent to the server, 200 is returned.
233 | """
234 | with patch(
235 | "__main__.Client.on_start_request", new=Client._on_start_request_head_method
236 | ):
237 | q = dns.message.make_query(qname="www.example.com", rdtype="A")
238 | with self.assertRaises(dns.message.ShortHeader):
239 | await self.client.make_request(None, q)
240 |
241 | headers = self.client.from_store("on_recv_response")[0][1]
242 | self.assertEqual(extract_from_headers(headers, ":status"), "200")
243 | self.assertEqual(
244 | extract_from_headers(headers, "content-type"),
245 | constants.DOH_MEDIA_TYPE,
246 | "HEAD requests should return DOH content-type",
247 | )
248 | self.assertEqual(
249 | extract_from_headers(headers, "content-length"),
250 | "0",
251 | "HEAD request should return content-length of 0",
252 | )
253 | self.assertIsNotNone(
254 | extract_from_headers(headers, "cache-control"),
255 | "HEAD request should return cache-control header",
256 | )
257 |
258 |
259 | def main():
260 | parser = utils.client_parser_base()
261 | parser.add_argument("args", nargs=argparse.REMAINDER)
262 | parser.add_argument(
263 | "--propagate-logs",
264 | action="store_true",
265 | help="Print logs generated by the client.",
266 | )
267 |
268 | args = parser.parse_args()
269 | # HACK: pass arguments to `ServerIntegrationBaseClassTest` so we can access
270 | # them within the tests.
271 | ServerIntegrationBaseClassTest.ARGS = args
272 |
273 | unittest.main(argv=[sys.argv[0]] + args.args, testRunner=DOHTestRunner, verbosity=2)
274 |
275 |
276 | if __name__ == "__main__":
277 | sys.exit(main())
278 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 |
4 | ---
5 | # DNS Over HTTPS Proxy
6 |
7 | 
8 | [](https://github.com/marketplace/actions/super-linter)
9 | [](https://badge.fury.io/py/doh-proxy)
10 |
11 | A set of Python 3 scripts that support proxying DNS over HTTPS as specified
12 | in the [IETF Draft draft-ietf-doh-dns-over-https](https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-13).
13 |
14 | DOH provides a way to run encrypted DNS over HTTPS, a protocol which can freely
15 | traverse firewalls when other encrypted mechanisms may be blocked.
16 |
17 | The project comes with a set of 4 tools:
18 |
19 | * [doh-proxy](#doh-proxy): A service that receives DOH queries over HTTP2 and forwards them
20 | to a recursive resolver.
21 | * [doh-httpproxy](#doh-httpproxy): Like `doh-proxy` but uses HTTP instead of HTTP2.
22 | The main intent is to run this behind a reverse proxy.
23 | * [doh-stub](#doh-stub): A service that listens for DNS queries and forwards them to a DOH server.
24 | * [doh-client](#doh-client): A tool to perform a test DNS query against a DOH server.
25 |
26 | See the `CONTRIBUTING` file for how to help out.
27 |
28 | DOH Proxy was created during [IETF Hackathon 100](https://www.ietf.org/how/runningcode/hackathons/100-hackathon/) as a proof-of-concept and is not used at Facebook.
29 |
30 | You are welcome to use it, but be aware that support is limited and best-effort.
31 |
32 | ## Installing
33 |
34 | To install an already packaged version directly from PyPi:
35 |
36 | ```shell
37 | $ pip3 install doh-proxy
38 | ```
39 |
40 | ## Usage
41 |
42 | ### doh-proxy
43 |
44 | `doh-proxy` is a stand-alone server answering DOH requests. The proxy does not do
45 | DNS recursion itself; rather, it forwards the query to a full-featured recursive
46 | DNS server or DNS caching server.
47 | 
48 | By running `doh-proxy`, you can get an end-to-end DOH solution with minimal
49 | setup.
50 |
51 | ```shell
52 | $ sudo doh-proxy \
53 | --upstream-resolver=::1 \
54 | --certfile=./fullchain.pem \
55 | --keyfile=./privkey.pem
56 | ```
57 |
58 | ### doh-httpproxy
59 |
60 | `doh-httpproxy` is designed to run behind a reverse proxy. In this setup,
61 | a reverse proxy such as [NGINX](https://nginx.org/) would handle the
62 | HTTPS/HTTP2 requests from the DOH clients and forward them to
63 | `doh-httpproxy` backends.
64 | 
65 | While this requires more upfront work, it allows running the DOH proxy
66 | unprivileged and on multiple cores.
67 |
68 |
69 | ```shell
70 | $ doh-httpproxy \
71 | --upstream-resolver=::1 \
72 | --port 8080 \
73 | --listen-address ::1
74 | ```
75 |
76 | `doh-httpproxy` now also supports TLS, which you can enable by passing the
77 | `--certfile` and `--keyfile` arguments (just like `doh-proxy`).
78 |
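For instance, a TLS-enabled `doh-httpproxy` could be started as follows (the port and certificate paths are only illustrative):

```shell
$ doh-httpproxy \
    --upstream-resolver=::1 \
    --port 8443 \
    --listen-address ::1 \
    --certfile ./fullchain.pem \
    --keyfile ./privkey.pem
```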
79 | ### doh-stub
80 |
81 | `doh-stub` is the piece of software that you would run on the clients. It
82 | provides a local DNS server and forwards the DNS requests it receives to a
83 | DOH server over an encrypted link.
84 |
85 | You can start a stub resolver with:
86 |
87 | ```shell
88 | $ doh-stub \
89 | --listen-port 5553 \
90 | --listen-address ::1 \
91 | --domain foo.bar \
92 | --remote-address ::1
93 | ```
94 |
95 | and query it.
96 |
97 | ```shell
98 | $ dig @::1 -p 5553 example.com
99 | ```
100 |
101 | ### doh-client
102 |
103 | `doh-client` is just a test CLI that can be used to quickly send a request to
104 | a DOH server and dump the returned answer.
105 |
106 | ```shell
107 | $ doh-client \
108 | --domain dns.dnsoverhttps.net \
109 | --qname sigfail.verteiltesysteme.net \
110 | --dnssec
111 | id 37762
112 | opcode QUERY
113 | rcode SERVFAIL
114 | flags QR RD RA
115 | edns 0
116 | eflags DO
117 | payload 4096
118 | ;QUESTION
119 | sigfail.verteiltesysteme.net. IN AAAA
120 | ;ANSWER
121 | ;AUTHORITY
122 | ;ADDITIONAL
123 |
124 | $ doh-client \
125 | --domain dns.dnsoverhttps.net \
126 | --qname sigok.verteiltesysteme.net \
127 | --dnssec
128 | id 49772
129 | opcode QUERY
130 | rcode NOERROR
131 | flags QR RD RA AD
132 | edns 0
133 | eflags DO
134 | payload 4096
135 | ;QUESTION
136 | sigok.verteiltesysteme.net. IN AAAA
137 | ;ANSWER
138 | sigok.verteiltesysteme.net. 60 IN AAAA 2001:638:501:8efc::139
139 | sigok.verteiltesysteme.net. 60 IN RRSIG AAAA 5 3 60 20180130030002 20171031030002 30665 verteiltesysteme.net. O7QgNZFBu3fULvBXwM39apv5nMehh51f mLOVEsC8qZUyxIbxo4eDLQt0JvPoPpFH 5TbWdlm/jxq5x2/Kjw7yUdpohhiNmdoD Op7Y+RyHbf676FoC5Zko9uOAB7Pp8ERz qiT0QPt1ec12bM0XKQigfp+2Hy9wUuSN QmAzXS2s75k=
140 | ;AUTHORITY
141 | ;ADDITIONAL
142 | ```
143 |
144 | ## Development
145 |
146 |
147 | ### Requirements
148 |
149 | * python >= 3.5
150 | * aiohttp
151 | * aioh2
152 | * dnspython
153 |
154 | ### Building
155 |
156 | DOH Proxy uses Python's setuptools to manage dependencies and build.
157 |
158 | To install its dependencies:
159 |
160 | ```shell
161 | $ python3 setup.py develop
162 | # Due to GH #63
163 | $ pip install git+https://github.com/URenko/aioh2#egg=aioh2
164 | ```
165 |
166 | To build:
167 | ```shell
168 | $ python3 setup.py build
169 | ```
170 |
171 | To run the unit tests:
172 | ```shell
173 | $ python3 setup.py test
174 | ```
175 |
176 | To run the linter:
177 |
178 | DOH Proxy uses the GitHub Action [Super-Linter](https://github.com/marketplace/actions/super-linter) to lint the code. You can validate your code locally by running Super-Linter with the following command line from within the repository:
179 |
180 | ```shell
181 | docker run -e RUN_LOCAL=true -e VALIDATE_PYTHON_PYLINT=false \
182 | -e FILTER_REGEX_INCLUDE='(dohproxy|test)/.*.py' \
183 | -v $(pwd):/tmp/lint \
184 | --rm github/super-linter:v3
185 | ```
186 |
187 | From within the root of the repository, you can test the proxy, stub and client respectively
188 | by using the following commands:
189 |
190 | ```shell
191 | $ sudo PYTHONPATH=. ./dohproxy/proxy.py ...
192 | ```
193 |
194 | ```shell
195 | $ PYTHONPATH=. ./dohproxy/httpproxy.py ...
196 | ```
197 |
198 |
199 | ```shell
200 | $ PYTHONPATH=. ./dohproxy/stub.py ...
201 | ```
202 |
203 | ```shell
204 | $ PYTHONPATH=. ./dohproxy/client.py ...
205 | ```
206 |
207 | ## License
208 | DOH Proxy is BSD-licensed.
209 | ## Tutorials
210 |
211 | Check the [tutorial page](tutorials.md)
212 | # Changelog
213 |
214 | ## [Unreleased]
215 |
216 | ### Fixed
217 | - fix unittests pattern matching. GH #78
218 | - force aiohttp < 4.0.0. GH #78
219 | - close transport on Timeout. GH #79
220 | - don't decode body twice. GH #83 @tiran
221 | - handle get\_extra\_info('peername') being None. GH #89
222 | - add legal link to web site. GH #90
223 | - define flake8 defaults. GH #91 @rfinnie
224 | - set `Accept` header in client queries. GH #95
225 |
226 | ### Changes
227 | - improve logging. GH #87
228 | - support multiple --listen-address. GH #85 @rfinnie
229 | - Add support for ECS. GH #88 @rfinnie
230 |
231 | ## [0.0.9] - 2019-07-04
232 |
233 | ### Fixed
234 | - fix copyright headers. GH #51
235 | - fix flake8 error
236 | - loglevel (--level) was ignored in DNSClient. GH #58
237 | - Do not set_result when coroutine is already cancelled. GH #59
238 | - Remove NPN support. GH #64
239 | - Properly close UDP transport after an exception occurred. GH #66
240 |
241 | ## [0.0.8] - 2018-08-14
242 |
243 | ### Changes
244 | - [doc] don't use `sudo` when not required. @jpmens
245 | - version bump to get markdown rendering on pypi.
246 |
247 | ## [0.0.7] - 2018-08-13
248 |
249 | ### Fixed
250 | - Handle dns message with empty question section GH #21
251 | - Make https://pypi.org/project/doh-proxy/ display description using markdown syntax.
252 |
253 | ### Changes
254 | - separate server side protocol classes from client side ones
255 | - Support for [draft-13](https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-13). @bagder
256 | - DNSClientProtocol is now an async-friendly class which will retry over TCP on timeout and/or when the TC bit is set. @newEZ
257 | - Both `doh-httpproxy` and `doh-proxy` now use the new DNSClient @newEZ and @chantra
258 |
259 | ### Added
260 | - support listening from multiple IPs for proxy services.
261 | - Added support for TLS in `doh-httpproxy` @lucasvasconcelos
262 | - Pass optional `cafile` to `doh-stub` to be able to connect to service using custom CA @fim
263 |
264 | ## [0.0.6] - 2018-02-20
265 |
266 | ### Added
267 | - custom upstream port option GH #16
268 | - display version with --version
269 |
270 | ### Fixed
271 | - set :scheme pseudo-header correctly. GH #17
272 |
273 | ## [0.0.5] - 2018-02-05
274 |
275 | ### Added
276 | - Unittest coverage of httpproxy.py
277 |
278 | ### Changes
279 | - @jedisct1 change DOH_BODY_PARAM to `dns` to match draft-ietf-doh-dns-over-https-03
280 | - removed .well-known from default URI GH #15
281 |
282 | ### Fixed
283 | - support POST in doh-httpproxy. GH #12
284 |
285 |
286 | ## [0.0.4] - 2018-01-27
287 |
288 | ### Fixed
289 | - Create new connection on TooManyStreamsError to work around GH decentfox/aioh2#16
290 |
291 | ### Changes
292 | - ensure only 1 client is initialized at a time.
293 |
294 | ## [0.0.3] - 2018-01-17
295 |
296 | ### Fixed
297 | - proxy: handle empty ct parameter
298 |
299 | ### Changed
300 | - proxies and stub will listen to ::1 by default.
301 | - proxies better handle malformed DNS messages
302 |
303 | ## [0.0.2] - 2018-01-16
304 | ### Added
305 | - Travis CI
306 | - Support multiple query over the same HTTP2 connection.
307 | - started adding some unittests to utils.py
308 | - `dohproxy/httpproxy.py`: an HTTP1 proxy to run as a reverse proxy backend.
309 |
310 | ### Changed
311 | - code refactor between stub and client
312 | - use logging modules instead of rogue prints
313 | - stub and client now use the same StubServerProtocol as the base to perform
314 | queries.
315 | - proxy: use logging module instead of print
316 | - doc: improved documentation and provide example setups.
317 |
318 | ### Removed
319 | - dependency on hyper package
320 |
321 | ### Fixed
322 | - doh-proxy: properly import dohproxy.protocol
323 | - doh-client: properly set entry_point
324 |
325 |
326 | ## 0.0.1 - 2018-01-11
327 | ### Added
328 | - Proxy script `dohproxy/proxy.py`
329 | - Stub script `dohproxy/stub.py`
330 | - Test client script `dohproxy/client.py`
331 | - setuptools' setup.py
332 | - doc
333 | - CHANGELOG.md and README.md
334 |
335 | [Unreleased]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.9...HEAD
336 | [0.0.9]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.8...v0.0.9
337 | [0.0.8]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.7...v0.0.8
338 | [0.0.7]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.6...v0.0.7
339 | [0.0.6]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.5...v0.0.6
340 | [0.0.5]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.4...v0.0.5
341 | [0.0.4]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.3...v0.0.4
342 | [0.0.3]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.2...v0.0.3
343 | [0.0.2]: https://github.com/facebookexperimental/doh-proxy/compare/v0.0.1...v0.0.2
344 |
--------------------------------------------------------------------------------
/dohproxy/proxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import asyncio
10 | import collections
11 | import io
12 | import time
13 | from typing import List, Tuple
14 |
15 | import dns.message
16 | import dns.rcode
17 | from dohproxy import constants, utils
18 | from dohproxy.server_protocol import (
19 | DNSClient,
20 | DOHDNSException,
21 | DOHParamsException,
22 | )
23 | from h2.config import H2Configuration
24 | from h2.connection import H2Connection
25 | from h2.events import (
26 | ConnectionTerminated,
27 | DataReceived,
28 | RequestReceived,
29 | StreamEnded,
30 | )
31 | from h2.exceptions import ProtocolError
32 |
33 | RequestData = collections.namedtuple("RequestData", ["headers", "data"])
34 |
35 |
36 | def parse_args():
37 | parser = utils.proxy_parser_base(port=443, secure=True)
38 | return parser.parse_args()
39 |
40 |
41 | class H2Protocol(asyncio.Protocol):
42 | def __init__(
43 | self,
44 | upstream_resolver=None,
45 | upstream_port=None,
46 | uri=None,
47 | logger=None,
48 | debug=False,
49 | ecs=False,
50 | ):
51 | config = H2Configuration(client_side=False, header_encoding="utf-8")
52 | self.conn = H2Connection(config=config)
53 | self.logger = logger
54 | if logger is None:
55 | self.logger = utils.configure_logger("doh-proxy", "DEBUG")
56 | self.transport = None
57 | self.debug = debug
58 | self.ecs = ecs
59 | self.stream_data = {}
60 | self.upstream_resolver = upstream_resolver
61 | self.upstream_port = upstream_port
62 | self.time_stamp = 0
63 | self.uri = constants.DOH_URI if uri is None else uri
64 | assert upstream_resolver is not None, "An upstream resolver must be provided"
65 | assert upstream_port is not None, "An upstream resolver port must be provided"
66 |
67 | def connection_made(self, transport: asyncio.Transport): # type: ignore
68 | self.transport = transport
69 | self.conn.initiate_connection()
70 | self.transport.write(self.conn.data_to_send())
71 |
72 | def data_received(self, data: bytes):
73 | try:
74 | events = self.conn.receive_data(data)
75 | except ProtocolError:
76 | self.transport.write(self.conn.data_to_send())
77 | self.transport.close()
78 | else:
79 | self.transport.write(self.conn.data_to_send())
80 | for event in events:
81 | if isinstance(event, RequestReceived):
82 | self.request_received(event.headers, event.stream_id)
83 | elif isinstance(event, DataReceived):
84 | self.receive_data(event.data, event.stream_id)
85 | elif isinstance(event, StreamEnded):
86 | self.stream_complete(event.stream_id)
87 | elif isinstance(event, ConnectionTerminated):
88 | self.transport.close()
89 |
90 | self.transport.write(self.conn.data_to_send())
91 |
92 | def request_received(self, headers: List[Tuple[str, str]], stream_id: int):
93 | _headers = collections.OrderedDict(headers)
94 | method = _headers[":method"]
95 |
96 | # We only support GET, POST and HEAD.
97 | if method not in ["GET", "POST", "HEAD"]:
98 | self.return_501(stream_id)
99 | return
100 |
101 | # Store off the request data.
102 | request_data = RequestData(_headers, io.BytesIO())
103 | self.stream_data[stream_id] = request_data
104 |
105 | def stream_complete(self, stream_id: int):
106 | """
107 | When a stream is complete, we can send our response.
108 | """
109 | try:
110 | request_data = self.stream_data[stream_id]
111 | except KeyError:
112 | # Just return, we probably 501'd this already
113 | return
114 |
115 | headers = request_data.headers
116 | method = request_data.headers[":method"]
117 |
118 | # Handle the actual query
119 | path, params = utils.extract_path_params(headers[":path"])
120 |
121 | if path != self.uri:
122 | self.return_404(stream_id)
123 | return
124 |
125 | if method in ["GET", "HEAD"]:
126 | try:
127 | ct, body = utils.extract_ct_body(params)
128 | except DOHParamsException as e:
129 | self.return_400(stream_id, body=e.body())
130 | return
131 | elif method == "POST":
132 | body = request_data.data.getvalue()
133 | ct = headers.get("content-type")
134 | else:
135 | self.return_501(stream_id)
136 | return
137 |
138 | if ct != constants.DOH_MEDIA_TYPE:
139 | self.return_415(stream_id)
140 | return
141 |
142 | # Do actual DNS Query
143 | try:
144 | dnsq = utils.dns_query_from_body(body, self.debug)
145 | except DOHDNSException as e:
146 | self.return_400(stream_id, body=e.body())
147 | return
148 |
149 | clientip = utils.get_client_ip(self.transport)
150 | self.logger.info("[HTTPS] {} {}".format(clientip, utils.dnsquery2log(dnsq)))
151 | self.time_stamp = time.time()
152 | asyncio.ensure_future(self.resolve(dnsq, stream_id))
153 |
154 | def on_answer(self, stream_id, dnsr=None, dnsq=None):
155 | try:
156 | request_data = self.stream_data[stream_id]
157 | except KeyError:
158 | # Just return, we probably 501'd this already
159 | return
160 |
161 | response_headers = [
162 | (":status", "200"),
163 | ("content-type", constants.DOH_MEDIA_TYPE),
164 | ("server", "asyncio-h2"),
165 | ]
166 | if dnsr is None:
167 | dnsr = dns.message.make_response(dnsq)
168 | dnsr.set_rcode(dns.rcode.SERVFAIL)
169 | elif len(dnsr.answer):
170 | ttl = min(r.ttl for r in dnsr.answer)
171 | response_headers.append(("cache-control", "max-age={}".format(ttl)))
172 |
173 | clientip = utils.get_client_ip(self.transport)
174 | interval = int((time.time() - self.time_stamp) * 1000)
175 | self.logger.info(
176 | "[HTTPS] {} {} {}ms".format(clientip, utils.dnsans2log(dnsr), interval)
177 | )
178 | if request_data.headers[":method"] == "HEAD":
179 | body = b""
180 | else:
181 | body = dnsr.to_wire()
182 | response_headers.append(("content-length", str(len(body))))
183 |
184 | self.conn.send_headers(stream_id, response_headers)
185 | self.conn.send_data(stream_id, body, end_stream=True)
186 | self.transport.write(self.conn.data_to_send())
187 |
188 | async def resolve(self, dnsq, stream_id):
189 | clientip = utils.get_client_ip(self.transport)
190 | dnsclient = DNSClient(
191 | self.upstream_resolver, self.upstream_port, logger=self.logger
192 | )
193 | dnsr = await dnsclient.query(dnsq, clientip, ecs=self.ecs)
194 |
195 | if dnsr is None:
196 | self.on_answer(stream_id, dnsq=dnsq)
197 | else:
198 | self.on_answer(stream_id, dnsr=dnsr)
199 |
200 | def return_XXX(self, stream_id: int, status: int, body: bytes = b""):
201 | """
202 | Wrapper to return a status code and some optional content.
203 | """
204 | response_headers = (
205 | (":status", str(status)),
206 | ("content-length", str(len(body))),
207 | ("server", "asyncio-h2"),
208 | )
209 | self.conn.send_headers(stream_id, response_headers)
210 | self.conn.send_data(stream_id, body, end_stream=True)
211 |
212 | def return_400(self, stream_id: int, body: bytes = b""):
213 | """
214 |         The request was malformed, so we want to return a 400 response.
215 | """
216 | self.return_XXX(stream_id, 400, body)
217 |
218 | def return_403(self, stream_id: int, body: bytes = b""):
219 | """
220 |         The request is not allowed, so we want to return a 403 response.
221 | """
222 | self.return_XXX(stream_id, 403, body)
223 |
224 | def return_404(self, stream_id: int):
225 | """
226 |         We don't support the given PATH, so we want to return a 404 response.
227 | """
228 | self.return_XXX(stream_id, 404, body=b"Wrong path")
229 |
230 | def return_405(self, stream_id: int):
231 | """
232 | We don't support the given method, so we want to return a 405 response.
233 | """
234 | self.return_XXX(stream_id, 405)
235 |
236 | def return_415(self, stream_id: int):
237 | """
238 | We don't support the given media, so we want to return a 415 response.
239 | """
240 | self.return_XXX(stream_id, 415, body=b"Unsupported content type")
241 |
242 | def return_501(self, stream_id: int):
243 | """
244 | We don't support the given method.
245 | """
246 | self.return_XXX(stream_id, 501, body=b"Not Implemented")
247 |
248 | def receive_data(self, data: bytes, stream_id: int):
249 | """
250 | We've received some data on a stream. If that stream is one we're
251 | expecting data on, save it off. Otherwise, reset the stream.
252 | """
253 | try:
254 | stream_data = self.stream_data[stream_id]
255 | except KeyError:
256 | # Unknown stream, log and ignore (the stream may already be ended)
257 | clientip = utils.get_client_ip(self.transport)
258 | self.logger.info("[HTTPS] %s Unknown stream %d", clientip, stream_id)
259 | else:
260 | stream_data.data.write(data)
261 |
262 |
263 | def main():
264 | args = parse_args()
265 | logger = utils.configure_logger("doh-proxy", args.level)
266 | ssl_ctx = utils.create_ssl_context(args, http2=True)
267 | loop = asyncio.get_event_loop()
268 | if "all" in args.listen_address:
269 | listen_addresses = utils.get_system_addresses()
270 | else:
271 | listen_addresses = args.listen_address
272 | for addr in listen_addresses:
273 | coro = loop.create_server(
274 | lambda: H2Protocol(
275 | upstream_resolver=args.upstream_resolver,
276 | upstream_port=args.upstream_port,
277 | uri=args.uri,
278 | logger=logger,
279 | debug=args.debug,
280 | ecs=args.ecs,
281 | ),
282 | host=addr,
283 | port=args.port,
284 | ssl=ssl_ctx,
285 | )
286 | server = loop.run_until_complete(coro)
287 |
288 | # Serve requests until Ctrl+C is pressed
289 | logger.info("Serving on {}".format(server))
290 | try:
291 | loop.run_forever()
292 | except KeyboardInterrupt:
293 | pass
294 |
295 | # Close the server
296 | server.close()
297 | loop.run_until_complete(server.wait_closed())
298 | loop.close()
299 |
300 |
301 | if __name__ == "__main__":
302 | main()
303 |
--------------------------------------------------------------------------------
/test/test_httpproxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | import logging
11 | from unittest.mock import MagicMock, patch
12 |
13 | import aiohttp
14 | import aiohttp_remotes
15 | import asynctest
16 | import dns.message
17 | from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
18 | from dohproxy import constants, httpproxy, server_protocol, utils
19 | from dohproxy.server_protocol import DNSClient
20 |
21 |
22 | def echo_dns_q(q):
23 | return aiohttp.web.Response(
24 | status=200, body=q.to_wire(), content_type=constants.DOH_MEDIA_TYPE,
25 | )
26 |
27 |
28 | class HTTPProxyTestCase(AioHTTPTestCase):
29 | def setUp(self):
30 | super().setUp()
31 | self.endpoint = "/dns"
32 | self.dnsq = dns.message.make_query(qname="foo.example.com", rdtype="A",)
33 | self.dnsq.id = 0
34 |
35 | def get_args(self):
36 | return [
37 | "--listen-port",
38 | "0",
39 | "--level",
40 | "DEBUG",
41 | "--listen-address",
42 | "127.0.0.1",
43 | "--uri",
44 | "/dns",
45 | "--trusted",
46 | ]
47 |
48 | async def get_application(self):
49 | """
50 | Override the get_app method to return your application.
51 | """
52 | parser, args = httpproxy.parse_args(self.get_args())
53 | return httpproxy.get_app(args)
54 |
55 |
56 | class HTTPProxyGETTestCase(HTTPProxyTestCase):
57 | def setUp(self):
58 | super().setUp()
59 | self.method = "GET"
60 |
61 | @asynctest.patch.object(httpproxy.DOHApplication, "resolve")
62 | @unittest_run_loop
63 | async def test_get_valid_request(self, resolve):
64 | """ Test that when we run a valid GET request, resolve will be called
65 | and returns some content; here it echoes the request.
66 | """
67 | resolve.return_value = echo_dns_q(self.dnsq)
68 | params = utils.build_query_params(self.dnsq.to_wire())
69 | request = await self.client.request(self.method, self.endpoint, params=params)
70 | self.assertEqual(request.status, 200)
71 | content = await request.read()
72 |
73 | self.assertEqual(self.dnsq, dns.message.from_wire(content))
74 |
75 | @asynctest.patch.object(httpproxy.DOHApplication, "resolve")
76 | @unittest_run_loop
77 | async def test_get_request_bad_content_type(self, resolve):
78 | """ Test that when an invalid content-type is provided, we return 200.
79 | content-type is not used in GET requests anymore, so it will default to
80 | 'application/dns-message'
81 | """
82 | resolve.return_value = echo_dns_q(self.dnsq)
83 | params = utils.build_query_params(self.dnsq.to_wire())
84 | params["ct"] = "bad/type"
85 | request = await self.client.request(self.method, self.endpoint, params=params)
86 | self.assertEqual(request.status, 200)
87 | content = await request.read()
88 | self.assertEqual(self.dnsq, dns.message.from_wire(content))
89 |
90 | @asynctest.patch.object(httpproxy.DOHApplication, "resolve")
91 | @unittest_run_loop
92 | async def test_get_request_no_content_type(self, resolve):
93 | """ Test that when there is no ct parameter, we accept the query.
94 | content-type is not used in GET requests anymore, so it will default to
95 | 'application/dns-message'
96 | """
97 | resolve.return_value = echo_dns_q(self.dnsq)
98 | params = utils.build_query_params(self.dnsq.to_wire())
99 | request = await self.client.request(self.method, self.endpoint, params=params)
100 | self.assertEqual(request.status, 200)
101 | content = await request.read()
102 | self.assertEqual(self.dnsq, dns.message.from_wire(content))
103 |
104 | @unittest_run_loop
105 | async def test_get_request_empty_body(self):
106 | """ Test that when an empty body is provided, we return 400.
107 | """
108 | params = utils.build_query_params(self.dnsq.to_wire())
109 | params[constants.DOH_DNS_PARAM] = ""
110 | request = await self.client.request(self.method, self.endpoint, params=params)
111 | self.assertEqual(request.status, 400)
112 | content = await request.read()
113 | self.assertEqual(content, b"Missing Body")
114 |
115 | @unittest_run_loop
116 | async def test_get_request_bad_dns_request(self):
117 | """ Test that when an invalid body is provided, we return 400.
118 | """
119 | params = utils.build_query_params(self.dnsq.to_wire())
120 | params[constants.DOH_DNS_PARAM] = "dummy"
121 | request = await self.client.request(self.method, self.endpoint, params=params)
122 | self.assertEqual(request.status, 400)
123 | content = await request.read()
124 | self.assertEqual(content, b"Invalid Body Parameter")
125 |
126 |
127 | class HTTPProxyPOSTTestCase(HTTPProxyTestCase):
128 | def setUp(self):
129 | super().setUp()
130 | self.method = "POST"
131 |
132 | def make_header(self):
133 | return {"content-type": constants.DOH_MEDIA_TYPE}
134 |
135 | def make_body(self, q):
136 | return q.to_wire()
137 |
138 | @asynctest.patch.object(httpproxy.DOHApplication, "resolve")
139 | @unittest_run_loop
140 | async def test_post_valid_request(self, resolve):
141 | """ Test that when we run a valid POST request, resolve will be called
142 | and returns some content; here it echoes the request.
143 | """
144 | resolve.return_value = echo_dns_q(self.dnsq)
145 | request = await self.client.request(
146 | self.method,
147 | self.endpoint,
148 | headers=self.make_header(),
149 | data=self.make_body(self.dnsq),
150 | )
151 |
152 | self.assertEqual(request.status, 200)
153 | content = await request.read()
154 |
155 | self.assertEqual(self.dnsq, dns.message.from_wire(content))
156 |
157 | @unittest_run_loop
158 | async def test_post_request_no_content_type(self):
159 | """ Test that when no content-type is provided, we return 415.
160 | """
161 | request = await self.client.request(
162 | self.method, self.endpoint, headers={}, data=self.make_body(self.dnsq)
163 | )
164 |
165 | self.assertEqual(request.status, 415)
166 | content = await request.read()
167 |
168 | self.assertEqual(content, b"Unsupported content type")
169 |
170 | @unittest_run_loop
171 | async def test_post_request_bad_content_type(self):
172 | """ Test that when an invalid content-type is provided, we return 415.
173 | """
174 | request = await self.client.request(
175 | self.method,
176 | self.endpoint,
177 | headers={"content-type": "bad/type"},
178 | data=self.make_body(self.dnsq),
179 | )
180 |
181 | self.assertEqual(request.status, 415)
182 | content = await request.read()
183 | self.assertEqual(content, b"Unsupported content type")
184 |
185 | @unittest_run_loop
186 | async def test_post_request_empty_body(self):
187 | """ Test that when an empty body is provided, we return 400.
188 | """
189 | request = await self.client.request(
190 | self.method, self.endpoint, headers=self.make_header(),
191 | )
192 |
193 | self.assertEqual(request.status, 400)
194 | content = await request.read()
195 | self.assertEqual(content, b"Malformed DNS query")
196 |
197 | @unittest_run_loop
198 | async def test_post_request_bad_dns_request(self):
199 | """ Test that when an invalid dns request is provided, we return 400.
200 | """
201 | request = await self.client.request(
202 | self.method, self.endpoint, headers=self.make_header(), data="dummy",
203 | )
204 |
205 | self.assertEqual(request.status, 400)
206 | content = await request.read()
207 | self.assertEqual(content, b"Malformed DNS query")
208 |
209 |
210 | class HTTPProxyXForwardedModeTestCase(HTTPProxyTestCase):
211 | """ Trusted parameter is set by default to [::1, 127.0.0.1].
212 | See httpproxy.parse_args
213 | """
214 |
215 | def setUp(self):
216 | super().setUp()
217 |
218 | def get_args(self):
219 | return [
220 | "--listen-port",
221 | "0",
222 | "--level",
223 | "DEBUG",
224 | "--listen-address",
225 | "127.0.0.1",
226 | "--uri",
227 | "/dns",
228 | ]
229 |
230 | @asynctest.patch.object(aiohttp_remotes, "XForwardedStrict")
231 | @asynctest.patch.object(aiohttp_remotes, "XForwardedRelaxed")
232 | @unittest_run_loop
233 | async def test_xforwarded_mode_with_trusted_hosts(
234 | self, mock_xforwarded_relaxed, mock_xforwarded_strict
235 | ):
236 |         """ Test that when the aiohttp app has some trusted hosts specified at
237 | initialization, the XForwardedStrict method is applied.
238 | """
239 | args = self.get_args()
240 | args.extend(["--trusted", "::1", "127.0.0.1"])
241 | parser, args = httpproxy.parse_args(args)
242 | httpproxy.get_app(args)
243 |
244 | self.assertFalse(mock_xforwarded_relaxed.called)
245 | self.assertTrue(mock_xforwarded_strict.called)
246 |
247 | @asynctest.patch.object(aiohttp_remotes, "XForwardedStrict")
248 | @asynctest.patch.object(aiohttp_remotes, "XForwardedRelaxed")
249 | @unittest_run_loop
250 | async def test_xforwarded_mode_without_trusted_hosts(
251 | self, mock_xforwarded_relaxed, mock_xforwarded_strict
252 | ):
253 |         """ Test that when the aiohttp app has no trusted hosts specified at
254 | initialization, the XForwardedRelaxed method is applied.
255 | """
256 | args = self.get_args()
257 | args.extend(["--trusted"])
258 | parser, args = httpproxy.parse_args(args)
259 | httpproxy.get_app(args)
260 |
261 | self.assertTrue(mock_xforwarded_relaxed.called)
262 | self.assertFalse(mock_xforwarded_strict.called)
263 |
264 |
265 | async def async_magic():
266 | pass
267 |
268 |
269 | # make MagicMock usable in 'await' expressions
270 | MagicMock.__await__ = lambda x: async_magic().__await__()
271 |
272 |
273 | class DNSClientLoggerTestCase(HTTPProxyTestCase):
274 | # This class mainly helps verify logger's propagation.
275 |
276 | def setUp(self):
277 | super().setUp()
278 |
279 | @asynctest.patch.object(server_protocol.DNSClient, "query")
280 | @patch.object(httpproxy.DOHApplication, "on_answer")
281 | @asynctest.patch("dohproxy.httpproxy.DNSClient")
282 | @unittest_run_loop
283 | async def test_mock_dnsclient_assigned_logger(
284 | self, MockedDNSClient, Mockedon_answer, Mockedquery
285 | ):
286 |         """ Test that MockedDNSClient is created with the doh-httpproxy
287 | logger and DEBUG level.
288 | """
289 | Mockedquery.return_value = self.dnsq
290 | Mockedon_answer.return_value = aiohttp.web.Response(status=200, body=b"Done")
291 | params = utils.build_query_params(self.dnsq.to_wire())
292 | request = await self.client.request("GET", self.endpoint, params=params)
293 | request.remote = "127.0.0.1"
294 | app = await self.get_application()
295 | await app.resolve(request, self.dnsq)
296 |
297 | mylogger = utils.configure_logger(name="doh-httpproxy", level="DEBUG")
298 | MockedDNSClient.assert_called_with(
299 | app.upstream_resolver, app.upstream_port, logger=mylogger
300 | )
301 |
302 | def test_dnsclient_none_logger(self):
303 | """ Test that when DNSClient is created without a logger,
304 | The default logger and default level 'DEBUG' should be used.
305 | """
306 | dnsclient = DNSClient("", 80)
307 | self.assertEqual(dnsclient.logger.level, 10) # DEBUG's level is 10
308 | self.assertEqual(dnsclient.logger.name, "DNSClient")
309 |
310 | def test_dnsclient_assigned_logger(self):
311 | """ Test that when DNSClient is created with a logger,
312 | This logger and its corresponding level should be used.
313 | """
314 | mylogger = logging.getLogger("mylogger")
315 | level = "ERROR"
316 | mylogger.setLevel(level)
317 |
318 | dnsclient = DNSClient("", 80, logger=mylogger)
319 | self.assertEqual(dnsclient.logger.level, 40) # ERROR's level is 40
320 | self.assertEqual(dnsclient.logger.name, "mylogger")
321 |
--------------------------------------------------------------------------------
/dohproxy/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 | import argparse
10 | import asyncio
11 | import base64
12 | import binascii
13 | import ipaddress
14 | import logging
15 | import ssl
16 | import struct
17 | import sys
18 | import urllib.parse
19 |
20 | import dns.edns
21 | import dns.exception
22 | import dns.message
23 | import dns.rcode
24 |
25 | try:
26 | import netifaces
27 | except ImportError as e:
28 | # Optional module
29 | netifaces = e
30 | from typing import Dict, List, Optional, Tuple
31 |
32 | from dohproxy import __version__, constants, server_protocol
33 |
34 |
35 | def get_client_ip(transport: asyncio.BaseTransport) -> Optional[str]:
36 | """ Helper function to return the IP of the client connecting to us.
37 | Returns None on error.
38 | """
39 | peername = transport.get_extra_info("peername")
40 | if peername:
41 | return peername[0]
42 | return None
43 |
44 |
45 | def msg2question(msg: dns.message.Message) -> str:
46 | """ Helper function to return a string of name class and type
47 | """
48 | question = ""
49 | if len(msg.question):
50 | q = msg.question[0]
51 | name = q.name.to_text()
52 | qclass = dns.rdataclass.to_text(q.rdclass)
53 | qtype = dns.rdatatype.to_text(q.rdtype)
54 | question = " ".join([name, qtype, qclass])
55 | return question
56 |
57 |
58 | def msg2flags(msg: dns.message.Message) -> str:
59 | """ Helper function to return flags in a message
60 | """
61 | return "/".join(dns.flags.to_text(msg.flags).split(" "))
62 |
63 |
64 | def sum_items(section: List[dns.rrset.RRset]) -> int:
65 |     """ Helper function to return the number of items in a section of a DNS answer
66 | """
67 | return sum(len(x) for x in section)
68 |
69 |
70 | def dnsquery2log(msg: dns.message.Message) -> str:
71 | """ Helper function to return a readable excerpt from a dns query object.
72 | """
73 | question = msg2question(msg)
74 | flags = msg2flags(msg)
75 |
76 | return "{} {} {}".format(question, msg.id, flags,)
77 |
78 |
79 | def dnsans2log(msg: dns.message.Message) -> str:
80 | """ Helper function to return a readable excerpt from a dns answer object.
81 | """
82 | question = msg2question(msg)
83 | flags = msg2flags(msg)
84 |
85 | return "{} {} {} {}/{}/{} {}/{}/{} {}".format(
86 | question,
87 | msg.id,
88 | flags,
89 | sum_items(msg.answer),
90 | sum_items(msg.authority),
91 | sum_items(msg.additional),
92 | msg.edns,
93 | msg.ednsflags,
94 | msg.payload,
95 | dns.rcode.to_text(msg.rcode()),
96 | )
97 |
98 |
99 | def extract_path_params(url: str) -> Tuple[str, Dict[str, List[str]]]:
100 | """ Given a URI, extract the path and the parameters
101 | """
102 | p = urllib.parse.urlparse(url)
103 | params = urllib.parse.parse_qs(p.query, keep_blank_values=True)
104 | return p.path, params
105 |
106 |
107 | def create_ssl_context(
108 | options: argparse.Namespace, http2: bool = False
109 | ) -> ssl.SSLContext:
110 | """ Create SSL Context for the proxies
111 |     :param options: where to find the certfile and the keyfile
112 |     :param http2: enable HTTP/2 in the context
113 | :return: An instance of ssl.SSLContext to be used by the proxies
114 | """
115 |
116 | ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
117 | ctx.load_cert_chain(options.certfile, keyfile=options.keyfile)
118 | if http2:
119 | ctx.set_alpn_protocols(["h2"])
120 | ctx.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
121 | ctx.set_ciphers(constants.DOH_CIPHERS)
122 |
123 | return ctx
124 |
125 |
126 | def create_custom_ssl_context(
127 | *, insecure: bool, cafile: Optional[str] = None
128 | ) -> ssl.SSLContext:
129 | """ Create a custom SSL context
130 | :param insecure: Disable certificate verification if True
131 | :param cafile: Pass custom CA file for cert verification
132 | :return: An instance of ssl.SSLContext with our configuration
133 | """
134 |
135 | if insecure:
136 | sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
137 | sslctx.options |= ssl.OP_NO_SSLv2
138 | sslctx.options |= ssl.OP_NO_SSLv3
139 | sslctx.options |= ssl.OP_NO_COMPRESSION
140 | sslctx.set_default_verify_paths()
141 | else:
142 | sslctx = ssl.create_default_context()
143 | if cafile:
144 | sslctx.load_verify_locations(cafile=cafile, capath=None)
145 |
146 | sslctx.set_alpn_protocols(constants.DOH_H2_NPN_PROTOCOLS)
147 |
148 | return sslctx
149 |
150 |
151 | def extract_ct_body(params: Dict[str, List[str]]) -> Tuple[str, bytes]:
152 | """ Extract the content type and body from a list of get parameters.
153 | :param params: A dictionary of key/value of parameters as provided by
154 | urllib.parse.parse_qs
155 | :return: a tuple that contains a string and bytes, respectively ct and
156 | body.
157 | :raises: a DOHParamsException with an explanatory message.
158 | """
159 | ct = constants.DOH_MEDIA_TYPE
160 | if constants.DOH_DNS_PARAM in params and len(params[constants.DOH_DNS_PARAM]):
161 | try:
162 | body = doh_b64_decode(params[constants.DOH_DNS_PARAM][0])
163 | except binascii.Error:
164 | raise server_protocol.DOHParamsException(b"Invalid Body Parameter")
165 | if not body:
166 | raise server_protocol.DOHParamsException(b"Missing Body")
167 | else:
168 | raise server_protocol.DOHParamsException(b"Missing Body Parameter")
169 |
170 | return ct, body
171 |
172 |
173 | def dns_query_from_body(body: bytes, debug: bool = False) -> dns.message.Message:
174 | """ Given a bytes-object, attempt to unpack a DNS Message.
175 | :param body: the bytes-object wired representation of a DNS message.
176 |     :param debug: a boolean. When True, the error message sent to the client
177 |         will be more meaningful.
178 | :return: a dns.message.Message on success, raises DOHDNSException
179 | otherwise.
180 | """
181 | exc = b"Malformed DNS query"
182 | try:
183 | return dns.message.from_wire(body)
184 | except Exception as e:
185 | if debug:
186 | exc = str(e).encode("utf-8")
187 | raise server_protocol.DOHDNSException(exc)
188 |
189 |
190 | def doh_b64_encode(s: bytes) -> str:
191 | """Base 64 urlsafe encode and remove padding.
192 | :param s: input bytes-like object to be encoded.
193 | :return: urlsafe base 64 encoded string.
194 | """
195 | return base64.urlsafe_b64encode(s).decode("utf-8").rstrip("=")
196 |
197 |
198 | def doh_b64_decode(s: str) -> bytes:
199 | """Base 64 urlsafe decode, add padding as needed.
200 | :param s: input base64 encoded string with potentially missing padding.
201 |     :return: decoded bytes.
202 | """
203 | padding = "=" * (-len(s) % 4)
204 | return base64.urlsafe_b64decode(s + padding)
205 |
206 |
207 | def build_query_params(dns_query):
208 | """Given a wire-format DNS query, build the query parameters.
209 | """
210 | return {
211 | constants.DOH_DNS_PARAM: doh_b64_encode(dns_query),
212 | }
213 |
214 |
215 | def make_url(domain, uri):
216 |     """Utility function to return a URL ready to use from a browser or cURL.
217 | """
218 | p = urllib.parse.ParseResult(
219 | scheme="https", netloc=domain, path=uri, params="", query="", fragment="",
220 | )
221 | return urllib.parse.urlunparse(p)
222 |
223 |
224 | def client_parser_base():
225 |     """Build an ArgumentParser object with all the default arguments that are
226 |     useful to both client and stub.
227 |     :return: an ArgumentParser object with the common client-side arguments set.
228 | """
229 | parser = argparse.ArgumentParser()
230 | parser.add_argument(
231 | "--domain",
232 | default="localhost",
233 | help="Domain to make DOH request against. Default: [%(default)s]",
234 | )
235 | parser.add_argument(
236 | "--uri", default=constants.DOH_URI, help="DNS API URI. Default [%(default)s]",
237 | )
238 | parser.add_argument(
239 | "--remote-address",
240 | help="Remote address where the DOH proxy is running. If None, "
241 |         "--domain will be resolved to look up an IP. Default: [%(default)s]",
242 | )
243 | parser.add_argument(
244 | "--port", default=443, help="Port to connect to. Default: [%(default)s]"
245 | )
246 | parser.add_argument(
247 | "--post", action="store_true", help="Use HTTP POST instead of GET."
248 | )
249 | parser.add_argument(
250 | "--debug", action="store_true", help="Prints some debugging output",
251 | )
252 | parser.add_argument(
253 | "--level", default="DEBUG", help="log level [%(default)s]",
254 | )
255 | parser.add_argument(
256 | "--cafile", default=None, help="Specify custom CA file for cert verification"
257 | )
258 | parser.add_argument(
259 | "--insecure", action="store_true", help=argparse.SUPPRESS,
260 | )
261 | parser.add_argument(
262 | "--version", action="version", version="%(prog)s {}".format(__version__)
263 | )
264 | return parser
265 |
266 |
267 | def proxy_parser_base(*, port: int, secure: bool = True) -> argparse.ArgumentParser:
268 | parser = argparse.ArgumentParser()
269 | parser.add_argument(
270 | "--listen-address",
271 | default=["::1"],
272 | nargs="+",
273 | help="A list of addresses the proxy should listen on. "
274 | '"all" for all detected interfaces and addresses (netifaces '
275 | "required). Default: [%(default)s]",
276 | )
277 | parser.add_argument(
278 | "--port",
279 | "--listen-port",
280 | default=port,
281 | type=int,
282 | help="Port to listen on. Default: [%(default)s]",
283 | )
284 | parser.add_argument("--certfile", help="SSL cert file.", required=secure)
285 | parser.add_argument("--keyfile", help="SSL key file.", required=secure)
286 | parser.add_argument(
287 | "--upstream-resolver",
288 | default="::1",
289 | help="Upstream recursive resolver to send the query to. "
290 | "Default: [%(default)s]",
291 | )
292 | parser.add_argument(
293 | "--upstream-port",
294 | default=53,
295 | help="Upstream recursive resolver port to send the query to. "
296 | "Default: [%(default)s]",
297 | )
298 | parser.add_argument(
299 | "--uri", default=constants.DOH_URI, help="DNS API URI. Default [%(default)s]",
300 | )
301 | parser.add_argument(
302 | "--level", default="DEBUG", help="log level [%(default)s]",
303 | )
304 | parser.add_argument("--debug", action="store_true", help="Debugging messages...")
305 | parser.add_argument(
306 | "--version", action="version", version="%(prog)s {}".format(__version__),
307 | )
308 | parser.add_argument(
309 | "--ecs", action="store_true", help="Enable EDNS Client Subnet (ECS)"
310 | )
311 | return parser
312 |
313 |
314 | def configure_logger(name="", level="DEBUG"):
315 | """
316 | :param name: (optional) name of the logger, default: ''.
317 | :param level: (optional) level of logging, default: DEBUG.
318 | :return: a logger instance.
319 | """
320 | log_format = "%(name)s/%(levelname)s: %(message)s"
321 | if sys.stdout.isatty():
322 | # If this is a TTY (e.g. not running in a service manager),
323 | # prepend the time to log messages
324 | log_format = "%(asctime)s: " + log_format
325 | logging.basicConfig(format=log_format)
326 | logger = logging.getLogger(name)
327 | level_name = level.upper()
328 | level = getattr(logging, level_name, None)
329 | if not isinstance(level, int):
330 |         raise Exception("Invalid log level name: %s" % level_name)
331 | logger.setLevel(level)
332 | return logger
333 |
334 |
335 | def get_system_addresses():
336 |     """Get all IPv4/IPv6 addresses configured on the system.
337 | :return: List of addresses.
338 | """
339 | if isinstance(netifaces, ImportError):
340 | raise netifaces
341 |
342 | addresses = set()
343 | for iface in netifaces.interfaces():
344 | iface_addresses = netifaces.ifaddresses(iface)
345 | for family in (netifaces.AF_INET, netifaces.AF_INET6):
346 | if family not in iface_addresses:
347 | continue
348 | addresses.update(
349 | [f["addr"] for f in iface_addresses[family] if "addr" in f]
350 | )
351 | return list(addresses)
352 |
353 |
354 | def handle_dns_tcp_data(data, cb):
355 |     """Handle DNS data received over TCP (e.g. from a data_received callback).
356 | When enough data is received to assemble a DNS message, a
357 | callback is called and the remaining data (if any) is returned.
358 | :param data: Incoming bytes data.
359 | :param cb: Callback to call when a full TCP DNS message is received.
360 | :return: Any remaining bytes not fed to the callback.
361 | """
362 | if len(data) < 2:
363 | return data
364 | msglen = struct.unpack("!H", data[0:2])[0]
365 | while msglen + 2 <= len(data):
366 | dnsq = dns.message.from_wire(data[2 : msglen + 2])
367 | cb(dnsq)
368 | data = data[msglen + 2 :]
369 | if len(data) < 2:
370 | return data
371 | msglen = struct.unpack("!H", data[0:2])[0]
372 | return data
373 |
374 |
375 | def set_dns_ecs(dnsq, ip):
376 |     """Set the RFC 7871 EDNS Client Subnet (ECS) option in a DNS packet.
377 | An existing ECS option will not be overwritten if present.
378 | :param dnsq: DNS packet.
379 | :param ip: IP address. String or ipaddress object.
380 | :return: Whether ECS was set (bool)
381 | """
382 | options = []
383 | for option in dnsq.options:
384 | if isinstance(option, dns.edns.ECSOption):
385 | return False
386 | options.append(option)
387 |
388 | if not isinstance(ip, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
389 | ip = ipaddress.ip_address(ip)
390 | ip_supernet_bits = 56 if ip.version == 6 else 24
391 | ip_supernet = ipaddress.ip_network(ip).supernet(new_prefix=ip_supernet_bits,)
392 |
393 | options.append(
394 | dns.edns.ECSOption(
395 | address=ip_supernet.network_address.compressed, srclen=ip_supernet_bits,
396 | )
397 | )
398 | dnsq.use_edns(edns=0, ednsflags=dnsq.ednsflags, options=options)
399 | return True
400 |
--------------------------------------------------------------------------------
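As a quick illustration of how the helpers above fit together, the sketch below wire-encodes a DNS query, packs it into RFC 8484 style GET parameters, and parses it back the way the proxy would. It assumes this package is installed; the hostname dns.example.net is a placeholder.

import dns.message

from dohproxy import constants, utils

# Client side: wire-encode the query, base64url-encode it without padding
# and attach it to the DoH endpoint URI.
dnsq = dns.message.make_query("example.com", rdtype="A")
params = utils.build_query_params(dnsq.to_wire())
url = utils.make_url("dns.example.net", constants.DOH_URI)
query_string = "{}?{}={}".format(
    constants.DOH_URI, constants.DOH_DNS_PARAM, params[constants.DOH_DNS_PARAM]
)
print(url, query_string)

# Server side: split path and parameters, recover the body and unpack it
# back into a dns.message.Message.
path, parsed = utils.extract_path_params(query_string)
ct, body = utils.extract_ct_body(parsed)
assert ct == constants.DOH_MEDIA_TYPE
assert utils.dns_query_from_body(body).id == dnsq.id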
/test/test_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Copyright (c) 2018-present, Facebook, Inc.
4 | # All rights reserved.
5 | #
6 | # This source code is licensed under the BSD-style license found in the
7 | # LICENSE file in the root directory of this source tree.
8 | #
9 |
10 | import argparse
11 | import binascii
12 | import ssl
13 | import tempfile
14 | import unittest
15 |
16 | import dns.message
17 | import dns.rcode
18 |
19 | try:
20 | import netifaces
21 | except ImportError as e:
22 | netifaces = e
23 | from unittest.mock import MagicMock, patch
24 |
25 | from dohproxy import constants, server_protocol, utils
26 | from unittest_data_provider import data_provider
27 |
28 | # Randomly generated source of words/b64
29 | # gshuf /usr/share/dict/words | head -n 20 | while read line
30 | # do
31 | # echo -e "(b'$line', '$(echo -n $line | base64 | tr -d '=' )',),"
32 | # done
33 |
34 | TEST_CA = (
35 | "-----BEGIN CERTIFICATE-----\n"
36 | "MIIDVzCCAj+gAwIBAgIJAOGYgypV1bcIMA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV\n"
37 | "BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg\n"
38 | "Q29tcGFueSBMdGQwHhcNMTgwMjI2MjIxODA3WhcNMjgwMjI0MjIxODA3WjBCMQsw\n"
39 | "CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh\n"
40 | "dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\n"
41 | "zkceT8GjMPz7e6nU30CO6aEonx3iszpNXpa+nH31M1NBs4wF2Rli9M1exyX2tAu9\n"
42 | "gr4ImpIXurryeT61RJYprRBLBdy2FBwx7tgSOeaxZupnQkfd7HwtBJD3dg7cBGpe\n"
43 | "RbJ44CQozLt0n16FM7yX2NwBxBxMKG+Brqo+PB9dR219Nzh5jB/UTWH21rrMYjiW\n"
44 | "ABa0OnMh/oc/YGSuR7ymtYWIKL2u3fZ1wV6yCblAKDIhAOhxY3yL6SxyS4uE2j8i\n"
45 | "XuMNCApD7mKbS3DGK6/H/zbn5jVwpzPr1FCPCkuWixoFH9Om6d7+x0HPrrO7yYND\n"
46 | "5cNxqR8mpsy2tpHDG+9MyQIDAQABo1AwTjAdBgNVHQ4EFgQUxLNYNYbSS7j6P6Wh\n"
47 | "UwToShMPcPIwHwYDVR0jBBgwFoAUxLNYNYbSS7j6P6WhUwToShMPcPIwDAYDVR0T\n"
48 | "BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEACj/aXTKWStuM7PaiGzeuDHCyIRMm\n"
49 | "fDoRndTZXMH3bKmIb+2DlTjcLvHUeFSs21opssPL1U1fcvJRi3Yd5DYboiKILjO/\n"
50 | "0iNVGx6CPMiZZsYb+yeoA2ZtVqe/HoKzmeak4nP/QTv5xYRtFgSzXFmEPuC8CWdr\n"
51 | "xBdVAGX08H8vYlQk72YjKS/eJ6WbrijU0OnI3ZVlhMmlhwzW1cr/QmJSPoTsbS+a\n"
52 | "3c2aLV6NGplhmr2CuqqznDKT/QfxSk5qMoKAMdtA4iT5S5fPG5kGExt2MD+aimOw\n"
53 | "DOeHuyCLRXxIolT+8r2BY56sV1uYyuBFw0RAnEpmnc2d072DND6XcDeQCw==\n"
54 | "-----END CERTIFICATE-----"
55 | )
56 |
57 |
58 | def b64_source():
59 | return [
60 | (b"punner", "cHVubmVy",),
61 | (b"visitation", "dmlzaXRhdGlvbg",),
62 | (b"werf", "d2VyZg",),
63 | (b"Hysterophyta", "SHlzdGVyb3BoeXRh",),
64 | (b"diurne", "ZGl1cm5l",),
65 | (b"reputableness", "cmVwdXRhYmxlbmVzcw",),
66 | (b"uncompletely", "dW5jb21wbGV0ZWx5",),
67 | (b"thalami", "dGhhbGFtaQ",),
68 | (b"unpapal", "dW5wYXBhbA",),
69 | (b"nonapposable", "bm9uYXBwb3NhYmxl",),
70 | (b"synalgic", "c3luYWxnaWM",),
71 | (b"exscutellate", "ZXhzY3V0ZWxsYXRl",),
72 | (b"predelegation", "cHJlZGVsZWdhdGlvbg",),
73 | (b"Varangi", "VmFyYW5naQ",),
74 | (b"coucal", "Y291Y2Fs",),
75 | (b"intensely", "aW50ZW5zZWx5",),
76 | (b"apprize", "YXBwcml6ZQ",),
77 | (b"jirble", "amlyYmxl",),
78 | (b"imparalleled", "aW1wYXJhbGxlbGVk",),
79 | (b"dinornithic", "ZGlub3JuaXRoaWM",),
80 | ]
81 |
82 |
83 | class TestDOHB64(unittest.TestCase):
84 | @data_provider(b64_source)
85 | def test_b64_encode(self, input, output):
86 | self.assertEqual(utils.doh_b64_encode(input), output)
87 |
88 | @data_provider(b64_source)
89 | def test_b64_decode(self, output, input):
90 | self.assertEqual(utils.doh_b64_decode(input), output)
91 |
92 | def test_b64_decode_invalid(self):
93 | """ When providing an invalid input to base64.urlsafe_b64decode it
94 | should raise a binascii.Error exception.
95 | """
96 | with self.assertRaisesRegex(
97 | binascii.Error, "^(Invalid base64-encoded string|Incorrect padding)"
98 | ):
99 | utils.doh_b64_decode("_")
100 |
101 |
102 | def make_url_source():
103 | return [
104 | ("foo", "uri", "https://foo/uri",),
105 | ("foo", "/uri", "https://foo/uri",),
106 | ("foo", "/uri/", "https://foo/uri/",),
107 | ("foo:8443", "/uri/", "https://foo:8443/uri/",),
108 | ]
109 |
110 |
111 | class TestMakeURL(unittest.TestCase):
112 | @data_provider(make_url_source)
113 | def test_make_url(self, domain, uri, output):
114 | self.assertEqual(utils.make_url(domain, uri), output)
115 |
116 |
117 | class TestBuildQueryParams(unittest.TestCase):
118 | def test_has_right_keys(self):
119 |         """ Check that the returned params contain only the expected keys. """
120 | keys = {
121 | constants.DOH_DNS_PARAM,
122 | }
123 | self.assertEqual(keys, utils.build_query_params(b"").keys())
124 |
125 | def test_query_must_be_bytes(self):
126 | """ Check that this function raises when we pass a string. """
127 | with self.assertRaises(TypeError):
128 | utils.build_query_params("")
129 |
130 | def test_query_accepts_bytes(self):
131 | """ Check that this function accepts a bytes-object. """
132 | utils.build_query_params(b"")
133 |
134 | def test_body_b64encoded(self):
135 | """ Check that this function is b64 encoding the content of body. """
136 | q = b""
137 | params = utils.build_query_params(q)
138 | self.assertEqual(utils.doh_b64_encode(q), params[constants.DOH_DNS_PARAM])
139 |
140 |
141 | class TestTypoChecker(unittest.TestCase):
142 | def test_client_base_parser(self):
143 |         """ Basic test to check that there are no stupid typos.
144 | """
145 | utils.client_parser_base()
146 |
147 | def test_proxy_base_parser_noargs(self):
148 | """ We must provide a port parameter to proxy_parser_base. """
149 | with self.assertRaises(TypeError):
150 | utils.proxy_parser_base()
151 |
152 | def test_proxy_base_default_secure_require_certs(self):
153 |         """ If secure (the default), the parser requires --certfile and --keyfile. """
154 | p = utils.proxy_parser_base(port=80)
155 | # Since we are secure, we need --certfile and --keyfile
156 | with self.assertRaises(SystemExit) as e:
157 | args, left = p.parse_known_args()
158 | self.assertEqual(e.exception.code, 2) # exit status must be 2
159 |
160 | def test_proxy_base_non_secure_no_certfile(self):
161 |         """ If not using TLS, the TLS-related arguments are not required. """
162 | p = utils.proxy_parser_base(port=80, secure=False)
163 | args, left = p.parse_known_args()
164 |         # The values for certfile and keyfile must be None
165 | self.assertIsNone(args.certfile)
166 | self.assertIsNone(args.keyfile)
167 |
168 | def test_configure_logger(self):
169 |         """ Basic test to check that there are no stupid typos.
170 | """
171 | utils.configure_logger()
172 |
173 | def test_configure_logger_unknown_level(self):
174 |         """ Check that an unknown log level name raises an exception.
175 | """
176 | with self.assertRaises(Exception):
177 | utils.configure_logger(level="thisisnotalevel")
178 |
179 |
180 | def extract_path_params_source():
181 | return [
182 | ("/foo?a=b&c=d#1234", ("/foo", {"a": ["b"], "c": ["d"]})),
183 | ("/foo", ("/foo", {})),
184 | ("/foo?#", ("/foo", {})),
185 | ("foo", ("foo", {})),
186 | # Test that we keep empty values
187 | ("/foo?a=b&c", ("/foo", {"a": ["b"], "c": [""]})),
188 | ("/foo?a=b&c=", ("/foo", {"a": ["b"], "c": [""]})),
189 | ]
190 |
191 |
192 | class TestExtractPathParams(unittest.TestCase):
193 | @data_provider(extract_path_params_source)
194 | def test_extract_path_params(self, uri, output):
195 | path, params = utils.extract_path_params(uri)
196 | self.assertEqual(path, output[0])
197 | self.assertDictEqual(params, output[1])
198 |
199 |
200 | def extract_ct_body_valid_source():
201 | return [
202 | ("/foo?ct&dns=aW1wYXJhbGxlbGVk", (constants.DOH_MEDIA_TYPE, b"imparalleled"),),
203 | ("/foo?ct=&dns=aW1wYXJhbGxlbGVk", (constants.DOH_MEDIA_TYPE, b"imparalleled"),),
204 | (
205 | "/foo?ct=bar&dns=aW1wYXJhbGxlbGVk",
206 | (constants.DOH_MEDIA_TYPE, b"imparalleled"),
207 | ),
208 | ("/foo?dns=aW1wYXJhbGxlbGVk", (constants.DOH_MEDIA_TYPE, b"imparalleled"),),
209 | ]
210 |
211 |
212 | def extract_ct_body_invalid_source():
213 | return [
214 | ("/foo?ct=&dns=", "Missing Body",),
215 | ("/foo?ct=", "Missing Body Parameter",),
216 | ("/foo?ct=bar&dns=_", "Invalid Body Parameter",),
217 | ]
218 |
219 |
220 | class TestExtractCtBody(unittest.TestCase):
221 | @data_provider(extract_ct_body_valid_source)
222 | def test_extract_ct_body_valid(self, uri, output):
223 | path, params = utils.extract_path_params(uri)
224 | ct, body = utils.extract_ct_body(params)
225 | self.assertEqual(ct, output[0])
226 | self.assertEqual(body, output[1])
227 |
228 | @data_provider(extract_ct_body_invalid_source)
229 | def test_extract_ct_body_invalid(self, uri, output):
230 | path, params = utils.extract_path_params(uri)
231 | with self.assertRaisesRegex(server_protocol.DOHParamsException, output):
232 | utils.extract_ct_body(params)
233 |
234 |
235 | class TestDNSQueryFromBody(unittest.TestCase):
236 | def test_invalid_message_no_debug(self):
237 | body = "a"
238 | with self.assertRaisesRegex(
239 | server_protocol.DOHDNSException, "Malformed DNS query"
240 | ):
241 | utils.dns_query_from_body(body)
242 |
243 | def test_invalid_message_with_debug(self):
244 | body = "a"
245 | with self.assertRaisesRegex(server_protocol.DOHDNSException, "is too short"):
246 | utils.dns_query_from_body(body, debug=True)
247 |
248 | def test_valid_message(self):
249 | dnsq = dns.message.Message()
250 | body = dnsq.to_wire()
251 | self.assertEqual(utils.dns_query_from_body(body), dnsq)
252 |
253 |
254 | class TestDNSQuery2Log(unittest.TestCase):
255 | def setUp(self):
256 | self._qname = "example.com"
257 | self._qtype = "A"
258 | self._q = dns.message.make_query(self._qname, self._qtype)
259 |
260 | def test_valid_query(self):
261 | """
262 | test that no exception is thrown with a legitimate query.
263 | """
264 | utils.dnsquery2log(self._q)
265 |
266 | def test_valid_response(self):
267 | """
268 | test that no exception is thrown with a legitimate response.
269 | """
270 | r = dns.message.make_response(self._q, recursion_available=True)
271 | utils.dnsquery2log(r)
272 |
273 | def test_refused_response_no_question(self):
274 | """
275 |         test that no exception is thrown with a REFUSED response that has no question section.
276 | """
277 | r = dns.message.make_response(self._q, recursion_available=True)
278 | r.set_rcode(dns.rcode.REFUSED)
279 | r.question = []
280 | utils.dnsquery2log(r)
281 |
282 |
283 | class TestDNSAns2Log(unittest.TestCase):
284 | def setUp(self):
285 | self._qname = "example.com"
286 | self._qtype = "A"
287 | self._q = dns.message.make_query(self._qname, self._qtype)
288 |
289 | def test_valid_query(self):
290 | """
291 | test that no exception is thrown with a legitimate query.
292 | """
293 | utils.dnsans2log(self._q)
294 |
295 | def test_valid_response(self):
296 | """
297 | test that no exception is thrown with a legitimate response.
298 | """
299 | r = dns.message.make_response(self._q, recursion_available=True)
300 | utils.dnsans2log(r)
301 |
302 | def test_refused_response_no_question(self):
303 | """
304 |         test that no exception is thrown with a REFUSED response that has no question section.
305 | """
306 | r = dns.message.make_response(self._q, recursion_available=True)
307 | r.set_rcode(dns.rcode.REFUSED)
308 | r.question = []
309 | utils.dnsans2log(r)
310 |
311 |
312 | @patch("ssl.SSLContext.set_alpn_protocols", MagicMock())
313 | @patch("ssl.SSLContext.load_cert_chain", MagicMock())
314 | class TestProxySSLContext(unittest.TestCase):
315 | def setUp(self):
316 | self.args = argparse.Namespace()
317 | self.args.certfile = None
318 | self.args.keyfile = None
319 |
320 |         # not all openssl versions may support DOH_CIPHERS; override with the
321 |         # ciphers supported by the testing platform
322 | constants.DOH_CIPHERS = ssl._DEFAULT_CIPHERS
323 |
324 | def test_proxy_ssl_context(self):
325 |         """ Test a default SSL context; it should have HTTP/2 disabled """
326 | ssl_context = utils.create_ssl_context(self.args)
327 | self.assertIsInstance(ssl_context, ssl.SSLContext)
328 | # don't enable http2
329 | self.assertEqual(ssl_context.set_alpn_protocols.called, 0)
330 |
331 | def test_proxy_ssl_context_http2_enabled(self):
332 |         """ Test an SSL context with HTTP/2 enabled """
333 | ssl_context = utils.create_ssl_context(self.args, http2=True)
334 | self.assertIsInstance(ssl_context, ssl.SSLContext)
335 | # enable http2
336 | self.assertEqual(ssl_context.set_alpn_protocols.called, 1)
337 |
338 |
339 | class TestSSLContext(unittest.TestCase):
340 | def setUp(self):
341 | self._CA = TEST_CA
342 | self._CA_serial = "E198832A55D5B708"
343 |
344 | # ALPN requires >=openssl-1.0.2
345 | # NPN requires >=openssl-1.0.1
346 | for fn in ["set_alpn_protocols"]:
347 | patcher = unittest.mock.patch("ssl.SSLContext.{0}".format(fn))
348 | patcher.start()
349 | self.addCleanup(patcher.stop)
350 |
351 | def test_insecure_context(self):
352 | """
353 | Test that insecure flag creates a context where verify method is
354 | CERT_NONE
355 | """
356 | sslctx = utils.create_custom_ssl_context(insecure=True)
357 | self.assertEqual(sslctx.verify_mode, ssl.CERT_NONE)
358 |
359 | def test_secure_context(self):
360 | """
361 | Test that if insecure is False, the ssl context created has
362 | CERT_REQUIRED as the verify method
363 | """
364 | sslctx = utils.create_custom_ssl_context(insecure=False)
365 | self.assertEqual(sslctx.verify_mode, ssl.CERT_REQUIRED)
366 |
367 | def test_cafile(self):
368 | with tempfile.NamedTemporaryFile() as ca:
369 | ca.write(self._CA.encode())
370 | ca.flush()
371 | sslctx = utils.create_custom_ssl_context(insecure=False, cafile=ca.name)
372 | self.assertTrue(
373 | self._CA_serial
374 | in [crt["serialNumber"] for crt in sslctx.get_ca_certs()]
375 | )
376 |
377 |
378 | @unittest.skipIf(isinstance(netifaces, ImportError), "netifaces not installed")
379 | class TestGetSystemAddresses(unittest.TestCase):
380 | def test_get_system_addresses(self):
381 | self.assertIn("127.0.0.1", utils.get_system_addresses())
382 |
383 |
384 | class TestHandleDNSTCPData(unittest.TestCase):
385 | def setUp(self):
386 | self._data = (
387 | b"\x00/\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00"
388 | b"\x11connectivitycheck\x07gstatic\x03com\x00\x00\x1c\x00\x01"
389 | )
390 | self._cb_data = []
391 |
392 | def _cb(self, data):
393 | self._cb_data.append(data)
394 |
395 | def test_short(self):
396 | # Short message (no length check), returns itself
397 | res = utils.handle_dns_tcp_data(self._data[0:1], self._cb)
398 | self.assertEqual(res, self._data[0:1])
399 | self.assertEqual(self._cb_data, [])
400 |
401 | def test_partial(self):
402 | # Partial message (no cb), returns itself
403 | res = utils.handle_dns_tcp_data(self._data[0:10], self._cb)
404 | self.assertEqual(res, self._data[0:10])
405 | self.assertEqual(self._cb_data, [])
406 |
407 | def test_complete(self):
408 | # Complete message (calls cb once)
409 | res = utils.handle_dns_tcp_data(self._data, self._cb)
410 | self.assertEqual(res, b"")
411 | self.assertIsInstance(self._cb_data[0], dns.message.Message)
412 |
413 | def test_complete_plus_partial(self):
414 | # Complete message (calls cb once) + partial message
415 | res = utils.handle_dns_tcp_data(self._data + self._data[0:10], self._cb)
416 | self.assertEqual(res, self._data[0:10])
417 | self.assertIsInstance(self._cb_data[0], dns.message.Message)
418 |
419 | def test_complete_multiple(self):
420 |         # Multiple complete messages will call the cb multiple times
421 | res = utils.handle_dns_tcp_data(self._data + self._data, self._cb)
422 | self.assertEqual(res, b"")
423 | self.assertIsInstance(self._cb_data[0], dns.message.Message)
424 | self.assertIsInstance(self._cb_data[1], dns.message.Message)
425 |
426 |
427 | class TestDNSECS(unittest.TestCase):
428 | def test_set_dns_ecs_ipv4(self):
429 | dnsq = dns.message.make_query("www.example.com", rdtype="A")
430 | utils.set_dns_ecs(dnsq, "10.0.0.242")
431 | self.assertEqual(dnsq.edns, 0)
432 | self.assertEqual(dnsq.options[0].address, "10.0.0.0")
433 | self.assertEqual(dnsq.options[0].srclen, 24)
434 |
435 | def test_set_dns_ecs_ipv6(self):
436 | dnsq = dns.message.make_query("www.example.com", rdtype="A")
437 | utils.set_dns_ecs(dnsq, "2000::aa")
438 | self.assertEqual(dnsq.edns, 0)
439 | self.assertEqual(dnsq.options[0].address, "2000::")
440 | self.assertEqual(dnsq.options[0].srclen, 56)
441 |
--------------------------------------------------------------------------------
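For context on the TCP handling tested above: DNS over TCP (RFC 1035, section 4.2.2) prefixes every message with a two-byte big-endian length field, and handle_dns_tcp_data consumes as many complete frames as the buffer holds, returning any leftover bytes for the next read. Below is a small sketch of that buffering behaviour, assuming this package is installed.

import struct

import dns.message

from dohproxy import utils

received = []

# Frame a wire-format query with its 2-byte big-endian length prefix, the
# way a client would write it to a TCP connection.
dnsq = dns.message.make_query("example.com", rdtype="A")
wire = dnsq.to_wire()
framed = struct.pack("!H", len(wire)) + wire

# One and a half messages: the complete frame is delivered to the callback,
# the partial tail is returned for the next read.
leftover = utils.handle_dns_tcp_data(framed + framed[:10], received.append)
assert len(received) == 1 and leftover == framed[:10]

# Completing the second frame delivers the second message and drains the buffer.
leftover = utils.handle_dns_tcp_data(leftover + framed[10:], received.append)
assert len(received) == 2 and leftover == b""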