├── .github └── workflows │ ├── python-publish.yml │ └── python-test.yml ├── .gitignore ├── CHANGES.md ├── CONTRIBUTORS.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── examples ├── client-server │ ├── README.md │ ├── client.py │ └── server.py ├── netunnel.example.conf ├── secret-auth-plugin │ ├── README.md │ └── secret_auth_plugin.py └── server-server │ ├── README.md │ ├── client.py │ ├── server1.conf │ └── server2.conf ├── netunnel ├── __init__.py ├── client.py ├── common │ ├── __init__.py │ ├── auth.py │ ├── channel.py │ ├── const.py │ ├── exceptions.py │ ├── security.py │ ├── tunnel.py │ └── utils.py ├── server │ ├── __init__.py │ ├── __main__.py │ ├── client_handler.py │ ├── config.py │ ├── peer.py │ ├── schemas.py │ ├── server.py │ └── static_tunnel.py └── tests │ ├── __init__.py │ ├── auth_utils.py │ ├── conftest.py │ ├── helpers │ └── proxy_plugins.py │ ├── test_channel.py │ ├── test_config.py │ ├── test_flow.py │ ├── test_peers.py │ ├── test_server.py │ ├── test_static_tunnels.py │ ├── test_tunnel.py │ ├── test_utils.py │ └── utils.py ├── requirements.dev.txt ├── requirements.txt └── setup.py /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Set up Python 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: '3.x' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install setuptools wheel twine 25 | - name: Build and publish 26 | env: 27 | TWINE_USERNAME: ${{ secrets.PYPI_USER }} 28 
| TWINE_PASSWORD: ${{ secrets.PYPI_WRITE }} 29 | run: | 30 | python setup.py sdist bdist_wheel 31 | twine upload dist/* 32 | -------------------------------------------------------------------------------- /.github/workflows/python-test.yml: -------------------------------------------------------------------------------- 1 | name: NETunnel pytest 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | test: 11 | 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: [3.8, 3.9] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | python -m pip install -r requirements.dev.txt 27 | python -m pip install . 28 | - name: Run pytest 29 | run: | 30 | pytest netunnel 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # NETunnel specifics 2 | netunnel.conf 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # Pycharm project settings 124 | .idea 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | .DS_Store 138 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | ## 1.0.5 (2021-03-30) 3 | ### Features 4 | - Add a dynamic factory reset REST API call 5 | 6 | ### Misc 7 | - Set marshmallow requirement to less than 4.0.0 8 | 9 | 10 | ## 1.0.4 (2021-01-25) 11 | ### Misc 12 | - Include LICENSE, CHANGES.md and README.md in the PyPi package. 13 | 14 | 15 | ## 1.0.3 (2021-01-25) 16 | ### Misc 17 | - Extend supported version of `importlib-metadata` to the lowest. 18 | -------------------------------------------------------------------------------- /CONTRIBUTORS.txt: -------------------------------------------------------------------------------- 1 | FOUNDER - Tomer Shlomo 2 | 3 | - Contributors - 4 | ---------------- 5 | Amit Itzkovitch 6 | Or Zehavi 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Claroty LTD 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include CHANGES.md 3 | include README.md 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NETunnel 2 | NETunnel is a tool to create network tunnels over HTTP/S written in Python 3.\ 3 | It can be used both in a client-server and in a server-server model. 4 | 5 | ## Getting Started 6 | 7 | ### Installing 8 | ```bash 9 | pip install netunnel 10 | ``` 11 | We officially support Python 3.6+. 12 | 13 | ### Usage 14 | The following example creates an HTTP tunnel from the client to the server's port 22 (SSHD service). 15 | 16 | Running the server: (In production, use --config-path to preserve changes) 17 | ```bash 18 | $ python -m netunnel.server 19 | The server is running in stateless mode. 
Use --config-path to generate a config file 20 | netunnel_server - INFO - Generating default secret-key 21 | netunnel_server - INFO - Starting server on 127.0.0.1:4040 22 | ``` 23 | 24 | Running the client: 25 | ```bash 26 | $ python -m netunnel.client --remote-port 22 27 | Opening tunnel to the server... 28 | Tunnel entrance socket: 127.0.0.1:54781 29 | Tunnel exit socket: 127.0.0.1:22 30 | ``` 31 | 32 | The server's SSHD service is now accessible from the client: 33 | ```bash 34 | $ ssh -p 54781 localhost 35 | ``` 36 | 37 | Please take a look at the [examples](examples) directory for additional usages. 38 | 39 | ## How it works 40 | 1. The client connects to the server and creates a websocket connection we call a "Channel". The channel 41 | is used by server to send commands to the client and it performs heartbeat pings so both sides will know 42 | if there are connection issues. 43 | 2. The client makes a POST request to create a tunnel. 44 | 3. Either the client or the server (depends on the tunnel's direction) listens on a socket locally 45 | for incoming connections. 46 | 4. For every new connection, it generates a websocket connection to the remote, and stream the data 47 | from the connection over to the websocket and vice versa. 48 | 5. Whenever a websocket is created, the remote connects to a socket locally and stream data from the 49 | websocket to the socket and vice versa. 50 | 51 | ``` 52 | Connection1 -> Websocket1 -> -> 53 | Connection2 -> Tunnel Entrance -> Websocket2 -> Tunnel Exit -> Service 54 | Connection3 -> Websocket3 -> -> 55 | ``` 56 | 57 | ### Under the hood 58 | There are 2 core objects which performs the tunnelling: 59 | 60 | - **InputTunnel** - The tunnel's entrance which listens on a socket. 61 | - **OutputTunnel** - The tunnel's exit which creates connections to a socket. 
62 | 63 | When a normal tunnel is created, the client creates an InputTunnel and the server creates 64 | an OutputTunnel, while reverse tunnels are essentially the server is creating InputTunnel while 65 | the client is creating an OutputTunnel. 66 | 67 | InputTunnel is initialized with a feeder of websockets that the client/server provides, so 68 | that the implementation can be generic. In reverse tunnels, The server uses the channel to request 69 | the client for a new websocket when it needs to feed a new websocket. 70 | 71 | ## Server Configurations 72 | The server's configuration file is optional, but recommended in production environments. 73 | 74 | When running the NETunnel server, you can provide a path to a configuration file using `-c` or `--config-path` flags, 75 | and the server will generate a default configuration file at that location. 76 | If there is an existing configuration in that path, the server will load it, and merge it with its default 77 | configurations, and for any change that was made dynamically to the server using the API, it will commit it to 78 | the configuration file. 79 | 80 | The configuration file is in JSON format and support the following keys: 81 | - `allowed_tunnel_destinations` - A key-value mapping of IPs and ports(as strings separated by comma) allowed to be 82 | used as a tunnel's exit sockets. The special symbol `*` supported to allow all ports for a certain IP. 83 | Defaults to `{"127.0.0.1": "*"}` 84 | - `secret_key` - A passphrase used as an encryption key for sensitive settings to avoid storing them in the disk as plain text. 85 | The key is generated automatically, but we recommend using the `-s`/`--secret-key` when running the server which will avoid 86 | storing the key in the configuration file. Setting the environment variable `NETUNNEL_SECRET_KEY` will behave just the 87 | same as the flag, and won't be stored in the configuration. 
If you wish to decrypt, encrypt, or generate a key manually, see 88 | `python -m netunnel.common.security`. 89 | - `peers` - A list of remote NETunnel servers that can be used to set static tunnels (See `Peers` in the Additional Features). 90 | For an example of how to set a peer, look at [examples/server-server](examples/server-server). 91 | - `allow_unverified_ssl_peers` - When set to `true`, remote peers certificates won't be verified. Defaults to `false`. 92 | - `revision` - Currently unused. This will be used for configuration migrations purposes. You should not modify 93 | this field manually in any use case. 94 | - `http_proxy` - Settings for an optional global HTTP proxy to use for any requests the server may need to make, for 95 | example to remote peers. The setting include a key-value mapping of the following: 96 | - `proxy_url` - The URL to the remote proxy server 97 | - `username` - An encrypted (using the `secret_key`) username string 98 | - `password` - An encrypted (using the `secret_key`) password string 99 | 100 | A useful feature of NETunnel configuration is that it can parse environment variables on load to modify the default 101 | values of any key. The configuration will search for variables starting with the prefix `NETUNNEL_`, following by the 102 | uppercase of any existing key. The value is expected to be in JSON format. 103 | 104 | For example, in POSIX environments, running: 105 | ```bash 106 | export NETUNNEL_ALLOWED_TUNNEL_DESTINATIONS='{"127.0.0.1": "22"}' 107 | export NETUNNEL_ALLOW_UNVERIFIED_SSL_PEERS='true' 108 | python -m netunnel.server 109 | ``` 110 | Will change the default `allowed_tunnel_destinations` to `{"127.0.0.1": "22"}` 111 | and the default `allow_unverified_ssl_peers` to `true`. 112 | 113 | An example for a configuration file: [examples/netunnel.example.conf](examples/netunnel.example.conf) 114 | 115 | ## Additional Features 116 | * Peers - A NETunnel server can register remote NETunnel servers called peers. 
The peers are stored in the 117 | configuration and can be used to create static tunnels. 118 | * Static Tunnels - NETunnel supports permanent tunnels between servers. This is a useful feature 119 | for when we want a long term tunnels between machines. It can be used by making the netunnel server to run as a service 120 | and create a configuration file with peers and static tunnels which initialized on startup. Both peers and static tunnels 121 | can also be created dynamically via the server's API. 122 | An example for a server-server model can be found here: [examples/server-server](examples/server-server) 123 | * HTTP proxy support - You can use `--proxy-url` `--proxy-username` `--proxy-password` to configure a proxy 124 | for the client. When used in a server-server model, there can be a global proxy used by the server to connect with. 125 | The credentials of the global proxy in that case are stored encrypted in the server's configuration using a secret_key. 126 | * Authentication plugins - By default, no authentication is made between NETunnel instances. 127 | This can be configured by inherit the auth classes on [netunnel/common/auth.py](netunnel/common/auth.py) and pass them 128 | to the client and server using `--auth-plugin`. A plugin example: [examples/secret-auth-plugin](examples/secret-auth-plugin) 129 | -------------------------------------------------------------------------------- /examples/client-server/README.md: -------------------------------------------------------------------------------- 1 | # NETunnel Client-Server example 2 | In this example, you have a `server.py` and a `client.py` files. 3 | They contain a very minimalist code for running a NETunnelServer and a NETunnelClient using their python objects. 4 | 5 | The server.py also creates a `netunnel.conf` with the default configurations if there isn't one already. 
6 | 7 | ## Usage 8 | Run the server: 9 | ```bash 10 | python server.py 11 | ``` 12 | 13 | Run the client: 14 | ```bash 15 | python client.py 16 | ``` 17 | In this example, the client always opens a tunnel on port 12345 to the server on port 22: 18 | ```bash 19 | ssh -p 12345 localhost 20 | ``` -------------------------------------------------------------------------------- /examples/client-server/client.py: -------------------------------------------------------------------------------- 1 | from netunnel.client import NETunnelClient 2 | 3 | import asyncio 4 | 5 | 6 | async def main(): 7 | async with NETunnelClient('http://localhost:4040') as client: 8 | # Open a client-to-server tunnel from localhost:12345 to localhost:22 9 | tunnel = await client.open_tunnel_to_server(local_address='127.0.0.1', 10 | local_port=12345, 11 | remote_address='127.0.0.1', 12 | remote_port=22) 13 | await tunnel.join() 14 | 15 | 16 | if __name__ == "__main__": 17 | loop = asyncio.get_event_loop() 18 | loop.run_until_complete(main()) 19 | -------------------------------------------------------------------------------- /examples/client-server/server.py: -------------------------------------------------------------------------------- 1 | from netunnel.server.server import NETunnelServer 2 | 3 | import asyncio 4 | 5 | 6 | def main(): 7 | # This will create a configuration file named 'netunnel.conf' 8 | server = NETunnelServer(config_path='netunnel.conf') 9 | loop = asyncio.get_event_loop() 10 | try: 11 | asyncio.ensure_future(server.start()) 12 | loop.run_forever() 13 | finally: 14 | loop.close() 15 | 16 | 17 | if __name__ == '__main__': 18 | main() 19 | -------------------------------------------------------------------------------- /examples/netunnel.example.conf: -------------------------------------------------------------------------------- 1 | { 2 | "allowed_tunnel_destinations": { 3 | "127.0.0.1": "*" 4 | }, 5 | "http_proxy": { 6 | "proxy_url": "http://localhost:8899", 7 | "username": 
"gAAAAABf_sxibg1FnyJWYRS5j4FlJPhbMqxjrjidUB7y1OTjSAZZsp0lyuvB1jhlbEuzghYszO_E4Td-kLK0rLBKoWhWC1fWKQ==", 8 | "password": "gAAAAABf_sxiRu0n0vVsBKvnnyZ7pCb_IlZS_XsRgH_kBTY_dRnEheDMDmah8SbWHPFPFCnYYI3usiA1u-IxY61rNnHxpTXPiw==" 9 | }, 10 | "http_proxy_test_url": "https://google.com", 11 | "allow_unverified_ssl_peers": false, 12 | "peers": [ 13 | { 14 | "static_tunnels": [ 15 | { 16 | "tunnel_local_address": "127.0.0.1", 17 | "tunnel_remote_address": "127.0.0.1", 18 | "tunnel_remote_port": 22, 19 | "tunnel_local_port": 20000, 20 | "id": 1 21 | } 22 | ], 23 | "auth_data": null, 24 | "target_netunnel_url": "http://localhost:4040", 25 | "name": "peer1", 26 | "id": 1 27 | } 28 | ], 29 | "revision": 1, 30 | "secret_key": "3vpPjEJ6pEckqFxQ5Vbtwa9pHw9c6gQUxS8NueEZQII=" 31 | } -------------------------------------------------------------------------------- /examples/secret-auth-plugin/README.md: -------------------------------------------------------------------------------- 1 | # NETunnel Secret Auth Plugin 2 | This is an example of an authentication plugin for NETunnel. 3 | The plugin initialize the NETunnel server with a secret of your choosing and only clients which knows the 4 | secret can open tunnels with the server. 5 | 6 | This plugin requires pyjwt since its using JWT tokens for the authentication, so make sure you install it first: 7 | ```bash 8 | pip install "pyjwt<2.0.0" 9 | ``` 10 | 11 | # Usage 12 | Run the server. Our secret will be `abc` for this example: 13 | ```bash 14 | python -m netunnel.server --auth-plugin secret_auth_plugin.SecretServerAuth --auth-data '{"secret": "abc"}' 15 | ``` 16 | Run the client: 17 | ```bash 18 | python -m netunnel.client --auth-plugin secret_auth_plugin.SecretClientAuth --auth-data '{"secret": "abc"}' 19 | ``` 20 | If you try to run the client without the plugin or with different secret, you'll receive Forbidden (403) respond. 
"""
This is an example for NETunnel auth plugin. It enforces clients to include a secret that only the server knows
in order to be authorized with it.
Please make sure to read netunnel/common/auth.py first so you would understand when each method is called in
NETunnel's core code.
"""
import jwt
import time

from aiohttp import web
from netunnel.common.auth import NETunnelClientAuth, NETunnelServerAuth
from netunnel.common.utils import get_session
from netunnel.client import NETunnelClient


class SecretClientAuth(NETunnelClientAuth):
    # This object will be used by NETunnelClient

    def __init__(self, secret, *args, **kwargs):
        # The `secret` parameter is exclusive to this plugin. You can declare whatever parameters you need
        # as long as your users know what parameters your plugin expects.
        super().__init__(*args, **kwargs)
        self._secret = secret
        # JWT received from the server after a successful authenticate()
        self._token = None

    async def authenticate(self, client: NETunnelClient, *args, **kwargs):
        # `client` will always be passed to authenticate(), but it cannot be used to make requests to the server, this
        # should be handled manually. The reason for that is that we're not authenticated yet of course.
        payload = {'secret': self._secret}
        session = await get_session()
        async with session:
            async with session.post(f'{client.server_url}/authenticate', json=payload, raise_for_status=True) as resp:
                data = await resp.json()
                self._token = data['token']

    async def is_authenticated(self):
        """
        Return True only when we hold a token that decodes successfully and has not expired.
        """
        if self._token is None:
            return False
        try:
            result = jwt.decode(self._token.encode(), self._secret)
        except (ValueError, jwt.InvalidTokenError):
            # jwt.InvalidTokenError is the base class of both jwt.DecodeError and
            # jwt.ExpiredSignatureError. Previously only DecodeError was caught, so an
            # expired token raised ExpiredSignatureError out of this method instead of
            # returning False and triggering a transparent re-authentication.
            return False
        return result['exp'] > time.time()

    async def get_authorized_headers(self):
        # Standard Bearer scheme; parsed by SecretServerAuth.is_authenticated on the server side
        return {'Authorization': f'Bearer {self._token}'}

    def dump_object(self):
        # The return value here is stored as `auth_data` of peers that needs the secret for the next startup.
        # In a real life use case, you would probably store something else than the secret since it will be written
        # to the disk (for example an encrypted version of it), just make sure __init__ can be initialized with
        # whatever you return here.
        return {'secret': self._secret}


class SecretServerAuth(NETunnelServerAuth):
    # This object will be used by NETunnelServer

    def __init__(self, secret, *args, **kwargs):
        # The `secret` parameter is exclusive to this plugin. You can declare whatever parameters you need
        # as long as your users know what parameters your plugin expects.
        super().__init__(*args, **kwargs)
        self._secret = secret

    async def get_client_for_peer(self, secret):
        # Used when this server acts as a client towards one of its peers
        return SecretClientAuth(secret=secret)

    async def is_authenticated(self, request: web.Request):
        """
        Return True only when the request carries a valid, non-expired Bearer token.
        """
        if 'Authorization' not in request.headers:
            return False
        try:
            auth_type, token = request.headers['Authorization'].split()
            result = jwt.decode(token.encode(), self._secret)
        except (ValueError, jwt.InvalidTokenError):
            # Same rationale as the client side: an expired token must yield False (an
            # authentication failure) rather than let ExpiredSignatureError bubble up
            # and turn into a server error.
            return False
        if result['exp'] < time.time():
            return False
        return True

    async def authenticate(self, request: web.Request):
        data = await request.json()
        if data['secret'] != self._secret:
            return web.HTTPForbidden()
        # We're generating a token valid for the next 30 seconds (`exp` is in epoch seconds;
        # the original comment said "30 minutes" - use `+ 30 * 60` if minutes were intended).
        # The tunnel won't be closed if this expires, but the client will not be able to make
        # valid requests with this token again, which will be resolved by it automatically
        # since it will request a new one.
        response = {'token': jwt.encode({'exp': time.time() + 30}, self._secret).decode()}
        return web.json_response(response)
10 | 11 | Another useful advantage is that peers and static tunnels can be dynamically created 12 | using the NETunnelClient. 13 | ## Usage 14 | Run the first server: 15 | ```bash 16 | python -m netunnel.server -p 4040 -c server1.conf 17 | ``` 18 | Run the second server: 19 | ```bash 20 | python -m netunnel.server -p 4041 -c server2.conf 21 | ``` 22 | The `server2.conf` configuration file include a peer called `server1` with a static tunnel to it on port 20000. Let's 23 | make sure it works: 24 | ```bash 25 | ssh -p 20000 localhost 26 | ``` 27 | Notice that if you stop the instance of `server1` from running, the second instance will try to reconnect it. 28 | 29 | Now, make sure we have both `server1` and `server2` running and let's use `server1`'s API to add `server2` as a 30 | peer and a static tunnel to it: 31 | ```bash 32 | python client.py 33 | ``` 34 | Now `server2` is registered as a new peer and you will see that `server1.conf` was updated. The local port 35 | that `server1` chooses for the tunnel is an available port number from 20000 or more, so we'll assume it chose port 20001 36 | and now we can run: 37 | ```bash 38 | ssh -p 20001 localhost 39 | ``` 40 | You can also see that `server1.conf` was updated so it will recreate the tunnel on startup. 
from netunnel.client import NETunnelClient

import asyncio


async def create_static_tunnel():
    """
    Register a peer named `server2` on the local netunnel server and create a
    static tunnel to its SSH port (22).

    The server persists both the peer and the static tunnel in its configuration,
    so we discard the returned settings instead of binding them to unused locals.
    """
    async with NETunnelClient('http://localhost:4040') as client:
        await client.register_peer('server2', target_netunnel_url='http://localhost:4041')
        await client.create_peer_static_tunnel('server2', tunnel_remote_port=22)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(create_static_tunnel())
def connection_established(async_method):
    """
    Decorator for NETunnelClient coroutine methods that must only run while the
    control channel to the server is connected.

    Raises NETunnelNotConnectedError instead of invoking the wrapped coroutine
    when the client has not connected yet.
    """
    @functools.wraps(async_method)
    async def wrapper(self: 'NETunnelClient', *args, **kwargs):
        # `connected` is checked with `is False` on purpose: it mirrors the
        # original contract of the `connected` property exactly.
        if self.connected is False:
            raise NETunnelNotConnectedError('Client is not connected. Please use `async with NETunnelClient(...):` to avoid this exception')
        return await async_method(self, *args, **kwargs)
    return wrapper
35 | A constant websocket is used to allow the server to perform requests to the client 36 | :param server_url: URL to the remote netunnel server 37 | :param logger: optional logger 38 | :param proxy_url: url for a proxy to use when making requests to the remote netunnel server 39 | :param proxy_username: Optional username to use when authenticating with the proxy_url. `proxy_password` must be given as well 40 | :param proxy_password: Optional password to use when authenticating with the proxy_url. `proxy_username` must be given as well 41 | :param auth_client: Instance of subclass of netunnel.common.auth.NETunnelClientAuth that will be used for authentication 42 | :param ssl: SSLContext object. False to skip validation, None for default SSL check. 43 | """ 44 | self.server_url = server_url 45 | self._proxy_url = None 46 | self._proxy_auth = None 47 | self.set_client_proxy(proxy_url, proxy_username, proxy_password) 48 | # aiohttp.ClientSession must be initialized from inside coroutine. 49 | # DO NOT USE __client_session.get/post/etc.. use await self._get / await self._post instead. 50 | self.__client_session: aiohttp.ClientSession = None 51 | self._control_channel: Channel = None 52 | self._control_channel_task: asyncio.Task = None 53 | self._tunnels: Dict[TunnelId, Tunnel] = {} 54 | # As its name implies, this lock is used when creating server-to-client tunnels. This resolves a 55 | # race condition when the server request a websocket for a newly created tunnel_id, but the http post 56 | # request for creating this tunnel_id is still processing, so the client did not register the new tunnel_id yet. 57 | self._tunnel_to_client_is_under_construction_lock = asyncio.Lock() 58 | # list of tasks to handle new websockets for existing tunnels. 
Used for cleanups 59 | self._tunnels_connections_tasks: List[asyncio.Task] = [] 60 | self._logger = logger or get_logger(__name__) 61 | self._auth_client: auth.NETunnelClientAuth = auth_client or auth.ClientNoAuth() 62 | self._ssl = ssl 63 | 64 | def _get_url(self, uri: str) -> str: 65 | return f'{self.server_url}{uri}' 66 | 67 | async def _get(self, uri: str, parse_as_text=False, *args, **kwargs): 68 | """ 69 | Perform GET request to server_url with NETunnelClient settings 70 | :param uri: The uri to append server_url 71 | :param parse_as_text: parse the response as text instead of json 72 | """ 73 | url = self._get_url(uri) 74 | auth_headers = await self._request_auth_headers() 75 | async with self.__client_session.get(url, *args, headers=auth_headers, proxy=self._proxy_url, proxy_auth=self._proxy_auth, **kwargs) as response: 76 | if parse_as_text: 77 | return await response.text() 78 | return await self._parse_response(response) 79 | 80 | async def _post(self, uri: str, *args, **kwargs): 81 | """ 82 | Perform POST request to server_url with NETunnelClient settings 83 | """ 84 | url = self._get_url(uri) 85 | auth_headers = await self._request_auth_headers() 86 | async with self.__client_session.post(url, *args, headers=auth_headers, proxy=self._proxy_url, proxy_auth=self._proxy_auth, **kwargs) as response: 87 | return await self._parse_response(response) 88 | 89 | async def _delete(self, uri: str, **kwargs): 90 | """ 91 | Perform DELETE request to server_url with NETunnelClient settings 92 | """ 93 | url = self._get_url(uri) 94 | auth_headers = await self._request_auth_headers() 95 | async with self.__client_session.delete(url, headers=auth_headers, proxy=self._proxy_url, proxy_auth=self._proxy_auth, **kwargs) as response: 96 | return await self._parse_response(response) 97 | 98 | async def _ws_connect(self, uri: str, **kwargs): 99 | """ 100 | Perform websocket connection to server_url with NETunnelClient settings. 
101 | Usage: 102 | async with (await self._ws_connect()) as websocket: 103 | websocket.send_bytes(...) 104 | """ 105 | url = self._get_url(uri) 106 | auth_headers = await self._request_auth_headers() 107 | return self.__client_session.ws_connect(url, headers=auth_headers, heartbeat=CLIENT_CHANNEL_HEARTBEAT, 108 | proxy=self._proxy_url, proxy_auth=self._proxy_auth, **kwargs) 109 | 110 | async def _request_auth_headers(self) -> dict: 111 | """ 112 | Return headers authorized by the remote server. 113 | We first authenticate if we're not already. 114 | """ 115 | if not await self._auth_client.is_authenticated(): 116 | await self._auth_client.authenticate(client=self) 117 | return await self._auth_client.get_authorized_headers() 118 | 119 | def set_client_proxy(self, proxy_url, proxy_username=None, proxy_password=None): 120 | """ 121 | Set the client's proxy to use when making requests to the netunnel server 122 | :param proxy_url: new url for a proxy to use when making requests to the remote netunnel server 123 | :param proxy_username: Optional username to use when authenticating with the proxy_url. `proxy_password` must be given as well 124 | :param proxy_password: Optional password to use when authenticating with the proxy_url. `proxy_username` must be given as well 125 | """ 126 | if proxy_url is None: 127 | self._proxy_url = None 128 | self._proxy_auth = None 129 | return 130 | self._proxy_url = proxy_url 131 | if proxy_username and proxy_password: 132 | self._proxy_auth = aiohttp.BasicAuth(proxy_username, proxy_password) 133 | 134 | @staticmethod 135 | async def _parse_response(response: aiohttp.ClientResponse) -> Any: 136 | """ 137 | Try await for json response. 
If there is an error message, raise ClientResponseError with 138 | the error message instead of the default status code error 139 | """ 140 | try: 141 | data = await response.json() 142 | if data is not None and 'error' in data: 143 | raise NETunnelResponseError(data['error']) 144 | return data 145 | except aiohttp.ContentTypeError: 146 | raise NETunnelResponseError(await response.text()) 147 | 148 | @connection_established 149 | async def get_remote_version(self): 150 | return await self._get('/version', parse_as_text=True, raise_for_status=True) 151 | 152 | @connection_established 153 | async def factory_reset(self, disconnect_clients=False): 154 | """ 155 | Erase and recreate the configurations on the server. 156 | :param disconnect_clients: Disconnect currently connected clients. 157 | """ 158 | payload = { 159 | 'disconnect_clients': disconnect_clients 160 | } 161 | return await self._post('/config/factory-reset', json=payload, raise_for_status=True) 162 | 163 | @property 164 | def connected(self): 165 | return self._control_channel is not None and self._control_channel.running 166 | 167 | @property 168 | def ssl(self): 169 | return self._ssl 170 | 171 | async def connect(self): 172 | """ 173 | Establish a channel with the server. This channel is mandatory for some of the 174 | functionality of NETunnelClient, like opening tunnels. 
175 | """ 176 | if self.connected: 177 | raise RuntimeError(f'Client already connected to {self.server_url}') 178 | headers = await self._request_auth_headers() 179 | self.__client_session = await get_session(headers=headers, ssl=self._ssl) 180 | channel_payload = {'version': __version__} 181 | self._logger.debug('Connecting to `%s`', self.server_url) 182 | channel_data = await self._post('/channels', json=channel_payload) 183 | channel_id = channel_data['channel_id'] 184 | remote_version = channel_data['version'] 185 | # aiohttp websockets supposed to be called only with `async with`, but 186 | # we need the websocket to be constantly open and we want to avoid 187 | # breaking compatibility in the future therefore we call `.__aenter__()` directly. 188 | # Channel class will close the websocket when it finishes 189 | websocket_async_context_manager = await self._ws_connect(f'/channels/{channel_id}/connect') 190 | self._logger.debug('Starting channel `%s`. Server version: `%s`', channel_id, remote_version) 191 | websocket: EventClientWebSocketResponse = await websocket_async_context_manager.__aenter__() 192 | self._control_channel = Channel(websocket=websocket, channel_id=channel_id, handler=self._handle_server_messages, logger=self._logger) 193 | 194 | # Start serving on an external task so we can continue using the client 195 | self._control_channel_task = asyncio.ensure_future(self._serve_channel()) 196 | 197 | async def _serve_channel(self): 198 | """ 199 | Start serving the channel and cleanup after it closes 200 | """ 201 | try: 202 | await self._control_channel.serve() 203 | finally: 204 | self._logger.debug('Finished serving channel `%s` on `%s`. 
Cleaning any remaining tunnels', self._control_channel.id, self.server_url) 205 | # The tunnel's close_callback will delete from self._tunnels, so to avoid RuntimeError for changing dictionary while iteration, we cast to list first 206 | for tunnel_id, tunnel in list(self._tunnels.items()): 207 | self._logger.debug('Stopping tunnel `%s`', tunnel_id) 208 | # The channel is closed so the server is responsible for closing its own tunnels 209 | await tunnel.stop(stop_remote_tunnel=False, force=True) 210 | while self._tunnels_connections_tasks: 211 | task = self._tunnels_connections_tasks.pop() 212 | with contextlib.suppress(asyncio.CancelledError): 213 | task.cancel() 214 | await task 215 | 216 | async def _handle_server_messages(self, message: ChannelMessage) -> ChannelResponse: 217 | """ 218 | Handle messages that comes from the control channel 219 | """ 220 | self._logger.debug('New message from `%s` on channel `%s`: %s', self.server_url, self._control_channel.id, message.message_type.name) 221 | if message.message_type == Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION: 222 | tunnel_id = message.data['tunnel_id'] 223 | self._logger.debug('Creating new connection for server-to-client tunnel `%s`', tunnel_id) 224 | # This lock ensure that all server-to-client tunnels are ready and registered 225 | async with self._tunnel_to_client_is_under_construction_lock: 226 | if tunnel_id not in self._tunnels: 227 | return message.get_error_response(f'tunnel_id `{tunnel_id}` is not registered on this client') 228 | new_connection_task = asyncio.ensure_future(self._serve_tunnel_new_websocket_connection(tunnel_id)) 229 | task_in_list_until_done(new_connection_task, self._tunnels_connections_tasks) 230 | return message.get_valid_response() 231 | elif message.message_type == Messages.DELETE_TUNNEL: 232 | tunnel_id = message.data['tunnel_id'] 233 | force = message.data['force'] 234 | self._logger.debug('Closing tunnel `%s` %s', 235 | tunnel_id, 'forcefully' if force else 
'gracefully') 236 | # Request to stop the tunnel came from remote, so obviously we stop it only on our side 237 | await self._tunnels[tunnel_id].stop(stop_remote_tunnel=False, force=force) 238 | return message.get_valid_response() 239 | return message.get_error_response(f'Unknown message_type {message.message_type}') 240 | 241 | async def close(self): 242 | """ 243 | Close the client sessions and control channel if its connected and await for the cleanup afterwards. 244 | """ 245 | self._logger.debug('Closing NETunnel client to `%s`', self.server_url) 246 | try: 247 | if self.connected: 248 | self._logger.debug('Closing channel to `%s`', self.server_url) 249 | await self._control_channel.close() 250 | await self._control_channel_task 251 | finally: 252 | await self._close_session() 253 | 254 | async def _close_session(self): 255 | if self.__client_session and not self.__client_session.closed: 256 | await self.__client_session.close() 257 | 258 | @connection_established 259 | async def open_tunnel_to_server(self, remote_address, remote_port, local_address='127.0.0.1', local_port=None, wait_ready=True) -> Tunnel: 260 | """ 261 | Creates a tunnel from here to the server (client-to-server tunnel). 262 | :param remote_address: address on the remote to communicate with 263 | :param remote_port: port on the remote to communicate with 264 | :param local_address: address to listen on locally. Defaults to localhost 265 | :param local_port: port to listen on locally. 
Defaults to random 266 | :param wait_ready: return tunnel only after it's ready to receive websockets 267 | :return: Tunnel object 268 | """ 269 | payload = {'exit_address': remote_address, 'exit_port': remote_port, 'reverse': False} 270 | data = await self._post(f'/channels/{self._control_channel.id}/tunnels', json=payload) 271 | tunnel_id = data['tunnel_id'] 272 | self._tunnels[tunnel_id] = InputTunnel(entrance_address=local_address, entrance_port=local_port, 273 | websocket_feeder_coro=self._input_tunnel_websocket_feeder(tunnel_id), 274 | exit_address=remote_address, exit_port=remote_port, 275 | logger=self._logger, stop_tunnel_on_remote_callback=lambda force: self._delete_tunnel_on_server(tunnel_id, force), 276 | stopped_callback=lambda: self._delete_tunnel_from_tunnels(tunnel_id)) 277 | await self._tunnels[tunnel_id].start() 278 | if wait_ready: 279 | await self._tunnels[tunnel_id].wait_new_websocket() 280 | return self._tunnels[tunnel_id] 281 | 282 | @connection_established 283 | async def open_tunnel_to_client(self, local_address, local_port, remote_address='127.0.0.1', remote_port=None) -> Tunnel: 284 | """ 285 | Creates a tunnel from the server to here. (server-to-client tunnel) 286 | :param local_address: local connection address. This is the exit address of the tunnel 287 | :param local_port: local connection port. This is the exit port of the tunnel 288 | :param remote_address: address on the server to listen. Defaults to localhost 289 | :param remote_port: port on the server to listen. Defaults to random. 290 | """ 291 | if not self.connected: 292 | raise RuntimeError('Cannot create tunnel to client without connecting first. 
Use `async with NETunnelClient` to avoid this exception') 293 | payload = {'entrance_address': remote_address, 'entrance_port': remote_port, 'reverse': True} 294 | async with self._tunnel_to_client_is_under_construction_lock: 295 | data = await self._post(f'/channels/{self._control_channel.id}/tunnels', json=payload) 296 | tunnel_id = data['tunnel_id'] 297 | self._tunnels[tunnel_id] = OutputTunnel(entrance_address=remote_address, entrance_port=remote_port, 298 | exit_address=local_address, exit_port=local_port, 299 | logger=self._logger, stop_tunnel_on_remote_callback=lambda force: self._delete_tunnel_on_server(tunnel_id, force), 300 | stopped_callback=lambda: self._delete_tunnel_from_tunnels(tunnel_id)) 301 | await self._tunnels[tunnel_id].start() 302 | return self._tunnels[tunnel_id] 303 | 304 | async def _delete_tunnel_on_server(self, tunnel_id, force): 305 | """ 306 | Used internally to close tunnels. This is used as a callback for Tunnel.close() so 307 | that we can request the server to close the tunnel on its side, and also 308 | as a cleanup for self._tunnels 309 | :param tunnel_id: Tunnel ID on the server to delete 310 | :param force: Whether to stop the tunnel forcefully or to wait for connections to finish 311 | """ 312 | self._logger.debug('Deleting tunnel `%s` of channel `%s` on `%s`', tunnel_id, self._control_channel.id, self.server_url) 313 | try: 314 | payload = {'force': force} 315 | await self._delete(f'/channels/{self._control_channel.id}/tunnels/{tunnel_id}', json=payload) 316 | except aiohttp.ClientError as err: 317 | self._logger.warning('Failed to delete remote tunnel `%s`: %s', tunnel_id, err) 318 | 319 | async def _delete_tunnel_from_tunnels(self, tunnel_id): 320 | del self._tunnels[tunnel_id] 321 | 322 | async def _input_tunnel_websocket_feeder(self, tunnel_id): 323 | """ 324 | As long as the tunnel is alive, feed it with websockets. 
    async def _serve_tunnel_new_websocket_connection(self, tunnel_id):
        """
        Used internally. Creates a new websocket and feed it to the tunnel.
        Awaits until the websocket is used up before closing it.
        :param tunnel_id: ID of the tunnel to serve
        """
        try:
            # The `async with` block keeps the websocket open until it is fully
            # consumed by the tunnel; wait_closed() blocks until that happens.
            async with (await self._ws_connect(f'/channels/{self._control_channel.id}/tunnels/{tunnel_id}/connect')) as websocket:
                websocket: EventClientWebSocketResponse
                await self._tunnels[tunnel_id].feed_websocket(websocket)
                await websocket.wait_closed()
        except Exception:
            # NOTE(review): this logs via the root logger (not self._logger) and then
            # calls sys.exit(1) from inside an asyncio task; SystemExit raised in a
            # task does not necessarily terminate the process directly - confirm this
            # hard-exit-on-any-websocket-error behavior is intended.
            logging.exception(f"TUNNEL ERROR: Received exception during new websocket connection")
            sys.exit(1)
368 | The peer is stored on the server and can be used to set static tunnels 369 | :param name: name of the remote peer. This will be used as an identifier of the peer 370 | :param target_netunnel_url: url to the remote netunnel server on the peer (e.g. https://my.netunnel.server/netunnel) 371 | :param auth_data: data used to authenticate with this peer 372 | """ 373 | peer_payload = { 374 | 'name': name, 375 | 'target_netunnel_url': target_netunnel_url, 376 | 'auth_data': auth_data or {} 377 | } 378 | return await self._post('/peers', json=peer_payload) 379 | 380 | @connection_established 381 | async def update_peer(self, peer_id, new_name=None, new_target_netunnel_url=None, auth_data=None): 382 | """ 383 | Update peer fields 384 | Only updating the target_netunnel_url will trigger restart to the static tunnels of this peer 385 | :param peer_id: The id of the peer to update 386 | :param new_name: new name for the peer 387 | :param new_target_netunnel_url: new url to use when making requests to this peer. 
(Will trigger all static tunnels to restart) 388 | :param auth_data: new data required for authentication 389 | """ 390 | update_peer_payload = {} 391 | if new_name: 392 | update_peer_payload['name'] = new_name 393 | if new_target_netunnel_url: 394 | update_peer_payload['target_netunnel_url'] = new_target_netunnel_url 395 | if auth_data: 396 | update_peer_payload['auth_data'] = auth_data 397 | return await self._post(f'/peers/{peer_id}', json=update_peer_payload) 398 | 399 | @connection_established 400 | async def delete_peer_by_id(self, peer_id): 401 | """ 402 | Delete a peer by an id 403 | :param peer_id: The id of the peer to delete 404 | """ 405 | return await self._delete(f'/peers/{peer_id}') 406 | 407 | @connection_established 408 | async def delete_peer_by_name(self, name): 409 | """ 410 | Delete a peer by it's name identifier 411 | :param name: name of the peer to delete 412 | """ 413 | peers = await self.list_peers() 414 | for peer in peers: 415 | if peer['name'] == name: 416 | return await self.delete_peer_by_id(peer['id']) 417 | raise NETunnelError(f'Peer `{name}` was not found') 418 | 419 | @connection_established 420 | async def get_peer_by_id(self, peer_id) -> Dict[str, Any]: 421 | """ 422 | Return peer settings by an id 423 | :param peer_id: The id of the peer to return 424 | """ 425 | return await self._get(f'/peers/{peer_id}') 426 | 427 | @connection_established 428 | async def get_peer_by_name(self, name) -> Dict[str, Any]: 429 | """ 430 | Return peer settings by it's name identifier 431 | :param name: name of the peer to return 432 | """ 433 | peers = await self._get(f'/peers?name={name}') 434 | if len(peers) == 0: 435 | raise NETunnelError(f'Peer `{name}` was not found') 436 | # peer name is unique 437 | return peers[0] 438 | 439 | @connection_established 440 | async def list_peer_static_tunnels(self, peer_name): 441 | """ 442 | Return a list of static tunnels to a peer 443 | :param peer_name: name of the peer on which the static tunnels are 
headed 444 | """ 445 | peer = await self.get_peer_by_name(peer_name) 446 | peer_id = peer['id'] 447 | return await self._get(f'/peers/{peer_id}/static_tunnels') 448 | 449 | @connection_established 450 | async def create_peer_static_tunnel(self, peer_name, tunnel_remote_port, tunnel_remote_address='127.0.0.1'): 451 | """ 452 | Creates a static tunnel to a peer 453 | :param peer_name: name of the peer on which to create the static tunnel 454 | :param tunnel_remote_port: exit port of the tunnel on the peer 455 | :param tunnel_remote_address: exit address of the tunnel on the peer 456 | """ 457 | peer = await self.get_peer_by_name(peer_name) 458 | peer_id = peer['id'] 459 | static_tunnel_payload = { 460 | 'tunnel_remote_address': tunnel_remote_address, 461 | 'tunnel_remote_port': tunnel_remote_port 462 | } 463 | return await self._post(f'/peers/{peer_id}/static_tunnels', json=static_tunnel_payload) 464 | 465 | @connection_established 466 | async def get_peer_static_tunnel(self, peer_name, static_tunnel_id): 467 | """ 468 | Return a static tunnel settings of a peer 469 | :param peer_name: name of the peer on which the static tunnel is set 470 | :param static_tunnel_id: id of the static tunnel on the peer to return 471 | """ 472 | peer = await self.get_peer_by_name(peer_name) 473 | peer_id = peer['id'] 474 | return await self._get(f'/peers/{peer_id}/static_tunnels/{static_tunnel_id}') 475 | 476 | @connection_established 477 | async def delete_peer_static_tunnel(self, peer_name, static_tunnel_id): 478 | """ 479 | Delete a static tunnel of a peer 480 | :param peer_name: name of the peer on which the static tunnel is set 481 | :param static_tunnel_id: id of the static tunnel on the peer to delete 482 | """ 483 | peer = await self.get_peer_by_name(peer_name) 484 | peer_id = peer['id'] 485 | return await self._delete(f'/peers/{peer_id}/static_tunnels/{static_tunnel_id}') 486 | 487 | @connection_established 488 | async def get_server_default_http_proxy(self): 489 | """ 490 | 
Return the current http proxy settings on the remote netunnel server 491 | """ 492 | return await self._get('/config/http-proxy') 493 | 494 | @connection_established 495 | async def set_server_default_http_proxy(self, proxy_url, username=None, password=None, test_url=None, check_proxy=True): 496 | """ 497 | Set the http proxy settings on the remote netunnel server 498 | :param proxy_url: HTTP proxy url to set 499 | :param username: Optional username to authenticate with the proxy. `password` must be given as well 500 | :param password: Optional password to authenticate with the proxy. `username` must be given as well 501 | :param test_url: A url that will be used to verify the proxy_url by making a GET request to it via the proxy 502 | :param check_proxy: Whether to validate that the proxy works. If set to False, the test_url will be ignored 503 | """ 504 | http_proxy_payload = { 505 | 'proxy_url': proxy_url, 506 | 'username': username, 507 | 'password': password, 508 | 'test_url': test_url, 509 | 'check_proxy': check_proxy 510 | } 511 | return await self._post('/config/http-proxy', json=http_proxy_payload) 512 | 513 | async def __aenter__(self): 514 | try: 515 | await self.connect() 516 | except Exception: 517 | await self._close_session() 518 | raise 519 | return self 520 | 521 | async def __aexit__(self, exc_type, exc_val, exc_tb): 522 | await self.close() 523 | 524 | 525 | def get_args(): 526 | parser = argparse.ArgumentParser(description='Run a netunnel client') 527 | parser.add_argument('-d', '--debug', help="Increase log verbosity to debug mode", dest='loglevel', action="store_const", 528 | const=logging.DEBUG, default=logging.INFO) 529 | parser.add_argument('-s', '--server', help="Address of the NETunnel Server to connect", default='http://127.0.0.1:4040') 530 | parser.add_argument('--local-address', help="Local address to listen/connect to", default='127.0.0.1') 531 | parser.add_argument('--local-port', help="Local port to listen/connect to. 
def get_args():
    """Build the netunnel client's command line parser and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Run a netunnel client')
    # Options are declared as a (flags, kwargs) table and registered in order,
    # so the generated --help output matches the declaration order below.
    options = [
        (('-d', '--debug'), dict(help="Increase log verbosity to debug mode", dest='loglevel',
                                 action="store_const", const=logging.DEBUG, default=logging.INFO)),
        (('-s', '--server'), dict(help="Address of the NETunnel Server to connect", default='http://127.0.0.1:4040')),
        (('--local-address',), dict(help="Local address to listen/connect to", default='127.0.0.1')),
        (('--local-port',), dict(help="Local port to listen/connect to. Defaults to random. Mandatory for reverse tunnel", type=int)),
        (('--remote-address',), dict(help="Remote address to listen/connect to", default='127.0.0.1')),
        (('--remote-port',), dict(help="Remote port to listen/connect to", type=int, default=22)),
        (('-r', '--reverse'), dict(action="store_true", help="Reverse the tunnel. The remote socket will be the entrance while the local socket will be the exit")),
        (('--auth-plugin',), dict(help="Plugin to use for authentication. (e.g. .)")),
        (('--auth-data',), dict(default='{}', help="A json dump string of the data required by the authentication plugin")),
        (('--no-ssl-validate',), dict(action="store_true", help="Do not validate the certificate of the server")),
        (('--proxy-url',), dict(help="URL to a proxy server between the client and the server")),
        (('--proxy-username',), dict(help="Optional username to use to authenticate with the proxy")),
        (('--proxy-password',), dict(help="Optional password to use to authenticate with the proxy")),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()


async def main():
    """Entry point of the standalone netunnel client: open a tunnel and block until it closes."""
    args = get_args()
    logger = get_logger('netunnel_client', args.loglevel)
    # Instantiate the optional authentication plugin from its dotted "module.Class" path.
    auth_client = None
    if args.auth_plugin:
        module_path, class_name = args.auth_plugin.rsplit('.', maxsplit=1)
        plugin_class = getattr(importlib.import_module(module_path), class_name)
        auth_client = plugin_class(**json.loads(args.auth_data))
    # ssl=False disables certificate validation; None keeps aiohttp's default validation.
    ssl = False if args.no_ssl_validate else None
    async with NETunnelClient(server_url=args.server, proxy_url=args.proxy_url, proxy_username=args.proxy_username,
                              proxy_password=args.proxy_password, logger=logger, auth_client=auth_client, ssl=ssl) as client:
        if args.reverse:
            if args.local_port is None:
                raise ValueError("--local-port is required for reverse tunnel")
            print("Opening tunnel to the client...")
            tunnel = await client.open_tunnel_to_client(local_address=args.local_address,
                                                        local_port=args.local_port,
                                                        remote_address=args.remote_address,
                                                        remote_port=args.remote_port)
        else:
            print("Opening tunnel to the server...")
            tunnel = await client.open_tunnel_to_server(local_address=args.local_address,
                                                        local_port=args.local_port,
                                                        remote_address=args.remote_address,
                                                        remote_port=args.remote_port)
        print("Tunnel entrance socket: %s:%d" % tunnel.get_entrance_socket())
        print("Tunnel exit socket: %s:%d" % tunnel.get_exit_socket())
        await tunnel.join()


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
    except KeyboardInterrupt:
        # KeyboardInterrupt raises outside the event loop (and pausing it) so we cancel the tasks and let them close cleanly
        pending = asyncio_all_tasks(loop)
        for task in pending:
            task.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            loop.run_until_complete(asyncio.gather(*pending))
class NETunnelServerAuth(abc.ABC):
    """Server-side half of an authentication plugin."""

    def __init__(self, *args, **kwargs):
        pass

    @abc.abstractmethod
    async def get_client_for_peer(self, *args, **kwargs) -> 'NETunnelClientAuth':
        """
        Return a NETunnelClientAuth instance which a peer object will use to authenticate with it.
        The parameters of the subclass should be the expected parameters of the constructor of your
        NETunnelClientAuth class, for example:

        async def get_client_for_peer(self, username, password):
            return NETunnelClientAuth(username, password)
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def is_authenticated(self, request: web.Request) -> bool:
        """
        Return True if the request is authorized and False if not.
        The headers of the request should include the return value of NETunnelClientAuth.get_authorized_headers()
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def authenticate(self, request: web.Request):
        """
        Perform the server-side authentication. This method handles the route /authenticate on the NETunnel server.

        The NETunnelClientAuth.authenticate(client) should make a request to the /authenticate uri of the NETunnel server,
        and this method will be called as the route handler.
        """
        raise NotImplementedError


class NETunnelClientAuth(abc.ABC):
    """Client-side half of an authentication plugin."""

    def __init__(self, *args, **kwargs):
        pass

    @abc.abstractmethod
    async def authenticate(self, client: 'NETunnelClient', *args, **kwargs):
        """
        Perform the client-side authentication against the server's /authenticate uri, which
        is handled by NETunnelServerAuth.authenticate(request).
        Raises NETunnelAuthError if failed to authenticate.

        After calling this method, self.get_authorized_headers is expected to return valid headers
        which will be authorized by the remote.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def is_authenticated(self) -> bool:
        """
        Return True if we are authenticated with the remote.
        This should return False if the tokens returned by the server are expired.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def get_authorized_headers(self) -> dict:
        """
        Return headers which can be used to make authorized requests to the remote server.
        Raises NETunnelNotAuthenticatedError if we need to authenticate first/again
        """
        raise NotImplementedError

    @abc.abstractmethod
    def dump_object(self) -> dict:
        """
        Return a dump of this NETunnelClientAuth instance's data so it can be stored in the configuration
        and reloaded during startup. This is used by NETunnelServer when handling peer objects.
        The return value should be a dictionary that can be given to NETunnelServerAuth.get_client_for_peer()
        and will generate a valid NETunnelClientAuth object.
        Not async because marshmallow parses this data and does not support asyncio yet.
        """
        raise NotImplementedError


class ServerNoAuth(NETunnelServerAuth):
    """No-op server authentication: every request is considered authorized."""

    async def get_client_for_peer(self, *args, **kwargs) -> 'NETunnelClientAuth':
        return ClientNoAuth(*args, **kwargs)

    async def is_authenticated(self, request: web.Request):
        return True

    async def authenticate(self, request: web.Request):
        pass


class ClientNoAuth(NETunnelClientAuth):
    """No-op client authentication: nothing to negotiate, no headers required."""

    async def authenticate(self, client: 'NETunnelClient', *args, **kwargs):
        pass

    async def is_authenticated(self):
        return True

    async def get_authorized_headers(self):
        return {}

    def dump_object(self):
        return {}
This is provided by pymongo 8 | import aiohttp 9 | import logging 10 | import asyncio 11 | 12 | 13 | class Messages(Enum): 14 | TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION = 0 # Request from server to client to open a new websocket since there is a new connection to the tunnel. 15 | DELETE_TUNNEL = 1 # Request from the server to client to delete a tunnel on the client side 16 | 17 | ##### Serialize and Deserialize Messages bson support ##### 18 | 19 | MESSAGES_TYPES_KEY = "__Messages__" 20 | def _encode_message_data(data: dict): 21 | """ 22 | Recursively encoding the data to support Messages objects 23 | """ 24 | result = {} 25 | for key, value in data.items(): 26 | if isinstance(value, Messages): 27 | result[key] = {MESSAGES_TYPES_KEY: value.name} 28 | elif isinstance(value, dict): 29 | result[key] = _encode_message_data(value) 30 | else: 31 | result[key] = value 32 | return result 33 | 34 | def _decode_message_data(data: dict): 35 | """ 36 | Recursively decoding the data to support Messages objects 37 | """ 38 | result = {} 39 | for key, value in data.items(): 40 | if isinstance(value, dict): 41 | if MESSAGES_TYPES_KEY in value: 42 | result[key] = getattr(Messages, value[MESSAGES_TYPES_KEY]) 43 | else: 44 | result[key] = _decode_message_data(value) 45 | else: 46 | result[key] = value 47 | return result 48 | 49 | def message_bson_dumps(data: dict) -> bytes: 50 | """ 51 | Return a bytes dump of data using bson.encode with support to Message objects 52 | """ 53 | encoded_data = _encode_message_data(data) 54 | return bson.encode(encoded_data) 55 | 56 | def message_bson_loader(data: bytes) -> dict: 57 | """ 58 | Return a dictionary of data using bson.loads with support to Message objects 59 | """ 60 | data = bson.decode(data) 61 | return _decode_message_data(data) 62 | 63 | ################################################################ 64 | 65 | 66 | class ChannelError(Exception): 67 | pass 68 | 69 | 70 | class ChannelMessage: 71 | def __init__(self, message_type: 
Messages, data: dict = None, _identifier: int = None): 72 | """ 73 | Represent a request over the control channel. Each request has a type, and optionally additional data. 74 | The identifier is used to match ChannelMessage with a ChannelResponse as request <-> response relationship 75 | :param message_type: Messages object 76 | :param data: data to send. Must be serializable to json. 77 | :param _identifier: This is used internally 78 | """ 79 | self.message_type = message_type 80 | self.data = data or {} 81 | self._identifier = _identifier 82 | self.channel: Channel = None 83 | 84 | def get_error_response(self, err_message, data: Any = None) -> 'ChannelResponse': 85 | """ 86 | Generates an ChannelResponse for this message with error message 87 | """ 88 | if self._identifier is None: 89 | raise ChannelError('Missing _identifier to generate a response. This is probably a bug') 90 | return ChannelResponse(message_identifier=self._identifier, data=data, error=err_message) 91 | 92 | def get_valid_response(self, data: Any = None) -> 'ChannelResponse': 93 | """ 94 | Generates and ChannelResponse for this message the the given data 95 | """ 96 | if self._identifier is None: 97 | raise ChannelError('Missing _identifier to generate a response. This is probably a bug') 98 | return ChannelResponse(message_identifier=self._identifier, data=data) 99 | 100 | def to_dict(self): 101 | return { 102 | '_type': 'message', 103 | '_identifier': self._identifier, 104 | 'message_type': self.message_type, 105 | 'data': self.data 106 | } 107 | 108 | @staticmethod 109 | def from_dict(message) -> 'ChannelMessage': 110 | return ChannelMessage(**message) 111 | 112 | 113 | class ChannelResponse: 114 | def __init__(self, message_identifier: int, data: dict = None, error: Any = None): 115 | """ 116 | Represent a response over the control channel. A response is always associated with an ChannelMessage identifier. 
117 | Do not initialize it directly, use ChannelMessage.get_error_response and ChannelMessage.get_valid_response instead 118 | """ 119 | self._message_identifier = message_identifier 120 | self.data = data or {} 121 | self.error = error 122 | 123 | def is_ok(self): 124 | return self.error is None 125 | 126 | def to_dict(self): 127 | response = { 128 | '_type': 'response', 129 | '_identifier': self._message_identifier, 130 | 'data': self.data 131 | } 132 | if self.error is not None: 133 | response['error'] = self.error 134 | return response 135 | 136 | @staticmethod 137 | def from_dict(response) -> 'ChannelResponse': 138 | message_identifier = response.pop('_identifier') 139 | return ChannelResponse(message_identifier=message_identifier, **response) 140 | 141 | 142 | class Channel: 143 | def __init__(self, websocket: WebsocketType, channel_id: int, handler: 'Callable[[ChannelMessage], Awaitable[ChannelResponse]]' = None, logger: logging.Logger = None): 144 | """ 145 | Creates a link between 2 peers on top of a websocket. 146 | Websockets communication is bi-directional, each message is independent so we can't tell which message is a request and 147 | which is a response. This creates a protocol on top of a websocket to solve this problem. 148 | :param websocket: An established websocket between 2 servers. Channel is responsible for closing it 149 | :param handler: an awaitable callable to handle ChannelMessages. If left as None, we won't handle incoming messages, but we'll still be able to send 150 | :param channel_id: a unique id for that Channel. 151 | :param logger: additional logger 152 | """ 153 | self._websocket = websocket 154 | self._handler = handler 155 | self._id = channel_id 156 | self._logger = logger or logging.getLogger(__name__) 157 | self._message_id_counter = 0 158 | # A dictionary of {message_id: Event}. This dict register message_ids which awaits for response. 
class Channel:
    def __init__(self, websocket: WebsocketType, channel_id: int, handler: 'Callable[[ChannelMessage], Awaitable[ChannelResponse]]' = None, logger: logging.Logger = None):
        """
        Creates a link between 2 peers on top of a websocket.
        Websockets communication is bi-directional, each message is independent so we can't tell which message is a request and
        which is a response. This creates a protocol on top of a websocket to solve this problem.
        :param websocket: An established websocket between 2 servers. Channel is responsible for closing it
        :param handler: an awaitable callable to handle ChannelMessages. If left as None, we won't handle incoming messages, but we'll still be able to send
        :param channel_id: a unique id for that Channel.
        :param logger: additional logger
        """
        self._websocket = websocket
        self._handler = handler
        self._id = channel_id
        self._logger = logger or logging.getLogger(__name__)
        # Monotonic counter used to allocate a unique identifier per outgoing message.
        self._message_id_counter = 0
        # A dictionary of {message_id: Event}. This dict register message_ids which awaits for response.
        # When a new ChannelResponse arrives, if it has a message_id in this dict, we use the event to notify the subscriber.
        self._subscribed_messages = defaultdict(EventItem)
        # This lock is used when registering new message on self._subscribed_messages to prevent duplications of
        # message_ids in case multiple messages received at once.
        self._subscribe_messages_lock = asyncio.Lock()

    @property
    def id(self):
        # Unique identifier of this channel, assigned by the creator.
        return self._id

    @property
    def running(self):
        # The channel lives exactly as long as its underlying websocket.
        return not self._websocket.closed

    async def send_message(self, message: 'ChannelMessage', timeout=None, raise_if_error=False) -> 'ChannelResponse':
        """
        Send an ChannelMessage to the remote peer and wait for a response.
        :param timeout: optional seconds to wait for the response (asyncio.TimeoutError raised on expiry)
        :param raise_if_error: when True, raise ChannelError instead of returning an error response
        """
        if self._websocket.closed:
            raise ChannelError('Channel is closed')
        # Subscribe the message so we'll be notified when a response come back
        async with self._subscribe_messages_lock:
            self._message_id_counter += 1
            message_id = self._message_id_counter
            self._subscribed_messages[message_id].clear()

        # Mark the message_id before sending it
        message_payload = message.to_dict()
        message_payload['_identifier'] = message_id
        try:
            await self._websocket.send_bytes(message_bson_dumps(message_payload))
            # wait for the response to arrive
            wait_coro = self._subscribed_messages[message_id].wait()
            if timeout:
                wait_coro = asyncio.wait_for(wait_coro, timeout=timeout)
            response: ChannelResponse = await wait_coro
        finally:
            # Always unsubscribe, even when sending or waiting raised.
            del self._subscribed_messages[message_id]
        if raise_if_error:
            if not response.is_ok():
                raise ChannelError(response.error)
        return response

    async def serve(self):
        """
        Start listening for incoming traffic from the websocket.
        Dispatches 'message' frames to the handler and routes 'response' frames to their subscribers.
        """
        async for msg in self._websocket:
            msg: aiohttp.WSMessage
            if msg.type != aiohttp.WSMsgType.BINARY:
                # Websocket related traffic. We ignore it
                self._logger.debug('Channel `%s` Received message of type `%s` with data `%s`. Ignoring', self._id, msg.type, msg.data)
                continue
            message = message_bson_loader(msg.data)
            message_type = message.pop('_type')
            if message_type == 'message':
                # This is a message. We'll handle it and return a response. If no handler, a valid response will return
                message = ChannelMessage.from_dict(message)
                if self._handler:
                    # Inject Channel to the ChannelMessage so that the handler can access it
                    message.channel = self
                    try:
                        response = await self._handler(message)
                    except Exception as err:
                        # Handler failures are reported back to the remote as an error response.
                        self._logger.exception('Got exception while handling channel message of type `%s` for channel `%s`:', message.message_type.name, self.id)
                        response = message.get_error_response(err_message=str(err))
                else:
                    response = message.get_valid_response()
                await self._websocket.send_bytes(message_bson_dumps(response.to_dict()))
            elif message_type == 'response':
                message_identifier = message['_identifier']
                if message_identifier in self._subscribed_messages:
                    # We'll pass the message to the subscriber
                    response = ChannelResponse.from_dict(message)
                    self._subscribed_messages[message_identifier].set(response)
                else:
                    self._logger.warning('No subscriber for the message: %s', message)
            else:
                self._logger.warning('Unknown message type: %s', message_type)
        # The async-for loop exits when the websocket closes; surface any transport error.
        if self._websocket.exception():
            raise self._websocket.exception()
        try:
            close_reason = aiohttp.WSCloseCode(self._websocket.close_code)
            if close_reason is not aiohttp.WSCloseCode.OK:
                self._logger.debug('Channel `%s` closed unexpectedly with close code: %s(%s)', self.id, close_reason.value, close_reason.name)
        except ValueError:
            # close_code that is not a known WSCloseCode member.
            self._logger.debug('Channel `%s` closed unexpectedly with unknown close code: %s', self.id, self._websocket.close_code)

    async def close(self):
        """
        Closes the websocket, and therefore the channel itself.
        """
        await self._websocket.close()


from typing import Union, Callable, Coroutine, Any, Dict, List
from .utils import EventClientWebSocketResponse, EventWebSocketResponse
from aiohttp import ClientWebSocketResponse
from aiohttp.web import WebSocketResponse


# Types
WebsocketType = Union[EventClientWebSocketResponse, EventWebSocketResponse, WebSocketResponse, ClientWebSocketResponse]
CallableOfCoroutine = Callable[..., Coroutine]
TunnelId = int
ChannelId = int


# Constants
# Heartbeat interval for client control channels — presumably seconds; confirm against client usage.
CLIENT_CHANNEL_HEARTBEAT = 20
SERVER_MAXIMUM_CLIENT_CHANNELS = 1000
SERVER_MAXIMUM_CLIENT_CHANNELS_ERROR = 'Reached maximum allowed number of control_channels'
# Port range from which static tunnels allocate their local ports.
MIN_STATIC_TUNNEL_LOCAL_PORT = 20000
MAX_STATIC_TUNNEL_LOCAL_PORT = 21000


import http


class NETunnelError(Exception):
    """Base exception for all netunnel errors."""
    pass


class NETunnelNotConnectedError(NETunnelError):
    pass


class NETunnelNotAuthenticatedError(NETunnelError):
    pass


class NETunnelResponseError(NETunnelError):
    pass


class NETunnelDestinationNotAllowed(NETunnelError):
    pass


class NETunnelInvalidProxy(NETunnelError):
    pass
class NETunnelServerError(NETunnelError):
    """
    Raised by the server to signal a failure that is converted into a JSON response,
    using the http status code bound to the class.
    """
    status_code = http.HTTPStatus.INTERNAL_SERVER_ERROR


class NETunnelServerNotFound(NETunnelServerError):
    # 404 - the requested resource does not exist
    status_code = http.HTTPStatus.NOT_FOUND


class NETunnelServerBadRequest(NETunnelServerError):
    # 400 - malformed or invalid request payload
    status_code = http.HTTPStatus.BAD_REQUEST


class NETunnelAuthError(NETunnelServerError):
    # 403 - authentication failed or missing
    status_code = http.HTTPStatus.FORBIDDEN


"""
Security utilities for netunnel
"""
from cryptography.fernet import Fernet

import click


class Encryptor:
    """Thin convenience wrapper around cryptography's Fernet symmetric encryption."""

    def __init__(self, key):
        self._fernet = Fernet(key)

    @staticmethod
    def generate_key():
        # Produce a fresh Fernet-compatible secret key.
        return Fernet.generate_key()

    def encrypt(self, data: bytes):
        return self._fernet.encrypt(data)

    def decrypt(self, data: bytes):
        return self._fernet.decrypt(data)

    def encrypt_string(self, data: str):
        # str -> encrypted str convenience wrapper
        return self.encrypt(data.encode()).decode()

    def decrypt_string(self, data: str):
        # encrypted str -> plaintext str convenience wrapper
        return self.decrypt(data.encode()).decode()


@click.group(help="Toolkit for security utilities on netunnel")
def main():
    pass


@main.command(name='generate-key')
def _generate_key():
    """Generates a key that can be used on NETunnelServer
    """
    print(Encryptor.generate_key().decode())


@main.command(name='encrypt')
@click.argument('secret-key')
@click.argument('data')
def _encrypt(secret_key, data):
    """Encrypt data using the secret_key"""
    print(Encryptor(secret_key).encrypt_string(data))


@main.command(name='decrypt')
@click.argument('secret-key')
@click.argument('data')
def _decrypt(secret_key, data):
    """Decrypt data using the secret_key"""
    print(Encryptor(secret_key).decrypt_string(data))


if __name__ == '__main__':
    main()


"""
Utilities for creating tunnels on top of websockets
The 2 classes you'll need to use are:
InputTunnel -> Listen on a local address and port. For each connection, it takes a new websocket and forward the data on top of it
OutputTunnel -> Listen on a websocket for incoming messages and forward all data to a designated local address and port

Diagram for how traffic looks like inside the tunnel:

Connection1 ->                 Websocket1 ->                  ->
Connection2 ->  InputTunnel -> Websocket2 ->  OutputTunnel -> Service
Connection3 ->                 Websocket3 ->                  ->

traffic goes bi-directional to allow `Service` to respond

For 'reverse tunnel', you just need to create them in reverse.
"""
import asyncio
import logging
import subprocess
import contextlib
import abc
import aiohttp
import aiohttp.web

from typing import List, Coroutine, Callable
from asyncio.base_events import Server
from .const import WebsocketType, CallableOfCoroutine
from .utils import get_logger, EventQueue, object_in_list, task_in_list_until_done, asyncio_current_task

# Maximum number of bytes read from a connection per chunk before forwarding over the websocket.
CONNECTION_READ_CHUNK_SIZE = 65535
# This message is used when a connection arrived on InputTunnel to send to the OutputTunnel.
# This is utilized by OutputTunnel to avoid establishing connections with websockets that aren't
# needed yet by InputTunnel and just exists to reduce latency for new connections.
WEBSOCKET_CONNECTION_START_MESSAGE = 'pasten'


class _ConnectionHandler:
    def __init__(self, websocket: WebsocketType, connection_reader: asyncio.StreamReader, connection_writer: asyncio.StreamWriter, logger: logging.Logger):
        """
        Manage a connection. This is used internally, do not initialize this.
        Used by both InputTunnel for new connections to the tunnel, and OutputTunnel for connections established to a service.
        Create read and write pipes between the websocket and the connection: websocket <-> connection
        :param websocket: websocket to receive and send data in and from the connection
        :param connection_reader: StreamReader object used to read data and send it to the websocket
        :param connection_writer: StreamWriter object used to write data that came from the websocket
        :param logger: logger used for debug/error reporting of this connection
        """
        self._websocket = websocket
        self._connection_reader = connection_reader
        self._connection_writer = connection_writer
        # Pipe tasks are created lazily by _start(); None until then.
        self._websocket_to_connection_task: asyncio.Task = None
        self._connection_to_websocket_task: asyncio.Task = None
        self._logger = logger
        # The following event is triggered when either the websocket or the connection closed or reached end-of-file
        self._eof_event = asyncio.Event()
        # Set by stop() once both pipes are down. Replace with self._connection_writer.wait_closed in Python3.7
        self._shutdown_event = asyncio.Event()

    async def health_check(self):
        """
        Check that both the websocket and the connection can still stream data
        """
        if self._websocket_to_connection_task and self._connection_to_websocket_task:
            return not self._websocket_to_connection_task.done() and not self._connection_to_websocket_task.done() and \
                   not self._websocket.closed and not self._connection_reader.at_eof()
        # Pipes were never started, so nothing can stream yet.
        return False

    async def _websocket_to_connection(self):
        """
        This method stream data that comes from the websocket to the connection_writer.
        If either there is not data left to send (the websocket closed) or there was an error in the communication,
        we signal the self._eof_event.
        """
        try:
            async for msg in self._websocket:
                if msg.type == aiohttp.WSMsgType.BINARY:
                    self._connection_writer.write(msg.data)
                    try:
                        await self._connection_writer.drain()
                    except ConnectionError:
                        self._logger.debug('Connection closed. cannot send data from websocket. Closing websocket-to-connection pipe')
                        break
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    self._logger.exception("Websocket disconnected unexpectedly", exc_info=self._websocket.exception())
                    break
                else:
                    # Non-binary, non-error frames are unexpected on a data pipe; log and keep going.
                    self._logger.warning('unexpected message received from server. Type: %s, Data: %s', msg.type.name, msg.data)
        finally:
            self._eof_event.set()

    async def _connection_to_websocket(self):
        """
        This method stream data that comes from the connection_reader to the websocket.
        If there is no more data to read or there was exception in either reading or writing to the websocket,
        we signal the self._eof_event.
        """
        try:
            while not self._connection_reader.at_eof() and not self._websocket.closed:
                data = await self._connection_reader.read(CONNECTION_READ_CHUNK_SIZE)
                try:
                    await self._websocket.send_bytes(data)
                except ConnectionError:
                    self._logger.debug('Websocket closed. cannot send data from connection. Closing connection-to-websocket pipe')
                    break
        finally:
            self._eof_event.set()

    async def _start(self):
        """
        Start read and write pipes
        This should not be used directly, use self.run_until_eof() instead.
        """
        self._shutdown_event.clear()
        self._websocket_to_connection_task = asyncio.ensure_future(self._websocket_to_connection())
        self._connection_to_websocket_task = asyncio.ensure_future(self._connection_to_websocket())

    async def run_until_eof(self):
        """
        Start working and wait until either websocket's closed (remote at eof) or until connection at eof
        """
        await self._start()
        await self._eof_event.wait()
        await self.stop()

    async def stop(self):
        """
        Stop read and write pipes and close the connection
        """
        for task in (self._websocket_to_connection_task, self._connection_to_websocket_task):
            if task:
                task.cancel()
                try:
                    # Await cancellation so the pipe finishes its cleanup before the writer is closed.
                    await task
                except asyncio.CancelledError:
                    pass
                except Exception:
                    self._logger.exception('An error occurred while awaiting connection pipe to be closed:')
        self._connection_writer.close()
        self._shutdown_event.set()

    async def join(self):
        # Block until stop() finished tearing down both pipes.
        with contextlib.suppress(asyncio.CancelledError):
            await self._shutdown_event.wait()
        :param entrance_address: Tunnel's entrance address
        :param entrance_port: Tunnel's entrance port
        :param exit_address: Tunnel's exit address
        :param exit_port: Tunnel's exit port
        :param logger: A logging.Logger object
        :param stop_tunnel_on_remote_callback: callback for when we need to stop the remote tunnel. Accept parameter `force`
        :param stopped_callback: callback for after the tunnel is stopped
        """
        self._entrance_address = entrance_address
        self._entrance_port = entrance_port
        self._exit_address = exit_address
        self._exit_port = exit_port
        self._connections: List[_ConnectionHandler] = []
        self._logger = logger or get_logger(__name__)
        self._stop_tunnel_on_remote_callback = stop_tunnel_on_remote_callback
        self._stopped_callback = stopped_callback
        # This queue is responsible to hold new websockets ready to be used for incoming connections.
        # Each websocket on the queue is already connected to the tunnel's other side, and the tunnel
        # marks it as consumed once eof is reached.
        # Keeping the queue here separates the tunnel logic from the client/server logic and also
        # reduces the latency of establishing new connections (a websocket is pre-created and waiting).
        self._websocket_queue = EventQueue(maxsize=1)
        self._running = False
        self._shutdown = asyncio.Event()

    def get_entrance_socket(self):
        # (address, port) pair of the tunnel's entrance side
        return self._entrance_address, self._entrance_port

    def get_exit_socket(self):
        # (address, port) pair of the tunnel's exit side
        return self._exit_address, self._exit_port

    @property
    def running(self):
        # True between a successful start() and the beginning of stop()
        return self._running

    async def health_check(self):
        """
        Perform a health check to the tunnel.
        Make sure that the tunnel is running and all the existing connections are working.
        see InputTunnel and OutputTunnel for their individual checks as well.
        """
        return self.running and all([await connection.health_check() for connection in self._connections])

    async def start(self):
        # Subclasses extend this; the base just flips the running flag and re-arms the shutdown event
        self._running = True
        self._shutdown.clear()

    async def stop(self, stop_remote_tunnel=True, force=False):
        """
        Stop the tunnel by closing the connections and the queued websocket.
        We first initiate the queued websocket to be closed in the background so it will start listening for messages,
        and then we ask the remote to also stop so it will start listening to his queued websocket and receive our close message.
        :param stop_remote_tunnel: Whether to call the callback for stopping tunnel on remote. Remote won't be using it to prevent a loop.
        :param force: Whether to close connections forcefully. Otherwise, await for the connections to be closed normally
        """
        if self._running is False:
            return
        self._running = False
        websocket: WebsocketType = None
        try:
            # Drain the live connections first - forcefully (stop) or gracefully (join)
            while self._connections:
                connection = self._connections.pop()
                if force:
                    await connection.stop()
                else:
                    await connection.join()
            # Trigger a background close of the queued (still unused) websocket, if there is one
            with contextlib.suppress(asyncio.QueueEmpty):
                websocket = await self._get_websocket(nowait=True)
                websocket.close_nowait()
            if stop_remote_tunnel and self._stop_tunnel_on_remote_callback:
                await self._stop_tunnel_on_remote_callback(force=force)
        except Exception:
            self._logger.exception('An error occurred while stopping tunnel:')
            raise
        finally:
            # stopping tunnel on remote might throw a network related exception but tunnel is still considered close and
            # we still need to await for the yet-to-be-closed websocket to be closed cleanly (probably by timeout if there were an exception)
            try:
                if websocket is not None:
                    await websocket.wait_closed()
            except aiohttp.WebSocketError:
                self._logger.exception('An error occurred while closing an unused websocket:')
            self._shutdown.set()
            if self._stopped_callback:
                await self._stopped_callback()

    async def join(self):
        """
        Blocks until the tunnel is closed
        """
        await self._shutdown.wait()

    async def feed_websocket(self, websocket: WebsocketType):
        """
        Put a websocket in the self._websocket_queue
        """
        if not self.running:
            raise RuntimeError('Tunnel is not running')
        await self._websocket_queue.put(websocket)

    async def wait_new_websocket(self):
        """
        Blocks until there is a new websocket on the queue
        """
        await self._websocket_queue.join_no_empty()

    async def wait_no_websocket(self):
        """
        Blocks until there is no websocket on the queue
        """
        await self._websocket_queue.join()

    async def _get_websocket(self, nowait=False) -> WebsocketType:
        """
        Return an item from self._websocket_queue, and also mark the task as done.
        Marking done is for the client/server to already start working on the next websocket so we can
        reduce latency for new connections.
        :param nowait: Call get_nowait instead of get.
            This won't block but raise if no websocket on the queue
        """
        if nowait:
            websocket = self._websocket_queue.get_nowait()
        else:
            websocket = await self._websocket_queue.get()
        self._websocket_queue.task_done()
        return websocket


class InputTunnel(Tunnel):
    def __init__(self, entrance_address, entrance_port, websocket_feeder_coro: Coroutine, exit_address=None, exit_port=None,
                 logger: logging.Logger = None, stop_tunnel_on_remote_callback: Callable[[bool], Coroutine] = None, stopped_callback: CallableOfCoroutine = None):
        """
        Creates a local tcp server and listen on _entrance_address and _entrance_port
        For each connection, it creates a websocket to the remote server and start streaming the data on top of it and vice-versa
        This is basically the tunnel's entrance while OutputTunnel is the exit
        :param websocket_feeder_coro: coroutine which responsible to feed this InputTunnel instance with websockets.
            it makes the code simpler since InputTunnel can hook it to the start and close methods
        """
        super().__init__(entrance_address, entrance_port, exit_address, exit_port, logger, stop_tunnel_on_remote_callback, stopped_callback)
        self._websocket_feeder_coro: Coroutine = websocket_feeder_coro
        self._websocket_feeder_task: asyncio.Future = None
        # A list of the active tasks which serves connections to self._server. Used for cleanup when stopping the tunnel
        self._active_handle_connection_tasks = []
        self._server: Server = None

    async def health_check(self):
        # According to asyncio doc, Server.sockets is None if the server is closed
        # On python3.7+ replace self._server.sockets with self._server.is_serving()
        return await super().health_check() and self._server.sockets and not self._websocket_feeder_task.done()

    def get_local_socket(self):
        # The entrance is the local side of an InputTunnel
        return self.get_entrance_socket()

    def get_remote_socket(self):
        # The exit is the remote side of an InputTunnel
        return self.get_exit_socket()

    async def start(self):
        """
        Start the server to listen and handle incoming connections from the local address and port.
        This also starts the websocket_feeder so we can start to receive new websockets
        """
        self._server = await asyncio.start_server(self._handle_connection, host=self._entrance_address, port=self._entrance_port)
        # Re-read the socket name so that a requested port of 0 is replaced with the real allocated port
        self._entrance_address, self._entrance_port = self._server.sockets[0].getsockname()
        self._logger.debug('Start listening on %s:%s', self._entrance_address, self._entrance_port)
        await super().start()
        self._websocket_feeder_task = asyncio.ensure_future(self._websocket_feeder_coro)

    async def stop(self, stop_remote_tunnel=True, force=False):
        # Stop the listening server and the feeder task, then let the base class drain connections
        if self._running is False:
            return
        try:
            self._logger.debug('Stop listening on %s:%s', self._entrance_address, self._entrance_port)
            self._server.close()
            await self._server.wait_closed()
            if force:
                self._logger.debug('Stopping any remaining connections on %s:%s', self._entrance_address, self._entrance_port)
                for task in self._active_handle_connection_tasks:
                    task.cancel()
            self._logger.debug('Stopping websocket feeder on %s:%s', self._entrance_address, self._entrance_port)
            self._websocket_feeder_task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await self._websocket_feeder_task
        except Exception:
            self._logger.exception('An error occurred while stopping tunnel entrance handler:')
        finally:
            await super().stop(stop_remote_tunnel=stop_remote_tunnel, force=force)
            # Any exception will be handled or at least logged in the task, yet we await it to satisfy asyncio.
            # We await only after super().stop(...) since we need self.running to be False.
            with contextlib.suppress(Exception):
                await asyncio.gather(*self._active_handle_connection_tasks)

    @staticmethod
    def _get_connection_identity_display_name(connection_writer: asyncio.StreamWriter) -> str:
        """
        Try to query the connection_writer to identify the connection source and return it.
        If we failed to identify the source, we return "Unknown"
        """
        client_process: subprocess.Popen = connection_writer.get_extra_info('subprocess')
        client_peername = connection_writer.get_extra_info('peername')
        if client_process is not None:
            return f'PID `{client_process.pid}`'
        elif client_peername is not None:
            return f'Socket `{client_peername}`'
        return 'Unknown'

    async def _handle_connection(self, client_reader: asyncio.StreamReader, client_writer: asyncio.StreamWriter):
        """
        Await a websocket to dedicate it for that connection and a _ConnectionHandler to manage communications between them
        """
        current_task = asyncio_current_task()
        # Track this task so stop(force=True) can cancel it
        with object_in_list(current_task, self._active_handle_connection_tasks):
            connection_identity_display_name = self._get_connection_identity_display_name(client_writer)
            self._logger.debug('Serving new connection from %s', connection_identity_display_name)
            websocket = None
            try:
                while websocket is None:
                    try:
                        websocket = await asyncio.wait_for(self._get_websocket(), timeout=5)
                        # We notify the remote that this websocket is about to be used
                        await websocket.send_str(WEBSOCKET_CONNECTION_START_MESSAGE)
                    except asyncio.TimeoutError:
                        # We stop waiting for websocket every few seconds to make sure the tunnel still works
                        # and the client is still alive.
                        if not self.running or client_writer.transport.is_closing():
                            return
                    except ConnectionError:
                        # This might occurred when a queued websocket has been pending long enough, which means it got closed by
                        # the remote / proxy and we'll know about it only on the first message.
                        # The queue will be filled up with a fresh websocket so we try again.
                        websocket = None
                        continue
                connection = _ConnectionHandler(websocket=websocket, connection_reader=client_reader, connection_writer=client_writer, logger=self._logger)
                with object_in_list(connection, self._connections):
                    self._logger.debug('Start serving %s', connection_identity_display_name)
                    await connection.run_until_eof()
            except asyncio.CancelledError:
                self._logger.debug('Abort handling connection to %s', connection_identity_display_name)
            except Exception:
                self._logger.exception('Failed to serve new connection. Closing connection')
                raise
            finally:
                self._logger.debug('Close connection from %s', connection_identity_display_name)
                client_writer.close()
                if websocket:
                    await websocket.close()


class OutputTunnel(Tunnel):
    def __init__(self, exit_address, exit_port, entrance_address=None, entrance_port=None, logger: logging.Logger = None,
                 stop_tunnel_on_remote_callback: Callable[[bool], Coroutine] = None, stopped_callback: CallableOfCoroutine = None):
        """
        This is basically the tunnel's exit while InputTunnel is the entrance. See Tunnel class doc or this modules
        doc above for further understanding.
406 | """ 407 | super().__init__(entrance_address, entrance_port, exit_address, exit_port, logger, stop_tunnel_on_remote_callback, stopped_callback) 408 | self._running_task: asyncio.Task = None 409 | self._serving_connections_tasks = [] 410 | 411 | async def health_check(self): 412 | return await super().health_check() and not self._running_task.done() 413 | 414 | def get_local_socket(self): 415 | return self.get_exit_socket() 416 | 417 | def get_remote_socket(self): 418 | return self.get_entrance_socket() 419 | 420 | async def start(self): 421 | """ 422 | Start processing new websockets and create connections 423 | """ 424 | await super().start() 425 | self._running_task = asyncio.ensure_future(self._run()) 426 | 427 | async def stop(self, stop_remote_tunnel=True, force=False): 428 | """ 429 | Stop receiving new websockets and close all existing connections 430 | """ 431 | try: 432 | if self._running_task is not None: 433 | with contextlib.suppress(asyncio.CancelledError): 434 | self._running_task.cancel() 435 | await self._running_task 436 | except Exception: 437 | self._logger.exception('An error occurred while stopping tunnel exit handler:') 438 | finally: 439 | await super().stop(stop_remote_tunnel=stop_remote_tunnel, force=False) 440 | if self._serving_connections_tasks: 441 | # super().stop() close the connections, and therefore those should be closed right after 442 | await asyncio.gather(*self._serving_connections_tasks) 443 | 444 | async def _serve_new_connection(self, websocket: WebsocketType): 445 | """ 446 | Serve a new connection with the given websocket until eof reached. 447 | Then, mark the websocket_done_event 448 | """ 449 | # We don't want to establish connections with the exit address and port if the websocket we received 450 | # is still queued on the remote InputTunnel. Therefore, we wait for an initial message before connecting. 
451 | message: aiohttp.WSMessage = await websocket.receive() 452 | if websocket.closed: 453 | # Websocket was closed, we don't even start a connection 454 | return 455 | if message.type != aiohttp.WSMsgType.TEXT and message.data == WEBSOCKET_CONNECTION_START_MESSAGE: 456 | self._logger.error('Invalid first message: %s', message.data) 457 | try: 458 | reader, writer = await asyncio.open_connection(host=self._exit_address, 459 | port=self._exit_port) 460 | connection = _ConnectionHandler(websocket=websocket, connection_reader=reader, connection_writer=writer, 461 | logger=self._logger) 462 | with object_in_list(connection, self._connections): 463 | await connection.run_until_eof() 464 | except ConnectionRefusedError: 465 | # No one is listening on the exit address and port, we close the tunnel normally. 466 | self._logger.debug('Connection Refused for `%s:%s`. Closing websocket', self._exit_address, self._exit_port) 467 | finally: 468 | await websocket.close() 469 | 470 | async def _run(self): 471 | """ 472 | Wait for new websocket to arrive and open a connection object to (self._exit_address, self._exit_port). 
473 | Then wait until either the connection or the websocket reach eof before marking the websocket as unneeded 474 | """ 475 | while self.running: 476 | websocket = await self._get_websocket() 477 | task = asyncio.ensure_future(self._serve_new_connection(websocket)) 478 | task_in_list_until_done(task, self._serving_connections_tasks) 479 | -------------------------------------------------------------------------------- /netunnel/common/utils.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Union, Iterable 2 | from aiohttp.web import WebSocketResponse 3 | from aiohttp import ClientWebSocketResponse, ClientSession, BasicAuth, ClientError, TCPConnector 4 | 5 | from .exceptions import NETunnelInvalidProxy 6 | 7 | import logging 8 | import socket 9 | import asyncio 10 | import contextlib 11 | import functools 12 | import sys 13 | import http 14 | 15 | LOGGING_FORMATTER = '%(asctime)s %(name)s - %(levelname)s - %(message)s' 16 | 17 | 18 | def asyncio_current_task(loop=None): 19 | if sys.version_info >= (3, 9, 0): 20 | return asyncio.current_task(loop) 21 | return asyncio.Task.current_task(loop) 22 | 23 | 24 | def asyncio_all_tasks(loop=None): 25 | if sys.version_info >= (3, 9, 0): 26 | return asyncio.all_tasks(loop) 27 | return asyncio.Task.all_tasks(loop) 28 | 29 | 30 | async def get_session(headers=None, ssl=None) -> ClientSession: 31 | """ 32 | Return an aiohttp.ClientSession object. 33 | ClientSession must be initialized inside an async context. 34 | :param headers: Optional headers to append for every request that this session will make 35 | :param ssl: SSLContext object. False to skip validation, None for default SSL check. 
36 | """ 37 | connector = TCPConnector(ssl=ssl) 38 | headers = headers or {} 39 | return ClientSession(connector=connector, ws_response_class=EventClientWebSocketResponse, headers=headers) 40 | 41 | 42 | def get_logger(name, level=logging.INFO): 43 | logger = logging.getLogger(name) 44 | logger.setLevel(level) 45 | handler = logging.StreamHandler(sys.stdout) 46 | formatter = logging.Formatter(LOGGING_FORMATTER) 47 | handler.setFormatter(formatter) 48 | logger.addHandler(handler) 49 | return logger 50 | 51 | 52 | async def run_blocking_func_in_executor(func, *args, **kwargs): 53 | """ 54 | Run a blocking function in a different executor so that it won't stop 55 | the event loop 56 | """ 57 | return await asyncio.get_event_loop().run_in_executor(None, functools.partial(func, *args, **kwargs)) 58 | 59 | 60 | def get_unused_port(min_port: int, max_port: int, exclude_ports: Iterable = None, address='127.0.0.1'): 61 | """ 62 | Return an unused port from a range of integers 63 | :param min_port: minimum port that can be allocated 64 | :param max_port: maximum port that can be allocated 65 | :param exclude_ports: list of ports to exclude 66 | :param address: address of the interface to try binding with 67 | """ 68 | exclude_ports = set(exclude_ports) or set() 69 | for port in range(min_port, max_port): 70 | if port in exclude_ports: 71 | continue 72 | with contextlib.suppress(OSError): 73 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 74 | s.bind((address, port)) 75 | s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 76 | return port 77 | raise RuntimeError(f"Failed to found an available port between {min_port} and {max_port}") 78 | 79 | 80 | @contextlib.contextmanager 81 | def object_in_list(obj: Any, list_of_obj: list): 82 | """ 83 | append obj to list_of_obj and remove it once the with statement finishes. 
@contextlib.contextmanager
def object_in_list(obj: Any, list_of_obj: list):
    """
    append obj to list_of_obj and remove it once the with statement finishes.
    Used like this:

    with object_in_list(item, list_of_items):
        # item is in list_of_items
    # item is not in list_of_items
    """
    list_of_obj.append(obj)
    try:
        yield obj
    finally:
        # Someone else may have removed the object already - that's fine
        with contextlib.suppress(ValueError):
            list_of_obj.remove(obj)


def task_in_list_until_done(task: Union[asyncio.Task, asyncio.Future], list_of_tasks: list):
    """
    Add a task to a list and a callback which removes it from the list once it's done
    """
    def remove_task(*args):
        # Tolerate the task already being removed from the list
        with contextlib.suppress(ValueError):
            list_of_tasks.remove(task)
    list_of_tasks.append(task)
    task.add_done_callback(remove_task)


def update_dict_recursively(dict_to_update: dict, dict_new_values: dict):
    """
    Update the target dictionary recursively
    """
    for key, value in dict_new_values.items():
        if isinstance(value, dict):
            # If the target corresponding key is not a dict as well, we'll override it
            # because it's irrelevant and then we can use update_dict_recursively again
            if key not in dict_to_update or not isinstance(dict_to_update[key], dict):
                dict_to_update[key] = {}
            update_dict_recursively(dict_to_update[key], value)
        else:
            dict_to_update[key] = value


class EventItem(asyncio.Event):
    def __init__(self, loop=None):
        """
        Works just like asyncio.Event with the following enhancements:
        - Stores an item when setting the event and retrieve it when it's available with self.wait
        :param loop: deprecated event loop argument, only forwarded when explicitly given
        """
        # BUGFIX: asyncio.Event dropped the `loop` keyword in Python 3.10, so forwarding it
        # unconditionally raised TypeError on modern interpreters. Forward it only when the
        # caller explicitly passed a loop (pre-3.10 behavior is unchanged: Event(loop=None)
        # is equivalent to Event()).
        if loop is None:
            super().__init__()
        else:
            super().__init__(loop=loop)
        # The item stored by the last set() call; cleared together with the event
        self._obj: Any = None

    def set(self, obj: Any = None):
        self._obj = obj
        return super().set()

    def clear(self):
        self._obj = None
        return super().clear()

    async def wait(self) -> Any:
        await super().wait()
        return self._obj
class EventQueue(asyncio.Queue):
    """
    This queue works just like a normal async queue, a new method self.join_no_empty
    was added. It works just like self.join but in opposite, it returns whenever
    a new item was put in the queue without extracting it
    """
    def __init__(self, maxsize=0):
        super().__init__(maxsize=maxsize)
        # Set while the queue holds at least one item
        self._queue_no_empty = asyncio.Event()

    async def put(self, item):
        await super().put(item)
        self._queue_no_empty.set()

    async def get(self):
        try:
            return await super().get()
        finally:
            if self.qsize() == 0:
                self._queue_no_empty.clear()

    def get_nowait(self):
        try:
            return super().get_nowait()
        finally:
            if self.qsize() == 0:
                self._queue_no_empty.clear()

    async def join_no_empty(self):
        """
        Blocks until a new task is in the queue
        """
        await self._queue_no_empty.wait()


class _WebSocketCloseEventMixin:
    """
    Mixin adding close-tracking on top of an aiohttp websocket class:
    - wait_closed - block until websocket is close (does not trigger close)
    - close_nowait - trigger close websocket without blocking
    Refactored out of EventWebSocketResponse / EventClientWebSocketResponse, which
    previously carried two byte-identical copies of this code.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._shutdown = asyncio.Event()
        self._closing_task: asyncio.Future = None

    async def wait_closed(self):
        """
        Block until the websocket is closed
        """
        await self._shutdown.wait()
        if self._closing_task is not None:
            with contextlib.suppress(asyncio.CancelledError):
                await self._closing_task

    def close_nowait(self):
        """
        Closing without waiting.
        Use wait_closed to await for the close task
        """
        self._closing_task = asyncio.ensure_future(self.close())

    async def close(self, *args, **kwargs):
        # Delegate to the underlying websocket close, then release wait_closed() waiters
        return_value = await super().close(*args, **kwargs)
        self._shutdown.set()
        return return_value


class EventWebSocketResponse(_WebSocketCloseEventMixin, WebSocketResponse):
    """
    Works the same as WebSocketResponse with new methods:
    - wait_closed - block until websocket is close (does not trigger close)
    - close_nowait - trigger close websocket without blocking
    """


class EventClientWebSocketResponse(_WebSocketCloseEventMixin, ClientWebSocketResponse):
    """
    Works the same as ClientWebSocketResponse with new methods:
    - wait_closed - block until websocket is close (does not trigger close)
    - close_nowait - trigger close websocket without blocking
    """


async def verify_proxy(proxy_url, test_url, username=None, password=None):
    """
    Verify that a proxy server is working
    :param proxy_url: url to the proxy
    :param test_url: url that accepts GET method and will be used to test the proxy
    :param username: username to use to authenticate the proxy
    :param password: password to use to authenticate the proxy
    :raises ValueError: if only one of username/password is provided
    :raises NETunnelInvalidProxy: if the proxy rejected or failed the test request
    """
    proxy_auth = None
    if username and password:
        proxy_auth = BasicAuth(username, password)
    elif username or password:
        # BUGFIX: the previous check compared against `is None`, so passing one credential
        # as an empty string silently skipped both authentication and this validation error.
        raise ValueError('HTTP proxy authentication should include both username and password')
    async with ClientSession() as client:
        try:
            async with client.get(test_url, proxy=proxy_url, proxy_auth=proxy_auth) as resp:
                if resp.status != http.HTTPStatus.OK.value:
                    raise NETunnelInvalidProxy(f"Invalid response from proxy: {resp.status}")
        except (ClientError, ConnectionRefusedError) as err:
            raise NETunnelInvalidProxy(str(err))


# -------------------- netunnel/server/__main__.py --------------------
from .server import main

main()
# -------------------- netunnel/server/client_handler.py --------------------
from typing import Dict
from aiohttp.web import WebSocketResponse
from ..common.const import TunnelId
from ..common.tunnel import Tunnel, OutputTunnel, InputTunnel
from ..common.utils import EventWebSocketResponse
from ..common.channel import Channel, Messages, ChannelMessage

import asyncio
import logging


class ChannelHandler:
    def __init__(self, channel_id, client_version, logger: logging.Logger):
        """
        Manage a channel to the server.
        :param channel_id: the channel_id to set the created channel
        :param client_version: the version reported by the connecting client
        :param logger: logger used by this handler and the tunnels it creates
        """
        self._control_channel: Channel = None
        # For backwards compatibility if needed
        self._client_version = client_version
        self._channel_id = channel_id
        self._tunnels: Dict[TunnelId, Tunnel] = {}
        self._tunnel_id_counter = 0
        # This lock is used to prevent duplications of tunnel_ids when multiple requests received at once
        self._tunnels_id_lock = asyncio.Lock()
        self._logger = logger
        # This is used to stop creating new tunnels during shutdown until complete close
        self._closing = False

    async def _get_next_tunnel_id(self):
        # Serialized via the lock so concurrent requests never get the same id
        async with self._tunnels_id_lock:
            self._tunnel_id_counter += 1
            return self._tunnel_id_counter

    @property
    def running(self):
        # The handler is running while its control channel exists and is alive
        return self._control_channel is not None and self._control_channel.running

    async def serve(self, websocket: WebSocketResponse):
        """
        Start serving the channel.
        Close all leftover tunnels after the channel is close
        """
        if self.running:
            raise RuntimeError('Channel is already running')
        self._closing = False
        self._control_channel = Channel(websocket=websocket, channel_id=self._channel_id, logger=self._logger)
        self._logger.debug('Starting serving channel `%s`. Client version: `%s`', self._control_channel.id, self._client_version)
        try:
            await self._control_channel.serve()
        except asyncio.CancelledError:
            self._logger.warning('Channel `%s` was cancelled, probably due to client sudden disconnection', self._control_channel.id)
        except Exception:
            self._logger.exception('Channel `%s` failed to serve:', self._control_channel.id)
            raise
        finally:
            self._logger.debug('Closing channel `%s`', self._control_channel.id)
            for tunnel in list(self._tunnels.values()):
                # At this point, the channel is closed anyway so we can't stop the remote tunnels
                await tunnel.stop(stop_remote_tunnel=False)

    async def close(self, force=True):
        """
        Close the client handler
        :param force: Whether to forcefully close the connections to the tunnels
        """
        self._closing = True
        for tunnel_id, tunnel in list(self._tunnels.items()):
            self._logger.debug('Stopping tunnel `%s` due to channel `%s` shutdown', tunnel_id, self._control_channel.id)
            await tunnel.stop(force=force)
        if self.running:
            await self._control_channel.close()

    async def create_client_to_server_tunnel(self, exit_address, exit_port) -> int:
        """
        Creates a client-to-server tunnel and return the generated tunnel_id
        """
        if self._closing:
            raise RuntimeError('Cannot create new tunnel during shutdown')
        tunnel_id = await self._get_next_tunnel_id()
        self._logger.debug('Creating Client-To-Server Tunnel `%s` for channel `%s`', tunnel_id, self._control_channel.id)
        self._tunnels[tunnel_id] = OutputTunnel(exit_address=exit_address, exit_port=exit_port,
                                                logger=self._logger,
                                                stop_tunnel_on_remote_callback=lambda force: self._delete_tunnel_on_client(tunnel_id, force),
                                                stopped_callback=lambda: self._delete_tunnel_from_tunnels(tunnel_id))
        await self._tunnels[tunnel_id].start()
        return tunnel_id

    async def create_server_to_client_tunnel(self, entrance_address, entrance_port):
        """
        Creates a server-to-client tunnel and return the generated tunnel_id
        """
        if self._closing:
            raise RuntimeError('Cannot create new tunnel during shutdown')
        tunnel_id = await self._get_next_tunnel_id()
        self._logger.debug('Creating Server-To-Client Tunnel `%s` for channel `%s`', tunnel_id, self._control_channel.id)
        self._tunnels[tunnel_id] = InputTunnel(entrance_address=entrance_address, entrance_port=entrance_port,
                                               websocket_feeder_coro=self._input_tunnel_websocket_feeder(tunnel_id),
                                               logger=self._logger,
                                               stop_tunnel_on_remote_callback=lambda force: self._delete_tunnel_on_client(tunnel_id, force),
                                               stopped_callback=lambda: self._delete_tunnel_from_tunnels(tunnel_id))
        await self._tunnels[tunnel_id].start()
        return tunnel_id

    async def delete_tunnel(self, tunnel_id, force):
        """
        Deletes a tunnel. This is designed to be called as a request from the client, so we will only
        stop the tunnel on our side.
        :param tunnel_id: ID of the tunnel to delete
        :param force: Whether to call stop tunnel forcefully or wait for connections to finish
        :raises KeyError: if the tunnel_id is unknown on this channel
        """
        # BUGFIX: guard only the dictionary lookup. Previously tunnel.stop() ran inside the
        # same try block, so a KeyError raised anywhere within stop() (e.g. the stopped
        # callback deleting the id from self._tunnels) was misreported as "tunnel not found".
        try:
            tunnel = self._tunnels[tunnel_id]
        except KeyError:
            raise KeyError(f'Tunnel id `{tunnel_id}` was not found on channel `{self._channel_id}`')
        await tunnel.stop(stop_remote_tunnel=False, force=force)

    async def _delete_tunnel_on_client(self, tunnel_id, force):
        """
        Request the client to delete its tunnel via the control channel
        :param tunnel_id: ID of the tunnel to delete
        :param force: Whether to call stop tunnel forcefully or wait for connections to finish
        """
        self._logger.debug('Requesting client to stop tunnel `%s` on channel `%s`', tunnel_id, self._channel_id)
        delete_tunnel_message = ChannelMessage(message_type=Messages.DELETE_TUNNEL, data={'tunnel_id': tunnel_id, 'force': force})
        await self._control_channel.send_message(delete_tunnel_message, raise_if_error=True)

    async def _delete_tunnel_from_tunnels(self, tunnel_id):
        # Invoked by the tunnel's stopped_callback once it has fully stopped
        self._logger.debug('Closing tunnel `%s` on channel `%s`', tunnel_id, self._channel_id)
        del self._tunnels[tunnel_id]

    async def _input_tunnel_websocket_feeder(self, tunnel_id):
        """
        Get the tunnel_id of an InputTunnel and as long as this tunnel running, wait until it consumes its pending websocket
        and then requests the client for a new one.
        """
        tunnel = self._tunnels[tunnel_id]
        while tunnel.running:
            await tunnel.wait_no_websocket()
            # The tunnel may have been stopped while we waited
            if not tunnel.running:
                break
            self._logger.debug('Generating new websocket for tunnel `%s` on channel `%s`', tunnel_id, self._control_channel.id)
            new_connection_message = ChannelMessage(message_type=Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION, data={'tunnel_id': tunnel_id})
            response = await self._control_channel.send_message(new_connection_message)
            if not response.is_ok():
                self._logger.error('Failed to request the client to create a new websocket for tunnel_id `%s`: %s.', tunnel_id, response.error)
            try:
                await asyncio.wait_for(tunnel.wait_new_websocket(), timeout=10)
            except asyncio.TimeoutError:
                self._logger.warning('Failed to retrieve websocket for Channel `%s` on Tunnel `%s` after 10 seconds. Retrying..', self._channel_id, tunnel_id)

    async def serve_new_connection(self, websocket: EventWebSocketResponse, tunnel_id: int):
        """
        Get a websocket and feed it to the tunnel by the given id.
        wait until the tunnel finished with the websocket before closing it
        """
        if not self.running:
            raise RuntimeError('Channel is not connected')
        if tunnel_id not in self._tunnels:
            raise RuntimeError(f'Tunnel by id `{tunnel_id}` does not exists')
        await self._tunnels[tunnel_id].feed_websocket(websocket)
        await websocket.wait_closed()


# -------------------- netunnel/server/config.py --------------------
import os
import copy
import json
import time
import asyncio
import aiofiles
import contextlib
import click

from ..common.utils import run_blocking_func_in_executor

# The default config is the basic configuration of the netunnel.
# In case the configuration already exists, it performs an update over the default config.
# This way, in case the default configuration has a new key in some version, it will be added automatically.
# We don't perform a full recursive merge between the current and the default configuration, to protect
# dict values like 'allowed_tunnel_destinations' from being changed unexpectedly.
_DEFAULT_CONFIG = {
    'allowed_tunnel_destinations': {"127.0.0.1": "*"},
    'http_proxy': None,
    'http_proxy_test_url': 'https://google.com',
    'peers': [],
    'allow_unverified_ssl_peers': False,
    'revision': 1
    # secret_key is not provided here so it won't be dynamically stored in the configuration file by mistake
}
ENV_VARIABLES_PREFIX = 'NETUNNEL_'


def get_default_config(use_env_vars=True) -> dict:
    """
    Return a deep copy of the default configuration.
    If use_env_vars is True, config values will be override by environment variables if defined.
    For example, to override the value of "allowed_tunnel_destinations", export "NETUNNEL_ALLOWED_TUNNEL_DESTINATIONS".
    Environment variables are expected to be in json format, and are usually defined in the systemd service.
    """
    default_config = copy.deepcopy(_DEFAULT_CONFIG)
    if use_env_vars:
        for key in default_config:
            env_var_name = ENV_VARIABLES_PREFIX + key.upper()
            if env_var_name in os.environ:
                default_config[key] = json.loads(os.environ[env_var_name])
    return default_config


class NETunnelConfiguration:
    def __init__(self, config_path=None):
        """
        USE ONLY `await NETunnelConfiguration.create(path)` TO INITIALIZE AN OBJECT

        Manage configurations for NETunnelServer in JSON format.
        Store configurations in memory unless config_path was given.
        :param config_path: Optional path to a configuration file in which to store changes.
52 | """ 53 | self._config_path = config_path 54 | # This lock is used to prevent multiple disk writes when saving the configuration using safe writes. 55 | self._saving_config_lock = asyncio.Lock() 56 | self._config = get_default_config() 57 | 58 | @classmethod 59 | async def create(cls, config_path=None): 60 | self = NETunnelConfiguration(config_path=config_path) 61 | await self._initialize() 62 | return self 63 | 64 | async def recreate(self): 65 | """ 66 | Recreate the configuration file from the default settings. 67 | This will erase the current configurations. 68 | """ 69 | self._config = get_default_config() 70 | await self.save() 71 | 72 | async def _initialize(self): 73 | """ 74 | Initialize config by loading config_path 75 | """ 76 | if self._config_path: 77 | with contextlib.suppress(FileNotFoundError): 78 | async with aiofiles.open(self._config_path) as config_file: 79 | data = await config_file.read() 80 | self._config.update(json.loads(data)) 81 | await self.save() 82 | 83 | async def save(self): 84 | """ 85 | Save configurations using safe writes to the config path if exists 86 | """ 87 | if self._config_path is None: 88 | return 89 | async with self._saving_config_lock: 90 | temp_config_path = f"{self._config_path}.{time.time()}" 91 | # First we write all the config to a temporary file 92 | async with aiofiles.open(temp_config_path, 'w') as temp_config_file: 93 | await temp_config_file.write(json.dumps(self._config, indent=4)) 94 | await temp_config_file.flush() 95 | await run_blocking_func_in_executor(os.fsync, temp_config_file) 96 | # Now we overwrite the config file with an atomic operation 97 | await run_blocking_func_in_executor(os.rename, temp_config_path, self._config_path) 98 | 99 | def __getitem__(self, key): 100 | return self._config[key] 101 | 102 | def __setitem__(self, key, value): 103 | self._config[key] = value 104 | 105 | def __contains__(self, key): 106 | return key in self._config 107 | 108 | def get(self, key, default=None): 109 | 
@main.command()
@click.argument('path')
@click.option('--custom-changes', default='{}', help='A json dump string of the changes you want to make in the default configuration')
def create(path, custom_changes):
    """
    Create a configuration file at PATH.

    The file is first seeded with the custom changes (a JSON object string),
    then loaded through NETunnelConfiguration so the remaining default keys
    are merged in and the full result is written back to disk.
    """
    custom_changes = json.loads(custom_changes)

    # BUGFIX: os.makedirs('') raises FileNotFoundError, so only create parent
    # directories when PATH actually contains a directory component.
    config_dir = os.path.dirname(path)
    if config_dir:
        os.makedirs(config_dir, exist_ok=True)
    with open(path, 'w') as f:
        json.dump(custom_changes, f)

    async def create_config():
        # Loading merges the seeded overrides on top of the defaults; save()
        # persists the merged configuration.
        config = await NETunnelConfiguration.create(path)
        await config.save()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(create_config())
    @property
    def static_tunnels(self) -> List[StaticTunnel]:
        """
        Return a list of the static tunnels to this peer. Used by the nested field of PeerSchema
        """
        # Return a snapshot list (not a live dict view) so callers can iterate
        # safely while tunnels are added to / removed from self._static_tunnels.
        return list(self._static_tunnels.values())
67 | Restart the static tunnels of this peer so they will use the new settings 68 | """ 69 | if new_url: 70 | self._target_netunnel_url = new_url 71 | if new_auth: 72 | self._auth = new_auth 73 | for static_tunnel in self.static_tunnels: 74 | static_tunnel_settings = StaticTunnelSchema().dump3(static_tunnel) 75 | await self.delete_static_tunnel(static_tunnel.id) 76 | await self.add_static_tunnel(**static_tunnel_settings) 77 | 78 | def _generate_static_tunnel_id(self) -> int: 79 | """ 80 | Generates an unused static tunnel id 81 | """ 82 | if self._static_tunnels: 83 | return max(self._static_tunnels.keys()) + 1 84 | return 1 85 | 86 | def _new_client(self): 87 | """ 88 | Return a NETunnelClient to the peer 89 | """ 90 | return NETunnelClient(server_url=self._target_netunnel_url, proxy_url=self._proxy_url, 91 | proxy_username=self._proxy_username, proxy_password=self._proxy_password, 92 | auth_client=self._auth, ssl=self._ssl, logger=self._logger) 93 | 94 | async def verify_connectivity(self): 95 | """ 96 | Make sure there is a connection to the peer by query it's version. 
97 | Raises an exception if peer is not connected 98 | """ 99 | try: 100 | async with self._new_client() as client: 101 | await client.get_remote_version() 102 | except NETunnelAuthError as err: 103 | self._logger.debug('The following exception raised when trying to connect to the peer:', exc_info=err) 104 | raise NETunnelAuthError(f'Failed to authenticate with peer `{self.name}`') 105 | except aiohttp.ClientError as err: 106 | self._logger.debug('The following exception raised when trying to connect to the peer:', exc_info=err) 107 | raise NETunnelServerError(f'Failed to connect with peer `{self.name}`') 108 | return True 109 | 110 | async def set_new_proxy(self, proxy_url, proxy_username, proxy_password): 111 | """ 112 | Set a new http proxy to use when communicating with this peer 113 | """ 114 | self._proxy_url = proxy_url 115 | self._proxy_username = proxy_username 116 | self._proxy_password = proxy_password 117 | for static_tunnel in self.static_tunnels: 118 | await static_tunnel.set_new_proxy(proxy_url, proxy_username, proxy_password) 119 | 120 | async def add_static_tunnel(self, tunnel_remote_address, tunnel_remote_port, tunnel_local_port, tunnel_local_address, id=None, verify_connectivity=True): 121 | """ 122 | Creates a new static tunnel for this peer and start it. 123 | Return the generated static tunnel 124 | :param tunnel_remote_address: Remote address used as the exit address of the tunnel 125 | :param tunnel_remote_port: Remote port used as the exit port of the tunnel 126 | :param tunnel_local_address: Local address used as the entrance address of the tunnel 127 | :param tunnel_local_port: Local port used as the entrance port of the tunnel 128 | :param id: Optional id to set this tunnel. 
Used when tunnel is initialized from the config 129 | :param verify_connectivity: Whether to verify connectivity before adding the tunnel 130 | """ 131 | if verify_connectivity: 132 | await self.verify_connectivity() 133 | async with self._creating_static_tunnel_lock: 134 | # Set static tunnel id 135 | static_tunnel_id = id or self._generate_static_tunnel_id() 136 | if id in self._static_tunnels: 137 | raise RuntimeError(f'ID `{id}` for static tunnel on peer `{self.name}` is already in use') 138 | 139 | # Create and start the new static tunnel 140 | static_tunnel = StaticTunnel(id=static_tunnel_id, tunnel_local_port=tunnel_local_port, 141 | tunnel_local_address=tunnel_local_address, tunnel_remote_port=tunnel_remote_port, 142 | tunnel_remote_address=tunnel_remote_address, target_netunnel_url=self._target_netunnel_url, 143 | auth_client=self._auth, proxy_url=self._proxy_url, proxy_username=self._proxy_username, 144 | proxy_password=self._proxy_password, ssl=self._ssl, logger=self._logger) 145 | self._logger.info('Creating static tunnel `%s` to peer `%s`', static_tunnel.get_tunnel_display_name(), self.name) 146 | await static_tunnel.start() 147 | await static_tunnel.wait_online() 148 | self._static_tunnels[static_tunnel_id] = static_tunnel 149 | return static_tunnel 150 | 151 | async def delete_static_tunnels(self): 152 | """ 153 | Stop and remove all static tunnels 154 | """ 155 | while self._static_tunnels: 156 | _, static_tunnel = self._static_tunnels.popitem() 157 | await static_tunnel.stop() 158 | 159 | async def delete_static_tunnel(self, id): 160 | """ 161 | Remove static tunnel from this peer by id 162 | """ 163 | if id not in self._static_tunnels: 164 | raise NETunnelServerNotFound(f'No static tunnel by id `{id}` on `{self.name}`') 165 | static_tunnel = self._static_tunnels.pop(id) 166 | await static_tunnel.stop() 167 | 168 | def get_static_tunnel(self, id): 169 | """ 170 | Return a static tunnel by ID 171 | """ 172 | if id not in self._static_tunnels: 173 | 
class NETunnelSchema(Schema):
    """
    Base schema for netunnel objects.

    Compatibility shim over marshmallow: on 2.X, Schema.dump/load return a
    (data, errors) result object instead of raising, while on 3.X they return
    the data directly and raise ValidationError themselves. dump3/load3
    normalize both versions to the 3.X behavior. We don't override dump/load
    directly because marshmallow uses them internally.
    """
    @staticmethod
    def _as_marshmallow3_result(result, legacy_type_name):
        # Detect the marshmallow 2.X result wrapper by its type name, since
        # the wrapper classes don't exist at all on marshmallow 3.X (so
        # isinstance checks are impossible).
        if type(result).__name__ == legacy_type_name:
            if result.errors:
                raise ValidationError(message=result.errors)
            return result.data
        return result

    def dump3(self, obj, *args, **kwargs):
        """Serialize with marshmallow-3.X semantics regardless of the installed version."""
        return self._as_marshmallow3_result(super().dump(obj, *args, **kwargs), 'MarshalResult')

    def load3(self, obj, *args, **kwargs):
        """Deserialize with marshmallow-3.X semantics regardless of the installed version."""
        return self._as_marshmallow3_result(super().load(obj, *args, **kwargs), 'UnmarshalResult')
validate=validate.Range(min=1, max=65535)) 39 | 40 | @pre_load 41 | def validate_scheme(self, data, **kwargs): 42 | for address in ['tunnel_remote_address', 'tunnel_local_address']: 43 | try: 44 | ipaddress.IPv4Address(data.get(address, '127.0.0.1')) 45 | except ipaddress.AddressValueError as err: 46 | raise ValidationError(str(err)) 47 | return data 48 | 49 | 50 | class PeerSchema(NETunnelSchema): 51 | id = fields.Integer() 52 | name = fields.String(required=True) 53 | target_netunnel_url = fields.URL(required=True) 54 | auth_data = fields.Dict(required=True, keys=fields.String(), values=fields.String()) 55 | static_tunnels = fields.List(fields.Nested(StaticTunnelSchema)) 56 | -------------------------------------------------------------------------------- /netunnel/server/static_tunnel.py: -------------------------------------------------------------------------------- 1 | from ..client import NETunnelClient 2 | from ..common.tunnel import Tunnel 3 | from ..common.utils import get_logger 4 | from ..common.exceptions import NETunnelAuthError 5 | 6 | import time 7 | import aiohttp 8 | import sys 9 | import logging 10 | import asyncio 11 | import contextlib 12 | 13 | CONNECTION_RETRY_INTERVAL = 10 14 | HEALTH_CHECK_INTERVAL = 30 15 | 16 | 17 | class StaticTunnel: 18 | def __init__(self, id, target_netunnel_url, tunnel_local_address, tunnel_local_port, tunnel_remote_address, 19 | tunnel_remote_port, auth_client, proxy_url=None, proxy_username=None, proxy_password=None, ssl=None, 20 | logger=None, connection_retry_interval=CONNECTION_RETRY_INTERVAL): 21 | """ 22 | Interface for managing a static tunnel 23 | :param id: unique id for that static tunnel 24 | :param target_netunnel_url: url of the remote peer's netunnel server 25 | :param tunnel_local_address: local address to use for the static tunnel 26 | :param tunnel_local_port: local port to use for the static tunnel 27 | :param tunnel_remote_address: remote address to use for the static 28 | :param 
tunnel_remote_port: remote port to use for the static tunnel 29 | :param auth_client: Instance of subclass of netunnel.common.auth.NETunnelClientAuth that will be used to authenticate the remote 30 | :param proxy_url: url for a proxy to use when making requests to the remote netunnel server 31 | :param proxy_username: Optional username to use when authenticating with the proxy_url. `proxy_password` must be given as well 32 | :param proxy_password: Optional password to use when authenticating with the proxy_url. `proxy_username` must be given as well 33 | :param ssl: SSLContext object. False to skip validation, None for default SSL check. 34 | :param logger: logger to use 35 | :param connection_retry_interval: interval for retrying to connect the remote peer after failure 36 | """ 37 | self._id = id 38 | self._client: NETunnelClient = NETunnelClient(server_url=target_netunnel_url, proxy_url=proxy_url, 39 | proxy_username=proxy_username, proxy_password=proxy_password, 40 | auth_client=auth_client, ssl=ssl, logger=logger) 41 | self._tunnel_remote_address = tunnel_remote_address 42 | self._tunnel_remote_port = tunnel_remote_port 43 | self._tunnel_local_address = tunnel_local_address 44 | self._tunnel_local_port = tunnel_local_port 45 | self._tunnel: Tunnel = None 46 | # _running is True when the static tunnel needs to be online 47 | self._running = False 48 | # This event is True when the static tunnel is actually online 49 | self._online_event = asyncio.Event() 50 | self._logger: logging.Logger = logger or get_logger(f'static_tunnel `{self._id}`') 51 | self._tunnel_running_task: asyncio.Future = None 52 | self._tunnel_watchdog_task: asyncio.Future = None 53 | self._connection_retry_interval = connection_retry_interval 54 | 55 | @property 56 | def id(self): 57 | return self._id 58 | 59 | @property 60 | def tunnel_local_address(self): 61 | return self._tunnel_local_address 62 | 63 | @property 64 | def tunnel_local_port(self): 65 | return self._tunnel_local_port 66 | 67 
| @property 68 | def tunnel_remote_address(self): 69 | return self._tunnel_remote_address 70 | 71 | @property 72 | def tunnel_remote_port(self): 73 | return self._tunnel_remote_port 74 | 75 | @property 76 | def running(self): 77 | return self._running 78 | 79 | def get_tunnel_display_name(self): 80 | return f'local on {self._tunnel_local_address}:{self._tunnel_local_port} -> {self._client.server_url} on {self._tunnel_remote_address}:{self._tunnel_remote_port}' 81 | 82 | async def start(self): 83 | """ 84 | Start connecting the tunnel to the peer. If peer is not available 85 | or the connection is lost, keep retrying 86 | """ 87 | if self._running: 88 | raise RuntimeError('Static tunnel already started') 89 | self._running = True 90 | self._tunnel_running_task = asyncio.ensure_future(self._tunnel_keep_alive()) 91 | self._tunnel_watchdog_task = asyncio.ensure_future(self._tunnel_watchdog()) 92 | 93 | async def wait_online(self): 94 | """ 95 | Blocks until the the static tunnel is up and running 96 | """ 97 | await self._online_event.wait() 98 | 99 | async def set_new_proxy(self, proxy_url, proxy_username, proxy_password): 100 | """ 101 | Set a new http proxy to the client and close the existing tunnel so it will recreate it with the new proxy 102 | """ 103 | if not self._running: 104 | self._client.set_client_proxy(proxy_url, proxy_username, proxy_password) 105 | return 106 | try: 107 | await self.stop() 108 | self._client.set_client_proxy(proxy_url, proxy_username, proxy_password) 109 | finally: 110 | await self.start() 111 | 112 | async def _tunnel_watchdog(self): 113 | """ 114 | Perform health checks to the tunnel every certain interval. 
115 | if the tunnel marked as running but the health check fails, we stop it so that 116 | the _tunnel_keep_alive task can wake up and restart it 117 | """ 118 | while self._running: 119 | if self._tunnel and self._tunnel.running and not await self._tunnel.health_check(): 120 | # We use force because the tunnel is probably malfunctioning and might hang with graceful shutdown 121 | self._logger.info('Static Tunnel `%s` is not working. Restarting tunnel', self.get_tunnel_display_name()) 122 | await self._tunnel.stop(force=True) 123 | await asyncio.sleep(HEALTH_CHECK_INTERVAL) 124 | 125 | async def _tunnel_keep_alive(self): 126 | """ 127 | try to connect to the peer and join the tunnel. If peer disconnected or unavailable, 128 | keep trying until closed. 129 | """ 130 | while self._running: 131 | try: 132 | async with self._client: 133 | start_time = time.time() 134 | self._tunnel: Tunnel = await asyncio.wait_for(self._client.open_tunnel_to_server(remote_address=self._tunnel_remote_address, 135 | remote_port=self._tunnel_remote_port, 136 | local_address=self._tunnel_local_address, 137 | local_port=self._tunnel_local_port, 138 | wait_ready=True), 139 | self._connection_retry_interval) 140 | self._logger.info('Static Tunnel `%s` is online', self.get_tunnel_display_name()) 141 | self._online_event.set() 142 | await self._tunnel.join() 143 | self._online_event.clear() 144 | # At this point, the tunnel might have been closed unexpectedly so we break only if we're signaled to stop 145 | if self._running is False: 146 | self._logger.info('Static Tunnel `%s` is closed', self.get_tunnel_display_name()) 147 | break 148 | self._logger.info('Static Tunnel `%s` is offline after %s seconds. 
Waiting %s seconds before reconnecting...', 149 | self.get_tunnel_display_name(), int(time.time() - start_time), self._connection_retry_interval) 150 | await asyncio.sleep(self._connection_retry_interval) 151 | except Exception as err: 152 | self._online_event.clear() 153 | if isinstance(err, aiohttp.ClientConnectionError): 154 | self._logger.warning('Failed to connect to `%s`', self._client.server_url) 155 | elif isinstance(err, NETunnelAuthError): 156 | self._logger.warning('Failed to authenticate with `%s`', self._client.server_url) 157 | else: 158 | self._logger.exception('Failed to establish a tunnel with the peer `%s`:', self._client.server_url) 159 | sys.exit(1) 160 | 161 | async def stop(self, force=True): 162 | """ 163 | Close the connection to the peer 164 | :param force: Whether to close the tunnel forcefully. Defaults to True 165 | """ 166 | self._running = False 167 | if self._tunnel is not None and self._tunnel.running: 168 | await self._tunnel.stop(force=force) 169 | await self._tunnel_running_task 170 | if not self._tunnel_watchdog_task.done(): 171 | # We cancel the watchdog task to avoid waiting for the sleep interval 172 | self._tunnel_watchdog_task.cancel() 173 | with contextlib.suppress(asyncio.CancelledError): 174 | await self._tunnel_watchdog_task 175 | await self._client.close() -------------------------------------------------------------------------------- /netunnel/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/claroty/netunnel/01d28f539c079796eec21afc6525bb771173d636/netunnel/tests/__init__.py -------------------------------------------------------------------------------- /netunnel/tests/auth_utils.py: -------------------------------------------------------------------------------- 1 | import jwt 2 | import time 3 | 4 | from aiohttp import web 5 | from netunnel.common.auth import NETunnelClientAuth, NETunnelServerAuth 6 | from netunnel.common.utils 
class MockClientAuth(NETunnelClientAuth):
    # Test-only client-side auth plugin: trades a shared secret for a short-lived
    # JWT via the server's /authenticate endpoint and sends it as a Bearer token.
    def __init__(self, secret, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared secret; also used locally to verify the JWT signature.
        self._secret = secret
        # JWT string received from the server; None until authenticate() succeeds.
        self._token = None

    async def authenticate(self, client, *args, **kwargs):
        """POST the secret to the server and store the JWT it returns."""
        payload = {'secret': self._secret}
        session = await get_session(ssl=False)
        async with session:
            async with session.post(f'{client.server_url}/authenticate', json=payload, raise_for_status=True) as resp:
                data = await resp.json()
                self._token = data['token']

    async def is_authenticated(self):
        """Return True when we hold a token that verifies and has not expired."""
        if self._token is None:
            return False
        try:
            # NOTE(review): jwt.decode without an `algorithms` argument is the
            # PyJWT 1.x API — confirm the pinned PyJWT version before upgrading.
            result = jwt.decode(self._token.encode(), self._secret)
        except (ValueError, jwt.DecodeError):
            return False
        return result['exp'] > time.time()

    async def get_authorized_headers(self):
        """Headers to attach to authenticated requests."""
        return {'Authorization': f'Bearer {self._token}'}

    def dump_object(self):
        # Serializable form used when the auth settings are persisted.
        return {'secret': self._secret}


class MockServerAuth(NETunnelServerAuth):
    # Test-only server-side auth plugin matching MockClientAuth: issues and
    # verifies 30-second JWTs signed with the shared secret.
    def __init__(self, secret, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._secret = secret

    async def get_client_for_peer(self, secret):
        """Return the client-side counterpart used when this server dials a peer."""
        return MockClientAuth(secret=secret)

    async def is_authenticated(self, request: web.Request):
        """Validate the Bearer token on an incoming request; False on any failure."""
        if 'Authorization' not in request.headers:
            return False
        try:
            # ValueError covers a malformed header (wrong number of parts).
            auth_type, token = request.headers['Authorization'].split()
            result = jwt.decode(token.encode(), self._secret)
        except (ValueError, jwt.DecodeError):
            return False
        if result['exp'] < time.time():
            return False
        return True

    async def authenticate(self, request: web.Request):
        """Exchange the posted secret for a fresh 30-second token."""
        data = await request.json()
        if data['secret'] != self._secret:
            return web.HTTPForbidden()
        # .decode() implies jwt.encode returns bytes — also PyJWT 1.x behavior.
        response = {'token': jwt.encode({'exp': time.time() + 30}, self._secret).decode()}
        return web.json_response(response)
@contextlib.contextmanager
def config_path_context_manager():
    """
    Yield a unique path for a netunnel config file inside a throwaway
    temporary directory. The directory — and any file the test created at the
    yielded path — is removed when the context exits.
    """
    unique_name = f'test_netunnel_config_{time.time()}.json'
    with tempfile.TemporaryDirectory() as tmp_dir:
        yield os.path.join(tmp_dir, unique_name)
40 | """ 41 | with config_path_context_manager() as config_path: 42 | server_logger = get_logger('test_netunnel_server', logging.DEBUG) 43 | client_logger = get_logger('test_netunnel_client', logging.DEBUG) 44 | server = NETunnelServer(config_path=config_path, host='127.0.0.1', port=aiohttp_unused_port(), logger=server_logger) 45 | await server.start() 46 | server_url = f'http://localhost:{server._port}' 47 | async with NETunnelClient(server_url=server_url, logger=client_logger) as client: 48 | yield client 49 | await server.stop() 50 | 51 | 52 | @pytest.fixture 53 | async def netunnel_server(loop, aiohttp_unused_port): 54 | """ 55 | Creates a NETunnelServer instance, start and return it. 56 | """ 57 | with config_path_context_manager() as config_path: 58 | server_logger = get_logger('test_netunnel_server', logging.DEBUG) 59 | server = NETunnelServer(config_path=config_path, host='127.0.0.1', port=aiohttp_unused_port(), logger=server_logger) 60 | await server.start() 61 | yield server 62 | await server.stop() 63 | 64 | 65 | @pytest.fixture 66 | async def not_started_netunnel_client_server(loop, aiohttp_unused_port) -> Tuple[NETunnelClient, NETunnelServer]: 67 | """ 68 | Creates a client-server instance of netunnel and return a tuple of (client, server) 69 | """ 70 | with config_path_context_manager() as config_path: 71 | server_logger = get_logger('test_netunnel_server') 72 | client_logger = get_logger('test_netunnel_client') 73 | server = NETunnelServer(config_path=config_path, host='127.0.0.1', port=aiohttp_unused_port(), logger=server_logger) 74 | server_url = f'http://localhost:{server._port}' 75 | client = NETunnelClient(server_url=server_url, logger=client_logger) 76 | yield client, server 77 | 78 | 79 | @pytest.fixture 80 | async def netunnel_client_server(not_started_netunnel_client_server) -> Tuple[NETunnelClient, NETunnelServer]: 81 | """ 82 | Creates a client-server instance of netunnel and return a tuple of (client, server) 83 | """ 84 | client, server 
@pytest.fixture(scope="session")
def bytes_data():
    """
    Session-wide fixture: a 256-byte payload containing every possible byte
    value (0x00–0xFF), useful for exercising binary round-trips.
    """
    return bytes(value for value in range(256))
16 | """ 17 | def before_upstream_connection( 18 | self, request: HttpParser) -> Optional[HttpParser]: 19 | return request 20 | 21 | def handle_client_request( 22 | self, request: HttpParser) -> Optional[HttpParser]: 23 | try: 24 | with open(os.environ[ACCESS_LOG_KEY] ,'a') as f: 25 | f.write(request.host.decode() + '\n') 26 | except KeyError: 27 | raise RuntimeError('ACCESSED_HOSTS_LOG was not defined') 28 | return request 29 | 30 | def handle_upstream_chunk(self, chunk: memoryview) -> memoryview: 31 | return chunk 32 | 33 | def on_upstream_connection_close(self) -> None: 34 | pass -------------------------------------------------------------------------------- /netunnel/tests/test_channel.py: -------------------------------------------------------------------------------- 1 | from netunnel.common import channel 2 | 3 | import contextlib 4 | import asyncio 5 | import aiohttp 6 | import pytest 7 | 8 | 9 | def test_messages_encoding(): 10 | data = { 11 | 'test_message': channel.Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION 12 | } 13 | assert channel.message_bson_loader(channel.message_bson_dumps(data)) == data 14 | 15 | 16 | def test_api_message(): 17 | message = channel.ChannelMessage(message_type=channel.Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION) 18 | with pytest.raises(channel.ChannelError): 19 | message.get_valid_response() 20 | with pytest.raises(channel.ChannelError): 21 | message.get_error_response('') 22 | message = channel.ChannelMessage(message_type=channel.Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION, 23 | _identifier=0) 24 | assert isinstance(message.get_valid_response(), channel.ChannelResponse) 25 | assert isinstance(message.get_error_response(''), channel.ChannelResponse) 26 | 27 | 28 | def test_api_response(): 29 | message = channel.ChannelMessage(message_type=channel.Messages.TUNNEL_SERVER_TO_CLIENT_NEW_CONNECTION, 30 | _identifier=0) 31 | assert message.get_valid_response().is_ok() 32 | error_response = 
class MockWSMsg:
    # Stand-in for aiohttp's WSMessage; `type` and `data` are assigned ad hoc.
    data = None


class MockWebsocket:
    """
    Minimal in-memory stand-in for an aiohttp websocket.

    Two instances are linked to each other: bytes sent on one side are queued
    on its linked peer and come back out of the peer's async iteration as
    BINARY messages.
    """
    def __init__(self):
        self.closed = False
        self._linked_mock_websocket: MockWebsocket = None
        self._data_to_retrieve = asyncio.Queue()

    async def add_data(self, data):
        # Queue bytes for this side's async iterator to deliver.
        await self._data_to_retrieve.put(data)

    async def send_bytes(self, data: bytes):
        # Sending from this side delivers into the linked peer's queue.
        await self._linked_mock_websocket.add_data(data)

    def link_mock_websocket(self, mock_websocket):
        self._linked_mock_websocket = mock_websocket

    def __aiter__(self):
        return self

    async def __anext__(self):
        incoming = MockWSMsg()
        incoming.type = aiohttp.WSMsgType.BINARY
        incoming.data = await self._data_to_retrieve.get()
        return incoming

    async def close(self):
        self.closed = True
async def test_write_config(config_path):
    """Saving the configuration persists new keys to disk as JSON."""
    conf = await config.NETunnelConfiguration.create(config_path=config_path)
    # A freshly created configuration equals the defaults on disk.
    with open(config_path) as config_file:
        assert json.load(config_file) == get_default_config()
    marker = str(time.time())
    conf[marker] = marker
    await conf.save()
    # The key added above must be readable from the file after save().
    with open(config_path) as config_file:
        assert json.load(config_file)[marker] == marker
async def test_load_config(config_path):
    """An existing config file on disk is loaded rather than overwritten."""
    marker = str(time.time())
    with open(config_path, 'w') as config_file:
        json.dump({marker: marker}, config_file)
        # Force the bytes to disk so create() below reads the fresh content.
        config_file.flush()
        os.fsync(config_file.fileno())
    loaded = await config.NETunnelConfiguration.create(config_path=config_path)
    assert loaded[marker] == marker
async def test_recreate_config(config_path):
    """
    recreate() must reset a modified configuration back to the defaults,
    while still honouring overrides coming from environment variables.
    """
    default_config = get_default_config()
    netunnel_config = await config.NETunnelConfiguration.create(config_path=config_path)
    marker = str(time.time())

    # Mutate the config, then verify recreate() restores the defaults.
    netunnel_config[marker] = marker
    await netunnel_config.recreate()
    assert netunnel_config._config == default_config

    # recreate() must pick up values overridden through the environment.
    overridden_key = next(iter(default_config))
    env_var_name = ENV_VARIABLES_PREFIX + overridden_key.upper()
    with environment_variables({env_var_name: marker}):
        default_config = get_default_config()
        await netunnel_config.recreate()
        assert netunnel_config._config == default_config
import NETunnelServer 3 | from netunnel import __version__ 4 | from .utils import assert_tunnel_echo_server 5 | 6 | import socket 7 | import asyncio 8 | 9 | 10 | class TestNETunnelFlow: 11 | @staticmethod 12 | def _get_test_data(aiohttp_unused_port): 13 | return { 14 | 'local_address': "127.0.0.1", 15 | 'local_port': aiohttp_unused_port(), 16 | 'remote_address': "127.0.0.1", 17 | 'remote_port': aiohttp_unused_port() 18 | } 19 | 20 | @staticmethod 21 | async def assert_tunnel_no_remote_service(tunnel_entrance_host, tunnel_entrance_port, bytes_data): 22 | """ 23 | Try connect to a tunnel without a remote service available 24 | """ 25 | reader, writer = await asyncio.open_connection(host=tunnel_entrance_host, port=tunnel_entrance_port) 26 | writer.write(bytes_data) 27 | await writer.drain() 28 | assert await reader.read(1024) == b'' # Expected output when remote close the connection gracefully 29 | writer.close() 30 | 31 | async def assert_tunnel_working(self, tunnel, bytes_data): 32 | # Check if tunnel is listens 33 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: 34 | assert sock.connect_ex(tunnel.get_entrance_socket()) == 0, f"Tunnel not listening on {tunnel.get_entrance_socket()}" 35 | 36 | # Try to connect to the tunnel without a "remote" service to accept the connection 37 | entrance_host, entrance_port = tunnel.get_entrance_socket() 38 | await asyncio.wait_for(self.assert_tunnel_no_remote_service(entrance_host, entrance_port, bytes_data), 2) 39 | 40 | # # Create a "remote" service to accept the connection and try again 41 | exit_host, exit_port = tunnel.get_exit_socket() 42 | await asyncio.wait_for(assert_tunnel_echo_server(entrance_host, entrance_port, exit_host, exit_port, bytes_data), 2) 43 | 44 | async def test_tunnel_client_to_server(self, netunnel_client: NETunnelClient, aiohttp_unused_port, bytes_data): 45 | tunnel = await netunnel_client.open_tunnel_to_server(**self._get_test_data(aiohttp_unused_port)) 46 | await 
self.assert_tunnel_working(tunnel, bytes_data) 47 | await tunnel.stop() 48 | 49 | async def test_tunnel_server_to_client(self, netunnel_client: NETunnelClient, aiohttp_unused_port, bytes_data): 50 | tunnel = await netunnel_client.open_tunnel_to_client(**self._get_test_data(aiohttp_unused_port)) 51 | await self.assert_tunnel_working(tunnel, bytes_data) 52 | await tunnel.stop() 53 | 54 | async def test_netunnel_client_reuse(self, netunnel_server: NETunnelServer, aiohttp_unused_port): 55 | """Check that netunnel client release whatever it needs to be used again""" 56 | netunnel_url = f'http://127.0.0.1:{netunnel_server._port}' 57 | client = NETunnelClient(server_url=netunnel_url) 58 | async with client: 59 | await client.open_tunnel_to_client(**self._get_test_data(aiohttp_unused_port)) 60 | async with client: 61 | await client.open_tunnel_to_client(**self._get_test_data(aiohttp_unused_port)) 62 | 63 | async def test_get_remote_version(self, netunnel_client: NETunnelClient): 64 | assert __version__ == await netunnel_client.get_remote_version() 65 | 66 | async def test_static_tunnels(self, netunnel_client: NETunnelClient, bytes_data, aiohttp_unused_port): 67 | tunnel_remote_port = aiohttp_unused_port() 68 | peer = await netunnel_client.register_peer('abc', target_netunnel_url=netunnel_client.server_url) 69 | static_tunnel = await netunnel_client.create_peer_static_tunnel(peer['name'], tunnel_remote_port=tunnel_remote_port) 70 | try: 71 | await assert_tunnel_echo_server(tunnel_entrance_address=static_tunnel['tunnel_local_address'], tunnel_entrance_port=static_tunnel['tunnel_local_port'], 72 | tunnel_exit_address='127.0.0.1', tunnel_exit_port=tunnel_remote_port, bytes_data=bytes_data) 73 | finally: 74 | await netunnel_client.delete_peer_by_id(peer['id']) 75 | -------------------------------------------------------------------------------- /netunnel/tests/test_peers.py: -------------------------------------------------------------------------------- 1 | from typing import 
Tuple 2 | from netunnel.client import NETunnelClient 3 | from netunnel.server.server import NETunnelServer 4 | from netunnel.common import exceptions 5 | from netunnel.common.utils import get_logger 6 | from .auth_utils import MockServerAuth, MockClientAuth 7 | 8 | import pytest 9 | 10 | 11 | class TestPeers: 12 | async def test_peers_rest_api(self, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer]): 13 | client, server = netunnel_client_server 14 | assert len(await client.list_peers()) == 0 15 | peer1 = await client.register_peer(name='peer1', target_netunnel_url=client.server_url) 16 | # Test unique name 17 | with pytest.raises(exceptions.NETunnelError): 18 | await client.register_peer(name='peer1', target_netunnel_url=client.server_url) 19 | assert len(await client.list_peers()) == 1 20 | # Test creation of another peer and delete it 21 | peer2 = await client.register_peer(name='peer2', target_netunnel_url=client.server_url) 22 | assert len(await client.list_peers()) == 2 23 | await client.delete_peer_by_id(peer2['id']) 24 | # Test the remaining peer is really the first one 25 | peers = await client.list_peers() 26 | assert len(peers) == 1 27 | assert peers[0] == peer1 28 | # Test GET requests for exists and non-exists peers 29 | assert await client.get_peer_by_id(peer1['id']) == peer1 30 | assert await client.get_peer_by_name(peer1['name']) == peer1 31 | with pytest.raises(exceptions.NETunnelError): 32 | await client.get_peer_by_id(999) 33 | with pytest.raises(exceptions.NETunnelError): 34 | await client.get_peer_by_name('non-existing-peer') 35 | # Test update peer name 36 | new_peer = await client.update_peer(peer1['id'], new_name='new_peer1') 37 | assert peer1['id'] == new_peer['id'] and new_peer['name'] == 'new_peer1' 38 | peer1 = new_peer 39 | # Test exception in update peer doesn't partially change it 40 | with pytest.raises(exceptions.NETunnelError): 41 | await client.update_peer(peer1['id'], new_name='unused_name', 
new_target_netunnel_url='http://non.existings.url.com/') 42 | assert await client.get_peer_by_id(peer1['id']) == peer1 43 | # Test static tunnels API 44 | assert len(await client.list_peer_static_tunnels(peer1['name'])) == 0 45 | static_tunnel1 = await client.create_peer_static_tunnel(peer_name=peer1['name'], tunnel_remote_port=22) 46 | assert len(await client.list_peer_static_tunnels(peer1['name'])) == 1 47 | static_tunnel2 = await client.create_peer_static_tunnel(peer_name=peer1['name'], tunnel_remote_port=22) 48 | assert len(await client.list_peer_static_tunnels(peer1['name'])) == 2 49 | await client.delete_peer_static_tunnel(peer_name=peer1['name'], static_tunnel_id=static_tunnel2['id']) 50 | static_tunnels = await client.list_peer_static_tunnels(peer1['name']) 51 | assert len(static_tunnels) == 1 52 | assert static_tunnels[0] == static_tunnel1 53 | assert await client.get_peer_static_tunnel(peer_name=peer1['name'], static_tunnel_id=static_tunnel1['id']) == static_tunnel1 54 | await client.delete_peer_by_id(peer1['id']) 55 | 56 | async def test_update_target_url(self, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer]): 57 | client, server = netunnel_client_server 58 | peer = await client.register_peer(name='peer1', target_netunnel_url=client.server_url) 59 | static_tunnel = await client.create_peer_static_tunnel(peer_name=peer['name'], tunnel_remote_port=22) 60 | assert server._peers[peer['id']]._static_tunnels[static_tunnel['id']]._client.server_url == client.server_url 61 | with pytest.raises(exceptions.NETunnelError): 62 | await client.update_peer(peer['id'], new_target_netunnel_url='http://non.existings.url.com/') 63 | # We manually change the url to skip the validation, and then we test setting it to the valid one 64 | server._peers[peer['id']]._static_tunnels[static_tunnel['id']]._client.server_url = 'http://non.existings.url.com/' 65 | await client.update_peer(peer['id'], new_target_netunnel_url=client.server_url) 66 | assert 
server._peers[peer['id']]._static_tunnels[static_tunnel['id']]._client.server_url == client.server_url 67 | 68 | async def test_authenticated_peer(self, config_path, aiohttp_unused_port): 69 | logger = get_logger('test_authenticated_peer') 70 | auth_client = MockClientAuth(secret='hlu') 71 | auth_server = MockServerAuth(secret='hlu') 72 | server = NETunnelServer(config_path, port=aiohttp_unused_port(), auth_server=auth_server, logger=logger) 73 | await server.start() 74 | async with NETunnelClient(f'http://localhost:{server._port}', auth_client=auth_client, logger=logger) as client: 75 | # Make sure the authentication work with the server 76 | await client.get_remote_version() 77 | # Make sure the authentication does not work when registering peer (because we didn't provide data) 78 | with pytest.raises(exceptions.NETunnelResponseError): 79 | await client.register_peer(name='peer1', target_netunnel_url=client.server_url) 80 | # Make sure it does not work when we provide invalid data 81 | auth_data = {'secret': 'invalid'} 82 | with pytest.raises(exceptions.NETunnelResponseError): 83 | await client.register_peer(name='peer1', target_netunnel_url=client.server_url, auth_data=auth_data) 84 | # Make sure it works now that provide valid data 85 | auth_data = {'secret': 'hlu'} 86 | await client.register_peer(name='peer1', target_netunnel_url=client.server_url, auth_data=auth_data) 87 | await server.stop() 88 | -------------------------------------------------------------------------------- /netunnel/tests/test_server.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | from urllib.parse import urlparse 3 | from netunnel.client import NETunnelClient 4 | from netunnel.server.config import get_default_config 5 | from netunnel.server.server import NETunnelServer, SECRET_STRING 6 | from netunnel.common.exceptions import NETunnelResponseError 7 | from netunnel.common.utils import get_logger 8 | from .utils import 
ProxyForTests 9 | from .auth_utils import MockClientAuth, MockServerAuth 10 | 11 | import copy 12 | import json 13 | import pytest 14 | 15 | 16 | class TestNETunnelServer: 17 | async def test_server_can_close_tunnels_from_client(self, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer], aiohttp_unused_port): 18 | client, server = netunnel_client_server 19 | tunnel = await client.open_tunnel_to_server(remote_address='127.0.0.1', remote_port=aiohttp_unused_port()) 20 | assert tunnel.running 21 | tunnel_id = list(client._tunnels.keys())[0] 22 | await server.channel_handlers[client._control_channel.id]._tunnels[tunnel_id].stop() 23 | assert tunnel.running is False 24 | 25 | @pytest.mark.parametrize('allowed_dests_config, dest_address, legit_ports, not_legit_ports', 26 | [({"127.0.0.1": "22"}, "127.0.0.1", [22], [23, 24, 21]), 27 | ({"127.0.0.1": ""}, "127.0.0.1", [], [22, 23, 24, 21]), 28 | ({"127.0.0.1": "22, 23"}, "127.0.0.1", [22, 23], [24, 21]), 29 | ({"127.0.0.1": "*"}, "127.0.0.1", [1, 2, 22, 443], []), 30 | (None, "127.0.0.1", [1, 2, 22, 443], []), 31 | ({"8.8.8.8": "*"}, "127.0.0.1", [], [22, 443, 5000]), 32 | ({"8.8.8.8": "22,443"}, "8.8.8.8", [22, 443], [5000]), 33 | ],) 34 | async def test_server_allowed_tunnel_destinations(self, not_started_netunnel_client_server: Tuple[NETunnelClient, NETunnelServer], 35 | allowed_dests_config, dest_address, legit_ports, not_legit_ports): 36 | client, server = not_started_netunnel_client_server 37 | if allowed_dests_config: 38 | with open(server._config_path, 'w') as f: 39 | test_config = get_default_config() 40 | test_config.update({'allowed_tunnel_destinations': allowed_dests_config}) 41 | json.dump(test_config, f) 42 | await server.start() 43 | async with client as client: 44 | # verify allowed ports are working 45 | for port in legit_ports: 46 | tunnel = await client.open_tunnel_to_server(remote_address=dest_address, remote_port=port) 47 | assert tunnel.running 48 | # verify not allowed ports are not working 
49 | for port in not_legit_ports: 50 | with pytest.raises(NETunnelResponseError): 51 | await client.open_tunnel_to_server(remote_address=dest_address, remote_port=port) 52 | await server.stop() 53 | 54 | @pytest.mark.parametrize("username,password", [(None, None), ('abc', 'abc')]) 55 | @pytest.mark.skip(reason="We need to redo the sys.exit() on static_tunnel.py") 56 | async def test_default_http_proxy(self, username, password, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer], aiohttp_unused_port): 57 | test_url = 'http://www.google.com/' 58 | test_url_hostname = urlparse(test_url).hostname 59 | client, server = netunnel_client_server 60 | current_http_proxy_settings = await client.get_server_default_http_proxy() 61 | assert current_http_proxy_settings == {} 62 | proxy_port = aiohttp_unused_port() 63 | proxy_url = f'http://localhost:{proxy_port}' 64 | with ProxyForTests(port=proxy_port, username=username, password=password) as proxy: 65 | new_http_proxy_settings = await client.set_server_default_http_proxy(proxy_url=proxy_url, username=username, password=password, test_url=test_url) 66 | proxy.assert_host_forwarded(test_url_hostname) 67 | assert new_http_proxy_settings['proxy_url'] == proxy_url 68 | # If username and password has value, we expect the response to be censored 69 | assert new_http_proxy_settings['username'] == (username and SECRET_STRING) 70 | assert new_http_proxy_settings['password'] == (password and SECRET_STRING) 71 | unverified_http_proxy_settings = await client.set_server_default_http_proxy(proxy_url='', username=username, password=password, check_proxy=False) 72 | assert unverified_http_proxy_settings['proxy_url'] == '' 73 | assert unverified_http_proxy_settings['username'] == (username and SECRET_STRING) 74 | assert unverified_http_proxy_settings['password'] == (password and SECRET_STRING) 75 | empty_http_proxy_settings = await client.set_server_default_http_proxy(proxy_url=None) 76 | assert empty_http_proxy_settings == {} 77 | 
tunnel_remote_port = aiohttp_unused_port() 78 | peer_settings = await client.register_peer(name='abc', target_netunnel_url=client.server_url) 79 | static_tunnel_settings = await client.create_peer_static_tunnel(peer_name='abc', tunnel_remote_port=tunnel_remote_port) 80 | proxy_port = aiohttp_unused_port() 81 | proxy_url = f'http://localhost:{proxy_port}' 82 | try: 83 | with ProxyForTests(port=proxy_port, username=username, password=password) as proxy: 84 | await client.set_server_default_http_proxy(proxy_url=proxy_url, username=username, password=password, test_url=test_url) 85 | static_tunnel = server._peers[peer_settings['id']]._static_tunnels[static_tunnel_settings['id']] 86 | assert static_tunnel._client._proxy_url == proxy_url 87 | proxy.assert_host_forwarded(test_url_hostname) 88 | finally: 89 | await client.delete_peer_by_id(peer_settings['id']) 90 | 91 | async def test_setup_encryptor(self, netunnel_server: NETunnelServer): 92 | """ 93 | Test that the encryptor is setup correctly before and after it was set again. 
94 | """ 95 | assert 'secret_key' in netunnel_server._config 96 | original_auto_generated_secret_key = netunnel_server._config['secret_key'] 97 | await netunnel_server._setup_encryptor() 98 | assert original_auto_generated_secret_key == netunnel_server._config['secret_key'] 99 | 100 | async def test_authentication(self, config_path, aiohttp_unused_port): 101 | logger = get_logger('test_authentication') 102 | auth_client = MockClientAuth(secret='hlu') 103 | auth_server = MockServerAuth(secret='hlu') 104 | server = NETunnelServer(config_path, port=aiohttp_unused_port(), auth_server=auth_server, logger=logger) 105 | await server.start() 106 | with pytest.raises(NETunnelResponseError): 107 | async with NETunnelClient(f'http://localhost:{server._port}', logger=logger) as client: 108 | await client.get_remote_version() 109 | async with NETunnelClient(f'http://localhost:{server._port}', auth_client=auth_client, logger=logger) as client: 110 | await client.get_remote_version() 111 | await server.stop() 112 | 113 | @pytest.mark.skip(reason="We need to redo the sys.exit() on static_tunnel.py") 114 | async def test_factory_reset(self, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer]): 115 | client, server = netunnel_client_server 116 | changable_config_keys = ['secret_key'] 117 | original_config = copy.deepcopy(server._config._config) 118 | 119 | # Fill up some data and make sure the configuration got updated 120 | await client.register_peer('peer', target_netunnel_url=client.server_url) 121 | await client.create_peer_static_tunnel(peer_name='peer', tunnel_remote_port=22) 122 | await client.set_server_default_http_proxy(proxy_url='http://127.0.0.1:8899', username='abc', password='abc', 123 | check_proxy=False) 124 | assert server._config._config != original_config 125 | 126 | # Perform factory reset and make sure it worked as expected 127 | await client.factory_reset() 128 | new_config = copy.deepcopy(server._config._config) 129 | for key in 
changable_config_keys: 130 | assert original_config.pop(key) != new_config.pop(key) 131 | assert original_config == new_config 132 | 133 | # Perform factory reset which disconnect all clients 134 | assert client.connected 135 | await client.factory_reset(disconnect_clients=True) 136 | assert not client.connected 137 | -------------------------------------------------------------------------------- /netunnel/tests/test_static_tunnels.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | from netunnel.client import NETunnelClient 3 | from netunnel.server.server import NETunnelServer 4 | from .utils import assert_tunnel_echo_server, assert_tunnel_not_listening 5 | 6 | import pytest 7 | import asyncio 8 | 9 | 10 | class TestStaticTunnels: 11 | async def test_static_tunnel_startup(self, config_path, aiohttp_unused_port, bytes_data): 12 | """ 13 | Test that static tunnels are working probably on startup 14 | """ 15 | server_port = aiohttp_unused_port() 16 | port_to_tunnel = aiohttp_unused_port() 17 | server = NETunnelServer(config_path=config_path, port=server_port) 18 | await server.start() 19 | async with NETunnelClient(f'http://localhost:{server_port}') as client: 20 | peer = await client.register_peer('abc', target_netunnel_url=client.server_url) 21 | static_tunnel = await client.create_peer_static_tunnel(peer['name'], tunnel_remote_port=port_to_tunnel) 22 | # Assert static tunnel is working 23 | await assert_tunnel_echo_server(tunnel_entrance_address=static_tunnel['tunnel_local_address'], 24 | tunnel_entrance_port=static_tunnel['tunnel_local_port'], 25 | tunnel_exit_address=static_tunnel['tunnel_remote_address'], 26 | tunnel_exit_port=static_tunnel['tunnel_remote_port'], 27 | bytes_data=bytes_data) 28 | 29 | # Assert static tunnel stopped 30 | await server.stop() 31 | assert_tunnel_not_listening(static_tunnel['tunnel_local_address'], static_tunnel['tunnel_local_port']) 32 | # Assert static tunnel 
started working again 33 | await server.start() 34 | await server._peers[peer['id']]._static_tunnels[static_tunnel['id']].wait_online() 35 | await assert_tunnel_echo_server(tunnel_entrance_address=static_tunnel['tunnel_local_address'], 36 | tunnel_entrance_port=static_tunnel['tunnel_local_port'], 37 | tunnel_exit_address=static_tunnel['tunnel_remote_address'], 38 | tunnel_exit_port=static_tunnel['tunnel_remote_port'], 39 | bytes_data=bytes_data) 40 | 41 | # Cleanup 42 | await server.stop() 43 | 44 | async def test_static_tunnel_reconnect(self, netunnel_client_server: Tuple[NETunnelClient, NETunnelServer], config_path, aiohttp_unused_port, bytes_data): 45 | """ 46 | Test that static tunnels successfully recreate themselves if there was a disconnection 47 | """ 48 | client, server = netunnel_client_server 49 | 50 | port = aiohttp_unused_port() 51 | target_server = NETunnelServer(config_path=config_path, port=port) 52 | target_server_url = f'http://127.0.0.1:{port}' 53 | await target_server.start() 54 | 55 | tunnel_remote_port = aiohttp_unused_port() 56 | peer = await client.register_peer('abc', target_netunnel_url=target_server_url) 57 | tunnel_info = await client.create_peer_static_tunnel(peer['name'], tunnel_remote_port=tunnel_remote_port) 58 | 59 | static_tunnel_object = server._peers[peer['id']]._static_tunnels[tunnel_info['id']] 60 | # patch the connection retry interval so we won't have to wait for reconnection too long 61 | static_tunnel_object._connection_retry_interval = 1 62 | 63 | await assert_tunnel_echo_server(tunnel_entrance_address=tunnel_info['tunnel_local_address'], 64 | tunnel_entrance_port=tunnel_info['tunnel_local_port'], 65 | tunnel_exit_address=tunnel_info['tunnel_remote_address'], 66 | tunnel_exit_port=tunnel_info['tunnel_remote_port'], 67 | bytes_data=bytes_data) 68 | # Stop target server to cause disconnection and make sure it's disconnected 69 | await target_server.stop() 70 | assert_tunnel_not_listening(tunnel_info['tunnel_local_address'], 
tunnel_info['tunnel_local_port']) 71 | 72 | # Start target server to see if reconnection works 73 | await target_server.start() 74 | try: 75 | await asyncio.wait_for(static_tunnel_object.wait_online(), 2) 76 | except asyncio.TimeoutError: 77 | await static_tunnel_object.stop() 78 | pytest.fail('Static tunnel did not reconnect after 2 seconds') 79 | await assert_tunnel_echo_server(tunnel_entrance_address=tunnel_info['tunnel_local_address'], 80 | tunnel_entrance_port=tunnel_info['tunnel_local_port'], 81 | tunnel_exit_address=tunnel_info['tunnel_remote_address'], 82 | tunnel_exit_port=tunnel_info['tunnel_remote_port'], 83 | bytes_data=bytes_data) 84 | 85 | # cleanup 86 | await client.delete_peer_by_id(peer['id']) 87 | await target_server.stop() 88 | 89 | -------------------------------------------------------------------------------- /netunnel/tests/test_tunnel.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import asyncio 3 | import aiohttp 4 | import logging 5 | 6 | from netunnel.common.tunnel import _ConnectionHandler 7 | 8 | 9 | @pytest.fixture 10 | async def echo_server_socket(loop, aiohttp_unused_port): 11 | """ 12 | Creates a server which echo back whatever it receive. 
class MockWebsocket:
    """In-memory websocket double that replays `items` as BINARY messages.

    Everything the code under test sends via `send_bytes` is collected in
    `self._result` so tests can verify it afterwards with `assert_received`.

    :param items: payloads yielded (in order) when the websocket is iterated.
    :param hook_iteration: optional coroutine awaited with each produced
                           message; useful to run checks mid-stream.
    """
    def __init__(self, items: list, hook_iteration=None):
        self._iter = iter(items)
        self._result = []
        self._hook_iteration = hook_iteration
        self.closed = False

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            res = aiohttp.WSMessage(aiohttp.WSMsgType.BINARY, next(self._iter), None)
            if self._hook_iteration:
                await self._hook_iteration(res)
            return res
        except StopIteration:
            # Mark ourselves closed once the scripted payloads run out.
            self.closed = True
            raise StopAsyncIteration

    async def send_bytes(self, data):
        self._result.append(data)

    def assert_received(self, expected):
        """Assert that the bytes sent over this websocket equal `expected`.

        Bug fix: the previous implementation merely *returned* the comparison
        result (and compared the raw chunk list against bytes), so callers'
        checks could never fail. Join the collected chunks and actually assert.
        """
        received = b''.join(self._result)
        assert received == expected, f'received {received!r}, expected {expected!r}'
@pytest.mark.parametrize("test_target_dict,test_target_source,expected", [
    # empty into empty
    ({}, {}, {}),
    # flat key is overridden
    ({'a': 'a'}, {'a': 'b'}, {'a': 'b'}),
    # new key is added
    ({}, {'a': 'a'}, {'a': 'a'}),
    # nested dicts are merged, not replaced
    ({'a': {'a': 'a'}}, {'a': {'b': 'b'}}, {'a': {'a': 'a', 'b': 'b'}}),
    # a dict value replaces a scalar value
    ({'a': 'a'}, {'a': {'a': 'a'}}, {'a': {'a': 'a'}})
])
def test_update_dict_recursively(test_target_dict, test_target_source, expected):
    """update_dict_recursively mutates its target in place with a deep merge."""
    utils.update_dict_recursively(test_target_dict, test_target_source)
    assert test_target_dict == expected
async def test_event_item(bytes_data):
    """EventItem behaves like an asyncio.Event that can carry a payload."""
    item = utils.EventItem()
    assert not item.is_set()
    # Setting without a payload resolves wait() to None.
    item.set()
    assert await item.wait() is None
    assert item.is_set()
    item.clear()
    assert not item.is_set()
    # Setting with a payload hands that payload to wait().
    item.set(bytes_data)
    assert item.is_set()
    assert await item.wait() == bytes_data
    # Clearing drops the payload for the next set().
    item.clear()
    item.set()
    assert await item.wait() is None
class ProxyForTests(proxy.Proxy):
    """
    A proxy.py server for tests that logs every host forwarded through it,
    so tests can assert that traffic actually passed via the proxy.

    The access log is written to a per-instance temporary directory whose
    path is advertised to the logging plugin through the ACCESS_LOG_KEY
    environment variable.
    """
    def __init__(self, port, username=None, password=None):
        """
        :param port: local port the proxy listens on (bound to 127.0.0.1)
        :param username: optional basic-auth username (only used together with password)
        :param password: optional basic-auth password (only used together with username)
        """
        self._port = port
        credentials = []
        # Basic-auth is enabled only when BOTH username and password are given.
        if None not in (username, password):
            credentials = ['--basic-auth', f'{username}:{password}']
        self._temp_dir = tempfile.TemporaryDirectory()
        # Remember any pre-existing value so __exit__ can restore it instead of
        # leaking this instance's log path into subsequent tests (the original
        # code set the variable here but never removed it).
        self._previous_access_log = os.environ.get(ACCESS_LOG_KEY)
        os.environ[ACCESS_LOG_KEY] = self.log_path
        super().__init__(['--hostname', '127.0.0.1', '--port', str(port),
                          '--plugins', 'netunnel.tests.helpers.proxy_plugins.LogAccessedHostsPlugin',
                          '--num-workers', '1', *credentials])

    @property
    def log_path(self):
        # Path of the access log inside the temporary directory; None once the
        # directory has been torn down.
        if self._temp_dir:
            return os.path.join(self._temp_dir.name, 'test_proxy_access_hosts.log')

    def assert_host_forwarded(self, hostname):
        """
        Assert that `hostname` appears in the proxy's access log,
        i.e. that a request to it was forwarded through this proxy.
        """
        with open(self.log_path) as f:
            logs = f.read()
        assert hostname in logs, f'`{hostname}` not found in proxy logs:\n{logs}'

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the environment so one test's proxy configuration cannot
        # bleed into the next, then drop the temp dir and stop the proxy.
        if self._previous_access_log is None:
            os.environ.pop(ACCESS_LOG_KEY, None)
        else:
            os.environ[ACCESS_LOG_KEY] = self._previous_access_log
        self._temp_dir.cleanup()
        super().__exit__(exc_type, exc_val, exc_tb)
@contextlib.contextmanager
def environment_variables(env_vars: dict):
    """
    Temporarily overlay `env_vars` onto os.environ for the duration of the
    context; the full original environment is restored on exit, whether the
    body succeeds or raises.
    """
    snapshot = dict(os.environ)
    try:
        os.environ.update(env_vars)
        yield
    finally:
        # Wipe everything and re-apply the snapshot so keys that were added
        # (not just changed) by env_vars disappear again.
        os.environ.clear()
        os.environ.update(snapshot)
#!/usr/bin/env python3
from setuptools import setup, find_packages
import pathlib

# Repository root, used to resolve README.md / CHANGES.md next to this file.
HERE = pathlib.Path(__file__).parent


def read(path):
    """Return the stripped UTF-8 text of `path`, relative to the repo root."""
    return (HERE / path).read_text("utf-8").strip()


# Runtime dependencies. Kept as ranges here (requirements.txt pins exact
# versions for development/CI).
install_requires = [
    'aiohttp>=3.9.5,<4.0.0',
    'aiofiles>=0.0.4',
    'pymongo>=4.8.0',
    'marshmallow>=2.8,<4',  # We have temporary backwards compatibility for 2.X, but also support 3.X
    'cryptography>=43.0.0',
    'colorama>=0.2',
    'click',
    'importlib-metadata<4'
]

setup(
    name="netunnel",
    version='1.0.13',
    description='A tool to create network tunnels over HTTP/S written in Python 3',
    # PyPI long description is README followed by the changelog.
    long_description="\n\n".join((read("README.md"), read("CHANGES.md"))),
    long_description_content_type='text/markdown',
    author='Claroty Open Source',
    author_email='opensource@claroty.com',
    maintainer='Claroty Open Source',
    maintainer_email='opensource@claroty.com',
    url='https://github.com/claroty/netunnel',
    license="Apache 2",
    # Ship the package itself; test modules are excluded from the wheel.
    packages=find_packages(exclude=('*test*',)),
    install_requires=install_requires,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)