├── .coveragerc
├── .flake8
├── .gitignore
├── .travis.yml
├── CHANGELOG
├── LICENSE
├── Makefile
├── README.md
├── conf
├── application.cfg
└── logging.cfg
├── examples
├── plugins
│ └── no_gzip.py
└── viewer
│ └── simple_viewer.py
├── microproxy
├── __init__.py
├── cert.py
├── command_line.py
├── config.py
├── context
│ ├── __init__.py
│ ├── base.py
│ ├── event.py
│ ├── http.py
│ ├── layer.py
│ ├── plugin.py
│ ├── server.py
│ ├── tls.py
│ └── viewer.py
├── event
│ ├── __init__.py
│ ├── client.py
│ ├── manager.py
│ ├── replay.py
│ └── types.py
├── exception.py
├── interceptor
│ ├── __init__.py
│ ├── interceptor.py
│ ├── msg_publisher.py
│ └── plugin_manager.py
├── layer
│ ├── __init__.py
│ ├── application
│ │ ├── __init__.py
│ │ ├── forward.py
│ │ ├── http1.py
│ │ ├── http2.py
│ │ └── tls.py
│ ├── base.py
│ ├── debug
│ │ ├── __init__.py
│ │ └── http2.py
│ ├── manager.py
│ └── proxy
│ │ ├── __init__.py
│ │ ├── http.py
│ │ ├── replay.py
│ │ ├── socks.py
│ │ └── transparent.py
├── log.py
├── protocol
│ ├── __init__.py
│ ├── http1.py
│ ├── http2.py
│ └── tls.py
├── proxy.py
├── pyca_tls
│ ├── __init__.py
│ └── _constructs.py
├── server_state.py
├── test
│ ├── __init__.py
│ ├── context
│ │ ├── __init__.py
│ │ ├── test_base.py
│ │ ├── test_http.py
│ │ ├── test_layer.py
│ │ └── test_viewer.py
│ ├── data
│ │ └── replay.script
│ ├── event
│ │ ├── __init__.py
│ │ ├── test_client.py
│ │ ├── test_manager.py
│ │ └── test_replay.py
│ ├── interceptor
│ │ ├── __init__.py
│ │ └── test_msg_publisher.py
│ ├── layer
│ │ ├── __init__.py
│ │ ├── test_forward.py
│ │ ├── test_http1.py
│ │ ├── test_http2.py
│ │ ├── test_manager.py
│ │ ├── test_replay.py
│ │ ├── test_socks.py
│ │ ├── test_tls.py
│ │ └── test_transparent.py
│ ├── protocol
│ │ ├── __init__.py
│ │ ├── client_hello.bin
│ │ ├── test_http1.py
│ │ ├── test_http2.py
│ │ └── test_tls.py
│ ├── test.crt
│ ├── test.key
│ ├── test_cert_store.py
│ ├── test_commandline.py
│ ├── test_config.py
│ ├── test_log.py
│ ├── test_server_state.py
│ ├── tornado_ext
│ │ ├── __init__.py
│ │ └── test_iostream.py
│ ├── utils.py
│ └── viewer
│ │ ├── __init__.py
│ │ ├── test_console.py
│ │ ├── test_formatter.py
│ │ └── test_tui.py
├── tornado_ext
│ ├── __init__.py
│ ├── iostream.py
│ └── tcpserver.py
├── utils.py
├── version.py
└── viewer
│ ├── __init__.py
│ ├── console.py
│ ├── formatter.py
│ ├── tui.py
│ └── utils.py
├── mpcertutil.sh
├── requirements
├── development.txt
├── proxy.txt
└── viewer.txt
└── setup.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [report]
2 | omit = microproxy/test/*,microproxy/layer/debug/*,microproxy/pyca_tls/*
3 | exclude_lines =
4 | pragma: no cover
5 | def __repr__
6 | def __neq__
7 | raise AssertionError
8 | raise NotImplementedError
9 | if __name__ == .__main__.:
10 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E501
3 | exclude = **/__init__.py
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*.swp
2 | *.pyc
3 | .coverage
4 | htmlcov
5 | mpserver.log
6 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "2.7"
4 |
5 | install: "pip install -r requirements/development.txt"
6 | script:
7 | - make coverage
8 |
9 | after_success:
10 | coveralls
11 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | 03 November 2016: MicroProxy 0.4.1
2 | This release fixes a critical issue that made microProxy fail to start.
3 |
4 | * Use a proper download link for socks5 in setup.py and requirements file. (credit chhsiao90)
5 |
6 | 26 October 2016: MicroProxy 0.4.0
7 | This release adds several new features, massive internal refactoring, and unit tests.
8 |
9 | * Add replay feature.
10 | * server side implementation. (credit chhsiao90)
11 | * tui implementation. (credit chhsiao90)
12 | * command line implementation.
13 |
14 | * Refactor layer structure.
15 | * socks5: using socks5 module
16 | * http1: using h11 module. (credit chhsiao90)
17 | * http2: refactor for better maintainability. (credit chhsiao90)
18 |
19 | * TUI Enhancement. (credit chhsiao90)
20 | * Add new key bindings.
21 | * urlencoded formatter.
22 | * Improved user experience.
23 |
24 | * Config and command line refinement.
25 | * remove subcommand and change into individual binary interfaces.
26 |
27 | * Bug fixed.
28 |
29 |
30 | 31 July 2016: MicroProxy 0.3.0
31 |
32 | This release focus on adding new features and code refactoring.
33 |
34 | * Add setup.py.
35 | * Support HTTP2 Protocol. (credit chhsiao90)
36 | * SOCKS layer Improvement.
37 | * better error handling.
38 | * handle timeout.
39 |
40 | * MsgProtocol refactoring.
41 | * Reorganize context type for internal use.
42 |
43 | * TUI Improvement. (credit chhsiao90)
44 | * Add body viewer for different content type.
45 | * Using tab to switch between request and response.
46 |
47 | * Several bugs fix.
48 | * TLS layer
49 | * HTTP2 layer
50 |
51 | 28 June 2016: MicroProxy 0.2.0
52 |
53 | This release focus on adding new features and code refactoring.
54 |
55 | * Support TLS Protocol.
56 | * Add Plugin System.
57 | * Code refactoring. (credit chhsiao90)
58 | * Use tornado native http interface.
59 | * layer design.
60 | * UI Improvement. (credit chhsiao90)
61 |
62 | 9 May 2016: MicroProxy 0.1.0
63 |
64 | Initial release for MicroProxy. Support the following features.
65 |
66 | * Support socks and transparent proxy(Linux Only).
67 | * Support HTTP protocol only.
68 | * Basic Log Viewer.
69 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016, Rueimin Jiang. All rights reserved.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | test:
2 | python -B -m unittest discover
3 |
4 | lint:
5 | @flake8 && echo "Static Check Without Error"
6 |
7 | coverage:
8 | @coverage run --source=microproxy -m unittest discover
9 |
10 | install:
11 | pip install -U --no-deps .
12 |
13 | install-all:
14 | pip install -U .
15 | pip install -U .[viewer]
16 | pip install -U .[develop]
17 |
18 | run-server:
19 | mpserver --config-file conf/application.cfg --log-config-file conf/logging.cfg
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # microProxy
2 |
3 | **microProxy** is an http/https traffic interceptor which can help you debug http/https based network traffic.
4 | This project is highly inspired by [mitmproxy](https://github.com/mitmproxy/mitmproxy),
5 | but with some different [architecture design](https://github.com/mike820324/microProxy/wiki/System-Architecture).
6 |
7 |
8 | For more in depth information about our architecture design, please refer to the [wiki page](https://github.com/mike820324/microProxy/wiki).
9 | > Note: We are still at a very early stage and, as a result, the interface may change.
10 |
11 | [](https://travis-ci.org/mike820324/microProxy)
12 | [](https://coveralls.io/github/mike820324/microProxy?branch=master)
13 |
14 | ## Features:
15 | - Proxy Mode:
16 | - SOCKS5 Proxy
17 | - Transparent Proxy(Linux Only)
18 |
19 | - Protocol Support:
20 | - HTTP
21 | - HTTPS
22 | - HTTP2 (via ALPN, HTTP Upgrade is not supported)
23 |
24 | - Flexible Viewer Design:
25 | - Viewer can connect to proxy with different machine.
26 | - Support multiple viewer connect to the same proxy instance.
27 | - Implement your own viewer by following the Viewer communication mechanism.
28 |
29 | - Plugin System (still at a very early stage):
30 | - Support external script to modify the Request/Response content.
31 |
32 | ## Viewer Implementation List:
33 | - Console Viewer: Simple console dump viewer.
34 | - TUI Viewer: Terminal UI viewer which uses [gviewer](https://github.com/chhsiao90/gviewer).
35 | - [GUI Viewer](https://github.com/mike820324/microProxy-GUI): Graphic UI written in node.js and electron.
36 |
37 | ## System Requirement:
38 | **microProxy** can run in both **Linux** and **MacOS**.
39 | > Note: The transparent proxy mode can only work in Linux.
40 |
41 | In order for the http2 protocol to work properly, openssl version **1.0.2h** (with ALPN support) is required.
42 |
43 | ## Installation:
44 |
45 | This project is not uploaded to PyPI yet.
46 | Therefore, to install this project, please follow the steps below.
47 |
48 | ```bash
49 | # Install basic dependencies.
50 | pip install --process-dependency-links https://github.com/mike820324/microProxy/archive/v0.4.0.tar.gz#egg=microProxy
51 |
52 | # Install viewer related dependencies.
53 | pip install https://github.com/mike820324/microProxy/archive/v0.4.0.tar.gz#egg=microProxy[viewer]
54 | ```
55 |
56 | ## QuickStart:
57 | To run the proxy, simply type the following command.
58 |
59 | ```bash
60 | # create server root ca
61 | openssl req -new -x509 -days 365 -nodes -out ./cert.crt -keyout ./cert.key
62 |
63 | # start proxy server
64 | mpserver --viewer-channel tcp://127.0.0.1:5581 --cert-file ./cert.crt --key-file ./cert.key
65 |
66 | # start tui-viewer
67 | mptui --viewer-channel tcp://127.0.0.1:5581
68 |
69 | # start console-viewer
70 | mpdump --viewer-channel tcp://127.0.0.1:5581
71 | ```
72 |
73 | For more information about command line options and configurations,
74 | please refer to the [wiki page](https://github.com/mike820324/microProxy/wiki/Command-Line-Options-and-Config-Files).
75 |
76 | ## Troubleshooting:
77 |
78 | - Installation failed in macosx:
79 |
80 | Please follow this link https://cryptography.io/en/latest/installation/#building-cryptography-on-os-x to build on macOS.
81 |
--------------------------------------------------------------------------------
/conf/application.cfg:
--------------------------------------------------------------------------------
1 | [proxy]
2 | mode=socks
3 | host=127.0.0.1
4 | port=5580
5 |
6 | http.port=5000
7 | https.port=5001
8 |
9 | certfile=/tmp/cert.crt
10 | keyfile=/tmp/cert.key
11 |
12 | [channel]
13 | viewer=tcp://127.0.0.1:5581
14 | events=tcp://127.0.0.1:5582
15 |
--------------------------------------------------------------------------------
/conf/logging.cfg:
--------------------------------------------------------------------------------
1 | [handlers]
2 | keys=consoleHandler
3 |
4 | [handler_consoleHandler]
5 | class=StreamHandler
6 | formatter=formatter
7 | args=(sys.stdout,)
8 |
9 | [handler_fileHandler]
10 | class=FileHandler
11 | formatter=formatter
12 | args=('mpserver.log', 'w')
13 |
14 | [formatters]
15 | keys=formatter
16 |
17 | [formatter_formatter]
18 | format=%(asctime)s - %(name)-30s - %(levelname)-8s - %(message)s
19 | datefmt=
20 |
21 | [loggers]
22 | keys=root,cert_store,layer_manager,http1_protocol,tls_layer
23 |
24 | [logger_root]
25 | level=INFO
26 | handlers=consoleHandler
27 |
28 | [logger_cert_store]
29 | level=INFO
30 | handlers=consoleHandler
31 | qualname=microproxy.cert
32 | propagate=0
33 |
34 | [logger_layer_manager]
35 | level=INFO
36 | handlers=consoleHandler
37 | qualname=microproxy.layer.manager
38 | propagate=0
39 |
40 | [logger_tls_layer]
41 | level=INFO
42 | handlers=consoleHandler
43 | qualname=microproxy.layer.application.tls
44 | propagate=0
45 |
46 | [logger_http1_protocol]
47 | level=INFO
48 | handlers=consoleHandler
49 | qualname=microproxy.protocol.http1
50 | propagate=0
51 |
--------------------------------------------------------------------------------
/examples/plugins/no_gzip.py:
--------------------------------------------------------------------------------
1 | from microproxy.context.http import HttpHeaders
2 |
3 |
def on_request(plugin_context):
    """Rewrite the request's Accept-Encoding header to "deflate".

    Best-effort plugin hook: any failure is printed and the context is
    passed through unchanged.  HTTP/2 traffic is left untouched.
    """
    if plugin_context.scheme == "h2":
        # Leave HTTP/2 requests alone.
        return plugin_context

    try:
        # NOTE(review): HttpHeaders in microproxy.context.http does not
        # define get_dict(); presumably this example targets an older
        # header API -- confirm before relying on it.
        header_dict = plugin_context.request.headers.get_dict()
        for name in header_dict:
            if name.lower() == "accept-encoding":
                header_dict[name] = "deflate"

        plugin_context.request.headers = HttpHeaders(headers=header_dict)
    except Exception as e:
        print(e)
    return plugin_context
--------------------------------------------------------------------------------
/examples/viewer/simple_viewer.py:
--------------------------------------------------------------------------------
1 | # Simple Viewer Implementation for microProxy.
2 | # Usage: python simple_viewer.py --viewer-channel tcp://127.0.0.1:5581
3 |
4 | import zmq
5 | import json
6 | import argparse
7 |
8 |
def create_msg_channel(channel):
    """Open a ZeroMQ SUB socket connected to *channel*, subscribed to everything."""
    ctx = zmq.Context()
    sub_socket = ctx.socket(zmq.SUB)
    sub_socket.connect(channel)
    # An empty prefix subscribes to all topics.
    sub_socket.setsockopt(zmq.SUBSCRIBE, "")
    return sub_socket
15 |
16 |
def main(channel):
    """Consume viewer messages from *channel* and print one line per exchange.

    Loops forever until interrupted with Ctrl-C.
    """
    socket = create_msg_channel(channel)
    print("Simple Viewer Example")

    while True:
        try:
            # Each zmq message is a JSON-encoded viewer context.
            data = socket.recv()
            message = json.loads(data)

            scheme = message["scheme"]
            host = message["host"]
            port = message["port"]
            path = message["path"]
            request = message["request"]
            response = message["response"]

            print("{0:3} {1:8} {2}://{3}:{4}{5:50}".format(
                response["code"], request["method"], scheme, host, port, path))
        except KeyboardInterrupt:
            print("Bye Bye")
            exit(0)
40 |
41 |
if __name__ == '__main__':
    # Script entry point: parse the channel address and start consuming.
    arg_parser = argparse.ArgumentParser(description="Viewer Example")
    arg_parser.add_argument(
        '--viewer-channel', dest="channel", required=True,
        help="zmq channel. ex. tcp://127.0.0.1:5581")
    args = arg_parser.parse_args()

    main(args.channel)
50 |
--------------------------------------------------------------------------------
/microproxy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/__init__.py
--------------------------------------------------------------------------------
/microproxy/cert.py:
--------------------------------------------------------------------------------
1 | from OpenSSL import crypto
2 | import time
3 |
4 | from microproxy.log import ProxyLogger
5 | logger = ProxyLogger.get_logger(__name__)
6 |
7 |
class CertStore(object):
    """In-memory store of forged leaf certificates signed by the proxy CA.

    The CA certificate/key pair is loaded once from the paths given in
    *config*; per-host certificates are forged lazily and cached by
    common name.
    """

    def __init__(self, config):
        self.ca_root, self.private_key = self._get_root(
            config["certfile"], config["keyfile"])
        # Maps common name -> OpenSSL X509 certificate.
        self.certs_cache = dict()

    def _get_root(self, certfile, keyfile):
        """Load the PEM-encoded CA certificate and private key from disk."""
        with open(certfile, "rb") as fp:
            ca_root = crypto.load_certificate(crypto.FILETYPE_PEM, fp.read())

        with open(keyfile, "rb") as fp:
            private_key = crypto.load_privatekey(crypto.FILETYPE_PEM, fp.read())

        return (ca_root, private_key)

    def get_cert_and_pkey(self, common_name):
        """Return a (certificate, private_key) pair for *common_name*.

        Serves from the cache when possible; otherwise forges and caches
        a new certificate.  The CA private key is reused for every leaf.
        """
        cert = self.get_cert_from_cache(common_name)
        if cert is None:
            logger.debug("create cert commonname:{0} to cache".format(
                common_name))
            cert = self.create_cert(common_name)
        else:
            logger.debug("get cert commonname:{0} from cache".format(
                common_name))
        return (cert, self.private_key)

    def create_cert(self, common_name):
        """Forge, sign, cache, and return a certificate for *common_name*."""
        cert = crypto.X509()

        # Serial derived from the current time to keep serials unique-ish.
        cert.set_serial_number(int(time.time() * 10000))
        # Valid from 48h in the past (clock-skew slack) ...
        cert.gmtime_adj_notBefore(-3600 * 48)
        # ... until roughly three years from now (94608000 seconds).
        cert.gmtime_adj_notAfter(94608000)
        cert.get_subject().CN = common_name

        cert.set_issuer(self.ca_root.get_subject())
        # Reuse the CA's key pair so self.private_key matches every leaf.
        cert.set_pubkey(self.ca_root.get_pubkey())
        cert.set_version(2)  # X509 v3
        cert.sign(self.private_key, "sha256")

        self.certs_cache[common_name] = cert
        return cert

    def get_cert_from_cache(self, common_name):
        """Return the cached certificate for *common_name*, or None on a miss."""
        return self.certs_cache.get(common_name)
61 |
--------------------------------------------------------------------------------
/microproxy/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import ConfigParser
3 | from os import path
4 |
5 | from microproxy.log import ProxyLogger
6 |
7 |
8 | _OPTION_TYPES = ["str", "bool", "int", "list:str", "list:int"]
9 |
10 |
class ConfigParserBuilder(object):
    """Factory helpers for the ini-file parser and the argparse CLI parser."""

    @staticmethod
    def setup_ini_parser():
        """Return a fresh SafeConfigParser for reading config files."""
        return ConfigParser.SafeConfigParser()

    @staticmethod
    def setup_cmd_parser(config_field_info):
        """Build an argparse parser exposing one option per config field.

        Boolean fields become store_true flags; everything else is a
        plain string option.  A --config-file option is always present.
        """
        parser = argparse.ArgumentParser(description="MicroProxy a http/https proxy interceptor.")

        parser.add_argument("--config-file",
                            default="",
                            help="Specify config file location")

        for field_name, field_info in config_field_info.iteritems():
            arg_kwargs = {"dest": field_name, "help": field_info["help"]}
            if field_info["type"] == "bool":
                # Boolean options are flags, not value-taking arguments.
                arg_kwargs["action"] = "store_true"
            parser.add_argument(*field_info["cmd_flags"], **arg_kwargs)

        return parser
37 |
38 |
def define_section(config_field_info,
                   section,
                   help_str,
                   option_info):
    """Register *option_info* as configuration section *section*.

    ``help_str`` is accepted for symmetry with define_option but is not
    currently stored anywhere.

    Raises:
        ValueError: when either dict argument is not a dict.
    """
    if not isinstance(option_info, dict):
        raise ValueError("Expect option_info as a dictionary")
    if not isinstance(config_field_info, dict):
        raise ValueError("Expect config_field_info as a dictionary")

    config_field_info[section] = option_info
49 |
50 |
def define_option(option_info,
                  option_name,
                  help_str,
                  option_type,
                  default=None,
                  cmd_flags=None,
                  config_file_flags=None,
                  choices=None
                  ):
    """Register one option definition into *option_info*.

    Validates the arguments, then stores a descriptor dict under
    *option_name* holding the help text, declared type, optional default,
    command-line flags, and config-file location.

    Raises:
        ValueError: on a non-dict *option_info*, an option reachable
            from neither source, a duplicate name, an unknown type, a
            non-list *choices*, or a default missing from *choices*.
    """
    if not isinstance(option_info, dict):
        raise ValueError("Expect option_info as a dictionary")

    # An option reachable from neither the command line nor a config
    # file could never be set.
    if not config_file_flags and not cmd_flags:
        raise ValueError("Useless option")

    if option_name in option_info:
        raise ValueError("option {} is already defined".format(option_name))

    if option_type not in _OPTION_TYPES:
        raise ValueError("Unsupport type : {0}".format(option_type))

    if choices is not None:
        if not isinstance(choices, list):
            raise ValueError("choices should be a list object")
        if default is not None and default not in choices:
            raise ValueError("default value {0} not in {1}".format(default, choices))

    option = {"help": help_str, "type": option_type}

    if choices:
        option["choices"] = choices

    # An option with a default is optional; without one it is required.
    if default is None:
        option["is_require"] = True
    else:
        option["is_require"] = False
        option["default"] = default

    if cmd_flags:
        if isinstance(cmd_flags, str):
            option["cmd_flags"] = [cmd_flags]
        elif isinstance(cmd_flags, list):
            option["cmd_flags"] = cmd_flags

    if config_file_flags:
        # "section:key" addressing inside the ini file.
        parts = config_file_flags.split(":")
        option["config_file_flags"] = {"section": parts[0], "key": parts[1]}

    option_info[option_name] = option
106 |
107 |
def verify_config(config_field_info, config):
    """Validate *config* against the field definitions.

    Raises:
        KeyError: when a required field is missing.
        ValueError: when a value falls outside its declared choices.

    Unknown fields are tolerated and merely logged as a warning.
    """
    missing_fields = [name for name, info in config_field_info.iteritems()
                      if info["is_require"] and name not in config]
    if missing_fields:
        raise KeyError("missing config field: [{0}]".format(",".join(missing_fields)))

    # NOTE: Verify that the value in choices
    for field in config:
        try:
            if config[field] not in config_field_info[field]["choices"]:
                raise ValueError("illgeal value: {0} at field: {1}".format(config[field], field))
        except KeyError:
            # Field has no choices constraint (or is unknown) -- skip.
            pass

    unknown_fields = [field for field in config if field not in config_field_info]
    if unknown_fields:
        ProxyLogger.get_logger(__name__).warning(
            "Unknown field names: {0}".format(",".join(unknown_fields)))
127 |
128 |
def parse_config(config_field_info, args=None):  # pragma: no cover
    """Merge command-line and ini-file settings into a verified config dict."""
    cmd_parser = ConfigParserBuilder.setup_cmd_parser(config_field_info)
    cmd_config = cmd_parser.parse_args(args) if args else cmd_parser.parse_args()

    ini_parser = ConfigParserBuilder.setup_ini_parser()
    # An explicit --config-file is read alongside the default locations.
    ini_parser.read([cmd_config.config_file] + resolve_default_config_pathes())

    config = gen_config(config_field_info, ini_parser, vars(cmd_config))

    verify_config(config_field_info, config)
    return config
143 |
144 |
def gen_file_config(config_field_info, file_config):
    """Pull values for every file-backed field out of the parsed ini file.

    Fields without a config_file_flags entry, or missing from the ini
    file, are silently skipped.
    """
    config = dict()
    for field_name, field_info in config_field_info.iteritems():
        try:
            location = field_info["config_file_flags"]
            config[field_name] = file_config.get(
                location["section"], location["key"])
        except (KeyError, ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            continue

    return config
156 |
157 |
def gen_config(config_field_info, file_config, cmd_config):
    """Combine file values, command-line overrides, defaults, and types.

    Precedence: command line > config file > declared defaults.
    """
    config = dict()
    config.update(gen_file_config(config_field_info, file_config))

    # Drop unset command-line options and the meta --config-file entry.
    overrides = {k: v for k, v in cmd_config.iteritems()
                 if v is not None and k != "config_file"}
    config.update(overrides)

    config.update(append_default(config, config_field_info))
    config.update(type_transform(config, config_field_info))
    return config
169 |
170 |
def append_default(config, optionInfo):
    """Return defaults for every optional field absent from *config*."""
    return {
        field: info["default"]
        for field, info in optionInfo.items()
        if field not in config and not info["is_require"]
    }
175 |
176 |
def type_transform(config, optionInfo):
    """Coerce raw (mostly string) config values to their declared types.

    Fields absent from *optionInfo* are dropped.  Values sourced from an
    ini file arrive as strings; command-line booleans arrive as real
    bools via argparse's store_true.

    Raises:
        ValueError: for a declared type outside _OPTION_TYPES.
    """
    new_config = {}
    for field in config:
        if field not in optionInfo:
            continue
        option_type = optionInfo[field]["type"]
        value = config[field]

        if "str" == option_type:
            new_config[field] = value
        elif "bool" == option_type:
            # BUG FIX: ini-sourced booleans arrive as the *string*
            # "True"/"False", which is always truthy.  Interpret common
            # spellings; genuine bools pass through unchanged.
            if isinstance(value, str):
                new_config[field] = value.strip().lower() in ("true", "1", "yes", "on")
            else:
                new_config[field] = value
        elif "int" == option_type:
            new_config[field] = int(value)
        elif "list:str" == option_type:
            new_config[field] = [v for v in value.split(",") if len(v) > 0]
        elif "list:int" == option_type:
            # List comprehension instead of map(): identical result on
            # Python 2 and stays a list on Python 3.
            new_config[field] = [int(v) for v in value.split(",") if len(v) > 0]
        else:
            raise ValueError("Non supported type")

    return new_config
198 |
199 |
def resolve_default_config_pathes():
    """Return the config-file search path, nearest location first."""
    home_cfg = path.join(path.expanduser("~"), ".microproxy", "application.cfg")
    return ["application.cfg", "conf/application.cfg", home_cfg]
206 |
--------------------------------------------------------------------------------
/microproxy/context/__init__.py:
--------------------------------------------------------------------------------
1 | from server import ServerContext
2 | from layer import LayerContext
3 | from viewer import ViewerContext
4 | from plugin import PluginContext
5 | from http import HttpRequest, HttpResponse, HttpHeaders
6 | from tls import TlsInfo
7 | from event import Event
8 |
--------------------------------------------------------------------------------
/microproxy/context/base.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
class Serializable(object):
    """Mixin giving value semantics plus dict (de)serialization.

    serialize() walks the instance __dict__, recursing into nested
    Serializable values; deserialize() rebuilds an instance by passing a
    dict as keyword arguments to the subclass constructor.
    """

    def serialize(self):
        """Return this object as a plain dict (nested Serializables included)."""
        data = {}
        for k, v in self.__dict__.items():
            if isinstance(v, Serializable):
                data[k] = v.serialize()
            else:
                data[k] = v
        return data

    @classmethod
    def deserialize(cls, data):
        """Build an instance of *cls* from *data*.

        Accepts an existing instance (returned as-is), a dict (passed as
        keyword arguments to the constructor), or a falsy value
        (returns None).

        Raises:
            ValueError: for any other truthy input.
        """
        if isinstance(data, cls):
            return data
        elif isinstance(data, dict):
            return cls(**data)
        elif data:
            # BUG FIX: the format arguments were swapped -- the message
            # used to read "cannot deserialize to <data type> with <class>".
            raise ValueError("cannot deserialize to {0} with {1}".format(
                cls.__name__, type(data).__name__))
        else:
            return None

    def __str__(self):
        return "{0}{1}".format(type(self).__name__, self.__dict__)

    def __repr__(self):
        return "{0}{1}".format(type(self).__name__, self.__dict__)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    # BUG FIX: inequality must be spelled __ne__ to hook the != operator;
    # the old __neq__ was never invoked by Python.  The old name is kept
    # as an alias for any code that called it directly.
    def __ne__(self, other):
        return not self.__eq__(other)

    __neq__ = __ne__
37 |
38 |
def parse_version(version):
    """Parse "major.minor.patch[-suffix|+build]" into an int 3-tuple.

    Components after the third (separated by '.', '-' or '+') are
    ignored.
    """
    parts = re.split(r"\.|-|\+", version)
    return (int(parts[0]), int(parts[1]), int(parts[2]))
46 |
--------------------------------------------------------------------------------
/microproxy/context/event.py:
--------------------------------------------------------------------------------
1 | from base import Serializable
2 |
3 |
class Event(Serializable):
    """A named event with an optional payload.

    Inherits dict (de)serialization and value equality from Serializable.
    """

    def __init__(self, name="", context=None):
        # name: event identifier string.
        # context: arbitrary payload; exact shape depends on the emitting
        #     side (not visible here) -- presumably a context dict.
        self.name = name
        self.context = context
8 |
--------------------------------------------------------------------------------
/microproxy/context/http.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | import time
3 |
4 | from base import Serializable
5 |
6 |
class HttpRequest(Serializable):
    """A captured HTTP request (version, method, path, headers, body).

    The body is kept as raw bytes in memory and base64-encoded on the
    wire (serialize/deserialize).  A microsecond timestamp is recorded
    at construction time unless one is supplied.
    """

    def __init__(self,
                 version="",
                 method="",
                 path="",
                 headers=None,
                 body=b"",
                 timestamp=None,
                 **kwargs):
        super(HttpRequest, self).__init__()
        self.timestamp = timestamp or int(time.time() * 1000000)
        self.version = version
        self.method = method
        self.path = path
        self.body = body
        # Normalize whatever was passed (list/dict/HttpHeaders/None).
        self.headers = HttpHeaders(headers)

    def serialize(self):
        """Serialize to a dict, base64-encoding the raw body."""
        data = super(HttpRequest, self).serialize()
        data["body"] = self.body.encode("base64")
        return data

    @classmethod
    def deserialize(cls, data):
        """Rebuild a request from a dict (instances pass straight through)."""
        if isinstance(data, cls):
            return data

        req = super(HttpRequest, cls).deserialize(data)
        if req and req.body:
            req.body = req.body.decode("base64")
        return req

    def __str__(self):
        # The body is omitted on purpose: it may be large or binary.
        summary = {
            "version": self.version,
            "method": self.method,
            "path": self.path,
            "headers": self.headers,
        }
        return "{0}{1}".format(type(self).__name__, summary)
47 |
48 |
class HttpResponse(Serializable):
    """A captured HTTP response (code, reason, version, headers, body).

    The body is kept as raw bytes in memory and base64-encoded on the
    wire (serialize/deserialize).  A microsecond timestamp is recorded
    at construction time unless one is supplied.
    """

    def __init__(self,
                 code="",
                 reason="",
                 version="",
                 headers=None,
                 body=b"",
                 timestamp=None,
                 **kwargs):
        super(HttpResponse, self).__init__()
        self.timestamp = timestamp or int(time.time() * 1000000)
        self.code = code
        self.reason = reason
        self.version = version
        self.body = body
        # Normalize whatever was passed (list/dict/HttpHeaders/None).
        self.headers = HttpHeaders(headers)

    def serialize(self):
        """Serialize to a dict, base64-encoding the raw body."""
        data = super(HttpResponse, self).serialize()
        data["body"] = self.body.encode("base64")
        return data

    @classmethod
    def deserialize(cls, data):
        """Rebuild a response from a dict (instances pass straight through)."""
        if isinstance(data, cls):
            return data

        resp = super(HttpResponse, cls).deserialize(data)
        if resp and resp.body:
            resp.body = resp.body.decode("base64")
        return resp

    def __str__(self):
        # The body is omitted on purpose: it may be large or binary.
        summary = {
            "version": self.version,
            "code": self.code,
            "reason": self.reason,
            "headers": self.headers,
        }
        return "{0}{1}".format(type(self).__name__, summary)
89 |
90 |
class HttpHeaders(Serializable):
    """An ordered, case-insensitive multi-map of HTTP header pairs.

    Stored internally as a list of (name, value) tuples, preserving
    order and duplicates.  Lookup and membership tests are
    case-insensitive.
    """

    def __init__(self, headers=None):
        headers = headers or []
        # NOTE: OrderedDict is a dict subclass, so a single dict check
        # suffices (the old explicit OrderedDict check was redundant).
        if isinstance(headers, dict):
            # Copy into pairs so later mutation cannot affect the caller.
            self.headers = list(headers.items())
        elif isinstance(headers, list):
            self.headers = headers
        elif isinstance(headers, HttpHeaders):
            self.headers = list(headers.headers)
        else:
            # The old `elif not headers` branch was unreachable: every
            # falsy value is already normalized to [] above.
            raise ValueError("HttpHeaders not support with: " + str(type(headers)))

    def __len__(self):
        return len(self.headers)

    def __contains__(self, key):
        lowered = key.lower()
        return any(k.lower() == lowered for k, _ in self.headers)

    def __getitem__(self, key):
        # All values for the (case-insensitive) key, comma-joined.
        lowered = key.lower()
        return ", ".join([v for k, v in self.headers if k.lower() == lowered])

    def __setitem__(self, key, value):
        # NOTE: appends a new pair; existing pairs with the same name are
        # kept (multi-value semantics), not replaced.
        self.headers.append((key, value))

    def __iter__(self):
        return self.headers.__iter__()

    def __eq__(self, other):
        # BUG FIX: guard the type so comparing against a non-HttpHeaders
        # value returns False instead of raising AttributeError.
        return isinstance(other, HttpHeaders) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def serialize(self):
        # A shallow copy of the pair list.
        return list(self.headers)
131 |
--------------------------------------------------------------------------------
/microproxy/context/layer.py:
--------------------------------------------------------------------------------
class LayerContext(object):
    """Mutable per-connection state handed from one proxy layer to the next.

    Carries the client/destination streams, the target address and
    scheme, negotiated TLS details for both hops, and a completion flag.
    """

    def __init__(self,
                 mode,
                 src_stream=None,
                 dest_stream=None,
                 scheme=None,
                 host=None,
                 port=None,
                 client_tls=None,
                 server_tls=None,
                 done=False,
                 src_info=None):
        # Only the four supported proxying modes are accepted.
        if mode not in ("socks", "transparent", "replay", "http"):
            raise ValueError("incorrect mode value")

        self.mode = mode
        self.src_stream = src_stream
        self.dest_stream = dest_stream
        self.scheme = scheme
        self.host = host
        self.port = port
        self.client_tls = client_tls
        self.server_tls = server_tls
        self.done = done
        self.src_info = src_info
29 |
--------------------------------------------------------------------------------
/microproxy/context/plugin.py:
--------------------------------------------------------------------------------
1 |
class PluginContext(object):
    """Context handed to plugin hooks for request/response interception.

    Bundles the target address with the HTTP request/response objects so
    a plugin can inspect or rewrite them.
    """

    def __init__(self,
                 scheme,
                 host,
                 port,
                 path,
                 request,
                 response):
        super(PluginContext, self).__init__()
        self.scheme = scheme
        self.host = host
        self.port = port
        self.path = path
        self.request = request
        self.response = response
21 |
--------------------------------------------------------------------------------
/microproxy/context/server.py:
--------------------------------------------------------------------------------
class ServerContext(object):
    """Process-wide server state shared across layers.

    Holds the event loop (presumably a tornado IOLoop -- confirm against
    proxy.py), the parsed config, the message interceptor, and the
    certificate store.
    """

    def __init__(self,
                 io_loop=None,
                 config=None,
                 interceptor=None,
                 cert_store=None):
        self.io_loop = io_loop
        self.config = config
        self.interceptor = interceptor
        self.cert_store = cert_store
12 |
--------------------------------------------------------------------------------
/microproxy/context/tls.py:
--------------------------------------------------------------------------------
1 | from base import Serializable
2 |
3 |
class TlsInfo(Serializable):
    """Serializable summary of a negotiated TLS session."""

    def __init__(self,
                 version="",
                 cipher="",
                 sni="",
                 alpn="",
                 **kwargs):
        # Extra keys from other serialized versions are accepted and ignored.
        super(TlsInfo, self).__init__()
        self.alpn = alpn
        self.cipher = cipher
        self.sni = sni
        self.version = version
16 |
--------------------------------------------------------------------------------
/microproxy/context/viewer.py:
--------------------------------------------------------------------------------
1 | from http import HttpRequest, HttpResponse
2 | from tls import TlsInfo
3 | from base import Serializable, parse_version
4 | from microproxy.version import VERSION
5 |
6 |
# Version assumed for serialized contexts that predate the "version" field.
_DEFAULT_VERSION = "0.4.0"
8 |
9 |
class ViewerContext(Serializable):
    """
    ViewerContext: Context used to communicate with viewer.

    Scalar fields are stored as-is; nested payloads arrive in serialized
    form and are rebuilt into typed objects on construction.
    """
    def __init__(self,
                 scheme="",
                 host="",
                 port=0,
                 path="",
                 request=None,
                 response=None,
                 client_tls=None,
                 server_tls=None,
                 version=VERSION,
                 **kwargs):
        super(ViewerContext, self).__init__()
        self.version = version
        self.scheme = scheme
        self.host = host
        self.port = port
        self.path = path

        # Rebuild nested contexts from their serialized dict form.
        self.request = HttpRequest.deserialize(request)
        self.response = HttpResponse.deserialize(response)
        self.client_tls = TlsInfo.deserialize(client_tls)
        self.server_tls = TlsInfo.deserialize(server_tls)

    @classmethod
    def deserialize(cls, data):
        """Upgrade *data* in place to the current version, then build it."""
        enrich_data(data)
        return ViewerContext(**data)
42 |
43 |
def enrich_data(data):
    """Upgrade a serialized context dict in place to the current VERSION.

    Converters are chained: each maps a parsed version one step forward
    until no converter is registered for the current version.
    """
    data.setdefault("version", _DEFAULT_VERSION)

    converter = converters.get(parse_version(data["version"]))
    while converter is not None:
        converter(data)
        converter = converters.get(parse_version(data["version"]))
    data["version"] = VERSION
56 |
57 |
def convert_040_041(ctx):
    """Migration step: lift a 0.4.0 serialized context to 0.4.1."""
    # Could remove in the future
    ctx.update(version="0.4.1")
61 |
62 |
# Version-upgrade chain: maps a parsed (major, minor, patch) version to the
# converter that lifts serialized data one step forward; enrich_data applies
# entries repeatedly until the current version has no converter.
converters = {
    (0, 4, 0): convert_040_041
}
66 |
--------------------------------------------------------------------------------
/microproxy/event/__init__.py:
--------------------------------------------------------------------------------
1 | from manager import EventManager
2 | from client import EventClient
3 | from manager import start_events_server
4 | from types import REPLAY
5 |
--------------------------------------------------------------------------------
/microproxy/event/client.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | from microproxy.log import ProxyLogger
3 |
4 | logger = ProxyLogger.get_logger(__name__)
5 |
6 |
class EventClient(object):
    """Pushes serialized events to the events server over a ZMQ PUSH socket."""

    def __init__(self, channel, zmq_socket=None):
        super(EventClient, self).__init__()
        # A pre-built socket may be injected (e.g. for tests); otherwise
        # connect a fresh PUSH socket to the given channel.
        self.zmq_socket = zmq_socket or self._create_socket(channel)

    def _create_socket(self, channel):  # pragma: no cover
        """Create a PUSH socket connected to *channel*."""
        zmq_context = zmq.Context()
        sock = zmq_context.socket(zmq.PUSH)
        sock.connect(channel)
        logger.info("EventClient is connect to {0}".format(channel))
        return sock

    def send_event(self, event):
        """Serialize *event* and push it as a JSON message."""
        self.zmq_socket.send_json(event.serialize())
21 |
--------------------------------------------------------------------------------
/microproxy/event/manager.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from microproxy.context import Event
4 | from microproxy.log import ProxyLogger
5 | from replay import ReplayHandler
6 | from types import REPLAY
7 |
8 | logger = ProxyLogger.get_logger(__name__)
9 |
10 |
class EventManager(object):
    """Receives event messages from a ZMQ stream and dispatches them.

    Malformed messages and handler failures are logged and dropped so a
    misbehaving client cannot kill the receive loop.
    """
    def __init__(self, server_state, zmq_stream, handler=None):
        super(EventManager, self).__init__()
        # Allow handler injection for tests; default to the real dispatcher.
        self.handler = handler or EventHandler(server_state)
        self.zmq_stream = zmq_stream

    def start(self):
        """Begin consuming messages from the stream."""
        self.zmq_stream.on_recv(self._on_recv)

    def _on_recv(self, msg_parts):
        """Parse the first message part as a JSON event and dispatch it."""
        message = msg_parts[0]
        try:
            event = Event.deserialize(json.loads(message))
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # still propagate instead of being silently logged.
            logger.error("Wrong message received: {0}".format(message))
        else:
            logger.debug("Receive event: {0}".format(event))
            try:
                self.handler.handle_event(event)
            except Exception as e:
                logger.error("handle event failed: {0}".format(e))
32 |
33 |
class EventHandler(object):
    """Routes events to a per-event-name handler."""

    def __init__(self, server_state):
        # Registry: event name -> handler instance.
        self.handlers = {REPLAY: ReplayHandler(server_state)}

    def handle_event(self, event):
        """Dispatch *event* by name; unknown names are logged and ignored."""
        try:
            handler = self.handlers[event.name]
        except KeyError:
            logger.error("Unhandled event: {0}".format(event.name))
        else:
            handler.handle(event)
45 |
46 |
def start_events_server(server_state, zmq_stream):
    """Create an EventManager bound to *zmq_stream* and start receiving."""
    manager = EventManager(server_state, zmq_stream)
    manager.start()
49 |
--------------------------------------------------------------------------------
/microproxy/event/replay.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tornado.iostream import PipeIOStream
3 | from tornado import gen
4 |
5 | import h11
6 | from microproxy.protocol.http1 import Connection as Http1Connection
7 | from microproxy.protocol.http2 import Connection as Http2Connection
8 | from microproxy.utils import curr_loop
9 | from microproxy.context import ViewerContext, LayerContext
10 | from microproxy.layer import manager as default_layer_manager
11 |
12 | from microproxy.log import ProxyLogger
13 | logger = ProxyLogger.get_logger(__name__)
14 |
15 |
class ReplayHandler(object):
    """Replays a captured request by pushing it through a fresh layer chain.

    The serialized request is written into an OS pipe; the read end of the
    pipe then acts as the "client" stream for the layer manager.
    """

    def __init__(self, server_state, layer_manager=None, io_loop=None):
        self.server_state = server_state
        self.io_loop = io_loop or curr_loop()
        self.layer_manager = layer_manager or default_layer_manager

    @gen.coroutine
    def handle(self, event):
        """Deserialize the event context, write the request, run the layers."""
        logger.debug("start handling replay event")
        try:
            view_ctx = ViewerContext.deserialize(event.context)
            write_stream, read_stream = self._create_streams()

            if view_ctx.scheme in ("http", "https"):
                self._send_http1_request(write_stream, view_ctx)
            elif view_ctx.scheme == "h2":
                self._send_http2_request(write_stream, view_ctx)
            else:
                raise ValueError("not support replay with: {0}".format(
                    view_ctx.scheme))

            layer_ctx = LayerContext(
                mode="replay",
                src_stream=read_stream,
                host=view_ctx.host,
                port=view_ctx.port,
                scheme=view_ctx.scheme)

            first_layer = self.layer_manager.get_first_layer(layer_ctx)
            yield self.layer_manager.run_layers(
                self.server_state, first_layer, layer_ctx)
        except Exception as e:
            logger.exception(e)
        else:
            logger.debug("replay event successfully")

    def _create_streams(self):
        """Return (write_stream, read_stream) wrapping a fresh OS pipe."""
        read_fd, write_fd = os.pipe()
        return (PipeIOStream(write_fd, io_loop=self.io_loop),
                PipeIOStream(read_fd, io_loop=self.io_loop))

    def _send_http1_request(self, stream, context):
        """Write the captured request as HTTP/1.x onto *stream*."""
        logger.debug("replay http1 request: {0}".format(context.request))
        Http1Connection(h11.CLIENT, stream).send_request(context.request)

    def _send_http2_request(self, stream, context):
        """Write the captured request as an h2 stream onto *stream*."""
        logger.debug("replay http2 request: {0}".format(context.request))
        conn = Http2Connection(stream, client_side=True)
        conn.initiate_connection()
        conn.send_request(conn.get_next_available_stream_id(), context.request)
68 |
--------------------------------------------------------------------------------
/microproxy/event/types.py:
--------------------------------------------------------------------------------
# Event name for replaying a captured request through the proxy.
REPLAY = "replay"
2 |
--------------------------------------------------------------------------------
/microproxy/exception.py:
--------------------------------------------------------------------------------
class ProtocolError(Exception):
    """Raised when a peer violates the expected wire protocol."""
3 |
4 |
class StreamClosedError(Exception):
    """Signals that one side of the proxied connection has closed.

    The message embeds the concrete subclass name so logs show which
    side (src/dest) went away.
    """

    def __init__(self, detail="closed"):
        message = "{0}: {1}".format(type(self).__name__, detail)
        super(StreamClosedError, self).__init__(message)
10 |
11 |
class SrcStreamClosedError(StreamClosedError):
    """The client-side (source) stream closed."""
14 |
15 |
class DestStreamClosedError(StreamClosedError):
    """The server-side (destination) stream closed."""
18 |
19 |
class DestNotConnectedError(Exception):
    """Raised when the destination address could not be connected."""

    def __init__(self, addr):
        message = "Address: {0}".format(addr)
        super(DestNotConnectedError, self).__init__(message)
24 |
25 |
class Http2Error(Exception):
    """Wraps an error raised while proxying an HTTP/2 connection.

    Args:
        conn: connection wrapper the error occurred on (used in the message).
        error: the original exception instance.
        error_msg: human-readable summary of what failed.
        stream_id: h2 stream id when the error is stream-scoped; None (or 0)
            produces the connection-level message form.
    """
    def __init__(self, conn, error, error_msg, stream_id=None):
        # Use the exception's class name (matching StreamClosedError's
        # type(self).__name__ style) instead of the raw type repr, which
        # rendered as "<class 'ValueError'>" and cluttered logs.
        if stream_id:
            err = "{0}: {1} on {2}, stream_id={3}, cause is {4}".format(
                type(error).__name__, error_msg, conn, stream_id, error.args)
        else:
            err = "{0}: {1} on {2}, cause is {3}".format(
                type(error).__name__, error_msg, conn, error.args)
        super(Http2Error, self).__init__(err)
35 |
36 |
class TlsError(Exception):
    """Raised for failures during TLS handling."""
39 |
--------------------------------------------------------------------------------
/microproxy/interceptor/__init__.py:
--------------------------------------------------------------------------------
1 | from msg_publisher import MsgPublisher
2 | from plugin_manager import PluginManager
3 | from interceptor import Interceptor
4 |
--------------------------------------------------------------------------------
/microproxy/interceptor/interceptor.py:
--------------------------------------------------------------------------------
1 | from microproxy.context import ViewerContext, PluginContext
2 |
3 |
class Interceptor(object):
    """Runs plugin hooks on requests/responses and publishes exchanges.

    Both collaborators are optional: without a plugin manager the hooks are
    pass-through; without a publisher, publish() is a no-op.
    """
    def __init__(self, plugin_manager=None, msg_publisher=None):
        self.msg_publisher = msg_publisher
        self.plugin_manager = plugin_manager

    def _build_plugin_context(self, layer_context, request, response):
        # Shared construction for request()/response(); previously duplicated.
        return PluginContext(
            scheme=layer_context.scheme,
            host=layer_context.host,
            port=layer_context.port,
            path=request.path,
            request=request,
            response=response)

    def request(self, layer_context, request):
        """Run request plugins; returns the (possibly modified) context."""
        plugin_context = self._build_plugin_context(
            layer_context, request, None)

        if self.plugin_manager is None:
            return plugin_context

        return self.plugin_manager.exec_request(plugin_context)

    def response(self, layer_context, request, response):
        """Run response plugins; returns the (possibly modified) context."""
        plugin_context = self._build_plugin_context(
            layer_context, request, response)

        if self.plugin_manager is None:
            return plugin_context

        return self.plugin_manager.exec_response(plugin_context)

    def publish(self, layer_context, request, response):
        """Publish the completed exchange to viewers (no-op when unset)."""
        if self.msg_publisher is None:
            return

        viewer_context = ViewerContext(
            scheme=layer_context.scheme,
            host=layer_context.host,
            port=layer_context.port,
            path=request.path,
            request=request,
            response=response,
            client_tls=layer_context.client_tls,
            server_tls=layer_context.server_tls)

        self.msg_publisher.publish(viewer_context)
54 |
--------------------------------------------------------------------------------
/microproxy/interceptor/msg_publisher.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
class MsgPublisher(object):
    """Publishes viewer contexts as JSON messages on a ZMQ socket."""

    # Topic frame prepended to every published message.
    TOPIC = "message"

    def __init__(self, config, zmq_socket):
        # config is accepted for interface compatibility but unused here.
        super(MsgPublisher, self).__init__()
        self.zmq_socket = zmq_socket

    def publish(self, viewer_context):
        """Serialize *viewer_context* and send it under TOPIC."""
        payload = json.dumps(viewer_context.serialize())
        self.zmq_socket.send_multipart([self.TOPIC, payload])
15 |
--------------------------------------------------------------------------------
/microproxy/interceptor/plugin_manager.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from copy import copy
4 | from watchdog.events import RegexMatchingEventHandler
5 |
6 | if sys.platform == "darwin":
7 | from watchdog.observers.polling import PollingObserver as Observer
8 | else:
9 | from watchdog.observers import Observer
10 |
11 | from microproxy.log import ProxyLogger
12 | logger = ProxyLogger.get_logger(__name__)
13 |
14 |
class PluginEventHandler(RegexMatchingEventHandler):
    """Watchdog handler firing *callback* whenever the plugin file changes."""

    def __init__(self, filename, callback):
        super(PluginEventHandler, self).__init__(
            ignore_directories=True, regexes=['.*' + filename])
        self.callback = callback

    def on_modified(self, event):
        # The event payload is irrelevant; any modification triggers a reload.
        self.callback()
23 |
24 |
class Plugin(object):
    """A single user plugin loaded from a Python file.

    The file is exec'd into a private namespace and re-exec'd whenever it
    changes on disk (via a watchdog observer). Only the hook functions in
    PLUGIN_METHODS are exposed as attributes.
    """
    PLUGIN_METHODS = ["on_request", "on_response"]

    def __init__(self, plugin_path):
        self.plugin_path = os.path.abspath(plugin_path)
        self.plugin_name = os.path.basename(self.plugin_path)
        self.plugin_dir = os.path.dirname(self.plugin_path)
        self.namespace = None
        self._load_plugin()
        self._register_watcher()

    def _register_watcher(self):
        """Watch the plugin's directory and reload it on modification."""
        logger.debug("Register File Watcher for {0}".format(self.plugin_name))
        self.event_handler = PluginEventHandler(self.plugin_name,
                                                self._reload_plugin)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, self.plugin_dir)
        self.observer.start()

    def _load_plugin(self):
        """Execute the plugin file into a fresh namespace.

        Load errors are logged but not raised, so a broken plugin does not
        take down the proxy.
        """
        sys.path.append(os.path.dirname(self.plugin_path))
        try:
            with open(self.plugin_path) as fp:
                self.namespace = {"__file__": self.plugin_path}
                code = compile(fp.read(), self.plugin_path, "exec")
                # NOTE(security): plugin code runs with full interpreter
                # privileges -- only configure trusted plugin files.
                exec(code, self.namespace, self.namespace)
        except Exception as e:
            logger.exception(e)
        finally:
            # Always undo the sys.path tweak, even if a non-Exception
            # (e.g. KeyboardInterrupt) escapes; previously it leaked.
            sys.path.pop()
        logger.info("Load Plugin : {0}".format(self.plugin_name))

    def _reload_plugin(self):
        """Re-exec the plugin file after an on-disk change."""
        logger.info("Reload Plugin : {0}".format(self.plugin_name))
        self._load_plugin()

    def __getattr__(self, attr):
        """Expose only the declared hook functions; anything else is absent."""
        if attr not in self.PLUGIN_METHODS:
            raise AttributeError
        try:
            return self.namespace[attr]
        except KeyError:
            raise AttributeError
69 |
70 |
class PluginManager(object):
    """Loads user plugins and threads contexts through their hooks."""

    def __init__(self, config):
        # Expects config["plugins"] to be an iterable of file paths.
        self.plugins = []
        self.load_plugins(config["plugins"])

    def load_plugins(self, plugin_paths):
        """Instantiate (and watch) a Plugin for every path."""
        for plugin_path in plugin_paths:
            self.plugins.append(Plugin(plugin_path))

    def _exec_hook(self, plugin_context, hook_name):
        """Run *hook_name* on each plugin in order and return the result.

        The context is copied before and after every hook so one plugin
        cannot mutate what another already saw; plugins lacking the hook
        raise AttributeError and are skipped with a debug log.
        (Shared implementation for exec_request/exec_response, which were
        previously duplicated.)
        """
        if not self.plugins:
            return plugin_context

        current_context = copy(plugin_context)
        for plugin in self.plugins:
            try:
                new_context = getattr(plugin, hook_name)(current_context)
                current_context = copy(new_context)
            except AttributeError:
                logger.debug(
                    "Plugin {0} does not have {1}".format(
                        plugin.namespace["__file__"].split("/")[-1],
                        hook_name))
        return current_context

    def exec_request(self, plugin_context):
        """Run all on_request hooks; returns the final context."""
        return self._exec_hook(plugin_context, "on_request")

    def exec_response(self, plugin_context):
        """Run all on_response hooks; returns the final context."""
        return self._exec_hook(plugin_context, "on_response")
110 |
--------------------------------------------------------------------------------
/microproxy/layer/__init__.py:
--------------------------------------------------------------------------------
1 | from application import Http1Layer, TlsLayer, ForwardLayer, Http2Layer
2 | from proxy import SocksLayer, TransparentLayer, ReplayLayer, HttpProxyLayer
3 |
--------------------------------------------------------------------------------
/microproxy/layer/application/__init__.py:
--------------------------------------------------------------------------------
1 | from http1 import Http1Layer
2 | from tls import TlsLayer
3 | from forward import ForwardLayer
4 | from http2 import Http2Layer
5 |
--------------------------------------------------------------------------------
/microproxy/layer/application/forward.py:
--------------------------------------------------------------------------------
1 | from tornado import concurrent
2 |
3 | from microproxy.layer.base import ApplicationLayer
4 |
5 |
class ForwardLayer(ApplicationLayer):
    '''
    ForwardLayer: passing all the src data to destination. Will not intercept anything
    '''
    def __init__(self, server_state, context):
        super(ForwardLayer, self).__init__(server_state, context)
        self._future = concurrent.Future()

    def process_and_return_context(self):
        """Wire both directions and return a future resolved on close."""
        self.src_stream.read_until_close(streaming_callback=self.on_request)
        self.src_stream.set_close_callback(self.on_src_close)
        self.dest_stream.read_until_close(streaming_callback=self.on_response)
        self.dest_stream.set_close_callback(self.on_dest_close)
        return self._future

    def _shutdown(self, other_stream):
        # One side closed: tear down the other side and finish the layer.
        other_stream.close()
        self.on_finish()

    def on_src_close(self):
        self._shutdown(self.dest_stream)

    def on_dest_close(self):
        self._shutdown(self.src_stream)

    def on_finish(self):
        # Both close callbacks funnel here; only the first may set the result.
        if self._future.running():
            self._future.set_result(self.context)

    def on_request(self, data):
        """Relay client bytes to the destination (dropped once closed)."""
        if not self.dest_stream.closed():
            self.dest_stream.write(data)

    def on_response(self, data):
        """Relay destination bytes to the client (dropped once closed)."""
        if not self.src_stream.closed():
            self.src_stream.write(data)
40 |
--------------------------------------------------------------------------------
/microproxy/layer/application/http1.py:
--------------------------------------------------------------------------------
1 | import h11
2 | import re
3 | from tornado import gen
4 | from tornado.iostream import StreamClosedError
5 |
6 | from microproxy.context import HttpResponse
7 | from microproxy.exception import SrcStreamClosedError, DestStreamClosedError
8 | from microproxy.layer.base import ApplicationLayer, DestStreamCreatorMixin
9 | from microproxy.log import ProxyLogger
10 | from microproxy.protocol.http1 import Connection
11 |
12 | logger = ProxyLogger.get_logger(__name__)
13 |
14 |
15 | def _wrap_req_path(context, req):
16 | return "http://{0}:{1}{2}".format(context.host, context.port, req.path)
17 |
18 |
def parse_proxy_path(path):
    """Split an absolute-form proxy URI into (scheme, host, port, path).

    The port falls back to the scheme's well-known default when absent.
    Raises ValueError for anything that is not an http(s) absolute URI.
    """
    matcher = re.search(r"^(https?):\/\/([a-zA-Z0-9\.\-]+)(:(\d+))?(/.*)", path)
    if matcher is None:  # pragma: no cover
        raise ValueError("illegal proxy path {0}".format(path))

    scheme, host, _, port_text, req_path = matcher.groups()
    if port_text:
        port = int(port_text)
    else:
        port = {"http": 80, "https": 443}[scheme]
    return (scheme, host, port, req_path)
35 |
36 |
def parse_tunnel_proxy_path(path):
    """Split a CONNECT authority ("host:port") into (scheme, host, port).

    The scheme is guessed from well-known ports, defaulting to http.
    Raises ValueError when no host:port pair is found.
    """
    matcher = re.search(r"([a-zA-Z0-9\.\-]+)(:(\d+))", path)
    if matcher is None:  # pragma: no cover
        raise ValueError("illegal proxy path {0}".format(path))

    host, _, port_text = matcher.groups()
    port = int(port_text)
    scheme = {80: "http", 443: "https"}.get(port, "http")
    return (scheme, host, port)
51 |
52 |
class Http1Layer(ApplicationLayer, DestStreamCreatorMixin):
    """Proxies a single HTTP/1.x connection between src and dest.

    Runs a request/response cycle loop until either side closes, the
    protocol is upgraded (via a 1xx response), or a CONNECT tunnel is
    established.  Also handles HTTP-proxy semantics (absolute-URI requests
    and CONNECT) when the client speaks to us as a proxy.
    """
    def __init__(self, server_state, context):
        super(Http1Layer, self).__init__(server_state, context)
        # h11 SERVER side faces the client; readonly in replay mode so the
        # replayed exchange is not written back to the source stream.
        self.src_conn = Connection(
            h11.SERVER,
            self.src_stream,
            conn_type="src",
            readonly=(context.mode == "replay"),
            on_request=self.on_request)
        self.dest_conn = Connection(
            h11.CLIENT,
            self.dest_stream,
            conn_type="dest",
            on_response=self.on_response,
            on_info_response=self.on_info_response)
        # Request/response of the in-flight cycle; set by the connection
        # callbacks (on_request/on_response/on_info_response).
        self.req = None
        self.resp = None
        self.switch_protocol = False

    @gen.coroutine
    def process_and_return_context(self):
        """Run request/response cycles until this layer is finished."""
        while not self.finished():
            self.req = None
            self.resp = None
            try:
                yield self.read_request()
                yield self.handle_http_proxy()
                self.send_request()
                yield self.read_response()
                self.send_response()
            except SrcStreamClosedError:
                if self.dest_stream:
                    self.dest_stream.close()
                self.context.done = True
                # Only propagate when a request was already in flight;
                # an idle client disconnect is a normal end of the loop.
                if self.req:
                    raise
            except DestStreamClosedError:
                self.src_stream.close()
                raise
            except SwitchToTunnelHttpProxy:
                break

        if self.switch_protocol:
            # Hand the upgraded protocol name to the next layer.
            self.context.scheme = self.req.headers["Upgrade"]
        raise gen.Return(self.context)

    @gen.coroutine
    def read_request(self):
        """Read from the client until on_request has parsed a full request."""
        # NOTE: run first request to handle protocol change
        logger.debug("{0} wait for request".format(self))
        while not self.req:
            try:
                data = yield self.src_stream.read_bytes(
                    self.src_stream.max_buffer_size, partial=True)
            except StreamClosedError:
                raise SrcStreamClosedError(detail="read request failed")
            else:
                self.src_conn.receive(data, raise_exception=True)
        logger.debug("{0} received request: {1}".format(self, self.req))

    @gen.coroutine
    def read_response(self):
        """Read from the server until on_response has parsed a response."""
        logger.debug("{0} wait for response".format(self))
        while not self.resp:
            try:
                data = yield self.dest_stream.read_bytes(
                    self.dest_stream.max_buffer_size, partial=True)
            except StreamClosedError:
                # NOTE: for HTTP protocol, there is some condition that response finish when they didn't send data
                # It may happen when there is no "Content-Length" or "Content-Encoding: chunked" defined in there header
                self.dest_conn.receive(b"", raise_exception=False)
                break
            else:
                self.dest_conn.receive(data, raise_exception=True)
        logger.debug("{0} received response: {1}".format(self, self.resp))

    def on_request(self, request):
        """Connection callback: run request plugins and store the result."""
        plugin_result = self.interceptor.request(
            layer_context=self.context, request=request)

        self.req = plugin_result.request if plugin_result else request

    def send_request(self):
        """Forward the (possibly plugin-modified) request to the server."""
        try:
            self.dest_conn.send_request(self.req)
        except StreamClosedError:
            raise DestStreamClosedError(detail="send request failed with {0}".format(
                _wrap_req_path(self.context, self.req)))

    def on_response(self, response):
        """Connection callback: run response plugins and store the result."""
        plugin_result = self.interceptor.response(
            layer_context=self.context,
            request=self.req, response=response)

        self.resp = plugin_result.response if plugin_result else response

    def send_response(self):
        """Send a final (2xx-5xx) or informational (1xx) response back.

        A 1xx response marks a protocol switch (e.g. 101 Upgrade) and ends
        this layer's HTTP processing.
        """
        try:
            if int(self.resp.code) in range(200, 600):
                self.src_conn.send_response(self.resp)
                self.finish()
            else:
                self.src_conn.send_info_response(self.resp)
                self.finish(switch_protocol=True)
        except StreamClosedError:
            raise SrcStreamClosedError(detail="send response failed {0}".format(
                _wrap_req_path(self.context, self.req)))

    def on_info_response(self, response):
        """Connection callback for 1xx responses; runs response plugins."""
        plugin_result = self.interceptor.response(
            layer_context=self.context,
            request=self.req, response=response)
        self.resp = plugin_result.response if plugin_result else response

    def finished(self):
        """True when the request/response cycle loop should stop."""
        return (self.switch_protocol or
                self.src_stream.closed() or
                (self.dest_stream and self.dest_stream.closed()))

    def finish(self, switch_protocol=False):
        """Publish the completed exchange and prepare for the next cycle."""
        self.interceptor.publish(
            layer_context=self.context,
            request=self.req, response=self.resp)
        if (self.context.mode == "replay" or
                self.src_conn.closed() or
                self.dest_conn.closed()):
            # No further cycles possible: tear both sides down.
            self.src_stream.close()
            self.dest_stream.close()
            self.context.done = True
        elif switch_protocol:
            self.switch_protocol = True
        else:
            self.src_conn.start_next_cycle()
            self.dest_conn.start_next_cycle()

    @gen.coroutine
    def handle_http_proxy(self):
        """Handle proxy-form requests (CONNECT tunnels and absolute URIs)."""
        if self.is_tunnel_http_proxy():
            logger.debug("{0} proxy tunnel to {1}".format(self, self.req.path))
            scheme, host, port = parse_tunnel_proxy_path(self.req.path)
            yield self.connect_to_dest(scheme, (host, port))
            self.src_conn.send_response(HttpResponse(
                code="200",
                reason="OK", version="HTTP/1.1"))
            raise SwitchToTunnelHttpProxy
        elif self.is_normal_http_proxy():
            logger.debug("{0} proxy to {1}".format(self, self.req.path))
            scheme, host, port, path = parse_proxy_path(self.req.path)
            # Rewrite the absolute URI to origin-form before forwarding.
            self.req.path = path
            yield self.connect_to_dest(scheme, (host, port))
            self.dest_conn.io_stream = self.dest_stream
        else:
            raise gen.Return(None)

    def is_tunnel_http_proxy(self):
        """True for CONNECT requests."""
        return self.req.method == "CONNECT"

    def is_normal_http_proxy(self):
        """True for absolute-URI requests (client treats us as a proxy)."""
        return (self.req.path.startswith("http://") or
                self.req.path.startswith("https://"))

    @gen.coroutine
    def connect_to_dest(self, scheme, addr):
        """Ensure the dest stream points at *addr*, reconnecting if changed."""
        if addr != (self.context.host, self.context.port):
            logger.debug("{0} proxy to new connection {1}".format(self, addr))
            if self.dest_stream:
                self.dest_stream.close()

            dest_stream = yield self.create_dest_stream(addr)
            self.context.dest_stream = dest_stream
            self.context.scheme = scheme
            self.context.host = addr[0]
            self.context.port = addr[1]
            logger.debug("{0} proxy to new connection success".format(self))
        else:
            logger.debug("{0} proxy to same connection".format(self))
230 |
class SwitchToTunnelHttpProxy(Exception):
    """Control-flow signal: a CONNECT succeeded and the layer should stop
    HTTP processing so raw tunneling can take over."""
233 |
--------------------------------------------------------------------------------
/microproxy/layer/application/http2.py:
--------------------------------------------------------------------------------
1 | from tornado import concurrent, gen
2 |
3 | from microproxy.layer.base import ApplicationLayer
4 | from microproxy.protocol.http2 import Connection
5 |
6 | from microproxy.log import ProxyLogger
7 | logger = ProxyLogger.get_logger(__name__)
8 |
9 |
10 | class Http2Layer(ApplicationLayer):
11 | '''
12 | Http2Layer: Responsible for handling the http2 request and response.
13 | '''
14 | def __init__(self, server_state, context):
15 | super(Http2Layer, self).__init__(server_state, context)
16 | self.src_conn = Connection(
17 | self.src_stream, client_side=False,
18 | conn_type="source",
19 | on_request=self.on_request,
20 | on_settings=self.on_src_settings,
21 | on_window_updates=self.on_src_window_updates,
22 | on_priority_updates=self.on_src_priority_updates,
23 | on_reset=self.on_src_reset,
24 | on_terminate=self.on_src_terminate,
25 | readonly=(context.mode == "replay"))
26 | self.dest_conn = Connection(
27 | self.dest_stream, client_side=True,
28 | conn_type="destination",
29 | on_response=self.on_response,
30 | on_push=self.on_push,
31 | on_settings=self.on_dest_settings,
32 | on_window_updates=self.on_dest_window_updates,
33 | on_terminate=self.on_dest_terminate,
34 | on_reset=self.on_dest_reset)
35 | self.streams = dict()
36 | self.src_to_dest_ids = dict([(0, 0)])
37 | self.dest_to_src_ids = dict([(0, 0)])
38 | self._future = concurrent.Future()
39 |
40 | @gen.coroutine
41 | def process_and_return_context(self):
42 | yield self._init_h2_connection()
43 | self.src_stream.read_until_close(
44 | streaming_callback=self.src_conn.receive)
45 | self.src_stream.set_close_callback(self.on_src_close)
46 |
47 | self.dest_stream.read_until_close(
48 | streaming_callback=self.dest_conn.receive)
49 | self.dest_stream.set_close_callback(self.on_dest_close)
50 | result = yield self._future
51 | raise gen.Return(result)
52 |
53 | @gen.coroutine
54 | def _init_h2_connection(self):
55 | self.dest_conn.initiate_connection()
56 | yield self.dest_conn.flush()
57 | self.src_conn.initiate_connection()
58 | yield self.src_conn.flush()
59 |
60 | def on_src_close(self):
61 | logger.debug("{0}: src stream closed".format(self))
62 | self.dest_stream.close()
63 | self.layer_finish()
64 |
65 | def on_dest_close(self):
66 | logger.debug("{0}: dest stream closed".format(self))
67 | self.src_stream.close()
68 | self.layer_finish()
69 |
70 | def layer_finish(self):
71 | if self._future.running():
72 | self._future.set_result(self.context)
73 |
74 | def update_ids(self, src_stream_id, dest_stream_id):
75 | self.src_to_dest_ids[src_stream_id] = dest_stream_id
76 | self.dest_to_src_ids[dest_stream_id] = src_stream_id
77 |
78 | def on_request(self, stream_id, request, priority_updated):
79 | dest_stream_id = self.dest_conn.get_next_available_stream_id()
80 | self.update_ids(stream_id, dest_stream_id)
81 |
82 | if priority_updated:
83 | priority_weight = priority_updated.weight
84 | priority_exclusive = priority_updated.exclusive
85 | priority_depends_on = self.safe_mapping_id(
86 | self.src_to_dest_ids, priority_updated.depends_on)
87 | else:
88 | priority_weight = None
89 | priority_exclusive = None
90 | priority_depends_on = None
91 |
92 | stream = Stream(self, self.context, stream_id, dest_stream_id)
93 | stream.on_request(
94 | request,
95 | priority_weight=priority_weight,
96 | priority_exclusive=priority_exclusive,
97 | priority_depends_on=priority_depends_on)
98 | self.streams[stream_id] = stream
99 |
100 | def on_push(self, pushed_stream_id, parent_stream_id, request):
101 | self.update_ids(pushed_stream_id, pushed_stream_id)
102 | target_parent_stream_id = self.dest_to_src_ids[parent_stream_id]
103 |
104 | stream = Stream(self, self.context, pushed_stream_id, pushed_stream_id)
105 | stream.on_push(request, target_parent_stream_id)
106 | self.streams[pushed_stream_id] = stream
107 |
108 | def on_response(self, stream_id, response):
109 | src_stream_id = self.dest_to_src_ids[stream_id]
110 | self.streams[src_stream_id].on_response(response)
111 |
112 | self.on_finish(src_stream_id)
113 |
114 | def on_finish(self, src_stream_id):
115 | stream = self.streams[src_stream_id]
116 |
117 | self.interceptor.publish(
118 | layer_context=self.context, request=stream.request,
119 | response=stream.response)
120 | del self.streams[src_stream_id]
121 |
122 | if self.context.mode == "replay":
123 | self.src_stream.close()
124 | self.dest_stream.close()
125 |
126 | def on_src_settings(self, changed_settings):
127 | new_settings = {
128 | id: cs.new_value for (id, cs) in changed_settings.iteritems()
129 | }
130 | self.dest_conn.send_update_settings(new_settings)
131 |
132 | def on_dest_settings(self, changed_settings):
133 | new_settings = {
134 | id: cs.new_value for (id, cs) in changed_settings.iteritems()
135 | }
136 | self.src_conn.send_update_settings(new_settings)
137 |
138 | def on_src_window_updates(self, stream_id, delta):
139 | target_stream_id = self.safe_mapping_id(self.src_to_dest_ids, stream_id)
140 | self.dest_conn.send_window_updates(target_stream_id, delta)
141 |
142 | def on_dest_window_updates(self, stream_id, delta):
143 | target_stream_id = self.safe_mapping_id(self.dest_to_src_ids, stream_id)
144 | self.src_conn.send_window_updates(target_stream_id, delta)
145 |
146 | def on_src_priority_updates(self, stream_id, depends_on,
147 | weight, exclusive):
148 | target_stream_id = self.safe_mapping_id(
149 | self.src_to_dest_ids, stream_id)
150 | target_depends_on = self.safe_mapping_id(
151 | self.src_to_dest_ids, depends_on)
152 | if target_stream_id:
153 | self.dest_conn.send_priority_updates(
154 | target_stream_id, target_depends_on, weight, exclusive)
155 |
156 | def safe_mapping_id(self, ids, stream_id):
157 | if stream_id in ids:
158 | return ids[stream_id]
159 | return 0
160 |
161 | def on_src_reset(self, stream_id, error_code):
162 | target_stream_id = self.src_to_dest_ids[stream_id]
163 | self.dest_conn.send_reset(target_stream_id, error_code)
164 |
def on_dest_reset(self, stream_id, error_code):
    # Forward RST_STREAM from the destination to the client.
    # NOTE(review): direct indexing (no safe_mapping_id) — an unknown
    # stream_id raises KeyError; see on_src_reset.
    target_stream_id = self.dest_to_src_ids[stream_id]
    self.src_conn.send_reset(target_stream_id, error_code)
168 |
def on_src_terminate(self, additional_data, error_code, last_stream_id):
    """Relay a client GOAWAY frame to the destination connection."""
    self.dest_conn.send_terminate(
        additional_data=additional_data,
        error_code=error_code,
        last_stream_id=last_stream_id)
174 |
def on_dest_terminate(self, additional_data, error_code, last_stream_id):
    """Relay a server GOAWAY frame back to the client connection."""
    self.src_conn.send_terminate(
        additional_data=additional_data,
        error_code=error_code,
        last_stream_id=last_stream_id)
180 |
181 |
class Stream(object):
    """A paired client/destination HTTP/2 stream tracked by the layer.

    Holds the intercepted request/response so the layer can publish the
    full exchange when the stream finishes.
    """

    def __init__(self, layer, context, src_stream_id, dest_stream_id):
        self.layer = layer
        self.context = context
        self.src_stream_id = src_stream_id
        self.dest_stream_id = dest_stream_id
        # Filled in by on_request/on_push and on_response respectively.
        self.request = None
        self.response = None

    def on_request(self, request, **kwargs):
        """Run request interceptors, then forward to the destination."""
        # FIX: local was misspelled "plugin_ressult", inconsistent with
        # plugin_result in on_response.
        plugin_result = self.layer.interceptor.request(
            layer_context=self.context, request=request)

        # Interceptors may rewrite the request; fall back to the original.
        self.request = plugin_result.request if plugin_result else request
        self.layer.dest_conn.send_request(
            self.dest_stream_id, self.request, **kwargs)

    def on_push(self, request, parent_stream_id):
        """Run request interceptors, then push the stream to the client."""
        plugin_result = self.layer.interceptor.request(
            layer_context=self.context, request=request)

        self.request = plugin_result.request if plugin_result else request
        self.layer.src_conn.send_pushed_stream(
            parent_stream_id, self.src_stream_id, self.request)

    def on_response(self, response):
        """Run response interceptors, then forward to the client."""
        plugin_result = self.layer.interceptor.response(
            layer_context=self.context,
            request=self.request, response=response
        )

        self.response = plugin_result.response if plugin_result else response
        self.layer.src_conn.send_response(
            self.src_stream_id, self.response)
216 |
--------------------------------------------------------------------------------
/microproxy/layer/application/tls.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import struct
4 | from OpenSSL import SSL
5 | import certifi
6 | from service_identity import VerificationError
7 | from tornado import gen
8 |
9 | from microproxy.layer.base import ApplicationLayer
10 | from microproxy.context import LayerContext, TlsInfo
11 | from microproxy.protocol.tls import TlsClientHello, ServerConnection, ClientConnection
12 | from microproxy.exception import (
13 | DestStreamClosedError, TlsError, ProtocolError)
14 |
15 | from microproxy.log import ProxyLogger
16 | logger = ProxyLogger.get_logger(__name__)
17 |
18 |
class TlsLayer(ApplicationLayer):
    """TLS-interception layer.

    Peeks the client's ClientHello without consuming it, completes a real
    TLS handshake with the destination first (to learn the negotiated ALPN
    protocol), then finishes the client handshake with a certificate forged
    for the target hostname.
    """

    def __init__(self, server_state, context):
        super(TlsLayer, self).__init__(server_state, context)
        self.cert_store = self.server_state.cert_store
        self.src_conn = ServerConnection(self.src_stream)
        self.dest_conn = ClientConnection(self.dest_stream)

    def peek_client_hello(self):
        """Reassemble the full ClientHello from the source stream via peek.

        The handshake message may span several TLS records; records are
        peeked (not consumed) so the real handshake can still run later.

        Returns:
            bytes: the raw handshake message, including its 4-byte header.

        Raises:
            ProtocolError: when the peeked bytes are not well-formed TLS
                records.
        """
        client_hello = b""
        client_hello_size = 1  # placeholder until the real size is parsed below
        offset = 0
        while len(client_hello) < client_hello_size:
            # 5-byte TLS record header: type(1), version(2), length(2).
            record_header = self.src_stream.peek(offset + 5)[offset:]
            if len(record_header) != 5:
                raise ProtocolError(
                    'Expected TLS record, got "{}" instead.'.format(record_header))

            # Record length is big-endian in header bytes 3..4.
            record_size = struct.unpack("!H", record_header[3:])[0] + 5
            record_body = self.src_stream.peek(offset + record_size)[offset + 5:]
            if len(record_body) != record_size - 5:
                raise ProtocolError(
                    "Unexpected EOF in TLS handshake: {}".format(record_body))

            client_hello += record_body
            offset += record_size
            # Handshake length is a 24-bit field at bytes 1..3; the +4
            # accounts for the handshake header itself.
            client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4

        return client_hello

    @gen.coroutine
    def start_dest_tls(self, hostname, client_alpns):
        """Handshake with the real destination server.

        Returns (via gen.Return): (dest_stream, select_alpn), where
        select_alpn falls back to b"http/1.1" when the server negotiated
        no ALPN protocol.

        Raises:
            DestStreamClosedError: the socket dropped during the handshake.
            TlsError: the handshake or certificate verification failed.
        """
        # User-provided CA bundle wins; otherwise use certifi's bundle.
        trusted_ca_certs = self.config["client_certs"] or certifi.where()

        try:
            logger.debug("start dest tls handshaking: {0}".format(hostname))
            dest_stream = yield self.dest_conn.start_tls(
                insecure=self.config["insecure"],
                trusted_ca_certs=trusted_ca_certs,
                hostname=hostname, alpns=client_alpns)

        # TODO: tornado_ext.iostream should handle this part.
        except SSL.SysCallError as e:
            raise DestStreamClosedError(detail="Stream closed when tls Handshaking failed")

        except (SSL.Error, VerificationError) as e:
            raise TlsError("Tls Handshaking Failed on destination with: ({0}) {1}".format(
                type(e).__name__, str(e)))

        else:
            logger.debug(dest_stream.fileno().get_alpn_proto_negotiated())
            select_alpn = (dest_stream.fileno().get_alpn_proto_negotiated() or
                           b"http/1.1")

            logger.debug("{0}:{1} -> Choose {2} as application protocol".format(
                self.context.host, self.context.port, select_alpn))
            logger.debug("finish dest tls handshake")
            raise gen.Return((dest_stream, select_alpn))

    @gen.coroutine
    def start_src_tls(self, hostname, select_alpn):
        """Handshake with the client using a cert forged for hostname.

        Raises:
            TlsError: the client-side handshake failed.
        """
        try:
            logger.debug("start src tls handshaking: {0}".format(hostname))
            src_stream = yield self.src_conn.start_tls(
                *self.cert_store.get_cert_and_pkey(hostname),
                select_alpn=select_alpn)

        except SSL.Error as e:
            raise TlsError("Tls Handshaking Failed on source with: ({0}) {1}".format(
                type(e).__name__, str(e)))

        else:
            logger.debug("finish src tls handshake")
            raise gen.Return(src_stream)

    def alpn_to_scheme(self, alpn):
        # Map the negotiated ALPN protocol onto the context scheme the
        # layer manager uses to choose the next layer.
        if alpn == "http/1.1":
            return "https"
        elif alpn == "h2":
            return "h2"
        else:
            raise ProtocolError("Unsupported alpn protocol: {0}".format(alpn))

    @gen.coroutine
    def process_and_return_context(self):
        """Run both handshakes and return the context for the next layer.

        On failure, whichever side is already established is closed before
        the exception propagates; the bare excepts deliberately re-raise
        after cleanup.
        """
        # NOTE: peeking src stream client hello.
        raw_client_hello = self.peek_client_hello()
        # Skip the 4-byte handshake header; TlsClientHello parses the body.
        client_hello = TlsClientHello(raw_client_hello[4:])

        # Prefer the SNI name over the context host for cert forging.
        hostname = client_hello.sni or self.context.host
        try:
            dest_stream, select_alpn = yield self.start_dest_tls(
                hostname, client_hello.alpn_protocols)
        except:
            if not self.src_stream.closed():
                self.src_stream.close()
            raise

        try:
            src_stream = yield self.start_src_tls(
                hostname, select_alpn)
        except:
            if not dest_stream.closed():
                dest_stream.close()
            raise

        try:
            ctx = LayerContext(
                mode=self.context.mode,
                src_stream=src_stream,
                dest_stream=dest_stream,
                scheme=self.alpn_to_scheme(select_alpn),
                host=hostname or self.context.host,
                port=self.context.port,
                client_tls=self._resolve_tls_info(src_stream),
                server_tls=self._resolve_tls_info(dest_stream),
                src_info=self.context.src_info
            )
        except:
            src_stream.close()
            dest_stream.close()
            raise

        raise gen.Return(ctx)

    def _resolve_tls_info(self, conn):
        """Best-effort snapshot of the negotiated TLS parameters.

        Returns None (after logging) when any attribute cannot be read.
        """
        try:
            version = conn.fileno().get_protocol_version_name()
            sni = conn.fileno().get_servername().decode("idna")
            alpn = conn.fileno().get_alpn_proto_negotiated()
            cipher = conn.fileno().get_cipher_name()
            return TlsInfo(
                version=version,
                sni=sni,
                alpn=alpn,
                cipher=cipher)
        except Exception as e:
            logger.error("resolve tls info failed: %s", e)
            return None
157 |
--------------------------------------------------------------------------------
/microproxy/layer/base.py:
--------------------------------------------------------------------------------
1 | from copy import copy
2 | from datetime import timedelta
3 | import socket
4 | from tornado import gen
5 |
6 | from microproxy.tornado_ext.iostream import MicroProxyIOStream
7 |
8 |
class Layer(object):
    """Abstract base for every proxy pipeline layer."""

    def process_and_return_context(self):
        """Process traffic and return the context for the next layer."""
        raise NotImplementedError
12 |
13 |
class ApplicationLayer(Layer):
    """Base for application-protocol layers (HTTP/1, HTTP/2, TLS, ...).

    Holds a private copy of the layer context and exposes convenient
    accessors onto the shared server state and the two streams.
    """

    def __init__(self, server_state, context):
        super(ApplicationLayer, self).__init__()
        # Shallow-copy so this layer's mutations don't leak to the caller.
        self.context = copy(context)
        self.server_state = server_state

    @property
    def interceptor(self):
        return self.server_state.interceptor

    @property
    def config(self):
        return self.server_state.config

    @property
    def src_stream(self):
        return self.context.src_stream

    @src_stream.setter
    def src_stream(self, value):
        self.context.src_stream = value

    @property
    def dest_stream(self):
        return self.context.dest_stream

    @dest_stream.setter
    def dest_stream(self, value):
        self.context.dest_stream = value

    def __repr__(self):
        return "{0}({1} -> {2}:{3})".format(
            type(self).__name__, self.context.src_info,
            self.context.host, self.context.port)

    # FIX: __str__ duplicated __repr__ verbatim; alias keeps them in sync.
    __str__ = __repr__
53 |
54 |
class DestStreamCreatorMixin:
    """Mixin providing outbound (destination) stream creation."""

    @gen.coroutine
    def create_dest_stream(self, dest_addr_info):
        """Open a MicroProxyIOStream to dest_addr_info, 5-second timeout."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = MicroProxyIOStream(sock)
        connect_future = stream.connect(dest_addr_info)
        yield gen.with_timeout(timedelta(seconds=5), connect_future)
        raise gen.Return(stream)
63 |
64 |
class ProxyLayer(Layer, DestStreamCreatorMixin):
    """Base for mode-entry layers (socks / transparent / http / replay)."""

    def __init__(self, context, **kwargs):
        super(ProxyLayer, self).__init__()
        # Shallow-copy so this layer's mutations don't leak to the caller.
        self.context = copy(context)

        # Extra keyword arguments become attributes (used for injection).
        for attr_name, attr_value in kwargs.iteritems():
            setattr(self, attr_name, attr_value)

    def __repr__(self):
        return "{0}({1})".format(
            type(self).__name__, self.context.src_info)

    def __str__(self):
        # FIX: the format string previously carried a stray trailing space,
        # inconsistent with __repr__ and ApplicationLayer's formatting.
        return "{0}({1})".format(
            type(self).__name__, self.context.src_info)
80 |
--------------------------------------------------------------------------------
/microproxy/layer/debug/__init__.py:
--------------------------------------------------------------------------------
1 | from http2 import Http2Layer
2 |
--------------------------------------------------------------------------------
/microproxy/layer/debug/http2.py:
--------------------------------------------------------------------------------
1 | from tornado import concurrent
2 |
3 | from h2.connection import H2Connection
4 | from h2.exceptions import NoSuchStreamError
5 | from h2.events import RequestReceived, WindowUpdated
6 |
7 | from microproxy.log import ProxyLogger
8 | logger = ProxyLogger.get_logger(__name__)
9 |
10 |
class Http2Layer(object):  # pragma: no cover
    """Debug-only layer: forwards raw HTTP/2 bytes unmodified while
    decoding frames on the side for logging/inspection."""

    def __init__(self, context):
        super(Http2Layer, self).__init__()
        self.context = context
        self.src_conn = Connection(
            self.context.src_stream, self.context.dest_stream, False,
            self, client_side=False)
        self.dest_conn = Connection(
            self.context.dest_stream, self.context.src_stream, True,
            self, client_side=True)
        self._future = concurrent.Future()

    def process_and_return_context(self):
        src_stream = self.context.src_stream
        dest_stream = self.context.dest_stream
        src_stream.read_until_close(
            streaming_callback=self.src_conn.on_data_received)
        src_stream.set_close_callback(self.on_src_close)
        dest_stream.read_until_close(
            streaming_callback=self.dest_conn.on_data_received)
        dest_stream.set_close_callback(self.on_dest_close)
        return self._future

    def on_src_close(self):
        self._teardown(self.context.dest_stream)

    def on_dest_close(self):
        self._teardown(self.context.src_stream)

    def _teardown(self, other_stream):
        # Either side closing tears down the peer and resolves the future.
        other_stream.close()
        if self._future.running():
            self._future.set_result(self.context)

    def on_request_header(self, stream_id, headers):
        self.dest_conn.send_headers(stream_id, headers)

    def on_window_update(self, src_conn, event):
        target = self.dest_conn if src_conn is self.src_conn else self.src_conn
        target.increment_flow_control_window(
            event.delta, event.stream_id or None)
52 |
53 |
class Connection(H2Connection):  # pragma: no cover
    """H2Connection that mirrors raw bytes to the peer stream while
    decoding the same bytes into events for the debug layer."""

    def __init__(self, from_stream, to_stream, is_server, layer, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        self.from_stream = from_stream
        self.to_stream = to_stream
        self._type = "dest" if is_server else "src"
        self.layer = layer

    def on_data_received(self, data):
        try:
            # Forward the bytes untouched, then decode them for inspection.
            self.to_stream.write(data)
            for event in self.receive_data(data):
                logger.debug("event received from {0}: {1}".format(
                    self._type, event))
                if isinstance(event, RequestReceived):
                    self.layer.on_request_header(event.stream_id, event.headers)
                elif isinstance(event, WindowUpdated):
                    self.layer.on_window_update(self, event)
        except NoSuchStreamError as e:
            logger.error("NoSuchStreamError with stream_id: {0} on {1}".format(e.stream_id, self._type))
            logger.exception(e)
        except Exception as e:
            logger.error("Exception on {0}".format(self._type))
            logger.exception(e)
79 |
--------------------------------------------------------------------------------
/microproxy/layer/manager.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from tornado import gen
4 | from tornado import iostream
5 |
6 | from microproxy.exception import (
7 | DestStreamClosedError, SrcStreamClosedError, DestNotConnectedError,
8 | TlsError)
9 | from microproxy.layer import (
10 | SocksLayer, TransparentLayer, ReplayLayer, HttpProxyLayer,
11 | ForwardLayer, TlsLayer, Http1Layer, Http2Layer
12 | )
13 |
14 | from microproxy.log import ProxyLogger
15 | logger = ProxyLogger.get_logger(__name__)
16 |
17 |
def get_first_layer(context):
    """Pick the entry-point proxy layer for the configured proxy mode.

    Raises:
        ValueError: when context.mode is not a supported proxy mode.
    """
    mode = context.mode
    if mode == "http":
        return HttpProxyLayer(context)
    if mode == "replay":
        return ReplayLayer(context)
    if mode == "transparent":
        return TransparentLayer(context)
    if mode == "socks":
        return SocksLayer(context)
    raise ValueError("Unsupport proxy mode: {0}".format(mode))
30 |
31 |
@gen.coroutine
def run_layers(server_state, initial_layer, initial_layer_context):  # pragma: no cover
    """Drive the layer pipeline until it finishes or a layer fails."""
    context = initial_layer_context
    layer = initial_layer

    while layer:
        try:
            logger.debug("Enter {0} Layer".format(layer))
            context = yield layer.process_and_return_context()
            logger.debug("Leave {0} Layer".format(layer))
            layer = _next_layer(server_state, layer, context)
        except Exception as error:
            # Errors terminate the pipeline; policy lives in one place.
            _handle_layer_error(error, layer)
            break

    raise gen.Return(None)
48 |
49 |
def _handle_layer_error(error, layer):
    # Central error policy for the pipeline: log at an appropriate level
    # and decide whether the client-facing stream must be closed.
    # NOTE(review): branch order matters if these exception classes share
    # a hierarchy — keep the isinstance checks in this order.
    if isinstance(error, gen.TimeoutError):
        logger.warn("{0} timeout".format(layer))
        layer.src_stream.close()
    elif isinstance(error, DestNotConnectedError):
        # The proxy layer already replied to the client; nothing to close.
        logger.warn("{0} destination not connected".format(layer))
    elif isinstance(error, DestStreamClosedError):
        logger.error("{0} failed with {1}".format(layer, error))
        layer.src_stream.close()
    elif isinstance(error, SrcStreamClosedError):
        # Client already gone; closing again would be a no-op.
        logger.error("{0} failed with {1}".format(layer, error))
    elif isinstance(error, iostream.StreamClosedError):
        # NOTE: unhandled StreamClosedError, print stack to find out where
        logger.exception("{0} failed with {1}".format(layer, error))
        layer.src_stream.close()
    elif isinstance(error, TlsError):
        logger.error(error)
        layer.src_stream.close()
    else:
        logger.exception("{0} unhandled exception {1}".format(layer, error))
        layer.src_stream.close()
71 |
72 |
def _next_layer(server_state, current_layer, context):
    """Select the layer to run after current_layer, or None when done."""
    config = server_state.config
    http_ports = [80] + config["http_port"]
    https_ports = [443] + config["https_port"]

    if isinstance(current_layer, HttpProxyLayer):
        return Http1Layer(server_state, context)

    if isinstance(current_layer, (SocksLayer, TransparentLayer)):
        # Route by destination port when entering from a proxy layer.
        if context.port in http_ports:
            context.scheme = "http"
            return Http1Layer(server_state, context)
        if context.port in https_ports:
            return TlsLayer(server_state, context)
        return ForwardLayer(server_state, context)

    if isinstance(current_layer, TlsLayer):
        # After TLS interception the scheme reflects the negotiated ALPN.
        if context.scheme == "https":
            return Http1Layer(server_state, context)
        if context.scheme == "h2":
            return Http2Layer(server_state, context)
        return ForwardLayer(server_state, context)

    if isinstance(current_layer, ReplayLayer):
        if context.scheme in ("http", "https"):
            return Http1Layer(server_state, context)
        if context.scheme == "h2":
            return Http2Layer(server_state, context)
        return ForwardLayer(server_state, context)

    if isinstance(current_layer, Http1Layer):
        if context.scheme == "websocket":
            return ForwardLayer(server_state, context)
        if context.scheme == "https" and not context.done:
            return TlsLayer(server_state, context)
        if context.scheme == "http" and not context.done:
            return Http1Layer(server_state, context)
    # Fall through: the pipeline is finished (implicit None).
113 |
--------------------------------------------------------------------------------
/microproxy/layer/proxy/__init__.py:
--------------------------------------------------------------------------------
1 | from http import HttpProxyLayer
2 | from replay import ReplayLayer
3 | from transparent import TransparentLayer
4 | from socks import SocksLayer
5 |
--------------------------------------------------------------------------------
/microproxy/layer/proxy/http.py:
--------------------------------------------------------------------------------
1 | from tornado import gen
2 |
3 | from microproxy.layer.base import ProxyLayer
4 |
5 |
class HttpProxyLayer(ProxyLayer):  # pragma: no cover
    """Entry layer for explicit HTTP proxy mode; hands off immediately."""

    def __init__(self, context, dest_addr_resolver=None, **kwargs):
        # dest_addr_resolver is accepted for signature parity with the
        # other proxy layers but is not used here — presumably kept for
        # interface compatibility; confirm against callers.
        super(HttpProxyLayer, self).__init__(context, **kwargs)

    @gen.coroutine
    def process_and_return_context(self):
        raise gen.Return(self.context)
13 |
--------------------------------------------------------------------------------
/microproxy/layer/proxy/replay.py:
--------------------------------------------------------------------------------
1 | from tornado import gen
2 |
3 | from microproxy.protocol import tls
4 | from microproxy.layer.base import ProxyLayer
5 |
6 |
class ReplayLayer(ProxyLayer):
    """Proxy layer that reconnects to the destination for replayed events."""

    def __init__(self, context, **kwargs):
        super(ReplayLayer, self).__init__(context, **kwargs)

    @gen.coroutine
    def process_and_return_context(self):
        dest_stream = yield self.create_dest_stream(
            (self.context.host, self.context.port))

        scheme = self.context.scheme
        if scheme in ("https", "h2"):
            # Offer h2 via ALPN only when replaying an http/2 exchange.
            alpn = ["h2"] if scheme == "h2" else None
            dest_stream = yield dest_stream.start_tls(
                server_side=False, ssl_options=tls.create_dest_sslcontext(alpn=alpn))

        self.context.dest_stream = dest_stream
        raise gen.Return(self.context)
27 |
--------------------------------------------------------------------------------
/microproxy/layer/proxy/socks.py:
--------------------------------------------------------------------------------
1 | import errno
2 |
3 | from tornado import gen
4 | from tornado import iostream
5 | from socks5 import GreetingResponse, Response
6 | from socks5 import RESP_STATUS, AUTH_TYPE, REQ_COMMAND, ADDR_TYPE
7 | from socks5 import Connection
8 |
9 | from microproxy.layer.base import ProxyLayer
10 | from microproxy.exception import SrcStreamClosedError, DestNotConnectedError, ProtocolError
11 |
12 | from microproxy.log import ProxyLogger
13 | logger = ProxyLogger.get_logger(__name__)
14 |
15 |
class SocksLayer(ProxyLayer):
    """Proxy-entry layer implementing the server side of a SOCKS5 handshake.

    Negotiates the greeting/auth and the CONNECT request with the client,
    opens the destination stream, and fills the layer context with the
    resolved destination address.
    """

    def __init__(self, context):
        super(SocksLayer, self).__init__(context)
        # Stateful SOCKS5 protocol machine acting in the server role.
        self.socks_conn = Connection(our_role="server")

    @gen.coroutine
    def process_and_return_context(self):
        """Run the SOCKS5 handshake until a CONNECT request succeeds.

        Returns (via gen.Return): the context with dest_stream/host/port set.

        Raises:
            SrcStreamClosedError: the client hung up mid-handshake.
            ProtocolError: unsupported SOCKS command.
            NotImplementedError: unexpected SOCKS event type.
        """
        self.socks_conn.initiate_connection()
        while True:
            try:
                data = yield self.context.src_stream.read_bytes(1024, partial=True)
            except iostream.StreamClosedError:
                raise SrcStreamClosedError(
                    detail="client closed while socks handshaking")

            # socks5 event objects are compared against their type names here.
            _event = self.socks_conn.recv(data)
            if _event == "GreetingRequest":
                yield self.handle_greeting_request(_event)
            elif _event == "Request":
                dest_stream, host, port = yield self.handle_request_and_create_destination(_event)
                self.context.dest_stream = dest_stream
                self.context.host = host
                self.context.port = port
                break
            else:
                raise NotImplementedError("not handling with {0}".format(_event))

        raise gen.Return(self.context)

    @gen.coroutine
    def handle_greeting_request(self, event):
        # Only the NO_AUTH method is supported; otherwise reject the greeting.
        if not AUTH_TYPE["NO_AUTH"] in event.methods:
            yield self.send_event_to_src_conn(
                GreetingResponse(AUTH_TYPE["NO_SUPPORT_AUTH_METHOD"]))
        else:
            yield self.send_event_to_src_conn(
                GreetingResponse(AUTH_TYPE["NO_AUTH"]))

    @gen.coroutine
    def handle_request_and_create_destination(self, event):
        """Handle the socks request from source
        Create destination connection

        Returns:
            tuple: (dest_stream, host, port)
        """
        # Only CONNECT is supported; reply before raising so the client
        # learns why the request failed.
        if event.cmd != REQ_COMMAND["CONNECT"]:
            logger.debug("Unsupport connect type")
            yield self.send_event_to_src_conn(Response(
                RESP_STATUS["COMMAND_NOT_SUPPORTED"],
                event.atyp, event.addr, event.port), raise_exception=False)
            raise ProtocolError("Unsupport bind type")

        try:
            dest_stream = yield self.create_dest_stream((str(event.addr), event.port))
        except gen.TimeoutError as e:
            yield self.handle_timeout_error(e, event)
        except iostream.StreamClosedError as e:
            yield self.handle_stream_closed_error(e, event)
        else:
            yield self.send_event_to_src_conn(Response(
                RESP_STATUS["SUCCESS"],
                event.atyp, event.addr, event.port))
            raise gen.Return((dest_stream, event.addr, event.port))

    @gen.coroutine
    def send_event_to_src_conn(self, event, raise_exception=True):
        """Serialize a SOCKS event and write it to the client stream.

        With raise_exception=False, failures are logged and suppressed so
        error replies can be sent on a best-effort basis.
        """
        try:
            data = self.socks_conn.send(event)
            yield self.context.src_stream.write(data)
        except iostream.StreamClosedError as e:  # pragma: no cover
            if raise_exception:
                raise SrcStreamClosedError(detail="failed on {0}".format(type(event).__name__))
            logger.error("stream closed on {0}".format(type(event)))
        except Exception as e:  # pragma: no cover
            if raise_exception:
                raise
            logger.exception(e)

    @gen.coroutine
    def handle_timeout_error(self, error, event):
        # Connect timed out: report the network as unreachable, best-effort.
        logger.debug("connection timout {0}:{1}".format(
            event.addr, event.port))
        yield self.send_event_to_src_conn(Response(
            RESP_STATUS["NETWORK_UNREACHABLE"],
            event.atyp, event.addr, event.port), raise_exception=False)
        raise DestNotConnectedError((event.addr, event.port))

    @gen.coroutine
    def handle_stream_closed_error(self, error, event):
        """Map a connect failure's errno onto a SOCKS reply status."""
        if error.real_error:
            err_num = abs(error.real_error[0])
            try:
                errorcode = errno.errorcode[err_num]
            except KeyError:
                errorcode = "undefined(code={0})".format(err_num)

            logger.debug("connect to {0}:{1} with error code {2}".format(
                event.addr, event.port, errorcode))
            # NOTE: if we submit an incorrect address type,
            # the error code will be:
            # - ENOEXEC in macos.
            # - EBADF in linux.
            if err_num in (errno.ENOEXEC, errno.EBADF):
                reason = "ADDRESS_TYPE_NOT_SUPPORTED"
            elif err_num == errno.ETIMEDOUT:
                reason = "NETWORK_UNREACHABLE"
            else:
                logger.error("unhandled error code {0} received".format(errorcode))
                # NOTE(review): "GENRAL_FAILURE" is the spelling of the
                # RESP_STATUS key in the socks5 package — do not "fix" it.
                reason = "GENRAL_FAILURE"

            yield self.send_event_to_src_conn(Response(
                RESP_STATUS[reason],
                event.atyp, event.addr, event.port), raise_exception=False)
            raise DestNotConnectedError((event.addr, event.port))
        else:  # pragma: no cover
            # TODO: StreamClosedError without real_error?
            # need check that tornado would occur this situation?
            raise
135 |
--------------------------------------------------------------------------------
/microproxy/layer/proxy/transparent.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import socket
3 | import struct
4 |
5 | from tornado import gen
6 |
7 | from microproxy.layer.base import ProxyLayer
8 |
9 |
class TransparentLayer(ProxyLayer):
    """Proxy layer for transparent (iptables REDIRECT) interception."""

    # Linux sockopt exposing the pre-REDIRECT destination address.
    SO_ORIGINAL_DST = 80

    def __init__(self, context, dest_addr_resolver=None, **kwargs):
        super(TransparentLayer, self).__init__(context, **kwargs)
        self.dest_addr_resolver = dest_addr_resolver or self._get_dest_addr_resolver()

    def _get_dest_addr_resolver(self):  # pragma: no cover
        # Only the Linux SO_ORIGINAL_DST mechanism is implemented.
        if platform.system() != "Linux":
            raise NotImplementedError
        return self._linux_get_dest_addr

    def _linux_get_dest_addr(self):  # pragma: no cover
        """Recover the original (address, port) the client connected to."""
        src_stream = self.context.src_stream
        sock_opt = src_stream.socket.getsockopt(
            socket.SOL_IP, self.SO_ORIGINAL_DST, 16)
        _, port, a1, a2, a3, a4 = struct.unpack("!HHBBBBxxxxxxxx", sock_opt)
        address = ".".join(str(octet) for octet in (a1, a2, a3, a4))
        return (address, port)

    @gen.coroutine
    def process_and_return_context(self):
        host, port = self.dest_addr_resolver()
        self.context.dest_stream = yield self.create_dest_stream((host, port))
        self.context.host = host
        self.context.port = port

        raise gen.Return(self.context)
41 |
--------------------------------------------------------------------------------
/microproxy/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import logging.config
3 | from zmq.log.handlers import PUBHandler
4 |
5 |
class ProxyLogger(object):
    """Central logging setup for the proxy process."""

    formatter = logging.Formatter("%(asctime)s - %(name)-30s - %(levelname)-8s - %(message)s")

    @classmethod
    def init_proxy_logger(cls, config):
        """Configure the root logger from the proxy configuration.

        A user-supplied logging config file takes precedence; otherwise
        level and handler are derived from log_level / log_file.
        """
        if config["logger_config"]:
            # NOTE: If user specify the logging config file,
            # used it to configure the logger behavior.
            # Moreover, the disable_existing_loggers is necessary,
            # since most of our code will get logger before we initialize it.
            logging.config.fileConfig(config["logger_config"], disable_existing_loggers=False)
        else:
            # NOTE: Otherwise, we start setup the logger based on other configure value.
            root_logger = logging.getLogger()
            log_level = getattr(logging, config["log_level"].upper())
            root_logger.setLevel(log_level)

            if config["log_file"]:
                cls.register_file_handler(config["log_file"])
            else:
                cls.register_stream_handler()

    @classmethod
    def register_zmq_handler(cls, zmq_socket):  # pragma: no cover
        """Attach a ZeroMQ PUB handler so viewers can subscribe to logs."""
        handler = PUBHandler(zmq_socket)
        handler.root_topic = "logger"
        logging.getLogger().addHandler(handler)

    @classmethod
    def register_file_handler(cls, filename):  # pragma: no cover
        """Attach a UTF-8 file handler using the shared formatter."""
        # FIX: locals renamed from camelCase (fileHandler) to snake_case
        # per project/PEP 8 convention.
        file_handler = logging.FileHandler(filename, encoding="utf8")
        file_handler.setFormatter(cls.formatter)
        logging.getLogger().addHandler(file_handler)

    @classmethod
    def register_stream_handler(cls):  # pragma: no cover
        """Attach a stderr stream handler using the shared formatter."""
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(cls.formatter)
        logging.getLogger().addHandler(stream_handler)

    @classmethod
    def get_logger(cls, name):  # pragma: no cover
        """Return the stdlib logger with the given name."""
        return logging.getLogger(name)
56 |
--------------------------------------------------------------------------------
/microproxy/protocol/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/protocol/__init__.py
--------------------------------------------------------------------------------
/microproxy/protocol/http1.py:
--------------------------------------------------------------------------------
1 | import h11
2 | from h11 import Connection as H11Connection
3 | from h11 import (
4 | Request, InformationalResponse, Response, Data, EndOfMessage,
5 | ConnectionClosed)
6 |
7 | from tornado import gen
8 |
9 | from microproxy.context import HttpRequest, HttpResponse
10 | from microproxy.exception import ProtocolError
11 | from microproxy.log import ProxyLogger
12 | logger = ProxyLogger.get_logger(__name__)
13 |
14 |
15 | class Connection(H11Connection):
def __init__(self, our_role, io_stream, conn_type=None,
             readonly=False, on_request=None, on_response=None,
             on_info_response=None, on_unhandled=None, **kwargs):
    """Wrap an h11 connection around an iostream.

    Event callbacks default to on_unhandled (or the class's logging
    fallback) so callers only supply the events they care about.
    """
    super(Connection, self).__init__(our_role, **kwargs)
    fallback = on_unhandled or self._default_on_unhandled

    self.io_stream = io_stream
    self.conn_type = conn_type or str(our_role)
    self.readonly = readonly
    self.on_request = on_request or fallback
    self.on_response = on_response or fallback
    self.on_info_response = on_info_response or fallback
    # Parsing state for the in-flight HTTP message.
    self._req = None
    self._resp = None
    self._body_chunks = []
    self.unhandled_events = None
32 |
def send(self, event):
    """Serialize event through h11 and, unless readonly, write it out."""
    logger.debug("event send to {0}: {1}".format(self.conn_type, type(event)))
    serialized = super(Connection, self).send(event)
    if not self.readonly:
        self.io_stream.write(serialized)
38 |
@gen.coroutine
def read_bytes(self):
    """Read whatever is available on the stream and feed it to h11."""
    chunk = yield self.io_stream.read_bytes(
        self.io_stream.max_buffer_size, partial=True)
    self.receive(chunk)
44 |
def receive(self, data, raise_exception=False):
    """Feed raw bytes into h11 and dispatch the resulting events.

    Fires on_request / on_response / on_info_response with microproxy
    HttpRequest/HttpResponse objects once a full message has been parsed.
    Unless raise_exception is True, errors are logged and swallowed so a
    broken peer cannot kill the caller's loop.
    """
    try:
        logger.debug("data received from {0} with length {1}".format(self.conn_type, len(data)))
        self.receive_data(data)
        while True:
            event = self.next_event()
            self._log_event(event)
            if isinstance(event, Request):
                if self._req:  # pragma: no cover
                    # NOTE: guess that never happen because h11 should help us handle http state
                    raise ProtocolError("http1 connection had received request")
                self._req = event
            elif isinstance(event, InformationalResponse):
                # 1xx responses are forwarded immediately; they carry no body.
                self.on_info_response(HttpResponse(
                    version=self._parse_version(event),
                    reason=event.reason,
                    code=str(event.status_code),
                    headers=event.headers))
            elif isinstance(event, Response):
                self._resp = event
                if self.our_state is h11.SWITCHED_PROTOCOL:
                    # After a protocol switch h11 emits no EndOfMessage,
                    # so the response is flushed right here.
                    self.on_response(HttpResponse(
                        version=self._parse_version(self._resp),
                        reason=self._resp.reason,
                        code=str(self._resp.status_code),
                        headers=self._resp.headers,
                        body=b"".join(self._body_chunks)))
                    self._cleanup_after_received()
            elif isinstance(event, Data):
                # Accumulate body chunks until EndOfMessage arrives.
                self._body_chunks.append(bytes(event.data))
            elif isinstance(event, EndOfMessage):
                if self.our_role is h11.SERVER:
                    if not self._req:  # pragma: no cover
                        # NOTE: guess that never happen because h11 should help us handle http state
                        raise ProtocolError("EndOfMessage received, but not request found")
                    self.on_request(HttpRequest(
                        version=self._parse_version(self._req),
                        method=self._req.method,
                        path=self._req.target,
                        headers=self._req.headers,
                        body=b"".join(self._body_chunks)))
                else:
                    if not self._resp:  # pragma: no cover
                        # NOTE: guess that never happen because h11 should help us handle http state
                        raise ProtocolError("EndOfMessage received, but not response found")
                    self.on_response(HttpResponse(
                        version=self._parse_version(self._resp),
                        reason=self._resp.reason,
                        code=str(self._resp.status_code),
                        headers=self._resp.headers,
                        body=b"".join(self._body_chunks)))
                self._cleanup_after_received()
                break
            elif isinstance(event, ConnectionClosed):  # pragma: no cover
                raise ProtocolError("Should closed the connection")
            elif event is h11.NEED_DATA:
                # Not enough bytes for the next event; wait for more data.
                break
            elif event is h11.PAUSED:  # pragma: no cover
                break
            else:  # pragma: no cover
                logger.warning("event recevied was not handled from {0}: {1}".format(self.conn_type, repr(event)))
    except Exception as e:  # pragma: no cover
        if raise_exception:
            raise
        logger.error("Exception on {0}".format(self.conn_type))
        logger.exception(e)
111 |
112 | def _log_event(self, event):
113 | if isinstance(event, Data): # Note: Data event that would print to mush info
114 | logger.debug("event recevied from {0}: {1}".format(self.conn_type, type(event)))
115 | else:
116 | logger.debug("event recevied from {0}: {1}".format(self.conn_type, repr(event)))
117 |
118 | def _parse_version(self, http_content):
119 | try:
120 | return "HTTP/{0}".format(http_content.http_version)
121 | except:
122 | return "HTTP/1.1"
123 |
124 | def _cleanup_after_received(self):
125 | self._req = None
126 | self._resp = None
127 | self._body_chunks = []
128 | if self.our_state is h11.MUST_CLOSE:
129 | self.io_stream.close()
130 |
131 | def send_request(self, request):
132 | logger.debug("sent request to {0}: {1}".format(self.conn_type, request))
133 | self.send(h11.Request(
134 | method=request.method,
135 | target=request.path,
136 | headers=request.headers,
137 | ))
138 | if request.body:
139 | self.send(h11.Data(data=request.body))
140 | self.send(h11.EndOfMessage())
141 |
142 | def send_response(self, response):
143 | logger.debug("sent response to {0}: {1}".format(self.conn_type, response))
144 | self.send(h11.Response(
145 | status_code=int(response.code),
146 | reason=response.reason,
147 | headers=response.headers,
148 | ))
149 | if response.body:
150 | self.send(h11.Data(data=response.body))
151 |
152 | if not self.our_state == h11.SWITCHED_PROTOCOL:
153 | self.send(h11.EndOfMessage())
154 |
155 | if self.our_state is h11.MUST_CLOSE:
156 | self.io_stream.close()
157 |
158 | def send_info_response(self, response):
159 | logger.debug("sent info response to {0}: {1}".format(self.conn_type, response))
160 | self.send(h11.InformationalResponse(
161 | status_code=int(response.code),
162 | headers=response.headers,
163 | reason=response.reason,
164 | ))
165 |
166 | def _default_on_unhandled(self, *args): # pragma: no cover
167 | logger.warn("unhandled event: {0}".format(args))
168 | self.unhandled_events.append(args)
169 |
170 | def closed(self):
171 | return self.our_state is h11.MUST_CLOSE or self.io_stream.closed()
172 |
--------------------------------------------------------------------------------
/microproxy/protocol/tls.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from OpenSSL import SSL, crypto
4 | import certifi
5 | import construct
6 |
7 | from tornado import gen
8 | from microproxy.pyca_tls import _constructs
9 | from microproxy.utils import HAS_ALPN
10 | from microproxy.exception import ProtocolError
11 |
12 | from microproxy.log import ProxyLogger
13 | logger = ProxyLogger.get_logger(__name__)
14 |
# Cipher suites offered on both sides of the proxy, in OpenSSL cipher-list
# syntax (preference order; "!"-prefixed entries are exclusions).
# NOTE(review): the name is misspelled ("SUPPROT") but it is referenced by
# create_basic_sslcontext below, so it is kept as-is in this change.
_SUPPROT_CIPHERS_SUITES = (
    "ECDHE-RSA-AES128-GCM-SHA256",
    "ECDHE-ECDSA-AES128-GCM-SHA256",
    "ECDHE-RSA-AES256-GCM-SHA384",
    "ECDHE-ECDSA-AES256-GCM-SHA384",
    "DHE-RSA-AES128-GCM-SHA256",
    "DHE-DSS-AES128-GCM-SHA256",
    "kEDH+AESGCM",
    "ECDHE-RSA-AES128-SHA256",
    "ECDHE-ECDSA-AES128-SHA256",
    "ECDHE-RSA-AES128-SHA",
    "ECDHE-ECDSA-AES128-SHA",
    "ECDHE-RSA-AES256-SHA384",
    "ECDHE-ECDSA-AES256-SHA384",
    "ECDHE-RSA-AES256-SHA",
    "ECDHE-ECDSA-AES256-SHA",
    "DHE-RSA-AES128-SHA256",
    "DHE-RSA-AES128-SHA",
    "DHE-DSS-AES128-SHA256",
    "DHE-RSA-AES256-SHA256",
    "DHE-DSS-AES256-SHA",
    "DHE-RSA-AES256-SHA",
    "ECDHE-RSA-DES-CBC3-SHA",
    "ECDHE-ECDSA-DES-CBC3-SHA",
    "AES128-GCM-SHA256",
    "AES256-GCM-SHA384",
    "AES128-SHA256",
    "AES256-SHA256",
    "AES128-SHA",
    "AES256-SHA",
    "AES",
    "DES-CBC3-SHA",
    "HIGH",
    "!aNULL",
    "!eNULL",
    "!EXPORT",
    "!DES",
    "!RC4",
    "!MD5",
    "!PSK",
    "!aECDH",
    "!EDH-DSS-DES-CBC3-SHA",
    "!EDH-RSA-DES-CBC3-SHA",
    "!KRB5-DES-CBC3-SHA"
)
60 |
61 |
def create_basic_sslcontext():
    """Build an SSL.Context with the proxy's baseline TLS settings.

    Disables SSLv2/SSLv3, prefers the server's cipher ordering, and limits
    ciphers to the module-wide suite list.
    """
    context = SSL.Context(SSL.SSLv23_METHOD)
    options = SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_CIPHER_SERVER_PREFERENCE
    context.set_options(options)
    context.set_cipher_list(":".join(_SUPPROT_CIPHERS_SUITES))
    # ECDHE cipher suites require an ephemeral ECDH curve to be configured.
    context.set_tmp_ecdh(crypto.get_elliptic_curve('prime256v1'))
    return context
71 |
72 |
def certificate_verify_cb(conn, x509, err_num, err_depth, verify_status):
    """pyOpenSSL verify callback.

    Simply trusts the verification status OpenSSL already computed for this
    certificate in the chain.
    """
    return verify_status
75 |
76 |
def create_dest_sslcontext(insecure=False, trusted_ca_certs="", alpn=None):
    """Create the TLS context used for the proxy-to-destination connection."""
    ctx = create_basic_sslcontext()

    if insecure:
        # Accept any certificate without validation.
        ctx.set_verify(SSL.VERIFY_NONE, certificate_verify_cb)
    else:
        # Validate against the provided CA bundle, defaulting to certifi's.
        ctx.load_verify_locations(trusted_ca_certs or certifi.where())
        ctx.set_verify(
            SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
            certificate_verify_cb)

    if alpn and HAS_ALPN:
        ctx.set_alpn_protos(alpn)

    return ctx
93 |
94 |
def create_src_sslcontext(cert, priv_key, alpn_callback=None,
                          info_callback=None):
    """Create the TLS context used for the client-to-proxy connection.

    Presents the given certificate/private key and optionally registers ALPN
    selection and handshake-info callbacks.
    """
    ctx = create_basic_sslcontext()
    ctx.use_certificate(cert)
    ctx.use_privatekey(priv_key)

    if alpn_callback and HAS_ALPN:
        ctx.set_alpn_select_callback(alpn_callback)
    if info_callback:
        ctx.set_info_callback(info_callback)

    return ctx
107 |
108 |
class TlsClientHello(object):
    """Parsed view over a raw TLS ClientHello record."""

    SUPPORT_PROTOCOLS = ["http/1.1", "h2"]

    def __init__(self, raw_client_hello):
        try:
            self._client_hello = _constructs.ClientHello.parse(raw_client_hello)
        except construct.ConstructError as e:
            raise ProtocolError(
                'Cannot parse Client Hello: {0}, Raw Client Hello: {1}'.format(
                    repr(e), raw_client_hello.encode("hex"))
            )

    def raw(self):  # pragma: no cover
        """Return the underlying parsed construct container."""
        return self._client_hello

    @property
    def cipher_suites(self):  # pragma: no cover
        return self._client_hello.cipher_suites.cipher_suites

    @property
    def sni(self):
        """Hostname from the SNI extension (type 0x00), if present."""
        # TODO: hostname validation is required.
        for ext in self._client_hello.extensions.extensions:
            if (ext.type == 0x00 and len(ext.server_names) == 1 and
                    ext.server_names[0].name_type == 0):
                return ext.server_names[0].host_name.decode("idna")

    @property
    def alpn_protocols(self):
        """Supported protocols from the ALPN extension (type 0x10), if any."""
        for ext in self._client_hello.extensions.extensions:
            if ext.type == 0x10:
                supported = []
                for protocol in list(ext.alpn_protocols):
                    if protocol in self.SUPPORT_PROTOCOLS:
                        supported.append(bytes(protocol))
                return supported

    def __repr__(self):
        return "TlsClientHello( sni: %s alpn_protocols: %s, cipher_suites: %s)" % \
            (self.sni, self.alpn_protocols, self.cipher_suites)
153 |
154 |
class ServerConnection(object):
    """Server-side (client-facing) TLS wrapper around an iostream."""

    def __init__(self, stream):
        self.stream = stream
        self.alpn_resolver = None
        self.on_alpn = None

    def start_tls(self, cert, priv_key, select_alpn=None):
        """Upgrade the stream to server-side TLS presenting the given identity."""
        self.select_alpn = select_alpn
        context = create_src_sslcontext(
            cert, priv_key, alpn_callback=self.alpn_callback)
        return self.stream.start_tls(server_side=True, ssl_options=context)

    def alpn_callback(self, conn, alpns):
        # Always answer with the protocol chosen when start_tls was called.
        return self.select_alpn
169 |
170 |
class ClientConnection(object):
    """Client-side (destination-facing) TLS wrapper around an iostream."""

    def __init__(self, stream):
        self.stream = stream

    @gen.coroutine
    def start_tls(self, insecure=False, trusted_ca_certs="",
                  hostname=None, alpns=None):
        """Upgrade the stream to client-side TLS and resolve to the TLS stream."""
        context = create_dest_sslcontext(insecure, trusted_ca_certs, alpns)
        tls_stream = yield self.stream.start_tls(
            server_side=False, ssl_options=context, server_hostname=hostname)
        raise gen.Return(tls_stream)
183 |
--------------------------------------------------------------------------------
/microproxy/proxy.py:
--------------------------------------------------------------------------------
1 | from tornado import gen
2 |
3 | from microproxy.tornado_ext.tcpserver import TCPServer
4 | from microproxy.layer import manager as layer_manager
5 | from microproxy.context import LayerContext
6 | from microproxy.utils import curr_loop
7 |
8 | from microproxy.log import ProxyLogger
9 | logger = ProxyLogger.get_logger(__name__)
10 |
11 |
class ProxyServer(TCPServer):
    """TCP server that hands every accepted stream to the layer manager."""

    def __init__(self, server_state, **kwargs):
        super(ProxyServer, self).__init__(**kwargs)
        self.server_state = server_state
        self.config = server_state.config

    @gen.coroutine
    def handle_stream(self, stream):
        peername = stream.fileno().getpeername()
        src_info = "{0}:{1}".format(*peername)
        try:
            initial_context = LayerContext(
                mode=self.config["mode"], src_stream=stream, src_info=src_info)

            logger.debug("Start new layer manager")
            first_layer = layer_manager.get_first_layer(initial_context)
            yield layer_manager.run_layers(
                self.server_state, first_layer, initial_context)
        except Exception as e:
            # NOTE: not handle exception, log it and close the stream
            logger.exception("Unhandled exception occured at {0} with {1}".format(
                src_info, e))
            stream.close()

    def start_listener(self):
        """Bind to the configured host/port and log the listening address."""
        self.listen(self.config["port"], self.config["host"])
        logger.info(
            "proxy server is listening at {0}:{1}".format(
                self.config["host"], self.config["port"]))
40 |
41 |
def start_tcp_server(server_state):
    """Create a ProxyServer on the current IO loop and start listening."""
    server = ProxyServer(server_state, io_loop=curr_loop())
    server.start_listener()
46 |
--------------------------------------------------------------------------------
/microproxy/pyca_tls/__init__.py:
--------------------------------------------------------------------------------
1 | # This file is dual licensed under the terms of the Apache License, Version
2 | # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 | # for complete details.
4 |
5 | from __future__ import absolute_import, division, print_function
6 |
--------------------------------------------------------------------------------
/microproxy/pyca_tls/_constructs.py:
--------------------------------------------------------------------------------
1 | # This file is dual licensed under the terms of the Apache License, Version
2 | # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 | # for complete details.
4 |
5 |
6 | from construct import (
7 | Array,
8 | Bytes,
9 | Struct,
10 | VarInt,
11 | Int8ub,
12 | Int16ub,
13 | Int24ub,
14 | Int32ub,
15 | PascalString,
16 | Embedded,
17 | Prefixed,
18 | Range,
19 | GreedyRange,
20 | Switch,
21 | Optional,
22 | )
23 |
# TLS protocol version field: an SSL/TLS major.minor byte pair.
ProtocolVersion = "version" / Struct(
    "major" / Int8ub,
    "minor" / Int8ub,
)

# Record-layer envelopes: plaintext, compressed and encrypted records all
# share the same type/version/length/fragment framing.
TLSPlaintext = "TLSPlaintext" / Struct(
    "type" / Int8ub,
    ProtocolVersion,
    "length" / Int16ub,  # TODO: Reject packets with length > 2 ** 14
    "fragment" / Bytes(lambda ctx: ctx.length),
)

TLSCompressed = "TLSCompressed" / Struct(
    "type" / Int8ub,
    ProtocolVersion,
    "length" / Int16ub,  # TODO: Reject packets with length > 2 ** 14 + 1024
    "fragment" / Bytes(lambda ctx: ctx.length),
)

TLSCiphertext = "TLSCiphertext" / Struct(
    "type" / Int8ub,
    ProtocolVersion,
    "length" / Int16ub,  # TODO: Reject packets with length > 2 ** 14 + 2048
    "fragment" / Bytes(lambda ctx: ctx.length),
)

# Handshake random: 32-bit timestamp followed by 28 random bytes.
Random = "random" / Struct(
    "gmt_unix_time" / Int32ub,
    "random_bytes" / Bytes(28),
)

# Length-prefixed session identifier.
SessionID = "session_id" / Struct(
    "length" / Int8ub,
    "session_id" / Bytes(lambda ctx: ctx.length),
)
59 |
# Cipher-suite list: length is in bytes, each suite ID is two bytes.
CipherSuites = "cipher_suites" / Struct(
    "length" / Int16ub,  # TODO: Reject packets of length 0
    Array(lambda ctx: ctx.length // 2, "cipher_suites" / Int16ub),
)

CompressionMethods = "compression_methods" / Struct(
    "length" / Int8ub,  # TODO: Reject packets of length 0
    Array(lambda ctx: ctx.length, "compression_methods" / Int8ub),
)

ServerName = Struct(
    "type" / Int8ub,
    "name" / PascalString("length" / Int16ub),
)

# Server Name Indication extension body (extension type 0x00 below).
SNIExtension = Prefixed(
    Int16ub,
    Struct(
        Int16ub,
        "server_names" / GreedyRange(
            "server_name" / Struct(
                "name_type" / Int8ub,
                "host_name" / PascalString("length" / Int16ub),
            )
        )
    )
)

# ALPN extension body (extension type 0x10 below): list of protocol names.
ALPNExtension = Prefixed(
    Int16ub,
    Struct(
        Int16ub,
        "alpn_protocols" / GreedyRange(
            "name" / PascalString(Int8ub),
        ),
    )
)

# Fallback for any extension type this parser does not understand.
UnknownExtension = Struct(
    "bytes" / PascalString("length" / Int16ub)
)

# A single extension: the body layout is switched on the 16-bit type field.
Extension = "Extension" / Struct(
    "type" / Int16ub,
    Embedded(
        Switch(
            lambda ctx: ctx.type,
            {
                0x00: SNIExtension,
                0x10: ALPNExtension,
            },
            default=UnknownExtension
        )
    )
)

extensions = "extensions" / Struct(
    Int16ub,
    "extensions" / GreedyRange(Extension)
)
120 |
# ClientHello handshake body: this is what TlsClientHello in protocol/tls.py
# parses to extract SNI and ALPN.
ClientHello = "ClientHello" / Struct(
    ProtocolVersion,
    Random,
    SessionID,
    CipherSuites,
    CompressionMethods,
    extensions,
)

ServerHello = "ServerHello" / Struct(
    ProtocolVersion,
    Random,
    SessionID,
    "cipher_suite" / Bytes(2),
    "compression_method" / Int8ub,
    extensions,
)

ClientCertificateType = "certificate_types" / Struct(
    "length" / Int8ub,  # TODO: Reject packets of length 0
    Array(lambda ctx: ctx.length, "certificate_types" / Int8ub),
)
143 |
SignatureAndHashAlgorithm = "algorithms" / Struct(
    "hash" / Int8ub,
    "signature" / Int8ub,
)

SupportedSignatureAlgorithms = "supported_signature_algorithms" / Struct(
    "supported_signature_algorithms_length" / Int16ub,
    # TODO: Reject packets of length 0
    # NOTE(review): uses "/" rather than "//" — relies on Python 2 integer
    # division (each algorithm entry is 2 bytes); confirm before porting.
    Array(
        lambda ctx: ctx.supported_signature_algorithms_length / 2,
        SignatureAndHashAlgorithm,
    ),
)

DistinguishedName = "certificate_authorities" / Struct(
    "length" / Int16ub,
    "certificate_authorities" / Bytes(lambda ctx: ctx.length),
)

CertificateRequest = "CertificateRequest" / Struct(
    ClientCertificateType,
    SupportedSignatureAlgorithms,
    DistinguishedName,
)

# Ephemeral Diffie-Hellman parameters, each as a length-prefixed byte string.
ServerDHParams = "ServerDHParams" / Struct(
    "dh_p_length" / Int16ub,
    "dh_p" / Bytes(lambda ctx: ctx.dh_p_length),
    "dh_g_length" / Int16ub,
    "dh_g" / Bytes(lambda ctx: ctx.dh_g_length),
    "dh_Ys_length" / Int16ub,
    "dh_Ys" / Bytes(lambda ctx: ctx.dh_Ys_length),
)

PreMasterSecret = "pre_master_secret" / Struct(
    ProtocolVersion,
    "random_bytes" / Bytes(46),
)

ASN1Cert = "ASN1Cert" / Struct(
    "length" / Int32ub,  # TODO: Reject packets with length not in 1..2^24-1
    "asn1_cert" / Bytes(lambda ctx: ctx.length),
)

Certificate = "Certificate" / Struct(
    # TODO: Reject packets with length > 2 ** 24 - 1
    "certificates_length" / Int32ub,
    "certificates_bytes" / Bytes(lambda ctx: ctx.certificates_length),
)

# Generic handshake message framing: the body is parsed separately by type.
Handshake = "Handshake" / Struct(
    "msg_type" / Int8ub,
    "length" / Int24ub,
    "body" / Bytes(lambda ctx: ctx.length),
)

Alert = "Alert" / Struct(
    "level" / Int8ub,
    "description" / Int8ub,
)
204 |
--------------------------------------------------------------------------------
/microproxy/server_state.py:
--------------------------------------------------------------------------------
1 | """This module contains two helper function to initialize and get the server state."""
2 |
3 | from microproxy.context import ServerContext
4 | from microproxy.interceptor import Interceptor
5 | from microproxy.interceptor import MsgPublisher
6 | from microproxy.interceptor import PluginManager
7 | from microproxy.cert import CertStore
8 |
9 |
def _init_cert_store(config):
    # Thin factory, kept for symmetry with _init_interceptor.
    return CertStore(config)
12 |
13 |
def _init_interceptor(config, publish_socket):
    """Wire the interceptor to its plugin manager and message publisher."""
    plugins = PluginManager(config)
    publisher = MsgPublisher(config, zmq_socket=publish_socket)
    return Interceptor(plugin_manager=plugins, msg_publisher=publisher)
19 |
20 |
def init_server_state(config, publish_socket):
    """Initialize the ServerContext by config.

    Args:
        config (dict): The config object pass by user.
        publish_socket (object): The zmq pub socket for interceptor

    Returns:
        object: ServerContext.
    """
    store = _init_cert_store(config)
    intercept = _init_interceptor(config, publish_socket)

    return ServerContext(
        config=config, interceptor=intercept, cert_store=store)
36 |
--------------------------------------------------------------------------------
/microproxy/test/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from logging import NullHandler
3 |
# Silence noisy loggers during test runs: attach a NullHandler and stop
# propagation so nothing reaches the root logger.
for _name in ("protocol.http2", "tornado_ext", "tornado"):
    _quiet = logging.getLogger(_name)
    _quiet.addHandler(NullHandler())
    _quiet.propagate = False
10 |
--------------------------------------------------------------------------------
/microproxy/test/context/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/context/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/context/test_base.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from microproxy.context.base import parse_version
4 |
5 |
class TestBase(unittest.TestCase):
    """Tests for microproxy.context.base.parse_version.

    Uses assertEqual: assertEquals is a deprecated alias.
    """

    def test_parse_release_version(self):
        # Plain release string.
        self.assertEqual((0, 4, 0), parse_version("0.4.0"))

    def test_parse_minus_dev_version(self):
        # "-dev" suffix is stripped.
        self.assertEqual((0, 4, 0), parse_version("0.4.0-dev"))

    def test_parse_plus_dev_version(self):
        # "+dev" (local version) suffix is stripped.
        self.assertEqual((0, 4, 0), parse_version("0.4.0+dev"))
15 |
--------------------------------------------------------------------------------
/microproxy/test/context/test_http.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from microproxy.context import HttpRequest, HttpResponse, HttpHeaders
3 |
4 |
class TestHttp(unittest.TestCase):
    """Round-trip serialization tests for HttpRequest / HttpResponse."""

    def test_req_serialize(self):
        req = HttpRequest(version="1.1",
                          method="GET",
                          path="/hello",
                          headers=[("Content-Type", "text/html")],
                          body="body")
        data = req.serialize()
        self.assertEqual(data["version"], "1.1")
        self.assertEqual(data["method"], "GET")
        self.assertEqual(data["path"], "/hello")
        self.assertEqual(data["headers"], [("Content-Type", "text/html")])
        # Bodies are base64-encoded for transport.
        self.assertEqual(data["body"], "body".encode("base64"))

    def test_resp_serialize(self):
        resp = HttpResponse(version="1.1",
                            code="200",
                            reason="OK",
                            headers=[("Content-Type", "text/html")],
                            body="body")
        data = resp.serialize()
        self.assertEqual(data["version"], "1.1")
        self.assertEqual(data["code"], "200")
        self.assertEqual(data["reason"], "OK")
        self.assertEqual(data["headers"], [("Content-Type", "text/html")])
        self.assertEqual(data["body"], "body".encode("base64"))

    def test_req_deserialize(self):
        req = HttpRequest.deserialize({
            "version": "1.1",
            "method": "GET",
            "path": "/hello",
            "headers": [("Content-Type", "text/html")],
            "body": "body".encode("base64"),
        })
        self.assertEqual(req.version, "1.1")
        self.assertEqual(req.method, "GET")
        self.assertEqual(req.path, "/hello")
        self.assertEqual(req.headers, HttpHeaders([("Content-Type", "text/html")]))
        self.assertEqual(req.body, "body")

    def test_resp_deserialize(self):
        resp = HttpResponse.deserialize({
            "version": "1.1",
            "code": "200",
            "reason": "OK",
            "headers": [("Content-Type", "text/html")],
            "body": "body".encode("base64"),
        })
        self.assertEqual(resp.version, "1.1")
        self.assertEqual(resp.code, "200")
        self.assertEqual(resp.reason, "OK")
        self.assertEqual(resp.headers, HttpHeaders([("Content-Type", "text/html")]))
        self.assertEqual(resp.body, "body")
60 |
class TestHttpHeaders(unittest.TestCase):
    """Behavioral tests for the ordered, dict-like HttpHeaders container."""

    def setUp(self):
        self.headers = HttpHeaders([
            ("Host", "localhost"),
            ("Accept", "application/xml"),
            ("Yayaya", "Yoyoyo")])

    def test_same_order_iteration(self):
        # Iteration must preserve insertion order.
        self.assertEqual(
            list(self.headers),
            [("Host", "localhost"),
             ("Accept", "application/xml"),
             ("Yayaya", "Yoyoyo")])

    def test_contains(self):
        self.assertTrue("Host" in self.headers)
        self.assertFalse("Hahaha" in self.headers)

    def test_getitem(self):
        self.assertEqual(self.headers["Host"], "localhost")

    def test_setitem(self):
        self.headers["hahaha"] = "hey!!!"
        self.assertEqual(len(self.headers), 4)
        self.assertEqual(self.headers["hahaha"], "hey!!!")

    def test_eq(self):
        same = HttpHeaders([
            ("Host", "localhost"),
            ("Accept", "application/xml"),
            ("Yayaya", "Yoyoyo")])
        self.assertEqual(self.headers, same)

    def test_neq(self):
        self.assertNotEqual(self.headers, HttpHeaders([]))

    def test_construct_with_dict(self):
        headers = HttpHeaders(dict(
            Host="localhost", Accept="application/xml",
            Yayaya="Yoyoyo"))
        self.assertEqual(len(headers), 3)
        self.assertEqual(headers["Host"], "localhost")
        self.assertEqual(headers["Accept"], "application/xml")
        self.assertEqual(headers["Yayaya"], "Yoyoyo")

    def test_construct_failed(self):
        # Anything that is not a mapping or pair list is rejected.
        with self.assertRaises(ValueError):
            HttpHeaders("aaa")


if __name__ == "__main__":
    unittest.main()
116 |
--------------------------------------------------------------------------------
/microproxy/test/context/test_layer.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from microproxy.context import LayerContext
3 |
4 |
class TestLayerContext(unittest.TestCase):
    """Validation tests for LayerContext construction."""

    def test_invalid_mode(self):
        # An unknown proxy mode must be rejected at construction time.
        with self.assertRaises(ValueError):
            LayerContext(mode="test")
9 |
--------------------------------------------------------------------------------
/microproxy/test/context/test_viewer.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from microproxy.context import ViewerContext
4 | from microproxy.version import VERSION
5 |
6 |
class TestViewerContext(unittest.TestCase):
    """Deserialization test for ViewerContext built from a plain dict."""

    def test_deserialize(self):
        # Payload shaped like the messages the proxy publishes to viewers.
        data = {
            "scheme": "https",
            "host": "localhost",
            "port": 8080,
            "path": "/index",
            "request": {
                "version": "1.1",
                "method": "GET",
                "path": "/index",
                "headers": [["Content-Type", "text/html"]],
            },
            "response": {
                "version": "1.1",
                "code": "200",
                "reason": "OK",
                "headers": [["Content-Type", "text/html"]],
                "body": "",
            },
            "client_tls": {
                "sni": "localhost",
                "alpn": "http/1.1",
                "cipher": "AES",
            },
            "server_tls": {
                "sni": "localhost",
                "alpn": "http/1.1",
                "cipher": "AES",
            },
        }
        viewer_context = ViewerContext.deserialize(data)

        self.assertIsInstance(viewer_context, ViewerContext)

        # Top-level fields; "version" is absent from the payload, so it is
        # expected to default to the running microproxy VERSION.
        self.assertEqual("https", viewer_context.scheme)
        self.assertEqual("localhost", viewer_context.host)
        self.assertEqual(8080, viewer_context.port)
        self.assertEqual("/index", viewer_context.path)
        self.assertEqual(VERSION, viewer_context.version)

        # Nested request is deserialized into a full HttpRequest.
        self.assertEqual("1.1", viewer_context.request.version)
        self.assertEqual("GET", viewer_context.request.method)
        self.assertEqual("/index", viewer_context.request.path)
        self.assertEqual(1, len(viewer_context.request.headers))
        self.assertEqual("text/html",
                         viewer_context.request.headers["Content-Type"])

        # Nested response is deserialized into a full HttpResponse.
        self.assertEqual("1.1", viewer_context.response.version)
        self.assertEqual("200", viewer_context.response.code)
        self.assertEqual("OK", viewer_context.response.reason)
        self.assertEqual("", viewer_context.response.body)
        self.assertEqual(1, len(viewer_context.response.headers))
        self.assertEqual("text/html",
                         viewer_context.response.headers["Content-Type"])

        # TLS info for both sides of the proxied connection.
        self.assertEqual("http/1.1", viewer_context.client_tls.alpn)
        self.assertEqual("localhost", viewer_context.client_tls.sni)
        self.assertEqual("AES", viewer_context.client_tls.cipher)

        self.assertEqual("http/1.1", viewer_context.server_tls.alpn)
        self.assertEqual("localhost", viewer_context.server_tls.sni)
        self.assertEqual("AES", viewer_context.server_tls.cipher)
70 |
--------------------------------------------------------------------------------
/microproxy/test/data/replay.script:
--------------------------------------------------------------------------------
1 | {"client_tls": null, "request": {"body": "", "timestamp": 1478677229628324, "headers": [["host", "example.com"], ["user-agent", "curl/7.50.3"], ["accept", "*/*"]], "version": "HTTP/1.1", "path": "/", "method": "GET"}, "port": 80, "host": "example.com", "version": "0.4.1+dev", "server_tls": null, "path": "/", "scheme": "http", "response": {"body": "PCFkb2N0eXBlIGh0bWw+CjxodG1sPgo8aGVhZD4KICAgIDx0aXRsZT5FeGFtcGxlIERvbWFpbjwv\ndGl0bGU+CgogICAgPG1ldGEgY2hhcnNldD0idXRmLTgiIC8+CiAgICA8bWV0YSBodHRwLWVxdWl2\nPSJDb250ZW50LXR5cGUiIGNvbnRlbnQ9InRleHQvaHRtbDsgY2hhcnNldD11dGYtOCIgLz4KICAg\nIDxtZXRhIG5hbWU9InZpZXdwb3J0IiBjb250ZW50PSJ3aWR0aD1kZXZpY2Utd2lkdGgsIGluaXRp\nYWwtc2NhbGU9MSIgLz4KICAgIDxzdHlsZSB0eXBlPSJ0ZXh0L2NzcyI+CiAgICBib2R5IHsKICAg\nICAgICBiYWNrZ3JvdW5kLWNvbG9yOiAjZjBmMGYyOwogICAgICAgIG1hcmdpbjogMDsKICAgICAg\nICBwYWRkaW5nOiAwOwogICAgICAgIGZvbnQtZmFtaWx5OiAiT3BlbiBTYW5zIiwgIkhlbHZldGlj\nYSBOZXVlIiwgSGVsdmV0aWNhLCBBcmlhbCwgc2Fucy1zZXJpZjsKICAgICAgICAKICAgIH0KICAg\nIGRpdiB7CiAgICAgICAgd2lkdGg6IDYwMHB4OwogICAgICAgIG1hcmdpbjogNWVtIGF1dG87CiAg\nICAgICAgcGFkZGluZzogNTBweDsKICAgICAgICBiYWNrZ3JvdW5kLWNvbG9yOiAjZmZmOwogICAg\nICAgIGJvcmRlci1yYWRpdXM6IDFlbTsKICAgIH0KICAgIGE6bGluaywgYTp2aXNpdGVkIHsKICAg\nICAgICBjb2xvcjogIzM4NDg4ZjsKICAgICAgICB0ZXh0LWRlY29yYXRpb246IG5vbmU7CiAgICB9\nCiAgICBAbWVkaWEgKG1heC13aWR0aDogNzAwcHgpIHsKICAgICAgICBib2R5IHsKICAgICAgICAg\nICAgYmFja2dyb3VuZC1jb2xvcjogI2ZmZjsKICAgICAgICB9CiAgICAgICAgZGl2IHsKICAgICAg\nICAgICAgd2lkdGg6IGF1dG87CiAgICAgICAgICAgIG1hcmdpbjogMCBhdXRvOwogICAgICAgICAg\nICBib3JkZXItcmFkaXVzOiAwOwogICAgICAgICAgICBwYWRkaW5nOiAxZW07CiAgICAgICAgfQog\nICAgfQogICAgPC9zdHlsZT4gICAgCjwvaGVhZD4KCjxib2R5Pgo8ZGl2PgogICAgPGgxPkV4YW1w\nbGUgRG9tYWluPC9oMT4KICAgIDxwPlRoaXMgZG9tYWluIGlzIGVzdGFibGlzaGVkIHRvIGJlIHVz\nZWQgZm9yIGlsbHVzdHJhdGl2ZSBleGFtcGxlcyBpbiBkb2N1bWVudHMuIFlvdSBtYXkgdXNlIHRo\naXMKICAgIGRvbWFpbiBpbiBleGFtcGxlcyB3aXRob3V0IHByaW9yIGNvb3JkaW5hdGlvbiBvciBh\nc2tpbmcgZm9yIHBlcm1pc3Npb24uPC9wPgogICAgPHA+PGEgaHJlZj0iaHR0cDovL3d3dy5pYW5h\nLm9yZy9k
b21haW5zL2V4YW1wbGUiPk1vcmUgaW5mb3JtYXRpb24uLi48L2E+PC9wPgo8L2Rpdj4K\nPC9ib2R5Pgo8L2h0bWw+Cg==\n", "code": "200", "timestamp": 1478677229778640, "headers": [["accept-ranges", "bytes"], ["cache-control", "max-age=604800"], ["content-type", "text/html"], ["date", "Wed, 09 Nov 2016 07:40:29 GMT"], ["etag", "\"359670651+gzip\""], ["expires", "Wed, 16 Nov 2016 07:40:29 GMT"], ["last-modified", "Fri, 09 Aug 2013 23:54:35 GMT"], ["server", "ECS (rhv/818F)"], ["vary", "Accept-Encoding"], ["x-cache", "HIT"], ["x-ec-custom-error", "1"], ["content-length", "1270"]], "reason": "OK", "version": "HTTP/1.1"}}
2 | {"client_tls": {"version": "TLSv1.2", "alpn": "http/1.1", "cipher": "ECDHE-RSA-AES128-GCM-SHA256", "sni": "example.com"}, "request": {"body": "", "timestamp": 1478677239811207, "headers": [["host", "example.com"], ["user-agent", "curl/7.50.3"], ["accept", "*/*"]], "version": "HTTP/1.1", "path": "/", "method": "GET"}, "port": 443, "host": "example.com", "version": "0.4.1+dev", "server_tls": {"version": "TLSv1.2", "alpn": "http/1.1", "cipher": "ECDHE-RSA-AES128-GCM-SHA256", "sni": "example.com"}, "path": "/", "scheme": "https", "response": {"body": "PCFkb2N0eXBlIGh0bWw+CjxodG1sPgo8aGVhZD4KICAgIDx0aXRsZT5FeGFtcGxlIERvbWFpbjwv\ndGl0bGU+CgogICAgPG1ldGEgY2hhcnNldD0idXRmLTgiIC8+CiAgICA8bWV0YSBodHRwLWVxdWl2\nPSJDb250ZW50LXR5cGUiIGNvbnRlbnQ9InRleHQvaHRtbDsgY2hhcnNldD11dGYtOCIgLz4KICAg\nIDxtZXRhIG5hbWU9InZpZXdwb3J0IiBjb250ZW50PSJ3aWR0aD1kZXZpY2Utd2lkdGgsIGluaXRp\nYWwtc2NhbGU9MSIgLz4KICAgIDxzdHlsZSB0eXBlPSJ0ZXh0L2NzcyI+CiAgICBib2R5IHsKICAg\nICAgICBiYWNrZ3JvdW5kLWNvbG9yOiAjZjBmMGYyOwogICAgICAgIG1hcmdpbjogMDsKICAgICAg\nICBwYWRkaW5nOiAwOwogICAgICAgIGZvbnQtZmFtaWx5OiAiT3BlbiBTYW5zIiwgIkhlbHZldGlj\nYSBOZXVlIiwgSGVsdmV0aWNhLCBBcmlhbCwgc2Fucy1zZXJpZjsKICAgICAgICAKICAgIH0KICAg\nIGRpdiB7CiAgICAgICAgd2lkdGg6IDYwMHB4OwogICAgICAgIG1hcmdpbjogNWVtIGF1dG87CiAg\nICAgICAgcGFkZGluZzogNTBweDsKICAgICAgICBiYWNrZ3JvdW5kLWNvbG9yOiAjZmZmOwogICAg\nICAgIGJvcmRlci1yYWRpdXM6IDFlbTsKICAgIH0KICAgIGE6bGluaywgYTp2aXNpdGVkIHsKICAg\nICAgICBjb2xvcjogIzM4NDg4ZjsKICAgICAgICB0ZXh0LWRlY29yYXRpb246IG5vbmU7CiAgICB9\nCiAgICBAbWVkaWEgKG1heC13aWR0aDogNzAwcHgpIHsKICAgICAgICBib2R5IHsKICAgICAgICAg\nICAgYmFja2dyb3VuZC1jb2xvcjogI2ZmZjsKICAgICAgICB9CiAgICAgICAgZGl2IHsKICAgICAg\nICAgICAgd2lkdGg6IGF1dG87CiAgICAgICAgICAgIG1hcmdpbjogMCBhdXRvOwogICAgICAgICAg\nICBib3JkZXItcmFkaXVzOiAwOwogICAgICAgICAgICBwYWRkaW5nOiAxZW07CiAgICAgICAgfQog\nICAgfQogICAgPC9zdHlsZT4gICAgCjwvaGVhZD4KCjxib2R5Pgo8ZGl2PgogICAgPGgxPkV4YW1w\nbGUgRG9tYWluPC9oMT4KICAgIDxwPlRoaXMgZG9tYWluIGlzIGVzdGFibGlzaGVkIHRvIGJlIHVz\nZWQgZm9yIGlsbHVzdHJhdGl2ZSBleGFtcGxlcy
BpbiBkb2N1bWVudHMuIFlvdSBtYXkgdXNlIHRo\naXMKICAgIGRvbWFpbiBpbiBleGFtcGxlcyB3aXRob3V0IHByaW9yIGNvb3JkaW5hdGlvbiBvciBh\nc2tpbmcgZm9yIHBlcm1pc3Npb24uPC9wPgogICAgPHA+PGEgaHJlZj0iaHR0cDovL3d3dy5pYW5h\nLm9yZy9kb21haW5zL2V4YW1wbGUiPk1vcmUgaW5mb3JtYXRpb24uLi48L2E+PC9wPgo8L2Rpdj4K\nPC9ib2R5Pgo8L2h0bWw+Cg==\n", "code": "200", "timestamp": 1478677239970378, "headers": [["cache-control", "max-age=604800"], ["content-type", "text/html"], ["date", "Wed, 09 Nov 2016 07:40:39 GMT"], ["etag", "\"359670651+ident\""], ["expires", "Wed, 16 Nov 2016 07:40:39 GMT"], ["last-modified", "Fri, 09 Aug 2013 23:54:35 GMT"], ["server", "ECS (rhv/8199)"], ["vary", "Accept-Encoding"], ["x-cache", "HIT"], ["x-ec-custom-error", "1"], ["content-length", "1270"]], "reason": "OK", "version": "HTTP/1.1"}}
3 |
--------------------------------------------------------------------------------
/microproxy/test/event/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/event/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/event/test_client.py:
--------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 |
4 | from microproxy.context import Event
5 | from microproxy.event import EventClient
6 |
7 |
class TestEventClient(unittest.TestCase):
    """Tests for EventClient's event publishing over a zmq socket."""

    def setUp(self):
        # Mock the zmq socket so no real connection is ever opened.
        self.zmq_socket = mock.Mock()
        self.event_client = EventClient(None, zmq_socket=self.zmq_socket)

    def test_send_event(self):
        event = Event(name="replay", context={"replay": "gogo"})
        self.event_client.send_event(event)

        # The event must be serialized as a plain dict via send_json.
        expected = {"name": "replay", "context": {"replay": "gogo"}}
        self.zmq_socket.send_json.assert_called_with(expected)


if __name__ == "__main__":
    unittest.main()
25 |
--------------------------------------------------------------------------------
/microproxy/test/event/test_manager.py:
--------------------------------------------------------------------------------
1 | import json
2 | import mock
3 | import unittest
4 |
5 | from microproxy.context import Event
6 | from microproxy.event import EventManager
7 |
8 |
class TestEventManager(unittest.TestCase):
    """Tests for EventManager's zmq stream registration and dispatch."""

    def setUp(self):
        self.handler = mock.Mock()
        self.zmq_stream = mock.Mock()

        self.event_manager = EventManager(
            None, handler=self.handler, zmq_stream=self.zmq_stream)
        self.event_manager.start()

    def test_register_stream_call_invoked(self):
        # start() must register _on_recv as the stream's receive callback.
        self.zmq_stream.on_recv.assert_called_with(self.event_manager._on_recv)

    def test_recv(self):
        # An incoming JSON frame is decoded and dispatched as an Event.
        payload = {"name": "replay", "context": {"replay": "yoyo"}}
        self.event_manager._on_recv([json.dumps(payload)])

        self.handler.handle_event.assert_called_with(
            Event(name="replay", context={"replay": "yoyo"}))


if __name__ == "__main__":
    unittest.main()
34 |
--------------------------------------------------------------------------------
/microproxy/test/event/test_replay.py:
--------------------------------------------------------------------------------
1 | import mock
2 | import h11
3 | from tornado.testing import AsyncTestCase, gen_test
4 | from tornado.concurrent import Future
5 | from tornado.gen import coroutine
6 |
7 | from microproxy.context import HttpRequest, HttpHeaders, ServerContext, Event
8 | from microproxy.event.replay import ReplayHandler
9 | from microproxy.event import REPLAY
10 | from microproxy.protocol.http1 import Connection as Http1Connection
11 | from microproxy.protocol.http2 import Connection as Http2Connection
12 |
13 |
class TestReplayHandler(AsyncTestCase):
    """Tests for ReplayHandler.

    The layer manager is fully mocked: ``run_layers`` captures the layer
    context built by the handler instead of running real layers.  The
    tests then read the replayed request back out of ``context.src_stream``
    with the real HTTP/1 and HTTP/2 protocol connections to verify what
    the handler wrote.
    """

    def setUp(self):
        super(TestReplayHandler, self).setUp()
        self.layer_manager = mock.Mock()
        # get_first_layer returns a mock layer object; run_layers is
        # redirected to get_context below so the built context is captured.
        self.layer_manager.get_first_layer = mock.Mock(
            return_value=self.layer_manager.first_layer)
        self.layer_manager.run_layers = mock.Mock(
            side_effect=self.get_context)

        self.server_state = ServerContext()
        self.replay_handler = ReplayHandler(
            self.server_state, layer_manager=self.layer_manager, io_loop=self.io_loop)
        self.context = None     # layer context captured from run_layers
        self.http_events = []   # protocol-callback args collected by collect_event

    def _future(self, result):
        # Return a Future already resolved with *result*.
        future = Future()
        future.set_result(result)
        return future

    def get_context(self, server_state, layer, context):
        # side_effect for the mocked run_layers: remember the context and
        # hand back a resolved future so the handler coroutine completes.
        self.context = context
        return self._future(None)

    def collect_event(self, *args):
        # Generic protocol callback: store the raw positional arguments.
        self.http_events.append(args)

    @coroutine
    def read_until(self, conn, count):
        # Keep feeding stream bytes to the protocol connection until
        # *count* callback events have been collected.
        while len(self.http_events) < count:
            yield conn.read_bytes()

    @gen_test
    def test_http1(self):
        # Replay a plain HTTP/1.1 GET through the (mocked) layer chain.
        ctx = dict(
            host="localhost", port=8080, scheme="http", path="/",
            request=dict(
                method="GET", path="/", version="HTTP/1.1",
                headers=[("Host", "localhost")]),
            response=None)
        event = Event(REPLAY, ctx)
        yield self.replay_handler.handle(event)

        self.assertIsNotNone(self.context)
        self.layer_manager.get_first_layer.assert_called_with(
            self.context)
        self.layer_manager.run_layers.assert_called_with(
            self.server_state, self.layer_manager.first_layer, self.context)

        # Parse what the handler wrote into src_stream as an HTTP/1 server.
        conn = Http1Connection(
            h11.SERVER, self.context.src_stream, on_unhandled=self.collect_event)
        yield self.read_until(conn, 1)

        req, = self.http_events[0]
        self.assertIsInstance(req, HttpRequest)
        self.assertEqual(req.method, "GET")
        self.assertEqual(req.version, "HTTP/1.1")
        self.assertEqual(req.path, "/")
        # Header names come back lower-cased by the HTTP/1 parser.
        self.assertEqual(req.headers, HttpHeaders([
            ("host", "localhost")]))

    @gen_test
    def test_http1_post_body(self):
        # Replay a POST; the body travels base64-encoded inside the event
        # context (Python 2 str.encode("base64")).
        body = b"this is body"
        body_length = len(body)
        ctx = dict(
            host="localhost", port=8080, scheme="http", path="/",
            request=dict(
                method="POST", path="/", version="HTTP/1.1",
                headers=[("Host", "localhost"),
                         ("Content-Length", str(body_length))],
                body=body.encode("base64")),
            response=None)
        event = Event(REPLAY, ctx)
        yield self.replay_handler.handle(event)

        self.assertIsNotNone(self.context)
        self.layer_manager.get_first_layer.assert_called_with(
            self.context)
        self.layer_manager.run_layers.assert_called_with(
            self.server_state, self.layer_manager.first_layer, self.context)

        conn = Http1Connection(
            h11.SERVER, self.context.src_stream, on_unhandled=self.collect_event)
        yield self.read_until(conn, 1)

        req, = self.http_events[0]
        self.assertIsInstance(req, HttpRequest)
        self.assertEqual(req.method, "POST")
        self.assertEqual(req.version, "HTTP/1.1")
        self.assertEqual(req.path, "/")
        self.assertEqual(req.headers, HttpHeaders([
            ("host", "localhost"),
            ("content-length", str(body_length))]))
        # The decoded body must round-trip unchanged.
        self.assertEqual(req.body, body)

    @gen_test
    def test_http2(self):
        # scheme "h2" makes the handler emit the request as HTTP/2.
        ctx = dict(
            host="localhost", port=8080, scheme="h2", path="/",
            request=dict(
                method="GET", path="/", version="HTTP/2",
                headers=[(":method", "GET"), (":path", "/")]),
            response=None)
        event = Event(REPLAY, ctx)
        yield self.replay_handler.handle(event)

        self.assertIsNotNone(self.context)
        self.layer_manager.get_first_layer.assert_called_with(
            self.context)
        self.layer_manager.run_layers.assert_called_with(
            self.server_state, self.layer_manager.first_layer, self.context)

        # Read the replayed request back as an HTTP/2 server.
        conn = Http2Connection(
            self.context.src_stream, client_side=False, on_request=self.collect_event, on_unhandled=mock.Mock())
        yield self.read_until(conn, 1)

        # on_request delivers three positional args; the middle one is the
        # request itself (the others are not asserted on here).
        _, req, _ = self.http_events[0]
        self.assertIsInstance(req, HttpRequest)
        self.assertEqual(req.method, "GET")
        self.assertEqual(req.version, "HTTP/2")
        self.assertEqual(req.path, "/")
        self.assertEqual(req.headers, HttpHeaders([
            (":method", "GET"), (":path", "/")]))

    @gen_test
    def test_http2_post_body(self):
        # HTTP/2 POST with a base64-transported body, as in test_http1_post_body.
        body = b"this is body"
        body_length = len(body)
        ctx = dict(
            host="localhost", port=8080, scheme="h2", path="/",
            request=dict(
                method="POST", path="/", version="HTTP/2",
                headers=[(":method", "POST"), (":path", "/"), ("content-length", str(body_length))],
                body=body.encode("base64")),
            response=None)
        event = Event(REPLAY, ctx)
        yield self.replay_handler.handle(event)

        self.assertIsNotNone(self.context)
        self.layer_manager.get_first_layer.assert_called_with(
            self.context)
        self.layer_manager.run_layers.assert_called_with(
            self.server_state, self.layer_manager.first_layer, self.context)

        conn = Http2Connection(
            self.context.src_stream, client_side=False, on_request=self.collect_event, on_unhandled=mock.Mock())
        yield self.read_until(conn, 1)

        _, req, _ = self.http_events[0]
        self.assertIsInstance(req, HttpRequest)
        self.assertEqual(req.method, "POST")
        self.assertEqual(req.version, "HTTP/2")
        self.assertEqual(req.path, "/")
        self.assertEqual(req.headers, HttpHeaders([
            (":method", "POST"), (":path", "/"), ("content-length", str(body_length))]))
        self.assertEqual(req.body, body)

    @gen_test
    def test_not_support_protocol(self):
        # An unsupported scheme must be rejected before the layer manager
        # is ever consulted.
        ctx = {
            "scheme": "websocket"
        }
        event = Event(REPLAY, ctx)
        yield self.replay_handler.handle(event)

        self.layer_manager.get_first_layer.assert_not_called()
        self.layer_manager.run_layers.assert_not_called()
182 |
--------------------------------------------------------------------------------
/microproxy/test/interceptor/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/interceptor/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/interceptor/test_msg_publisher.py:
--------------------------------------------------------------------------------
1 | import json
2 | import unittest
3 | import mock
4 |
5 | from microproxy.context import ViewerContext
6 | from microproxy.interceptor.msg_publisher import MsgPublisher
7 | from microproxy.version import VERSION
8 |
9 |
class TestMsgPublisher(unittest.TestCase):
    """Tests that MsgPublisher serializes viewer contexts onto zmq."""

    def setUp(self):
        self.zmq_socket = mock.Mock()
        self.msg_publisher = MsgPublisher(None, self.zmq_socket)

    def test_publish(self):
        ctx_data = dict(
            scheme="https",
            host="example.com",
            path="/index",
            port=443,
            version=VERSION,
            client_tls=None,
            server_tls=None,
            response=None,
            request=None,
        )
        self.msg_publisher.publish(ViewerContext.deserialize(ctx_data))

        # The context is published under the "message" topic as a JSON
        # string; compare parsed JSON so that key order is irrelevant.
        self.zmq_socket.send_multipart.assert_called_with(
            ["message", JsonStrMatcher(ctx_data)])
33 |
34 |
class JsonStrMatcher(object):
    """Equality matcher comparing a JSON string against expected data.

    Used with mock's ``assert_called_with``: the mock is called with a
    serialized JSON string, and this matcher compares the *parsed* value,
    so the key order of the serialization does not matter.
    """

    def __init__(self, data):
        self.data = data

    def __eq__(self, other):
        try:
            return json.loads(other) == self.data
        except (TypeError, ValueError):
            # Not a parseable JSON string -> cannot match (instead of
            # blowing up inside mock's comparison machinery).
            return False

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it so both
        # comparison directions stay consistent.
        return not self.__eq__(other)

    def __repr__(self):
        # Useful output in mock's assertion-failure messages.
        return "JsonStrMatcher(%r)" % (self.data,)

    def __str__(self):
        return str(self.data)
44 |
--------------------------------------------------------------------------------
/microproxy/test/layer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/layer/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/layer/test_forward.py:
--------------------------------------------------------------------------------
1 | import mock
2 | from tornado.testing import gen_test
3 |
4 | from microproxy.test.utils import ProxyAsyncTestCase
5 | from microproxy.context import LayerContext
6 | from microproxy.layer import ForwardLayer
7 |
8 |
class TestForwardLayer(ProxyAsyncTestCase):
    """Tests that ForwardLayer pipes raw bytes in both directions."""

    def setUp(self):
        super(TestForwardLayer, self).setUp()
        self.asyncSetUp()

    @gen_test
    def asyncSetUp(self):
        # Two real iostream pairs: client <-> src and dest <-> server.
        self.client_stream, src_stream = yield self.create_iostream_pair()
        dest_stream, self.server_stream = yield self.create_iostream_pair()
        self.context = LayerContext(mode="socks",
                                    src_stream=src_stream,
                                    dest_stream=dest_stream)

        self.forward_layer = ForwardLayer(mock.Mock(), self.context)

    @gen_test
    def test_forward_message(self):
        self.forward_layer.process_and_return_context()

        # client -> server direction.
        # assertEqual (not bare assert): survives python -O and reports
        # both values on failure, consistent with the rest of the suite.
        self.client_stream.write(b"aaa\r\n")
        message = yield self.server_stream.read_until(b"\r\n")
        self.assertEqual(message, b"aaa\r\n")

        # server -> client direction.
        self.server_stream.write(b"bbb\r\n")
        message = yield self.client_stream.read_until(b"\r\n")
        self.assertEqual(message, b"bbb\r\n")

        self.client_stream.close()
        self.server_stream.close()
        self.context.src_stream.close()
        self.context.dest_stream.close()
39 |
--------------------------------------------------------------------------------
/microproxy/test/layer/test_manager.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 | from mock import Mock
4 |
5 | from tornado import gen, iostream
6 | from microproxy.context import LayerContext, ServerContext
7 | from microproxy.exception import (
8 | DestStreamClosedError, SrcStreamClosedError, DestNotConnectedError
9 | )
10 | from microproxy.layer import manager as layer_manager
11 | from microproxy.layer import (
12 | SocksLayer, TransparentLayer, ReplayLayer, HttpProxyLayer,
13 | TlsLayer, Http1Layer, Http2Layer, ForwardLayer
14 | )
15 |
16 |
class TestLayerManager(unittest.TestCase):
    """Tests for microproxy.layer.manager.

    Covers first-layer selection per proxy mode, the layer-transition
    table (``_next_layer``) for the various port/scheme combinations,
    and src_stream handling in ``_handle_layer_error``.
    """

    def setUp(self):
        super(TestLayerManager, self).setUp()
        # Minimal socks-mode config using the repo's self-signed test certs.
        config = {
            "mode": "socks",
            "http_port": [],
            "https_port": [],
            "certfile": "microproxy/test/test.crt",
            "keyfile": "microproxy/test/test.key"
        }
        self.server_state = ServerContext(config=config)
        self.src_stream = Mock()

    def test_get_socks_layer(self):
        context = LayerContext(mode="socks", port=443)

        layer = layer_manager.get_first_layer(context)
        self.assertIsInstance(layer, SocksLayer)

    @unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
    def test_get_transparent_layer_linux(self):
        context = LayerContext(mode="transparent", port=443)
        layer = layer_manager.get_first_layer(context)
        self.assertIsInstance(layer, TransparentLayer)

    @unittest.skipIf('linux' in sys.platform, "TransparentLayer only in linux")
    def test_get_transparent_layer_non_linux(self):
        # Transparent mode is linux-only (see the skip conditions above);
        # other platforms must get NotImplementedError.
        context = LayerContext(mode="transparent", port=443)
        with self.assertRaises(NotImplementedError):
            layer_manager.get_first_layer(context)

    def test_get_replay_layer(self):
        context = LayerContext(mode="replay", port=443)
        layer = layer_manager.get_first_layer(context)
        self.assertIsInstance(layer, ReplayLayer)

    def test_get_tls_layer_from_socks(self):
        # Port 443 after the socks handshake transitions to TLS.
        context = LayerContext(mode="socks", port=443)

        socks_layer = SocksLayer(context)
        layer = layer_manager._next_layer(self.server_state, socks_layer, context)
        self.assertIsInstance(layer, TlsLayer)

    @unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
    def test_get_tls_layer_from_transparent(self):
        context = LayerContext(mode="socks", port=443)
        transparent_layer = TransparentLayer(context)
        layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
        self.assertIsInstance(layer, TlsLayer)

    def test_get_http1_layer_from_socks_replay(self):
        # Port 80 from socks goes straight to HTTP/1.
        context = LayerContext(mode="socks", port=80)

        socks_layer = SocksLayer(context)
        layer = layer_manager._next_layer(self.server_state, socks_layer, context)
        self.assertIsInstance(layer, Http1Layer)

        # Replay with scheme "http" also resolves to HTTP/1.
        context.scheme = "http"
        replay_layer = ReplayLayer(context)
        layer = layer_manager._next_layer(self.server_state, replay_layer, context)
        self.assertIsInstance(layer, Http1Layer)

        # After TLS with scheme "https", HTTP/1 follows as well.
        context.scheme = "https"
        tls_layer = TlsLayer(self.server_state, context)
        layer = layer_manager._next_layer(self.server_state, tls_layer, context)
        self.assertIsInstance(layer, Http1Layer)

    @unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
    def test_get_http1_layer_from_transparent(self):
        context = LayerContext(mode="socks", port=80)
        transparent_layer = TransparentLayer(context)
        layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
        self.assertIsInstance(layer, Http1Layer)

    def test_get_http2_layer(self):
        # scheme "h2" selects the HTTP/2 layer from replay and TLS alike.
        context = LayerContext(mode="socks", port=443, scheme="h2")

        replay_layer = ReplayLayer(context)
        layer = layer_manager._next_layer(self.server_state, replay_layer, context)
        self.assertIsInstance(layer, Http2Layer)

        tls_layer = TlsLayer(self.server_state, context)
        layer = layer_manager._next_layer(self.server_state, tls_layer, context)
        self.assertIsInstance(layer, Http2Layer)

    def test_get_forward_layer_from_socks_replay(self):
        # An unrecognized port/scheme falls back to raw byte forwarding.
        context = LayerContext(mode="socks", port=5555)

        socks_layer = SocksLayer(context)
        layer = layer_manager._next_layer(self.server_state, socks_layer, context)
        self.assertIsInstance(layer, ForwardLayer)

        context.scheme = "test"
        replay_layer = ReplayLayer(context)
        layer = layer_manager._next_layer(self.server_state, replay_layer, context)
        self.assertIsInstance(layer, ForwardLayer)

        context.scheme = "test"
        tls_layer = TlsLayer(self.server_state, context)
        layer = layer_manager._next_layer(self.server_state, tls_layer, context)
        self.assertIsInstance(layer, ForwardLayer)

    @unittest.skipIf('linux' not in sys.platform, "TransparentLayer only in linux")
    def test_get_forward_layer_from_transparent(self):
        context = LayerContext(mode="socks", port=5555)
        transparent_layer = TransparentLayer(context)
        layer = layer_manager._next_layer(self.server_state, transparent_layer, context)
        self.assertIsInstance(layer, ForwardLayer)

    def test_handle_layer_error(self):
        # Timeouts, dest-side closes, and generic stream closes must close
        # src_stream; src-side-closed and dest-not-connected must not.
        context = LayerContext(
            mode="socks", src_stream=self.src_stream, port=443, scheme="h2")

        layer_manager._handle_layer_error(gen.TimeoutError("timeout"), context)
        context.src_stream.close.assert_called_once_with()

        context.src_stream.reset_mock()
        layer_manager._handle_layer_error(DestNotConnectedError("stream closed"), context)
        context.src_stream.close.assert_not_called()

        context.src_stream.reset_mock()
        layer_manager._handle_layer_error(DestStreamClosedError("stream closed"), context)
        context.src_stream.close.assert_called_once_with()

        context.src_stream.reset_mock()
        layer_manager._handle_layer_error(SrcStreamClosedError("stream closed"), context)
        context.src_stream.close.assert_not_called()

        context.src_stream.reset_mock()
        layer_manager._handle_layer_error(iostream.StreamClosedError("stream closed"), context)
        context.src_stream.close.assert_called_once_with()

    def test_handle_unhandled_layer_error(self):
        # Any unexpected error type also results in closing src_stream.
        context = LayerContext(
            mode="socks", src_stream=Mock(), port=443, scheme="h2")
        layer_manager._handle_layer_error(ValueError, context)
        context.src_stream.close.assert_called_once_with()

    def test_get_http_proxy_layer(self):
        context = LayerContext(mode="http", port=80)
        layer = layer_manager.get_first_layer(context)
        self.assertIsInstance(layer, HttpProxyLayer)

    def test_get_http_layer_from_http_proxy_layer(self):
        context = LayerContext(mode="http", port=80)

        http_proxy_layer = HttpProxyLayer(context)
        layer = layer_manager._next_layer(
            self.server_state, http_proxy_layer, context)
        self.assertIsInstance(layer, Http1Layer)

    def test_get_tls_layer_from_http_layer(self):
        # scheme "https" from an HTTP/1 layer transitions into TLS.
        context = LayerContext(mode="http", scheme="https", port=80)

        http_layer = Http1Layer(self.server_state, context)
        layer = layer_manager._next_layer(
            self.server_state, http_layer, context)
        self.assertIsInstance(layer, TlsLayer)

    def test_get_http_layer_from_http_layer(self):
        # Plain "http" stays in HTTP/1.
        context = LayerContext(mode="http", scheme="http", port=80)

        http_layer = Http1Layer(self.server_state, context)
        layer = layer_manager._next_layer(
            self.server_state, http_layer, context)
        self.assertIsInstance(layer, Http1Layer)
183 |
--------------------------------------------------------------------------------
/microproxy/test/layer/test_replay.py:
--------------------------------------------------------------------------------
1 | import mock
2 | from tornado.testing import AsyncTestCase, gen_test
3 | from tornado.concurrent import Future
4 |
5 | from microproxy.layer.proxy.replay import ReplayLayer
6 |
7 |
class TestReplayLayer(AsyncTestCase):
    """Tests ReplayLayer's choice of destination stream per scheme."""

    def setUp(self):
        super(TestReplayLayer, self).setUp()

        self.streams = mock.Mock()
        self.create_dest_stream = mock.Mock(
            return_value=self._resolved(self.streams.dest_stream))
        # start_tls on the plain dest stream resolves to the TLS stream.
        self.streams.dest_stream.start_tls = mock.Mock(
            return_value=self._resolved(self.streams.tls_stream))

        self.context = mock.Mock()
        self.context.host = "localhost"
        self.context.port = 8080

    def _resolved(self, result):
        """Return a Future that is already resolved with *result*."""
        done = Future()
        done.set_result(result)
        return done

    def _run_with_scheme(self, scheme):
        """Build a ReplayLayer for *scheme* and return its result future."""
        self.context.scheme = scheme
        self.layer = ReplayLayer(
            self.context, create_dest_stream=self.create_dest_stream)
        return self.layer.process_and_return_context()

    @gen_test
    def test_run_layer_with_https(self):
        context = yield self._run_with_scheme("https")

        # https upgrades the connection, so the TLS stream is used.
        self.assertIs(context.dest_stream, self.streams.tls_stream)
        self.create_dest_stream.assert_called_with(
            ("localhost", 8080))

    @gen_test
    def test_run_layer_with_h2(self):
        context = yield self._run_with_scheme("h2")

        # h2 also runs over TLS.
        self.assertIs(context.dest_stream, self.streams.tls_stream)
        self.create_dest_stream.assert_called_with(
            ("localhost", 8080))

    @gen_test
    def test_run_layer_with_other(self):
        context = yield self._run_with_scheme("http")

        # Plain schemes keep the un-upgraded dest stream.
        self.assertIs(context.dest_stream, self.streams.dest_stream)
        self.create_dest_stream.assert_called_with(
            ("localhost", 8080))
62 |
--------------------------------------------------------------------------------
/microproxy/test/layer/test_tls.py:
--------------------------------------------------------------------------------
1 | import mock
2 | import unittest
3 |
4 | from OpenSSL import SSL
5 | from service_identity import VerificationError
6 | from tornado.iostream import StreamClosedError
7 | from tornado.testing import gen_test
8 |
9 | from microproxy.test.utils import ProxyAsyncTestCase
10 | from microproxy.cert import CertStore
11 | from microproxy.context import LayerContext, ServerContext
12 | from microproxy.exception import DestStreamClosedError, ProtocolError, TlsError
13 | from microproxy.tornado_ext.iostream import MicroProxySSLIOStream
14 | from microproxy.layer.application.tls import TlsLayer
15 | from microproxy.protocol.tls import create_dest_sslcontext, create_basic_sslcontext
16 | from microproxy.utils import HAS_ALPN
17 |
18 |
def create_src_sslcontext(cert, priv_key, alpn_callback):
    """Build a server-side SSL context for the in-test TLS peer.

    Loads the given certificate/private-key files and, when the
    environment supports ALPN, installs *alpn_callback* as the ALPN
    selection callback (pass None to skip ALPN entirely).
    """
    ctx = create_basic_sslcontext()
    ctx.use_certificate_file(cert)
    ctx.use_privatekey_file(priv_key)

    if HAS_ALPN and alpn_callback:
        ctx.set_alpn_select_callback(alpn_callback)

    return ctx
28 |
class TestTlsLayer(ProxyAsyncTestCase):
    """Tests for TlsLayer.start_dest_tls against a real in-process TLS peer.

    ``server_stream`` is upgraded to a TLS server using the repo's
    self-signed test certificate while the layer performs the client-side
    handshake over ``dest_stream``.
    """

    def setUp(self):
        super(TestTlsLayer, self).setUp()
        self.asyncSetUp()

    @gen_test
    def asyncSetUp(self):
        # Two connected iostream pairs: client <-> src and dest <-> server.
        self.client_stream, src_stream = yield self.create_iostream_pair()
        dest_stream, self.server_stream = yield self.create_iostream_pair()

        # insecure=True skips upstream cert verification; individual tests
        # flip it back to False to exercise the failure paths.  The layer
        # holds a reference to this dict, so mutating it later takes effect.
        self.config = dict(
            client_certs="microproxy/test/test.crt", insecure=True)

        cert_store = CertStore(dict(certfile="microproxy/test/test.crt",
                                    keyfile="microproxy/test/test.key"))
        server_state = ServerContext(cert_store=cert_store, config=self.config)

        # src_stream.pause()
        context = LayerContext(mode="socks",
                               src_stream=src_stream,
                               dest_stream=dest_stream,
                               host="127.0.0.1", port="443")

        self.tls_layer = TlsLayer(server_state, context)

    @gen_test
    @unittest.skipIf(not HAS_ALPN, "only support for env with alpn")
    def test_start_dest_tls_with_alpn_http1(self):
        def alpn_callback(conn, alpns):
            # Server-side ALPN selection: require http/1.1 to be offered.
            if "http/1.1" not in alpns:
                raise ValueError("incorrect alpns")
            return b"http/1.1"

        # Kick off the server-side handshake; it completes concurrently
        # with the layer's client-side handshake below.
        server_stream_future = self.server_stream.start_tls(
            server_side=True,
            ssl_options=create_src_sslcontext(
                "microproxy/test/test.crt", "microproxy/test/test.key",
                alpn_callback=alpn_callback))

        ctx_future = self.tls_layer.start_dest_tls("www.google.com", ["http/1.1"])

        dest_stream, alpn = yield ctx_future
        self.assertIsInstance(dest_stream, MicroProxySSLIOStream)
        self.assertFalse(dest_stream.closed())
        self.assertEqual(alpn, "http/1.1")

        self.server_stream = yield server_stream_future

        # Sanity check: data round-trips over the established TLS pair.
        dest_stream.write(b"hello")
        data = yield self.server_stream.read_bytes(5)
        self.assertEqual(data, b"hello")

    @gen_test
    @unittest.skipIf(not HAS_ALPN, "only support for env with alpn")
    def test_start_dest_tls_with_alpn_h2(self):
        def alpn_callback(conn, alpns):
            # Server picks h2 out of the offered protocols.
            if "h2" not in alpns:
                raise ValueError("incorrect alpns")
            return b"h2"

        server_stream_future = self.server_stream.start_tls(
            server_side=True,
            ssl_options=create_src_sslcontext(
                "microproxy/test/test.crt", "microproxy/test/test.key",
                alpn_callback=alpn_callback))

        ctx_future = self.tls_layer.start_dest_tls("www.google.com", ["http/1.1", "h2"])

        dest_stream, alpn = yield ctx_future
        self.assertIsInstance(dest_stream, MicroProxySSLIOStream)
        self.assertFalse(dest_stream.closed())
        self.assertEqual(alpn, "h2")

        self.server_stream = yield server_stream_future

        dest_stream.write(b"hello")
        data = yield self.server_stream.read_bytes(5)
        self.assertEqual(data, b"hello")

    @gen_test
    def test_start_dest_tls_with_verification_error(self):
        # With verification enabled, the self-signed test cert does not
        # match the requested hostname and must raise TlsError.
        self.config.update(dict(insecure=False))

        def alpn_callback(conn, alpns):
            return b""

        server_stream_future = self.server_stream.start_tls(
            server_side=True,
            ssl_options=create_src_sslcontext(
                "microproxy/test/test.crt", "microproxy/test/test.key",
                alpn_callback=alpn_callback))

        ctx_future = self.tls_layer.start_dest_tls("www.google.com", [])

        with self.assertRaises(TlsError):
            dest_stream, alpn = yield ctx_future

        self.server_stream = yield server_stream_future

    @gen_test
    def test_start_dest_tls_with_ssl_error(self):
        # Server offers no ALPN callback; with insecure=False the client
        # handshake must fail with TlsError.
        server_stream_future = self.server_stream.start_tls(
            server_side=True,
            ssl_options=create_src_sslcontext(
                "microproxy/test/test.crt", "microproxy/test/test.key",
                alpn_callback=None))

        self.config.update({"insecure": False})
        ctx_future = self.tls_layer.start_dest_tls("www.google.com", [])

        with self.assertRaises(TlsError):
            dest_stream, alpn = yield ctx_future

        self.server_stream = yield server_stream_future

    @gen_test
    def test_start_dest_tls_with_dest_stream_closed(self):
        # Closing the far end mid-handshake surfaces as DestStreamClosedError.
        ctx_future = self.tls_layer.start_dest_tls("www.google.com", [])
        self.server_stream.close()

        with self.assertRaises(DestStreamClosedError):
            dest_stream, alpn = yield ctx_future

    def tearDown(self):
        # Close whichever plain streams are still open.
        # NOTE(review): assumes asyncSetUp ran far enough to set
        # client_stream/server_stream -- confirm setUp cannot fail earlier.
        if self.client_stream and not self.client_stream.closed():
            self.client_stream.close()
        if self.server_stream and not self.server_stream.closed():
            self.server_stream.close()
157 |
--------------------------------------------------------------------------------
/microproxy/test/layer/test_transparent.py:
--------------------------------------------------------------------------------
1 | import mock
2 | from tornado.testing import AsyncTestCase, gen_test
3 | from tornado.concurrent import Future
4 |
5 | from microproxy.layer.proxy.transparent import TransparentLayer
6 |
7 |
class TestTransparentLayer(AsyncTestCase):
    """Tests TransparentLayer's destination resolution and connection."""

    def setUp(self):
        super(TestTransparentLayer, self).setUp()

        self.dest_stream = mock.Mock()
        self.create_dest_stream = mock.Mock(
            return_value=self._make_ready_future(self.dest_stream))
        # The resolver pretends the original destination was localhost:8080.
        self.dest_addr_resolver = mock.Mock(return_value=("localhost", 8080))

        self.context = mock.Mock()
        self.context.host = "localhost"
        self.context.port = 8080

        self.layer = TransparentLayer(
            self.context, dest_addr_resolver=self.dest_addr_resolver,
            create_dest_stream=self.create_dest_stream)

    def _make_ready_future(self, result):
        """Return an already-resolved Future holding *result*."""
        fut = Future()
        fut.set_result(result)
        return fut

    @gen_test
    def test_run_layer_with_other(self):
        ctx = yield self.layer.process_and_return_context()

        # The layer must connect to the resolved address and record it.
        self.assertIs(ctx.dest_stream, self.dest_stream)
        self.assertEqual(ctx.host, "localhost")
        self.assertEqual(ctx.port, 8080)
        self.create_dest_stream.assert_called_with(("localhost", 8080))
38 |
--------------------------------------------------------------------------------
/microproxy/test/protocol/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/protocol/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/protocol/client_hello.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/protocol/client_hello.bin
--------------------------------------------------------------------------------
/microproxy/test/protocol/test_http1.py:
--------------------------------------------------------------------------------
1 | import h11
2 | import mock
3 | from tornado.testing import gen_test
4 |
5 | from microproxy.test.utils import ProxyAsyncTestCase
6 | from microproxy.protocol.http1 import Connection
7 | from microproxy.context import HttpRequest, HttpResponse, HttpHeaders
8 |
9 |
10 | class TestConnection(ProxyAsyncTestCase):
    def setUp(self):
        """Create the connected stream pair and reset captured messages."""
        super(TestConnection, self).setUp()
        self.asyncSetUp()
        self.request = None   # filled by the on_request callback
        self.response = None  # filled by the on_response callback
16 |
    def on_request(self, request):
        # Callback handed to a server-side Connection; records the parsed
        # request for the test body to assert on.
        self.request = request
19 |
    def on_response(self, response):
        # Callback handed to a client-side Connection; records the parsed
        # response for the test body to assert on.
        self.response = response
22 |
    @gen_test
    def asyncSetUp(self):
        # A real connected iostream pair; both ends are closed on cleanup.
        self.client_stream, self.server_stream = yield self.create_iostream_pair()
        self.addCleanup(self.client_stream.close)
        self.addCleanup(self.server_stream.close)
28 |
29 | @gen_test
30 | def test_on_request(self):
31 | client_conn = Connection(h11.CLIENT, self.client_stream)
32 | client_conn.send_request(HttpRequest(
33 | method="GET", path="/", headers=[("Host", "localhost")]))
34 |
35 | server_conn = Connection(
36 | h11.SERVER, self.server_stream, on_request=self.on_request)
37 | yield server_conn.read_bytes()
38 |
39 | self.assertIsNotNone(self.request)
40 | self.assertEqual(self.request.headers,
41 | HttpHeaders([("host", "localhost")]))
42 | self.assertEqual(self.request.method, "GET")
43 | self.assertEqual(self.request.path, "/")
44 | self.assertEqual(self.request.version, "HTTP/1.1")
45 |
46 | @gen_test
47 | def test_on_response(self):
48 | server_conn = Connection(h11.SERVER, self.server_stream)
49 | server_conn.send_response(HttpResponse(
50 | version="HTTP/1.1", code="200", reason="OK",
51 | headers=[("Host", "localhost"),
52 | ("Content-Length", "1")],
53 | body=b"A"))
54 |
55 | client_conn = Connection(
56 | h11.CLIENT, self.client_stream, on_response=self.on_response)
57 | yield client_conn.read_bytes()
58 |
59 | self.assertIsNotNone(self.response)
60 | self.assertEqual(self.response.headers,
61 | HttpHeaders([("host", "localhost"),
62 | ("content-length", "1")]))
63 | self.assertEqual(self.response.code, "200")
64 | self.assertEqual(self.response.reason, "OK")
65 | self.assertEqual(self.response.version, "HTTP/1.1")
66 | self.assertEqual(self.response.body, b"A")
67 |
68 | @gen_test
69 | def test_on_info_response(self):
70 | client_conn = Connection(
71 | h11.CLIENT, self.client_stream, on_info_response=self.on_response)
72 | client_conn.send_request(HttpRequest(
73 | method="GET", path="/chat", version="HTTP/1.1",
74 | headers=[("Host", "localhost"), ("Upgrade", "websocket")]))
75 |
76 | server_conn = Connection(h11.SERVER, self.server_stream, on_request=self.on_request)
77 | yield server_conn.read_bytes()
78 | server_conn.send_info_response(HttpResponse(
79 | version="HTTP/1.1", code="101", reason="Protocol Upgrade",
80 | headers=[("Host", "localhost"),
81 | ("Upgrade", "websocket")]))
82 |
83 | yield client_conn.read_bytes()
84 |
85 | self.assertIsNotNone(self.response)
86 | self.assertEqual(self.response.headers,
87 | HttpHeaders([("host", "localhost"),
88 | ("upgrade", "websocket")]))
89 | self.assertEqual(self.response.code, "101")
90 | self.assertEqual(self.response.reason, "Protocol Upgrade")
91 | self.assertEqual(self.response.version, "HTTP/1.1")
92 |
93 | @gen_test
94 | def test_on_post_request(self):
95 | client_conn = Connection(h11.CLIENT, self.client_stream)
96 | client_conn.send_request(HttpRequest(
97 | method="POST", path="/",
98 | headers=[("Host", "localhost"), ("Content-Length", "4")],
99 | body=b"yaya"))
100 |
101 | server_conn = Connection(
102 | h11.SERVER, self.server_stream, on_request=self.on_request)
103 | yield server_conn.read_bytes()
104 |
105 | self.assertIsNotNone(self.request)
106 | self.assertEqual(self.request.headers,
107 | HttpHeaders([("host", "localhost"), ("content-length", "4")]))
108 | self.assertEqual(self.request.method, "POST")
109 | self.assertEqual(self.request.path, "/")
110 | self.assertEqual(self.request.version, "HTTP/1.1")
111 | self.assertEqual(self.request.body, b"yaya")
112 |
113 | @gen_test
114 | def test_on_connection_closed(self):
115 | client_conn = Connection(
116 | h11.CLIENT, self.client_stream, on_response=self.on_response)
117 | client_conn.send_request(HttpRequest(
118 | method="GET", path="/",
119 | headers=[("Host", "localhost"), ("Connection", "close")]))
120 |
121 | server_conn = Connection(
122 | h11.SERVER, self.server_stream, on_request=self.on_request)
123 | yield server_conn.read_bytes()
124 | server_conn.send_response(HttpResponse(
125 | version="HTTP/1.1", code="200", reason="OK",
126 | headers=[("Host", "localhost"),
127 | ("Content-Length", "4")],
128 | body=b"Yaya"))
129 |
130 | yield client_conn.read_bytes()
131 | self.assertTrue(self.client_stream.closed())
132 | self.assertTrue(self.server_stream.closed())
133 |
134 | def test_parse_version(self):
135 | self.assertEqual(
136 | Connection(h11.CLIENT, None)._parse_version(None),
137 | "HTTP/1.1")
138 |
139 | http_content = mock.Mock()
140 | http_content.http_version = "1.1"
141 | self.assertEqual(
142 | Connection(h11.CLIENT, None)._parse_version(http_content),
143 | "HTTP/1.1")
144 |
145 | def tearDown(self):
146 | self.client_stream.close()
147 | self.server_stream.close()
148 |
--------------------------------------------------------------------------------
/microproxy/test/test.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDtTCCAp2gAwIBAgIJALnQM6ej60ljMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
3 | BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
4 | aWRnaXRzIFB0eSBMdGQwHhcNMTYwNzAyMTEyNjA3WhcNMTcwNzAyMTEyNjA3WjBF
5 | MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
6 | ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
7 | CgKCAQEA49yPnl9HWhEH+mXPBVVUOIlErtive57LzbNUfp1LN23AcqV43XJKwHqr
8 | A0wKo0FA8Fr2b8/ioesqbBJXzi/Fie6nLEyg5w1q9bNlBSD348E232A8IM9xvafh
9 | lYTx7gteCiPDM3Vz4i1nBIWZW7hUNO99i+diqkc6W2f2cy175wkax5W435CSxaA5
10 | yHqKx2gqgCh8XaAVToT668HP2+sDVU8IfPJx9RlAsoBekDNzSHCvgxGXHtAzK4gj
11 | g5++uF1CyJyX3Z1sYnp1Yf6NsDuX9XaBzDlouuRX4NN//Od1QnuKDjN0VcaxmQGl
12 | VgA0XCw7RjiTzqtCKzXpcRzDeBahCQIDAQABo4GnMIGkMB0GA1UdDgQWBBQ1oCEW
13 | IuJ+m7IXQUy3iyxvweOcLzB1BgNVHSMEbjBsgBQ1oCEWIuJ+m7IXQUy3iyxvweOc
14 | L6FJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV
15 | BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJALnQM6ej60ljMAwGA1UdEwQF
16 | MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAK1e7jmNJ1osCohjOOErb/U2jWVMYfUm
17 | rPE8dMllFVrydRJmFbSy2jcd/ceuwDp/mZFyiIJ67RdGSH0PTWBpHlDubfsTPbgm
18 | zTLSrdqGMjM4Txnp/cjR7DuMK5JMhpJJH7riZes1+Z/FMJmfVCKGmIHyoxaZSaT8
19 | WpvY/FUMPAqvN6Ng6d+GzHh7h/14Om12lr9Ix8O3K8nWQonLc070/saBS39/9nTP
20 | mowgvLTZxsM3Coop1tp3Vyv9iiB4sa7tmeLAlys3gHDSg69Vz9+n8+gw1LiChqmp
21 | 5UPLV3yEhVsBf+DgrRdnblikYwnas3x80ikcX04ksUxoSWGq/d6Zni0=
22 | -----END CERTIFICATE-----
23 |
--------------------------------------------------------------------------------
/microproxy/test/test.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEA49yPnl9HWhEH+mXPBVVUOIlErtive57LzbNUfp1LN23AcqV4
3 | 3XJKwHqrA0wKo0FA8Fr2b8/ioesqbBJXzi/Fie6nLEyg5w1q9bNlBSD348E232A8
4 | IM9xvafhlYTx7gteCiPDM3Vz4i1nBIWZW7hUNO99i+diqkc6W2f2cy175wkax5W4
5 | 35CSxaA5yHqKx2gqgCh8XaAVToT668HP2+sDVU8IfPJx9RlAsoBekDNzSHCvgxGX
6 | HtAzK4gjg5++uF1CyJyX3Z1sYnp1Yf6NsDuX9XaBzDlouuRX4NN//Od1QnuKDjN0
7 | VcaxmQGlVgA0XCw7RjiTzqtCKzXpcRzDeBahCQIDAQABAoIBAQDg8cqudC0CUzHn
8 | Fj6razN7pLezTKLgjUFxToopfKrWi3ijNYv+QWheYDsXRCh0RmUPYx1KAJYhdwEo
9 | M2uPV0XSB1Htv7BQqMRgzrr5tzRBGWtMVbrGQURuEJ/4dYhIkdxCck+wK1E9hcjf
10 | g+yDyZMHty3fwYt7wllwB1AbIFnjFxL5jM+MfyVyB1fIHkCBBSwn9nG3iUJgsyMj
11 | 2dtxZkIrfnw0rytH9mPbU5/USz+a+v2y6ZdOAEtFMj05VjwkJf3dI6UwEKYhmILM
12 | cQMuduZHQXd+j6Z7RSlNBBFVCRsVhijU6sNiiXXPsux9hrqU+i8holshZrW1OBsH
13 | 4NzNyJkBAoGBAPTe9dhivablz11cOK8hVKopQDImbpKaO035ayd3In7ffF7DLLPU
14 | yQOEjD8sWfnE7ViJ8TDSv+i2CgzZiswXmm8AD+zKSJu2t5rUsgjOrEoQNjAn5k0q
15 | PeLbm/b4ddPIRNthwbhVMnsVvjk3HFtRkZyfdLj7fJvFrQPv7M/j8T6ZAoGBAO43
16 | su/Hkupp2/LoZjH5YofcZKWcY8tkRDc8Bt/vVl03nFxFDk2vHABdKSvi1hSAoU5j
17 | HhBvaGOSNJGU3gttjxZnf3eI59nJWxZ8idgAK2wySLxx6O73vkhxHVz/Pirlalt5
18 | A8nMcLHl/+ThvniVJVy1FTx1BJGJUjpMxn2tjyvxAoGAS1BsMwKjrDqQnfloYc0R
19 | mkiXuxUA+0w+o0vsfjXxQS6BtS+4hxMSRGe3LxL4FY9RJONAcLDRX8TlsHAUdZNo
20 | 520QeNb6oBIBxLhYjcbrTNhRolwTRHaSlDzRapOeweWd8A/QIl7p6NuWJ0jhCuE6
21 | tP6CSwbmm46gGU7o+kP0hjkCgYAezFK1D29eISuPnx4/TcfAWu0kQDDixebP4VHe
22 | N9pJmPv9LexofMA5B9Jf2ybkZbD1KnljlJPBc9CaFc4TWinKsrzXHnsjPBYrtRQ5
23 | OeRjS6j9vBhqdNtEEGPEdzd5Ykm6481qlKTpSxwnYkw22MmIz9ycAWEC0+cupGhx
24 | LTQ6cQKBgDJwqbSvsCxlM2pJV8gJD06K0RoN0+nd0nwLradLYxtJW17Ztjugi4s1
25 | 33FSlBzgBIN8nMPzL9jw+sKKv71bHTYwsC+KBZ2TjhXbQyxZNcpCza6BqNJ0xOxs
26 | On7ejRPCz4T0rW/pSOHtKSbNOXBavNxmw2juhj+7YJPjS00j7LVe
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/microproxy/test/test_cert_store.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from microproxy.cert import CertStore
4 | from OpenSSL import crypto
5 |
6 |
class CertStoreTest(unittest.TestCase):
    """Tests for microproxy.cert.CertStore using the checked-in test CA."""

    def setUp(self):
        config = {
            "certfile": "microproxy/test/test.crt",
            "keyfile": "microproxy/test/test.key"
        }

        self.cert_store = CertStore(config)

    def test_create_cert(self):
        cn = "www.test.com"
        cert = self.cert_store.create_cert(cn)

        ca_root = self.cert_store.ca_root

        self.assertIsInstance(cert, crypto.X509)
        # X509 versions are zero-based: 2 means an X509v3 certificate.
        self.assertEqual(2, cert.get_version())
        self.assertEqual(ca_root.get_subject(), cert.get_issuer())
        # TODO: Need to find a way to test whether the pkey content is equal.
        # self.assertEqual(pkey._pkey, cert.get_pubkey()._pkey)
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(unicode(cn), cert.get_subject().CN)

    def test_get_cert_from_cache_nonexist(self):
        # A host that was never generated yields no cached cert.
        cert = self.cert_store.get_cert_from_cache("www.abc.com")
        self.assertIsNone(cert)

    def test_get_cert_from_cache_exist(self):
        # Creating a cert populates the cache for that host.
        orig_cert = self.cert_store.create_cert("www.abc.com")
        new_cert = self.cert_store.get_cert_from_cache("www.abc.com")

        self.assertIsInstance(new_cert, crypto.X509)
        self.assertEqual(orig_cert, new_cert)

    def test_get_cert_and_pkey(self):
        # Repeated lookups for the same host return the same pair.
        old_ca, old_pkey = self.cert_store.get_cert_and_pkey("www.abc.com")

        self.assertIsInstance(old_ca, crypto.X509)
        self.assertIsInstance(old_pkey, crypto.PKey)

        new_ca, new_pkey = self.cert_store.get_cert_and_pkey("www.abc.com")

        self.assertIsInstance(new_ca, crypto.X509)
        self.assertIsInstance(new_pkey, crypto.PKey)
        self.assertEqual(old_ca, new_ca)
        self.assertEqual(old_pkey, new_pkey)
53 |
--------------------------------------------------------------------------------
/microproxy/test/test_log.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from mock import patch
3 |
4 | from microproxy.log import ProxyLogger
5 |
6 |
class TestProxyLogger(unittest.TestCase):
    """Tests for ProxyLogger.init_proxy_logger handler selection.

    BUG FIX: the first two tests both used the name
    test_init_proxy_logger_file_handler, so the second definition shadowed
    the first and the logger-config path was never run. The first test is
    renamed so that both are collected.
    """

    @patch('microproxy.log.ProxyLogger.register_stream_handler')
    @patch('microproxy.log.ProxyLogger.register_file_handler')
    @patch('logging.config.fileConfig')
    def test_init_proxy_logger_logger_config(self,
                                             mock_logging_fileConfig,
                                             mock_register_file_handler,
                                             mock_register_stream_handler):
        # A logger config file takes precedence over file/stream handlers.
        config = {
            "logger_config": "test.cfg",
            "log_file": "test.log",
            "log_level": "INFO"
        }
        ProxyLogger.init_proxy_logger(config)
        mock_logging_fileConfig.assert_called_once_with(filename="test.cfg", encoding="utf8")
        mock_register_file_handler.assert_not_called()
        mock_register_stream_handler.assert_not_called()

    @patch('microproxy.log.ProxyLogger.register_stream_handler')
    @patch('microproxy.log.ProxyLogger.register_file_handler')
    def test_init_proxy_logger_file_handler(self, mock_register_file_handler, mock_register_stream_handler):
        # No logger config: a log file registers a file handler only.
        config = {
            "logger_config": "",
            "log_file": "test.log",
            "log_level": "INFO"
        }
        ProxyLogger.init_proxy_logger(config)
        mock_register_file_handler.assert_called_once_with("test.log")
        mock_register_stream_handler.assert_not_called()

    @patch('microproxy.log.ProxyLogger.register_stream_handler')
    @patch('microproxy.log.ProxyLogger.register_file_handler')
    def test_init_proxy_logger_stream_handler(self, mock_register_file_handler, mock_register_stream_handler):
        # Neither logger config nor log file: fall back to a stream handler.
        config = {
            "logger_config": "",
            "log_file": "",
            "log_level": "INFO"
        }
        ProxyLogger.init_proxy_logger(config)
        mock_register_stream_handler.assert_called_once()
        mock_register_file_handler.assert_not_called()
48 |
--------------------------------------------------------------------------------
/microproxy/test/test_server_state.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import mock
3 |
4 | from microproxy.server_state import init_server_state, _init_cert_store, _init_interceptor
5 |
6 |
class ServerStateAPITest(unittest.TestCase):
    """Tests for the server_state factory helpers."""

    def setUp(self):
        self.config = {}

    @mock.patch("microproxy.server_state.ServerContext")
    @mock.patch("microproxy.server_state._init_cert_store")
    @mock.patch("microproxy.server_state._init_interceptor")
    def test_init_server_state(self,
                               mock_init_interceptor,
                               mock_init_cert_store,
                               MockServerContext):
        # init_server_state should wire the cert store and interceptor
        # into a new ServerContext and return that context.
        pub_socket = {}
        context = init_server_state(self.config, pub_socket)

        mock_init_cert_store.assert_called_once_with(self.config)
        mock_init_interceptor.assert_called_once_with(
            self.config, pub_socket)

        MockServerContext.assert_called_once_with(
            config=self.config,
            interceptor=mock_init_interceptor.return_value,
            cert_store=mock_init_cert_store.return_value)
        self.assertEqual(context, MockServerContext.return_value)

    @mock.patch("microproxy.server_state.CertStore")
    def test_init_cert_store(self, MockCertStore):
        # _init_cert_store is a thin CertStore factory.
        store = _init_cert_store(self.config)

        MockCertStore.assert_called_once_with(self.config)
        self.assertEqual(store, MockCertStore.return_value)

    @mock.patch("microproxy.server_state.MsgPublisher")
    @mock.patch("microproxy.server_state.PluginManager")
    @mock.patch("microproxy.server_state.Interceptor")
    def test_init_interceptor(self,
                              MockInterceptor,
                              MockPluginManager,
                              MockMsgPublisher):
        # _init_interceptor builds the plugin manager and the message
        # publisher, then hands both to Interceptor.
        pub_socket = {}
        interceptor = _init_interceptor(self.config, pub_socket)

        self.assertEqual(interceptor, MockInterceptor.return_value)

        MockPluginManager.assert_called_once_with(self.config)
        MockMsgPublisher.assert_called_once_with(
            self.config, zmq_socket=pub_socket)
        MockInterceptor.assert_called_once_with(
            plugin_manager=MockPluginManager.return_value,
            msg_publisher=MockMsgPublisher.return_value)
57 |
--------------------------------------------------------------------------------
/microproxy/test/tornado_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/tornado_ext/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/utils.py:
--------------------------------------------------------------------------------
1 | import socket
2 |
3 | from tornado.testing import AsyncTestCase, bind_unused_port
4 | from tornado.locks import Event
5 | from tornado.netutil import add_accept_handler
6 | from tornado.gen import coroutine, Return
7 |
8 | from microproxy.tornado_ext.iostream import MicroProxyIOStream
9 |
10 |
class ProxyAsyncTestCase(AsyncTestCase):
    """AsyncTestCase with a helper for building connected stream pairs."""

    @coroutine
    def create_iostream_pair(self):
        """Return (client_stream, server_stream) joined over a local socket."""
        accepted = Event()
        accepted_streams = []

        def on_accept(conn, addr):
            # Wrap the accepted socket and signal the waiting coroutine.
            accepted_streams.append(MicroProxyIOStream(conn))
            accepted.set()

        listener, port = bind_unused_port()
        add_accept_handler(listener, on_accept)
        client_stream = MicroProxyIOStream(socket.socket())
        # Wait for both the outgoing connect and the incoming accept.
        yield [client_stream.connect(('127.0.0.1', port)),
               accepted.wait()]
        self.io_loop.remove_handler(listener)
        listener.close()

        raise Return((client_stream, accepted_streams[0]))
32 |
--------------------------------------------------------------------------------
/microproxy/test/viewer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/test/viewer/__init__.py
--------------------------------------------------------------------------------
/microproxy/test/viewer/test_console.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest import TestCase
3 | from colored import fg, bg, attr
4 |
5 | from microproxy.viewer.console import (
6 | ColorText, TextList, StatusText, Header,
7 | Request, Response, construct_status_summary,
8 | construct_color_msg)
9 | from microproxy.context import (
10 | ViewerContext, HttpRequest, HttpResponse)
11 |
12 |
class TestColorText(TestCase):
    """str(ColorText) should emit the matching `colored` escape sequences."""

    def test_clear_text(self):
        rendered = str(ColorText("clear text"))
        self.assertEqual("clear text", rendered)

    def test_fg(self):
        expected = fg("blue") + "fg test" + attr("reset")
        self.assertEqual(expected, str(ColorText("fg test", fg_color="blue")))

    def test_bg(self):
        expected = bg("blue") + "bg test" + attr("reset")
        self.assertEqual(expected, str(ColorText("bg test", bg_color="blue")))

    def test_bold(self):
        expected = attr("bold") + "bold test" + attr("reset")
        self.assertEqual(expected, str(ColorText("bold test", attrs=["bold"])))

    def test_bold_and_underlined(self):
        expected = (attr("bold") + attr("underlined") +
                    "bold underlined test" + attr("reset"))
        self.assertEqual(
            expected,
            str(ColorText("bold underlined test", attrs=["bold", "underlined"])))

    def test_fg_and_bg(self):
        expected = fg("blue") + bg("blue") + "fg bg test" + attr("reset")
        self.assertEqual(
            expected,
            str(ColorText("fg bg test", fg_color="blue", bg_color="blue")))

    def test_fg_and_bg_and_bold(self):
        expected = (fg("blue") + bg("blue") + attr("bold") +
                    "fg bg test" + attr("reset"))
        self.assertEqual(
            expected,
            str(ColorText("fg bg test", fg_color="blue", bg_color="blue",
                          attrs=["bold"])))
40 |
41 |
class TestTextList(TestCase):
    """TextList should join its items with the configured delimiter."""

    def test_default_delimiter(self):
        text_list = TextList([ColorText("test"), ColorText(123)])
        self.assertEqual("test\n123", str(text_list))

    def test_comma_delimiter(self):
        text_list = TextList([ColorText("test"), ColorText(123)], delimiter=",")
        self.assertEqual("test,123", str(text_list))

    def test_empty(self):
        self.assertEqual("", str(TextList([])))
53 |
54 |
class TestStatusText(TestCase):
    """StatusText should equal a hand-built TextList with the same parts."""

    def test_status_ok(self):
        # Codes below 400 render green.
        expected = TextList(
            [ColorText(200, fg_color="green", attrs=["bold"]),
             "GET", "http://github.com/index"],
            delimiter=" ")
        actual = StatusText(200, "GET", "http://github.com", "/index")
        self.assertEqual(expected.__dict__, actual.__dict__)

    def test_status_error(self):
        # Codes 400 and above render red.
        expected = TextList(
            [ColorText(400, fg_color="red", attrs=["bold"]),
             "GET", "http://github.com/index"],
            delimiter=" ")
        actual = StatusText(400, "GET", "http://github.com", "/index")
        self.assertEqual(expected.__dict__, actual.__dict__)
67 |
68 |
class TestHeader(TestCase):
    """Header should render each (key, value) pair on a blue background."""

    def test_one_header(self):
        expected = TextList([ColorText("Host: github.com", bg_color="blue")])
        actual = Header([("Host", "github.com")])
        self.assertEqual(expected.__dict__, actual.__dict__)

    def test_two_headers(self):
        expected = TextList(
            [ColorText("Header: Value", bg_color="blue"),
             ColorText("Host: github.com", bg_color="blue")])
        actual = Header([("Header", "Value"), ("Host", "github.com")])
        self.assertEqual(expected.__dict__, actual.__dict__)
83 |
84 |
class TestRequest(TestCase):
    """Request rendering should be a title line followed by the headers."""

    def test_simple_request(self):
        request = HttpRequest(headers=[("Host", "github.com")])
        expected = TextList(
            [ColorText("Request Headers:", fg_color="blue", attrs=["bold"]),
             Header([("Host", "github.com")])])
        actual = Request(request)
        self.assertEqual(expected.__dict__, actual.__dict__)
94 |
95 |
class TestResponse(TestCase):
    """Response rendering should be a title line followed by the headers."""

    def test_simple_response(self):
        response = HttpResponse(headers=[("Content-Type", "application/xml")])
        expected = TextList(
            [ColorText("Response Headers:", fg_color="blue", attrs=["bold"]),
             Header([("Content-Type", "application/xml")])])
        actual = Response(response)
        self.assertEqual(expected.__dict__, actual.__dict__)
105 |
106 |
class TestConsole(TestCase):
    """Tests for the module-level console helpers.

    Fixtures are built through ViewerContext.deserialize so the tests
    exercise the same path the live viewer uses. Bodies are supplied
    base64-encoded ("...".encode("base64") — Python 2 only codec),
    presumably because serialized contexts carry base64 bodies — verify
    against ViewerContext.deserialize.
    """

    def test_construct_status_summary(self):
        # Summary line is built from host, path, response code and method.
        status = construct_status_summary(
            ViewerContext.deserialize({
                "host": "example.com",
                "path": "/index",
                "response": {
                    "code": 200,
                },
                "request": {
                    "method": "GET",
                },
            }))
        self.assertEqual(status, StatusText(200, "GET", "example.com", "/index"))

    def test_construct_color_msg_with_status(self):
        # "status" verbosity yields only the one-line summary.
        msg = construct_color_msg(
            ViewerContext.deserialize({
                "host": "example.com",
                "path": "/index",
                "response": {
                    "code": 200,
                },
                "request": {
                    "method": "GET",
                },
            }), "status")
        self.assertEqual(msg, StatusText(200, "GET", "example.com", "/index"))

    def test_construct_color_msg_with_header(self):
        # "header" verbosity adds request/response headers but no bodies.
        msg = construct_color_msg(ViewerContext.deserialize({
            "host": "example.com",
            "path": "/index",
            "response": {
                "code": 200,
                "headers": [
                    ("Content-Type", "application/xml"),
                ],
                "body": "response".encode("base64"),
            },
            "request": {
                "method": "GET",
                "headers": [
                    ("Content-Type", "application/xml"),
                ],
            },
        }), "header")

        self.assertEqual(
            msg,
            TextList([
                StatusText(200, "GET", "example.com", "/index"),
                Request(HttpRequest(method="GET", headers=[("Content-Type", "application/xml")])),
                Response(HttpResponse(code=200, headers=[("Content-Type", "application/xml")]))
            ])
        )

    def test_construct_color_msg_with_body(self):
        # "body" verbosity renders the decoded request/response bodies too.
        msg = construct_color_msg(ViewerContext.deserialize({
            "host": "example.com",
            "path": "/index",
            "response": {
                "code": 200,
                "headers": [
                    ("Content-Type", "application/xml"),
                ],
                "body": "Response".encode("base64"),
            },
            "request": {
                "method": "GET",
                "headers": [
                    ("Content-Type", "application/xml"),
                ],
                "body": "Request".encode("base64"),
            },
        }), "body")

        self.assertEqual(
            msg,
            TextList([
                StatusText(200, "GET", "example.com", "/index"),
                Request(HttpRequest(
                    method="GET",
                    headers=[("Content-Type", "application/xml")],
                    body="Request"), show_body=True),
                Response(HttpResponse(
                    code=200,
                    headers=[("Content-Type", "application/xml")],
                    body="Response"), show_body=True)
            ])
        )
198 |
199 |
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
202 |
--------------------------------------------------------------------------------
/microproxy/tornado_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/tornado_ext/__init__.py
--------------------------------------------------------------------------------
/microproxy/tornado_ext/tcpserver.py:
--------------------------------------------------------------------------------
1 | from tornado.log import app_log
2 | from tornado import tcpserver
3 | from iostream import MicroProxyIOStream
4 |
5 |
class TCPServer(tcpserver.TCPServer):
    """tornado TCPServer that wraps accepted sockets in MicroProxyIOStream.

    The extended IOStream is required so that a later start_tls call can
    create the proper ssl context.

    NOTE: currently only the plain IOStream is used; passing ssl_options
    to the constructor will NOT switch to an SSLIOStream.
    """
    # Fix: removed a redundant pass-through __init__ that only called
    # super() with the same arguments; the inherited constructor suffices.

    def _handle_connection(self, connection, address):
        """Wrap *connection* in a MicroProxyIOStream and hand it to handle_stream."""
        try:
            stream = MicroProxyIOStream(connection,
                                        io_loop=self.io_loop,
                                        max_buffer_size=self.max_buffer_size,
                                        read_chunk_size=self.read_chunk_size)
            future = self.handle_stream(stream)
            if future is not None:
                # Surface exceptions raised inside the stream handler.
                self.io_loop.add_future(future, lambda f: f.result())

        except Exception:
            app_log.error("Error in connection callback", exc_info=True)
31 |
--------------------------------------------------------------------------------
/microproxy/utils.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | from zmq.eventloop import zmqstream
3 | from zmq.eventloop.ioloop import IOLoop
4 | from OpenSSL import SSL
5 |
6 |
# Detect whether the linked OpenSSL build exposes ALPN support; older
# cryptography/pyOpenSSL builds lack the flag entirely.
# Fix: catch AttributeError specifically instead of a bare except, which
# also swallowed KeyboardInterrupt/SystemExit.
try:
    HAS_ALPN = SSL._lib.Cryptography_HAS_ALPN
except AttributeError:
    HAS_ALPN = False
11 |
12 |
def curr_loop():  # pragma: no cover
    # Thin indirection so callers (and tests) obtain the active IOLoop
    # through one place.
    return IOLoop.current()
15 |
16 |
def create_publish_channel(channel):  # pragma: no cover
    """Bind a zmq PUB socket to *channel* and return it."""
    ctx = zmq.Context()
    pub_socket = ctx.socket(zmq.PUB)
    pub_socket.bind(channel)
    return pub_socket
22 |
23 |
def create_event_channel(channel):  # pragma: no cover
    """Bind a zmq PULL socket to *channel*, wrapped as a ZMQStream."""
    ctx = zmq.Context()
    pull_socket = ctx.socket(zmq.PULL)
    pull_socket.bind(channel)
    return zmqstream.ZMQStream(pull_socket)
29 |
--------------------------------------------------------------------------------
/microproxy/version.py:
--------------------------------------------------------------------------------
1 | VERSION="0.4.1+dev"
2 |
--------------------------------------------------------------------------------
/microproxy/viewer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mike820324/microProxy/64c7c5add4759c6e105b9438cd18c0f8c930c7a3/microproxy/viewer/__init__.py
--------------------------------------------------------------------------------
/microproxy/viewer/console.py:
--------------------------------------------------------------------------------
1 | from microproxy.version import VERSION
2 | import zmq
3 | import json
4 | from colored import fg, bg, attr
5 |
6 | from microproxy.context import ViewerContext, Event
7 | from microproxy.event import EventClient, REPLAY
8 | from formatter import ConsoleFormatter
9 |
10 | _formatter = ConsoleFormatter()
11 |
12 |
class ColorText(object):
    """A piece of text with optional ANSI fg/bg colors and attributes.

    Rendering wraps the text in the matching `colored` escape codes;
    plain text is returned untouched when nothing is set.
    """

    def __init__(self,
                 text,
                 fg_color=None,
                 bg_color=None,
                 attrs=None):
        self.text = str(text)
        self.fg_color = fg_color
        self.bg_color = bg_color
        self.attrs = attrs or []

    def __unicode__(self):
        if not (self.fg_color or self.bg_color or self.attrs):
            return self.text.decode("utf8")
        _str = fg(self.fg_color).decode("utf8") if self.fg_color else u""
        _str += bg(self.bg_color).decode("utf8") if self.bg_color else u""
        _str += u"".join(map(lambda a: attr(a), self.attrs))
        _str += self.text.decode("utf8")
        _str += attr("reset").decode("utf8")
        return _str

    def __str__(self):
        return self.__unicode__().encode("utf8")

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return (
            type(self) is type(other) and
            self.__dict__ == other.__dict__
        )

    def __ne__(self, other):
        # BUG FIX: this was misspelled "__neq__", a name Python never
        # calls, so "!=" did not delegate to __eq__.
        return not self.__eq__(other)
48 |
49 |
class TextList(object):
    """An ordered list of renderable items joined by a delimiter."""

    def __init__(self, text_list, delimiter=u"\n"):
        self.text_list = text_list
        self.delimiter = delimiter

    def __unicode__(self):
        return self.delimiter.join(map(lambda s: unicode(s), self.text_list))

    def __str__(self):
        return self.__unicode__().encode("utf8")

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return (
            type(self) is type(other) and
            self.__dict__ == other.__dict__
        )

    def __ne__(self, other):
        # BUG FIX: this was misspelled "__neq__", a name Python never
        # calls, so "!=" did not delegate to __eq__.
        return not self.__eq__(other)
72 |
73 |
class StatusText(TextList):
    """One-line summary: colored status code, method, then full URL."""

    FG_COLOR_OK = "green"
    FG_COLOR_NOT_OK = "red"
    ATTRS = ["bold"]

    def __init__(self, status_code, method, host, path):
        # 2xx/3xx render green; 4xx/5xx render red.
        if status_code < 400:
            code_color = self.FG_COLOR_OK
        else:
            code_color = self.FG_COLOR_NOT_OK
        code_text = ColorText(status_code, fg_color=code_color, attrs=self.ATTRS)
        super(StatusText, self).__init__(
            [code_text, method, host + path],
            delimiter=u" ")
85 | delimiter=u" ")
86 |
87 |
class Header(TextList):
    """Render "Key: Value" lines, one ColorText per header pair."""

    BG_COLOR = "blue"

    def __init__(self, headers):
        # Fix: replaced a Python-2-only tuple-unpacking lambda
        # (removed by PEP 3113) with an equivalent list comprehension,
        # which also stays a list under Python 3's lazy map().
        super(Header, self).__init__(
            [ColorText("{0}: {1}".format(k, v), bg_color=self.BG_COLOR)
             for k, v in headers])
95 |
96 |
class Request(TextList):
    """Printable request section: header lines and, optionally, the body."""

    HEADER_TITLE = "Request Headers:"
    BODY_TITLE = "Request Body:"
    FG_COLOR = "blue"
    ATTRS = ["bold"]

    def __init__(self, request, show_body=False):
        title = ColorText(self.HEADER_TITLE, fg_color=self.FG_COLOR, attrs=self.ATTRS)
        content = [title, Header(request.headers)]
        if show_body and request.body:
            content.append(ColorText(self.BODY_TITLE, fg_color=self.FG_COLOR, attrs=self.ATTRS))
            content.append(_formatter.format_body(request.body, request.headers))
        super(Request, self).__init__(content)
112 |
113 |
class Response(TextList):
    """Printable response section: header lines and, optionally, the body."""

    HEADER_TITLE = "Response Headers:"
    BODY_TITLE = "Response Body:"
    FG_COLOR = "blue"
    ATTRS = ["bold"]

    def __init__(self, response, show_body=False):
        title = ColorText(self.HEADER_TITLE, fg_color=self.FG_COLOR, attrs=self.ATTRS)
        content = [title, Header(response.headers)]
        if show_body and response.body:
            content.append(ColorText(self.BODY_TITLE, fg_color=self.FG_COLOR, attrs=self.ATTRS))
            content.append(_formatter.format_body(response.body, response.headers))
        super(Response, self).__init__(content)
129 |
130 |
def construct_status_summary(message):
    """Build the one-line StatusText for a ViewerContext-like message."""
    # TODO: need update here when we implement new context system
    return StatusText(
        message.response.code,
        message.request.method,
        message.host,
        message.path)
138 |
139 |
def construct_color_msg(message, verbose_level):
    """Render *message* at verbosity "status", "header", or "body"/"all".

    Any other verbosity level yields None (implicit return).
    """
    status = construct_status_summary(message)

    if verbose_level == "status":
        return status
    elif verbose_level == "header":
        return TextList(
            [status, Request(message.request), Response(message.response)])
    elif verbose_level in ("body", "all"):
        return TextList([
            status,
            Request(message.request, show_body=True),
            Response(message.response, show_body=True)])
151 |
152 |
def create_msg_channel(channel):  # pragma: no cover
    """Connect a zmq SUB socket to *channel*, subscribed to "message"."""
    ctx = zmq.Context()
    sub_socket = ctx.socket(zmq.SUB)
    sub_socket.connect(channel)
    sub_socket.setsockopt(zmq.SUBSCRIBE, "message")
    return sub_socket
159 |
160 |
def replay(channel_addr, replay_file):  # pragma: no cover
    """Send every JSON line of *replay_file* as a REPLAY event.

    Fix: the file handle is now closed deterministically via a context
    manager; the original left it open.
    """
    client = EventClient(channel_addr)
    with open(replay_file, "r") as fp:
        for line in fp:
            if line:
                ctx = json.loads(line)
                client.send_event(Event(REPLAY, ctx))
168 |
169 |
def start(config): # pragma: no cover
    """Run the console viewer loop, printing each proxied message.

    Blocks forever reading from the viewer channel; exits with code 0 on
    Ctrl-C, closing the dump file if one was opened.
    """
    proxy_host = config["proxy_host"]
    viewer_channel = "{0}:{1}".format(proxy_host, config["viewer_port"])
    events_channel = "{0}:{1}".format(proxy_host, config["events_port"])
    socket = create_msg_channel(viewer_channel)
    verbose_level = config["verbose_level"]
    print ColorText("MicroProxy Simple Viewer {}".format(VERSION),
                    fg_color="blue",
                    attrs=["bold"])

    # Optionally replay a recorded script before starting to listen.
    if "replay_file" in config and config["replay_file"]:
        replay(events_channel, config["replay_file"])

    dump_file = None
    if "dump_file" in config and config["dump_file"]:
        dump_file = config["dump_file"]

    # NOTE(review): fp is only bound when dump_file is set; every later
    # use is guarded by "if dump_file", so this is safe as written.
    if dump_file:
        fp = open(dump_file, "w")

    while True:
        try:
            topic, data = socket.recv_multipart()
            viewer_context = ViewerContext.deserialize(json.loads(data))
            # Raw serialized message is dumped one JSON object per line.
            if dump_file:
                fp.write(data)
                fp.write("\n")

            print construct_color_msg(viewer_context, verbose_level)
            print
        except KeyboardInterrupt:
            print ColorText("Closing Simple Viewer",
                            fg_color="blue",
                            attrs=["bold"])
            if dump_file:
                fp.close()
            exit(0)
207 |
--------------------------------------------------------------------------------
/microproxy/viewer/tui.py:
--------------------------------------------------------------------------------
1 | import zmq
2 | from zmq.eventloop import ioloop, zmqstream
3 | import urwid
4 | import json
5 | from backports.shutil_get_terminal_size import get_terminal_size
6 |
7 | import gviewer
8 |
9 | from microproxy.context import ViewerContext, Event
10 | from microproxy.event import EventClient, REPLAY
11 | from microproxy.viewer.formatter import TuiFormatter
12 |
13 |
class Tui(gviewer.BaseDisplayer):
    """Interactive terminal viewer for intercepted HTTP messages.

    Streams ViewerContext messages from the proxy's zmq "message" topic
    into a gviewer UI, and offers per-message actions to export a replay
    script, replay the message, and inspect proxy logs.
    """

    # gviewer palette entries: (name, foreground, background, attribute).
    PALETTE = [
        ("code ok", "light green", "black", "bold"),
        ("code error", "light red", "black", "bold"),
        ("indicator", "yellow", "black", "bold")
    ]
    # Fallback output path for the "export replay script" action when the
    # config has no "out_file".
    DEFAULT_EXPORT_REPLAY_FILE = "replay.script"

    def __init__(self, config, stream=None, event_loop=None):
        """Build the gviewer UI.

        ``stream`` and ``event_loop`` are injectable for tests; by default
        a zmq subscription on config["viewer_channel"] and a tornado-backed
        urwid event loop are created.
        """
        stream = (
            stream or
            create_msg_channel(config["viewer_channel"], "message")
        )
        data_store = MessageAsyncDataStore(stream.on_recv)

        context = gviewer.DisplayerContext(
            data_store, self, actions=gviewer.Actions([
                ("e", "export replay script", self.export_replay),
                ("r", "replay", self.replay),
                ("L", "log", self.log)]))

        self.log_context = LogDisplayer(config).context
        event_loop = event_loop or urwid.TornadoEventLoop(ioloop.IOLoop.instance())
        self.viewer = gviewer.GViewer(
            context, palette=self.PALETTE,
            other_contexts=[self.log_context],
            config=gviewer.Config(auto_scroll=True),
            event_loop=event_loop)
        self.formatter = TuiFormatter()
        self.config = config
        self.event_client = EventClient(config["events_channel"])
        # Used by _fold_path to truncate long URLs to the screen width.
        self.terminal_width, _ = get_terminal_size()

    def start(self):
        """Re-send events from an optional replay file, then run the UI."""
        replay_file = self.config.get("replay_file")
        if replay_file:
            # Each non-empty line is one JSON-serialized event context.
            # Use a context manager so the handle is closed (the previous
            # code leaked the file object).
            with open(replay_file, "r") as f:
                for line in f:
                    if line:
                        self.execute_replay(json.loads(line))

        self.viewer.start()

    def _code_text_markup(self, code):
        # Color 2xx/3xx green, 4xx/5xx red.
        if int(code) < 400:
            return ("code ok", str(code))
        return ("code error", str(code))

    def _fold_path(self, path):
        # Keep the summary line within the terminal; 16 columns are
        # reserved for the indicator/status-code/method prefix.
        max_width = self.terminal_width - 16
        return path if len(path) < max_width else path[:max_width - 1] + "..."

    def _format_port(self, scheme, port):
        """Return ":port", or "" when it is the scheme's default port."""
        if (scheme, port) in [("https", 443), ("http", 80)]:
            return ""
        return ":" + str(port)

    def summary(self, message, exported=False):
        """Build the one-line list-entry markup for *message*."""
        # NOTE(review): the exported mark is two chars ("V ") vs one (" "),
        # so columns shift slightly once a row is exported -- confirm intent.
        mark = "V " if exported else " "
        pretty_path = self._fold_path("{0}://{1}{2}{3}".format(
            message.scheme,
            message.host,
            self._format_port(message.scheme, message.port),
            message.path)
        )
        return [
            ("indicator", mark),
            self._code_text_markup(message.response.code),
            " {0:7} {1}".format(
                message.request.method,
                pretty_path)
        ]

    def get_views(self):
        """Tabs shown for a selected message."""
        return [("Request", self.request_view),
                ("Response", self.response_view),
                ("Detail", self.detail_view)]

    def request_view(self, message):
        """Detail view for the request: basic props, headers, body."""
        groups = []
        request = message.request
        groups.append(gviewer.PropsGroup(
            "",
            [gviewer.Prop("method", request.method),
             gviewer.Prop("path", request.path),
             gviewer.Prop("version", request.version)]))
        groups.append(gviewer.PropsGroup(
            "Request Header",
            [gviewer.Prop(k, v) for k, v in request.headers]))

        if request.body:
            body_list = self.formatter.format_body(
                request.body, request.headers)
            groups.append(gviewer.Group(
                "Request Body", body_list))
        return gviewer.View(groups)

    def response_view(self, message):
        """Detail view for the response: code/reason props, headers, body."""
        groups = []
        response = message.response
        groups.append(gviewer.PropsGroup(
            "",
            [gviewer.Prop("code", str(response.code)),
             gviewer.Prop("reason", response.reason),
             gviewer.Prop("version", response.version)]))
        groups.append(gviewer.PropsGroup(
            "Response Header",
            [gviewer.Prop(k, v) for k, v in response.headers]))

        if response.body:
            body_list = self.formatter.format_body(
                response.body, response.headers)
            groups.append(gviewer.Group(
                "Response Body", body_list))
        return gviewer.View(groups)

    def detail_view(self, message):
        """Connection-level details, including TLS info when present."""
        groups = []

        groups.append(gviewer.PropsGroup(
            "Detail",
            [
                gviewer.Prop("Host", message.host),
                gviewer.Prop("Port", str(message.port)),
                gviewer.Prop("Path", message.path)
            ]
        ))
        if message.client_tls:
            groups.append(gviewer.PropsGroup(
                "Client Connection",
                [
                    gviewer.Prop("TLS Version", message.client_tls.version),
                    gviewer.Prop("Server Name Notation", message.client_tls.sni),
                    gviewer.Prop("Cipher", message.client_tls.cipher),
                    gviewer.Prop("ALPN", message.client_tls.alpn),
                ]
            ))
        if message.server_tls:
            groups.append(gviewer.PropsGroup(
                "Server Connection",
                [
                    gviewer.Prop("TLS Version", message.server_tls.version),
                    gviewer.Prop("Server Name Notation", message.server_tls.sni),
                    gviewer.Prop("Cipher", message.server_tls.cipher),
                    gviewer.Prop("ALPN", message.server_tls.alpn),
                ]
            ))
        return gviewer.View(groups)

    def export_replay(self, parent, message, widget, *args, **kwargs):
        """Append the serialized message to the replay script file."""
        export_file = self.config.get("out_file", self.DEFAULT_EXPORT_REPLAY_FILE)

        with open(export_file, "a") as f:
            f.write(json.dumps(message.serialize()))
            f.write("\n")
        # Mark the row as exported and tell the user where it went.
        widget.set_title(self.summary(message, exported=True))
        parent.notify("replay script export to {0}".format(export_file))

    def replay(self, parent, message, *args, **kwargs):
        """Ask the proxy to replay *message* via the event channel."""
        self.event_client.send_event(Event(name=REPLAY, context=message))
        if parent:
            parent.notify("sent replay event to server")

    def execute_replay(self, event):
        """Send a raw (already json-decoded) replay event to the proxy."""
        self.event_client.send_event(Event(name=REPLAY, context=event))

    def log(self, controller, message, widget, *args, **kwargs):
        """Switch the UI to the log display."""
        controller.open_view_by_context(self.log_context)
180 |
181 |
class MessageAsyncDataStore(gviewer.AsyncDataStore):  # pragma: no cover
    """Async data store that decodes zmq frames into ViewerContext objects."""

    def transform(self, message):
        """Turn a (topic, json_payload) multipart frame into a ViewerContext."""
        payload = json.loads(message[1])
        return ViewerContext.deserialize(payload)
186 |
187 |
class LogDisplayer(gviewer.BaseDisplayer):
    """gviewer displayer that shows proxy log lines from the "logger" topic."""

    def __init__(self, config, stream=None):
        """Subscribe to the logger topic unless a stream is injected."""
        stream = stream or create_msg_channel(config["viewer_channel"], "logger")
        store = gviewer.AsyncDataStore(stream.on_recv)
        self.context = gviewer.DisplayerContext(store, self)

    def summary(self, message):
        """Show only the first line of a (possibly multi-line) log entry."""
        text = message[1]
        return text.split("\n")[0] if "\n" in text else text

    def get_views(self):
        """A single unnamed view rendering the full entry."""
        return [("", self.show)]

    def show(self, message):
        """Render each line of the log entry as its own Text widget."""
        texts = [gviewer.Text(part) for part in message[1].split("\n")]
        return gviewer.View([gviewer.Group("", texts)])
211 |
212 |
def create_msg_channel(channel, topic):  # pragma: no cover
    """Subscribe to *topic* on the zmq pub/sub *channel*; return a ZMQStream."""
    ctx = zmq.Context()
    sub_socket = ctx.socket(zmq.SUB)
    sub_socket.connect(channel)
    sub_socket.setsockopt(zmq.SUBSCRIBE, topic)
    return zmqstream.ZMQStream(sub_socket)
219 |
220 |
def start(config):  # pragma: no cover
    """Entry point: install the tornado ioloop and run the TUI viewer."""
    ioloop.install()
    viewer = Tui(config)
    viewer.start()
225 |
--------------------------------------------------------------------------------
/microproxy/viewer/utils.py:
--------------------------------------------------------------------------------
import StringIO
import gzip
import io
3 |
4 |
def ungzip(content):
    """Decompress a gzip-compressed byte string and return the raw bytes.

    Uses io.BytesIO (available on both Python 2.7 and 3) instead of the
    Python-2-only StringIO module, and a context manager so the GzipFile
    is always closed.
    """
    with gzip.GzipFile(fileobj=io.BytesIO(content)) as f:
        return f.read()
10 |
--------------------------------------------------------------------------------
/mpcertutil.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
download_mozilla_certs()
{
    # Fetch Mozilla's trusted CA bundle into "${1}/cacert.pem";
    # exits the whole script when the download fails.
    local destination_dir
    destination_dir="${1}"
    local pem_url
    pem_url=https://curl.haxx.se/ca/cacert.pem

    # Fixed typo in the user-facing message ("Downlading").
    echo "Downloading trusted ca from ${pem_url}"
    if ! wget "${pem_url}" -O "${destination_dir}/cacert.pem"; then
        echo "Download failed"
        exit 1
    fi
}
18 |
get_client_ca()
{
    # Copy the platform's system CA bundle into "${1}/cacert.pem" when one
    # exists; otherwise fall back to downloading Mozilla's bundle.
    local destination_dir
    destination_dir="${1}"

    local platform
    platform=$(uname -s)

    case "${platform}" in
        Linux)
            echo "Checking /etc/ssl/certs/ca-certificates.crt"
            if [ -s /etc/ssl/certs/ca-certificates.crt ]; then
                cp /etc/ssl/certs/ca-certificates.crt "${destination_dir}/cacert.pem"
            fi
            ;;
        Darwin)
            echo "Checking /usr/local/etc/openssl/cert.pem"
            if [ -s /usr/local/etc/openssl/cert.pem ]; then
                echo "Using /usr/local/etc/openssl/cert.pem"
                cp /usr/local/etc/openssl/cert.pem "${destination_dir}/cacert.pem"
            fi
            ;;
    esac

    if [ ! -f "${destination_dir}/cacert.pem" ]; then
        download_mozilla_certs "${destination_dir}"
    fi
}
44 |
create_server_ca()
{
    # Interactively generate a self-signed cert/key pair for microProxy
    # under "${1}"; exits the whole script when openssl fails.
    local cert_file
    cert_file="${1}/cert.crt"
    local key_file
    key_file="${1}/cert.key"

    echo "Creating certificate for microProxy"
    if ! openssl req -new -x509 -days 365 -nodes -out "${cert_file}" -keyout "${key_file}"; then
        echo "Creating certificate failed"
        exit 1
    fi
}
60 |
print_usage()
{
    # Print the command-line help text.
    # Fixed typos in the messages: "locald" -> "local", missing "from"
    # before "curl", and "microproy" -> "microProxy".
    echo "Usage: mpcertutil.sh MODE [PATH]"
    echo ""
    echo " Create microProxy related certificate to the given PATH."
    echo " PATH when not specified, will be the current directory."
    echo ""
    echo "example:"
    echo " mpcertutil.sh all /var/tmp"
    echo " mpcertutil.sh client /var/tmp"
    echo ""
    echo "Output MODE:"
    echo " all: create both client and server certificate."
    echo " client: Check local trusted ca file. "
    echo " If can not find one, download trusted ca file from curl."
    echo " server: create microProxy server cert/key file"

}
79 |
# Argument handling: MODE is required; PATH defaults to the current directory.
if [ $# -lt 1 ]; then
    echo "mpcertutil.sh requires at least one argument."
    print_usage
    exit 1
fi

if [ $# -eq 1 ]; then
    download_path=$(pwd)
else
    download_path="${2}"
fi

if [ ! -d "${download_path}" ]; then
    echo "${download_path} folder does not exist"
    exit 1
fi

# Quote ${download_path} so paths containing spaces are passed intact
# (the previous calls were unquoted and would word-split).
if [ "${1}" == "all" ]; then
    create_server_ca "${download_path}"
    get_client_ca "${download_path}"
elif [ "${1}" == "client" ]; then
    get_client_ca "${download_path}"
elif [ "${1}" == "server" ]; then
    create_server_ca "${download_path}"
else
    echo "unknown command"
    exit 1
fi
exit 0
109 |
--------------------------------------------------------------------------------
/requirements/development.txt:
--------------------------------------------------------------------------------
1 | -r proxy.txt
2 | -r viewer.txt
3 |
4 | mock==2.0.0
5 | coverage==4.0.3
6 | coveralls==1.1
7 |
--------------------------------------------------------------------------------
/requirements/proxy.txt:
--------------------------------------------------------------------------------
1 | tornado==4.3
2 | pyzmq==15.4.0
3 | watchdog==0.8.3
4 | pyOpenSSL==16.0.0
5 | service-identity==16.0.0
6 | certifi==2016.8.8
7 | construct==2.8.8
8 | six==1.10.0
9 | h2==2.4.2
10 | h11==0.7.0
11 | socks5==0.2.1
12 |
--------------------------------------------------------------------------------
/requirements/viewer.txt:
--------------------------------------------------------------------------------
1 | colored==1.2.2
2 | urwid==1.3.1
3 | gviewer==3.0.6
4 | jsbeautifier==1.6.3
5 | cssutils==1.0.1
6 | lxml==3.6.0
7 | backports.shutil-get-terminal-size==1.0.0
8 | Pygments==2.1.3
9 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages
from codecs import open
from microproxy.version import VERSION
import os

here = os.path.abspath(os.path.dirname(__file__))

# Long description rendered on the PyPI project page.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Parse the pinned requirement files so requirements/*.txt remains the
# single source of truth for dependency versions.
with open(os.path.join(here, './requirements/proxy.txt')) as f:
    proxy_deps = [dep for dep in f.read().split("\n") if dep]

with open(os.path.join(here, './requirements/viewer.txt')) as f:
    viewer_deps = [dep for dep in f.read().split("\n") if dep]

# Skip "-r ..." include lines; they reference the files parsed above.
with open(os.path.join(here, './requirements/development.txt')) as f:
    dev_deps = [dep for dep in f.read().split("\n") if dep and "-r" not in dep]

setup(
    name="microProxy",
    version=VERSION,
    description="A http/https interceptor proxy written in python inspired by mitmproxy",
    long_description=long_description,
    url="https://github.com/mike820324/microProxy",
    author="MicroMike",
    author_email="mike820324@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Environment :: Console",
        "Environment :: Console :: Curses",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Security",
        "Topic :: Internet",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: Proxy Servers",
        "Topic :: Software Development :: Testing"
    ],
    packages=find_packages(include=[
        "microproxy", "microproxy.*",
    ]),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            "mpserver=microproxy.command_line:mpserver",
            "mptui=microproxy.command_line:mptui",
            "mpdump=microproxy.command_line:mpdump",
        ]
    },
    # Previously a hard-coded duplicate of requirements/proxy.txt that had
    # drifted out of sync (e.g. a different "construct" pin). Using the
    # parsed list removes the dead proxy_deps variable and keeps the pins
    # from diverging again.
    install_requires=proxy_deps,
    extras_require={
        'viewer': viewer_deps,
        'develop': dev_deps
    }
)
75 |
--------------------------------------------------------------------------------