├── ChangeLog
├── INSTALL
├── LICENSE
├── MANIFEST.in
├── README
├── README.md
├── TODO
├── bin
└── obfsproxy
├── doc
├── HOWTO.txt
├── obfs2
│ ├── obfs2-protocol-spec.txt
│ └── obfs2-threat-model.txt
├── obfs3
│ ├── obfs3-protocol-spec.txt
│ └── obfs3-threat-model.txt
└── scramblesuit
│ ├── ChangeLog
│ └── scramblesuit-spec.txt
├── obfsproxy
├── __init__.py
├── _version.py
├── common
│ ├── __init__.py
│ ├── aes.py
│ ├── argparser.py
│ ├── heartbeat.py
│ ├── hmac_sha256.py
│ ├── log.py
│ ├── modexp.py
│ ├── rand.py
│ ├── serialize.py
│ └── transport_config.py
├── managed
│ ├── __init__.py
│ ├── client.py
│ └── server.py
├── network
│ ├── __init__.py
│ ├── buffer.py
│ ├── extended_orport.py
│ ├── launch_transport.py
│ ├── network.py
│ ├── socks.py
│ └── socks5.py
├── pyobfsproxy.py
├── test
│ ├── __init__.py
│ ├── int_tests
│ │ └── pits_design.txt
│ ├── test_aes.py
│ ├── test_buffer.py
│ ├── test_obfs3_dh.py
│ ├── test_socks.py
│ ├── test_socks5.py
│ ├── tester.py
│ └── transports
│ │ ├── __init__.py
│ │ ├── test_b64.py
│ │ ├── test_obfs3_dh.py
│ │ └── test_scramblesuit.py
└── transports
│ ├── __init__.py
│ ├── b64.py
│ ├── base.py
│ ├── dummy.py
│ ├── model
│ ├── __init__.py
│ └── dummy_nn.py
│ ├── nnmorph.py
│ ├── obfs2.py
│ ├── obfs3.py
│ ├── obfs3_dh.py
│ ├── scramblesuit
│ ├── __init__.py
│ ├── const.py
│ ├── fifobuf.py
│ ├── message.py
│ ├── mycrypto.py
│ ├── packetmorpher.py
│ ├── probdist.py
│ ├── replay.py
│ ├── scramblesuit.py
│ ├── state.py
│ ├── ticket.py
│ ├── uniformdh.py
│ └── util.py
│ └── transports.py
├── setup.py
├── setup_py2exe.py
├── trainer
└── deepcorr.py
└── versioneer.py
/ChangeLog:
--------------------------------------------------------------------------------
1 | Changes in version 0.2.7 - 2014-03-15
2 | - Support SOCKS5 instead of SOCKS4. Patch by Yawning Angel. Fixes #9221.
3 | - Fix a scramblesuit bug that makes bridges reject a session
4 | ticket connection from already seen clients. Diagnosed and patched
5 | by Yawning Angel. Fixes #11100.
6 | - obfs3 now uses twisted.internet.threads.deferToThread to process
7 | the key exchange outside of the main event loop.
8 | Patch by Yawning Angel. Fixes #11015.
9 | - Support gmpy2 if it is available in addition to gmpy.
10 | Patch by Yawning Angel.
11 |
12 |
13 | Changes in version 0.2.6 - 2014-02-03
14 | - Stop having 'gmpy' as a hard dependency by removing it from setup.py.
15 | Now gmpy is only used if it was already installed on the system.
16 |
17 |
18 | Changes in version 0.2.5 - 2014-02-03
19 | - Use gmpy's modular exponentiation function since it's more efficient.
20 | Fixes #10031 and adds gmpy as a dependency. Patch by Philipp Winter.
21 | - Add a transport method called setup() that gets called on obfsproxy
22 | startup and can be used by transports for expensive initializations.
23 | Patch by David Stainton.
24 | - Add a transport method called get_public_server_options() that allows
25 | transports to filter server-side options that should not be announced
26 | to BridgeDB (because they might leak filesystem paths etc.) .
27 | Patch by David Stainton. Fixes #10243.
28 | - Make the circuit an attribute of the transport, rather than passing it
29 | as a method argument. Patch by Ximin Luo. Fixes #10342.
30 | - Rename the handshake() method to circuitConnected().
31 | Patch by Ximin Luo.
32 | - Add ScrambleSuit as transport protocol. Fixes #10598.
33 |
34 |
35 | Changes in version 0.2.4 - 2013-09-30
36 | - Make pluggable transports aware of where they should store state
37 | in the filesystem. Also introduce --data-dir CLI switch to specify
38 | the path in external mode. Fixes #9815. Patch by Philipp Winter.
39 | - Pass server-side parameters (like shared-secrets) from Tor to the
40 | transports. Fixes #8979.
41 |
42 |
43 | Changes in version 0.2.3 - 2013-09-11
44 | - Use the new pyptlib API (>= pyptlib-0.0.4). Patch by Ximin Luo.
45 | - Add support for sending the pluggable transport name to Tor (using
46 | the Extended ORPort) so that it can be considered in the statistics.
47 | - Remove licenses of dependencies from the LICENSE file. (They were
48 | moved to be with browser bundle packaging scripts.)
49 | - Fix a bug in the SOCKS code. An assertion would trigger if
50 | the SOCKS destination sent traffic before obfsproxy did.
51 | Fixes #9239.
52 | - Add a --version switch. Fixes #9255.
53 |
54 |
55 | Changes in version 0.2.2 - 2013-04-15
56 | - Fix a bug where the CLI compatibility patch that was introduced
57 | in 0.2.1 was placed in the wrong place, making it useless when
58 | obfsproxy gets installed. Patch by Lunar.
59 | - Add dependencies to the setup script.
60 | - Update the HOWTO to use pip.
61 |
62 |
63 | Changes in version 0.2.1 - 2013-04-08
64 | - Rename project from "pyobfsproxy" to "obfsproxy"!
65 | - Add licenses of dependencies to the LICENSE file.
66 | - Add support for logging exceptions to logfiles.
67 | - Add shared secret support to obfs2.
68 | - Add support for per-connection SOCKS arguments.
69 | - Add a setup script for py2exe.
70 | - Slightly improve the executable script.
71 | - Improve command line interface compatibility between C-obfpsroxy
72 | and Python-obfsproxy by supporting the "--managed" switch.
73 |
74 |
75 | Changes in version 0.0.2 - 2013-02-17
76 | - Add some more files to the MANIFEST.in.
77 |
78 |
79 | Changes in version 0.0.1 - 2013-02-15
80 | - Initial release.
81 |
--------------------------------------------------------------------------------
/INSTALL:
--------------------------------------------------------------------------------
1 | Just run: # python setup.py install
2 |
3 | You will need to run the above command as root. It will install
4 | obfsproxy somewhere in your $PATH. If you don't want that, you can
5 | try to run
6 | $ python setup.py install --user
7 | as your regular user, and setup.py will install obfsproxy somewhere
8 | in your home directory.
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This is the license of the obfsproxy software.
2 |
3 | Copyright 2013 George Kadianakis
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are
7 | met:
8 |
9 | * Redistributions of source code must retain the above copyright
10 | notice, this list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above
13 | copyright notice, this list of conditions and the following disclaimer
14 | in the documentation and/or other materials provided with the
15 | distribution.
16 |
17 | * Neither the names of the copyright owners nor the names of its
18 | contributors may be used to endorse or promote products derived from
19 | this software without specific prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include obfsproxy *.py *.pits *.txt
2 | recursive-include doc *
3 | recursive-include bin *
4 | include TODO
5 | include LICENSE
6 | include versioneer.py
7 | include INSTALL
8 | include ChangeLog
9 | include setup_py2exe.py
10 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | GAN-based-obfuscation is a pluggable transport proxy written in Python.
2 |
3 |
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BLANKET
2 |
3 |
4 | Tor Transport plugin for the paper: "Defeating DNN-Based Traffic Analysis Systems in Real-Time With Blind Adversarial Perturbations"
5 | https://arxiv.org/pdf/2002.06495
6 |
7 |
8 |
9 |
10 | Based on obfs3
11 |
12 |
13 |
14 | Step 0: Install Python
15 |
16 | To use obfsproxy you will need Python (>= 2.7) and pip. If you use
17 | Debian testing (or unstable), or a version of Ubuntu newer than
18 | Oneiric, this is easy:
19 |
20 | $ apt-get install python2.7 python-pip python-dev build-essential libgmp-dev
21 |
22 |
23 | Step 1: Install Tor
24 |
25 | You will also need a development version of Tor. To do this, you
26 | should use the following guide to install tor and
27 | deb.torproject.org-keyring:
28 | https://www.torproject.org/docs/debian.html.en#development
29 |
30 | You need Tor 0.2.4.x because it knows how to automatically report
31 | your obfsproxy address to BridgeDB.
32 |
33 |
34 | Step 2: Install nnmorph
35 |
36 | If you have pip, installing obfsproxy and its dependencies should be
37 | a matter of a single command:
38 |
39 | $ python setup.py install
40 |
41 |
42 | Step 3: Setup Tor
43 |
44 | Now setup Tor. Edit your /etc/tor/torrc to add:
45 |
46 | SocksPort 0
47 | ORPort 443 # or some other port if you already run a webserver/skype
48 | BridgeRelay 1
49 | Exitpolicy reject *:*
50 |
51 | ## CHANGEME_1 -> provide a nickname for your bridge, can be anything you like
52 | #Nickname CHANGEME_1
53 | ## CHANGEME_2 -> provide some email address so we can contact you if there's a problem
54 | #ContactInfo CHANGEME_2
55 |
56 | ServerTransportPlugin nnmorph exec /usr/local/bin/obfsproxy managed
57 |
58 |
--------------------------------------------------------------------------------
/TODO:
--------------------------------------------------------------------------------
1 | * Write more transports.
2 |
3 | * Write more docs (architecture document, HACKING, etc.)
4 |
5 | * Improve the integration testers (especially add better debugging
6 | support for when a test fails)
7 |
8 | * Kill all the XXXs in the code.
9 |
10 | * Convert all the leftover camelCases to underscore_naming.
11 |
12 | * Implement a SOCKS client, so that Obfsproxy can send its data
13 | through a SOCKS proxy.
--------------------------------------------------------------------------------
/bin/obfsproxy:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys, os
4 |
5 | # Forcefully add root directory of the project to our path.
6 | # http://www.py2exe.org/index.cgi/WhereAmI
7 | if hasattr(sys, "frozen"):
8 | dir_of_executable = os.path.dirname(sys.executable)
9 | else:
10 | dir_of_executable = os.path.dirname(__file__)
11 | path_to_project_root = os.path.abspath(os.path.join(dir_of_executable, '..'))
12 |
13 | sys.path.insert(0, path_to_project_root)
14 |
15 | from obfsproxy.pyobfsproxy import run
16 | run()
17 |
18 |
19 |
--------------------------------------------------------------------------------
/doc/HOWTO.txt:
--------------------------------------------------------------------------------
1 | This is a short guide on how to setup an obfsproxy obfs2/obfs3 bridge
2 | on a Debian/Ubuntu system.
3 |
4 | Step 0: Install Python
5 |
6 | To use obfsproxy you will need Python (>= 2.7) and pip. If you use
7 | Debian testing (or unstable), or a version of Ubuntu newer than
8 | Oneiric, this is easy:
9 |
10 | # apt-get install python2.7 python-pip python-dev build-essential libgmp-dev
11 |
12 |
13 | Step 1: Install Tor
14 |
15 | You will also need a development version of Tor. To do this, you
16 | should use the following guide to install tor and
17 | deb.torproject.org-keyring:
18 | https://www.torproject.org/docs/debian.html.en#development
19 |
20 | You need Tor 0.2.4.x because it knows how to automatically report
21 | your obfsproxy address to BridgeDB.
22 |
23 |
24 | Step 2: Install obfsproxy
25 |
26 | If you have pip, installing obfsproxy and its dependencies should be
27 | a matter of a single command:
28 |
29 | $ pip install obfsproxy
30 |
31 |
32 | Step 3: Setup Tor
33 |
34 | Now setup Tor. Edit your /etc/tor/torrc to add:
35 |
36 | SocksPort 0
37 | ORPort 443 # or some other port if you already run a webserver/skype
38 | BridgeRelay 1
39 | Exitpolicy reject *:*
40 |
41 | ## CHANGEME_1 -> provide a nickname for your bridge, can be anything you like
42 | #Nickname CHANGEME_1
43 | ## CHANGEME_2 -> provide some email address so we can contact you if there's a problem
44 | #ContactInfo CHANGEME_2
45 |
46 | ServerTransportPlugin obfs2,obfs3 exec /usr/local/bin/obfsproxy managed
47 |
48 | Don't forget to uncomment and edit the CHANGEME fields.
49 |
50 |
51 | Step 4: Launch Tor and verify that it bootstraps
52 |
53 | Restart Tor to use the new configuration file. (Preface with sudo if
54 | needed.)
55 |
56 | # service tor restart
57 |
58 | Now check /var/log/tor/log and you should see something like this:
59 |
60 | Nov 05 16:40:45.000 [notice] We now have enough directory information to build circuits.
61 | Nov 05 16:40:45.000 [notice] Bootstrapped 80%: Connecting to the Tor network.
62 | Nov 05 16:40:46.000 [notice] Bootstrapped 85%: Finishing handshake with first hop.
63 | Nov 05 16:40:46.000 [notice] Bootstrapped 90%: Establishing a Tor circuit.
64 | Nov 05 16:40:48.000 [notice] Tor has successfully opened a circuit. Looks like client functionality is working.
65 | Nov 05 16:40:48.000 [notice] Bootstrapped 100%: Done.
66 |
67 | If Tor is earlier in the bootstrapping phase, wait until it gets to 100%.
68 |
69 |
70 | Step 5: Set up port forwarding if needed
71 |
72 | If you're behind a NAT/firewall, you'll need to make your bridge
73 | reachable from the outside world — both on the ORPort and the
74 | obfsproxy port. The ORPort is whatever you defined in step two
75 | above. To find your obfsproxy port, check your Tor logs for two
76 | lines similar to these:
77 |
78 | Oct 05 20:00:41.000 [notice] Registered server transport 'obfs2' at '0.0.0.0:26821'
79 | Oct 05 20:00:42.000 [notice] Registered server transport 'obfs3' at '0.0.0.0:40172'
80 |
81 | The last number in each line, in this case 26821 and 40172, are the
82 | TCP port numbers that you need to forward through your
83 | firewall. (This port is randomly chosen the first time Tor starts,
84 | but Tor will cache and reuse the same number in future runs.) If you
85 | want to change the number, use Tor 0.2.4.7-alpha or later, and set
86 | "ServerTransportListenAddr obfs2 0.0.0.0:26821" in your torrc.
87 |
88 |
--------------------------------------------------------------------------------
/doc/obfs2/obfs2-protocol-spec.txt:
--------------------------------------------------------------------------------
1 | obfs2 (The Twobfuscator)
2 |
3 | 0. Protocol overview
4 |
5 | This is a protocol obfuscation layer for TCP protocols. Its purpose
6 | is to keep a third party from telling what protocol is in use based
7 | on message contents. It is based on brl's ssh obfuscation protocol.
8 |
9 | It does not provide authentication or data integrity. It does not
10 | hide data lengths. It is more suitable for providing a layer of
11 | obfuscation for an existing authenticated protocol, like SSH or TLS.
12 |
13 | The protocol has two phases: in the first phase, the parties
14 | establish keys. In the second, the parties exchange superenciphered
15 | traffic.
16 |
17 | 1. Primitives, notation, and constants.
18 |
19 | H(x) is SHA256 of x.
20 | H^n(x) is H(x) called iteratively n times.
21 |
22 | E(K,s) is the AES-CTR-128 encryption of s using K as key.
23 |
24 | x | y is the concatenation of x and y.
25 | UINT32(n) is the 4 byte value of n in big-endian (network) order.
26 | SR(n) is n bytes of strong random data.
27 | WR(n) is n bytes of weaker random data.
28 | "xyz" is the ASCII characters 'x', 'y', and 'z', not NUL-terminated.
29 | s[:n] is the first n bytes of s.
30 | s[n:] is the last n bytes of s.
31 |
32 | MAGIC_VALUE is 0x2BF5CA7E
33 | SEED_LENGTH is 16
34 | MAX_PADDING is 8192
35 | HASH_ITERATIONS is 100000
36 |
37 | KEYLEN is the length of the key used by E(K,s) -- that is, 16.
38 | IVLEN is the length of the IV used by E(K,s) -- that is, 16.
39 |
40 | HASHLEN is the length of the output of H() -- that is, 32.
41 |
42 | MAC(s, x) = H(s | x | s)
43 |
44 | A "byte" is an 8-bit octet.
45 |
46 | We require that HASHLEN >= KEYLEN + IVLEN
47 |
48 | 2. Key establishment phase.
49 |
50 | The party who opens the connection is the 'initiator'; the one who
51 | accepts it is the 'responder'. Each begins by generating a seed
52 | and a padding key as follows. The initiator generates:
53 |
54 | INIT_SEED = SR(SEED_LENGTH)
55 | INIT_PAD_KEY = MAC("Initiator obfuscation padding", INIT_SEED)[:KEYLEN]
56 |
57 | And the responder generates:
58 |
59 | RESP_SEED = SR(SEED_LENGTH)
60 | RESP_PAD_KEY = MAC("Responder obfuscation padding", RESP_SEED)[:KEYLEN]
61 |
62 | Each then generates a random number PADLEN in range from 0 through
63 | MAX_PADDING (inclusive).
64 |
65 | The initiator then sends:
66 |
67 | INIT_SEED | E(INIT_PAD_KEY, UINT32(MAGIC_VALUE) | UINT32(PADLEN) | WR(PADLEN))
68 |
69 | and the responder sends:
70 |
71 | RESP_SEED | E(RESP_PAD_KEY, UINT32(MAGIC_VALUE) | UINT32(PADLEN) | WR(PADLEN))
72 |
73 | Upon receiving the SEED from the other party, each party derives
74 | the other party's padding key value as above, and decrypts the next
75 | 8 bytes of the key establishment message. If the MAGIC_VALUE does
76 | not match, or the PADLEN value is greater than MAX_PADDING, the
77 | party receiving it should close the connection immediately.
78 | Otherwise, it should read the remaining PADLEN bytes of padding data
79 | and discard them.
80 |
81 | Additional keys are then derived as:
82 |
83 | INIT_SECRET = MAC("Initiator obfuscated data", INIT_SEED|RESP_SEED)
84 | RESP_SECRET = MAC("Responder obfuscated data", INIT_SEED|RESP_SEED)
85 | INIT_KEY = INIT_SECRET[:KEYLEN]
86 | INIT_IV = INIT_SECRET[KEYLEN:]
87 | RESP_KEY = RESP_SECRET[:KEYLEN]
88 | RESP_IV = RESP_SECRET[KEYLEN:]
89 |
90 | The INIT_KEY value keys a stream cipher used to encrypt values from
91 | initiator to responder thereafter. The stream cipher's IV is
92 | INIT_IV. The RESP_KEY value keys a stream cipher used to encrypt
93 | values from responder to initiator thereafter. That stream cipher's
94 | IV is RESP_IV.
95 |
96 | 3. Shared-secret extension
97 |
98 | Optionally, if the client and server share a secret value SECRET,
99 | they can replace the MAC function with:
100 |
101 | MAC(s,x) = H^n(s | x | H(SECRET) | s)
102 |
103 | where n = HASH_ITERATIONS.
104 |
--------------------------------------------------------------------------------
/doc/obfs2/obfs2-threat-model.txt:
--------------------------------------------------------------------------------
1 | Threat model for the obfs2 obfuscation protocol
2 |
3 | George Kadianakis
4 | Nick Mathewson
5 |
6 | 0. Abstract
7 |
8 | We discuss the intended threat model for the 'obfs2' protocol
9 | obfuscator, its limitations, and its implications for the protocol
10 | design.
11 |
12 | The 'obfs2' protocol is based on Bruce Leidl's obfuscated SSH layer,
13 | and is documented in the 'doc/protocol-spec.txt' file in the obfsproxy
14 | distribution.
15 |
16 | 1. Adversary capabilities and non-capabilities
17 |
18 | We assume a censor with limited per-connection resources.
19 |
20 | The adversary controls the infrastructure of the network within and
21 | at the edges of her jurisdiction, and she can potentially monitor,
22 | block, alter, and inject traffic anywhere within this region.
23 |
24 | However, the adversary's computational resources are limited.
25 | Specifically, the adversary does not have the resources in her
26 | censorship infrastructure to store very much long-term information
27 | about any given IP or connection.
28 |
29 | The adversary also holds a blacklist of network protocols, which she
30 | is interested in blocking. We assume that the adversary does not have
31 | a complete list of specific IPs running that protocol, though
32 | preventing this is out-of-scope.
33 |
34 | 2. The adversary's goals
35 |
36 | The censor wants to ban particular encrypted protocols or
37 | applications, and is willing to tolerate some collateral damage, but
38 | is not willing to ban all encrypted traffic entirely.
39 |
40 | 3. Goals of obfs2
41 |
42 | Currently, most attackers in the category described above implement
43 | their censorship by one or more firewalls that look for protocol
44 | signatures and block protocols matching those signatures. These
45 | signatures are typically in the form of static strings to be matched
46 | or regular expressions to be evaluated, over a packet or TCP flow.
47 |
48 | obfs2 attempts to counter the above attack by removing content
49 | signatures from network traffic. obfs2 encrypts the traffic stream
50 | with a stream cipher, which results in the traffic looking uniformly
51 | random.
52 |
53 | 4. Non-goals of obfs2
54 |
55 | obfs2 was designed as a proof-of-concept for Tor's pluggable
56 | transport system: it is simple, usable and easily implementable. It
57 | does _not_ try to protect against more sophisticated adversaries.
58 |
59 | obfs2 does not try to protect against non-content protocol
60 | fingerprints, like the packet size or timing.
61 |
62 | obfs2 does not try to protect against attackers capable of measuring
63 | traffic entropy.
64 |
65 | obfs2 (in its default configuration) does not try to protect against
66 | Deep Packet Inspection machines that expect the obfs2 protocol and
67 | have the resources to run it. Such machines can trivially retrieve
68 | the decryption key off the traffic stream and use it to decrypt obfs2
69 | and detect the Tor protocol.
70 |
71 | obfs2 assumes that the underlying protocol provides (or does not
72 | need!) integrity, confidentiality, and authentication; it provides
73 | none of those on its own.
74 |
75 | In other words, obfs2 does not try to protect against anything other
76 | than fingerprintable TLS content patterns.
77 |
78 | That said, obfs2 is not useless. It protects against many real-life
79 | Tor traffic detection methods currently deployed, since most of them
80 | currently use static SSL handshake strings as signatures.
81 |
82 |
--------------------------------------------------------------------------------
/doc/obfs3/obfs3-protocol-spec.txt:
--------------------------------------------------------------------------------
1 | obfs3 (The Threebfuscator)
2 |
3 | 0. Protocol overview
4 |
5 | This is a protocol obfuscation layer for TCP protocols. Its
6 | purpose is to keep a third party from telling what protocol is in
7 | use based on message contents.
8 |
9 | Like obfs2, it does not provide authentication or data integrity.
10 | It does not hide data lengths. It is more suitable for providing a
11 | layer of obfuscation for an existing authenticated protocol, like
12 | SSH or TLS.
13 |
14 | Like obfs2, the protocol has two phases: in the first phase, the
15 | parties establish keys. In the second, the parties exchange
16 | superenciphered traffic.
17 |
18 | 1. Motivation
19 |
20 | The first widely used obfuscation protocol for Tor was obfs2. obfs2
21 | encrypted traffic using a key that was negotiated during the
22 | protocol.
23 |
24 | obfs2 did not use a robust cryptographic key exchange, and the key
25 | could be retrieved by any passive adversary who monitored the
26 | initial handshake of obfs2.
27 |
28 | People believe that the easiest way to block obfs2 would be to
29 | retrieve the key, decrypt the first bytes of the handshake, and
30 | look for redundancy on the handshake message.
31 |
32 | To defend against this attack, obfs3 negotiates keys using an
33 | anonymous Diffie Hellman key exchange. This is done so that a
34 | passive adversary would not be able to retrieve the obfs3 session
35 | key.
36 |
37 | Unfortunately, traditional DH (over subgroups of Z_p* or over
38 | Elliptic Curves) does not fit our threat model since its public
39 | keys are distinguishable from random strings of the same size. For
40 | this reason, a custom DH protocol was proposed that offers public
41 | keys that look like random strings. The UniformDH scheme was
42 | proposed by Ian Goldberg in:
43 | https://lists.torproject.org/pipermail/tor-dev/2012-December/004245.html
44 |
45 | 2. Primitives, notation, and constants.
46 |
47 | E(K,s) is the AES-CTR-128 encryption of s using K as key.
48 |
49 | x | y is the concatenation of x and y.
50 | WR(n) is n bytes of weaker random data.
51 | "xyz" is the ASCII characters 'x', 'y', and 'z', not NUL-terminated.
52 | s[:n] is the first n bytes of s.
53 | s[n:] is the last n bytes of s.
54 |
55 | MAX_PADDING is 8194
56 |
57 | KEYLEN is the length of the key used by E(K,s) -- that is, 16.
58 | COUNTERLEN is the length of the counter used by AES-CTR-128 -- that is, 16.
59 |
60 | HMAC(k,m) is HMAC-SHA256(k,m) with 'k' being the key, and 'm' the
61 | message.
62 |
63 | A "byte" is an 8-bit octet.
64 |
65 | 3. UniformDH
66 |
67 | The UniformDH Diffie-Hellman scheme uses group 5 from RFC3526. It's
68 | a 1536-bit MODP group.
69 |
70 | To pick a private UniformDH key, we pick a random 1536-bit number,
71 | and make it even by setting its low bit to 0. Let x be that private
72 | key, and X = g^x (mod p).
73 |
74 | The other party computes private and public keys, y and Y, in the
75 | same manner.
76 |
77 | When someone sends her public key to the other party, she randomly
78 | decides whether to send X or p-X. This makes the public key
79 | negligibly different from a uniform 1536-bit string
80 |
81 | When a party wants to calculate the shared secret, she
82 | raises the foreign public key to her private key. Note that both
83 | (p-Y)^x = Y^x (mod p) and (p-X)^y = X^y (mod p), since x and y are
84 | even.
85 |
86 | 3. Key establishment phase.
87 |
88 | The party who opens the connection is the 'initiator'; the one who
89 | accepts it is the 'responder'. Each begins by generating a
90 | UniformDH keypair, and a random number PADLEN in [0, MAX_PADDING/2].
91 | Both parties then send:
92 |
93 | PUB_KEY | WR(PADLEN)
94 |
95 | After retrieving the public key of the other end, each party
96 | completes the DH key exchange and generates a shared-secret for the
97 | session (named SHARED_SECRET). Using that shared-secret each party
98 | derives its encryption keys as follows:
99 |
100 | INIT_SECRET = HMAC(SHARED_SECRET, "Initiator obfuscated data")
101 | RESP_SECRET = HMAC(SHARED_SECRET, "Responder obfuscated data")
102 | INIT_KEY = INIT_SECRET[:KEYLEN]
103 | INIT_COUNTER = INIT_SECRET[KEYLEN:]
104 | RESP_KEY = RESP_SECRET[:KEYLEN]
105 | RESP_COUNTER = RESP_SECRET[KEYLEN:]
106 |
107 | The INIT_KEY value keys a block cipher (in CTR mode) used to
108 | encrypt values from initiator to responder thereafter. The counter
109 | mode's initial counter value is INIT_COUNTER. The RESP_KEY value
110 | keys a block cipher (in CTR mode) used to encrypt values from
111 | responder to initiator thereafter. That counter mode's initial
112 | counter value is RESP_COUNTER.
113 |
114 | After the handshake is complete, when the initiator wants to send
115 | application-layer data for the first time, she generates another
116 | random number PADLEN2 in [0, MAX_PADDING/2], and sends:
117 |
118 | WR(PADLEN2) | HMAC(SHARED_SECRET, "Initiator magic") | E(INIT_KEY, DATA)
119 |
120 | When the responder wants to send application-layer data for the
121 | first time, she sends:
122 |
123 | WR(PADLEN2) | HMAC(SHARED_SECRET, "Responder magic") | E(RESP_KEY, DATA)
124 |
125 | After a party receives the public key from the other end, it needs
126 | to find out where the padding stops and where the application-layer
127 | data starts. To do so, every time she receives network data, the
128 | receiver tries to find the magic HMAC string in the data between
129 | the public key and the end of the newly received data. After
130 | spotting the magic string, she knows where the application-layer
131 | data starts and she can start decrypting it.
132 |
133 | If a party has scanned more than MAX_PADDING bytes and the magic
134 | string has not yet been found, the party MUST close the connection.
135 |
136 | After the initiator sends the magic string and the first chunk of
137 | application-layer data, she can send additional application-layer
138 | data simply by encrypting it with her encryption key, and without
139 | prepending any magic strings:
140 |
141 | E(INIT_KEY, DATA)
142 |
143 | Similarly, the responder sends additional application-layer data by
144 | encrypting it with her encryption key:
145 |
146 | E(RESP_KEY, DATA)
147 |
148 | 4. Acknowledgments
149 |
150 | The idea of using a hash of the shared secret as the delimiter
151 | between the padding and the data was suggested by Philipp Winter.
152 |
153 | Ian Goldberg suggested the UniformDH scheme and helped a lot with
154 | reviewing the protocol specification.
155 |
--------------------------------------------------------------------------------
/doc/obfs3/obfs3-threat-model.txt:
--------------------------------------------------------------------------------
1 | Threat model for the obfs3 obfuscation protocol
2 |
3 | The threat model of obfs3 is identical to the threat model of obfs2,
4 | with an added goal:
5 |
6 | obfs3 offers protection against passive Deep Packet Inspection
7 | machines that expect the obfs3 protocol. Such machines should not be
8 | able to verify the existence of the obfs3 protocol without launching
9 | an active attack against its handshake.
10 |
--------------------------------------------------------------------------------
/doc/scramblesuit/ChangeLog:
--------------------------------------------------------------------------------
1 | 2014-01-19 - Changes in version 2014.01.b:
2 | - More unit tests and several minor bug fixes.
3 | - Sanitise shared secret if the user got it slightly wrong.
4 |
5 | 2014-01-09 - Changes in version 2014.01.a:
6 | - Update API to be compatible with recent obfsproxy changes.
7 | - Improve argument parsing.
8 |
9 | 2013-11-18 - Changes in version 2013.11.a:
10 | - Revert UniformDH group size back to 1536 bits to have less of a timing
11 | distinguisher at the cost of having less effective security. Note that
12 | this also breaks compatibility with version 2013.10.a!
13 | - Add the config option "USE_IAT_OBFUSCATION" which can be used to disable
14 | inter-arrival time obfuscation. This would mean more throughput at the
15 | cost of being slightly more detectable.
16 | - Add a fast FIFO buffer implementation.
17 | - Refactored plenty of code.
18 | - Add this ChangeLog file.
19 |
20 | 2013-10-02 - Changes in version 2013.10.a:
21 | - First public release of ScrambleSuit.
22 |
--------------------------------------------------------------------------------
/obfsproxy/__init__.py:
--------------------------------------------------------------------------------
1 | from ._version import get_versions
2 | __version__ = get_versions()['version']
3 | del get_versions
4 |
--------------------------------------------------------------------------------
/obfsproxy/_version.py:
--------------------------------------------------------------------------------
1 |
2 | IN_LONG_VERSION_PY = True
3 | # This file helps to compute a version number in source trees obtained from
4 | # git-archive tarball (such as those provided by githubs download-from-tag
5 | # feature). Distribution tarballs (build by setup.py sdist) and build
6 | # directories (produced by setup.py build) will contain a much shorter file
7 | # that just contains the computed version number.
8 |
9 | # This file is released into the public domain. Generated by
10 | # versioneer-0.7+ (https://github.com/warner/python-versioneer)
11 |
12 | # these strings will be replaced by git during git-archive
13 | git_refnames = "$Format:%d$"
14 | git_full = "$Format:%H$"
15 |
16 |
17 | import subprocess
18 | import sys
19 |
def run_command(args, cwd=None, verbose=False):
    """
    Run the external command `args' (a list) in directory `cwd' and
    return its stripped stdout as a string, or None when the command
    could not be started or exited non-zero.
    """
    try:
        # shell=False, so the caller must pass "git.cmd" (not "git") on windows
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        err = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(err)
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        # subprocess hands back bytes on Python 3; callers expect str.
        output = output.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
38 |
39 |
40 | import sys
41 | import re
42 | import os.path
43 |
def get_expanded_variables(versionfile_source):
    """
    Scrape the git_refnames/git_full keyword strings out of the file at
    'versionfile_source' with a regexp and return them in a dict (keys
    "refnames" and "full"; missing keys mean the lines were not found).

    The code embedded in _version.py can just fetch the value of these
    variables directly. When used from setup.py, we don't want to import
    _version.py, so we parse the text instead. This function is not
    used from _version.py.

    Returns {} if the file cannot be read.
    """
    variables = {}
    try:
        # Fix: the original leaked the file handle (open().readlines()
        # with no close); 'with' guarantees it is closed.
        with open(versionfile_source, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["full"] = mo.group(1)
    except EnvironmentError:
        # Unreadable/missing file: return whatever we gathered (nothing).
        pass
    return variables
63 |
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """
    Derive a {"version": ..., "full": ...} dict from git-archive expanded
    keyword variables. Returns {} when the keywords were never expanded
    (i.e. we are not inside an unpacked git-archive tarball).
    """
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball

    full = variables["full"].strip()
    all_refs = set(part.strip() for part in refnames.strip("()").split(","))
    # Assume all version tags have a digit. git's %d expansion behaves
    # like git log --decorate=short and strips the refs/heads/ and
    # refs/tags/ prefixes that would let us distinguish branches from
    # tags; dropping digit-free names filters out common branch names
    # like "release" and "stabilization", plus "HEAD" and "master".
    refs = set(name for name in all_refs if re.search(r'\d', name))
    if verbose:
        for dropped in sorted(all_refs - refs):
            print("discarding '%s', no digits" % dropped)
        print("remaining refs: %s" % ",".join(sorted(refs)))

    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full": full}

    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full,
            "full": full}
97 |
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """
    Ask git itself (git describe / git rev-parse) for the version of a
    checked-out source tree and return {"version": ..., "full": ...},
    or {} on any failure.

    This runs 'git' from the root of the source tree. That either means
    someone ran a setup.py command (and this code is in versioneer.py, so
    IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    the source tree), or someone ran a project-specific entry point (and
    this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    containing directory is somewhere deeper in the source tree). This only
    gets called if the git-archive 'subst' variables were *not* expanded,
    and _version.py hasn't already been rewritten with a short version
    string, meaning we're inside a checked out source tree.
    """
    try:
        here = os.path.abspath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct

    # versionfile_source is the relative path from the top of the source
    # tree (where the .git directory might live) to this file. Invert it
    # to locate the root starting from __file__.
    if IN_LONG_VERSION_PY:
        root = here
        for _ in versionfile_source.split("/"):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)

    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    git = "git.cmd" if sys.platform == "win32" else "git"
    described = run_command([git, "describe", "--tags", "--dirty", "--always"],
                            cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]

    revision = run_command([git, "rev-parse", "HEAD"], cwd=root)
    if revision is None:
        return {}
    full = revision.strip()
    if tag.endswith("-dirty"):
        # Propagate the dirty marker from describe onto the revision id.
        full += "-dirty"
    return {"version": tag, "full": full}
148 |
149 |
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """
    Guess a version from the name of the directory holding the source
    tree (source tarballs conventionally unpack into a directory that
    includes both the project name and a version string).

    Returns {"version": ..., "full": ""} on success and {} on failure,
    matching the contract of the other versions_from_* helpers (the
    original returned None here -- the only helper that did).
    """
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.abspath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.abspath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return {}  # was: None -- {} keeps the falsy failure contract consistent
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
181 |
tag_prefix = ""  # version tags carry no prefix before the version number
parentdir_prefix = "obfsproxy-"  # release tarballs unpack as obfsproxy-<version>/
versionfile_source = "obfsproxy/_version.py"  # this file, relative to the source root
185 |
def get_versions(default=None, verbose=False):
    """
    Return a version dict {"version": ..., "full": ...} for this tree.

    Tries, in order: expanded git-archive keyword variables, querying
    git itself, and the name of the parent directory; falls back to
    'default' when all of those fail.

    'default' falls back to {"version": "unknown", "full": ""}. The
    original used a mutable dict literal as the default argument (a
    shared-object pitfall); a None placeholder avoids that while keeping
    the same effective behavior for callers that omit the argument.
    """
    if default is None:
        default = {"version": "unknown", "full": ""}
    variables = { "refnames": git_refnames, "full": git_full }
    ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
    if not ver:
        ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if not ver:
        ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
                                      verbose)
    if not ver:
        ver = default
    return ver
197 |
198 |
--------------------------------------------------------------------------------
/obfsproxy/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/common/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/common/aes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | """ This module is a convenience wrapper for the AES cipher in CTR mode. """
5 |
6 | from Crypto.Cipher import AES
7 | from Crypto.Util import Counter
8 |
class AES_CTR_128(object):
    """An AES-CTR-128 PyCrypto wrapper.

    Thin convenience layer over Crypto.Cipher.AES in counter (CTR) mode
    with a 128-bit key and a 128-bit initial counter value.
    """

    def __init__(self, key, iv):
        """Initialize AES with the given key and IV.

        'key' and 'iv' must both be 16-byte strings. The IV is used as
        the big-endian initial value of the 128-bit counter.
        """

        assert(len(key) == 16)
        assert(len(iv) == 16)

        # Python 2-only idioms: `long` and str.encode('hex') convert the
        # 16-byte IV into the integer initial counter value.
        self.ctr = Counter.new(128, initial_value=long(iv.encode('hex'), 16))
        self.cipher = AES.new(key, AES.MODE_CTR, counter=self.ctr)

    def crypt(self, data):
        """
        Encrypt or decrypt 'data'.

        In CTR mode both directions are the same XOR-with-keystream
        operation, so a single method serves encryption and decryption.
        """
        return self.cipher.encrypt(data)
26 |
27 |
--------------------------------------------------------------------------------
/obfsproxy/common/argparser.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 |
4 | """
5 | Overrides argparse.ArgumentParser so that it emits error messages to
6 | stdout instead of stderr.
7 | """
class MyArgumentParser(argparse.ArgumentParser):
    """
    Argument parser whose messages all go to stdout.

    argparse normally writes usage/error text to stderr; this subclass
    routes every non-empty message through sys.stdout instead.
    """

    def _print_message(self, message, fd=None):
        # The requested stream 'fd' is deliberately ignored.
        if not message:
            return
        sys.stdout.write(message)
13 |
--------------------------------------------------------------------------------
/obfsproxy/common/heartbeat.py:
--------------------------------------------------------------------------------
1 | """heartbeat code"""
2 |
3 | import datetime
4 | import socket # for socket.inet_pton()
5 |
6 | import obfsproxy.common.log as logging
7 |
8 | log = logging.get_obfslogger()
9 |
def get_integer_from_ip_str(ip_str):
    """
    Validate the IP address string in 'ip_str' and return its packed
    binary representation, as produced by socket.inet_pton().

    Note: despite the function name, the return value is the packed
    byte string, not a Python integer; the caller in this module only
    uses it as a hashable key for the unique-IP set.

    Throws ValueError if the IP address string was invalid.
    """
    # Try parsing as IPv4 first...
    try:
        return socket.inet_pton(socket.AF_INET, ip_str)
    except socket.error:
        pass

    # ...then as IPv6.
    try:
        return socket.inet_pton(socket.AF_INET6, ip_str)
    except socket.error:
        pass

    # Down here, both inet_pton()s failed.
    raise ValueError("Invalid IP address string")
29 |
class Heartbeat(object):
    """
    Represents obfsproxy's heartbeat.

    It keeps stats on a number of things that the obfsproxy operator
    might be interested in, and every now and then it reports them in
    the logs.

    'unique_ips': A Python set that contains unique IPs (in packed
    binary form, as returned by socket.inet_pton()) that have connected
    to obfsproxy.
    """

    def __init__(self):
        self.n_connections = 0  # connections seen since the last reset
        self.started = datetime.datetime.now()
        self.last_reset = self.started
        self.unique_ips = set()  # unique client addresses since the last reset

    def register_connection(self, ip_str):
        """Register a new connection."""
        self.n_connections += 1
        self._register_ip(ip_str)

    def _register_ip(self, ip_str):
        """
        Record 'ip_str' in the set of unique IPs that have connected.
        (set.add() is idempotent, so no membership pre-check is needed.)
        """
        self.unique_ips.add(get_integer_from_ip_str(ip_str))

    def reset_stats(self):
        """Reset stats."""

        self.n_connections = 0
        self.unique_ips = set()
        self.last_reset = datetime.datetime.now()

    def say_uptime(self):
        """Log uptime information."""

        now = datetime.datetime.now()
        delta = now - self.started

        uptime_days = delta.days
        # Use floor division: the old round() would report e.g. 1h50m
        # as "2 hour(s) and 50 minute(s)".
        uptime_hours = delta.seconds // 3600
        uptime_minutes = (delta.seconds // 60) % 60

        if uptime_days:
            log.info("Heartbeat: obfsproxy's uptime is %d day(s), %d hour(s) and %d minute(s)." % \
                         (uptime_days, uptime_hours, uptime_minutes))
        else:
            log.info("Heartbeat: obfsproxy's uptime is %d hour(s) and %d minute(s)." % \
                         (uptime_hours, uptime_minutes))

    def say_stats(self):
        """Log connection stats."""

        now = datetime.datetime.now()
        reset_delta = now - self.last_reset

        # Whole hours since the last reset. The old expression divided
        # two integers *before* converting to float (Python 2), so its
        # round() was a no-op; floor division states the intent directly.
        hours_since_reset = reset_delta.seconds // 3600 + reset_delta.days * 24

        log.info("Heartbeat: During the last %d hour(s) we saw %d connection(s)" \
                 " from %d unique address(es)." % \
                 (hours_since_reset, self.n_connections, len(self.unique_ips)))

        # Reset stats every 24 hours.
        if (reset_delta.days > 0):
            log.debug("Resetting heartbeat.")
            self.reset_stats()

    def talk(self):
        """Do a heartbeat."""

        self.say_uptime()
        self.say_stats()

# A heartbeat singleton.
heartbeat = Heartbeat()
110 |
--------------------------------------------------------------------------------
/obfsproxy/common/hmac_sha256.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import hmac
3 |
def hmac_sha256_digest(key, msg):
    """
    Compute HMAC-SHA256 over the message 'msg' keyed with 'key' and
    return the raw 32-byte digest.
    """

    mac = hmac.new(key, msg, digestmod=hashlib.sha256)
    return mac.digest()
11 |
--------------------------------------------------------------------------------
/obfsproxy/common/log.py:
--------------------------------------------------------------------------------
1 | """obfsproxy logging code"""
2 | import logging
3 | import sys
4 |
5 | from twisted.python import log
6 |
def get_obfslogger():
    """ Return the module-wide ObfsLogger singleton instance. """
    return OBFSLOGGER
10 |
11 |
class ObfsLogger(object):
    """
    Maintain state of logging options specified with command line arguments

    Attributes:
    safe_logging: Boolean value indicating if we should scrub addresses
    before logging
    obfslogger: Our logging instance
    """

    def __init__(self):

        # Scrub addresses by default; disabled via set_no_safe_logging().
        self.safe_logging = True

        # Bridge twisted's log messages into the 'obfslogger' Python logger.
        observer = log.PythonLoggingObserver('obfslogger')
        observer.start()

        # Create the default log handler that logs to stdout.
        self.obfslogger = logging.getLogger('obfslogger')
        self.default_handler = logging.StreamHandler(sys.stdout)
        self.set_formatter(self.default_handler)
        self.obfslogger.addHandler(self.default_handler)
        # Don't pass records up to the root logger (avoids duplicate output).
        self.obfslogger.propagate = False

    def set_formatter(self, handler):
        """Given a log handler, plug our custom formatter to it."""

        formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
        handler.setFormatter(formatter)

    def set_log_file(self, filename):
        """Set up our logger so that it starts logging to file in 'filename' instead."""

        # remove the default handler, and add the FileHandler:
        self.obfslogger.removeHandler(self.default_handler)

        log_handler = logging.FileHandler(filename)
        self.set_formatter(log_handler)

        self.obfslogger.addHandler(log_handler)


    def set_log_severity(self, sev_string):
        """Update our minimum logging severity to 'sev_string'."""

        # Turn it into a numeric level that logging understands first.
        # NOTE(review): an unrecognized sev_string makes getattr() return
        # None here, which setLevel() rejects -- confirm callers validate
        # the severity string before calling this.
        numeric_level = getattr(logging, sev_string.upper(), None)
        self.obfslogger.setLevel(numeric_level)


    def disable_logs(self):
        """Disable all logging."""

        # Disables every level up to and including CRITICAL, process-wide.
        logging.disable(logging.CRITICAL)


    def set_no_safe_logging(self):
        """ Disable safe_logging """

        self.safe_logging = False


    def safe_addr_str(self, address):
        """
        Unless safe_logging is False, we return '[scrubbed]' instead
        of the address parameter. If safe_logging is false, then we
        return the address itself.
        """

        if self.safe_logging:
            return '[scrubbed]'
        else:
            return address

    def debug(self, msg, *args, **kwargs):
        """ Class wrapper around debug logging method """

        self.obfslogger.debug(msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """ Class wrapper around warning logging method """

        self.obfslogger.warning(msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """ Class wrapper around info logging method """

        self.obfslogger.info(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """ Class wrapper around error logging method """

        self.obfslogger.error(msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """ Class wrapper around critical logging method """

        self.obfslogger.critical(msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """ Class wrapper around exception logging method """

        self.obfslogger.exception(msg, *args, **kwargs)

""" Global variable that will track our Obfslogger instance """
OBFSLOGGER = ObfsLogger()
118 |
--------------------------------------------------------------------------------
/obfsproxy/common/modexp.py:
--------------------------------------------------------------------------------
# Prefer a GMP-backed bignum type when one of the gmpy libraries is
# installed; otherwise fall back to an identity function so that the
# built-in pow() operates on plain Python integers.
try:
    from gmpy2 import mpz
except ImportError:
    try:
        from gmpy import mpz
    except ImportError:
        def mpz(number):
            """Identity fallback used when neither gmpy2 nor gmpy exists."""
            return number

def powMod( x, y, mod ):
    """
    (Efficiently) Calculate and return `x' to the power of `y' mod `mod'.

    The three operands are first converted to GMPY's bignum
    representation when available, which speeds up exponentiation;
    otherwise the built-in modular pow() is used as-is.
    """

    return pow(mpz(x), mpz(y), mpz(mod))
24 |
--------------------------------------------------------------------------------
/obfsproxy/common/rand.py:
--------------------------------------------------------------------------------
1 | import os
2 |
def random_bytes(n):
    """ Return `n' bytes of cryptographically strong random data. """
    return os.urandom(n)
7 |
8 |
--------------------------------------------------------------------------------
/obfsproxy/common/serialize.py:
--------------------------------------------------------------------------------
1 | """Helper functions to go from integers to binary data and back."""
2 |
3 | import struct
4 |
def htonl(n):
    """
    Pack the integer 'n' as 4 bytes in network (big-endian) byte order.
    """
    return struct.pack('!I', n)
10 |
def ntohl(bs):
    """
    Unpack the 4-byte string 'bs' (network byte order) into an integer.
    """
    return struct.unpack('!I', bs)[0]
16 |
def htons(n):
    """
    Pack the integer 'n' as 2 bytes in network (big-endian) byte order.

    Uses the unsigned '!H' format (0..65535) for consistency with
    htonl()'s unsigned '!I'; the previous signed '!h' raised
    struct.error for legitimate values >= 32768 (e.g. high TCP ports).
    """
    return struct.pack('!H', n)
22 |
def ntohs(bs):
    """
    Unpack the 2-byte string 'bs' (network byte order) into an integer.

    Uses the unsigned '!H' format (0..65535) for consistency with
    ntohl()'s unsigned '!I'; the previous signed '!h' turned values
    >= 0x8000 (e.g. high TCP ports) into negative numbers.
    """
    return struct.unpack('!H', bs)[0]
28 |
--------------------------------------------------------------------------------
/obfsproxy/common/transport_config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | Provides a class which represents a pluggable transport's configuration.
5 | """
6 |
class TransportConfig( object ):

    """
    Carries configuration options for pluggable transport modules.

    obfsproxy fills in these options and hands the object to the
    transport's class constructor. A transport may consult them but is
    not required to; the state location, for instance, tells a transport
    where it may keep persistent data.
    """

    def __init__( self ):
        """
        Initialise a `TransportConfig' object with empty defaults.
        """

        self.stateLocation = None
        self.serverTransportOptions = None

        # Role flags; both stay None until the setters below run.
        self.weAreClient = None    # True if we are client, False if not.
        self.weAreExternal = None  # True if we are in external mode.

    def setStateLocation( self, stateLocation ):
        """
        Store the given `stateLocation'.
        """

        self.stateLocation = stateLocation

    def getStateLocation( self ):
        """
        Return the stored `stateLocation'.
        """

        return self.stateLocation

    def setServerTransportOptions( self, serverTransportOptions ):
        """
        Store the given `serverTransportOptions'.
        """

        self.serverTransportOptions = serverTransportOptions

    def getServerTransportOptions( self ):
        """
        Return the stored `serverTransportOptions'.
        """

        return self.serverTransportOptions

    def setListenerMode( self, mode ):
        """
        Derive the client/server role flag from the listener `mode'.
        """

        if mode in ("client", "socks"):
            self.weAreClient = True
        elif mode in ("server", "ext_server"):
            self.weAreClient = False
        else:
            raise ValueError("Invalid listener mode: %s" % mode)

    def setObfsproxyMode( self, mode ):
        """
        Derive the external/managed flag from the obfsproxy `mode'.
        """

        if mode == "external":
            self.weAreExternal = True
        elif mode == "managed":
            self.weAreExternal = False
        else:
            raise ValueError("Invalid obfsproxy mode: %s" % mode)

    def __str__( self ):
        """
        Return a string representation of the `TransportConfig' instance.
        """

        return str(vars(self))
82 |
--------------------------------------------------------------------------------
/obfsproxy/managed/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/managed/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/managed/client.py:
--------------------------------------------------------------------------------
1 | #!/home/milad/anaconda2/bin/python2
2 | # -*- coding: utf-8 -*-
3 |
4 | from twisted.internet import reactor, error
5 |
6 | import obfsproxy.network.launch_transport as launch_transport
7 | import obfsproxy.transports.transports as transports
8 | import obfsproxy.common.log as logging
9 | import obfsproxy.common.transport_config as transport_config
10 |
11 | from pyptlib.client import ClientTransportPlugin
12 | from pyptlib.config import EnvError
13 |
14 | import pprint
15 |
16 | log = logging.get_obfslogger()
17 |
def do_managed_client():
    """
    Start the managed-proxy protocol as a client.

    Initialises pyptlib, launches one SOCKS listener per transport that
    Tor requested, reports each listener back to Tor, and finally spins
    up the twisted event loop when at least one listener came up.

    Fixes: the docstring used to be preceded by leftover debug log
    statements ("I AM HERERER...") which also demoted it to a plain
    expression; and log.info("TRANSPORTS", keys()) passed a %-argument
    with no placeholder, which makes the logging module raise a
    formatting error. All of that debug cruft is removed.
    """

    should_start_event_loop = False

    ptclient = ClientTransportPlugin()
    try:
        ptclient.init(transports.transports.keys())
    except EnvError as err:
        log.warning("Client managed-proxy protocol failed (%s)." % err)
        return

    log.debug("pyptlib gave us the following data:\n'%s'", pprint.pformat(ptclient.getDebugData()))

    for transport in ptclient.getTransports():

        # Will hold configuration parameters for the pluggable transport module.
        pt_config = transport_config.TransportConfig()
        pt_config.setStateLocation(ptclient.config.getStateLocation())
        pt_config.setListenerMode("socks")
        pt_config.setObfsproxyMode("managed")

        # Call setup() method for this transport.
        transport_class = transports.get_transport_class(transport, 'socks')
        transport_class.setup(pt_config)

        try:
            addrport = launch_transport.launch_transport_listener(transport, None, 'socks', None, pt_config)
        except transports.TransportNotFound:
            log.warning("Could not find transport '%s'" % transport)
            ptclient.reportMethodError(transport, "Could not find transport.")
            continue
        except error.CannotListenError as e:
            error_msg = "Could not set up listener (%s:%s) for '%s' (%s)." % \
                            (e.interface, e.port, transport, e.socketError[1])
            log.warning(error_msg)
            ptclient.reportMethodError(transport, error_msg)
            continue

        should_start_event_loop = True
        log.debug("Successfully launched '%s' at '%s'" % (transport, log.safe_addr_str(str(addrport))))
        ptclient.reportMethodSuccess(transport, "socks5", addrport, None, None)

    ptclient.reportMethodsEnd()

    if should_start_event_loop:
        log.info("Starting up the event loop.")
        reactor.run()
    else:
        log.info("No transports launched. Nothing to do.")
70 |
--------------------------------------------------------------------------------
/obfsproxy/managed/server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | from twisted.internet import reactor, error
5 |
6 | from pyptlib.server import ServerTransportPlugin
7 | from pyptlib.config import EnvError
8 |
9 | import obfsproxy.transports.transports as transports
10 | import obfsproxy.network.launch_transport as launch_transport
11 | import obfsproxy.common.log as logging
12 | import obfsproxy.common.transport_config as transport_config
13 |
14 | import pprint
15 |
16 | log = logging.get_obfslogger()
17 |
def do_managed_server():
    """Start the managed-proxy protocol as a server.

    Initialises pyptlib, launches one listener per transport/bind-address
    pair that Tor requested, reports each result back to Tor, and spins
    up the twisted event loop when at least one listener came up.
    """

    should_start_event_loop = False

    ptserver = ServerTransportPlugin()
    try:
        ptserver.init(transports.transports.keys())
    except EnvError, err:
        log.warning("Server managed-proxy protocol failed (%s)." % err)
        return

    log.debug("pyptlib gave us the following data:\n'%s'", pprint.pformat(ptserver.getDebugData()))

    # Tor-side connection parameters, all provided through pyptlib.
    ext_orport = ptserver.config.getExtendedORPort()
    authcookie = ptserver.config.getAuthCookieFile()
    orport = ptserver.config.getORPort()
    server_transport_options = ptserver.config.getServerTransportOptions()

    for transport, transport_bindaddr in ptserver.getBindAddresses().items():

        # Will hold configuration parameters for the pluggable transport module.
        pt_config = transport_config.TransportConfig()
        pt_config.setStateLocation(ptserver.config.getStateLocation())
        # Use the Extended ORPort listener mode whenever Tor offers an
        # Extended ORPort; plain 'server' mode otherwise.
        if ext_orport:
            pt_config.setListenerMode("ext_server")
        else:
            pt_config.setListenerMode("server")
        pt_config.setObfsproxyMode("managed")

        # Per-transport options (may be absent for this transport).
        transport_options = ""
        if server_transport_options and transport in server_transport_options:
            transport_options = server_transport_options[transport]
            pt_config.setServerTransportOptions(transport_options)

        # Call setup() method for this transport.
        transport_class = transports.get_transport_class(transport, 'server')
        transport_class.setup(pt_config)

        try:
            if ext_orport:
                addrport = launch_transport.launch_transport_listener(transport,
                                                                      transport_bindaddr,
                                                                      'ext_server',
                                                                      ext_orport,
                                                                      pt_config,
                                                                      ext_or_cookie_file=authcookie)
            else:
                addrport = launch_transport.launch_transport_listener(transport,
                                                                      transport_bindaddr,
                                                                      'server',
                                                                      orport,
                                                                      pt_config)
        except transports.TransportNotFound:
            log.warning("Could not find transport '%s'" % transport)
            ptserver.reportMethodError(transport, "Could not find transport.")
            continue
        except error.CannotListenError, e:
            error_msg = "Could not set up listener (%s:%s) for '%s' (%s)." % \
                    (e.interface, e.port, transport, e.socketError[1])
            log.warning(error_msg)
            ptserver.reportMethodError(transport, error_msg)
            continue

        should_start_event_loop = True

        extra_log = "" # Include server transport options in the log message if we got 'em
        if transport_options:
            extra_log = " (server transport options: '%s')" % str(transport_options)
        log.debug("Successfully launched '%s' at '%s'%s" % (transport, log.safe_addr_str(str(addrport)), extra_log))

        # Invoke the transport-specific get_public_server_options()
        # method to potentially filter the server transport options
        # that should be passed on to Tor and eventually to BridgeDB.
        public_options_dict = transport_class.get_public_server_options(transport_options)
        public_options_str = None

        # If the transport filtered its options:
        if public_options_dict:
            optlist = []
            for k, v in public_options_dict.items():
                optlist.append("%s=%s" % (k,v))
            public_options_str = ",".join(optlist)

            log.debug("do_managed_server: sending only public_options to tor: %s" % public_options_str)

        # Report success for this transport.
        # If public_options_str is None then all of the
        # transport options from ptserver are used instead.
        ptserver.reportMethodSuccess(transport, addrport, public_options_str)

    ptserver.reportMethodsEnd()

    if should_start_event_loop:
        log.info("Starting up the event loop.")
        reactor.run()
    else:
        log.info("No transports launched. Nothing to do.")
116 |
--------------------------------------------------------------------------------
/obfsproxy/network/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/network/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/network/buffer.py:
--------------------------------------------------------------------------------
class Buffer(object):
    """
    A simple FIFO byte buffer.

    Data is appended with write() and consumed from the front with
    read(); peek() inspects without consuming, drain() discards.
    """

    def __init__(self, data=''):
        """
        Create the buffer, optionally seeded with 'data'.
        """
        self.buffer = bytes(data)

    def read(self, n=-1):
        """
        Remove and return 'n' bytes from the front of the buffer.

        A negative 'n', or one exceeding the buffered length, removes
        and returns the whole buffer.
        """
        if 0 <= n <= len(self.buffer):
            data, self.buffer = self.buffer[:n], self.buffer[n:]
            return data

        everything = self.buffer
        self.buffer = bytes('')
        return everything

    def write(self, data):
        """
        Append 'data' at the end of the buffer.
        """
        self.buffer += data

    def peek(self, n=-1):
        """
        Return 'n' bytes from the front of the buffer without
        removing them.

        A negative 'n', or one exceeding the buffered length, returns
        the whole buffer.
        """
        if 0 <= n <= len(self.buffer):
            return self.buffer[:n]

        return self.buffer

    def drain(self, n=-1):
        """
        Discard 'n' bytes from the front of the buffer.

        A negative 'n', or one exceeding the buffered length, empties
        the buffer.
        """
        if 0 <= n <= len(self.buffer):
            self.buffer = self.buffer[n:]
        else:
            self.buffer = bytes('')

    def __len__(self):
        """Number of buffered bytes. Used in len()."""
        return len(self.buffer)

    def __nonzero__(self):
        """
        True iff the buffer holds data (Python 2 truth-value protocol).
        """
        return len(self.buffer) > 0
76 |
--------------------------------------------------------------------------------
/obfsproxy/network/launch_transport.py:
--------------------------------------------------------------------------------
1 | import obfsproxy.network.network as network
2 | import obfsproxy.transports.transports as transports
3 | import obfsproxy.network.socks as socks
4 | import obfsproxy.network.extended_orport as extended_orport
5 |
6 | from twisted.internet import reactor
7 |
def launch_transport_listener(transport, bindaddr, role, remote_addrport, pt_config, ext_or_cookie_file=None):
    """
    Launch a listener for 'transport' in role 'role' (socks/client/server/ext_server).

    Listens on 'bindaddr' when given, otherwise on an ephemeral port on
    localhost. 'remote_addrport' is the TCP/IP address of the other end
    of the circuit; it is unused in the 'socks' role.

    'pt_config' carries configuration options (such as the state
    location) that are of interest to the pluggable transport.

    'ext_or_cookie_file' is the filesystem path of the Extended ORPort
    Authentication cookie; only used in 'ext_server' mode.

    Return a tuple (addr, port) representing where we managed to bind.

    Throws obfsproxy.transports.transports.TransportNotFound if the
    transport could not be found, and
    twisted.internet.error.CannotListenError if the listener could not
    be set up.
    """

    transport_class = transports.get_transport_class(transport, role)

    if bindaddr:
        listen_host, listen_port = bindaddr[0], int(bindaddr[1])
    else:
        listen_host, listen_port = 'localhost', 0

    # Pick the protocol factory that matches our role.
    if role == 'socks':
        factory = socks.OBFSSOCKSv5Factory(transport_class, pt_config)
    elif role == 'ext_server':
        assert(remote_addrport and ext_or_cookie_file)
        factory = extended_orport.ExtORPortServerFactory(remote_addrport, ext_or_cookie_file, transport, transport_class, pt_config)
    else:
        assert(remote_addrport)
        factory = network.StaticDestinationServerFactory(remote_addrport, role, transport_class, pt_config)

    listener = reactor.listenTCP(listen_port, factory, interface=listen_host)
    bound = listener.getHost()

    return (bound.host, bound.port)
49 |
--------------------------------------------------------------------------------
/obfsproxy/network/socks.py:
--------------------------------------------------------------------------------
1 | import csv
2 |
3 | from twisted.internet import reactor, protocol
4 |
5 | import obfsproxy.common.log as logging
6 | import obfsproxy.network.network as network
7 | import obfsproxy.network.socks5 as socks5
8 | import obfsproxy.transports.base as base
9 |
10 |
11 | log = logging.get_obfslogger()
12 |
13 |
14 | def _split_socks_args(args_str):
15 | """
16 | Given a string containing the SOCKS arguments (delimited by
17 | semicolons, and with semicolons and backslashes escaped), parse it
18 | and return a list of the unescaped SOCKS arguments.
19 | """
20 | return csv.reader([args_str], delimiter=';', escapechar='\\').next()
21 |
22 |
class OBFSSOCKSv5Outgoing(socks5.SOCKSv5Outgoing, network.GenericProtocol):
    """
    Represents a downstream connection from the SOCKS server to the
    destination.

    It subclasses socks5.SOCKSv5Outgoing, so that data can be passed to the
    pluggable transport before proxying.

    Attributes:
        circuit: The circuit this connection belongs to.
        buffer: Buffer that holds data that can't be proxied right
                away. This can happen because the circuit is not yet
                complete, or because the pluggable transport needs more
                data before deciding what to do.
    """

    name = None

    def __init__(self, socksProtocol):
        """
        Constructor.

        'socksProtocol' is a 'SOCKSv5Protocol' object.
        """
        self.name = "socks_down_%s" % hex(id(self))
        self.socks = socksProtocol

        network.GenericProtocol.__init__(self, socksProtocol.circuit)
        # Don't 'return' the base-class __init__ result: __init__ must
        # return None, and propagating another value would be an error.
        super(OBFSSOCKSv5Outgoing, self).__init__(socksProtocol)

    def connectionMade(self):
        """Register this connection with the circuit, then proceed."""
        self.socks.set_up_circuit(self)

        # XXX: The transport should be doing this after handshaking since it
        # calls, self.socks.sendReply(), when this changes to defer sending the
        # reply back set self.socks.otherConn here.
        super(OBFSSOCKSv5Outgoing, self).connectionMade()

    def dataReceived(self, data):
        """Buffer incoming downstream data and hand it to the circuit."""
        # BUGFIX: log message previously misspelled "Recived".
        log.debug("%s: Received %d bytes." % (self.name, len(data)))

        assert self.circuit.circuitIsReady()
        self.buffer.write(data)
        self.circuit.dataReceived(self.buffer, self)
68 |
class OBFSSOCKSv5Protocol(socks5.SOCKSv5Protocol, network.GenericProtocol):
    """
    Represents an upstream connection from a SOCKS client to our SOCKS
    server.

    It overrides socks5.SOCKSv5Protocol because py-obfsproxy's connections need
    to have a circuit and obfuscate traffic before proxying it.
    """

    def __init__(self, circuit):
        self.name = "socks_up_%s" % hex(id(self))

        network.GenericProtocol.__init__(self, circuit)
        socks5.SOCKSv5Protocol.__init__(self)

    def connectionLost(self, reason):
        network.GenericProtocol.connectionLost(self, reason)

    def processEstablishedData(self, data):
        """Buffer established-phase data and hand it to the circuit."""
        assert self.circuit.circuitIsReady()
        self.buffer.write(data)
        self.circuit.dataReceived(self.buffer, self)

    def processRfc1929Auth(self, uname, passwd):
        """
        Handle the Pluggable Transport variant of RFC1929 Username/Password
        authentication.

        Returns True on success, False on malformed or rejected arguments.
        """

        # The Tor PT spec jams the per session arguments into the UNAME/PASSWD
        # fields, and uses this to pass arguments to the pluggable transport.

        # Per the RFC, it's not possible to have 0 length passwords, so tor sets
        # the length to 1 and the first byte to NUL when passwd doesn't actually
        # contain data. Recombine the two fields if appropriate.
        args = uname
        if len(passwd) > 1 or ord(passwd[0]) != 0:
            args += passwd

        # Arguments are a CSV string with Key=Value pairs. The transport is
        # responsible for dealing with the K=V format, but the SOCKS code is
        # currently expected to de-CSV the args.
        #
        # XXX: This really should also handle converting the K=V pairs into a
        # dict.
        try:
            split_args = _split_socks_args(args)
        except csv.Error as err:
            # BUGFIX: this previously caught the undefined name 'csvError',
            # which raised a NameError whenever CSV parsing actually failed.
            log.warning("split_socks_args failed (%s)" % str(err))
            return False

        # Pass the split up list to the transport.
        try:
            self.circuit.transport.handle_socks_args(split_args)
        except base.SOCKSArgsError:
            # Transports should log the issue themselves
            return False

        return True

    def connectClass(self, addr, port, klass, *args):
        """
        Instantiate the outgoing connection.

        This is overriden so that our sub-classed SOCKSv5Outgoing gets created.
        """

        return protocol.ClientCreator(reactor, OBFSSOCKSv5Outgoing, self).connectTCP(addr, port)

    def set_up_circuit(self, otherConn):
        """Wire this connection and 'otherConn' into the circuit."""
        self.circuit.setDownstreamConnection(otherConn)
        self.circuit.setUpstreamConnection(self)
141 |
class OBFSSOCKSv5Factory(protocol.Factory):
    """
    Factory that builds one SOCKSv5 protocol instance, wired to a
    fresh circuit, for every incoming connection.
    """

    def __init__(self, transport_class, pt_config):
        # XXX self.logging = log
        self.transport_class = transport_class
        self.pt_config = pt_config
        self.name = "socks_fact_%s" % hex(id(self))

    def startFactory(self):
        """Log factory start-up."""
        log.debug("%s: Starting up SOCKS server factory." % self.name)

    def buildProtocol(self, addr):
        """Create a SOCKS protocol with its own circuit for 'addr'."""
        log.debug("%s: New connection." % self.name)
        return OBFSSOCKSv5Protocol(network.Circuit(self.transport_class()))
163 |
--------------------------------------------------------------------------------
/obfsproxy/pyobfsproxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | This is the command line interface to py-obfsproxy.
6 | It is designed to be a drop-in replacement for the obfsproxy executable.
7 | Currently, not all of the obfsproxy command line options have been implemented.
8 | """
9 |
10 | import sys
11 |
12 | import obfsproxy.network.launch_transport as launch_transport
13 | import obfsproxy.transports.transports as transports
14 | import obfsproxy.common.log as logging
15 | import obfsproxy.common.argparser as argparser
16 | import obfsproxy.common.heartbeat as heartbeat
17 | import obfsproxy.common.transport_config as transport_config
18 | import obfsproxy.managed.server as managed_server
19 | import obfsproxy.managed.client as managed_client
20 | from obfsproxy import __version__
21 |
22 | from pyptlib.config import checkClientMode
23 |
24 | from twisted.internet import task # for LoopingCall
25 |
26 | log = logging.get_obfslogger()
27 |
def set_up_cli_parsing():
    """Set up our CLI parser. Register our arguments and options and
    query individual transports to register their own external-mode
    arguments."""

    parser = argparser.MyArgumentParser(
        description='py-obfsproxy: A pluggable transports proxy written in Python')
    subparsers = parser.add_subparsers(title='supported transports', dest='name')

    parser.add_argument('-v', '--version', action='version', version=__version__)
    parser.add_argument('--log-file', help='set logfile')
    parser.add_argument('--log-min-severity',
                        choices=['error', 'warning', 'info', 'debug'],
                        help='set minimum logging severity (default: %(default)s)')
    parser.add_argument('--no-log', action='store_true', default=False,
                        help='disable logging')
    parser.add_argument('--no-safe-logging', action='store_true',
                        default=False,
                        help='disable safe (scrubbed address) logging')
    parser.add_argument('--data-dir', help='where persistent information should be stored.',
                        default=None)

    # Managed mode is a subparser for now because there are no
    # optional subparsers: bugs.python.org/issue9253
    subparsers.add_parser("managed", help="managed mode")

    # Give each transport its own subparser and remember its argument
    # validation hook for later.
    for name, klass in transports.transports.items():
        sub = subparsers.add_parser(name, help='%s help' % name)
        klass['base'].register_external_mode_cli(sub)
        sub.set_defaults(validation_function=klass['base'].validate_external_mode_cli)

    return parser
63 |
def do_managed_mode():
    """This function starts obfsproxy's managed-mode functionality."""

    if checkClientMode():
        # BUGFIX: dropped a stray "123 " debugging prefix from this log line.
        log.info('Entering client managed-mode.')
        managed_client.do_managed_client()
    else:
        log.info('Entering server managed-mode.')
        managed_server.do_managed_server()
73 |
def do_external_mode(args):
    """This function starts obfsproxy's external-mode functionality."""

    assert(args)
    assert(args.name)
    assert(args.name in transports.transports)

    from twisted.internet import reactor

    pt_config = transport_config.TransportConfig()
    pt_config.setStateLocation(args.data_dir)
    pt_config.setListenerMode(args.mode)
    pt_config.setObfsproxyMode("external")

    # Give every transport a chance to initialize before we listen.
    run_transport_setup(pt_config)

    launch_transport.launch_transport_listener(args.name, args.listen_addr,
                                               args.mode, args.dest,
                                               pt_config, args.ext_cookie_file)
    log.info("Launched '%s' listener at '%s:%s' for transport '%s'." %
             (args.mode, log.safe_addr_str(args.listen_addr[0]),
              args.listen_addr[1], args.name))
    reactor.run()
95 |
def consider_cli_args(args):
    """Check out parsed CLI arguments and take the appropriate actions."""

    if args.log_file:
        log.set_log_file(args.log_file)
    if args.log_min_severity:
        log.set_log_severity(args.log_min_severity)
    if args.no_log:
        log.disable_logs()
    if args.no_safe_logging:
        log.set_no_safe_logging()

    # validate: a managed proxy without a logfile either misconfigured
    # logging (error out) or must not log at all.
    if args.name == 'managed' and not args.log_file:
        if args.log_min_severity:
            log.error("obfsproxy in managed-proxy mode can only log to a file!")
            sys.exit(1)
        log.disable_logs()
115 |
def run_transport_setup(pt_config):
    """Run the setup() method for our transports."""
    for transport_class in transports.transports.values():
        transport_class['base'].setup(pt_config)
120 |
def pyobfsproxy():
    """Actual pyobfsproxy entry-point."""
    parser = set_up_cli_parsing()

    args = parser.parse_args()

    consider_cli_args(args)

    # BUGFIX: the start-up banner previously had a git URL pasted into the
    # middle of the format string, producing garbled output.
    log.warning('Obfsproxy (version: %s) starting up.' % (__version__))

    log.debug('argv: ' + str(sys.argv))
    log.debug('args: ' + str(args))

    # Fire up our heartbeat.
    l = task.LoopingCall(heartbeat.heartbeat.talk)
    l.start(3600.0, now=False) # do heartbeat every hour

    # Initiate obfsproxy.
    if (args.name == 'managed'):
        do_managed_mode()
    else:
        # Pass parsed arguments to the appropriate transports so that
        # they can initialize and setup themselves. Exit if the
        # provided arguments were corrupted.

        # XXX use exceptions
        if (args.validation_function(args) == False):
            sys.exit(1)

        do_external_mode(args)
152 |
def run():
    """Fake entry-point so that we can log unhandled exceptions."""
    # Pyobfsproxy's CLI uses "managed" whereas C-obfsproxy uses
    # "--managed" to configure managed-mode. Python obfsproxy can't
    # recognize "--managed" because it uses argparse subparsers and
    # http://bugs.python.org/issue9253 is not yet solved. This is a crazy
    # hack to maintain CLI compatibility between the two versions. we
    # basically inplace replace "--managed" with "managed" in the argument
    # list.
    if len(sys.argv) > 1 and '--managed' in sys.argv:
        for n, arg in enumerate(sys.argv):
            if arg == '--managed':
                sys.argv[n] = 'managed'

    try:
        pyobfsproxy()
    except Exception as e:
        # Log the full traceback before letting the exception propagate.
        # (Also removed a leftover "I AM HERERRER" debug log line, and
        # switched to the 'as' except syntax, valid on Python 2.6+ and 3.)
        log.exception(e)
        raise
173 |
# Script entry point: delegate to run() so unhandled exceptions get logged.
if __name__ == '__main__':
    run()
176 |
--------------------------------------------------------------------------------
/obfsproxy/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/test/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/test/int_tests/pits_design.txt:
--------------------------------------------------------------------------------
1 | Pyobfsproxy integration test suite (PITS)
2 |
3 | THIS IS UNIMPLEMENTED. IT'S JUST A DESIGN DOC.
4 |
5 | Overview
6 |
7 | Obfsproxy needs an automated and robust way of testing its pluggable
8 | transports. While unit tests are certainly helpful, integration
9 | tests provide realistic testing scenarios for network daemons like
10 | obfsproxy.
11 |
12 | Motivation
13 |
14 | Obfsproxy needs to be tested on how well it can proxy traffic from
15 | one side to its other side. A basic integration test would be to
16 | transfer a string from one side and see if it arrives intact on the
17 | other side.
18 |
19 | A more involved integration test is the "timeline tests" of
20 | Stegotorus, developed by Zack Weinberg. Stegotorus integration tests
21 | are configurable: you pass them a script file that defines the
22 | behavior of the integration test connections. This allows
23 | customizable connection establishment and tear down, and the ability
24 | to send arbitrary data through the integration test connections.
25 |
26 | That's good enough, but sometimes bugs appear on more complex
27 | network interactions. For this reason, PITS was developed which has
28 | support for:
29 | + multiple network connections
30 | + flexible connection behavior
31 | + automated test case generation
32 |
33 | The integration tests should also be cross-platform so that they can
34 |     be run on Microsoft Windows.
35 |
36 | Design
37 |
38 |
39 |
40 | +-----------+ +-----------+
41 | |-------->| client |<-------------------->| server |<--------|
42 | | |----->| obfsproxy |<-------------------->| obfsproxy |<-----| |
43 | | | |-->| |<-------------------->| |<--| | |
44 | | | | +-----------+ +-----------+ | | |
45 | | | | | | |
46 | v v v v v v
47 | +---------------+ +---------------+
48 | | PITS outbound | | PITS inbound |
49 | +---------------+ +---------------+
50 | ^ |
51 | | |
52 | | v
53 | +---------------+ +---------------+
54 | |Test case file |<------------------------------>|Transcript file|
55 | +---------------+ +---------------+
56 |
57 | PITS does integration tests by reading a user-provided test case
58 | file which contains a description of the test that PITS should
59 | perform.
60 |
61 | A basic PITS test case usually involves launching two obfsproxies as
62 | in the typical obfuscated bridge client-server scenario, exchanging
63 | some data between them and finally checking if both sides received
64 | the proper data.
65 |
66 | A basic PITS test case usually involves opening a listening socket
67 | (which in the case of a client-side obfsproxy, emulates the
68 |     server-side obfsproxy), and a number of outbound connections (which in
69 | the case of a client-side obfsproxy, emulate the connections from the
70 | Tor client).
71 |
72 | Test case files contain instructions for the sockets of PITS. Through
73 | test case files, PITS can be configured to perform the following
74 | actions:
75 | + Open and close connections
76 | + Send arbitrary data through connections
77 | + Pause connections
78 |
79 | While conducting the tests, the PITS inbound and outbound sockets
80 | record the data they sent and receive in a 'transcript'; after the
81 | test is over, the transcript and test case file are post-processed
82 | and compared with each other to check whether the intended
83 | conversation was performed successfully.
84 |
85 | Test case files
86 |
87 | The test case file format is line-oriented; each line is a command,
88 | and the first character of the line is a directive followed by a
89 | number of arguments.
90 | Valid commands are:
91 |
92 | # comment line - note that # _only_ introduces a comment at the beginning
93 | of a line; elsewhere, it's either a syntax error or part
94 | of an argument
95 |
96 | P number - pause test-case execution for |number| milliseconds
97 | ! - initiate connection with identifier
98 | * - Close connection (through inbound socket)
99 | > - transmit on through outbound socket
100 | < - transmit on through inbound socket
101 |
102 | Trailing whitespace is ignored.
103 |
104 | Test cases have to close all established connections explicitly,
105 | otherwise the test won't be validated correctly.
106 |
107 | Transcript files
108 |
109 | Inbound and outbound sockets log received data to a transcript
110 | file. The transcript file format is similar to the test case format:
111 |
112 | ! - connection established on inbound socket
113 | > - received on inbound socket
114 | < - received on outbound socket.
115 | * - connection destroyed on inbound socket
116 |
117 |
118 |
119 | Test case results
120 |
121 | After a test case is completed and the transcript file is written,
122 |     PITS needs to evaluate whether the test case was successful; that is,
123 | whether the transcript file correctly describes the test case.
124 |
125 | Because of the properties of TCP, the following post-processing
126 | happens to validate the transcript file with the test case file:
127 |
128 | a) Both files are segregated: all the traffic and events of inbound
129 | sockets are put on top, and the traffic and events of outbound
130 | sockets are put on the bottom.
131 |
132 |     (This happens because TCP can't guarantee order of event arrival in
133 | one direction relative to the order of event arrival in the other
134 | direction.)
135 |
136 | b) In both files, for each socket identifier, we concatenate all its
137 | traffic in a single 'transmit' directive. In the end, we place the
138 | transmit line below the events (session establishment, etc.).
139 |
140 | (This happens because TCP is a stream protocol.)
141 |
142 | c) We string compare the transcript and test-case files.
143 |
144 | XXX document any unexpected behaviors or untestable cases caused by
145 | the above postprocessing.
146 |
147 | Acknowledgements
148 |
149 | The script file format and the basic idea of PITS are concepts of
150 | Zack Weinberg. They were implemented as part of Stegotorus:
151 | https://gitweb.torproject.org/stegotorus.git/blob/HEAD:/src/test/tltester.cc
152 |
--------------------------------------------------------------------------------
/obfsproxy/test/test_aes.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from Crypto.Cipher import AES
4 | from Crypto.Util import Counter
5 |
6 | import obfsproxy.common.aes as aes
7 | import twisted.trial.unittest
8 |
class testAES_CTR_128_NIST(twisted.trial.unittest.TestCase):
    """Check AES-CTR-128 against the NIST SP 800-38A test vectors."""

    def _helper_test_vector(self, input_block, output_block, plaintext, ciphertext):
        """Verify one vector against the running counter-mode cipher."""
        self.assertEqual(long(input_block.encode('hex'), 16), self.ctr.next_value())

        ct = self.cipher.encrypt(plaintext)
        self.assertEqual(ct, ciphertext)

        # XXX how do we extract the keystream out of the AES object?

    def test_nist(self):
        """Run the four NIST vectors in sequence on one cipher instance."""
        # Prepare the cipher
        key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6\xab\xf7\x15\x88\x09\xcf\x4f\x3c"
        iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"

        self.ctr = Counter.new(128, initial_value=long(iv.encode('hex'), 16))
        self.cipher = AES.new(key, AES.MODE_CTR, counter=self.ctr)

        # Each vector is (input_block, output_block, plaintext, ciphertext).
        vectors = [
            ("\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
             "\xec\x8c\xdf\x73\x98\x60\x7c\xb0\xf2\xd2\x16\x75\xea\x9e\xa1\xe4",
             "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
             "\x87\x4d\x61\x91\xb6\x20\xe3\x26\x1b\xef\x68\x64\x99\x0d\xb6\xce"),
            ("\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xff\x00",
             "\x36\x2b\x7c\x3c\x67\x73\x51\x63\x18\xa0\x77\xd7\xfc\x50\x73\xae",
             "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51",
             "\x98\x06\xf6\x6b\x79\x70\xfd\xff\x86\x17\x18\x7b\xb9\xff\xfd\xff"),
            ("\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xff\x01",
             "\x6a\x2c\xc3\x78\x78\x89\x37\x4f\xbe\xb4\xc8\x1b\x17\xba\x6c\x44",
             "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef",
             "\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e\x5b\x4f\x09\x02\x0d\xb0\x3e\xab"),
            ("\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xff\x02",
             "\xe8\x9c\x39\x9f\xf0\xf1\x98\xc6\xd4\x0a\x31\xdb\x15\x6c\xab\xfe",
             "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
             "\x1e\x03\x1d\xda\x2f\xbe\x03\xd1\x79\x21\x70\xa0\xf3\x00\x9c\xee"),
        ]

        for input_block, output_block, plaintext, ciphertext in vectors:
            self._helper_test_vector(input_block, output_block, plaintext, ciphertext)
53 |
class testAES_CTR_128_simple(twisted.trial.unittest.TestCase):
    """Round-trip sanity check for the AES_CTR_128 wrapper."""

    def test_encrypt_decrypt_small_ASCII(self):
        """
        Validate that decryption and encryption work as intended on a small ASCII string.
        """
        self.key = "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24"
        self.iv = "\x27\xae\x41\xe4\x64\x9b\x93\x4c\xa4\x95\x99\x1b\x78\x52\xb8\x55"

        test_string = "This unittest kills fascists."

        # Two independent ciphers with the same key/IV: one to encrypt,
        # one to decrypt.
        encryptor = aes.AES_CTR_128(self.key, self.iv)
        decryptor = aes.AES_CTR_128(self.key, self.iv)

        round_tripped = decryptor.crypt(encryptor.crypt(test_string))
        self.assertEqual(test_string, round_tripped)
71 |
72 |
73 | if __name__ == '__main__':
74 | unittest.main()
75 |
76 |
--------------------------------------------------------------------------------
/obfsproxy/test/test_buffer.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import obfsproxy.network.buffer as obfs_buf
4 | import twisted.trial.unittest
5 |
class testBuffer(twisted.trial.unittest.TestCase):
    """Unit tests for obfsproxy.network.buffer.Buffer."""

    def setUp(self):
        self.test_string = "No pop no style, I strictly roots."
        self.buf = obfs_buf.Buffer(self.test_string)

    def test_totalread(self):
        """A negative count reads the whole buffer."""
        self.assertEqual(self.buf.read(-1), self.test_string)

    def test_byte_by_byte(self):
        """Read one byte at a time."""
        for expected in self.test_string:
            self.assertEqual(self.buf.read(1), expected)

    def test_bigread(self):
        """An oversized read returns everything that is buffered."""
        self.assertEqual(self.buf.read(666), self.test_string)

    def test_peek(self):
        """Peeking must not consume the buffered data."""
        self.assertEqual(self.buf.peek(-1), self.test_string)
        self.assertEqual(self.buf.read(-1), self.test_string)

    def test_drain(self):
        """Draining everything empties the buffer and returns None."""
        self.assertIsNone(self.buf.drain(-1))
        self.assertEqual(self.buf.read(-1), '')
        self.assertEqual(len(self.buf), 0)

    def test_drain2(self):
        """Drain all but the final byte."""
        self.assertIsNone(self.buf.drain(len(self.test_string) - 1))
        self.assertEqual(self.buf.peek(-1), '.')
        self.assertEqual(len(self.buf), 1)
39 |
40 |
41 | if __name__ == '__main__':
42 | unittest.main()
43 |
44 |
45 |
--------------------------------------------------------------------------------
/obfsproxy/test/test_obfs3_dh.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import time
3 |
4 | import obfsproxy.transports.obfs3_dh as obfs3_dh
5 | import twisted.trial.unittest
6 | from twisted.python import log
7 |
class testUniformDH_KAT(twisted.trial.unittest.TestCase):
    """
    Known-answer tests for the obfs3 UniformDH implementation:
    fixed private keys must produce the expected public keys and
    shared secret.  All constants are 1536-bit values given as hex.
    """
    #
    # Test keypair x/X:
    #
    # The test vector specifies "... 756e" for x but this forces the UniformDH
    # code to return p - X as the public key, and more importantly that's what
    # the original material I used ends with.
    #
    # Private key x.
    _x = int(
        """6f59 2d67 6f53 6874 746f 2068 6e6b 776f
           2073 6874 2065 6167 6574 202e 6f59 2d67
           6f53 6874 746f 2068 7369 7420 6568 6720
           7461 2e65 5920 676f 532d 746f 6f68 6874
           6920 2073 6874 2065 656b 2079 6e61 2064
           7567 7261 6964 6e61 6f20 2066 6874 2065
           6167 6574 202e 6150 7473 202c 7270 7365
           6e65 2c74 6620 7475 7275 2c65 6120 6c6c
           6120 6572 6f20 656e 6920 206e 6f59 2d67
           6f53 6874 746f 2e68 4820 2065 6e6b 776f
           2073 6877 7265 2065 6874 2065 6c4f 2064
           6e4f 7365 6220 6f72 656b 7420 7268 756f""".replace(' ','').replace('\n',''), 16)

    # Expected public key X for private key x.
    _X = int(
        """76a3 d17d 5c55 b03e 865f a3e8 2679 90a7
           24ba a24b 0bdd 0cc4 af93 be8d e30b e120
           d553 3c91 bf63 ef92 3b02 edcb 84b7 4438
           3f7d e232 cca6 eb46 d07c ad83 dcaa 317f
           becb c68c a13e 2c40 19e6 a365 3106 7450
           04ae cc0b e1df f0a7 8733 fb0e 7d5c b7c4
           97ca b77b 1331 bf34 7e5f 3a78 47aa 0bc0
           f4bc 6414 6b48 407f ed7b 931d 1697 2d25
           fb4d a5e6 dc07 4ce2 a58d aa8d e762 4247
           cdf2 ebe4 e4df ec6d 5989 aac7 78c8 7559
           d321 3d60 40d4 111c e3a2 acae 19f9 ee15
           3250 9e03 7f69 b252 fdc3 0243 cbbc e9d0""".replace(' ','').replace('\n',''), 16)

    #
    # Test keypair y/Y
    #
    # Private key y.
    _y = int(
        """7365 6220 6f72 656b 7420 7268 756f 6867
           6f20 2066 6c6f 2c64 6120 646e 7720 6568
           6572 5420 6568 2079 6873 6c61 206c 7262
           6165 206b 6874 6f72 6775 2068 6761 6961
           2e6e 4820 2065 6e6b 776f 2073 6877 7265
           2065 6854 7965 6820 7661 2065 7274 646f
           6520 7261 6874 7327 6620 6569 646c 2c73
           6120 646e 7720 6568 6572 5420 6568 2079
           7473 6c69 206c 7274 6165 2064 6874 6d65
           202c 6e61 2064 6877 2079 6f6e 6f20 656e
           6320 6e61 6220 6865 6c6f 2064 6854 6d65
           6120 2073 6854 7965 7420 6572 6461 0a2e""".replace(' ','').replace('\n',''), 16)

    # Expected public key Y for private key y.
    _Y = int(
        """d04e 156e 554c 37ff d7ab a749 df66 2350
           1e4f f446 6cb1 2be0 5561 7c1a 3687 2237
           36d2 c3fd ce9e e0f9 b277 7435 0849 112a
           a5ae b1f1 2681 1c9c 2f3a 9cb1 3d2f 0c3a
           7e6f a2d3 bf71 baf5 0d83 9171 534f 227e
           fbb2 ce42 27a3 8c25 abdc 5ba7 fc43 0111
           3a2c b206 9c9b 305f aac4 b72b f21f ec71
           578a 9c36 9bca c84e 1a7d cf07 54e3 42f5
           bc8f e491 7441 b882 5443 5e2a baf2 97e9
           3e1e 5796 8672 d45b d7d4 c8ba 1bc3 d314
           889b 5bc3 d3e4 ea33 d4f2 dfdd 34e5 e5a7
           2ff2 4ee4 6316 d475 7dad 0936 6a0b 66b3""".replace(' ','').replace('\n',''), 16)

    #
    # Shared secret: x + Y/y + X
    #
    _xYyX = int(
        """78af af5f 457f 1fdb 832b ebc3 9764 4a33
           038b e9db a10c a2ce 4a07 6f32 7f3a 0ce3
           151d 477b 869e e7ac 4677 5529 2ad8 a77d
           b9bd 87ff bbc3 9955 bcfb 03b1 5838 88c8
           fd03 7834 ff3f 401d 463c 10f8 99aa 6378
           4451 40b7 f838 6a7d 509e 7b9d b19b 677f
           062a 7a1a 4e15 0960 4d7a 0839 ccd5 da61
           73e1 0afd 9eab 6dda 7453 9d60 493c a37f
           a5c9 8cd9 640b 409c d8bb 3be2 bc51 36fd
           42e7 64fc 3f3c 0ddb 8db3 d87a bcf2 e659
           8d2b 101b ef7a 56f5 0ebc 658f 9df1 287d
           a813 5954 3e77 e4a4 cfa7 598a 4152 e4c0""".replace(' ','').replace('\n',''), 16)

    def __init__(self, methodName='runTest'):
        # Pre-render every constant as a 192-byte big-endian string,
        # the form the UniformDH API works with.
        self._x_str = obfs3_dh.int_to_bytes(self._x, 192)
        self._X_str = obfs3_dh.int_to_bytes(self._X, 192)

        self._y_str = obfs3_dh.int_to_bytes(self._y, 192)
        self._Y_str = obfs3_dh.int_to_bytes(self._Y, 192)

        self._xYyX_str = obfs3_dh.int_to_bytes(self._xYyX, 192)

        twisted.trial.unittest.TestCase.__init__(self, methodName)

    def test_odd_key(self):
        """Private key x must reproduce public key X."""
        dh_x = obfs3_dh.UniformDH(self._x_str)
        self.assertEqual(self._x_str, dh_x.priv_str)
        self.assertEqual(self._X_str, dh_x.get_public())

    def test_even_key(self):
        """Private key y must reproduce public key Y."""
        dh_y = obfs3_dh.UniformDH(self._y_str)
        self.assertEqual(self._y_str, dh_y.priv_str)
        self.assertEqual(self._Y_str, dh_y.get_public())

    def test_exchange(self):
        """Both sides of the exchange must derive the known shared secret."""
        dh_x = obfs3_dh.UniformDH(self._x_str)
        dh_y = obfs3_dh.UniformDH(self._y_str)
        xY = dh_x.get_secret(dh_y.get_public())
        yX = dh_y.get_secret(dh_x.get_public())
        self.assertEqual(self._xYyX_str, xY)
        self.assertEqual(self._xYyX_str, yX)
120 |
class testUniformDH_Benchmark(twisted.trial.unittest.TestCase):
    """Rough timing of UniformDH keypair generation plus key exchange."""

    def test_benchmark(self):
        iterations = 1000
        start = time.clock()
        for _ in range(iterations):
            dh_x = obfs3_dh.UniformDH()
            dh_y = obfs3_dh.UniformDH()
            xY = dh_x.get_secret(dh_y.get_public())
            yX = dh_y.get_secret(dh_x.get_public())
            self.assertEqual(xY, yX)
        elapsed = time.clock() - start
        # Each iteration does two generate+exchange operations.
        taken = elapsed / iterations / 2
        log.msg("Generate + Exchange: %f sec" % taken)
133 |
134 | if __name__ == '__main__':
135 | unittest.main()
136 |
--------------------------------------------------------------------------------
/obfsproxy/test/test_socks.py:
--------------------------------------------------------------------------------
1 | import obfsproxy.network.socks as socks
2 |
3 | import twisted.trial.unittest
4 |
class test_SOCKS(twisted.trial.unittest.TestCase):
    """Unit tests for the SOCKS per-session argument splitter."""

    def test_socks_args_splitting(self):
        # Table of (raw argument string, expected split) pairs.
        cases = [
            ("monday=blue;tuesday=grey;wednesday=too;thursday=don\\;tcareabout\\\\you;friday=i\\;minlove",
             ["monday=blue", "tuesday=grey", "wednesday=too", "thursday=don;tcareabout\\you", "friday=i;minlove"]),
            ("monday=blue", ["monday=blue"]),
            ("monday=;tuesday=grey", ["monday=", "tuesday=grey"]),
            ("\\;=\\;;\\\\=\\;", [";=;", "\\=;"]),
        ]
        for raw, expected in cases:
            self.assertListEqual(socks._split_socks_args(raw), expected)
18 |
19 |
--------------------------------------------------------------------------------
/obfsproxy/test/tester.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | """@package tester.py.in
4 | Integration tests for obfsproxy.
5 |
6 | The obfsproxy binary is assumed to exist in the current working
7 | directory, and you need to have Python 2.6 or better (but not 3).
8 | You need to be able to make connections to arbitrary high-numbered
9 | TCP ports on the loopback interface.
10 | """
11 |
12 | import difflib
13 | import errno
14 | import multiprocessing
15 | import Queue
16 | import re
17 | import signal
18 | import socket
19 | import struct
20 | import subprocess
21 | import time
22 | import traceback
23 | import unittest
24 | import sys,os
25 | import tempfile
26 | import shutil
27 |
def diff(label, expected, received):
    """
    Return a unified-format diff between 'expected' and 'received',
    prefixed with 'label'.  Returns the empty string when the two
    strings are identical.  Unprintable characters are rendered with
    Pythonic escaped-string syntax.
    """
    if expected == received:
        return ""

    delta = difflib.unified_diff(expected.split("\n"),
                                 received.split("\n"),
                                 "expected", "received",
                                 lineterm="")
    escaped = "\n".join(s.encode("string_escape") for s in delta)
    return label + "\n" + escaped + "\n"
44 |
class Obfsproxy(subprocess.Popen):
    """
    Helper: Run obfsproxy instances and confirm that they have
    completed without any errors.
    """
    def __init__(self, *args, **kwargs):
        """Spawns obfsproxy with 'args'"""
        argv = ["bin/obfsproxy", "--no-log"]
        # Accept either Obfsproxy("a", "b") or Obfsproxy(["a", "b"]).
        if len(args) == 1 and (isinstance(args[0], list) or
                               isinstance(args[0], tuple)):
            argv.extend(args[0])
        else:
            argv.extend(args)

        # Capture both output streams so check_completion() can inspect them.
        subprocess.Popen.__init__(self, argv,
                                  stdin=open("/dev/null", "r"),
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  **kwargs)

    # Matches "[warn]", "[err]" or "[error]" tags in obfsproxy's log output.
    severe_error_re = re.compile(r"\[(?:warn|err(?:or)?)\]")

    def check_completion(self, label, force_stderr):
        """
        Checks the output and exit status of obfsproxy to see if
        everything went fine.

        Returns an empty string if the test was good, otherwise it
        returns a report that should be printed to the user.
        """
        # Ask a still-running obfsproxy to shut down cleanly first.
        if self.poll() is None:
            self.send_signal(signal.SIGINT)

        (out, err) = self.communicate()

        report = ""
        def indent(s):
            return "| " + "\n| ".join(s.strip().split("\n"))

        # exit status should be zero
        if self.returncode > 0:
            report += label + " exit code: %d\n" % self.returncode
        elif self.returncode < 0:
            report += label + " killed: signal %d\n" % -self.returncode

        # there should be nothing on stdout
        if out != "":
            report += label + " stdout:\n%s\n" % indent(out)

        # there will be debugging messages on stderr, but there should be
        # no [warn], [err], or [error] messages.
        if force_stderr or self.severe_error_re.search(err):
            report += label + " stderr:\n%s\n" % indent(err)

        return report

    def stop(self):
        """Terminates obfsproxy."""
        if self.poll() is None:
            self.terminate()
105 |
def connect_with_retry(addr):
    """
    Helper: Repeatedly try to connect to the server socket 'addr',
    retrying refused connections every 50ms, for up to 21 attempts
    (roughly one second).  The 21st refusal, or any other socket
    error, is propagated to the caller.
    """
    for attempt in range(21):
        try:
            return socket.create_connection(addr)
        except socket.error as e:
            # Only "connection refused" is worth waiting out.
            if e.errno != errno.ECONNREFUSED:
                raise
            if attempt == 20:
                raise
            time.sleep(0.05)
122 |
123 | SOCKET_TIMEOUT = 2.0
124 |
class ReadWorker(object):
    """
    Helper: In a separate process (to avoid deadlock), listen on a
    specified socket. The first time something connects to that socket,
    read all available data, stick it in a string, and post the string
    to the output queue. Then close both sockets and exit.
    """

    @staticmethod
    def work(address, oq):
        """Accept one connection on 'address', read until EOF or a
        SOCKET_TIMEOUT of silence, and post the collected data to 'oq'."""
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(address)
        listener.listen(1)
        (conn, remote) = listener.accept()
        listener.close()
        conn.settimeout(SOCKET_TIMEOUT)
        data = ""
        try:
            while True:
                chunk = conn.recv(4096)
                if chunk == "": break
                data += chunk
        except socket.timeout:
            # The peer simply went quiet; keep whatever we received.
            pass
        except Exception as e:
            # Bug fix: 'e' is an exception object and cannot be
            # concatenated to a str directly -- stringify it first.
            data += "|RECV ERROR: " + str(e)
        conn.close()
        oq.put(data)

    def __init__(self, address):
        """Start the listener process for 'address' immediately."""
        self.oq = multiprocessing.Queue()
        self.worker = multiprocessing.Process(target=self.work,
                                              args=(address, self.oq))
        self.worker.start()

    def get(self):
        """
        Get a chunk of data from the ReadWorker's queue.
        """
        rv = self.oq.get(timeout=SOCKET_TIMEOUT+0.1)
        self.worker.join()
        return rv

    def stop(self):
        """Forcefully terminate the worker process if still running."""
        if self.worker.is_alive(): self.worker.terminate()
171 |
172 | # Right now this is a direct translation of the former int_test.sh
173 | # (except that I have fleshed out the SOCKS test a bit).
174 | # It will be made more general and parametric Real Soon.
175 |
# Loopback TCP ports used by the integration tests: test data enters at
# ENTRY_PORT, the obfsproxy server listens on SERVER_PORT, and the
# ReadWorker sink collects output at EXIT_PORT.
ENTRY_PORT = 4999
SERVER_PORT = 5000
EXIT_PORT = 5001
179 |
180 | #
181 | # Test base classes. They do _not_ inherit from unittest.TestCase
182 | # so that they are not scanned directly for test functions (some of
183 | # them do provide test functions, but not in a usable state without
184 | # further code from subclasses).
185 | #
186 |
class DirectTest(object):
    """
    Base class for direct (non-SOCKS) transfer tests.  Subclasses supply
    'transport', 'server_args' and 'client_args' class attributes.
    """

    def setUp(self):
        # Pipeline: input_chan -> obfs client -> obfs server -> output_reader.
        self.output_reader = ReadWorker(("127.0.0.1", EXIT_PORT))
        self.obfs_server = Obfsproxy(self.server_args)
        # Give the server a moment to bind before the client dials it.
        time.sleep(0.1)
        self.obfs_client = Obfsproxy(self.client_args)
        self.input_chan = connect_with_retry(("127.0.0.1", ENTRY_PORT))
        self.input_chan.settimeout(SOCKET_TIMEOUT)

    def tearDown(self):
        self.obfs_client.stop()
        self.obfs_server.stop()
        self.output_reader.stop()
        self.input_chan.close()

    def test_direct_transfer(self):
        # Open a server and a simple client (in the same process) and
        # transfer a file. Then check whether the output is the same
        # as the input.
        self.input_chan.sendall(TEST_FILE)
        # Allow time for the data to traverse both obfsproxy processes.
        time.sleep(2)
        try:
            output = self.output_reader.get()
        except Queue.Empty:
            output = ""

        self.input_chan.close()

        report = diff("errors in transfer:", TEST_FILE, output)

        # If the transfer already failed, force-dump both processes' stderr.
        report += self.obfs_client.check_completion("obfsproxy client (%s)" % self.transport, report!="")
        report += self.obfs_server.check_completion("obfsproxy server (%s)" % self.transport, report!="")

        if report != "":
            self.fail("\n" + report)
222 |
223 | #
224 | # Concrete test classes specialize the above base classes for each protocol.
225 | #
226 |
class DirectDummy(DirectTest, unittest.TestCase):
    # Relay the test file through the 'dummy' (pass-through) transport.
    transport = "dummy"
    server_args = ("dummy", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("dummy", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
235 |
class DirectObfs2(DirectTest, unittest.TestCase):
    # Relay the test file through obfs2 with default settings.
    transport = "obfs2"
    server_args = ("obfs2", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("obfs2", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
244 |
class DirectObfs2_ss(DirectTest, unittest.TestCase):
    # obfs2 again, this time with a shared secret on both sides.
    # The low iteration count keeps key derivation fast in tests.
    transport = "obfs2"
    server_args = ("obfs2", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--shared-secret=test",
                   "--ss-hash-iterations=50",
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("obfs2", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--shared-secret=test",
                   "--ss-hash-iterations=50",
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
257 |
class DirectB64(DirectTest, unittest.TestCase):
    # Relay the test file through the base64-encoding transport.
    transport = "b64"
    server_args = ("b64", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("b64", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
266 |
class DirectObfs3(DirectTest, unittest.TestCase):
    # Relay the test file through obfs3 (UniformDH handshake).
    transport = "obfs3"
    server_args = ("obfs3", "server",
                   "127.0.0.1:%d" % SERVER_PORT,
                   "--dest=127.0.0.1:%d" % EXIT_PORT)
    client_args = ("obfs3", "client",
                   "127.0.0.1:%d" % ENTRY_PORT,
                   "--dest=127.0.0.1:%d" % SERVER_PORT)
275 |
class DirectScrambleSuit(DirectTest, unittest.TestCase):
    """ScrambleSuit transfer test; needs per-side persistent data dirs."""

    transport = "scramblesuit"

    def setUp(self):
        # First, we need to create data directories for ScrambleSuit. It uses
        # them to store persistent information such as session tickets and the
        # server's long-term keys.
        self.tmpdir_srv = tempfile.mkdtemp(prefix="server")
        self.tmpdir_cli = tempfile.mkdtemp(prefix="client")

        # The shared password is a 32-character base32 string.
        self.server_args = ("--data-dir=%s" % self.tmpdir_srv,
                            "scramblesuit", "server",
                            "127.0.0.1:%d" % SERVER_PORT,
                            "--password=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                            "--dest=127.0.0.1:%d" % EXIT_PORT)
        self.client_args = ("--data-dir=%s" % self.tmpdir_cli,
                            "scramblesuit", "client",
                            "127.0.0.1:%d" % ENTRY_PORT,
                            "--password=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                            "--dest=127.0.0.1:%d" % SERVER_PORT)

        # Now, the remaining setup steps can be done.
        super(DirectScrambleSuit, self).setUp()

    def tearDown(self):
        # First, let the parent class shut down the test.
        super(DirectScrambleSuit, self).tearDown()

        # Now, we can clean up after ourselves.
        shutil.rmtree(self.tmpdir_srv)
        shutil.rmtree(self.tmpdir_cli)
307 |
308 |
# Payload sent through every transport under test; the bytes collected at
# EXIT_PORT must match this string exactly.
TEST_FILE = """\
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.
THIS IS A TEST FILE. IT'S USED BY THE INTEGRATION TESTS.

"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."
"Can entropy ever be reversed?"
"THERE IS AS YET INSUFFICIENT DATA FOR A MEANINGFUL ANSWER."

In obfuscatory age geeky warfare did I wage
For hiding bits from nasty censors' sight
I was hacker to my set in that dim dark age of net
And I hacked from noon till three or four at night

Then a rival from Helsinki said my protocol was dinky
So I flamed him with a condescending laugh,
Saying his designs for stego might as well be made of lego
And that my bikeshed was prettier by half.

But Claude Shannon saw my shame. From his noiseless channel came
A message sent with not a wasted byte
"There are nine and sixty ways to disguise communiques
And RATHER MORE THAN ONE OF THEM IS RIGHT"

(apologies to Rudyard Kipling.)
"""
349 |
# Allow running the integration tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
352 |
--------------------------------------------------------------------------------
/obfsproxy/test/transports/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/test/transports/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/test/transports/test_b64.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import twisted.trial.unittest
3 |
4 | import obfsproxy.transports.b64 as b64
5 |
class test_b64_splitting(twisted.trial.unittest.TestCase):
    """Checks that concatenated base64 blobs are split at their padding."""

    def _helper_splitter(self, string, expected_chunks):
        self.assertEqual(b64._get_b64_chunks_from_str(string), expected_chunks)

    def test_1(self):
        # Every chunk ends with double padding.
        self._helper_splitter("on==the==left==hand==side==",
                              ["on==", "the==", "left==", "hand==", "side=="])

    def test_2(self):
        # Every chunk ends with single padding.
        self._helper_splitter("on=the=left=hand=side=",
                              ["on=", "the=", "left=", "hand=", "side="])

    def test_3(self):
        # Mixed single and double padding.
        self._helper_splitter("on==the=left==hand=side==",
                              ["on==", "the=", "left==", "hand=", "side=="])

    def test_4(self):
        # The final chunk carries no padding at all.
        self._helper_splitter("on==the==left=hand=side",
                              ["on==", "the==", "left=", "hand=", "side"])

    def test_5(self):
        self._helper_splitter("onthelefthandside==", ["onthelefthandside=="])

    def test_6(self):
        self._helper_splitter("onthelefthandside", ["onthelefthandside"])

    def test_7(self):
        self._helper_splitter("onthelefthandside=", ["onthelefthandside="])

    def test_8(self):
        self._helper_splitter("side==", ["side=="])

    def test_9(self):
        self._helper_splitter("side=", ["side="])

    def test_10(self):
        self._helper_splitter("side", ["side"])
60 |
# Allow running this test module directly, outside of trial.
if __name__ == '__main__':
    unittest.main()
63 |
64 |
--------------------------------------------------------------------------------
/obfsproxy/test/transports/test_obfs3_dh.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import twisted.trial.unittest
3 |
4 | import obfsproxy.transports.obfs3_dh as obfs3_dh
5 |
class test_uniform_dh(twisted.trial.unittest.TestCase):
    """A fresh UniformDH exchange must yield a single shared secret."""

    def test_uniform_dh(self):
        alice = obfs3_dh.UniformDH()
        bob = obfs3_dh.UniformDH()

        # Each party combines its own private key with the peer's
        # public key; the results must agree.
        secret_from_alice = alice.get_secret(bob.get_public())
        secret_from_bob = bob.get_secret(alice.get_public())

        self.assertEqual(secret_from_alice, secret_from_bob)
18 |
# Allow running this test module directly, outside of trial.
if __name__ == '__main__':
    unittest.main()
21 |
22 |
--------------------------------------------------------------------------------
/obfsproxy/test/transports/test_scramblesuit.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import os
4 | import base64
5 | import shutil
6 | import tempfile
7 |
8 | import Crypto.Hash.SHA256
9 | import Crypto.Hash.HMAC
10 |
11 | import obfsproxy.common.log as logging
12 | import obfsproxy.network.buffer as obfs_buf
13 | import obfsproxy.common.transport_config as transport_config
14 | import obfsproxy.transports.base as base
15 |
16 | import obfsproxy.transports.scramblesuit.util as util
17 | import obfsproxy.transports.scramblesuit.const as const
18 | import obfsproxy.transports.scramblesuit.mycrypto as mycrypto
19 | import obfsproxy.transports.scramblesuit.uniformdh as uniformdh
20 | import obfsproxy.transports.scramblesuit.scramblesuit as scramblesuit
21 | import obfsproxy.transports.scramblesuit.message as message
22 |
23 | # Disable all logging as it would yield plenty of warning and error
24 | # messages.
25 | log = logging.get_obfslogger()
26 | log.disable_logs()
27 |
class CryptoTest( unittest.TestCase ):

    """
    The HKDF test cases are taken from the appendix of RFC 5869:
    https://tools.ietf.org/html/rfc5869
    """

    def setUp( self ):
        pass

    def extract( self, salt, ikm ):
        # HKDF-Extract step: PRK = HMAC-SHA256(salt, IKM).
        return Crypto.Hash.HMAC.new(salt, ikm, Crypto.Hash.SHA256).digest()

    def runHKDF( self, ikm, salt, info, prk, okm ):
        """Check extract output against 'prk' and expand output against 'okm'."""
        myprk = self.extract(salt, ikm)
        self.failIf(myprk != prk)
        myokm = mycrypto.HKDF_SHA256(myprk, info).expand()
        # NOTE(review): substring check, not equality -- presumably expand()
        # may return a prefix of the full RFC test vector; confirm intended.
        self.failUnless(myokm in okm)

    def test1_HKDF_TestCase1( self ):

        ikm = "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b".decode('hex')
        salt = "000102030405060708090a0b0c".decode('hex')
        info = "f0f1f2f3f4f5f6f7f8f9".decode('hex')
        prk = ("077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122e" + \
               "c844ad7c2b3e5").decode('hex')
        okm = ("3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db" + \
               "02d56ecc4c5bf34007208d5b887185865").decode('hex')

        self.runHKDF(ikm, salt, info, prk, okm)

    def test2_HKDF_TestCase2( self ):

        ikm = ("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c" + \
               "1d1e1f202122232425262728292a2b2c2d2e2f30313233343536373839" + \
               "3a3b3c3d3e3f404142434445464748494a4b4c4d4e4f").decode('hex')
        salt =("606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c" + \
               "7d7e7f808182838485868788898a8b8c8d8e8f90919293949596979899" + \
               "9a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf").decode('hex')
        info =("b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcc" + \
               "cdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9" + \
               "eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff").decode('hex')
        prk = ("06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c1" + \
               "5fc244").decode('hex')
        okm = ("b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19" + \
               "afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b83677" + \
               "93a9aca3db71cc30c58179ec3e87c14c01d5c1" + \
               "f3434f1d87").decode('hex')

        self.runHKDF(ikm, salt, info, prk, okm)

    def test3_HKDF_TestCase3( self ):
        # RFC 5869 test case with empty salt and info.
        ikm = "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b".decode('hex')
        salt = ""
        info = ""
        prk = ("19ef24a32c717b167f33a91d6f648bdf96596776afdb6377a" + \
               "c434c1c293ccb04").decode('hex')
        okm = ("8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec" + \
               "3454e5f3c738d2d9d201395faa4b61a96c8").decode('hex')

        self.runHKDF(ikm, salt, info, prk, okm)

    def test4_HKDF_TestCase4( self ):

        # Requesting more than 255 * 32 output bytes must fail.
        self.assertRaises(ValueError,
                          mycrypto.HKDF_SHA256, "x" * 40, length=(32*255)+1)

        # A too-short input key must be rejected.
        self.assertRaises(ValueError,
                          mycrypto.HKDF_SHA256, "tooShort")

        # Accidental re-use should raise an exception.
        hkdf = mycrypto.HKDF_SHA256("x" * 40)
        hkdf.expand()
        self.assertRaises(base.PluggableTransportError, hkdf.expand)

    def test4_CSPRNG( self ):
        # Two draws must differ and the requested length must be honoured.
        self.failIf(mycrypto.strongRandom(10) == mycrypto.strongRandom(10))
        self.failIf(len(mycrypto.strongRandom(100)) != 100)

    def test5_AES( self ):
        # Two crypters keyed identically must round-trip the payload.
        plain = "this is a test"
        key = os.urandom(16)
        iv = os.urandom(8)

        crypter1 = mycrypto.PayloadCrypter()
        crypter1.setSessionKey(key, iv)
        crypter2 = mycrypto.PayloadCrypter()
        crypter2.setSessionKey(key, iv)

        cipher = crypter1.encrypt(plain)

        self.failIf(cipher == plain)
        self.failUnless(crypter2.decrypt(cipher) == plain)

    def test6_HMAC_SHA256_128( self ):
        # Keys shorter than the shared-secret length are rejected, and the
        # truncated HMAC is always 16 bytes.
        self.assertRaises(AssertionError, mycrypto.HMAC_SHA256_128,
                          "x" * (const.SHARED_SECRET_LENGTH - 1), "test")

        self.failUnless(len(mycrypto.HMAC_SHA256_128("x" * \
                        const.SHARED_SECRET_LENGTH, "test")) == 16)
69 | "eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff").decode('hex')
70 | prk = ("06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c1" + \
71 | "5fc244").decode('hex')
72 | okm = ("b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19" + \
73 | "afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b83677" + \
74 | "93a9aca3db71cc30c58179ec3e87c14c01d5c1" + \
75 | "f3434f1d87").decode('hex')
76 |
77 | self.runHKDF(ikm, salt, info, prk, okm)
78 |
79 | def test3_HKDF_TestCase3( self ):
80 | ikm = "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b".decode('hex')
81 | salt = ""
82 | info = ""
83 | prk = ("19ef24a32c717b167f33a91d6f648bdf96596776afdb6377a" + \
84 | "c434c1c293ccb04").decode('hex')
85 | okm = ("8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec" + \
86 | "3454e5f3c738d2d9d201395faa4b61a96c8").decode('hex')
87 |
88 | self.runHKDF(ikm, salt, info, prk, okm)
89 |
90 | def test4_HKDF_TestCase4( self ):
91 |
92 | self.assertRaises(ValueError,
93 | mycrypto.HKDF_SHA256, "x" * 40, length=(32*255)+1)
94 |
95 | self.assertRaises(ValueError,
96 | mycrypto.HKDF_SHA256, "tooShort")
97 |
98 | # Accidental re-use should raise an exception.
99 | hkdf = mycrypto.HKDF_SHA256("x" * 40)
100 | hkdf.expand()
101 | self.assertRaises(base.PluggableTransportError, hkdf.expand)
102 |
103 | def test4_CSPRNG( self ):
104 | self.failIf(mycrypto.strongRandom(10) == mycrypto.strongRandom(10))
105 | self.failIf(len(mycrypto.strongRandom(100)) != 100)
106 |
107 | def test5_AES( self ):
108 | plain = "this is a test"
109 | key = os.urandom(16)
110 | iv = os.urandom(8)
111 |
112 | crypter1 = mycrypto.PayloadCrypter()
113 | crypter1.setSessionKey(key, iv)
114 | crypter2 = mycrypto.PayloadCrypter()
115 | crypter2.setSessionKey(key, iv)
116 |
117 | cipher = crypter1.encrypt(plain)
118 |
119 | self.failIf(cipher == plain)
120 | self.failUnless(crypter2.decrypt(cipher) == plain)
121 |
122 | def test6_HMAC_SHA256_128( self ):
123 | self.assertRaises(AssertionError, mycrypto.HMAC_SHA256_128,
124 | "x" * (const.SHARED_SECRET_LENGTH - 1), "test")
125 |
126 | self.failUnless(len(mycrypto.HMAC_SHA256_128("x" * \
127 | const.SHARED_SECRET_LENGTH, "test")) == 16)
128 |
129 |
class UniformDHTest( unittest.TestCase ):

    """Exercises ScrambleSuit's UniformDH handshake wrapper."""

    def setUp( self ):
        # Run the handshake from the server's perspective.
        weAreServer = True
        self.udh = uniformdh.new("A" * const.SHARED_SECRET_LENGTH, weAreServer)

    def test1_createHandshake( self ):
        handshake = self.udh.createHandshake()
        # NOTE(review): the lower bound includes PUBLIC_KEY_LENGTH but the
        # upper bound does not -- looks asymmetric; confirm intended.
        self.failUnless((const.PUBLIC_KEY_LENGTH +
                         const.MARK_LENGTH +
                         const.HMAC_SHA256_128_LENGTH) <= len(handshake) <=
                        (const.MARK_LENGTH +
                         const.HMAC_SHA256_128_LENGTH +
                         const.MAX_PADDING_LENGTH))

    def test2_receivePublicKey( self ):
        # Feed our own handshake back in; the callback receives the
        # derived master key.
        buf = obfs_buf.Buffer(self.udh.createHandshake())

        def callback( masterKey ):
            self.failUnless(len(masterKey) == const.MASTER_KEY_LENGTH)

        self.failUnless(self.udh.receivePublicKey(buf, callback) == True)

        publicKey = self.udh.getRemotePublicKey()
        self.failUnless(len(publicKey) == const.PUBLIC_KEY_LENGTH)

    def test3_invalidHMAC( self ):
        # Make the HMAC invalid.
        handshake = self.udh.createHandshake()
        if handshake[-1] != 'a':
            handshake = handshake[:-1] + 'a'
        else:
            handshake = handshake[:-1] + 'b'

        buf = obfs_buf.Buffer(handshake)

        # A corrupted HMAC must cause the handshake to be rejected.
        self.failIf(self.udh.receivePublicKey(buf, lambda x: x) == True)
167 |
class UtilTest( unittest.TestCase ):

    """Unit tests for ScrambleSuit's small utility helpers."""

    def test1_isValidHMAC( self ):
        # Mismatching HMACs are rejected; matching ones are accepted.
        self.failIf(util.isValidHMAC("A" * const.HMAC_SHA256_128_LENGTH,
                                     "B" * const.HMAC_SHA256_128_LENGTH,
                                     "X" * const.SHA256_LENGTH) == True)
        self.failIf(util.isValidHMAC("A" * const.HMAC_SHA256_128_LENGTH,
                                     "A" * const.HMAC_SHA256_128_LENGTH,
                                     "X" * const.SHA256_LENGTH) == False)

    def test2_locateMark( self ):
        # A mark that is not present must not be found.
        self.failIf(util.locateMark("D", "ABC") != None)

        hmac = "X" * const.HMAC_SHA256_128_LENGTH
        mark = "A" * const.MARK_LENGTH
        payload = mark + hmac

        # The mark is only found when the trailing HMAC is complete.
        self.failIf(util.locateMark(mark, payload) == None)
        self.failIf(util.locateMark(mark, payload[:-1]) != None)

    def test3_sanitiseBase32( self ):
        # Lower case is upper-cased; '1' and '0' map to 'I' and 'O'.
        self.failUnless(util.sanitiseBase32("abc") == "ABC")
        self.failUnless(util.sanitiseBase32("ABC1XYZ") == "ABCIXYZ")
        self.failUnless(util.sanitiseBase32("ABC1XYZ0") == "ABCIXYZO")

    def test4_setStateLocation( self ):
        name = (const.TRANSPORT_NAME).lower()

        util.setStateLocation("/tmp")
        self.failUnless(const.STATE_LOCATION == "/tmp/%s/" % name)

        # Nothing should change if we pass "None".
        util.setStateLocation(None)
        self.failUnless(const.STATE_LOCATION == "/tmp/%s/" % name)

        # Check if function creates non-existant directories.
        d = tempfile.mkdtemp()
        util.setStateLocation(d)
        self.failUnless(const.STATE_LOCATION == "%s/%s/" % (d, name))
        self.failUnless(os.path.exists("%s/%s/" % (d, name)))
        shutil.rmtree(d)

    def test5_getEpoch( self ):
        # The epoch is returned as a string.
        e = util.getEpoch()
        self.failUnless(isinstance(e, basestring))

    def test6_writeToFile( self ):
        # A written file must read back byte-identically.
        f = tempfile.mktemp()
        content = "ThisIsATest\n"
        util.writeToFile(content, f)
        self.failUnless(util.readFromFile(f) == content)
        os.unlink(f)

    def test7_readFromFile( self ):

        # Read from non-existant file.
        self.failUnless(util.readFromFile(tempfile.mktemp()) == None)

        # Read file where we (hopefully) don't have permissions.
        self.failUnless(util.readFromFile("/etc/shadow") == None)
228 |
229 |
class MockArgs( object ):
    """Stand-in for parsed command-line arguments in transport tests."""
    uniformDHSecret = None
    sharedSecret = None
    ext_cookie_file = None
    dest = None
    mode = 'socks'
233 |
234 |
class ScrambleSuitTransportTest( unittest.TestCase ):

    """Tests CLI validation and public option filtering of the transport."""

    def setUp( self ):
        config = transport_config.TransportConfig( )
        config.state_location = const.STATE_LOCATION
        args = MockArgs( )
        suit = scramblesuit.ScrambleSuitTransport
        suit.weAreServer = False

        self.suit = suit
        self.args = args
        self.config = config

        # A valid secret is base32; the invalid one is raw lower-case bytes.
        self.validSecret = base64.b32encode( 'A' * const.SHARED_SECRET_LENGTH )
        self.invalidSecret = 'a' * const.SHARED_SECRET_LENGTH

    def test1_validateExternalModeCli( self ):
        """Test with valid scramblesuit args and valid obfsproxy args."""
        self.args.uniformDHSecret = self.validSecret

        # The base-class validator accepts the generic obfsproxy args...
        self.assertTrue(
            super( scramblesuit.ScrambleSuitTransport,
                   self.suit ).validate_external_mode_cli( self.args ))

        # ...and the transport's own validator accepts the secret.
        self.assertIsNone( self.suit.validate_external_mode_cli( self.args ) )

    def test2_validateExternalModeCli( self ):
        """Test with invalid scramblesuit args and valid obfsproxy args."""
        self.args.uniformDHSecret = self.invalidSecret

        with self.assertRaises( base.PluggableTransportError ):
            self.suit.validate_external_mode_cli( self.args )

    def test3_get_public_server_options( self ):
        # With no options given, a password is generated and exposed.
        scramblesuit.ScrambleSuitTransport.setup(transport_config.TransportConfig())
        options = scramblesuit.ScrambleSuitTransport.get_public_server_options("")
        self.failUnless("password" in options)

        # An explicitly configured password is passed through unchanged.
        d = { "password": "3X5BIA2MIHLZ55UV4VAEGKZIQPPZ4QT3" }
        options = scramblesuit.ScrambleSuitTransport.get_public_server_options(d)
        self.failUnless("password" in options)
        self.failUnless(options["password"] == "3X5BIA2MIHLZ55UV4VAEGKZIQPPZ4QT3")
277 |
class MessageTest( unittest.TestCase ):

    """Unit tests for ScrambleSuit protocol message framing."""

    def test1_createProtocolMessages( self ):
        # An empty message consists only of a header.
        self.failUnless(len(message.createProtocolMessages("")[0]) == \
                        const.HDR_LENGTH)

        # Exactly one MPU of payload fits in a single MTU-sized message.
        msg = message.createProtocolMessages('X' * const.MPU)
        self.failUnless((len(msg) == 1) and (len(msg[0]) == const.MTU))

        # One byte more than the MPU spills into a second message.
        msg = message.createProtocolMessages('X' * (const.MPU + 1))
        self.failUnless((len(msg) == 2) and \
                        (len(msg[0]) == const.MTU) and \
                        (len(msg[1]) == (const.HDR_LENGTH + 1)))

    def test2_getFlagNames( self ):
        # Known flag bits map to their names; everything else is "Undefined".
        self.failUnless(message.getFlagNames(0) == "Undefined")
        self.failUnless(message.getFlagNames(1) == "PAYLOAD")
        self.failUnless(message.getFlagNames(2) == "NEW_TICKET")
        self.failUnless(message.getFlagNames(4) == "PRNG_SEED")

    def test3_isSane( self ):
        # Sane: lengths within the MPU and a defined flag.
        self.failUnless(message.isSane(0, 0, const.FLAG_NEW_TICKET) == True)
        self.failUnless(message.isSane(const.MPU, const.MPU,
                                       const.FLAG_PRNG_SEED) == True)
        # Insane: oversized total length, unknown flag, padding > total.
        self.failUnless(message.isSane(const.MPU + 1, 0,
                                       const.FLAG_PAYLOAD) == False)
        self.failUnless(message.isSane(0, 0, 1234) == False)
        self.failUnless(message.isSane(0, 1, const.FLAG_PAYLOAD) == False)

    def test4_ProtocolMessage( self ):
        # One payload byte plus maximum padding exceeds the MPU and must
        # be rejected.  (Removed an unused local 'flags' list here.)
        self.assertRaises(base.PluggableTransportError,
                          message.ProtocolMessage, "1", paddingLen=const.MPU)
315 |
316 |
# Allow running this test module directly, outside of trial.
if __name__ == '__main__':
    unittest.main()
319 |
--------------------------------------------------------------------------------
/obfsproxy/transports/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/transports/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/transports/b64.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | """ This module contains an implementation of the 'b64' transport. """
5 |
6 | from obfsproxy.transports.base import BaseTransport
7 |
8 | import base64
9 |
10 | import obfsproxy.common.log as logging
11 |
12 | log = logging.get_obfslogger()
13 |
14 | def _get_b64_chunks_from_str(string):
15 | """
16 | Given a 'string' of concatenated base64 objects, return a list
17 | with the objects.
18 |
19 | Assumes that the objects are well-formed base64 strings. Also
20 | assumes that the padding character of base64 is '='.
21 | """
22 | chunks = []
23 |
24 | while True:
25 | pad_loc = string.find('=')
26 | if pad_loc < 0 or pad_loc == len(string)-1 or pad_loc == len(string)-2:
27 | # If there is no padding, or it's the last chunk: append
28 | # it to chunks and return.
29 | chunks.append(string)
30 | return chunks
31 |
32 | if pad_loc != len(string)-1 and string[pad_loc+1] == '=': # double padding
33 | pad_loc += 1
34 |
35 | # Append the object to the chunks, and prepare the string for
36 | # the next iteration.
37 | chunks.append(string[:pad_loc+1])
38 | string = string[pad_loc+1:]
39 |
40 | return chunks
41 |
class B64Transport(BaseTransport):
    """
    Implements the b64 protocol. A protocol that encodes data with
    base64 before pushing them to the network.
    """

    def __init__(self):
        super(B64Transport, self).__init__()

    def receivedDownstream(self, data):
        """
        Got data from downstream; relay them upstream.

        'data' is an obfsproxy Buffer; it is peeked first and only
        drained once every chunk decoded cleanly.
        """

        decoded_data = ''

        # TCP is a stream protocol: the data we received might contain
        # more than one b64 chunk. We should inspect the data and
        # split it into multiple chunks.
        b64_chunks = _get_b64_chunks_from_str(data.peek())

        # Now b64 decode each chunk and append it to the our decoded
        # data.
        for chunk in b64_chunks:
            try:
                decoded_data += base64.b64decode(chunk)
            except TypeError:
                # Corrupt (or still incomplete) base64: bail out without
                # draining, so the bytes stay buffered for the next read.
                log.info("We got corrupted b64 ('%s')." % chunk)
                return

        data.drain()
        self.circuit.upstream.write(decoded_data)

    def receivedUpstream(self, data):
        """
        Got data from upstream; relay them downstream.
        """

        self.circuit.downstream.write(base64.b64encode(data.read()))
        return
82 |
83 |
class B64Client(B64Transport):
    """Client side of b64; identical to the symmetric base transport."""
    pass
86 |
87 |
class B64Server(B64Transport):
    """Server side of b64; identical to the symmetric base transport."""
    pass
90 |
91 |
92 |
--------------------------------------------------------------------------------
/obfsproxy/transports/base.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | import pyptlib.util
5 |
6 | import obfsproxy.common.log as logging
7 |
8 | import argparse
9 |
10 | log = logging.get_obfslogger()
11 |
12 | """
13 | This module contains BaseTransport, a pluggable transport skeleton class.
14 | """
15 |
def addrport(string):
    """
    Parse an 'address:port' string into an (address, port) tuple.
    Used as an argparse type during CLI parsing; malformed input is
    reported as an argparse.ArgumentTypeError.
    """
    try:
        parsed = pyptlib.util.parse_addr_spec(string)
    except ValueError as err:
        raise argparse.ArgumentTypeError(err)
    return parsed
25 |
26 | class BaseTransport(object):
27 | """
28 | The BaseTransport class is a skeleton class for pluggable transports.
29 | It contains callbacks that your pluggable transports should
30 | override and customize.
31 |
32 | Attributes:
33 | circuit: Circuit object. This is set just before circuitConnected is called.
34 | """
35 |
36 | def __init__(self):
37 | """
38 | Initialize transport. This is called right after TCP connect.
39 |
40 | Subclass overrides should still call this via super().
41 | """
42 | self.name = "tran_%s" % hex(id(self))
43 | self.circuit = None
44 |
    @classmethod
    def setup(cls, pt_config):
        """
        Receive the Pluggable Transport Config, perform any one-time
        setup tasks, and save state in class attributes.
        Called once at obfsproxy startup; the default does nothing.
        """
52 |
53 | @classmethod
54 | def get_public_server_options(cls, transport_options):
55 | """
56 | By default all server transport options are passed to BridgeDB.
57 | If the transport server wishes to prevent some server
58 | transport options from being added to the BridgeDB then
59 | the transport may override this method and return a
60 | transport_options dict with the keys/values to be distributed.
61 |
62 | get_public_server_options receives the transport_options argument which
63 | is a dict of server transport options... for example:
64 |
65 | A torrc could specify multiple server transport options:
66 |
67 | ServerTransportPlugin bananaphone exec /usr/local/bin/obfsproxy --log-min-severity=debug --log-file=/var/log/tor/obfsproxy.log managed
68 | ServerTransportOptions bananaphone corpus=/opt/bananaphone-corpora/pg29468.txt encodingSpec=words,sha1,4 modelName=markov order=1
69 |
70 | But if the transport wishes to only pass the encodingSpec to
71 | the BridgeDB then get_public_server_options can be overridden like this:
72 |
73 | @classmethod
74 | def get_public_server_options(cls, transport_options):
75 | return dict(encodingSpec = transport_options['encodingSpec'])
76 |
77 | In this example the get_public_server_options receives the transport_options dict:
78 | {'corpus': '/opt/bananaphone-corpora/pg29468.txt', 'modelName': 'markov', 'order': '1', 'encodingSpec': 'words,sha1,4'}
79 | """
80 | return None
81 |
82 | def circuitConnected(self):
83 | """
84 | Our circuit was completed, and this is a good time to do your
85 | transport-specific handshake on its downstream side.
86 | """
87 |
88 | def circuitDestroyed(self, reason, side):
89 | """
90 | Our circuit was tore down.
91 | Both connections of the circuit are closed when this callback triggers.
92 | """
93 |
94 | def receivedDownstream(self, data):
95 | """
96 | Received 'data' in the downstream side of our circuit.
97 | 'data' is an obfsproxy.network.buffer.Buffer.
98 | """
99 |
100 | def receivedUpstream(self, data):
101 | """
102 | Received 'data' in the upstream side of our circuit.
103 | 'data' is an obfsproxy.network.buffer.Buffer.
104 | """
105 |
106 | def handle_socks_args(self, args):
107 | """
108 | 'args' is a list of k=v strings that serve as configuration
109 | parameters to the pluggable transport.
110 | """
111 |
112 | @classmethod
113 | def register_external_mode_cli(cls, subparser):
114 | """
115 | Given an argparse ArgumentParser in 'subparser', register
116 | some default external-mode CLI arguments.
117 |
118 | Transports with more complex CLI are expected to override this
119 | function.
120 | """
121 |
122 | subparser.add_argument('mode', choices=['server', 'ext_server', 'client', 'socks'])
123 | subparser.add_argument('listen_addr', type=addrport)
124 | subparser.add_argument('--dest', type=addrport, help='Destination address')
125 | subparser.add_argument('--ext-cookie-file', type=str,
126 | help='Filesystem path where the Extended ORPort authentication cookie is stored.')
127 |
128 | @classmethod
129 | def validate_external_mode_cli(cls, args):
130 | """
131 | Given the parsed CLI arguments in 'args', validate them and
132 | make sure they make sense. Return True if they are kosher,
133 | otherwise return False.
134 |
135 | Override for your own needs.
136 | """
137 |
138 | # If we are not 'socks', we need to have a static destination
139 | # to send our data to.
140 | if (args.mode != 'socks') and (not args.dest):
141 | log.error("'client' and 'server' modes need a destination address.")
142 | return False
143 |
144 | if (args.mode != 'ext_server') and args.ext_cookie_file:
145 | log.error("No need for --ext-cookie-file if not an ext_server.")
146 | return False
147 |
148 | if (args.mode == 'ext_server') and (not args.ext_cookie_file):
149 | log.error("You need to specify --ext-cookie-file as an ext_server.")
150 | return False
151 |
152 | return True
153 |
class PluggableTransportError(Exception):
    """Generic error raised by pluggable transport implementations."""


class SOCKSArgsError(Exception):
    """Raised when per-connection SOCKS arguments are malformed."""
156 |
--------------------------------------------------------------------------------
/obfsproxy/transports/dummy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | """ This module contains an implementation of the 'dummy' transport. """
5 |
6 | from obfsproxy.transports.base import BaseTransport
7 |
8 |
class DummyTransport(BaseTransport):
    """
    Implements the 'dummy' protocol: traffic is proxied verbatim in both
    directions, with no obfuscation at all.
    """

    def __init__(self):
        """
        Subclasses that override __init__ should still call the super method.
        """
        super(DummyTransport, self).__init__()

    def receivedDownstream(self, data):
        """
        Relay downstream data to the upstream side, unchanged.
        """
        payload = data.read()
        self.circuit.upstream.write(payload)

    def receivedUpstream(self, data):
        """
        Relay upstream data to the downstream side, unchanged.
        """
        payload = data.read()
        self.circuit.downstream.write(payload)
35 |
class DummyClient(DummyTransport):
    """
    DummyClient is the client end of the 'dummy' protocol. The protocol
    is so simple that client and server are identical; both trivially
    subclass DummyTransport.
    """
44 |
45 |
class DummyServer(DummyTransport):
    """
    DummyServer is the server end of the 'dummy' protocol. The protocol
    is so simple that client and server are identical; both trivially
    subclass DummyTransport.
    """
54 |
55 |
56 |
--------------------------------------------------------------------------------
/obfsproxy/transports/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/transports/model/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/transports/model/dummy_nn.py:
--------------------------------------------------------------------------------
1 |
2 | import torch.optim as optim
3 | import argparse
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 | import torch.optim as optim
8 | from collections import OrderedDict
9 | import numpy as np
10 | from torchvision import datasets, transforms
11 | import threading
12 | import scipy.stats as st
13 |
14 |
15 | import torch
16 |
17 |
def extract_time_size_to_tensor(inp):
    """
    Split a sequence of (timestamp, size) packet records into two float
    tensors, shifting the timestamps so the first packet is at t == 0.

    Args:
        inp: iterable of (timestamp, size) pairs.

    Returns:
        (times, sizes): two 1-D torch.FloatTensor of equal length.
        Both are empty when 'inp' is empty.
    """
    times = np.array([rec[0] for rec in inp], dtype=np.float64)
    sizes = np.array([rec[1] for rec in inp], dtype=np.float64)
    # Guard the empty case: indexing t[0] of an empty array raises
    # IndexError.
    if times.size:
        times -= times[0]
    return torch.from_numpy(times).float(), torch.from_numpy(sizes).float()
23 |
24 |
25 |
26 |
27 |
class Generator(nn.Module):
    """
    Two parallel single-hidden-layer MLP heads that map a history of
    packet (time, size) pairs to predicted sizes and times.

    Args:
        inp: length of the packet history fed to forward().
        out: number of predictions produced by each head.
    """

    def __init__(self, inp, out):
        super(Generator, self).__init__()

        # Separate heads for sizes and times, each inp -> 300 -> out.
        self.fc1_size = nn.Linear(inp, 300)
        self.fc1_time = nn.Linear(inp, 300)
        self.fc_size = nn.Linear(300, out)
        self.fc_time = nn.Linear(300, out)

    def forward(self, x):
        """
        Run both heads over the packet history 'x' (an iterable of
        (time, size) pairs of length 'inp').

        Returns:
            (out_size, out_time) -- NOTE the ordering: sizes first,
            then times. Callers must unpack in that order.
        """
        t, s = extract_time_size_to_tensor(x)
        # NOTE: a leftover debug print(t, s) was removed here; stray
        # stdout output is harmful for a transport run in managed mode.
        out_time = F.relu(self.fc1_time(t))
        out_size = F.relu(self.fc1_size(s))

        out_size = F.relu(self.fc_size(out_size))
        out_time = F.relu(self.fc_time(out_time))

        return out_size, out_time
--------------------------------------------------------------------------------
/obfsproxy/transports/nnmorph.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
"""
The nnmorph module implements the nnmorph transport, which schedules
outgoing packets with delays predicted by a neural network.
"""
7 |
8 | import random
9 |
10 | from twisted.internet import threads
11 |
12 | import obfsproxy.transports.base as base
13 |
14 | from collections import deque
15 | import obfsproxy.common.log as logging
16 | from model.dummy_nn import Generator
17 |
18 | from threading import Timer
19 |
20 | log = logging.get_obfslogger()
21 |
HISTORY_LEN = 10  # number of (time, size) records of history fed to the model
OUT_LEN = 10      # number of predictions produced per model forward pass
import time
25 |
class NNTransport(base.BaseTransport):
    """
    A transport that uses a neural network (Generator) to decide when to
    transmit upstream data, morphing the traffic's timing profile.
    Payload bytes themselves are relayed unmodified.
    """

    def __init__(self):
        """Initialize the NN-morph pluggable transport."""
        super(NNTransport, self).__init__()

        # Fixed-length histories of (timestamp, size) pairs, pre-filled
        # with zeros so the model always sees HISTORY_LEN entries.
        self.sent_packets = deque(maxlen=HISTORY_LEN)
        self.rcvd_packets = deque(maxlen=HISTORY_LEN)
        for _ in range(HISTORY_LEN):
            self.sent_packets.append((0, 0))
            self.rcvd_packets.append((0, 0))

        self.model = Generator(HISTORY_LEN, OUT_LEN)

    def circuitConnected(self):
        log.debug("SOMEONE connected")

    def tick(self, message):
        """Timer callback: actually transmit 'message' downstream."""
        log.debug("TICKING")
        self.sent_packets.append((time.time(), len(message)))
        self.circuit.downstream.write(message)

    def receivedUpstream(self, data):
        """
        Got data from upstream; schedule its downstream transmission
        after a model-predicted delay.
        """
        message = data.read()
        log.debug("nn receivedUpstream: Transmitting %d bytes.", len(message))

        # Generator.forward returns (sizes, times) -- in that order.
        # The previous code unpacked it as 'times, sizes', so the delay
        # was computed from the size head.
        sizes, times = self.model(self.rcvd_packets)
        times = times.detach().numpy()
        sizes = sizes.detach().numpy()
        dl = float(times[0]) / 1000.0
        log.debug('MMMM scheduled to send after %f ms', dl)

        # Fire-and-forget timer; the payload actually leaves in tick().
        ti = Timer(dl, self.tick, args=(message,))
        ti.start()

    def receivedDownstream(self, data):
        """
        Got data from downstream; record its (time, size) in the history
        and relay it upstream unchanged.
        """
        log.debug("nn receivedDownstream: Processing %d bytes of application data." %
                  len(data))

        self.rcvd_packets.append((time.time(), len(data)))

        log.debug(self.rcvd_packets)

        self.circuit.upstream.write(data.read())
90 |
91 |
class NNClient(NNTransport):

    """
    NNClient is the client end of the NN-morph transport. It differs
    from the server only in the we_are_initiator flag.
    (The previous docstring was copy-pasted from Obfs3Client.)
    """

    def __init__(self):
        NNTransport.__init__(self)
        self.we_are_initiator = True
104 |
class NNServer(NNTransport):

    """
    NNServer is the server end of the NN-morph transport. It differs
    from the client only in the we_are_initiator flag.
    (The previous docstring was copy-pasted from Obfs3Server.)
    """

    def __init__(self):
        NNTransport.__init__(self)
        self.we_are_initiator = False
117 |
118 |
119 |
120 |
--------------------------------------------------------------------------------
/obfsproxy/transports/obfs2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | The obfs2 module implements the obfs2 protocol.
6 | """
7 |
8 | import random
9 | import hashlib
10 | import argparse
11 |
12 | import obfsproxy.common.aes as aes
13 | import obfsproxy.common.serialize as srlz
14 | import obfsproxy.common.rand as rand
15 |
16 | import obfsproxy.transports.base as base
17 |
18 | import obfsproxy.common.log as logging
19 |
20 | log = logging.get_obfslogger()
21 |
# Protocol constants; see doc/obfs2/obfs2-protocol-spec.txt.
MAGIC_VALUE = 0x2BF5CA7E      # magic number exchanged inside the handshake
SEED_LENGTH = 16              # bytes of random seed sent in the handshake
MAX_PADDING = 8192            # maximum handshake padding length, in bytes
HASH_ITERATIONS = 100000      # H^n iterations used when a shared secret is set

KEYLEN = 16 # is the length of the key used by E(K,s) -- that is, 16.
IVLEN = 16 # is the length of the IV used by E(K,s) -- that is, 16.

# receivedDownstream() state machine.
ST_WAIT_FOR_KEY = 0       # expecting the peer's seed + handshake blob
ST_WAIT_FOR_PADDING = 1   # draining the peer's random padding
ST_OPEN = 2               # handshake done; stream is application data
33 |
def h(x):
    """ H(x) is SHA256 of x. """

    return hashlib.sha256(x).digest()

def hn(x, n):
    """ H^n(x) is H(x) called iteratively n times. """

    data = x
    # range() instead of the Python-2-only xrange(); the loop semantics
    # are identical and this also runs under Python 3.
    for _ in range(n):
        data = h(data)
    return data
48 |
class Obfs2Transport(base.BaseTransport):
    """
    Obfs2Transport implements the obfs2 protocol.
    """

    def __init__(self):
        """Initialize the obfs2 pluggable transport."""
        super(Obfs2Transport, self).__init__()

        # Check if the shared_secret class attribute was already
        # instantiated. If not, instantiate it now.
        if not hasattr(self, 'shared_secret'):
            self.shared_secret = None
        # If external-mode code did not specify the number of hash
        # iterations, just use the default.
        if not hasattr(self, 'ss_hash_iterations'):
            self.ss_hash_iterations = HASH_ITERATIONS

        if self.shared_secret:
            log.debug("Starting obfs2 with shared secret: %s" % self.shared_secret)

        # Our state.
        self.state = ST_WAIT_FOR_KEY

        # Each side only knows its own seed at this point; the peer's
        # seed arrives with its handshake message.
        if self.we_are_initiator:
            self.initiator_seed = rand.random_bytes(SEED_LENGTH) # Initiator's seed.
            self.responder_seed = None # Responder's seed.
        else:
            self.initiator_seed = None # Initiator's seed.
            self.responder_seed = rand.random_bytes(SEED_LENGTH) # Responder's seed

        # Shared secret seed.
        self.secret_seed = None

        # Crypto to encrypt outgoing data.
        self.send_crypto = None
        # Crypto to encrypt outgoing padding.
        self.send_padding_crypto = None
        # Crypto to decrypt incoming data.
        self.recv_crypto = None
        # Crypto to decrypt incoming padding.
        self.recv_padding_crypto = None

        # Number of padding bytes left to read.
        self.padding_left_to_read = 0

        # If it's True, it means that we received upstream data before
        # we had the chance to set up our crypto (after receiving the
        # handshake). This means that when we set up our crypto, we
        # must remember to push the cached upstream data downstream.
        self.pending_data_to_send = False

    @classmethod
    def setup(cls, transport_config):
        """Setup the obfs2 pluggable transport."""
        cls.we_are_initiator = transport_config.weAreClient

        # Check for shared-secret in the server transport options.
        transport_options = transport_config.getServerTransportOptions()
        if transport_options and "shared-secret" in transport_options:
            log.debug("Setting shared-secret from server transport options: '%s'", transport_options["shared-secret"])
            cls.shared_secret = transport_options["shared-secret"]

    @classmethod
    def register_external_mode_cli(cls, subparser):
        """Register obfs2-specific external-mode CLI arguments."""
        subparser.add_argument('--shared-secret', type=str, help='Shared secret')

        # This is a hidden CLI argument for use by the integration
        # tests: so that they don't do an insane amount of hash
        # iterations.
        subparser.add_argument('--ss-hash-iterations', type=int, help=argparse.SUPPRESS)
        super(Obfs2Transport, cls).register_external_mode_cli(subparser)

    @classmethod
    def validate_external_mode_cli(cls, args):
        """Pick up the obfs2 CLI arguments, then defer to the base class."""
        if args.shared_secret:
            cls.shared_secret = args.shared_secret
        if args.ss_hash_iterations:
            cls.ss_hash_iterations = args.ss_hash_iterations

        # Propagate the base class's verdict. Without this 'return' the
        # override always returned None, silently dropping the result of
        # the base class's validation.
        return super(Obfs2Transport, cls).validate_external_mode_cli(args)

    def handle_socks_args(self, args):
        """Parse per-connection SOCKS args; expects one 'shared-secret=...'."""
        log.debug("obfs2: Got '%s' as SOCKS arguments." % args)

        # A shared secret might already be set if obfsproxy is in
        # external-mode and both a cli shared-secret was specified
        # _and_ a SOCKS per-connection shared secret.
        if self.shared_secret:
            log.notice("obfs2: Hm. Weird configuration. A shared secret "
                       "was specified twice. I will keep the one "
                       "supplied by the SOCKS arguments.")

        if len(args) != 1:
            err_msg = "obfs2: Too many SOCKS arguments (%d) (%s)" % (len(args), str(args))
            log.warning(err_msg)
            raise base.SOCKSArgsError(err_msg)

        if not args[0].startswith("shared-secret="):
            err_msg = "obfs2: SOCKS arg is not correctly formatted (%s)" % args[0]
            log.warning(err_msg)
            raise base.SOCKSArgsError(err_msg)

        # Strip the "shared-secret=" prefix (14 characters).
        self.shared_secret = args[0][14:]

    def circuitConnected(self):
        """
        Do the obfs2 handshake:
        SEED | E_PAD_KEY( UINT32(MAGIC_VALUE) | UINT32(PADLEN) | WR(PADLEN) )
        """
        # Generate keys for outgoing padding.
        self.send_padding_crypto = \
            self._derive_padding_crypto(self.initiator_seed if self.we_are_initiator else self.responder_seed,
                                        self.send_pad_keytype)

        padding_length = random.randint(0, MAX_PADDING)
        seed = self.initiator_seed if self.we_are_initiator else self.responder_seed

        handshake_message = seed + self.send_padding_crypto.crypt(srlz.htonl(MAGIC_VALUE) +
                                                                  srlz.htonl(padding_length) +
                                                                  rand.random_bytes(padding_length))

        log.debug("obfs2 handshake: %s queued %d bytes (padding_length: %d).",
                  "initiator" if self.we_are_initiator else "responder",
                  len(handshake_message), padding_length)

        self.circuit.downstream.write(handshake_message)

    def receivedUpstream(self, data):
        """
        Got data from upstream. We need to obfuscated and proxy them downstream.
        """
        if not self.send_crypto:
            log.debug("Got upstream data before doing handshake. Caching.")
            self.pending_data_to_send = True
            return

        log.debug("obfs2 receivedUpstream: Transmitting %d bytes.", len(data))
        # Encrypt and proxy them.
        self.circuit.downstream.write(self.send_crypto.crypt(data.read()))

    def receivedDownstream(self, data):
        """
        Got data from downstream. We need to de-obfuscate them and
        proxy them upstream.
        """
        log_prefix = "obfs2 receivedDownstream" # used in logs

        if self.state == ST_WAIT_FOR_KEY:
            log.debug("%s: Waiting for key." % log_prefix)
            # Need the peer's seed plus the two encrypted uint32 fields
            # (magic and padding length) before we can proceed.
            if len(data) < SEED_LENGTH + 8:
                log.debug("%s: Not enough bytes for key (%d)." % (log_prefix, len(data)))
                return data # incomplete

            if self.we_are_initiator:
                self.responder_seed = data.read(SEED_LENGTH)
            else:
                self.initiator_seed = data.read(SEED_LENGTH)

            # Now that we got the other seed, let's set up our crypto.
            self.send_crypto = self._derive_crypto(self.send_keytype)
            self.recv_crypto = self._derive_crypto(self.recv_keytype)
            self.recv_padding_crypto = \
                self._derive_padding_crypto(self.responder_seed if self.we_are_initiator else self.initiator_seed,
                                            self.recv_pad_keytype)

            # XXX maybe faster with a single d() instead of two.
            magic = srlz.ntohl(self.recv_padding_crypto.crypt(data.read(4)))
            padding_length = srlz.ntohl(self.recv_padding_crypto.crypt(data.read(4)))

            log.debug("%s: Got %d bytes of handshake data (padding_length: %d, magic: %s)" % \
                          (log_prefix, len(data), padding_length, hex(magic)))

            if magic != MAGIC_VALUE:
                raise base.PluggableTransportError("obfs2: Corrupted magic value '%s'" % hex(magic))
            if padding_length > MAX_PADDING:
                raise base.PluggableTransportError("obfs2: Too big padding length '%s'" % padding_length)

            self.padding_left_to_read = padding_length
            self.state = ST_WAIT_FOR_PADDING

        # Drain padding as it trickles in; it may span several reads.
        while self.padding_left_to_read:
            if not data: return

            n_to_drain = self.padding_left_to_read
            if (self.padding_left_to_read > len(data)):
                n_to_drain = len(data)

            data.drain(n_to_drain)
            self.padding_left_to_read -= n_to_drain
            log.debug("%s: Consumed %d bytes of padding, %d still to come (%d).",
                      log_prefix, n_to_drain, self.padding_left_to_read, len(data))

        self.state = ST_OPEN
        log.debug("%s: Processing %d bytes of application data.",
                  log_prefix, len(data))

        if self.pending_data_to_send:
            log.debug("%s: We got pending data to send and our crypto is ready. Pushing!" % log_prefix)
            self.receivedUpstream(self.circuit.upstream.buffer) # XXX touching guts of network.py
            self.pending_data_to_send = False

        self.circuit.upstream.write(self.recv_crypto.crypt(data.read()))

    def _derive_crypto(self, pad_string): # XXX consider secret_seed
        """
        Derive and return an obfs2 key using the pad string in 'pad_string'.
        """
        secret = self.mac(pad_string,
                          self.initiator_seed + self.responder_seed,
                          self.shared_secret)
        return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])

    def _derive_padding_crypto(self, seed, pad_string): # XXX consider secret_seed
        """
        Derive and return an obfs2 padding key using the pad string in 'pad_string'.
        """
        secret = self.mac(pad_string,
                          seed,
                          self.shared_secret)
        return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])

    def mac(self, s, x, secret):
        """
        obfs2 regular MAC: MAC(s, x) = H(s | x | s)

        Optionally, if the client and server share a secret value SECRET,
        they can replace the MAC function with:
        MAC(s,x) = H^n(s | x | H(SECRET) | s)

        where n = HASH_ITERATIONS.
        """
        if secret:
            secret_hash = h(secret)
            return hn(s + x + secret_hash + s, self.ss_hash_iterations)
        else:
            return h(s + x + s)
286 |
287 |
class Obfs2Client(Obfs2Transport):

    """
    The client (initiator) side of obfs2. Client and server differ only
    in which padding/data key strings they use for each direction.
    """

    def __init__(self):
        # Key-derivation pad strings for the initiator role; these must
        # be set before the base __init__ runs the rest of the setup.
        self.send_pad_keytype = 'Initiator obfuscation padding'
        self.recv_pad_keytype = 'Responder obfuscation padding'
        self.send_keytype = "Initiator obfuscated data"
        self.recv_keytype = "Responder obfuscated data"

        super(Obfs2Client, self).__init__()
302 |
303 |
class Obfs2Server(Obfs2Transport):

    """
    The server (responder) side of obfs2. Client and server differ only
    in which padding/data key strings they use for each direction.
    """

    def __init__(self):
        # Key-derivation pad strings for the responder role; these must
        # be set before the base __init__ runs the rest of the setup.
        self.send_pad_keytype = 'Responder obfuscation padding'
        self.recv_pad_keytype = 'Initiator obfuscation padding'
        self.send_keytype = "Responder obfuscated data"
        self.recv_keytype = "Initiator obfuscated data"

        super(Obfs2Server, self).__init__()
318 |
319 |
320 |
--------------------------------------------------------------------------------
/obfsproxy/transports/obfs3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | The obfs3 module implements the obfs3 protocol.
6 | """
7 |
8 | import random
9 |
10 | import obfsproxy.common.aes as aes
11 | import obfsproxy.transports.base as base
12 | import obfsproxy.transports.obfs3_dh as obfs3_dh
13 | import obfsproxy.common.log as logging
14 | import obfsproxy.common.hmac_sha256 as hmac_sha256
15 | import obfsproxy.common.rand as rand
16 |
17 | from twisted.internet import threads
18 |
19 | log = logging.get_obfslogger()
20 |
# Protocol constants; see doc/obfs3/obfs3-protocol-spec.txt.
MAX_PADDING = 8194  # maximum bytes of handshake padding

PUBKEY_LEN = 192  # UniformDH public key length (1536-bit group)
KEYLEN = 16  # is the length of the key used by E(K,s) -- that is, 16.
HASHLEN = 32 # length of output of sha256

ST_WAIT_FOR_KEY = 0 # Waiting for public key from the other party
ST_WAIT_FOR_HANDSHAKE = 1 # Waiting for the DH handshake
ST_SEARCHING_MAGIC = 2 # Waiting for magic strings from the other party
ST_OPEN = 3 # obfs3 handshake is complete. Sending application data.
31 |
class Obfs3Transport(base.BaseTransport):
    """
    Obfs3Transport implements the obfs3 protocol.
    """

    def __init__(self):
        """Initialize the obfs3 pluggable transport."""
        super(Obfs3Transport, self).__init__()

        # Our state.
        self.state = ST_WAIT_FOR_KEY

        # Uniform-DH object
        self.dh = obfs3_dh.UniformDH()

        # DH shared secret
        self.shared_secret = None

        # Bytes of padding scanned so far.
        self.scanned_padding = 0
        # Last padding bytes scanned.
        self.last_padding_chunk = ''

        # Magic value that the other party is going to send
        # (initialized after deriving shared secret)
        self.other_magic_value = None
        # Crypto to encrypt outgoing data.
        self.send_crypto = None
        # Crypto to decrypt incoming data.
        self.recv_crypto = None

        # Buffer for the first data, Tor is trying to send but can't right now
        # because we have to handle the DH handshake first.
        self.queued_data = ''

        # Attributes below are filled by classes that inherit Obfs3Transport.
        self.send_keytype = None
        self.recv_keytype = None
        self.send_magic_const = None
        self.recv_magic_const = None
        self.we_are_initiator = None

    def circuitConnected(self):
        """
        Do the obfs3 handshake:
        PUBKEY | WR(PADLEN)
        """
        # '//' keeps the bound an integer on Python 3; on Python 2 it is
        # identical to the old int '/' (random.randint rejects floats).
        padding_length = random.randint(0, MAX_PADDING//2)

        handshake_message = self.dh.get_public() + rand.random_bytes(padding_length)

        log.debug("obfs3 handshake: %s queued %d bytes (padding_length: %d) (public key: %s).",
                  "initiator" if self.we_are_initiator else "responder",
                  len(handshake_message), padding_length, repr(self.dh.get_public()))

        self.circuit.downstream.write(handshake_message)

    def receivedUpstream(self, data):
        """
        Got data from upstream. We need to obfuscated and proxy them downstream.
        """
        if not self.send_crypto:
            log.debug("Got upstream data before doing handshake. Caching.")
            self.queued_data += data.read()
            return

        message = self.send_crypto.crypt(data.read())
        log.debug("obfs3 receivedUpstream: Transmitting %d bytes.", len(message))

        # Proxy encrypted message.
        self.circuit.downstream.write(message)

    def receivedDownstream(self, data):
        """
        Got data from downstream. We need to de-obfuscate them and
        proxy them upstream.
        """

        if self.state == ST_WAIT_FOR_KEY: # Looking for the other peer's pubkey
            self._read_handshake(data)

        if self.state == ST_WAIT_FOR_HANDSHAKE: # Doing the exp mod
            return

        if self.state == ST_SEARCHING_MAGIC: # Looking for the magic string
            self._scan_for_magic(data)

        if self.state == ST_OPEN: # Handshake is done. Just decrypt and read application data.
            log.debug("obfs3 receivedDownstream: Processing %d bytes of application data." %
                      len(data))
            self.circuit.upstream.write(self.recv_crypto.crypt(data.read()))

    def _read_handshake(self, data):
        """
        Read handshake message, parse the other peer's public key and
        schedule the key exchange for execution outside of the event loop.
        """

        log_prefix = "obfs3:_read_handshake()"
        if len(data) < PUBKEY_LEN:
            log.debug("%s: Not enough bytes for key (%d)." % (log_prefix, len(data)))
            return

        log.debug("%s: Got %d bytes of handshake data (waiting for key)." % (log_prefix, len(data)))

        # Get the public key from the handshake message, do the DH and
        # get the shared secret.
        other_pubkey = data.read(PUBKEY_LEN)

        # Do the UniformDH handshake asynchronously: the modular
        # exponentiation is too slow to run on the event loop thread.
        self.d = threads.deferToThread(self.dh.get_secret, other_pubkey)
        self.d.addCallback(self._read_handshake_post_dh, other_pubkey, data)
        self.d.addErrback(self._uniform_dh_errback, other_pubkey)

        self.state = ST_WAIT_FOR_HANDSHAKE

    def _uniform_dh_errback(self, failure, other_pubkey):
        """
        Errback invoked when the deferred UniformDH key exchange fails
        (corrupted public key): close the circuit and log the event.
        """

        self.circuit.close()
        # Re-raises anything that is not a ValueError.
        failure.trap(ValueError)
        log.warning("obfs3: Corrupted public key '%s'" % repr(other_pubkey))

    def _read_handshake_post_dh(self, shared_secret, other_pubkey, data):
        """
        Setup the crypto from the calculated shared secret, and complete the
        obfs3 handshake.
        """

        self.shared_secret = shared_secret
        log_prefix = "obfs3:_read_handshake_post_dh()"
        log.debug("Got public key: %s.\nGot shared secret: %s" %
                  (repr(other_pubkey), repr(self.shared_secret)))

        # Set up our crypto.
        self.send_crypto = self._derive_crypto(self.send_keytype)
        self.recv_crypto = self._derive_crypto(self.recv_keytype)
        self.other_magic_value = hmac_sha256.hmac_sha256_digest(self.shared_secret,
                                                                self.recv_magic_const)

        # Send our magic value to the remote end and append the queued outgoing data.
        # Padding is prepended so that the server does not just send the 32-byte magic
        # in a single TCP segment.
        padding_length = random.randint(0, MAX_PADDING//2)
        magic = hmac_sha256.hmac_sha256_digest(self.shared_secret, self.send_magic_const)
        message = rand.random_bytes(padding_length) + magic + self.send_crypto.crypt(self.queued_data)
        self.queued_data = ''

        log.debug("%s: Transmitting %d bytes (with magic)." % (log_prefix, len(message)))
        self.circuit.downstream.write(message)

        self.state = ST_SEARCHING_MAGIC
        if len(data) > 0:
            log.debug("%s: Processing %d bytes of handshake data remaining after key." % (log_prefix, len(data)))
            self._scan_for_magic(data)

    def _scan_for_magic(self, data):
        """
        Scan 'data' for the magic string. If found, drain it and all
        the padding before it. Then open the connection.
        """

        log_prefix = "obfs3:_scan_for_magic()"
        log.debug("%s: Searching for magic." % log_prefix)

        assert(self.other_magic_value)
        chunk = data.peek()

        index = chunk.find(self.other_magic_value)
        if index < 0:
            if (len(data) > MAX_PADDING+HASHLEN):
                raise base.PluggableTransportError("obfs3: Too much padding (%d)!" % len(data))
            log.debug("%s: Did not find magic this time (%d)." % (log_prefix, len(data)))
            return

        index += len(self.other_magic_value)
        log.debug("%s: Found magic. Draining %d bytes." % (log_prefix, index))
        data.drain(index)

        self.state = ST_OPEN
        if len(data) > 0:
            log.debug("%s: Processing %d bytes of application data remaining after magic." % (log_prefix, len(data)))
            self.circuit.upstream.write(self.recv_crypto.crypt(data.read()))

    def _derive_crypto(self, pad_string):
        """
        Derive and return an obfs3 key using the pad string in 'pad_string'.
        """
        secret = hmac_sha256.hmac_sha256_digest(self.shared_secret, pad_string)
        return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])
225 |
class Obfs3Client(Obfs3Transport):

    """
    The client (initiator) side of obfs3. Client and server differ only
    in their key/magic pad strings and the we_are_initiator flag.
    """

    def __init__(self):
        super(Obfs3Client, self).__init__()

        # Initiator role: we encrypt/search with the initiator strings
        # and expect the responder's strings from the peer.
        self.send_keytype = "Initiator obfuscated data"
        self.recv_keytype = "Responder obfuscated data"
        self.send_magic_const = "Initiator magic"
        self.recv_magic_const = "Responder magic"
        self.we_are_initiator = True
241 |
class Obfs3Server(Obfs3Transport):

    """
    The server (responder) side of obfs3. Client and server differ only
    in their key/magic pad strings and the we_are_initiator flag.
    """

    def __init__(self):
        super(Obfs3Server, self).__init__()

        # Responder role: mirror image of Obfs3Client's strings.
        self.send_keytype = "Responder obfuscated data"
        self.recv_keytype = "Initiator obfuscated data"
        self.send_magic_const = "Responder magic"
        self.recv_magic_const = "Initiator magic"
        self.we_are_initiator = False
257 |
258 |
259 |
260 |
--------------------------------------------------------------------------------
/obfsproxy/transports/obfs3_dh.py:
--------------------------------------------------------------------------------
1 | import binascii
2 |
3 | import obfsproxy.common.rand as rand
4 | import obfsproxy.common.modexp as modexp
5 |
def int_to_bytes(lvalue, width):
    """
    Serialize the integer 'lvalue' into a big-endian byte string of
    exactly 'width' bytes, truncating to the low 8*width bits.
    """
    fmt = '%%.%dx' % (2*width)
    # (1 << 8*width) - 1 masks the value to 'width' bytes. The previous
    # '1L' long literal is Python-2-only (a SyntaxError on Python 3);
    # a plain '1' behaves identically on both.
    return binascii.unhexlify(fmt % (lvalue & ((1 << 8*width) - 1)))
9 |
class UniformDH:
    """
    This is a class that implements a DH handshake that uses public
    keys that are indistinguishable from 192-byte random strings.

    The idea (and even the implementation) was suggested by Ian
    Goldberg in:
    https://lists.torproject.org/pipermail/tor-dev/2012-December/004245.html
    https://lists.torproject.org/pipermail/tor-dev/2012-December/004248.html

    Attributes:
    mod, the modulus of our DH group.
    g, the generator of our DH group.
    group_len, the size of the group in bytes.

    priv_str, a byte string representing our DH private key.
    priv, our DH private key as an integer.
    pub_str, a byte string representing our DH public key.
    pub, our DH public key as an integer.
    shared_secret, our DH shared secret.
    """

    # 1536-bit MODP Group from RFC3526
    mod = int(
        """FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
           29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
           EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
           E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
           EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
           C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
           83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
           670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF""".replace(' ','').replace('\n','').replace('\t',''), 16)
    g = 2
    group_len = 192 # bytes (1536-bits)

    def __init__(self, private_key = None):
        """
        Initialise a UniformDH object, generating a key pair.

        If `private_key' is given, it is used as the private key; otherwise
        a fresh random one is drawn from the CSPRNG.  Raises ValueError if
        `private_key' has the wrong length.
        """
        # Generate private key
        if private_key is not None:
            if len(private_key) != self.group_len:
                # Bug fix: `group_len' was previously referenced without
                # `self.', which raised NameError instead of the intended
                # ValueError on malformed input.
                raise ValueError("private_key is a invalid length (Expected %d, got %d)" % (self.group_len, len(private_key)))
            self.priv_str = private_key
        else:
            self.priv_str = rand.random_bytes(self.group_len)
        self.priv = int(binascii.hexlify(self.priv_str), 16)

        # Make the private key even
        flip = self.priv % 2
        self.priv -= flip

        # Generate public key
        #
        # Note: Always generate both valid public keys, and then pick to avoid
        # leaking timing information about which key was chosen.
        pub = modexp.powMod(self.g, self.priv, self.mod)
        pub_p_sub_X = self.mod - pub
        if flip == 1:
            self.pub = pub_p_sub_X
        else:
            self.pub = pub
        self.pub_str = int_to_bytes(self.pub, self.group_len)

        self.shared_secret = None

    def get_public(self):
        """
        Return our public key as a `group_len'-byte string.
        """
        return self.pub_str

    def get_secret(self, their_pub_str):
        """
        Given the public key of the other party as a string of bytes,
        calculate our shared secret.

        This might raise a ValueError since 'their_pub_str' is
        attacker controlled.
        """
        their_pub = int(binascii.hexlify(their_pub_str), 16)

        self.shared_secret = modexp.powMod(their_pub, self.priv, self.mod)
        return int_to_bytes(self.shared_secret, self.group_len)
88 |
89 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SPIN-UMass/BLANKET/0d222f54dab8342c0b3473f8923fc3cd9e524722/obfsproxy/transports/scramblesuit/__init__.py
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/const.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines constant values for the ScrambleSuit protocol.
3 |
4 | While some values can be changed, in general they should not. If you do not
5 | obey, be at least careful because the protocol could easily break.
6 | """
7 |
8 | # Length of the key of the HMAC which used to authenticate tickets in bytes.
9 | TICKET_HMAC_KEY_LENGTH = 32
10 |
11 | # Length of the AES key used to encrypt tickets in bytes.
12 | TICKET_AES_KEY_LENGTH = 16
13 |
14 | # Length of the IV for AES-CBC which is used to encrypt tickets in bytes.
15 | TICKET_AES_CBC_IV_LENGTH = 16
16 |
17 | # Directory where long-lived information is stored. It defaults to the current
18 | # directory but is later set by `setStateLocation()' in util.py.
19 | STATE_LOCATION = ""
20 |
21 | # Divisor (in seconds) for the Unix epoch used to defend against replay
22 | # attacks.
23 | EPOCH_GRANULARITY = 3600
24 |
25 | # Flags which can be set in a ScrambleSuit protocol message.
26 | FLAG_PAYLOAD = (1 << 0)
27 | FLAG_NEW_TICKET = (1 << 1)
28 | FLAG_PRNG_SEED = (1 << 2)
29 |
30 | # Length of ScrambleSuit's header in bytes.
31 | HDR_LENGTH = 16 + 2 + 2 + 1
32 |
33 | # Length of the HMAC-SHA256-128 digest in bytes.
34 | HMAC_SHA256_128_LENGTH = 16
35 |
36 | # Whether or not to use inter-arrival time obfuscation. Disabling this option
37 | # makes the transported protocol more identifiable but increases throughput a
38 | # lot.
39 | USE_IAT_OBFUSCATION = False
40 |
41 | # Key rotation time for session ticket keys in seconds.
42 | KEY_ROTATION_TIME = 60 * 60 * 24 * 7
43 |
44 | # Mark used to easily locate the HMAC authenticating handshake messages in
45 | # bytes.
46 | MARK_LENGTH = 16
47 |
48 | # The master key's length in bytes.
49 | MASTER_KEY_LENGTH = 32
50 |
51 | # Maximum amount of seconds, a packet is delayed due to inter arrival time
52 | # obfuscation.
53 | MAX_PACKET_DELAY = 0.01
54 |
55 | # The maximum amount of padding to be appended to handshake data.
56 | MAX_PADDING_LENGTH = 1500
57 |
58 | # Length of ScrambleSuit's MTU in bytes. Note that this is *not* the link MTU
59 | # which is probably 1500.
60 | MTU = 1448
61 |
62 | # Maximum payload unit of a ScrambleSuit message in bytes.
63 | MPU = MTU - HDR_LENGTH
64 |
65 | # The minimum amount of distinct bins for probability distributions.
66 | MIN_BINS = 1
67 |
68 | # The maximum amount of distinct bins for probability distributions.
69 | MAX_BINS = 100
70 |
71 | # Length of a UniformDH public key in bytes.
72 | PUBLIC_KEY_LENGTH = 192
73 |
74 | # Length of the PRNG seed used to generate probability distributions in bytes.
75 | PRNG_SEED_LENGTH = 32
76 |
77 | # File which holds the server's state information.
78 | SERVER_STATE_FILE = "server_state.cpickle"
79 |
80 | # Life time of session tickets in seconds.
81 | SESSION_TICKET_LIFETIME = KEY_ROTATION_TIME
82 |
83 | # SHA256's digest length in bytes.
84 | SHA256_LENGTH = 32
85 |
86 | # The length of the UniformDH shared secret in bytes. It should be a multiple
87 | # of 5 bytes since outside ScrambleSuit it is encoded in Base32. That way, we
88 | # can avoid padding which might confuse users.
89 | SHARED_SECRET_LENGTH = 20
90 |
91 | # States which are used for the protocol state machine.
92 | ST_WAIT_FOR_AUTH = 0
93 | ST_CONNECTED = 1
94 |
95 | # File which holds the client's session tickets.
96 | CLIENT_TICKET_FILE = "session_ticket.yaml"
97 |
98 | # Static validation string embedded in all tickets. Must be a multiple of 16
99 | # bytes due to AES' block size.
100 | TICKET_IDENTIFIER = "ScrambleSuitTicket"
101 |
102 | # Length of a session ticket in bytes.
103 | TICKET_LENGTH = 112
104 |
105 | # The protocol name which is used in log messages.
106 | TRANSPORT_NAME = "ScrambleSuit"
107 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/fifobuf.py:
--------------------------------------------------------------------------------
1 | """
2 | Provides an interface for a fast FIFO buffer.
3 |
4 | The interface implements only 'read()', 'write()' and 'len()'. The
5 | implementation below is a modified version of the code originally written by
6 | Ben Timby: http://ben.timby.com/?p=139
7 | """
8 |
try:
    from cStringIO import StringIO
except ImportError:
    try:
        from StringIO import StringIO
    except ImportError:
        # Python 3: the cStringIO/StringIO modules no longer exist, so fall
        # back to io.StringIO.  Previously this raised an uncaught
        # ImportError on Python 3.
        from io import StringIO

# Size cap of one internal StringIO segment; once a segment grows past this,
# subsequent writes go into a fresh segment.
MAX_BUFFER = 1024**2*4

class Buffer( object ):

    """
    Implements a fast FIFO buffer.

    Internally, the buffer consists of a list of StringIO objects.  New
    StringIO objects are added and deleted as data is written to and read
    from the FIFO buffer.
    """

    def __init__( self, max_size=MAX_BUFFER ):
        """
        Initialise a Buffer object.

        `max_size' caps the size of each internal StringIO segment.
        """

        self.buffers = []
        self.max_size = max_size
        self.read_pos = 0    # Read offset into the first (oldest) segment.
        self.write_pos = 0   # Write offset into the last (newest) segment.

    def write( self, data ):
        """
        Write `data' to the FIFO buffer.

        If necessary, a new internal buffer is created.
        """

        # Add a StringIO buffer if none exists yet.
        if not self.buffers:
            self.buffers.append(StringIO())
            self.write_pos = 0

        lastBuf = self.buffers[-1]
        lastBuf.seek(self.write_pos)
        lastBuf.write(data)

        # If we are over the limit, a new internal buffer is created.
        if lastBuf.tell() >= self.max_size:
            lastBuf = StringIO()
            self.buffers.append(lastBuf)

        self.write_pos = lastBuf.tell()

    def read( self, length=-1 ):
        """
        Read `length' elements of the FIFO buffer; -1 reads everything.

        Drained data is automatically deleted.
        """

        read_buf = StringIO()
        remaining = length

        while True:

            if not self.buffers:
                break

            firstBuf = self.buffers[0]
            firstBuf.seek(self.read_pos)
            read_buf.write(firstBuf.read(remaining))
            self.read_pos = firstBuf.tell()

            if length == -1:

                # We did not limit the read, we exhausted the buffer, so
                # delete it.  Keep reading from the remaining buffers.
                del self.buffers[0]
                self.read_pos = 0

            else:

                # We limited the read so either we exhausted the buffer or
                # not.
                remaining = length - read_buf.tell()

                if remaining > 0:
                    # Exhausted, remove buffer, read more.  Keep reading
                    # from remaining buffers.
                    del self.buffers[0]
                    self.read_pos = 0
                else:
                    # Did not exhaust buffer, but read all that was
                    # requested.  Break to stop reading and return data of
                    # requested length.
                    break

        return read_buf.getvalue()

    def __len__(self):
        """
        Return the number of unread bytes held by the Buffer object.
        """

        length = 0

        for buf in self.buffers:

            # Jump to the end of the internal buffer.
            buf.seek(0, 2)

            # Identity check: only the oldest segment has already been
            # partially consumed (was `==', which for StringIO objects
            # falls back to identity anyway — `is' states the intent).
            if buf is self.buffers[0]:
                length += buf.tell() - self.read_pos
            else:
                length += buf.tell()

        return length
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/message.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides code to handle ScrambleSuit protocol messages.
3 |
4 | The exported classes and functions provide interfaces to handle protocol
5 | messages, check message headers for validity and create protocol messages out
6 | of application data.
7 | """
8 |
9 | import obfsproxy.common.log as logging
10 | import obfsproxy.common.serialize as pack
11 | import obfsproxy.transports.base as base
12 |
13 | import mycrypto
14 | import const
15 |
16 | log = logging.get_obfslogger()
17 |
18 |
def createProtocolMessages( data, flags=const.FLAG_PAYLOAD ):
    """
    Create protocol messages out of the given payload.

    The given `data' is sliced into chunks of at most MPU bytes, each of
    which is wrapped in a ProtocolMessage with the given `flags' set.  The
    list of messages is returned; empty `data' still yields one (empty)
    message.
    """

    # Slice the payload into MPU-sized chunks.  The `or [data]' keeps the
    # historical behavior of emitting a single empty message for empty data.
    chunks = [data[pos:pos + const.MPU]
              for pos in range(0, len(data), const.MPU)] or [data]

    messages = [ProtocolMessage(chunk, flags=flags) for chunk in chunks]

    log.debug("Created %d protocol messages." % len(messages))

    return messages
39 |
40 |
def getFlagNames( flags ):
    """
    Return the flag name encoded in the integer `flags' as string.

    This function is only useful for printing easy-to-read flag names in debug
    log messages.
    """

    return {1: "PAYLOAD",
            2: "NEW_TICKET",
            4: "PRNG_SEED"}.get(flags, "Undefined")
60 |
61 |
def isSane( totalLen, payloadLen, flags ):
    """
    Verifies whether the given header fields are sane.

    The values of the fields `totalLen', `payloadLen' and `flags' are checked
    for their sanity.  If they are in the expected range, `True' is returned.
    If any of these fields has an invalid value, `False' is returned.
    """

    log.debug("Message header: totalLen=%d, payloadLen=%d, flags"
              "=%s" % (totalLen, payloadLen, getFlagNames(flags)))

    def withinMPU( length ):
        """
        Check whether `length' lies within [0, MPU].
        """

        return 0 <= length <= const.MPU

    validFlags = (const.FLAG_PAYLOAD,
                  const.FLAG_NEW_TICKET,
                  const.FLAG_PRNG_SEED)

    return (withinMPU(totalLen) and
            withinMPU(payloadLen) and
            payloadLen <= totalLen and
            flags in validFlags)
91 |
92 |
class ProtocolMessage( object ):

    """
    Represents a ScrambleSuit protocol message.

    This class provides methods to deal with protocol messages.  The methods
    make it possible to add padding as well as to encrypt and authenticate
    protocol messages.
    """

    def __init__( self, payload="", paddingLen=0, flags=const.FLAG_PAYLOAD ):
        """
        Initialises a ProtocolMessage object.

        Raises a PluggableTransportError if payload plus padding exceed the
        maximum payload unit.
        """

        payloadLen = len(payload)
        if (payloadLen + paddingLen) > const.MPU:
            raise base.PluggableTransportError("No overly long messages.")

        self.payload = payload
        self.payloadLen = payloadLen
        self.totalLen = payloadLen + paddingLen
        self.flags = flags

    def encryptAndHMAC( self, crypter, hmacKey ):
        """
        Encrypt and authenticate this protocol message.

        This protocol message is encrypted using `crypter' and authenticated
        using `hmacKey'.  Finally, the encrypted message prepended by a
        HMAC-SHA256-128 is returned and ready to be sent over the wire.
        """

        # Header: 2-byte total length, 2-byte payload length, 1-byte flags.
        header = (pack.htons(self.totalLen) +
                  pack.htons(self.payloadLen) +
                  chr(self.flags))

        # The padding consists of NUL bytes following the payload.
        padding = (self.totalLen - self.payloadLen) * '\0'

        ciphertext = crypter.encrypt(header + self.payload + padding)

        return mycrypto.HMAC_SHA256_128(hmacKey, ciphertext) + ciphertext

    def addPadding( self, paddingLen ):
        """
        Add padding to this protocol message.

        Padding is added to this protocol message.  The exact amount is
        specified by `paddingLen'.
        """

        # The padding must not exceed the message size.
        if (self.totalLen + paddingLen) > const.MPU:
            raise base.PluggableTransportError("Can't pad more than the MTU.")

        if paddingLen == 0:
            return

        log.debug("Adding %d bytes of padding to %d-byte message." %
                  (paddingLen, const.HDR_LENGTH + self.totalLen))

        self.totalLen += paddingLen

    def __len__( self ):
        """
        Return the on-wire length of this protocol message.
        """

        return const.HDR_LENGTH + self.totalLen

# Alias class name in order to provide a more intuitive API.
new = ProtocolMessage
163 |
class MessageExtractor( object ):

    """
    Extracts ScrambleSuit protocol messages out of an encrypted stream.
    """

    def __init__( self ):
        """
        Initialise a new MessageExtractor object.
        """

        # Accumulates raw wire data until a full message is available.
        self.recvBuf = ""
        # Header fields of the message currently being parsed; `None' means
        # the header has not been decrypted yet.
        self.totalLen = None
        self.payloadLen = None
        self.flags = None

    def extract( self, data, aes, hmacKey ):
        """
        Extracts (i.e., decrypts and authenticates) protocol messages.

        The raw `data' coming directly from the wire is decrypted using `aes'
        and authenticated using `hmacKey'.  The payload is then returned as
        unencrypted protocol messages.  In case of invalid headers or HMACs, an
        exception is raised.

        NOTE: `aes' is a stream cipher (AES-CTR); the decrypt calls below
        consume its keystream in order, so the header (bytes 16..20) must be
        decrypted exactly once and before the message body.  The `None'
        header-field check guarantees this across partial reads.
        """

        self.recvBuf += data
        msgs = []

        # Keep trying to unpack as long as there is at least a header.
        while len(self.recvBuf) >= const.HDR_LENGTH:

            # If necessary, extract the header fields.
            if self.totalLen == self.payloadLen == self.flags == None:
                self.totalLen = pack.ntohs(aes.decrypt(self.recvBuf[16:18]))
                self.payloadLen = pack.ntohs(aes.decrypt(self.recvBuf[18:20]))
                self.flags = ord(aes.decrypt(self.recvBuf[20]))

                if not isSane(self.totalLen, self.payloadLen, self.flags):
                    raise base.PluggableTransportError("Invalid header.")

            # Parts of the message are still on the wire; waiting.
            if (len(self.recvBuf) - const.HDR_LENGTH) < self.totalLen:
                break

            # The HMAC covers the encrypted header fields and body
            # (everything after the 16-byte HMAC itself).
            rcvdHMAC = self.recvBuf[0:const.HMAC_SHA256_128_LENGTH]
            vrfyHMAC = mycrypto.HMAC_SHA256_128(hmacKey,
                                                self.recvBuf[const.HMAC_SHA256_128_LENGTH:
                                                (self.totalLen + const.HDR_LENGTH)])

            if rcvdHMAC != vrfyHMAC:
                raise base.PluggableTransportError("Invalid message HMAC.")

            # Decrypt the message and remove it from the input buffer.
            # Padding past `payloadLen' is decrypted (to keep the keystream
            # aligned) but discarded by the slice.
            extracted = aes.decrypt(self.recvBuf[const.HDR_LENGTH:
                                    (self.totalLen + const.HDR_LENGTH)])[:self.payloadLen]
            msgs.append(ProtocolMessage(payload=extracted, flags=self.flags))
            self.recvBuf = self.recvBuf[const.HDR_LENGTH + self.totalLen:]

            # Protocol message processed; now reset length fields.
            self.totalLen = self.payloadLen = self.flags = None

        return msgs
227 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/mycrypto.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides cryptographic functions not implemented in PyCrypto.
3 |
4 | The implemented algorithms include HKDF-SHA256, HMAC-SHA256-128, (CS)PRNGs and
5 | an interface for encryption and decryption using AES in counter mode.
6 | """
7 |
8 | import Crypto.Hash.SHA256
9 | import Crypto.Hash.HMAC
10 | import Crypto.Util.Counter
11 | import Crypto.Cipher.AES
12 |
13 | import obfsproxy.transports.base as base
14 | import obfsproxy.common.log as logging
15 |
16 | import math
17 | import os
18 |
19 | import const
20 |
21 | log = logging.get_obfslogger()
22 |
23 |
class HKDF_SHA256( object ):

    """
    Implements HKDF using SHA256: https://tools.ietf.org/html/rfc5869

    This class only implements the `expand' but not the `extract' stage since
    the provided PRK already exhibits strong entropy.
    """

    def __init__( self, prk, info="", length=32 ):
        """
        Initialise a HKDF_SHA256 object.

        `prk' is the pseudo-random key, `info' optional context data and
        `length' the amount of output keying material in bytes.  Raises
        ValueError for out-of-range `length' or too-short `prk'.
        """

        self.hashLen = const.SHA256_LENGTH

        if length > (self.hashLen * 255):
            raise ValueError("The OKM's length cannot be larger than %d." %
                             (self.hashLen * 255))

        if len(prk) < self.hashLen:
            raise ValueError("The PRK must be at least %d bytes in length "
                             "(%d given)." % (self.hashLen, len(prk)))

        self.N = math.ceil(float(length) / self.hashLen)
        self.prk = prk
        self.info = info
        self.length = length
        self.ctr = 1
        self.T = ""

    def expand( self ):
        """
        Return the expanded output key material.

        The output key material is calculated based on the given PRK, info and
        L.  May only be called once per object.
        """

        # Prevent the accidental re-use of output keying material.
        if len(self.T) > 0:
            raise base.PluggableTransportError("HKDF-SHA256 OKM must not "
                                               "be re-used by application.")

        # T(i) = HMAC(PRK, T(i-1) | info | i), per RFC 5869 section 2.3.
        block = ""
        while len(self.T) < self.length:
            block = Crypto.Hash.HMAC.new(self.prk,
                                         block + self.info + chr(self.ctr),
                                         Crypto.Hash.SHA256).digest()
            self.T += block
            self.ctr += 1

        return self.T[:self.length]
78 |
79 |
def HMAC_SHA256_128( key, msg ):
    """
    Return the HMAC-SHA256-128 of the given `msg' authenticated by `key'.

    The full 256-bit digest is truncated to its first 128 bits.
    """

    assert(len(key) >= const.SHARED_SECRET_LENGTH)

    digest = Crypto.Hash.HMAC.new(key, msg, Crypto.Hash.SHA256).digest()

    # Return HMAC truncated to 128 out of 256 bits.
    return digest[:16]
91 |
92 |
def strongRandom( size ):
    """
    Return `size' bytes of strong randomness suitable for cryptographic use.

    The randomness is obtained from the operating system's CSPRNG via
    `os.urandom()'.
    """

    return os.urandom(size)
99 |
100 |
class PayloadCrypter:

    """
    Provides methods to encrypt data using AES in counter mode.

    This class provides methods to set a session key as well as an
    initialisation vector and to encrypt and decrypt data.
    """

    def __init__( self ):
        """
        Initialise a PayloadCrypter object.

        The key, cipher and counter remain unset until `setSessionKey()' is
        called.
        """

        log.debug("Initialising AES-CTR instance.")

        self.sessionKey = None
        self.crypter = None
        self.counter = None

    def setSessionKey( self, key, iv ):
        """
        Set AES' session key and the initialisation vector for counter mode.

        The given `key' and `iv' are used as 256-bit AES key and as 128-bit
        initialisation vector for counter mode.  Both, the key as well as the
        IV must come from a CSPRNG.
        """

        self.sessionKey = key

        # Our 128-bit counter has the following format:
        # [ 64-bit static and random IV ] [ 64-bit incrementing counter ]
        # Counter wrapping is not allowed which makes it possible to transfer
        # 2^64 * 16 bytes of data while avoiding counter reuse.  That amount
        # is effectively out of reach given today's networking performance.
        log.debug("Setting IV for AES-CTR.")
        self.counter = Crypto.Util.Counter.new(64,
                                               prefix=iv,
                                               initial_value=1,
                                               allow_wraparound=False)

        log.debug("Setting session key for AES-CTR.")
        self.crypter = Crypto.Cipher.AES.new(key,
                                             Crypto.Cipher.AES.MODE_CTR,
                                             counter=self.counter)

    def encrypt( self, data ):
        """
        Encrypts the given `data' using AES in counter mode.
        """

        return self.crypter.encrypt(data)

    # Encryption equals decryption in AES-CTR.
    decrypt = encrypt
156 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/packetmorpher.py:
--------------------------------------------------------------------------------
1 | """
2 | Provides code to morph a chunk of data to a given probability distribution.
3 |
4 | The class provides an interface to morph a network packet's length to a
5 | previously generated probability distribution. The packet lengths of the
6 | morphed network data should then match the probability distribution.
7 | """
8 |
9 | import random
10 |
11 | import probdist
12 | import const
13 |
14 | import obfsproxy.common.log as logging
15 |
16 | log = logging.get_obfslogger()
17 |
class PacketMorpher( object ):

    """
    Implements methods to morph data to a target probability distribution.

    This class is used to modify ScrambleSuit's packet length distribution on
    the wire.  The class provides a method to determine the padding for
    packets smaller than the MTU.
    """

    def __init__( self, dist=None ):
        """
        Initialise the packet morpher with the given distribution `dist'.

        If `dist' is `None', a new discrete probability distribution is
        generated randomly.
        """

        self.dist = dist if dist else probdist.new(
            lambda: random.randint(const.HDR_LENGTH, const.MTU))

    def calcPadding( self, dataLen ):
        """
        Based on `dataLen', determines the padding for a network packet.

        ScrambleSuit morphs packets which are smaller than the link's MTU.
        This method draws a random sample from the probability distribution
        which is used to determine and return the padding for such packets.
        This effectively gets rid of Tor's 586-byte signature.
        """

        # The `is' and `should-be' length of the burst's last packet.
        dataLen %= const.MTU
        sampleLen = self.dist.randomSample()

        # The padding length lies in {0..MTU-1}: pad up to the sample, or
        # wrap past the MTU boundary when the sample is too small.
        if sampleLen >= dataLen:
            padLen = sampleLen - dataLen
        else:
            padLen = (const.MTU - dataLen) + sampleLen

        log.debug("Morphing the last %d-byte packet to %d bytes by adding %d "
                  "bytes of padding." %
                  (dataLen, sampleLen, padLen))

        return padLen

# Alias class name in order to provide a more intuitive API.
new = PacketMorpher
70 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/probdist.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides code to generate and sample probability distributions.
3 |
4 | The class RandProbDist provides an interface to randomly generate probability
5 | distributions. Random samples can then be drawn from these distributions.
6 | """
7 |
8 | import random
9 |
10 | import const
11 |
12 | import obfsproxy.common.log as logging
13 |
14 | log = logging.get_obfslogger()
15 |
16 |
class RandProbDist:

    """
    Provides code to generate, sample and dump probability distributions.
    """

    def __init__( self, genSingleton, seed=None ):
        """
        Initialise a discrete probability distribution.

        The parameter `genSingleton' is expected to be a function which yields
        singletons for the probability distribution.  The optional `seed' can
        be used to seed the PRNG so that the probability distribution is
        generated deterministically.
        """

        # Without a seed, fall back to the module-level PRNG.
        self.prng = random.Random(seed) if seed is not None else random

        self.sampleList = []
        self.dist = self.genDistribution(genSingleton)
        self.dumpDistribution()

    def genDistribution( self, genSingleton ):
        """
        Generate a discrete probability distribution.

        The parameter `genSingleton' is a function which is used to generate
        singletons for the probability distribution.
        """

        dist = {}

        # Amount of distinct bins, i.e., packet lengths or inter arrival times.
        numBins = self.prng.randint(const.MIN_BINS, const.MAX_BINS)

        # Cumulative probability of all bins.
        cumulProb = 0

        for _ in xrange(numBins):
            prob = self.prng.uniform(0, (1 - cumulProb))
            cumulProb += prob

            singleton = genSingleton()
            dist[singleton] = prob
            self.sampleList.append((cumulProb, singleton,))

        # The final bin absorbs the remaining probability mass.
        dist[genSingleton()] = (1 - cumulProb)

        return dist

    def dumpDistribution( self ):
        """
        Dump the probability distribution using the logging object.

        Only probabilities > 0.01 are dumped.
        """

        log.debug("Dumping probability distribution.")

        for singleton in self.dist.iterkeys():
            # We are not interested in tiny probabilities.
            if self.dist[singleton] > 0.01:
                log.debug("P(%s) = %.3f" %
                          (str(singleton), self.dist[singleton]))

    def randomSample( self ):
        """
        Draw and return a random sample from the probability distribution.
        """

        assert len(self.sampleList) > 0

        # NOTE(review): sampling uses the module-level `random', not
        # `self.prng' — seeded distributions are thus sampled
        # non-deterministically; confirm this is intended.
        target = random.random()

        for cumulProb, singleton in self.sampleList:
            if target <= cumulProb:
                return singleton

        return self.sampleList[-1][1]

# Alias class name in order to provide a more intuitive API.
new = RandProbDist
99 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/replay.py:
--------------------------------------------------------------------------------
1 | """
2 | This module implements a mechanism to protect against replay attacks.
3 |
4 | The replay protection mechanism is based on a dictionary which caches
5 | previously observed keys. New keys can be added to the dictionary and existing
6 | ones can be queried. A pruning mechanism deletes expired keys from the
7 | dictionary.
8 | """
9 |
10 | import time
11 |
12 | import const
13 |
14 | import obfsproxy.common.log as logging
15 |
16 | log = logging.get_obfslogger()
17 |
18 |
class Tracker( object ):

    """
    Implement methods to keep track of replayed keys.

    This class provides methods to add new keys (elements), check whether keys
    are already present in the dictionary and to prune the lookup table.
    """

    def __init__( self ):
        """
        Initialise a `Tracker' object.
        """

        self.table = dict()

    def addElement( self, element ):
        """
        Add the given `element' to the lookup table.

        Raises LookupError if `element' was already observed.
        """

        if self.isPresent(element):
            raise LookupError("Element already present in table.")

        # The key is a HMAC and the value is the current Unix timestamp.
        self.table[element] = int(time.time())

    def isPresent( self, element ):
        """
        Check if the given `element' is already present in the lookup table.

        Return `True' if `element' is already in the lookup table and `False'
        otherwise.
        """

        log.debug("Looking for existing element in size-%d lookup table." %
                  len(self.table))

        # Prune the replay table before looking up the given `element'.  This
        # could be done more efficiently, e.g. by pruning every n minutes and
        # only checking the timestamp of this particular element.
        self.prune()

        return element in self.table

    def prune( self ):
        """
        Delete expired elements from the lookup table.

        Keys whose Unix timestamps are older than `const.EPOCH_GRANULARITY'
        are being removed from the lookup table.
        """

        log.debug("Pruning the replay table.")

        now = int(time.time())

        # Collect expired keys first; a dictionary must not shrink while it
        # is being iterated.
        expired = [key for key, stamp in self.table.items()
                   if (now - stamp) > const.EPOCH_GRANULARITY]

        for key in expired:
            log.debug("Deleting expired element.")
            del self.table[key]
86 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/state.py:
--------------------------------------------------------------------------------
1 | """
2 | Provide a way to store the server's state information on disk.
3 |
4 | The server possesses state information which should persist across runs. This
5 | includes key material to encrypt and authenticate session tickets, replay
6 | tables and PRNG seeds. This module provides methods to load, store and
7 | generate such state information.
8 | """
9 |
10 | import os
11 | import sys
12 | import time
13 | import cPickle
14 | import random
15 |
16 | import const
17 | import replay
18 | import mycrypto
19 | import probdist
20 |
21 | import obfsproxy.common.log as logging
22 |
23 | log = logging.get_obfslogger()
24 |
def load( ):
    """
    Load the server's state object from file.

    The server's state file is loaded and the state object returned.  If no
    state file is found, a new one is created and returned.
    """

    stateFile = const.STATE_LOCATION + const.SERVER_STATE_FILE

    log.info("Attempting to load the server's state file from `%s'." %
             stateFile)

    if not os.path.exists(stateFile):
        # First run: generate fresh state instead of loading from disk.
        log.info("The server's state file does not exist (yet).")
        freshState = State()
        freshState.genState()
        return freshState

    try:
        with open(stateFile, 'r') as fd:
            return cPickle.load(fd)
    except IOError as err:
        log.error("Error reading server state file from `%s': %s" %
                  (stateFile, err))
        sys.exit(1)
53 |
class State( object ):

    """
    Implement a state class which stores the server's state.

    This class makes it possible to store state information on disk.  It
    provides methods to generate and write state information.
    """

    def __init__( self ):
        """
        Initialise a `State' object.

        All attributes start out as `None'; `genState()' populates them and
        persists the result to disk.
        """

        # PRNG seed for the client to reproduce the packet and IAT morpher.
        self.prngSeed = None

        # Unix timestamp of when the ticket keys below were generated.
        self.keyCreation = None

        # HMAC and AES keys used to authenticate and encrypt tickets.
        self.hmacKey = None
        self.aesKey = None

        # The previous HMAC and AES keys, kept so tickets issued before a
        # key rotation can still be redeemed.
        self.oldHmacKey = None
        self.oldAesKey = None

        # Replay tracker shared by the ticket and UniformDH authentication
        # mechanisms.  Note: this used to be (wrongly) initialised as the
        # unused attributes `ticketReplay' and `uniformDhReplay' while
        # `isReplayed()' and `registerKey()' access `replayTracker'.
        self.replayTracker = None

        # Distributions for packet lengths and inter-arrival times.
        self.pktDist = None
        self.iatDist = None

        # Fallback UniformDH shared secret.  Only used if the bridge
        # operator did not set `ServerTransportOptions'.
        self.fallbackPassword = None

    def genState( self ):
        """
        Populate all the local variables with values.

        Fresh key material, a new replay tracker and the traffic-shape
        distributions are generated, then the state is written to disk.
        """

        log.info("Generating parameters for the server's state file.")

        # PRNG seed for the client to reproduce the packet and IAT morpher.
        self.prngSeed = mycrypto.strongRandom(const.PRNG_SEED_LENGTH)

        # HMAC and AES key used to encrypt and authenticate tickets.
        self.hmacKey = mycrypto.strongRandom(const.TICKET_HMAC_KEY_LENGTH)
        self.aesKey = mycrypto.strongRandom(const.TICKET_AES_KEY_LENGTH)
        self.keyCreation = int(time.time())

        # The previous HMAC and AES keys.
        self.oldHmacKey = None
        self.oldAesKey = None

        # Replay dictionary for both authentication mechanisms.
        self.replayTracker = replay.Tracker()

        # Distributions for packet lengths and inter arrival times.  Both
        # are seeded with `prngSeed' so the client can reproduce them.
        prng = random.Random(self.prngSeed)
        self.pktDist = probdist.new(lambda: prng.randint(const.HDR_LENGTH,
                                                         const.MTU),
                                    seed=self.prngSeed)
        self.iatDist = probdist.new(lambda: prng.random() %
                                    const.MAX_PACKET_DELAY,
                                    seed=self.prngSeed)

        # Fallback UniformDH shared secret.  Only used if the bridge operator
        # did not set `ServerTransportOptions'.
        self.fallbackPassword = os.urandom(const.SHARED_SECRET_LENGTH)

        self.writeState()

    def isReplayed( self, hmac ):
        """
        Check if `hmac' is present in the replay table.

        Return `True' if the given `hmac' is present in the replay table and
        `False' otherwise.
        """

        assert self.replayTracker is not None

        log.debug("Querying if HMAC is present in the replay table.")

        return self.replayTracker.isPresent(hmac)

    def registerKey( self, hmac ):
        """
        Add the given `hmac' to the replay table.

        The updated state is flushed to disk immediately.
        """

        assert self.replayTracker is not None

        log.debug("Adding a new HMAC to the replay table.")
        self.replayTracker.addElement(hmac)

        # We must write the data to disk immediately so that other ScrambleSuit
        # connections can share the same state.
        self.writeState()

    def writeState( self ):
        """
        Write the state object to a file using the `cPickle' module.

        On I/O failure, an error is logged and the process exits.
        """

        stateFile = const.STATE_LOCATION + const.SERVER_STATE_FILE

        log.debug("Writing server's state file to `%s'." %
                  stateFile)

        try:
            # Open in binary mode: pickled data is a byte stream and writing
            # it in text mode corrupts it on platforms that translate line
            # endings.
            with open(stateFile, 'wb') as fd:
                cPickle.dump(self, fd)
        except IOError as err:
            log.error("Error writing state file to `%s': %s" %
                      (stateFile, err))
            sys.exit(1)
162 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/uniformdh.py:
--------------------------------------------------------------------------------
1 | """
2 | This module implements a class to deal with Uniform Diffie-Hellman handshakes.
3 |
4 | The class `UniformDH' is used by the server as well as by the client to handle
5 | the Uniform Diffie-Hellman handshake used by ScrambleSuit.
6 | """
7 |
8 | import const
9 | import random
10 | import binascii
11 |
12 | import Crypto.Hash.SHA256
13 |
14 | import util
15 | import mycrypto
16 |
17 | import obfsproxy.transports.obfs3_dh as obfs3_dh
18 | import obfsproxy.transports.base as base
19 | import obfsproxy.common.log as logging
20 |
21 | log = logging.get_obfslogger()
22 |
class UniformDH( object ):

    """
    Provide methods to deal with Uniform Diffie-Hellman handshakes.

    The class provides methods to extract public keys and to generate public
    keys wrapped in a valid UniformDH handshake.
    """

    def __init__( self, sharedSecret, weAreServer ):
        """
        Initialise a UniformDH object.
        """

        # `True' if we are the server; `False' otherwise.
        self.weAreServer = weAreServer

        # The shared UniformDH secret.
        self.sharedSecret = sharedSecret

        # Cache a UniformDH public key until it's added to the replay table.
        self.remotePublicKey = None

        # Uniform Diffie-Hellman object (implemented in obfs3_dh.py).
        self.udh = None

    def getRemotePublicKey( self ):
        """
        Return the cached remote UniformDH public key.
        """

        return self.remotePublicKey

    def receivePublicKey( self, data, callback, srvState=None ):
        """
        Extract the public key and invoke a callback with the master secret.

        First, the UniformDH public key is extracted out of `data'.  Then, the
        shared master secret is computed and `callback' is invoked with the
        master secret as argument.  If any of this fails, `False' is returned.
        """

        # Extract the public key sent by the remote host.
        remotePublicKey = self.extractPublicKey(data, srvState)
        if not remotePublicKey:
            return False

        if self.weAreServer:
            self.remotePublicKey = remotePublicKey
            # As server, we need a DH object; as client, we already have one.
            self.udh = obfs3_dh.UniformDH()

        assert self.udh is not None

        try:
            uniformDHSecret = self.udh.get_secret(remotePublicKey)
        except ValueError:
            raise base.PluggableTransportError("Corrupted public key.")

        # First, hash the 4096-bit UniformDH secret to obtain the master key.
        masterKey = Crypto.Hash.SHA256.new(uniformDHSecret).digest()

        # Second, session keys are now derived from the master key.
        callback(masterKey)

        return True

    def extractPublicKey( self, data, srvState=None ):
        """
        Extract and return a UniformDH public key out of `data'.

        Before the public key is touched, the HMAC is verified.  If the HMAC is
        invalid or some other error occurs, `False' is returned.  Otherwise,
        the public key is returned.  The extracted data is finally drained from
        the given `data' object.
        """

        assert self.sharedSecret is not None

        # Do we already have the minimum amount of data?
        if len(data) < (const.PUBLIC_KEY_LENGTH + const.MARK_LENGTH +
                        const.HMAC_SHA256_128_LENGTH):
            return False

        log.debug("Attempting to extract the remote machine's UniformDH "
                  "public key out of %d bytes of data." % len(data))

        handshake = data.peek()

        # First, find the mark to efficiently locate the HMAC.
        publicKey = handshake[:const.PUBLIC_KEY_LENGTH]
        mark = mycrypto.HMAC_SHA256_128(self.sharedSecret, publicKey)

        # `locateMark()' returns `None' when the mark is absent.  Compare
        # against `None' explicitly: `not index' would also be true for a
        # mark located at index 0.
        index = util.locateMark(mark, handshake)
        if index is None:
            return False

        # Now that we know where the authenticating HMAC is: verify it.
        hmacStart = index + const.MARK_LENGTH
        existingHMAC = handshake[hmacStart:
                                 (hmacStart + const.HMAC_SHA256_128_LENGTH)]
        myHMAC = mycrypto.HMAC_SHA256_128(self.sharedSecret,
                                          handshake[0 : hmacStart] +
                                          util.getEpoch())

        if not util.isValidHMAC(myHMAC, existingHMAC, self.sharedSecret):
            log.warning("The HMAC is invalid: `%s' vs. `%s'." %
                        (myHMAC.encode('hex'), existingHMAC.encode('hex')))
            return False

        # Do nothing if the ticket is replayed.  Immediately closing the
        # connection would be suspicious.
        if srvState is not None and srvState.isReplayed(existingHMAC):
            log.warning("The HMAC was already present in the replay table.")
            return False

        data.drain(index + const.MARK_LENGTH + const.HMAC_SHA256_128_LENGTH)

        if srvState is not None:
            log.debug("Adding the HMAC authenticating the UniformDH message " \
                      "to the replay table: %s." % existingHMAC.encode('hex'))
            srvState.registerKey(existingHMAC)

        return handshake[:const.PUBLIC_KEY_LENGTH]

    def createHandshake( self ):
        """
        Create and return a ready-to-be-sent UniformDH handshake.

        The returned handshake data includes the public key, pseudo-random
        padding, the mark and the HMAC.  If a UniformDH object has not been
        initialised yet, a new instance is created.
        """

        assert self.sharedSecret is not None

        log.debug("Creating UniformDH handshake message.")

        if self.udh is None:
            self.udh = obfs3_dh.UniformDH()
        publicKey = self.udh.get_public()

        assert (const.MAX_PADDING_LENGTH - const.PUBLIC_KEY_LENGTH) >= 0

        # Subtract the length of the public key to make the handshake on
        # average as long as a redeemed ticket.  That should thwart statistical
        # length-based attacks.
        padding = mycrypto.strongRandom(random.randint(0,
                                        const.MAX_PADDING_LENGTH -
                                        const.PUBLIC_KEY_LENGTH))

        # Add a mark which enables efficient location of the HMAC.
        mark = mycrypto.HMAC_SHA256_128(self.sharedSecret, publicKey)

        # Authenticate the handshake including the current approximate epoch.
        mac = mycrypto.HMAC_SHA256_128(self.sharedSecret,
                                       publicKey + padding + mark +
                                       util.getEpoch())

        return publicKey + padding + mark + mac

# Alias class name in order to provide a more intuitive API.
new = UniformDH
186 |
--------------------------------------------------------------------------------
/obfsproxy/transports/scramblesuit/util.py:
--------------------------------------------------------------------------------
1 | """
2 | This module implements several commonly used utility functions.
3 |
4 | The implemented functions can be used to swap variables, write and read data
5 | from files and to convert a number to raw text.
6 | """
7 |
8 | import obfsproxy.common.log as logging
9 |
10 | import os
11 | import time
12 | import const
13 |
14 | import mycrypto
15 |
16 | log = logging.get_obfslogger()
17 |
def setStateLocation( stateLocation ):
    """
    Set the constant `STATE_LOCATION' to the given `stateLocation'.

    The variable `stateLocation' determines where persistent information (such
    as the server's key material) is stored.  If `stateLocation' is `None', it
    remains to be the current directory.  In general, however, it should be a
    subdirectory of Tor's data directory.
    """

    import errno

    if stateLocation is None:
        return

    if not stateLocation.endswith('/'):
        stateLocation += '/'

    # To be polite, we create a subdirectory inside wherever we are asked to
    # store data in.
    stateLocation += (const.TRANSPORT_NAME).lower() + '/'

    # ...and if it does not exist yet, we attempt to create the full
    # directory path.  Tolerating EEXIST closes the race between the
    # existence check and `os.makedirs()' when several obfsproxy processes
    # start up concurrently.
    if not os.path.exists(stateLocation):
        log.info("Creating directory path `%s'." % stateLocation)
        try:
            os.makedirs(stateLocation)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise

    log.debug("Setting the state location to `%s'." % stateLocation)
    const.STATE_LOCATION = stateLocation
46 |
47 |
def isValidHMAC( hmac1, hmac2, key ):
    """
    Compares `hmac1' and `hmac2' after HMACing them again using `key'.

    The arguments `hmac1' and `hmac2' are compared.  If they are equal, `True'
    is returned and otherwise `False'.  To prevent timing attacks, double HMAC
    verification is used meaning that the two arguments are HMACed again before
    (variable-time) string comparison.  The idea is taken from:
    https://www.isecpartners.com/blog/2011/february/double-hmac-verification.aspx
    """

    # A length mismatch means the HMAC cannot possibly be valid.  Treat it as
    # a verification failure rather than asserting: `assert' is stripped when
    # Python runs with -O, and the inputs may be attacker-influenced.
    if len(hmac1) != len(hmac2):
        return False

    # HMAC the arguments again to prevent timing attacks.
    doubleHmac1 = mycrypto.HMAC_SHA256_128(key, hmac1)
    doubleHmac2 = mycrypto.HMAC_SHA256_128(key, hmac2)

    if doubleHmac1 != doubleHmac2:
        return False

    log.debug("The computed HMAC is valid.")

    return True
71 |
72 |
def locateMark( mark, payload ):
    """
    Locate the given `mark' in `payload' and return its index.

    The `mark' is placed before the HMAC of a ScrambleSuit authentication
    mechanism and makes it possible to efficiently locate the HMAC.  If the
    `mark' could not be found, `None' is returned.
    """

    position = payload.find(mark)
    if position == -1:
        log.debug("Could not find the mark just yet.")
        return None

    # The HMAC follows directly after the mark; make sure it is complete.
    bytesAfterMark = len(payload) - position - const.MARK_LENGTH
    if bytesAfterMark < const.HMAC_SHA256_128_LENGTH:
        log.debug("Found the mark but the HMAC is still incomplete.")
        return None

    log.debug("Successfully located the mark.")

    return position
95 |
96 |
def getEpoch( ):
    """
    Return the Unix epoch divided by a constant as string.

    This function returns a coarse-grained version of the Unix epoch.  The
    seconds passed since the epoch are divided by the constant
    `EPOCH_GRANULARITY'.
    """

    # Use floor division explicitly: under Python 2, `/' on two ints floors
    # anyway, but under Python 3 it would yield a float (and hence a string
    # like "123.0"), breaking the HMAC computations that embed this value.
    return str(int(time.time()) // const.EPOCH_GRANULARITY)
107 |
108 |
def writeToFile( data, fileName ):
    """
    Writes the given `data' to the file specified by `fileName'.

    Failures are not propagated: if the file cannot be written, an error
    message is logged and the function simply returns.
    """

    log.debug("Opening `%s' for writing." % fileName)

    try:
        with open(fileName, "wb") as outputFile:
            outputFile.write(data)

    except IOError as err:
        log.error("Error writing to `%s': %s." % (fileName, err))
125 |
126 |
def readFromFile( fileName, length=-1 ):
    """
    Read `length' amount of bytes from the given `fileName'

    A `length' of -1 (the default) reads the complete file.  Failures are not
    propagated: if the file is missing or unreadable, `None' is returned and
    an appropriate message is logged.
    """

    if not os.path.exists(fileName):
        log.debug("File `%s' does not exist (yet?)." % fileName)
        return None

    log.debug("Opening `%s' for reading." % fileName)

    data = None

    try:
        with open(fileName, "rb") as inputFile:
            data = inputFile.read(length)

    except IOError as err:
        log.error("Error reading from `%s': %s." % (fileName, err))

    return data
152 |
153 |
def sanitiseBase32( data ):
    """
    Try to sanitise a Base32 string if it's slightly wrong.

    ScrambleSuit's shared secret might be distributed verbally which could
    cause mistakes.  This function fixes simple mistakes, e.g., when a user
    noted "1" rather than "I".
    """

    data = data.upper()

    # Characters which are easily confused when a secret is passed on
    # verbally, mapped to the Base32 characters they were probably meant as.
    for mistake, intended in (("1", "I"), ("0", "O")):
        if mistake in data:
            log.info("Found a \"%s\" in Base32-encoded \"%s\". Assuming " \
                     "it's actually \"%s\"." % (mistake, data, intended))
            data = data.replace(mistake, intended)

    return data
176 |
--------------------------------------------------------------------------------
/obfsproxy/transports/transports.py:
--------------------------------------------------------------------------------
1 | # XXX modulify transports and move this to a single import
2 | import obfsproxy.transports.dummy as dummy
3 | import obfsproxy.transports.b64 as b64
4 | import obfsproxy.transports.obfs2 as obfs2
5 | import obfsproxy.transports.obfs3 as obfs3
6 | import obfsproxy.transports.scramblesuit.scramblesuit as scramblesuit
7 |
8 | import obfsproxy.transports.nnmorph as NNMorph
9 |
# Table of all supported pluggable transports.  Each transport name maps to
# the classes implementing its three roles: 'base' (shared code), 'client'
# and 'server'.  Lookups are performed by get_transport_class() below.
transports = { 'dummy' : {'base': dummy.DummyTransport, 'client' : dummy.DummyClient, 'server' : dummy.DummyServer },
               'b64' : {'base': b64.B64Transport, 'client' : b64.B64Client, 'server' : b64.B64Server },
               'obfs2' : {'base': obfs2.Obfs2Transport, 'client' : obfs2.Obfs2Client, 'server' : obfs2.Obfs2Server },
               'scramblesuit' : {'base': scramblesuit.ScrambleSuitTransport,
                                 'client':scramblesuit.ScrambleSuitClient,
                                 'server':scramblesuit.ScrambleSuitServer },
               'obfs3' : {'base': obfs3.Obfs3Transport, 'client' : obfs3.Obfs3Client, 'server' : obfs3.Obfs3Server } ,
               'NNMORPH': {'base': NNMorph.NNTransport, 'client': NNMorph.NNClient , 'server': NNMorph.NNServer}
              }
19 |
def get_transport_class(name, role):
    """
    Return the class implementing `role' for the transport called `name'.

    `role' may be 'base', 'client' or 'server'; the aliases 'socks' and
    'ext_server' are rewritten to 'client' and 'server' respectively.
    Raises `TransportNotFound' if no matching class is registered.
    """

    # Rewrite equivalent roles.
    if role == 'socks':
        role = 'client'
    elif role == 'ext_server':
        role = 'server'

    # Find the correct class.  A miss on either key means the requested
    # combination is unknown; include it in the error for easier debugging.
    try:
        return transports[name][role]
    except KeyError:
        raise TransportNotFound("Transport '%s' with role '%s' is not "
                                "supported." % (name, role))
32 |
# Raised by get_transport_class() when no class matches the requested
# transport name and role.
class TransportNotFound(Exception): pass
34 |
35 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import sys

from setuptools import setup, find_packages

# Configure versioneer so the package version is derived from git tags of the
# form `obfsproxy-X.Y.Z' and written into obfsproxy/_version.py.
import versioneer
versioneer.versionfile_source = 'obfsproxy/_version.py'
versioneer.versionfile_build = 'obfsproxy/_version.py'
versioneer.tag_prefix = 'obfsproxy-' # tags are like 1.2.0
versioneer.parentdir_prefix = 'obfsproxy-' # dirname like 'myproject-1.2.0'

setup(
    name = "obfsproxy",
    author = "asn",
    author_email = "asn@torproject.org",
    description = ("A pluggable transport proxy written in Python"),
    license = "BSD",
    keywords = ['tor', 'obfuscation', 'twisted'],

    # Version string and build/sdist commands come from versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),

    packages = find_packages(),
    # Install an `obfsproxy' console script that runs pyobfsproxy's entry
    # point.
    entry_points = {
        'console_scripts': [
            'obfsproxy = obfsproxy.pyobfsproxy:run'
        ]
    },

    # Runtime dependencies; pyptlib implements the pluggable-transport
    # managed-proxy protocol spoken with Tor.
    install_requires = [
        'setuptools',
        'PyCrypto',
        'Twisted',
        'argparse',
        'pyptlib >= 0.0.5',
        'pyyaml'
    ],
)
40 |
--------------------------------------------------------------------------------
/setup_py2exe.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

from distutils.core import setup
import py2exe
import os

# Keep all py2exe build artefacts under a single top-level directory.
topdir = "py2exe_bundle"
build_path = os.path.join(topdir, "build")
dist_path = os.path.join(topdir, "dist")

setup(
    # Build a console (not GUI) executable from the obfsproxy entry script.
    console=["bin/obfsproxy"],
    zipfile="obfsproxy.zip",
    options={
        "build": {"build_base": build_path},
        "py2exe": {
            # Packages that py2exe's static import analysis may miss.
            "includes": ["twisted", "pyptlib", "Crypto"],
            "dist_dir": dist_path,
        }
    }
)
22 |
--------------------------------------------------------------------------------