├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .vscode └── launch.json ├── LICENSE ├── README.md ├── bin ├── run └── run.cmd ├── build-release.sh ├── custom-typings ├── cors-gate.d.ts ├── graphql.js.d.ts ├── node-gsettings-wrapper.d.ts └── tarball-extract.d.ts ├── nss ├── LICENSE ├── README.md ├── darwin │ ├── certutil │ ├── common.sh │ ├── createprecomplete.py │ ├── libfreebl3.dylib │ ├── libmozglue.dylib │ ├── libnss3.dylib │ ├── libnssckbi.dylib │ ├── libnssdbm3.dylib │ ├── libsoftokn3.dylib │ ├── make_full_update.sh │ ├── make_incremental_update.sh │ ├── mar │ ├── mbsdiff │ ├── modutil │ ├── pk12util │ ├── shlibsign │ └── signmar ├── linux │ ├── certutil │ ├── common.sh │ ├── createprecomplete.py │ ├── libfreeblpriv3.so │ ├── libmozsqlite3.so │ ├── libnspr4.so │ ├── libnss3.so │ ├── libnssckbi.so │ ├── libnssdbm3.so │ ├── libnssutil3.so │ ├── libplc4.so │ ├── libplds4.so │ ├── libsmime3.so │ ├── libsoftokn3.so │ ├── libssl3.so │ ├── make_full_update.sh │ ├── make_incremental_update.sh │ ├── mar │ ├── mbsdiff │ ├── modutil │ ├── pk12util │ ├── shlibsign │ └── signmar └── win32 │ ├── certutil.exe │ ├── common.sh │ ├── createprecomplete.py │ ├── freebl3.dll │ ├── make_full_update.sh │ ├── make_incremental_update.sh │ ├── mar │ ├── mbsdiff │ ├── modutil.exe │ ├── mozglue.dll │ ├── nss3.dll │ ├── nssckbi.dll │ ├── nssdbm3.dll │ ├── pk12util.exe │ ├── shlibsign.exe │ ├── signmar.exe │ └── softokn3.dll ├── overrides ├── frida │ ├── LICENSE │ ├── README.md │ ├── android │ │ ├── android-certificate-unpinning-fallback.js │ │ ├── android-certificate-unpinning.js │ │ ├── android-proxy-override.js │ │ └── android-system-certificate-injection.js │ ├── config.js │ ├── frida-script.js │ ├── ios │ │ └── ios-connect-hook.js │ ├── native-connect-hook.js │ ├── native-tls-hook.js │ └── utilities │ │ └── test-ip-connectivity.js ├── gems │ ├── cloudinary.rb │ ├── http.rb │ ├── net │ │ ├── http.rb │ │ └── https.rb │ ├── stripe.rb │ └── uri │ │ └── generic.rb ├── 
java-agent.jar ├── js │ ├── package-lock.json │ ├── package.json │ ├── prepend-electron.js │ ├── prepend-node.js │ └── wrap-require.js ├── path │ ├── node │ ├── node.bat │ ├── php │ └── php.bat ├── php │ ├── php-httptoolkit-override.ini │ └── prepend.php ├── pythonpath │ ├── aiohttp.py │ ├── hgdemandimport.py │ ├── http │ │ ├── __init__.py │ │ └── client.py │ ├── httplib.py │ ├── httplib2.py │ ├── httptoolkit_intercept.py │ └── stripe.py └── webextension │ ├── build │ ├── background.js │ ├── content-script.js │ └── injected-script.js │ ├── icon-128.png │ ├── icon-16.png │ ├── icon-48.png │ └── manifest.json ├── pack.ts ├── package-lock.json ├── package.json ├── prepare.ts ├── src ├── api │ ├── api-model.ts │ ├── api-server.ts │ ├── graphql-api.ts │ └── rest-api.ts ├── browsers.ts ├── cert-check-server.ts ├── certificates.ts ├── client │ ├── client-types.ts │ └── http-client.ts ├── commands │ └── start.ts ├── config.d.ts ├── constants.ts ├── dns-server.ts ├── dynamic-dep-store.ts ├── error-tracking.ts ├── hide-warning-server.ts ├── index.ts ├── interceptors │ ├── android │ │ ├── adb-commands.ts │ │ ├── android-adb-interceptor.ts │ │ └── fetch-apk.ts │ ├── chromium-based-interceptors.ts │ ├── docker │ │ ├── docker-build-injection.ts │ │ ├── docker-commands.ts │ │ ├── docker-compose.ts │ │ ├── docker-data-injection.ts │ │ ├── docker-interception-services.ts │ │ ├── docker-interceptor.ts │ │ ├── docker-networking.ts │ │ ├── docker-proxy.ts │ │ ├── docker-tunnel-proxy.ts │ │ └── docker-utils.ts │ ├── electron.ts │ ├── fresh-firefox.ts │ ├── frida │ │ ├── frida-android-integration.ts │ │ ├── frida-android-interceptor.ts │ │ ├── frida-integration.ts │ │ ├── frida-ios-integration.ts │ │ ├── frida-ios-interceptor.ts │ │ └── frida-scripts.ts │ ├── index.ts │ ├── jvm.ts │ └── terminal │ │ ├── existing-terminal-interceptor.ts │ │ ├── fresh-terminal-interceptor.ts │ │ ├── terminal-env-overrides.ts │ │ └── terminal-scripts.ts ├── message-server.ts ├── shutdown.ts ├── util │ ├── 
fs.ts │ ├── http.ts │ ├── network.ts │ ├── process-management.ts │ ├── promise.ts │ ├── snap.ts │ └── stream.ts └── webextension.ts ├── test ├── distributables-test │ ├── unix.sh │ └── windows.bat ├── fixtures │ ├── docker │ │ ├── compose │ │ │ ├── Dockerfile │ │ │ ├── docker-compose.networks.yml │ │ │ ├── docker-compose.yml │ │ │ └── index.js │ │ ├── go │ │ │ ├── Dockerfile │ │ │ ├── app.go │ │ │ ├── go.mod │ │ │ └── go.sum │ │ ├── java │ │ │ ├── Dockerfile │ │ │ └── Main.java │ │ ├── js │ │ │ ├── Dockerfile │ │ │ └── app.js │ │ ├── php │ │ │ ├── Dockerfile │ │ │ └── index.php │ │ ├── python │ │ │ ├── Dockerfile │ │ │ └── app.py │ │ ├── requests-in-build │ │ │ ├── Dockerfile │ │ │ └── make-request.js │ │ └── ruby │ │ │ ├── Dockerfile │ │ │ └── app.rb │ └── terminal │ │ └── js-test-script.js ├── integration │ ├── e2e-api-test.spec.ts │ ├── frida-downloads.spec.ts │ └── interceptors │ │ ├── docker-attachment.spec.ts │ │ ├── docker-terminal-interception.spec.ts │ │ ├── electron.spec.ts │ │ ├── existing-terminal.spec.ts │ │ ├── fresh-chrome.spec.ts │ │ ├── fresh-firefox.spec.ts │ │ ├── fresh-terminal.spec.ts │ │ └── interceptor-test-utils.ts ├── no-sandbox-docker-wrapper.sh ├── test-util.ts └── unit │ └── send-request.spec.ts ├── tsconfig.json ├── wallaby.js └── webpack.config.js /.gitignore: -------------------------------------------------------------------------------- 1 | # Bower dependency directory (https://bower.io/) 2 | # Compiled binary addons (https://nodejs.org/api/addons.html) 3 | # Coverage directory used by tools like istanbul 4 | # Dependency directories 5 | # Directory for instrumented libs generated by jscoverage/JSCover 6 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 7 | # Logs 8 | # Optional REPL history 9 | # Optional eslint cache 10 | # Optional npm cache directory 11 | # Output of 'npm pack' 12 | # Runtime data 13 | # TypeScript v1 declaration files 14 | # Yarn Integrity file 15 | # dotenv environment 
variables file 16 | # next.js build output 17 | # node-waf configuration 18 | # nyc test coverage 19 | *-debug.log 20 | *-error.log 21 | *.log 22 | *.pid 23 | *.pid.lock 24 | *.seed 25 | *.tgz 26 | .env 27 | .eslintcache 28 | .grunt 29 | .lock-wscript 30 | .next 31 | .node_repl_history 32 | .npm 33 | .nyc_output 34 | .yarn-integrity 35 | /.nyc_output 36 | /dist 37 | oclif.manifest.json 38 | /lib 39 | /tmp 40 | /yarn.lock 41 | bower_components 42 | build/Release 43 | coverage 44 | jspm_packages/ 45 | lib-cov 46 | logs 47 | node_modules 48 | node_modules/ 49 | npm-debug.log* 50 | pids 51 | typings/ 52 | yarn-debug.log* 53 | yarn-error.log* 54 | 55 | __pycache__ 56 | *.pyc 57 | 58 | bundle/ 59 | build/ 60 | *.tsbuildinfo 61 | 62 | overrides/webextension/config/ -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "node", 9 | "request": "launch", 10 | "name": "Launch server", 11 | "program": "${workspaceFolder}/bin/run", 12 | "args": ["start"] 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | HTTP Toolkit Server [![Build Status](https://github.com/httptoolkit/httptoolkit-server/workflows/CI/badge.svg)](https://github.com/httptoolkit/httptoolkit-server/actions) [![Version](https://img.shields.io/npm/v/httptoolkit-server.svg)](https://npmjs.org/package/httptoolkit-server) 2 | =================== 3 | 4 | This repo contains the backend for [HTTP Toolkit](https://httptoolkit.tech), a beautiful, cross-platform & open-source HTTP(S) debugging proxy, analyzer & client. 5 | 6 | Looking to file bugs, request features or send feedback? File an issue or vote on existing ones at [github.com/httptoolkit/httptoolkit](https://github.com/httptoolkit/httptoolkit). 7 | 8 | ## What is this? 9 | 10 | HTTP Toolkit runs everything possible within [the web UI](https://github.com/httptoolkit/httptoolkit-ui), written as a standard single-page web application. There's a couple of necessary things you can't do in a web application though, especially: 11 | 12 | * Start a locally running proxy server (here using [Mockttp](https://npmjs.com/package/mockttp)) 13 | * Launch local applications preconfigured for interception 14 | 15 | This server exposes an API that is used by the web UI, exposing these actions and some other related information - see [`src/api/rest-api.ts`](src/api/rest-api.ts) for the full API details. 16 | 17 | This server is runnable standalone as a CLI using [oclif](http://oclif.io), or can be imported into other modules to be run programmatically. 
The available interceptors are defined in [`src/interceptors`](src/interceptors), and some of these also use other services in here, e.g. [`src/cert-check-server.ts`](src/cert-check-server.ts) automatically checks if a certificate is trusted by a browser client, and downloads or installs (depending on the client) the certificate if not. 18 | 19 | Note that the set of interceptors available in HTTP Toolkit depends on both the interceptors available on your server and the interceptors defined in the UI - new interceptors will need to be added to both. 20 | 21 | This server is typically used by users via [httptoolkit-desktop](https://github.com/httptoolkit/httptoolkit-desktop), which builds the server and web UI into an electron application, and starts & stops the server in the background whenever that app is run. Each time the desktop app is built, a new electron app is created containing the latest release from this repo. 22 | 23 | Once the server has installed it automatically updates in the background periodically, pulling new releases from the github releases of this repo. 24 | 25 | ## Contributing 26 | 27 | If you want to add new interceptors, change/fix existing interceptor behaviour (but not their UI) or reconfigure how the underlying proxy server is set up, then you're in the right place :+1:. 28 | 29 | To get started: 30 | 31 | * Clone this repo. 32 | * `npm install` 33 | * `npm start` 34 | * A [Mockttp](https://npmjs.com/package/mockttp) standalone server will start on port 45456, and a graphql management server on 45457. 35 | * Either make requests to the servers by hand, use the production UI by opening `https://app.httptoolkit.tech` in a Chromium-based browser, or start and use a local UI by: 36 | * Setting up the web UI locally (see [httptoolkit/httptoolkit-ui#contributing](https://github.com/httptoolkit/httptoolkit-ui#contributing)). 37 | * Running `npm run start:web` there to start the UI without its own server. 
38 | * Opening `http://local.httptoolkit.tech:8080` in a Chromium-based browser 39 | 40 | A few things to be aware of: 41 | 42 | * If you're looking to add a new interceptor, those also need to be registered in `src/interceptors/index.ts`, and will also need to be added to [the UI](https://github.com/httptoolkit/httptoolkit-ui) to make that available. 43 | * Tests (both unit & integration) can be run with `npm test`, or `npm run test:unit`/`npm run test:integration` to run just the unit/integration tests. 44 | * Note that the integration tests assume the required applications are installed and some docker images are already pulled. See [ci.yml](.github/workflows/ci.yml) and the [build-base](https://github.com/httptoolkit/act-build-base/) image for an example of how to set this up (or just run the tests, look at the errors, and install whatever's missing). 45 | * If running the server in serious use (self-hosting, or long-term ongoing development) you probably want to set a `HTK_SERVER_TOKEN` env var with a random key, and pass this similarly to the UI as an `authToken=` URL parameter. This is useful because the API is very powerful (it can launch arbitrary applications on your machine). The API only listens on localhost and blocks CORS requests, so strictly speaking this shouldn't be necessary, but it is useful as a stronger guarantee & defense in depth. This is handled automatically in the production desktop app. 
-------------------------------------------------------------------------------- /bin/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | require('@oclif/command').run() 4 | .catch(require('@oclif/errors/handle')) -------------------------------------------------------------------------------- /bin/run.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | node "%~dp0\run" %* 4 | -------------------------------------------------------------------------------- /build-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # This script takes a target platform as an argument, and then 5 | # builds the server for that platform - installing the correct 6 | # native-built modules for the platform as appropriate. 7 | 8 | # ------------------------------------------------------------------------ 9 | # Configure everything for the target platform 10 | # ------------------------------------------------------------------------ 11 | 12 | TARGET_PLATFORM=$1 13 | TARGET_ARCH=$2 14 | 15 | echo "CONFIGURING FOR $TARGET_PLATFORM $TARGET_ARCH" 16 | 17 | if [ -z "$TARGET_PLATFORM" ]; then 18 | echo 'A target platform (linux/win32/darwin) is required' 19 | exit 1 20 | fi 21 | 22 | if [ -z "$TARGET_ARCH" ]; then 23 | echo 'A target platform (x64/arm/arm64) is required' 24 | exit 1 25 | fi 26 | 27 | 28 | export PATH=./node_modules/.bin:$PATH 29 | 30 | # Pick the target platform for prebuild-install: 31 | export npm_config_platform=$TARGET_PLATFORM 32 | # Pick the target platform for node-pre-gyp: 33 | export npm_config_target_platform=$TARGET_PLATFORM 34 | 35 | # Same for architecture: 36 | export npm_config_arch=$TARGET_ARCH 37 | export npm_config_target_arch=$TARGET_ARCH 38 | 39 | # Disable node-gyp-build for win-version-info only. 
Without this, it's 40 | # rebuilt for Linux, even given $TARGET_PLATFORM=win32, and then breaks 41 | # at runtime even though there are valid win32 prebuilds available. 42 | export WIN_VERSION_INFO=disable-prebuild 43 | 44 | TARGET=$TARGET_PLATFORM-$TARGET_ARCH 45 | 46 | # ------------------------------------------------------------------------ 47 | # Clean the existing build workspace, to keep targets 100% independent 48 | # ------------------------------------------------------------------------ 49 | 50 | rm -rf ./tmp || true 51 | 52 | # ------------------------------------------------------------------------ 53 | # Build the package for this platform 54 | # ------------------------------------------------------------------------ 55 | 56 | echo 57 | echo "BUILDING FOR $TARGET_PLATFORM $TARGET_ARCH" 58 | echo 59 | 60 | oclif-dev pack --targets=$TARGET 61 | 62 | echo 63 | echo "BUILT" 64 | echo 65 | 66 | # ------------------------------------------------------------------------ 67 | # Confirm that the installed binaries all support the target platform. 68 | # This is not 100% by any means, but catches obvious mistakes. 
69 | # ------------------------------------------------------------------------ 70 | 71 | # Whitelist (as a regex) for packages that may include binaries for other platforms 72 | PACKAGE_WHITELIST='' 73 | 74 | case "$TARGET_ARCH" in 75 | x64) 76 | EXPECTED_ARCH_STRING='x86[_-]64|80386' 77 | ;; 78 | arm64) 79 | EXPECTED_ARCH_STRING='arm64|aarch64' 80 | ;; 81 | *) 82 | echo "Unknown arch $TARGET_ARCH" 83 | exit 1 84 | ;; 85 | esac 86 | 87 | case "$TARGET_PLATFORM" in 88 | linux) 89 | EXPECTED_PLATFORM_STRING='ELF' 90 | # Registry-js builds raw on non-Windows, but never used 91 | # Win-version info includes prebuilds for Windows on all platforms 92 | PACKAGE_WHITELIST='registry-js|win-version-info/prebuilds' 93 | ;; 94 | win32) 95 | EXPECTED_PLATFORM_STRING='MS Windows' 96 | PACKAGE_WHITELIST='' 97 | ;; 98 | darwin) 99 | EXPECTED_PLATFORM_STRING='Mach-O' 100 | # Registry-js builds raw on non-Windows, but never used 101 | # Win-version info includes prebuilds for Windows on all platforms 102 | PACKAGE_WHITELIST='registry-js|win-version-info/prebuilds' 103 | ;; 104 | *) 105 | echo "Unknown platform $TARGET_PLATFORM" 106 | exit 1 107 | ;; 108 | esac 109 | 110 | echo "CHECKING FOR BAD CONFIG" 111 | echo "EXPECTING: $EXPECTED_PLATFORM_STRING and $EXPECTED_ARCH_STRING" 112 | echo "WHITELIST: $PACKAGE_WHITELIST" 113 | 114 | # Find all *.node files in the build that `file` doesn't describe with the above 115 | NATIVE_BINARIES=$( 116 | find ./tmp/$TARGET/ \ 117 | -name '*.node' \ 118 | -type f \ 119 | -exec file {} \; \ 120 | | sed "s#^./tmp/$TARGET/##" # Don't match the build targets's own path name! 121 | ) 122 | echo "NATIVE BINS: $NATIVE_BINARIES" 123 | 124 | BAD_PLATFORM_BINS=$(echo "$NATIVE_BINARIES" | grep -v "$EXPECTED_PLATFORM_STRING" || true) 125 | BAD_ARCH_BINS=$(echo "$NATIVE_BINARIES" | grep -vE "$EXPECTED_ARCH_STRING" || true) 126 | 127 | BAD_BINS="$BAD_PLATFORM_BINS 128 | $BAD_ARCH_BINS" 129 | 130 | if [[ ! 
-z "$PACKAGE_WHITELIST" ]]; then 131 | BAD_BINS=$(echo "$BAD_BINS" | grep -vE "$PACKAGE_WHITELIST" || true) 132 | fi 133 | 134 | if [ `echo "$BAD_BINS" | wc -w` -ne 0 ]; then 135 | echo 136 | echo "***** BUILD FAILED *****" 137 | echo 138 | echo "Invalid build! $TARGET build has binaries for the wrong platform." 139 | echo "Bad binaries are:" 140 | echo "$BAD_BINS" 141 | echo 142 | echo "---" 143 | 144 | exit 1 145 | fi 146 | 147 | echo "BUILD SUCCESSFUL" -------------------------------------------------------------------------------- /custom-typings/cors-gate.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'cors-gate' { 2 | import * as express from 'express'; 3 | 4 | interface Options { 5 | origin: string; 6 | strict?: boolean; 7 | allowSafe?: boolean; 8 | failure: (req: express.Request, res: express.Response, next: express.NextFunction) => void; 9 | } 10 | 11 | function corsGate(options: Options): express.RequestHandler; 12 | 13 | export = corsGate; 14 | } -------------------------------------------------------------------------------- /custom-typings/graphql.js.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'graphql.js'; -------------------------------------------------------------------------------- /custom-typings/node-gsettings-wrapper.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'node-gsettings-wrapper'; -------------------------------------------------------------------------------- /custom-typings/tarball-extract.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'tarball-extract'; -------------------------------------------------------------------------------- /nss/README.md: -------------------------------------------------------------------------------- 1 | # NSS & Certutil 2 | 3 | This folder contains prebuilt binaries of 
the NSS tools and corresponding libs, including `certutil`. The folder are named to match Node.js's `process.platform` on the corresponding OSs. 4 | 5 | `certutil` is used to preconfigure Firefox profile's certificate database to trust the HTTP Toolkit certificate authority. We attempt to use any existing `certutil` binary in PATH first, and fall back to the bundled binary if it's not available, or mark Firefox as unavailable if neither work. These binaries aren't included in the npm package for size reasons - in that case, you'll need to ensure certutil is available on your system some other way (for example, download the binaries here and put them in your PATH). 6 | 7 | The files here were downloaded directly from https://tor.eff.org/dist/torbrowser/9.0.9/, in the mar-tools-{linux64,mac64,win64}.zip. They're used unmodified, under the Tor license also in this folder. -------------------------------------------------------------------------------- /nss/darwin/certutil: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/certutil -------------------------------------------------------------------------------- /nss/darwin/createprecomplete.py: -------------------------------------------------------------------------------- 1 | # Any copyright is dedicated to the Public Domain. 2 | # http://creativecommons.org/publicdomain/zero/1.0/ 3 | 4 | # Creates the precomplete file containing the remove and rmdir application 5 | # update instructions which is used to remove files and directories that are no 6 | # longer present in a complete update. The current working directory is used for 7 | # the location to enumerate and to create the precomplete file. 8 | # For symlinks, remove instructions are always generated. 
9 | 10 | import os 11 | 12 | 13 | # TODO When TOR_BROWSER_DATA_OUTSIDE_APP_DIR is used on all platforms, 14 | # we should remove all lines in this file that contain: 15 | # TorBrowser/Data 16 | 17 | def get_build_entries(root_path): 18 | """ Iterates through the root_path, creating a list for each file and 19 | directory. Excludes any file paths ending with channel-prefs.js. 20 | To support Tor Browser updates, excludes: 21 | TorBrowser/Data/Browser/profiles.ini 22 | TorBrowser/Data/Browser/profile.default/bookmarks.html 23 | TorBrowser/Data/Tor/torrc 24 | """ 25 | rel_file_path_set = set() 26 | rel_dir_path_set = set() 27 | for root, dirs, files in os.walk(root_path): 28 | for file_name in files: 29 | parent_dir_rel_path = root[len(root_path)+1:] 30 | rel_path_file = os.path.join(parent_dir_rel_path, file_name) 31 | rel_path_file = rel_path_file.replace("\\", "/") 32 | if not (rel_path_file.endswith("channel-prefs.js") or 33 | rel_path_file.endswith("update-settings.ini") or 34 | rel_path_file == "TorBrowser/Data/Browser/profiles.ini" or 35 | rel_path_file == "TorBrowser/Data/Browser/profile.default/bookmarks.html" or 36 | rel_path_file == "TorBrowser/Data/Tor/torrc" or 37 | rel_path_file.find("distribution/") != -1): 38 | rel_file_path_set.add(rel_path_file) 39 | 40 | for dir_name in dirs: 41 | parent_dir_rel_path = root[len(root_path)+1:] 42 | rel_path_dir = os.path.join(parent_dir_rel_path, dir_name) 43 | rel_path_dir = rel_path_dir.replace("\\", "/")+"/" 44 | if rel_path_dir.find("distribution/") == -1: 45 | if (os.path.islink(rel_path_dir[:-1])): 46 | rel_file_path_set.add(rel_path_dir[:-1]) 47 | else: 48 | rel_dir_path_set.add(rel_path_dir) 49 | 50 | rel_file_path_list = list(rel_file_path_set) 51 | rel_file_path_list.sort(reverse=True) 52 | rel_dir_path_list = list(rel_dir_path_set) 53 | rel_dir_path_list.sort(reverse=True) 54 | 55 | return rel_file_path_list, rel_dir_path_list 56 | 57 | 58 | def generate_precomplete(root_path): 59 | """ Creates the 
precomplete file containing the remove and rmdir 60 | application update instructions. The given directory is used 61 | for the location to enumerate and to create the precomplete file. 62 | """ 63 | rel_path_precomplete = "precomplete" 64 | # If inside a Mac bundle use the root of the bundle for the path. 65 | if os.path.basename(root_path) == "Resources": 66 | root_path = os.path.abspath(os.path.join(root_path, '../../')) 67 | rel_path_precomplete = "Contents/Resources/precomplete" 68 | 69 | precomplete_file_path = os.path.join(root_path, rel_path_precomplete) 70 | # Open the file so it exists before building the list of files and open it 71 | # in binary mode to prevent OS specific line endings. 72 | precomplete_file = open(precomplete_file_path, "wb") 73 | rel_file_path_list, rel_dir_path_list = get_build_entries(root_path) 74 | for rel_file_path in rel_file_path_list: 75 | precomplete_file.writelines("remove \""+rel_file_path+"\"\n") 76 | 77 | for rel_dir_path in rel_dir_path_list: 78 | precomplete_file.writelines("rmdir \""+rel_dir_path+"\"\n") 79 | 80 | precomplete_file.close() 81 | 82 | 83 | if __name__ == "__main__": 84 | generate_precomplete(os.getcwd()) 85 | -------------------------------------------------------------------------------- /nss/darwin/libfreebl3.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libfreebl3.dylib -------------------------------------------------------------------------------- /nss/darwin/libmozglue.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libmozglue.dylib -------------------------------------------------------------------------------- /nss/darwin/libnss3.dylib: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libnss3.dylib -------------------------------------------------------------------------------- /nss/darwin/libnssckbi.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libnssckbi.dylib -------------------------------------------------------------------------------- /nss/darwin/libnssdbm3.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libnssdbm3.dylib -------------------------------------------------------------------------------- /nss/darwin/libsoftokn3.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/libsoftokn3.dylib -------------------------------------------------------------------------------- /nss/darwin/mar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/mar -------------------------------------------------------------------------------- /nss/darwin/mbsdiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/mbsdiff -------------------------------------------------------------------------------- /nss/darwin/modutil: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/modutil -------------------------------------------------------------------------------- /nss/darwin/pk12util: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/pk12util -------------------------------------------------------------------------------- /nss/darwin/shlibsign: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/shlibsign -------------------------------------------------------------------------------- /nss/darwin/signmar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/darwin/signmar -------------------------------------------------------------------------------- /nss/linux/certutil: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/certutil -------------------------------------------------------------------------------- /nss/linux/createprecomplete.py: -------------------------------------------------------------------------------- 1 | # Any copyright is dedicated to the Public Domain. 2 | # http://creativecommons.org/publicdomain/zero/1.0/ 3 | 4 | # Creates the precomplete file containing the remove and rmdir application 5 | # update instructions which is used to remove files and directories that are no 6 | # longer present in a complete update. The current working directory is used for 7 | # the location to enumerate and to create the precomplete file. 
8 | # For symlinks, remove instructions are always generated. 9 | 10 | import os 11 | 12 | 13 | # TODO When TOR_BROWSER_DATA_OUTSIDE_APP_DIR is used on all platforms, 14 | # we should remove all lines in this file that contain: 15 | # TorBrowser/Data 16 | 17 | def get_build_entries(root_path): 18 | """ Iterates through the root_path, creating a list for each file and 19 | directory. Excludes any file paths ending with channel-prefs.js. 20 | To support Tor Browser updates, excludes: 21 | TorBrowser/Data/Browser/profiles.ini 22 | TorBrowser/Data/Browser/profile.default/bookmarks.html 23 | TorBrowser/Data/Tor/torrc 24 | """ 25 | rel_file_path_set = set() 26 | rel_dir_path_set = set() 27 | for root, dirs, files in os.walk(root_path): 28 | for file_name in files: 29 | parent_dir_rel_path = root[len(root_path)+1:] 30 | rel_path_file = os.path.join(parent_dir_rel_path, file_name) 31 | rel_path_file = rel_path_file.replace("\\", "/") 32 | if not (rel_path_file.endswith("channel-prefs.js") or 33 | rel_path_file.endswith("update-settings.ini") or 34 | rel_path_file == "TorBrowser/Data/Browser/profiles.ini" or 35 | rel_path_file == "TorBrowser/Data/Browser/profile.default/bookmarks.html" or 36 | rel_path_file == "TorBrowser/Data/Tor/torrc" or 37 | rel_path_file.find("distribution/") != -1): 38 | rel_file_path_set.add(rel_path_file) 39 | 40 | for dir_name in dirs: 41 | parent_dir_rel_path = root[len(root_path)+1:] 42 | rel_path_dir = os.path.join(parent_dir_rel_path, dir_name) 43 | rel_path_dir = rel_path_dir.replace("\\", "/")+"/" 44 | if rel_path_dir.find("distribution/") == -1: 45 | if (os.path.islink(rel_path_dir[:-1])): 46 | rel_file_path_set.add(rel_path_dir[:-1]) 47 | else: 48 | rel_dir_path_set.add(rel_path_dir) 49 | 50 | rel_file_path_list = list(rel_file_path_set) 51 | rel_file_path_list.sort(reverse=True) 52 | rel_dir_path_list = list(rel_dir_path_set) 53 | rel_dir_path_list.sort(reverse=True) 54 | 55 | return rel_file_path_list, rel_dir_path_list 56 | 57 | 58 | def 
generate_precomplete(root_path): 59 | """ Creates the precomplete file containing the remove and rmdir 60 | application update instructions. The given directory is used 61 | for the location to enumerate and to create the precomplete file. 62 | """ 63 | rel_path_precomplete = "precomplete" 64 | # If inside a Mac bundle use the root of the bundle for the path. 65 | if os.path.basename(root_path) == "Resources": 66 | root_path = os.path.abspath(os.path.join(root_path, '../../')) 67 | rel_path_precomplete = "Contents/Resources/precomplete" 68 | 69 | precomplete_file_path = os.path.join(root_path, rel_path_precomplete) 70 | # Open the file so it exists before building the list of files and open it 71 | # in binary mode to prevent OS specific line endings. 72 | precomplete_file = open(precomplete_file_path, "wb") 73 | rel_file_path_list, rel_dir_path_list = get_build_entries(root_path) 74 | for rel_file_path in rel_file_path_list: 75 | precomplete_file.writelines("remove \""+rel_file_path+"\"\n") 76 | 77 | for rel_dir_path in rel_dir_path_list: 78 | precomplete_file.writelines("rmdir \""+rel_dir_path+"\"\n") 79 | 80 | precomplete_file.close() 81 | 82 | 83 | if __name__ == "__main__": 84 | generate_precomplete(os.getcwd()) 85 | -------------------------------------------------------------------------------- /nss/linux/libfreeblpriv3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libfreeblpriv3.so -------------------------------------------------------------------------------- /nss/linux/libmozsqlite3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libmozsqlite3.so -------------------------------------------------------------------------------- /nss/linux/libnspr4.so: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libnspr4.so -------------------------------------------------------------------------------- /nss/linux/libnss3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libnss3.so -------------------------------------------------------------------------------- /nss/linux/libnssckbi.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libnssckbi.so -------------------------------------------------------------------------------- /nss/linux/libnssdbm3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libnssdbm3.so -------------------------------------------------------------------------------- /nss/linux/libnssutil3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libnssutil3.so -------------------------------------------------------------------------------- /nss/linux/libplc4.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libplc4.so -------------------------------------------------------------------------------- /nss/linux/libplds4.so: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libplds4.so -------------------------------------------------------------------------------- /nss/linux/libsmime3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libsmime3.so -------------------------------------------------------------------------------- /nss/linux/libsoftokn3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libsoftokn3.so -------------------------------------------------------------------------------- /nss/linux/libssl3.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/libssl3.so -------------------------------------------------------------------------------- /nss/linux/mar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/mar -------------------------------------------------------------------------------- /nss/linux/mbsdiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/mbsdiff -------------------------------------------------------------------------------- /nss/linux/modutil: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/modutil 
-------------------------------------------------------------------------------- /nss/linux/pk12util: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/pk12util -------------------------------------------------------------------------------- /nss/linux/shlibsign: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/shlibsign -------------------------------------------------------------------------------- /nss/linux/signmar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/linux/signmar -------------------------------------------------------------------------------- /nss/win32/certutil.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/certutil.exe -------------------------------------------------------------------------------- /nss/win32/createprecomplete.py: -------------------------------------------------------------------------------- 1 | # Any copyright is dedicated to the Public Domain. 2 | # http://creativecommons.org/publicdomain/zero/1.0/ 3 | 4 | # Creates the precomplete file containing the remove and rmdir application 5 | # update instructions which is used to remove files and directories that are no 6 | # longer present in a complete update. The current working directory is used for 7 | # the location to enumerate and to create the precomplete file. 8 | # For symlinks, remove instructions are always generated. 
9 | 10 | import os 11 | 12 | 13 | # TODO When TOR_BROWSER_DATA_OUTSIDE_APP_DIR is used on all platforms, 14 | # we should remove all lines in this file that contain: 15 | # TorBrowser/Data 16 | 17 | def get_build_entries(root_path): 18 | """ Iterates through the root_path, creating a list for each file and 19 | directory. Excludes any file paths ending with channel-prefs.js. 20 | To support Tor Browser updates, excludes: 21 | TorBrowser/Data/Browser/profiles.ini 22 | TorBrowser/Data/Browser/profile.default/bookmarks.html 23 | TorBrowser/Data/Tor/torrc 24 | """ 25 | rel_file_path_set = set() 26 | rel_dir_path_set = set() 27 | for root, dirs, files in os.walk(root_path): 28 | for file_name in files: 29 | parent_dir_rel_path = root[len(root_path)+1:] 30 | rel_path_file = os.path.join(parent_dir_rel_path, file_name) 31 | rel_path_file = rel_path_file.replace("\\", "/") 32 | if not (rel_path_file.endswith("channel-prefs.js") or 33 | rel_path_file.endswith("update-settings.ini") or 34 | rel_path_file == "TorBrowser/Data/Browser/profiles.ini" or 35 | rel_path_file == "TorBrowser/Data/Browser/profile.default/bookmarks.html" or 36 | rel_path_file == "TorBrowser/Data/Tor/torrc" or 37 | rel_path_file.find("distribution/") != -1): 38 | rel_file_path_set.add(rel_path_file) 39 | 40 | for dir_name in dirs: 41 | parent_dir_rel_path = root[len(root_path)+1:] 42 | rel_path_dir = os.path.join(parent_dir_rel_path, dir_name) 43 | rel_path_dir = rel_path_dir.replace("\\", "/")+"/" 44 | if rel_path_dir.find("distribution/") == -1: 45 | if (os.path.islink(rel_path_dir[:-1])): 46 | rel_file_path_set.add(rel_path_dir[:-1]) 47 | else: 48 | rel_dir_path_set.add(rel_path_dir) 49 | 50 | rel_file_path_list = list(rel_file_path_set) 51 | rel_file_path_list.sort(reverse=True) 52 | rel_dir_path_list = list(rel_dir_path_set) 53 | rel_dir_path_list.sort(reverse=True) 54 | 55 | return rel_file_path_list, rel_dir_path_list 56 | 57 | 58 | def generate_precomplete(root_path): 59 | """ Creates the 
precomplete file containing the remove and rmdir 60 | application update instructions. The given directory is used 61 | for the location to enumerate and to create the precomplete file. 62 | """ 63 | rel_path_precomplete = "precomplete" 64 | # If inside a Mac bundle use the root of the bundle for the path. 65 | if os.path.basename(root_path) == "Resources": 66 | root_path = os.path.abspath(os.path.join(root_path, '../../')) 67 | rel_path_precomplete = "Contents/Resources/precomplete" 68 | 69 | precomplete_file_path = os.path.join(root_path, rel_path_precomplete) 70 | # Open the file so it exists before building the list of files and open it 71 | # in binary mode to prevent OS specific line endings. 72 | precomplete_file = open(precomplete_file_path, "wb") 73 | rel_file_path_list, rel_dir_path_list = get_build_entries(root_path) 74 | for rel_file_path in rel_file_path_list: 75 | precomplete_file.writelines("remove \""+rel_file_path+"\"\n") 76 | 77 | for rel_dir_path in rel_dir_path_list: 78 | precomplete_file.writelines("rmdir \""+rel_dir_path+"\"\n") 79 | 80 | precomplete_file.close() 81 | 82 | 83 | if __name__ == "__main__": 84 | generate_precomplete(os.getcwd()) 85 | -------------------------------------------------------------------------------- /nss/win32/freebl3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/freebl3.dll -------------------------------------------------------------------------------- /nss/win32/mar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/mar -------------------------------------------------------------------------------- /nss/win32/mbsdiff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/mbsdiff -------------------------------------------------------------------------------- /nss/win32/modutil.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/modutil.exe -------------------------------------------------------------------------------- /nss/win32/mozglue.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/mozglue.dll -------------------------------------------------------------------------------- /nss/win32/nss3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/nss3.dll -------------------------------------------------------------------------------- /nss/win32/nssckbi.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/nssckbi.dll -------------------------------------------------------------------------------- /nss/win32/nssdbm3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/nssdbm3.dll -------------------------------------------------------------------------------- /nss/win32/pk12util.exe: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/pk12util.exe -------------------------------------------------------------------------------- /nss/win32/shlibsign.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/shlibsign.exe -------------------------------------------------------------------------------- /nss/win32/signmar.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/signmar.exe -------------------------------------------------------------------------------- /nss/win32/softokn3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/nss/win32/softokn3.dll -------------------------------------------------------------------------------- /overrides/frida/android/android-proxy-override.js: -------------------------------------------------------------------------------- 1 | /************************************************************************************************** 2 | * 3 | * The first step in intercepting HTTP & HTTPS traffic is to set the default proxy settings, 4 | * telling the app that all requests should be sent via our HTTP proxy. 5 | * 6 | * In this script, we set that up via a few different mechanisms, which cumulatively should 7 | * ensure that all connections are sent via the proxy, even if they attempt to use their 8 | * own custom proxy configurations to avoid this. 
9 | * 10 | * Despite that, this still only covers well behaved apps - it's still possible for apps 11 | * to send network traffic directly if they're determined to do so, or if they're built 12 | * with a framework that does not do this by default (Flutter is notably in this category). 13 | * To handle those less tidy cases, we manually capture traffic to recognized target ports 14 | * in the native connect() hook script. 15 | * 16 | * Source available at https://github.com/httptoolkit/frida-interception-and-unpinning/ 17 | * SPDX-License-Identifier: AGPL-3.0-or-later 18 | * SPDX-FileCopyrightText: Tim Perry 19 | * 20 | *************************************************************************************************/ 21 | 22 | Java.perform(() => { 23 | // Set default JVM system properties for the proxy address. Notably these are used 24 | // to initialize WebView configuration. 25 | Java.use('java.lang.System').setProperty('http.proxyHost', PROXY_HOST); 26 | Java.use('java.lang.System').setProperty('http.proxyPort', PROXY_PORT.toString()); 27 | Java.use('java.lang.System').setProperty('https.proxyHost', PROXY_HOST); 28 | Java.use('java.lang.System').setProperty('https.proxyPort', PROXY_PORT.toString()); 29 | 30 | Java.use('java.lang.System').clearProperty('http.nonProxyHosts'); 31 | Java.use('java.lang.System').clearProperty('https.nonProxyHosts'); 32 | 33 | // Some Android internals attempt to reset these settings to match the device configuration. 
34 | // We block that directly here: 35 | const controlledSystemProperties = [ 36 | 'http.proxyHost', 37 | 'http.proxyPort', 38 | 'https.proxyHost', 39 | 'https.proxyPort', 40 | 'http.nonProxyHosts', 41 | 'https.nonProxyHosts' 42 | ]; 43 | Java.use('java.lang.System').clearProperty.implementation = function (property) { 44 | if (controlledSystemProperties.includes(property)) { 45 | if (DEBUG_MODE) console.log(`Ignoring attempt to clear ${property} system property`); 46 | return this.getProperty(property); 47 | } 48 | return this.clearProperty(...arguments); 49 | } 50 | Java.use('java.lang.System').setProperty.implementation = function (property) { 51 | if (controlledSystemProperties.includes(property)) { 52 | if (DEBUG_MODE) console.log(`Ignoring attempt to override ${property} system property`); 53 | return this.getProperty(property); 54 | } 55 | return this.setProperty(...arguments); 56 | } 57 | 58 | // Configure the app's proxy directly, via the app connectivity manager service: 59 | const ConnectivityManager = Java.use('android.net.ConnectivityManager'); 60 | const ProxyInfo = Java.use('android.net.ProxyInfo'); 61 | ConnectivityManager.getDefaultProxy.implementation = () => ProxyInfo.$new(PROXY_HOST, PROXY_PORT, ''); 62 | // (Not clear if this works 100% - implying there are ConnectivityManager subclasses handling this) 63 | 64 | console.log(`== Proxy system configuration overridden to ${PROXY_HOST}:${PROXY_PORT} ==`); 65 | 66 | // Configure the proxy indirectly, by overriding the return value for all ProxySelectors everywhere: 67 | const Collections = Java.use('java.util.Collections'); 68 | const ProxyType = Java.use('java.net.Proxy$Type'); 69 | const InetSocketAddress = Java.use('java.net.InetSocketAddress'); 70 | const ProxyCls = Java.use('java.net.Proxy'); // 'Proxy' is reserved in JS 71 | 72 | const targetProxy = ProxyCls.$new( 73 | ProxyType.HTTP.value, 74 | InetSocketAddress.$new(PROXY_HOST, PROXY_PORT) 75 | ); 76 | const getTargetProxyList = () => 
Collections.singletonList(targetProxy); 77 | 78 | const ProxySelector = Java.use('java.net.ProxySelector'); 79 | 80 | // Find every implementation of ProxySelector by quickly scanning method signatures, and 81 | // then checking whether each match actually implements java.net.ProxySelector: 82 | const proxySelectorClasses = Java.enumerateMethods('*!select(java.net.URI): java.util.List/s') 83 | .flatMap((matchingLoader) => matchingLoader.classes 84 | .map((classData) => Java.use(classData.name)) 85 | .filter((Cls) => ProxySelector.class.isAssignableFrom(Cls.class)) 86 | ); 87 | 88 | // Replace the 'select' of every implementation, so they all send traffic to us: 89 | proxySelectorClasses.forEach(ProxySelectorCls => { 90 | if (DEBUG_MODE) { 91 | console.log('Rewriting', ProxySelectorCls.toString()); 92 | } 93 | ProxySelectorCls.select.implementation = () => getTargetProxyList() 94 | }); 95 | 96 | console.log(`== Proxy configuration overridden to ${PROXY_HOST}:${PROXY_PORT} ==`); 97 | }); 98 | 99 | -------------------------------------------------------------------------------- /overrides/frida/android/android-system-certificate-injection.js: -------------------------------------------------------------------------------- 1 | /************************************************************************************************** 2 | * 3 | * Once we have captured traffic (once it's being sent to our proxy port) the next step is 4 | * to ensure any clients using TLS (HTTPS) trust our CA certificate, to allow us to intercept 5 | * encrypted connections successfully. 6 | * 7 | * This script does so by attaching to the internals of Conscrypt (the Android SDK's standard 8 | * TLS implementation) and pre-adding our certificate to the 'already trusted' cache, so that 9 | * future connections trust it implicitly. This ensures that all normal uses of Android APIs 10 | * for HTTPS & TLS will allow interception. 
11 | * 12 | * This does not handle all standalone certificate pinning techniques - where the application 13 | * actively rejects certificates that are trusted by default on the system. That's dealt with 14 | * in the separate certificate unpinning script. 15 | * 16 | * Source available at https://github.com/httptoolkit/frida-interception-and-unpinning/ 17 | * SPDX-License-Identifier: AGPL-3.0-or-later 18 | * SPDX-FileCopyrightText: Tim Perry 19 | * 20 | *************************************************************************************************/ 21 | 22 | Java.perform(() => { 23 | // First, we build a JVM representation of our certificate: 24 | const String = Java.use("java.lang.String"); 25 | const ByteArrayInputStream = Java.use('java.io.ByteArrayInputStream'); 26 | const CertFactory = Java.use('java.security.cert.CertificateFactory'); 27 | 28 | let cert; 29 | try { 30 | const certFactory = CertFactory.getInstance("X.509"); 31 | const certBytes = String.$new(CERT_PEM).getBytes(); 32 | cert = certFactory.generateCertificate(ByteArrayInputStream.$new(certBytes)); 33 | } catch (e) { 34 | console.error('Could not parse provided certificate PEM!'); 35 | console.error(e); 36 | Java.use('java.lang.System').exit(1); 37 | } 38 | 39 | // Then we hook TrustedCertificateIndex. This is used for caching known trusted certs within Conscrypt - 40 | // by prepopulating all instances, we ensure that all TrustManagerImpls (and potentially other 41 | // things) automatically trust our certificate specifically (without disabling validation entirely). 42 | // This should apply to Android v7+ - previous versions used SSLContext & X509TrustManager. 
43 | [ 44 | 'com.android.org.conscrypt.TrustedCertificateIndex', 45 | 'org.conscrypt.TrustedCertificateIndex', // Might be used (com.android is synthetic) - unclear 46 | 'org.apache.harmony.xnet.provider.jsse.TrustedCertificateIndex' // Used in Apache Harmony version of Conscrypt 47 | ].forEach((TrustedCertificateIndexClassname, i) => { 48 | let TrustedCertificateIndex; 49 | try { 50 | TrustedCertificateIndex = Java.use(TrustedCertificateIndexClassname); 51 | } catch (e) { 52 | if (i === 0) { 53 | throw new Error(`${TrustedCertificateIndexClassname} not found - could not inject system certificate`); 54 | } else { 55 | // Other classnames are optional fallbacks 56 | if (DEBUG_MODE) { 57 | console.log(`[ ] Skipped cert injection for ${TrustedCertificateIndexClassname} (not present)`); 58 | } 59 | return; 60 | } 61 | } 62 | 63 | TrustedCertificateIndex.$init.overloads.forEach((overload) => { 64 | overload.implementation = function () { 65 | this.$init(...arguments); 66 | // Index our cert as already trusted, right from the start: 67 | this.index(cert); 68 | } 69 | }); 70 | 71 | TrustedCertificateIndex.reset.overloads.forEach((overload) => { 72 | overload.implementation = function () { 73 | const result = this.reset(...arguments); 74 | // Index our cert in here again, since the reset removes it: 75 | this.index(cert); 76 | return result; 77 | }; 78 | }); 79 | 80 | if (DEBUG_MODE) console.log(`[+] Injected cert into ${TrustedCertificateIndexClassname}`); 81 | }); 82 | 83 | // This effectively adds us to the system certs, and also defeats quite a bit of basic certificate 84 | // pinning too! It auto-trusts us in any implementation that uses TrustManagerImpl (Conscrypt) as 85 | // the underlying cert checking component. 
86 | 87 | console.log('== System certificate trust injected =='); 88 | }); -------------------------------------------------------------------------------- /overrides/frida/frida-script.js: -------------------------------------------------------------------------------- 1 | // Placeholder script - this will be deleted soon, it exists purely because the 2 | // original script URL was popular and still gets many direct visits. 3 | 4 | throw new Error(` 5 | 6 | This frida-script.js script has now been replaced with a significantly 7 | upgraded set of scripts. 8 | 9 | To upgrade to this new version, please visit the GitHub repo at: 10 | https://github.com/httptoolkit/frida-interception-and-unpinning/ 11 | 12 | If you'd still like to use the original script, the final version is 13 | available via GitHub here: 14 | https://github.com/httptoolkit/frida-interception-and-unpinning/tree/4d477da 15 | 16 | This will not be maintained in future, and is already missing many unpinning 17 | targets and other features from the latest version. Please don't file issues 18 | about any bugs or problems with this script. 19 | 20 | `); 21 | -------------------------------------------------------------------------------- /overrides/frida/ios/ios-connect-hook.js: -------------------------------------------------------------------------------- 1 | /** 2 | * In some cases, proxy configuration by itself won't work. This notably includes Flutter apps (which ignore 3 | * system/JVM configuration entirely) and plausibly other apps intentionally ignoring proxies. To handle that 4 | * we hook low-level connection attempts within Network Framework directly, to redirect traffic on all ports 5 | * to the target. 6 | * 7 | * This handles all attempts to connect an outgoing socket, and for all TCP connections opened it will 8 | * manually replace the nw_connection_create() endpoint parameter so that the socket connects to the proxy 9 | * instead of the 'real' destination. 
10 | * 11 | * This doesn't help with certificate trust (you still need some kind of certificate setup) but it does ensure 12 | * the proxy receives all connections (and so will see if connections don't trust its CA). It's still useful 13 | * to do proxy config alongside this, as applications may behave a little more 'correctly' if they're aware 14 | * they're using a proxy rather than doing so unknowingly. 15 | * 16 | * Source available at https://github.com/httptoolkit/frida-interception-and-unpinning/ 17 | * SPDX-License-Identifier: AGPL-3.0-or-later 18 | * SPDX-FileCopyrightText: Tim Perry 19 | */ 20 | 21 | // This is the method we're going to patch: 22 | // https://developer.apple.com/documentation/network/2976677-nw_connection_create (iOS 12+) 23 | const nw_connection_create = Module.findExportByName('libnetwork.dylib', 'nw_connection_create'); 24 | 25 | // This is the method to make a new endpoint to connect to: 26 | // https://developer.apple.com/documentation/network/2976720-nw_endpoint_create_host (iOS 12+) 27 | const nw_endpoint_create_host = new NativeFunction( 28 | Module.findExportByName('libnetwork.dylib', 'nw_endpoint_create_host'), 29 | 'pointer', ['pointer', 'pointer'] 30 | ); 31 | 32 | const newHostStr = Memory.allocUtf8String(PROXY_HOST); 33 | const newPortStr = Memory.allocUtf8String(PROXY_PORT.toString()); 34 | 35 | Interceptor.attach(nw_connection_create, { 36 | onEnter: function (args) { 37 | // Replace the endpoint argument entirely with our own: 38 | args[0] = nw_endpoint_create_host(newHostStr, newPortStr); 39 | } 40 | }); -------------------------------------------------------------------------------- /overrides/frida/utilities/test-ip-connectivity.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This script can be useful as part of a pre-setup or automated configuration process, 3 | * where you don't know why IP address is best used to reach your proxy server from the 4 | * target device. 
You can run this script first with a list of IP addresses, and wait for 5 | * the 'connected' message to confirm the working IP (or 'connection-failed' if none work) 6 | * before then injecting the config script and the rest of your script code. 7 | * 8 | * Source available at https://github.com/httptoolkit/frida-interception-and-unpinning/ 9 | * SPDX-License-Identifier: AGPL-3.0-or-later 10 | * SPDX-FileCopyrightText: Tim Perry 11 | */ 12 | 13 | // Modify this to specify the addresses you'd like to test: 14 | const IP_ADDRESSES_TO_TEST = [ 15 | ]; 16 | 17 | const TARGET_PORT = 0; 18 | 19 | 20 | 21 | 22 | // ---------------------------------------------------------------------------- 23 | // You don't need to modify any of the below - this is the logic that does the 24 | // checks themselves. 25 | // ---------------------------------------------------------------------------- 26 | 27 | if (IP_ADDRESSES_TO_TEST.length === 0) { 28 | throw new Error('No IP addresses provided to check - please modify IP_ADDRESSES_TO_TEST'); 29 | } 30 | 31 | if (TARGET_PORT === 0) { 32 | throw new Error('No target port provided to check - please modify TARGET_PORT'); 33 | } 34 | 35 | async function testAddress(ip, port) { 36 | try { 37 | const socket = await Socket.connect({ host: ip, port }); 38 | socket.close(); 39 | return true; 40 | } catch (e) { 41 | return false; 42 | } 43 | } 44 | 45 | let completed = false; 46 | let testsCompleted = 0; 47 | IP_ADDRESSES_TO_TEST.forEach(async (ip) => { 48 | const result = await testAddress(ip, TARGET_PORT); 49 | testsCompleted += 1; 50 | 51 | if (completed) return; // Ignore results after the first connection 52 | 53 | if (result) { 54 | completed = true; 55 | send({ 56 | type: 'connected', 57 | ip, 58 | port: TARGET_PORT 59 | }); 60 | } 61 | 62 | if (testsCompleted === IP_ADDRESSES_TO_TEST.length && !completed) { 63 | completed = true; 64 | send({ 65 | type: 'connection-failed' 66 | }); 67 | } 68 | }); 
-------------------------------------------------------------------------------- /overrides/gems/cloudinary.rb: -------------------------------------------------------------------------------- 1 | # Remove this module from LOAD_PATH, so we can load the real one 2 | gem_override_path = File.expand_path(__dir__) 3 | $LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 4 | 5 | # Load the real module, and inject our settings 6 | require 'cloudinary' 7 | Cloudinary::config.api_proxy = ENV['HTTP_PROXY'] 8 | 9 | # Put this override directory back on LOAD_PATH 10 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/gems/http.rb: -------------------------------------------------------------------------------- 1 | # Remove this module from LOAD_PATH, so we can load the real one 2 | gem_override_path = File.expand_path(__dir__) 3 | $LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 4 | 5 | # Load http.rb, and inject our proxy settings as the default for all requests 6 | require 'http' 7 | module HTTP 8 | module RequestHttpToolkitExtensions 9 | def initialize(opts) 10 | if not opts[:proxy] or opts[:proxy].keys.size < 2 11 | proxy_from_env = URI(opts.fetch(:uri).to_s).find_proxy 12 | opts[:proxy] = { 13 | proxy_address: proxy_from_env.host, 14 | proxy_port: proxy_from_env.port 15 | } 16 | end 17 | super(opts) 18 | end 19 | end 20 | 21 | class Request 22 | prepend RequestHttpToolkitExtensions 23 | end 24 | end 25 | 26 | # Put this override directory back on LOAD_PATH 27 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/gems/net/http.rb: -------------------------------------------------------------------------------- 1 | # Remove this module from LOAD_PATH, so we can load the real one 2 | gem_override_path = File.expand_path('..', __dir__) # parent dir, we're a subfolder 3 | 
$LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 4 | 5 | # Override net/http, the built-in HTTP module to inject our cert 6 | # This isn't necessary with OpenSSL (doesn't hurt), but LibreSSL ignores 7 | # the SSL_CERT_FILE setting, so we need to be more explicit: 8 | require 'net/http' 9 | module Net 10 | module HTTPHttpToolkitExtensions 11 | def ca_file=(path) 12 | # If you try to use a certificate, use ours instead 13 | super(ENV['SSL_CERT_FILE']) 14 | end 15 | 16 | def cert_store=(store) 17 | # If you try to use a whole store of certs, use ours instead 18 | self.ca_file = ENV['SSL_CERT_FILE'] 19 | end 20 | 21 | def use_ssl=(val) 22 | # If you try to use SSL, make sure you trust our cert first 23 | self.ca_file = ENV['SSL_CERT_FILE'] 24 | super(val) 25 | end 26 | end 27 | 28 | class HTTP 29 | prepend HTTPHttpToolkitExtensions 30 | end 31 | end 32 | 33 | # Put this override directory back on LOAD_PATH 34 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/gems/net/https.rb: -------------------------------------------------------------------------------- 1 | # require 'net/https' is not necessary in modern ruby, as net/http 2 | # can handle HTTPS all by itself. That said, it's still used in 3 | # places, and its use of require_relative to load 'http' means that 4 | # it avoids our net/http hook. 5 | 6 | # When using standard OpenSSL that's not a big problem, because 7 | # we also set SSL_CERT_FILE, which ensures we trust the certificate. 8 | # In some environments though (default Mac Ruby installs) that 9 | # variable is ignored, and so the net/http hook is *necessary*. 10 | 11 | # All this file does is import our hooked HTTP version, before 12 | # running the real module as normal, to guarantee the hook is 13 | # always in place in every case. 
14 | 15 | require_relative 'http' 16 | 17 | # Remove this module from LOAD_PATH, so we can load the real one 18 | gem_override_path = File.expand_path('..', __dir__) # parent dir, we're a subfolder 19 | $LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 20 | 21 | # Load the real net/https module as normal 22 | require 'net/https' 23 | 24 | # Put this override directory back on LOAD_PATH again 25 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/gems/stripe.rb: -------------------------------------------------------------------------------- 1 | # Remove this module from LOAD_PATH, so we can load the real one 2 | gem_override_path = File.expand_path(__dir__) 3 | $LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 4 | 5 | # Load stripe, and inject our certificate 6 | require 'stripe' 7 | Stripe.ca_bundle_path=ENV['SSL_CERT_FILE'] 8 | 9 | # Put this override directory back on LOAD_PATH 10 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/gems/uri/generic.rb: -------------------------------------------------------------------------------- 1 | # Remove this module from LOAD_PATH, so we can load the real one 2 | gem_override_path = File.expand_path('..', __dir__) # parent dir, we're a subfolder 3 | $LOAD_PATH.reject! { |path| File.expand_path(path) == gem_override_path } 4 | 5 | # Load uri/generic, and inject our proxy settings as the default for all requests 6 | require 'uri/generic' 7 | module URI 8 | class Generic 9 | def find_proxy 10 | # Real code for this avoids it in various cases, if some CGI env vars 11 | # are set, or for 127.*.*.* requests. We want to ensure we use it always. 
12 | URI.parse(ENV['HTTP_PROXY']) 13 | end 14 | end 15 | end 16 | 17 | # Put this override directory back on LOAD_PATH 18 | $LOAD_PATH.unshift(gem_override_path) -------------------------------------------------------------------------------- /overrides/java-agent.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/overrides/java-agent.jar -------------------------------------------------------------------------------- /overrides/js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "js-overrides", 3 | "description": "Scripts used to inject HTTP Toolkit settings into Node.js targets, as part of HTTP Toolkit Server", 4 | "repository": "httptoolkit/httptoolkit-server", 5 | "license": "AGPL-3.0-or-later", 6 | "dependencies": { 7 | "global-agent": "3.0.0", 8 | "global-tunnel-ng": "2.7.1", 9 | "undici": "^5.29.0" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /overrides/js/prepend-electron.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Injected into Electron via the debug protocol before any user code is run. 
3 | */ 4 | 5 | // Wrap all normal node HTTP APIs 6 | require('./prepend-node'); 7 | 8 | module.exports = function reconfigureElectron(params) { 9 | let electronWrapped = false; 10 | 11 | // Reconfigure electron slightly too 12 | const wrapModule = require('./wrap-require'); 13 | wrapModule('electron', function wrapElectron (loadedModule) { 14 | if ( 15 | electronWrapped || 16 | !loadedModule.app || 17 | !loadedModule.app.commandLine 18 | ) return; 19 | 20 | electronWrapped = true; 21 | 22 | const app = loadedModule.app; 23 | 24 | app.commandLine.appendSwitch('proxy-server', process.env.HTTP_PROXY); 25 | app.commandLine.appendSwitch('proxy-bypass-list', '<-loopback>'); 26 | 27 | app.commandLine.appendSwitch( 28 | 'ignore-certificate-errors-spki-list', params.spkiFingerprint 29 | ); 30 | 31 | app.on('quit', () => { 32 | // This means the user has exited the app while HTTP Toolkit is still running. That's fine, 33 | // but it normally won't exit, since we still have a debugger attached. If we can, we use 34 | // the experimental node 8+ inspector API to disconnect it. If not, we just kill the process 35 | // after a brief delay to allow any other cleanup 36 | try { 37 | require('inspector').close(); 38 | } catch (e) { 39 | console.log('Could not disconnect app via inspector, killing manually', e); 40 | setTimeout(() => process.exit(0), 1000); 41 | } 42 | }); 43 | 44 | // Register a cert verifier for the default session, so that electron.net calls 45 | // can have their certificates manually verified. 
46 | app.on('ready', () => { 47 | loadedModule.session.defaultSession.setCertificateVerifyProc((req, callback) => { 48 | if ( 49 | req.certificate && 50 | req.certificate.issuerCert && 51 | req.certificate.issuerCert.data === params.newlineEncodedCertData 52 | ) { 53 | callback(0); // The cert is good, I promise 54 | } else { 55 | callback(-3); // Fallback to Chromium's own opinion 56 | } 57 | }); 58 | }) 59 | 60 | // Also handle explicitly certificate errors from Electron in the standard way 61 | app.on('certificate-error', (event, webContents, url, error, certificate, callback) => { 62 | if ( 63 | certificate.issuerCert && 64 | certificate.issuerCert.data === params.newlineEncodedCertData 65 | ) { 66 | event.preventDefault(); 67 | callback(true); 68 | } else { 69 | callback(false); 70 | } 71 | }); 72 | }, true); 73 | 74 | wrapModule('crypto', function wrapCrypto () { 75 | const NativeSecureContext = process.binding('crypto').SecureContext; 76 | const addRootCerts = NativeSecureContext.prototype.addRootCerts; 77 | NativeSecureContext.prototype.addRootCerts = function() { 78 | const ret = addRootCerts.apply(this,arguments); 79 | this.addCACert(params.newlineEncodedCertData); 80 | return ret; 81 | }; 82 | }, true); 83 | }; -------------------------------------------------------------------------------- /overrides/js/prepend-node.js: -------------------------------------------------------------------------------- 1 | /** 2 | * --require'd by node before loading any other modules. The --require 3 | * option is injected both using NODE_OPTIONS and with a node wrapper in 4 | * PATH to handle various potential cases (node dedupes --require anyway). 5 | * 6 | * This file sets up a global agent for the http & https modules, 7 | * plus tweaks various other HTTP clients that need nudges, so they 8 | * all correctly pick up the proxy from the environment. 9 | * 10 | * Tested against Node 6, 8, 10, 12, 14, 16 & 18. 
11 | */ 12 | 13 | const wrapModule = require('./wrap-require'); 14 | 15 | // These should always both be set identically by HTTP Toolkit, so this should be a no-op. 16 | // Unfortunately, on Windows env vars are case insensitive, and the logic to handle this 17 | // in worker threads this has a bug in some versions, so this is required. More details in 18 | // https://github.com/httptoolkit/httptoolkit-server/issues/91. 19 | if (process.env.http_proxy && !process.env.HTTP_PROXY) { 20 | process.env.HTTP_PROXY = process.env.http_proxy; 21 | } 22 | 23 | wrapModule('axios', function wrapAxios (loadedModule) { 24 | // Global agent handles this automatically, if used (i.e. Node >= 10) 25 | if (global.GLOBAL_AGENT) return; 26 | 27 | // Disable built-in proxy support, to let global-tunnel take precedence 28 | // Supported back to the very first release of Axios 29 | loadedModule.defaults.proxy = false; 30 | }); 31 | 32 | wrapModule('request', function wrapRequest (loadedModule) { 33 | // Global agent handles this automatically, if used (i.e. Node >= 10) 34 | if (global.GLOBAL_AGENT) return; 35 | 36 | // Is this Request >= 2.17? 37 | // Before then proxy support isn't a problem anyway 38 | if (!loadedModule.defaults) return; 39 | 40 | // Have we intercepted this already? 41 | if (loadedModule.INTERCEPTED_BY_HTTPTOOLKIT) return; 42 | 43 | const fixedModule = loadedModule.defaults({ proxy: false }); 44 | fixedModule.INTERCEPTED_BY_HTTPTOOLKIT = true; 45 | return fixedModule; 46 | }); 47 | 48 | wrapModule('superagent', function wrapSuperagent (loadedModule) { 49 | // Global agent handles this automatically, if used (i.e. Node >= 10) 50 | if (global.GLOBAL_AGENT) return; 51 | 52 | // Have we intercepted this already? 53 | if (loadedModule.INTERCEPTED_BY_HTTPTOOLKIT) return; 54 | loadedModule.INTERCEPTED_BY_HTTPTOOLKIT = true; 55 | 56 | // Global tunnel doesn't successfully reconfigure superagent. 57 | // To fix it, we forcibly override the agent property on every request. 
58 | const originalRequestMethod = loadedModule.Request.prototype.request; 59 | loadedModule.Request.prototype.request = function () { 60 | if (this.url.indexOf('https:') === 0) { 61 | this._agent = require('https').globalAgent; 62 | } else { 63 | this._agent = require('http').globalAgent; 64 | } 65 | return originalRequestMethod.apply(this, arguments); 66 | }; 67 | }); 68 | 69 | wrapModule('undici', function wrapUndici (loadedModule) { 70 | const ProxyAgent = loadedModule.ProxyAgent; 71 | const setGlobalDispatcher = loadedModule.setGlobalDispatcher; 72 | 73 | // Old Undici release, which can't be intercepted: 74 | if (!ProxyAgent || !setGlobalDispatcher) return; 75 | 76 | setGlobalDispatcher( 77 | new ProxyAgent(process.env.HTTP_PROXY) 78 | ); 79 | }); 80 | 81 | wrapModule('stripe', function wrapStripe (loadedModule) { 82 | if (loadedModule.INTERCEPTED_BY_HTTPTOOLKIT) return; 83 | 84 | return Object.assign( 85 | function () { 86 | // In Stripe v8+ setHttpAgent is deprecated and a config param is preferred 87 | const agentConfigSupported = !loadedModule.DEFAULT_HOST; 88 | 89 | // Set by global-tunnel in Node < 10 (or global-agent in 11.7+) 90 | const agent = require('https').globalAgent; 91 | 92 | if (agentConfigSupported) { 93 | const [apiKey, configOption] = arguments; 94 | 95 | const config = { 96 | ...configOption, 97 | httpAgent: agent // Add our agent to the config object 98 | } 99 | 100 | return loadedModule.call(this, apiKey, config); 101 | } else { 102 | const result = loadedModule.apply(this, arguments); 103 | result.setHttpAgent(agent); 104 | return result; 105 | } 106 | }, 107 | loadedModule, 108 | { INTERCEPTED_BY_HTTPTOOLKIT: true } 109 | ); 110 | }); 111 | 112 | // We always install a global HTTP agent, to ensure that everything using the base HTTP module is intercepted 113 | // by default. This avoids issues where hooks don't fire on ESM imports by just enabling this in all cases. 
114 | const MAJOR_NODEJS_VERSION = parseInt(process.version.slice(1).split('.')[0], 10); 115 | if (MAJOR_NODEJS_VERSION >= 10) { 116 | // `global-agent` works with Node.js v10 and above. 117 | const globalAgent = require('global-agent'); 118 | globalAgent.bootstrap(); 119 | } else { 120 | // `global-tunnel-ng` works only with Node.js v10 and below. 121 | const globalTunnel = require('global-tunnel-ng'); 122 | globalTunnel.initialize(); 123 | } 124 | 125 | if (MAJOR_NODEJS_VERSION >= 18 || global.fetch) { 126 | // Node 18 enables fetch by default (available previously behind a flag). This does not use 127 | // the existing agent API, so is not intercepted by global-agent. Instead, the only way to 128 | // set the HTTP proxy is to separately import Undici (used internally by Node) and configure 129 | // Undici's global proxy agent. We bundle our own Undici dep so we can do this reliably, 130 | // and here we import it to trigger the Undici setup hook defined above. 131 | require('undici'); 132 | } -------------------------------------------------------------------------------- /overrides/js/wrap-require.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Intercept calls to require() certain modules, to monkey-patch them. 3 | * 4 | * This modules intercepts all require calls. For all modules previously 5 | * registered via wrapModule, it runs the registered wrapper on the loaded 6 | * module before it is returned to the original require() call. 7 | * 8 | * This only works for require(), not for ESM Import. 
Node.js import hooks 9 | * are very experimental and not yet usable, but we should extend this to 10 | * support those in future: https://github.com/nodejs/modules/issues/351 11 | */ 12 | 13 | // Grab the built-in module loader that we're going to intercept 14 | const mod = require('module'); 15 | const realLoad = mod._load; 16 | 17 | const wrappers = {}; 18 | 19 | // Either false, or a list (initially empty) of modules whose wrapping is being 20 | // delayed. This is important for modules who require other modules that need 21 | // wrapping, to avoid issues with circular requires. 22 | let wrappingBlocked = false; 23 | 24 | function fixModule(requestedName, filename, loadedModule) { 25 | const wrapper = wrappers[requestedName]; 26 | 27 | if (wrapper) { 28 | wrappingBlocked = wrapper.shouldBlockWrapping ? [] : false; 29 | 30 | // wrap can either return a replacement, or mutate the module itself. 31 | const fixedModule = wrapper.wrap(loadedModule) || loadedModule; 32 | 33 | if (fixedModule !== loadedModule && mod._cache[filename] && mod._cache[filename].exports) { 34 | mod._cache[filename].exports = fixedModule; 35 | } 36 | 37 | if (wrappingBlocked) { 38 | wrappingBlocked.forEach(function (modDetails) { 39 | fixModule(modDetails.requestedName, modDetails.filename, modDetails.loadedModule); 40 | }); 41 | wrappingBlocked = false; 42 | } 43 | 44 | return fixedModule; 45 | } else { 46 | return loadedModule; 47 | } 48 | } 49 | 50 | // Our hook into require(): 51 | mod._load = function (requestedName, parent, isMain) { 52 | const filename = mod._resolveFilename(requestedName, parent, isMain); 53 | let loadedModule = realLoad.apply(this, arguments); 54 | 55 | // Should always be set, but check just in case. This also allows 56 | // users to disable interception explicitly, if need be. 
57 | if (!process.env.HTTP_TOOLKIT_ACTIVE) return loadedModule; 58 | 59 | if (wrappingBlocked !== false) { 60 | wrappingBlocked.push({ 61 | requestedName: requestedName, 62 | filename: filename, 63 | loadedModule: loadedModule 64 | }); 65 | } else { 66 | loadedModule = fixModule(requestedName, filename, loadedModule); 67 | } 68 | 69 | return loadedModule; 70 | }; 71 | 72 | // Register a wrapper for a given name. If shouldBlockWrapping is set, all wrapping 73 | // of modules require'd during the modules wrapper function will be delayed until 74 | // after it completes. 75 | module.exports = function wrapModule( 76 | requestedName, 77 | wrapperFunction, 78 | shouldBlockWrapping 79 | ) { 80 | wrappers[requestedName] = { 81 | wrap: wrapperFunction, 82 | shouldBlockWrapping: shouldBlockWrapping || false 83 | }; 84 | }; -------------------------------------------------------------------------------- /overrides/path/node: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ^ Note that we use sh, not bash, for alpine compatibility 3 | set -e 4 | 5 | # Exclude ourselves from PATH, find the real node, then reset PATH 6 | PATH="$(printf '%s\n' "$PATH" | sed "s:$(dirname "$0")\:::g")" 7 | # ^ This is made more complicated by sh, since we can't use variable expansion, but this 8 | # should be equivalent. We use : as a safe sed delim here, though it is confusing! 
9 | 10 | real_node=`command -v node` 11 | PATH="`dirname "$0"`:$PATH" 12 | 13 | PREPEND_PATH=`dirname "$0"`/../js/prepend-node.js 14 | 15 | # Call node with the given arguments, prefixed with our extra logic 16 | if command -v winpty >/dev/null 2>&1; then 17 | winpty "$real_node" -r "$PREPEND_PATH" "$@" 18 | else 19 | "$real_node" -r "$PREPEND_PATH" "$@" 20 | fi -------------------------------------------------------------------------------- /overrides/path/node.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | SETLOCAL 3 | 4 | REM Exclude ourselves from PATH within this script, to avoid recursing 5 | set ORIGINALPATH=%PATH% 6 | set WRAPPER_FOLDER=%HTTP_TOOLKIT_OVERRIDE_PATH%\path 7 | call set PATH=%%PATH:%WRAPPER_FOLDER%;=%% 8 | 9 | REM Get the real node path, store it in %REAL_NODE% 10 | FOR /F "tokens=*" %%g IN ('where node') do (SET REAL_NODE=%%g) 11 | 12 | REM Reset PATH, so its visible to node & subprocesses 13 | set PATH=%ORIGINALPATH% 14 | 15 | REM Start Node for real, with an extra arg to inject our logic 16 | "%REAL_NODE%" -r "%WRAPPER_FOLDER%\..\js\prepend-node.js" %* -------------------------------------------------------------------------------- /overrides/path/php: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # Exclude ourselves from PATH, find the real php, then reset PATH 5 | PATH="${PATH//`dirname "$0"`:/}" 6 | real_php=`command -v php` 7 | PATH="`dirname "$0"`:$PATH" 8 | 9 | # Strip out our PHP_INI_SCAN_DIR - if this file has been run successfully, then it's not necessary, 10 | # and it can cause problems (since it overrides any default scan directories configured) 11 | export INJECTED_PHP_INI_DIR=$HTTP_TOOLKIT_OVERRIDE_PATH/php 12 | export PHP_INI_SCAN_DIR="${PHP_INI_SCAN_DIR//:$INJECTED_PHP_INI_DIR/}" 13 | export PHP_INI_SCAN_DIR="${PHP_INI_SCAN_DIR//$INJECTED_PHP_INI_DIR/}" 14 | if [ -z "$PHP_INI_SCAN_DIR" ]; then 
15 | unset PHP_INI_SCAN_DIR 16 | fi 17 | 18 | # Call PHP with the given arguments, and a few extra 19 | PHP_ARGS=( 20 | # Make OpenSSL trust us 21 | -d "openssl.cafile=$SSL_CERT_FILE" \ 22 | # Make cURL trust us 23 | -d "curl.cainfo=$SSL_CERT_FILE" \ 24 | # Prepend a script that enables the proxy 25 | -d "auto_prepend_file=`dirname "$0"`/../php/prepend.php" \ 26 | # Pass through all other provided arguments 27 | "$@" 28 | ) 29 | 30 | if command -v winpty >/dev/null 2>&1; then 31 | winpty "$real_php" "${PHP_ARGS[@]}" 32 | else 33 | "$real_php" "${PHP_ARGS[@]}" 34 | fi 35 | -------------------------------------------------------------------------------- /overrides/path/php.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | SETLOCAL 3 | 4 | REM Exclude ourselves from PATH within this script, to avoid recursing 5 | set ORIGINALPATH=%PATH% 6 | set WRAPPER_FOLDER=%HTTP_TOOLKIT_OVERRIDE_PATH%\path 7 | call set PATH=%%PATH:%WRAPPER_FOLDER%;=%% 8 | 9 | REM Get the real php path, store it in %REAL_PHP% 10 | FOR /F "tokens=*" %%g IN ('where php') do (SET REAL_PHP=%%g) 11 | 12 | REM Reset PATH, so its visible to php & subprocesses 13 | set PATH=%ORIGINALPATH% 14 | 15 | REM Reset PHP_INI_SCAN_DIR, removing our override path 16 | call set PHP_INI_SCAN_DIR=%%PHP_INI_SCAN_DIR:%HTTP_TOOLKIT_OVERRIDE_PATH%\php;=%% 17 | call set PHP_INI_SCAN_DIR=%%PHP_INI_SCAN_DIR:%HTTP_TOOLKIT_OVERRIDE_PATH%\php=%% 18 | 19 | REM Start PHP for real, with extra args to override certain configs 20 | "%REAL_PHP%" -d "openssl.cafile=%SSL_CERT_FILE%" -d "curl.cainfo=%SSL_CERT_FILE%" -d "auto_prepend_file=%WRAPPER_FOLDER%\..\php\prepend.php" %* -------------------------------------------------------------------------------- /overrides/php/php-httptoolkit-override.ini: -------------------------------------------------------------------------------- 1 | ; Make OpenSSL trust us 2 | openssl.cafile=${SSL_CERT_FILE} 3 | ; Make cURL trust us 4 | 
curl.cainfo=${SSL_CERT_FILE} 5 | ; Prepend a script that enables the proxy 6 | auto_prepend_file=${HTTP_TOOLKIT_OVERRIDE_PATH}/php/prepend.php 7 | 8 | ; Intercepting PHP using this file via PHP_INI_SCAN_DIR isn't a perfect solution. It's better 9 | ; to use the 'php' wrapper (overrides/path/php) which sets this configuration, because when 10 | ; PHP_INI_SCAN_DIR is left blank it defaults to a system config directory, and overriding this 11 | ; means that is not loaded. 12 | ; Unfortunately, it's not always possible to inject the 'php' wrapper where we need it, due to 13 | ; how PHP is often launched (managed by another process, not launched & injectable by HTTP Toolkit). 14 | ; This is a fallback solution for that case that seems to work well in practice. 15 | 16 | ; Where this doesn't work, you may be able to replace the relevant env vars above and place this 17 | ; file directly into your PHP_INI_SCAN_DIR directory (run php --ini to find this). 18 | 19 | ; (In future, we could consider more complicated fixes: e.g. a prepend script that launches a PHP 20 | ; subprocess which runs with the default configuration, just explicitly overridden by CLI args. 21 | ; That would have some performance implications, but probably nothing notable in dev. Not worthwhile 22 | ; for now unless this causes serious problems though) -------------------------------------------------------------------------------- /overrides/php/prepend.php: -------------------------------------------------------------------------------- 1 | array('proxy' => str_replace('http://', '', getenv('HTTPS_PROXY'))) 5 | ) 6 | ); 7 | 8 | // We've overridden php.ini to ensure this runs. We should go back to 9 | // php.ini, check if there was a previous value, and ensure that 10 | // gets run as well, to make sure we don't break anything. 
11 | $phpIniLocation = php_ini_loaded_file(); 12 | if ($phpIniLocation) { 13 | $phpIniContents = parse_ini_file($phpIniLocation); 14 | if ($phpIniContents['auto_prepend_file']) { 15 | require($phpIniContents['auto_prepend_file']); 16 | } 17 | } 18 | ?> -------------------------------------------------------------------------------- /overrides/pythonpath/aiohttp.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | preload_real_module('aiohttp') 3 | 4 | import functools, aiohttp 5 | 6 | # Re-export all public fields, and a few notable private fields for max compatibility: 7 | from aiohttp import * 8 | from aiohttp import __path__, __file__, __doc__ 9 | 10 | # Forcibly enable environment trust for all sessions: 11 | _session_init = aiohttp.ClientSession.__init__ 12 | @functools.wraps(_session_init) 13 | def _new_client_session_init(self, *k, **kw): 14 | _session_init(self,*k, **dict(kw, trust_env=True)) 15 | aiohttp.ClientSession.__init__ = _new_client_session_init -------------------------------------------------------------------------------- /overrides/pythonpath/hgdemandimport.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | 3 | preload_real_module('hgdemandimport') 4 | 5 | import hgdemandimport 6 | 7 | # Re-export all other public fields 8 | from hgdemandimport import * 9 | 10 | # Disable hgdemandimport entirely. This is an optional optimization used by hg, which doesn't play 11 | # nicely with HTTP Toolkit's import hooks, making hg unusable. 12 | hgdemandimport.enable = lambda: None -------------------------------------------------------------------------------- /overrides/pythonpath/http/__init__.py: -------------------------------------------------------------------------------- 1 | # This module _must_ trigger the import for http.client. 
We need to ensure we preload 2 | # the real http module (or import http & import http.server don't work properly), but 3 | # if we do that before importing the client, the client can never be intercepted. 4 | 5 | # There might be a cleaner alternative, but for now we just aggressively pre-intercept 6 | # the client instead. 7 | 8 | from . import client -------------------------------------------------------------------------------- /overrides/pythonpath/http/client.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | 3 | preload_real_module('http', 'http.client') 4 | 5 | import http.client, os, functools 6 | 7 | # Re-export all public fields 8 | from http.client import * 9 | # Load a few extra notable private fields, for max compatibility 10 | from http.client import __file__, __doc__ 11 | 12 | _httpProxy = os.environ['HTTP_PROXY'] 13 | [_proxyHost, _proxyPort] = _httpProxy.split('://')[1].split(':') 14 | _certPath = os.environ['SSL_CERT_FILE'] 15 | 16 | # Redirect and then tunnel all plain HTTP connections: 17 | _http_connection_init = HTTPConnection.__init__ 18 | @functools.wraps(_http_connection_init) 19 | def _new_http_connection_init(self, host, port=None, *k, **kw): 20 | _http_connection_init(self, _proxyHost, int(_proxyPort), *k, **kw) 21 | self.set_tunnel(host, port) 22 | HTTPConnection.__init__ = _new_http_connection_init 23 | 24 | def _build_default_context(): 25 | import ssl 26 | context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) 27 | context.options |= ssl.OP_NO_SSLv2 28 | context.options |= ssl.OP_NO_SSLv3 29 | return context 30 | 31 | # Redirect & tunnel HTTPS connections, and inject our CA certificate: 32 | _https_connection_init = HTTPSConnection.__init__ 33 | @functools.wraps(_https_connection_init) 34 | def _new_https_connection_init(self, host, port=None, *k, **kw): 35 | context = None 36 | if 'context' in kw: 37 | context = kw.get('context') 38 | elif len(k) 
> 7: 39 | context = k[7] 40 | 41 | if context == None: 42 | context = kw['context'] = _build_default_context() 43 | 44 | context.load_verify_locations(_certPath) 45 | 46 | _https_connection_init(self, _proxyHost, int(_proxyPort), *k, **kw) 47 | self.set_tunnel(host, port) 48 | HTTPSConnection.__init__ = _new_https_connection_init -------------------------------------------------------------------------------- /overrides/pythonpath/httplib.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | 3 | preload_real_module('httplib') 4 | 5 | import httplib, os, functools 6 | 7 | # Re-export all public fields 8 | from httplib import * 9 | # Load a few extra notable private fields, for max compatibility 10 | from httplib import __file__, __doc__ 11 | 12 | _httpProxy = os.environ['HTTP_PROXY'] 13 | [_proxyHost, _proxyPort] = _httpProxy.split('://')[1].split(':') 14 | _certPath = os.environ['SSL_CERT_FILE'] 15 | 16 | # Redirect and then tunnel all plain HTTP connections: 17 | _http_connection_init = HTTPConnection.__init__ 18 | @functools.wraps(_http_connection_init) 19 | def _new_http_connection_init(self, host, port=None, *k, **kw): 20 | _http_connection_init(self, _proxyHost, _proxyPort, *k, **kw) 21 | self.set_tunnel(host, port) 22 | HTTPConnection.__init__ = _new_http_connection_init 23 | 24 | def _build_default_context(): 25 | import ssl 26 | context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) 27 | context.options |= ssl.OP_NO_SSLv2 28 | context.options |= ssl.OP_NO_SSLv3 29 | return context 30 | 31 | # Redirect & tunnel HTTPS connections, and inject our CA certificate: 32 | _https_connection_init = HTTPSConnection.__init__ 33 | @functools.wraps(_https_connection_init) 34 | def _new_https_connection_init(self, host, port=None, *k, **kw): 35 | context = None 36 | if 'context' in kw: 37 | context = kw.get('context') 38 | elif len(k) > 7: 39 | context = k[7] 40 | 41 | if context == None: 
42 | context = kw['context'] = _build_default_context() 43 | 44 | context.load_verify_locations(_certPath) 45 | 46 | _https_connection_init(self, _proxyHost, _proxyPort, *k, **kw) 47 | self.set_tunnel(host, port) 48 | HTTPSConnection.__init__ = _new_https_connection_init -------------------------------------------------------------------------------- /overrides/pythonpath/httplib2.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | 3 | preload_real_module('httplib2') 4 | 5 | import httplib2, os, functools 6 | 7 | # Re-export all public fields 8 | from httplib2 import * 9 | # Load a few extra notable private fields, for max compatibility 10 | from httplib2 import __file__, __doc__ 11 | 12 | _certPath = os.environ['SSL_CERT_FILE'] 13 | 14 | # Ensure all connections trust our cert: 15 | _http_init = httplib2.Http.__init__ 16 | @functools.wraps(_http_init) 17 | def _new_http_init(self, *k, **kw): 18 | kList = list(k) 19 | if len(kList) > 3: 20 | kList[3] = _certPath 21 | else: 22 | kw['ca_certs'] = _certPath 23 | _http_init(self, *kList, **kw) 24 | httplib2.Http.__init__ = _new_http_init -------------------------------------------------------------------------------- /overrides/pythonpath/httptoolkit_intercept.py: -------------------------------------------------------------------------------- 1 | def preload_real_module(*module_names): 2 | # Re-importing the real module at the top level of an override fails after deleting it from 3 | # sys.modules['httplib'] in Python 2. Some interesting issues there ofc, but doing this 4 | # instead works nicely in both Python 2 & 3. 
5 | import sys, os 6 | 7 | override_path = os.path.dirname(os.path.abspath(__file__)) 8 | original_sys_path = list(sys.path) 9 | sys.path = [p for p in sys.path if p != override_path and p != ''] 10 | 11 | for mod in module_names: 12 | if mod in sys.modules: 13 | del sys.modules[mod] 14 | __import__(mod) 15 | 16 | sys.path = original_sys_path -------------------------------------------------------------------------------- /overrides/pythonpath/stripe.py: -------------------------------------------------------------------------------- 1 | from httptoolkit_intercept import preload_real_module 2 | preload_real_module('stripe') 3 | 4 | import stripe, os 5 | stripe.ca_bundle_path = os.environ['SSL_CERT_FILE'] 6 | 7 | # Re-export all public fields from Stripe 8 | from stripe import * 9 | # Load a few extra notable private fields, for max compatibility 10 | from stripe import __path__, __file__, __doc__ -------------------------------------------------------------------------------- /overrides/webextension/icon-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/overrides/webextension/icon-128.png -------------------------------------------------------------------------------- /overrides/webextension/icon-16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/overrides/webextension/icon-16.png -------------------------------------------------------------------------------- /overrides/webextension/icon-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/httptoolkit/httptoolkit-server/8d00adbd17da045a4fa3949fb588183c41d8062a/overrides/webextension/icon-48.png 
-------------------------------------------------------------------------------- /overrides/webextension/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HTTP Toolkit", 3 | "description": " A browser extension used in HTTP Toolkit", 4 | "icons": { 5 | "16": "icon-16.png", 6 | "48": "icon-48.png", 7 | "128": "icon-128.png" 8 | }, 9 | "key": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyP7cZAF3QssK5iB59HYUdKPCf+hSqlY8M6UXIhemXvmpTKQE1gfPFLACSKP9wB+oWJnJChq/IkpgC3nlKbLbcKO8lGVyU5ecvq3A5v/znovmeVs4FKZOkq+tKnYdRCD7nIH9ycAksKPXqM3aBopOPVvoQFfDp3MFlHeW5TpxVaRj02IW7say4jOvNLRBoekczQTHUXw8NpYrpJPC9YqahMaB5tz4l9SGFiXNf/ga611jzzlFcQGhqtJAXQ6EmnQUoBtt/35qiTdL3VlBxqSdVTfdU23zKgEIJ5Uf6x0SOe6vdO0jAPvx8maKbw0ZRd2fvxOQ2qgxP2NcDsXjoGll4wIDAQAB", 10 | "version": "1.2.0", 11 | "manifest_version": 3, 12 | "background": { 13 | "service_worker": "build/background.js" 14 | }, 15 | "permissions": [ 16 | "scripting", 17 | "proxy" 18 | ], 19 | "host_permissions": [ 20 | "" 21 | ], 22 | "web_accessible_resources": [ 23 | { 24 | "resources": [ 25 | "build/injected-script.js" 26 | ], 27 | "matches": [ 28 | "" 29 | ] 30 | } 31 | ] 32 | } -------------------------------------------------------------------------------- /pack.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | import _ from 'lodash'; 4 | import { spawn as spawnAsync, SpawnOptions } from 'child_process'; 5 | 6 | const OUTPUT_DIR = path.join(__dirname, 'build'); 7 | 8 | const pJson = require(path.join(__dirname, './package.json')); 9 | const pLockJson = require(path.join(__dirname, './package-lock.json')); 10 | 11 | const spawn = (command: string, args: string[] = [], options: SpawnOptions = {}) => { 12 | return new Promise((resolve, reject) => { 13 | const proc = spawnAsync(command, args, options); 14 | proc.on('exit', (code) => { 15 | if (code === 0) resolve(); 
16 | else reject(new Error( 17 | `Spawn ${command} ${args.join(' ')} exited with ${code}` 18 | )); 19 | }); 20 | }); 21 | } 22 | 23 | /** 24 | * The process here is that we create a clone of the whole codebase, but keeping the latest 25 | * build output, and make a couple of small modifications to manage the build process, and then 26 | * we handle off to Oclif to rebuild once for each target operating system. 27 | * 28 | * Oclif will run "npm pack", then "npm unpack" into a temp directory, "npm install" there, inject 29 | * the appropriate scripts & node binaries to make everything work nicely, and then bundled us up 30 | * a nice tarball ready for deployment. 31 | */ 32 | const packageApp = async () => { 33 | console.log('Preparing packaging directory'); 34 | await fs.emptyDir(OUTPUT_DIR); 35 | 36 | // Copy all normally deployable files: 37 | const filesToCopy = pJson.files; 38 | await Promise.all(filesToCopy.map((file: string) => 39 | fs.copy( 40 | path.join(__dirname, file), 41 | path.join('build', file) 42 | ) 43 | )); 44 | 45 | await Promise.all([ 46 | // Include the packaging & build scripts: 47 | 'build-release.sh', 48 | 'prepare.ts', 49 | // Include package-lock.json, to keep dependencies locked: 50 | 'package-lock.json', 51 | // Add the fully bundled source (not normally packaged by npm): 52 | path.join('bundle', 'index.js'), 53 | path.join('bundle', 'error-tracking.js'), 54 | // Static resources normally stored in browser-launcher 55 | path.join('bundle', 'bl-resources') 56 | ].map((extraFile) => 57 | fs.copy(path.join(__dirname, extraFile), path.join(OUTPUT_DIR, extraFile)) 58 | )); 59 | 60 | // Edit the package to replace deps with the bundle: 61 | pJson.files.push('/bundle'); 62 | pJson.files.push('/nss'); 63 | 64 | // Replace package dependencies with strict version dependencies on only the 65 | // unbundleable dependencies, pulling the versions from our package lock. 
66 | pJson.dependencies = _(pJson.oclif.dependenciesToPackage) 67 | .keyBy(_.identity) 68 | .mapValues((pkg: string) => pLockJson.packages[`node_modules/${pkg}`].version) 69 | .valueOf(); // Unwrap the lodash chain: without this we'd assign (and later serialize) the wrapper object, not the plain { pkg: version } map 70 | // Oclif is going to re-run install, and there's a couple of extra files that will be required to make 71 | // that work, which aren't normally included by the "npm pack"/"npm unpack" flow, so we manually pull 72 | // them across here: 73 | pJson.scripts.preinstall = `cp ../../prepare.ts . && cp ../../overrides/js/package-lock.json overrides/js`; 74 | 75 | delete pJson.scripts.prepack; // We don't want to rebuild - all built code will be in the packed content 76 | await fs.writeJson(path.join(OUTPUT_DIR, 'package.json'), pJson); 77 | 78 | const buildScript = path.join(OUTPUT_DIR, 'build-release.sh'); 79 | 80 | // Run build-release in this folder, for each platform. For each bundle, we copy in 81 | // only the relevant platform-specific NSS files. 82 | console.log('Building for Linux x64'); 83 | await fs.mkdir(path.join(OUTPUT_DIR, 'nss')); 84 | await fs.copy(path.join(__dirname, 'nss', 'linux'), path.join(OUTPUT_DIR, 'nss', 'linux')); 85 | await spawn(buildScript, ['linux', 'x64'], { cwd: OUTPUT_DIR, stdio: 'inherit' }); 86 | 87 | console.log('Building for Linux arm64'); 88 | await spawn(buildScript, ['linux', 'arm64'], { cwd: OUTPUT_DIR, stdio: 'inherit' }); 89 | 90 | console.log('Building for Darwin x64'); 91 | await fs.remove(path.join(OUTPUT_DIR, 'nss', 'linux')); 92 | await fs.copy(path.join(__dirname, 'nss', 'darwin'), path.join(OUTPUT_DIR, 'nss', 'darwin')); 93 | await spawn(buildScript, ['darwin', 'x64'], { cwd: OUTPUT_DIR, stdio: 'inherit' }); 94 | 95 | console.log('Building for Darwin arm64'); 96 | await spawn(buildScript, ['darwin', 'arm64'], { cwd: OUTPUT_DIR, stdio: 'inherit' }); 97 | 98 | console.log('Building for Win32'); 99 | await fs.remove(path.join(OUTPUT_DIR, 'nss', 'darwin')); 100 | await fs.copy(path.join(__dirname, 'nss', 'win32'), path.join(OUTPUT_DIR, 'nss', 
'win32')); 101 | await spawn(buildScript, ['win32', 'x64'], { cwd: OUTPUT_DIR, stdio: 'inherit' }); 102 | 103 | // Oclif builds a nodeless platform-agnostic bundle too (although in our case, nothing is 104 | // really platform agnostic). Not necessary, probably won't work - drop it. 105 | await fs.remove(path.join( 106 | OUTPUT_DIR, 107 | 'dist', 108 | `v${pJson.version}`, 109 | `httptoolkit-server-v${pJson.version}.tar.gz` 110 | )); 111 | } 112 | 113 | packageApp().catch(e => { 114 | console.error(e); 115 | process.exit(1); 116 | }); 117 | -------------------------------------------------------------------------------- /prepare.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import type { Stats } from 'fs'; 3 | import * as fs from 'fs/promises'; 4 | import klaw from 'klaw'; 5 | import { spawn as spawnAsync, SpawnOptions } from 'child_process'; 6 | 7 | const spawn = (command: string, args: string[] = [], options: SpawnOptions = {}) => { 8 | return new Promise((resolve, reject) => { 9 | const proc = spawnAsync(command, args, options); 10 | proc.on('exit', (code) => { 11 | if (code === 0) resolve(); 12 | else reject(new Error( 13 | `Spawn ${command} ${args.join(' ')} exited with ${code}` 14 | )); 15 | }); 16 | }); 17 | } 18 | 19 | const collectAsyncIterator = async (asyncIterator: any) => { 20 | const result: any[] = []; 21 | for await (const value of asyncIterator) result.push(value); 22 | return result; 23 | } 24 | 25 | const OVERRIDES_DIR = path.join(__dirname, 'overrides'); 26 | 27 | (async () => { 28 | console.log('Installing override npm dependencies...'); 29 | 30 | await spawn(process.platform === 'win32' ? 
'npm.cmd' : 'npm', ['ci', '--production'], { 31 | cwd: path.join(OVERRIDES_DIR, 'js'), 32 | stdio: 'inherit', 33 | shell: process.platform === 'win32', // Required for .cmd files due to CVE-2024-27980 34 | }); 35 | 36 | const files: Array<{ 37 | path: string, 38 | stats: Stats 39 | }> = await collectAsyncIterator(klaw(OVERRIDES_DIR)); 40 | 41 | // For Docker we don't know the user in the container, so all override files must 42 | // be globally readable (and directories globally executable) 43 | await Promise.all(files.map(({ path, stats }) => 44 | stats.isDirectory() 45 | ? fs.chmod(path, stats.mode | 0o5) // Set o+rx 46 | : fs.chmod(path, stats.mode | 0o4) // Set o+r 47 | )); // Promise.all is required: awaiting the bare array would resolve immediately and leave chmod rejections unhandled 48 | 49 | console.log('Override dependencies installed'); 50 | })().catch((e) => { 51 | console.error(e); 52 | process.exit(1); 53 | }); -------------------------------------------------------------------------------- /src/api/api-server.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as events from 'events'; 3 | import express from 'express'; 4 | import cors from 'cors'; 5 | import corsGate from 'cors-gate'; 6 | 7 | import { HtkConfig } from '../config'; 8 | import { buildInterceptors } from '../interceptors'; 9 | import { ALLOWED_ORIGINS } from '../constants'; 10 | import { shutdown } from '../shutdown'; 11 | 12 | import { ApiModel } from './api-model'; 13 | import { exposeGraphQLAPI } from './graphql-api'; 14 | import { exposeRestAPI } from './rest-api'; 15 | import { HttpClient } from '../client/http-client'; 16 | 17 | /** 18 | * This file contains the core server API, used by the UI to query 19 | * machine state that isn't easily visible from the web (cert files, 20 | * network interfaces), and to launch intercepted applications 21 | * directly on this machine. 22 | * 23 | * This is a very powerful API! It's not far from remote code 24 | * execution. 
Because of that, access is tightly controlled: 25 | * - Only listens on 127.0.0.1 26 | * - All requests must include an acceptable Origin header, i.e. 27 | * no browsers requests except from a strict whitelist of valid 28 | * origins. In prod, that's just app.httptoolkit.tech. 29 | * - Optionally (always set in the HTK app) requires an auth 30 | * token with every request, provided by $HTK_SERVER_TOKEN or 31 | * --token at startup. 32 | * 33 | * The API is available in two formats: a simple REST-ish API, 34 | * and a GraphQL that exists for backward compatibility. All 35 | * future development will happen on the REST API, and the 36 | * GraphQL API will eventually be removed. 37 | */ 38 | 39 | export class HttpToolkitServerApi extends events.EventEmitter { 40 | 41 | private server: express.Application; 42 | 43 | constructor( 44 | config: HtkConfig, 45 | httpClient: HttpClient, 46 | getRuleParamKeys: () => string[] 47 | ) { 48 | super(); 49 | 50 | const interceptors = buildInterceptors(config); 51 | 52 | this.server = express(); 53 | this.server.disable('x-powered-by'); 54 | 55 | // Allow web pages on non-local URLs (app.httptoolkit.tech, not localhost) to 56 | // send requests to this admin server too. Without this, those requests will 57 | // fail after rejected preflights in recent Chrome (from ~v102, ish? Unclear). 58 | this.server.use((req, res, next) => { 59 | if (req.headers["access-control-request-private-network"]) { 60 | res.setHeader("access-control-allow-private-network", "true"); 61 | } 62 | next(null); 63 | }); 64 | 65 | this.server.use(cors({ 66 | origin: ALLOWED_ORIGINS, 67 | maxAge: 86400 // Cache this result for as long as possible 68 | })); 69 | 70 | this.server.use(corsGate({ 71 | strict: true, // MUST send an allowed origin 72 | allowSafe: false, // Even for HEAD/GET requests 73 | origin: '', // No origin - we accept *no* same-origin requests 74 | 75 | // Extend default failure response to add a helpful error body. 
76 | failure: (_req, res, _next) => { 77 | res.statusCode = 403; 78 | res.send({ error: { message: 'Invalid CORS headers' }}); 79 | } 80 | })); 81 | 82 | this.server.use((req, res, next) => { 83 | if (req.path === '/' && req.method !== 'POST') { 84 | // We allow only POST to GQL, because that's all we expect for GraphQL queries, 85 | // and this helps derisk some (admittedly unlikely) XSRF possibilities. 86 | 87 | res.status(405).send({ 88 | error: { message: 'Only POST requests are supported' } 89 | }); 90 | 91 | // XSRF is less of a risk elsewhere, as REST GET endpoints don't do dangerous 92 | // things. Also we're enforcing Origin headers everywhere so it should be 93 | // impossible regardless, but better safe than sorry! 94 | } else { 95 | next(); 96 | } 97 | }); 98 | 99 | if (config.authToken) { 100 | // Optional auth token. This allows us to lock down UI/server communication further 101 | // when started together. The desktop generates a token every run and passes it to both. 102 | this.server.use((req: express.Request, res: express.Response, next: () => void) => { 103 | const authHeader = req.headers['authorization'] || ''; 104 | 105 | const tokenMatch = authHeader.match(/Bearer (\S+)/) || []; 106 | const token = tokenMatch[1]; 107 | 108 | if (token !== config.authToken) { 109 | res.status(403).send({ 110 | error: { message: 'Valid token required' } 111 | }); 112 | } else { 113 | next(); 114 | } 115 | }); 116 | } 117 | 118 | const apiModel = new ApiModel( 119 | config, 120 | interceptors, 121 | getRuleParamKeys, 122 | httpClient, 123 | { 124 | onTriggerUpdate: () => this.emit('update-requested'), 125 | onTriggerShutdown: () => shutdown(0, 'API call') 126 | } 127 | ) 128 | 129 | this.server.use(express.json()); 130 | 131 | exposeRestAPI(this.server, apiModel); 132 | exposeGraphQLAPI(this.server, apiModel); 133 | } 134 | 135 | start() { 136 | return new Promise((resolve, reject) => { 137 | this.server.listen(45457, '127.0.0.1', resolve); // Localhost only 138 
| this.server.once('error', reject); 139 | }); 140 | } 141 | }; -------------------------------------------------------------------------------- /src/browsers.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import { promisify } from 'util'; 3 | 4 | import { delay, isErrorLike } from '@httptoolkit/util'; 5 | import getBrowserLauncherCb from '@httptoolkit/browser-launcher'; 6 | import { 7 | LaunchOptions, 8 | Launch, 9 | BrowserInstance, 10 | Browser, 11 | update as updateBrowserCacheCb 12 | } from '@httptoolkit/browser-launcher'; 13 | 14 | import { logError } from './error-tracking'; 15 | import { readFile, deleteFile } from './util/fs'; 16 | 17 | const getBrowserLauncher = promisify(getBrowserLauncherCb); 18 | const updateBrowserCache: (configPath: string) => Promise = promisify(updateBrowserCacheCb); 19 | 20 | const browserConfigPath = (configPath: string) => path.join(configPath, 'browsers.json'); 21 | 22 | export { BrowserInstance, Browser }; 23 | 24 | export async function checkBrowserConfig(configPath: string) { 25 | // It's not clear why, but sometimes the browser config can become corrupted, so it's not valid JSON 26 | // If that happens browser-launcher can hit issues. To avoid that entirely, we check it here on startup. 
27 | 28 | const browserConfig = browserConfigPath(configPath); 29 | 30 | try { 31 | const rawConfig = await readFile(browserConfig, 'utf8'); 32 | JSON.parse(rawConfig); 33 | } catch (error) { 34 | if (isErrorLike(error) && error.code === 'ENOENT') return; 35 | console.warn(`Failed to read browser config cache from ${browserConfig}, clearing.`, error); 36 | 37 | return deleteFile(browserConfig).catch((err) => { 38 | // There may be possible races around here - as long as the file's gone, we're happy 39 | if (isErrorLike(err) && err.code === 'ENOENT') return; 40 | 41 | console.error('Failed to clear broken config file:', err); 42 | logError(err); 43 | }); 44 | } 45 | } 46 | 47 | let launcher: Promise | undefined; 48 | 49 | function getLauncher(configPath: string) { 50 | if (!launcher) { 51 | const browserConfig = browserConfigPath(configPath); 52 | launcher = getBrowserLauncher(browserConfig); 53 | 54 | launcher.then(async () => { 55 | // Async after first creating the launcher, we trigger a background cache update. 56 | // This can be *synchronously* expensive (spawns 10s of procs, 10+ms sync per 57 | // spawn on unix-based OSs) so defer briefly. 
58 | await delay(2000); 59 | try { 60 | await updateBrowserCache(browserConfig); 61 | console.log('Browser cache updated'); 62 | // Need to reload the launcher after updating the cache: 63 | launcher = getBrowserLauncher(browserConfig); 64 | } catch (e) { 65 | logError(e) 66 | } 67 | }); 68 | 69 | // Reset & retry if this fails somehow: 70 | launcher.catch((e) => { 71 | logError(e); 72 | launcher = undefined; 73 | }); 74 | } 75 | 76 | return launcher; 77 | } 78 | 79 | export const getAvailableBrowsers = async (configPath: string) => { 80 | return (await getLauncher(configPath)).browsers; 81 | }; 82 | 83 | export const getBrowserDetails = async (configPath: string, variant: string): Promise => { 84 | const browsers = await getAvailableBrowsers(configPath); 85 | 86 | // Get the details for the first matching browsers that is installed: 87 | return browsers.find(b => b.name === variant); 88 | }; 89 | 90 | export { LaunchOptions }; 91 | 92 | export const launchBrowser = async (url: string, options: LaunchOptions, configPath: string) => { 93 | const launcher = await getLauncher(configPath); 94 | const browserInstance = await promisify(launcher)(url, options); 95 | 96 | browserInstance.process.on('error', (e) => { 97 | // If nothing else is listening for this error, this acts as default 98 | // fallback error handling: log & report & don't crash. 
99 | if (browserInstance.process.listenerCount('error') === 1) { 100 | console.log('Browser launch error'); 101 | logError(e); 102 | } 103 | }); 104 | 105 | return browserInstance; 106 | }; -------------------------------------------------------------------------------- /src/certificates.ts: -------------------------------------------------------------------------------- 1 | import * as crypto from 'crypto'; 2 | import * as forge from 'node-forge'; 3 | 4 | export const parseCert = forge.pki.certificateFromPem; 5 | 6 | export function getCertExpiry(cert: forge.pki.Certificate): number { 7 | return cert.validity.notAfter.valueOf(); 8 | } 9 | 10 | export function getTimeToCertExpiry(cert: forge.pki.Certificate): number { 11 | return getCertExpiry(cert) - Date.now(); 12 | } 13 | 14 | // A series of magic incantations that matches the behaviour of openssl's 15 | // -subject_hash_old output, as expected by Android's cert store. 16 | export function getCertificateSubjectHash(cert: forge.pki.Certificate) { 17 | const derBytes = forge.asn1.toDer( 18 | ( 19 | forge.pki as any 20 | ).distinguishedNameToAsn1(cert.subject) 21 | ).getBytes(); 22 | 23 | return crypto.createHash('md5') 24 | .update(derBytes) 25 | .digest() 26 | .readUInt32LE(0) 27 | .toString(16); 28 | } 29 | 30 | // Get a full SHA1 hash of the certificate 31 | export function getCertificateFingerprint(cert: forge.pki.Certificate) { 32 | return forge.md.sha1.create() 33 | .update( 34 | forge.asn1.toDer( 35 | forge.pki.certificateToAsn1(cert) 36 | ).getBytes() 37 | ) 38 | .digest() 39 | .toHex(); 40 | } -------------------------------------------------------------------------------- /src/client/client-types.ts: -------------------------------------------------------------------------------- 1 | import type * as Mockttp from 'mockttp'; 2 | 3 | // --- Request definition types --- 4 | 5 | export type RawHeaders = Mockttp.RawHeaders; 6 | export type RawTrailers = Mockttp.RawTrailers; 7 | 8 | export interface 
RequestDefinition { 9 | method: string; 10 | url: string; 11 | 12 | /** 13 | * The raw headers to send. These will be sent exactly as provided - no headers 14 | * will be added automatically. 15 | * 16 | * Note that this means omitting the 'Host' header may cause problems, as will 17 | * omitting both 'Content-Length' and 'Transfer-Encoding' on requests with 18 | * bodies. 19 | */ 20 | headers: RawHeaders; 21 | 22 | rawBody?: Uint8Array; 23 | } 24 | 25 | // --- Request option types --- 26 | 27 | export const RULE_PARAM_REF_KEY = '__rule_param_reference__'; 28 | type ClientProxyRuleParamReference = { [RULE_PARAM_REF_KEY]: string }; 29 | 30 | export type ClientProxyConfig = 31 | | undefined // No Docker, no user or system proxy 32 | | Mockttp.ProxySetting // User or system proxy 33 | | ClientProxyRuleParamReference // Docker proxy (must be dereferenced) 34 | | Array // Both, ordered 35 | 36 | export interface RequestOptions { 37 | /** 38 | * A list of hostnames for which server certificate and TLS version errors 39 | * should be ignored (none, by default). 40 | * 41 | * If set to 'true', HTTPS errors will be ignored for all hosts. WARNING: 42 | * Use this at your own risk. Setting this to `true` can open your 43 | * application to MITM attacks and should never be used over any network 44 | * that is not completed trusted end-to-end. 45 | */ 46 | ignoreHostHttpsErrors?: string[] | boolean; 47 | 48 | /** 49 | * An array of additional certificates, which should be trusted as certificate 50 | * authorities for upstream hosts, in addition to Node.js's built-in certificate 51 | * authorities. 52 | * 53 | * Each certificate should be an object with a `cert` key containing the PEM 54 | * certificate as a string. 
55 | */ 56 | additionalTrustedCAs: Array<{ cert: string }>; 57 | 58 | /** 59 | * Deprecated alias for `additionalTrustedCAs` 60 | * 61 | * @deprecated 62 | */ 63 | trustAdditionalCAs?: Array<{ cert: string }>; 64 | 65 | /** 66 | * A client certificate that should be used for the connection, if the server 67 | * requests one during the TLS handshake. 68 | */ 69 | clientCertificate?: { pfx: Buffer, passphrase?: string }; 70 | 71 | /** 72 | * Proxy configuration, specifying how (if at all) a proxy that should be used 73 | * for upstream connections. 74 | */ 75 | proxyConfig?: ClientProxyConfig; 76 | 77 | /** 78 | * Custom DNS options, to allow configuration of the resolver used when 79 | * forwarding requests upstream. Passing any option switches from using node's 80 | * default dns.lookup function to using the cacheable-lookup module, which 81 | * will cache responses. 82 | */ 83 | lookupOptions?: { servers?: string[] }; 84 | 85 | /** 86 | * An abort signal, which can be used to cancel the in-process request if 87 | * required. 
88 | */ 89 | abortSignal?: AbortSignal; 90 | } 91 | 92 | // --- Response types --- 93 | 94 | export type ResponseStreamEvents = 95 | | RequestStart 96 | | ResponseHead 97 | | ResponseBodyPart 98 | | ResponseTrailers 99 | | ResponseEnd; 100 | // Other notable event is errors (via 'error' event) 101 | 102 | export interface RequestStart { 103 | type: 'request-start'; 104 | startTime: number; // Unix timestamp 105 | timestamp: number; // High precision timer (for relative calculations on later events) 106 | } 107 | 108 | export interface ResponseHead { 109 | type: 'response-head'; 110 | statusCode: number; 111 | statusMessage?: string; 112 | headers: RawHeaders; 113 | timestamp: number; 114 | } 115 | 116 | export interface ResponseBodyPart { 117 | type: 'response-body-part'; 118 | rawBody: Buffer; 119 | timestamp: number; 120 | } 121 | 122 | export interface ResponseTrailers { 123 | type: 'response-trailers'; 124 | trailers: RawTrailers; 125 | timestamp: number; 126 | } 127 | 128 | export interface ResponseEnd { 129 | type: 'response-end'; 130 | timestamp: number; 131 | } -------------------------------------------------------------------------------- /src/config.d.ts: -------------------------------------------------------------------------------- 1 | export interface HtkConfig { 2 | configPath: string; 3 | authToken?: string; 4 | https: { 5 | keyPath: string; 6 | certPath: string; 7 | certContent: string; 8 | keyLength: number; 9 | } 10 | } -------------------------------------------------------------------------------- /src/constants.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | 3 | export const IS_PROD_BUILD = !!process.env.HTTPTOOLKIT_SERVER_BINPATH; 4 | 5 | // Notably, for this file, this is the same when either bundled or unbundled. 6 | // That's not true for most other files! 
Everything should use this instead of __dirname: 7 | export const APP_ROOT = path.join(__dirname, '..'); 8 | 9 | export const ALLOWED_ORIGINS = IS_PROD_BUILD 10 | ? [ 11 | // Prod builds only allow HTTPS app.httptoolkit.tech usage. This 12 | // ensures that no other sites/apps can communicate with your server 13 | // whilst you have the app open. If they could (requires an HTTP mitm), 14 | // they would be able to start proxies & interceptors. 15 | /^https:\/\/app\.httptoolkit\.tech$/ 16 | ] 17 | : [ 18 | // Dev builds can use the main site, or local sites, even if those 19 | // use HTTP. Note that HTTP here could technically open you to the risk 20 | // above, but it'd require a DNS MitM too (to stop local.httptoolkit.tech 21 | // resolving to localhost and never hitting the network). 22 | /^https?:\/\/localhost(:\d+)?$/, 23 | /^https?:\/\/127\.0\.0\.\d+(:\d+)?$/, // Dots escaped, to match literal 127.0.0.x origins only (consistent with the other patterns here) 24 | /^http:\/\/local\.httptoolkit\.tech(:\d+)?$/, 25 | /^https:\/\/app\.httptoolkit\.tech$/, 26 | ]; 27 | 28 | export const MOCKTTP_ALLOWED_ORIGINS = [ 29 | ...ALLOWED_ORIGINS, 30 | // The Chromium webextension is allowed to make requests from the 31 | // browser directly into the Mockttp admin API (but not into our API server). 32 | 'chrome-extension://oeehdgfohghfelggpifolochpnkdmpog' 33 | ]; 34 | 35 | // The range of ports that should be used by invisible ephemeral services, such as Firefox's 36 | // certificate check server and Chrome's "hide warning" server. These ports are extra likely 37 | // not to conflict with normal user usage, and are specifically designated by the IANA for 38 | // use for dynamic ports.
39 | export const EPHEMERAL_PORT_RANGE = { startPort: 49152, endPort: 65535 } as const; 40 | 41 | export const SERVER_VERSION = require('../package.json').version as string; -------------------------------------------------------------------------------- /src/dns-server.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as dns2 from 'dns2'; 3 | 4 | const DNS_SERVER_MAP: { [mockServerPort: number]: Promise | undefined } = {}; 5 | 6 | export function getDnsServer(mockServerPort: number): Promise { 7 | if (!DNS_SERVER_MAP[mockServerPort]) { 8 | const serverPromise = (async () => { 9 | const server = new DnsServer(); 10 | 11 | server.on('close', () => { 12 | delete DNS_SERVER_MAP[mockServerPort]; 13 | }); 14 | 15 | await server.start(); 16 | return server; 17 | })(); 18 | 19 | DNS_SERVER_MAP[mockServerPort] = serverPromise; 20 | } 21 | return DNS_SERVER_MAP[mockServerPort]!; 22 | } 23 | 24 | export async function stopDnsServer(mockServerPort: number) { 25 | const dnsServer = await DNS_SERVER_MAP[mockServerPort] 26 | if (!dnsServer) return; 27 | 28 | delete DNS_SERVER_MAP[mockServerPort]; 29 | dnsServer.stop(); 30 | } 31 | 32 | const EMPTY_SET: ReadonlySet = new Set(); 33 | 34 | class DnsServer extends dns2.UDPServer { 35 | 36 | constructor() { 37 | super((req, sendResponse) => this.handleQuery(req, sendResponse)); 38 | } 39 | 40 | private hosts: { 41 | [host: string]: ReadonlySet | undefined 42 | } = {}; 43 | 44 | setHosts(hosts: { [hostname: string]: ReadonlySet }) { 45 | this.hosts = hosts; 46 | } 47 | 48 | private getHostAddresses(hostname: string): ReadonlySet { 49 | return this.hosts[hostname] ?? 
EMPTY_SET; 50 | } 51 | 52 | handleQuery(request: dns2.DnsRequest, sendResponse: (response: dns2.DnsResponse) => void) { 53 | const response = dns2.Packet.createResponseFromRequest(request); 54 | 55 | // Multiple questions are allowed in theory, but apparently nobody 56 | // supports it, so we don't either. 57 | const [question] = request.questions; 58 | if (!question) return sendResponse(response); // Send an empty response 59 | 60 | const answers = this.getHostAddresses(question.name); 61 | 62 | if (answers.size > 1) { 63 | console.log(`Multiple hosts in internal DNS for hostname ${question.name}:`, answers); 64 | } 65 | 66 | answers.forEach((answer) => { 67 | response.answers.push({ 68 | name: question.name, 69 | type: dns2.Packet.TYPE.A, 70 | class: dns2.Packet.CLASS.IN, 71 | ttl: 0, 72 | address: answer 73 | }); 74 | }); 75 | 76 | sendResponse(response); 77 | } 78 | 79 | start() { 80 | return new Promise((resolve, reject) => { 81 | // Only listens on localhost, only used by Mockttp itself. 82 | this.listen(0, '127.0.0.1'); 83 | this.once('listening', () => resolve()); 84 | this.once('error', reject); 85 | }); 86 | } 87 | 88 | stop() { 89 | return new Promise((resolve) => { 90 | this.once('close', resolve); 91 | this.close(); 92 | }); 93 | } 94 | } -------------------------------------------------------------------------------- /src/dynamic-dep-store.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as stream from 'stream'; 3 | import { CustomError } from '@httptoolkit/util'; 4 | 5 | import { HtkConfig } from './config'; 6 | import * as fs from './util/fs'; 7 | 8 | /** 9 | * This retrieves a stream for a dependency file, either from disk if it's already available 10 | * or by calling the fetch() function (and then saving the result to disk in parallel for 11 | * future calls). 
12 | */ 13 | export async function getDependencyStream(options: { 14 | config: HtkConfig, 15 | key: K, 16 | ext: `.${string}`, 17 | fetch: (key: K) => Promise 18 | }) { 19 | const depPath = path.join(options.config.configPath, `${options.key.join('-')}${options.ext}`); 20 | 21 | if (await fs.canAccess(depPath)) { 22 | return fs.createReadStream(depPath); 23 | } 24 | 25 | const tmpDownloadPath = depPath + `.tmp-${Math.random().toString(36).slice(2)}`; 26 | const downloadStream = await options.fetch(options.key); 27 | const diskStream = fs.createWriteStream(tmpDownloadPath); 28 | const resultStream = new stream.PassThrough(); 29 | 30 | downloadStream.pipe(diskStream); 31 | downloadStream.pipe(resultStream); 32 | 33 | downloadStream.on('error', (e) => { 34 | console.warn(`Failed to download dependency to ${depPath}:`, e); 35 | 36 | // Clean up the temp download file: 37 | diskStream.destroy(); 38 | fs.deleteFile(tmpDownloadPath).catch(() => {}); 39 | 40 | // Pass the error on to the client: 41 | resultStream.destroy( 42 | new CustomError(`${options.key.join('-')} dependency fetch failed: ${e.message ?? 
e}`, { 43 | cause: e 44 | }) 45 | ); 46 | }); 47 | 48 | diskStream.on('finish', () => { 49 | fs.moveFile(tmpDownloadPath, depPath) 50 | .catch(console.warn); 51 | }); 52 | 53 | return resultStream; 54 | } 55 | 56 | export async function cleanupDependencies(options: { 57 | config: HtkConfig, 58 | keyPrefix: string, 59 | versionToKeep: string, 60 | ext: `.${string}` 61 | }) { 62 | const depFiles = await fs.readDir(options.config.configPath); 63 | 64 | await Promise.all(depFiles.map(async (depFile) => { 65 | if ( 66 | depFile.startsWith(options.keyPrefix + '-') && 67 | (depFile.endsWith(options.ext) || depFile.includes(`${options.ext}.tmp-`)) && 68 | !depFile.includes(options.versionToKeep) 69 | ) { 70 | await fs.deleteFile(path.join(options.config.configPath, depFile)).catch((e) => { 71 | console.warn(`Failed to delete old dependency file ${depFile}:`, e); 72 | }); 73 | } 74 | })); 75 | } -------------------------------------------------------------------------------- /src/hide-warning-server.ts: -------------------------------------------------------------------------------- 1 | import { getLocal, Mockttp } from 'mockttp'; 2 | import { HtkConfig } from './config'; 3 | import { EPHEMERAL_PORT_RANGE } from './constants'; 4 | 5 | // The first tab that opens in a new Chrome/Edge window warns about dangerous flags. 6 | // Closing it and immediately opening a new one is a bit cheeky, but 7 | // is completely gets rid that, more or less invisibly: 8 | 9 | export class HideWarningServer { 10 | 11 | constructor( 12 | private config: HtkConfig, 13 | private options: { delay: number | undefined } = { delay: undefined } 14 | ) {} 15 | 16 | private server: Mockttp = getLocal(); 17 | 18 | // Resolved once the server has seen at least once 19 | // request for the warning-hiding page. 
20 | public completedPromise = new Promise((resolve) => { 21 | this.server.on('request', (req) => { 22 | if (req.url.includes('hide-warning')) { 23 | resolve(); 24 | } 25 | }); 26 | }) 27 | 28 | async start(targetUrl: string) { 29 | await this.server.start(EPHEMERAL_PORT_RANGE); 30 | 31 | await this.server.forGet('/hide-warning').thenReply(200, ` 32 | 33 | HTTP Toolkit Warning Fix 34 | 35 | 38 | 52 | 53 | This page should disappear momentarily. If it doesn't, click 54 | this link. 55 | 56 | 57 | `, { "content-type": "text/html" }); 58 | } 59 | 60 | get host(): string { 61 | return this.server!.url 62 | .replace('https://', ''); 63 | } 64 | 65 | get hideWarningUrl(): string { 66 | return this.server.url.replace(/\/?$/, '/hide-warning'); 67 | } 68 | 69 | async stop() { 70 | await this.server.stop(); 71 | } 72 | } -------------------------------------------------------------------------------- /src/interceptors/docker/docker-compose.ts: -------------------------------------------------------------------------------- 1 | const interceptionSuffix = (proxyPort: number) => `+httptoolkit:${proxyPort}`; 2 | 3 | // Take a set of labels provided by a client for a new container that we're intercepting, and remap the config 4 | // hashes so this can be used in future docker-compose commands automatically: 5 | export function transformComposeCreationLabels(proxyPort: number, labels: { [label: string]: string } | undefined) { 6 | // No/null labels - we don't need to do anything 7 | if (!labels) return undefined; 8 | 9 | // Not a docker-compose container - nothing to do here 10 | if (!labels['com.docker.compose.config-hash']) return labels; 11 | 12 | return { 13 | ...labels, 14 | 'com.docker.compose.config-hash': labels['com.docker.compose.config-hash'] + interceptionSuffix(proxyPort) 15 | }; 16 | } 17 | 18 | // Take a set of labels that we might be returning to a client, and remap the config hashes so that only 19 | // the intercepted containers seem to be usable (so that any 
non-intercepted containers are recreated, not used). 20 | export function transformComposeResponseLabels(proxyPort: number, labels: { [label: string]: string } | undefined) { 21 | // No/null labels - we don't need to do anything 22 | if (!labels) return undefined; 23 | 24 | // Not a docker-compose container - nothing to do here 25 | if (!labels['com.docker.compose.config-hash']) return labels; 26 | 27 | const currentHash = labels['com.docker.compose.config-hash']; 28 | const modifiedHash = currentHash.endsWith(interceptionSuffix(proxyPort)) 29 | ? currentHash.slice(0, -1 * interceptionSuffix(proxyPort).length) 30 | : currentHash + '+unintercepted'; 31 | 32 | return { 33 | ...labels, 34 | 'com.docker.compose.config-hash': modifiedHash 35 | }; 36 | } -------------------------------------------------------------------------------- /src/interceptors/docker/docker-interceptor.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import Docker from 'dockerode'; 3 | 4 | import { Interceptor } from ".."; 5 | import { HtkConfig } from '../../config'; 6 | 7 | import { DOCKER_CONTAINER_LABEL, restartAndInjectContainer } from './docker-commands'; 8 | import { 9 | isDockerAvailable, 10 | ensureDockerServicesRunning, 11 | deleteAllInterceptedDockerData 12 | } from './docker-interception-services'; 13 | 14 | export class DockerContainerInterceptor implements Interceptor { 15 | 16 | id: string = "docker-attach"; 17 | version: string = "1.0.0"; 18 | 19 | constructor( 20 | private config: HtkConfig 21 | ) {} 22 | 23 | private _docker: Docker | undefined; 24 | private getDocker() { 25 | if (!this._docker) { 26 | this._docker = new Docker(); 27 | } 28 | return this._docker; 29 | } 30 | 31 | async isActivable(): Promise { 32 | return isDockerAvailable(); 33 | } 34 | 35 | private _containersPromise: Promise | undefined; 36 | async getContainers() { 37 | if (!this._containersPromise) { 38 | // We cache the containers query 
whilst it's active, because this gets hit a lot, 39 | // usually directly in parallel by getMetadata and isActive, and this ensures 40 | // that concurrent calls all just run one lookup and use the same result. 41 | this._containersPromise = this.getDocker().listContainers({ 42 | all: true 43 | }).finally(() => { this._containersPromise = undefined; }); 44 | } 45 | return this._containersPromise; 46 | } 47 | 48 | async getMetadata() { 49 | if (!await this.isActivable()) return; 50 | 51 | return { 52 | targets: _(await this.getContainers()).map((containerData) => ({ 53 | // Keep the docker data structure, but normalize the key names and filter 54 | // to just the relevant data, just to make sure we don't unnecessarily 55 | // expose secrets or similar. 56 | id: containerData.Id, 57 | names: containerData.Names, 58 | command: containerData.Command, 59 | labels: containerData.Labels ?? {}, 60 | state: containerData.State, 61 | status: containerData.Status, 62 | image: containerData.Image, 63 | ips: Object.values(containerData.NetworkSettings?.Networks ?? 
{}) 64 | .map(network => network.IPAddress) 65 | })) 66 | .keyBy('id') 67 | .valueOf() 68 | }; 69 | } 70 | 71 | async activate(proxyPort: number, options: { containerId: string }): Promise { 72 | const interceptionSettings = { 73 | interceptionType: 'mount', 74 | proxyPort, 75 | certContent: this.config.https.certContent, 76 | certPath: this.config.https.certPath, 77 | } as const; 78 | 79 | ensureDockerServicesRunning(proxyPort); 80 | await restartAndInjectContainer(this.getDocker(), options.containerId, interceptionSettings); 81 | } 82 | 83 | async isActive(proxyPort: number): Promise { 84 | if (!await this.isActivable()) return false; 85 | 86 | return Object.values((await this.getContainers())).some((target) => { 87 | target.Labels?.[DOCKER_CONTAINER_LABEL] === proxyPort.toString() 88 | }); 89 | } 90 | 91 | async deactivate(proxyPort: number): Promise { 92 | if (!await isDockerAvailable()) return; 93 | await deleteAllInterceptedDockerData(proxyPort); 94 | } 95 | 96 | async deactivateAll(): Promise { 97 | if (!await isDockerAvailable()) return; 98 | await deleteAllInterceptedDockerData('all'); 99 | } 100 | 101 | } -------------------------------------------------------------------------------- /src/interceptors/docker/docker-utils.ts: -------------------------------------------------------------------------------- 1 | import Docker from 'dockerode'; 2 | 3 | export const waitForDockerStream = ( 4 | docker: Docker, 5 | stream: NodeJS.ReadableStream 6 | ) => new Promise((resolve, reject) => { 7 | docker.modem.followProgress(stream, ( 8 | err: Error | null, 9 | stream: Array<{ error?: string }> 10 | ) => { 11 | if (err) reject(err); 12 | 13 | const firstError = stream.find((msg) => !!msg.error); 14 | if (firstError) reject(new Error(firstError.error)); 15 | 16 | resolve(); 17 | }); 18 | }); 19 | 20 | export const getDockerAddress = async (docker: Docker): Promise< 21 | | { socketPath: string } 22 | | { host: string, port: number } 23 | > => { 24 | // Hacky logic to 
reuse docker-modem's internal env + OS parsing logic to 25 | // work out where the local Docker host is: 26 | const modem = docker.modem as any as ({ 27 | getSocketPath(): undefined | Promise; 28 | host: string; 29 | port: number; 30 | }); 31 | 32 | const modemSocketPath = await modem.getSocketPath(); 33 | return modemSocketPath 34 | ? { socketPath: modemSocketPath } 35 | : { host: modem.host, port: modem.port }; 36 | } -------------------------------------------------------------------------------- /src/interceptors/frida/frida-android-interceptor.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as stream from 'stream'; 3 | import * as FridaJs from 'frida-js'; 4 | 5 | import { Interceptor } from ".."; 6 | import { HtkConfig } from '../../config'; 7 | 8 | import { createAdbClient } from '../android/adb-commands'; 9 | import { cleanupOldFridaServers, FridaHost, FridaTarget, killProcess } from './frida-integration'; 10 | import { 11 | getAndroidFridaHosts, 12 | getAndroidFridaTargets, 13 | interceptAndroidFridaTarget, 14 | launchAndroidHost, 15 | setupAndroidHost 16 | } from './frida-android-integration'; 17 | import { combineParallelCalls } from '@httptoolkit/util'; 18 | 19 | export class FridaAndroidInterceptor implements Interceptor { 20 | 21 | id: string = "android-frida"; 22 | version: string = "1.0.0"; 23 | 24 | private adbClient = createAdbClient(); 25 | 26 | constructor( 27 | private config: HtkConfig 28 | ) {} 29 | 30 | getFridaHosts = combineParallelCalls(() => getAndroidFridaHosts(this.adbClient)); 31 | 32 | async isActivable(): Promise { 33 | return Object.keys(await this.getFridaHosts()).length > 0; 34 | } 35 | 36 | isActive(): boolean { 37 | return false; 38 | } 39 | 40 | async getMetadata(): Promise<{ hosts: Record }> { 41 | const fridaHosts = await this.getFridaHosts(); 42 | return { 43 | hosts: fridaHosts 44 | }; 45 | } 46 | 47 | async getSubMetadata(hostId: string): 
Promise<{ targets: Array }> { 48 | return { 49 | targets: await getAndroidFridaTargets(this.adbClient, hostId) 50 | } 51 | } 52 | 53 | private fridaServers: { [proxyPort: number]: Array } = {}; 54 | private interceptedApps: { [proxyPort: number]: Array } = {}; 55 | 56 | async activate( 57 | proxyPort: number, 58 | options: 59 | | { action: 'setup', hostId: string } 60 | | { action: 'launch', hostId: string } 61 | | { action: 'intercept', hostId: string, targetId: string } 62 | ): Promise { 63 | if (options.action === 'setup') { 64 | await setupAndroidHost(this.config, this.adbClient, options.hostId); 65 | } else if (options.action === 'launch') { 66 | const fridaServer = await launchAndroidHost(this.adbClient, options.hostId); 67 | 68 | // Track this server stream, so we can close it to stop the server later 69 | this.fridaServers[proxyPort] = this.fridaServers[proxyPort] ?? []; 70 | this.fridaServers[proxyPort].push(fridaServer); 71 | fridaServer.on('close', () => { 72 | _.remove(this.fridaServers[proxyPort], fridaServer); 73 | }); 74 | } else if (options.action === 'intercept') { 75 | const fridaSession = await interceptAndroidFridaTarget( 76 | this.adbClient, 77 | options.hostId, 78 | options.targetId, 79 | this.config.https.certContent, 80 | proxyPort 81 | ); 82 | 83 | // Track this session, so we can close it to stop the interception later 84 | this.interceptedApps[proxyPort] = this.interceptedApps[proxyPort] ?? []; 85 | this.interceptedApps[proxyPort].push(fridaSession); 86 | } else { 87 | throw new Error(`Unknown Frida interception command: ${(options as any).action ?? '(none)'}`) 88 | } 89 | } 90 | 91 | async deactivate(proxyPort: number): Promise { 92 | this.fridaServers[proxyPort]?.forEach(serverStream => { 93 | serverStream.destroy(); 94 | }); 95 | 96 | const fridaSessions = this.interceptedApps[proxyPort] ?? 
[]; 97 | 98 | await Promise.all(fridaSessions.map((session) => 99 | killProcess(session).catch(() => {}) 100 | )); 101 | } 102 | 103 | async deactivateAll(): Promise { 104 | cleanupOldFridaServers(this.config).catch(console.warn); 105 | 106 | const allPorts = new Set([ 107 | ...Object.keys(this.fridaServers), 108 | ...Object.keys(this.interceptedApps) 109 | ]); 110 | 111 | await Promise.all( 112 | [...allPorts] 113 | .map(port => this.deactivate(Number(port))) 114 | ); 115 | } 116 | 117 | } -------------------------------------------------------------------------------- /src/interceptors/frida/frida-ios-interceptor.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import { UsbmuxClient } from 'usbmux-client'; 3 | import * as FridaJs from 'frida-js'; 4 | import { combineParallelCalls } from '@httptoolkit/util'; 5 | 6 | import { Interceptor } from ".."; 7 | import { HtkConfig } from '../../config'; 8 | 9 | import { FridaHost, FridaTarget, killProcess } from './frida-integration'; 10 | import { 11 | getIosFridaHosts, 12 | getIosFridaTargets, 13 | interceptIosFridaTarget 14 | } from './frida-ios-integration'; 15 | 16 | export class FridaIosInterceptor implements Interceptor { 17 | 18 | id: string = "ios-frida"; 19 | version: string = "1.0.0"; 20 | 21 | private usbmuxClient = new UsbmuxClient(); 22 | 23 | constructor( 24 | private config: HtkConfig 25 | ) {} 26 | 27 | getFridaHosts = combineParallelCalls(() => getIosFridaHosts(this.usbmuxClient)); 28 | 29 | async isActivable(): Promise { 30 | return Object.keys(await this.getFridaHosts()).length > 0; 31 | } 32 | 33 | isActive(): boolean { 34 | return false; 35 | } 36 | 37 | async getMetadata(): Promise<{ hosts: Record }> { 38 | const fridaHosts = await this.getFridaHosts(); 39 | return { 40 | hosts: fridaHosts 41 | }; 42 | } 43 | 44 | async getSubMetadata(hostId: string): Promise<{ targets: Array }> { 45 | return { 46 | targets: await 
getIosFridaTargets(this.usbmuxClient, hostId) 47 | } 48 | } 49 | 50 | private interceptedApps: { [proxyPort: number]: Array } = {}; 51 | 52 | async activate( 53 | proxyPort: number, 54 | options: 55 | | { action: 'intercept', hostId: string, targetId: string } 56 | ): Promise { 57 | if (options.action === 'intercept') { 58 | const fridaSession = await interceptIosFridaTarget( 59 | this.usbmuxClient, 60 | options.hostId, 61 | options.targetId, 62 | this.config.https.certContent, 63 | proxyPort 64 | ); 65 | 66 | // Track this session, so we can close it to stop the interception later 67 | this.interceptedApps[proxyPort] = this.interceptedApps[proxyPort] ?? []; 68 | this.interceptedApps[proxyPort].push(fridaSession); 69 | } else { 70 | throw new Error(`Unknown Frida interception command: ${(options as any).action ?? '(none)'}`) 71 | } 72 | } 73 | 74 | async deactivate(proxyPort: number): Promise { 75 | await Promise.all(this.interceptedApps[proxyPort]?.map(async fridaSession => { 76 | await killProcess(fridaSession).catch(() => {}); 77 | })); 78 | } 79 | 80 | async deactivateAll(): Promise { 81 | await Promise.all( 82 | Object.keys(this.interceptedApps) 83 | .map(port => this.deactivate(Number(port))) 84 | ); 85 | } 86 | 87 | } -------------------------------------------------------------------------------- /src/interceptors/frida/frida-scripts.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | 3 | import * as fs from '../../util/fs'; 4 | import { OVERRIDES_DIR } from '../terminal/terminal-env-overrides'; 5 | 6 | const FRIDA_SCRIPTS_ROOT = path.join(OVERRIDES_DIR, 'frida'); 7 | 8 | function buildFridaConfig( 9 | configScriptTemplate: string, 10 | caCertContent: string, 11 | proxyHost: string, 12 | proxyPort: number, 13 | portsToIgnore: number[] 14 | ) { 15 | return configScriptTemplate 16 | .replace(/(?<=const CERT_PEM = `)[^`]+(?=`)/s, caCertContent.trim()) 17 | .replace(/(?<=const PROXY_HOST = 
')[^']+(?=')/, proxyHost) 18 | .replace(/(?<=const PROXY_PORT = )\d+(?=;)/, proxyPort.toString()) 19 | .replace(/(?<=const IGNORED_NON_HTTP_PORTS = )\[\s*\](?=;)/s, JSON.stringify(portsToIgnore)); 20 | } 21 | 22 | export async function buildAndroidFridaScript( 23 | caCertContent: string, 24 | proxyHost: string, 25 | proxyPort: number, 26 | portsToIgnore: number[] 27 | ) { 28 | const scripts = await Promise.all([ 29 | fs.readFile(path.join(FRIDA_SCRIPTS_ROOT, 'config.js'), { encoding: 'utf8' }) 30 | .then((configTemplate) => 31 | buildFridaConfig(configTemplate, caCertContent, proxyHost, proxyPort, portsToIgnore) 32 | ), 33 | ...[ 34 | ['native-connect-hook.js'], 35 | ['native-tls-hook.js'], 36 | ['android', 'android-proxy-override.js'], 37 | ['android', 'android-system-certificate-injection.js'], 38 | ['android', 'android-certificate-unpinning.js'], 39 | ['android', 'android-certificate-unpinning-fallback.js'] 40 | ].map((hookRelPath) => 41 | fs.readFile(path.join(FRIDA_SCRIPTS_ROOT, ...hookRelPath), { encoding: 'utf8' }) 42 | ) 43 | ]); 44 | 45 | return scripts.join('\n'); 46 | } 47 | 48 | export async function buildIosFridaScript( 49 | caCertContent: string, 50 | proxyHost: string, 51 | proxyPort: number, 52 | portsToIgnore: number[] 53 | ) { 54 | const scripts = await Promise.all([ 55 | fs.readFile(path.join(FRIDA_SCRIPTS_ROOT, 'config.js'), { encoding: 'utf8' }) 56 | .then((configTemplate) => 57 | buildFridaConfig(configTemplate, caCertContent, proxyHost, proxyPort, portsToIgnore) 58 | ), 59 | ...[ 60 | ['ios', 'ios-connect-hook.js'], 61 | ['native-tls-hook.js'], 62 | ['native-connect-hook.js'], 63 | ].map((hookRelPath) => 64 | fs.readFile(path.join(FRIDA_SCRIPTS_ROOT, ...hookRelPath), { encoding: 'utf8' }) 65 | ) 66 | ]); 67 | 68 | return scripts.join('\n'); 69 | } 70 | 71 | export async function buildIpTestScript( 72 | ips: string[], 73 | proxyPort: number 74 | ) { 75 | const baseScript = await fs.readFile( 76 | path.join(FRIDA_SCRIPTS_ROOT, 'utilities', 
'test-ip-connectivity.js'), 77 | { encoding: 'utf8' } 78 | ); 79 | 80 | return baseScript.replace(/(?<=const IP_ADDRESSES_TO_TEST = )\[\s+\](?=;)/s, JSON.stringify(ips)) 81 | .replace(/(?<=const TARGET_PORT = )0(?=;)/, proxyPort.toString()); 82 | } -------------------------------------------------------------------------------- /src/interceptors/index.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import { ErrorLike } from '@httptoolkit/util'; 3 | 4 | import { HtkConfig } from '../config'; 5 | import { addShutdownHandler } from '../shutdown'; 6 | 7 | import { FreshFirefox, FreshFirefoxDeveloper, FreshFirefoxNightly } from './fresh-firefox'; 8 | import { 9 | FreshChrome, 10 | ExistingChrome, 11 | FreshChromeBeta, 12 | FreshChromeCanary, 13 | FreshChromeDev, 14 | FreshChromium, 15 | ExistingChromium, 16 | FreshChromiumDev, 17 | FreshEdge, 18 | FreshEdgeBeta, 19 | FreshEdgeDev, 20 | FreshEdgeCanary, 21 | FreshBrave, 22 | FreshOpera 23 | } from './chromium-based-interceptors'; 24 | import { FreshTerminalInterceptor } from './terminal/fresh-terminal-interceptor'; 25 | import { ExistingTerminalInterceptor } from './terminal/existing-terminal-interceptor'; 26 | import { AndroidAdbInterceptor } from './android/android-adb-interceptor'; 27 | import { DockerContainerInterceptor } from './docker/docker-interceptor'; 28 | import { ElectronInterceptor } from './electron'; 29 | import { JvmInterceptor } from './jvm'; 30 | import { FridaAndroidInterceptor } from './frida/frida-android-interceptor'; 31 | import { FridaIosInterceptor } from './frida/frida-ios-interceptor'; 32 | 33 | export interface Interceptor { 34 | id: string; 35 | version: string; 36 | 37 | getMetadata?(type: 'summary' | 'detailed'): Promise; 38 | getSubMetadata?(subId: string): Promise; 39 | 40 | isActivable(): Promise; 41 | activableTimeout?: number; 42 | 43 | isActive(proxyPort: number): Promise | boolean; 44 | 45 | activate(proxyPort: 
number, options?: any): Promise; 46 | 47 | deactivate(proxyPort: number, options?: any): Promise; 48 | deactivateAll(): Promise; 49 | } 50 | 51 | export interface ActivationError extends ErrorLike { 52 | /** 53 | * Activation errors can have an extra `metadata` field, to share data with the 54 | * client which attempted the activation, e.g. whether it can be retried. 55 | */ 56 | metadata?: any; 57 | 58 | /** 59 | * Errors should be thrown with reportable set to `false` if they're a 'normal' 60 | * event that shouldn't be logged or exposed to the user. For example, if it's a 61 | * temporary failure that will lead to a confirmation flow or similar. 62 | * 63 | * This disables error logging and reporting of failure details from the API 64 | * (it assumes that the metadata will expose any info required, since this is a 65 | * recognized failure case). 66 | */ 67 | reportable?: boolean; 68 | } 69 | 70 | export function buildInterceptors(config: HtkConfig): _.Dictionary { 71 | const interceptors = [ 72 | new FreshChrome(config), 73 | new ExistingChrome(config), 74 | new FreshChromeBeta(config), 75 | new FreshChromeDev(config), 76 | new FreshChromeCanary(config), 77 | 78 | new FreshChromium(config), 79 | new ExistingChromium(config), 80 | new FreshChromiumDev(config), 81 | 82 | new FreshEdge(config), 83 | new FreshEdgeBeta(config), 84 | new FreshEdgeDev(config), 85 | new FreshEdgeCanary(config), 86 | 87 | new FreshOpera(config), 88 | new FreshBrave(config), 89 | new FreshFirefox(config), 90 | new FreshFirefoxDeveloper(config), 91 | new FreshFirefoxNightly(config), 92 | 93 | new FreshTerminalInterceptor(config), 94 | new ExistingTerminalInterceptor(config), 95 | 96 | new ElectronInterceptor(config), 97 | 98 | new AndroidAdbInterceptor(config), 99 | new FridaAndroidInterceptor(config), 100 | new FridaIosInterceptor(config), 101 | 102 | new JvmInterceptor(config), 103 | new DockerContainerInterceptor(config) 104 | ]; 105 | 106 | // When the server exits, try to shut down 
the interceptors too 107 | addShutdownHandler(() => shutdownInterceptors(interceptors)); 108 | 109 | const interceptorIndex = _.keyBy(interceptors, (interceptor) => interceptor.id); 110 | 111 | if (Object.keys(interceptorIndex).length !== interceptors.length) { 112 | throw new Error('Duplicate interceptor id'); 113 | } 114 | 115 | return interceptorIndex; 116 | } 117 | 118 | function shutdownInterceptors(interceptors: Interceptor[]) { 119 | return Promise.all(interceptors.map(i => i.deactivateAll())); 120 | } -------------------------------------------------------------------------------- /src/interceptors/terminal/existing-terminal-interceptor.ts: -------------------------------------------------------------------------------- 1 | 2 | import { Mockttp, getLocal } from 'mockttp'; 3 | 4 | import { Interceptor } from '..'; 5 | import { HtkConfig } from '../../config'; 6 | import { getTerminalEnvVars } from './terminal-env-overrides'; 7 | import { getBashShellScript, getFishShellScript, getGitBashShellScript, getPowerShellScript } from './terminal-scripts'; 8 | 9 | interface ServerState { 10 | server: Mockttp; 11 | isActive: boolean; 12 | } 13 | 14 | type ShellDefinition = { command: string, description: string }; 15 | 16 | const getBashDefinition = (port: number) => ({ 17 | description: "Bash-compatible", 18 | command: `eval "$(curl -sS localhost:${port}/setup)"` 19 | }); 20 | 21 | const getGitBashDefinition = (port: number) => ({ 22 | description: "Git Bash", 23 | command: `eval "$(curl -sS localhost:${port}/gb-setup)"` 24 | }); 25 | 26 | const getFishDefinition = (port: number) => ({ 27 | description: "Fish", 28 | command: `curl -sS localhost:${port}/fish-setup | source` 29 | }); 30 | 31 | const getPowershellDefinition = (port: number) => ({ 32 | description: "Powershell", 33 | command: `Invoke-Expression (Invoke-WebRequest http://localhost:${port}/ps-setup).Content` 34 | }); 35 | 36 | function getShellCommands(port: number): { [shellName: string]: ShellDefinition 
} { 37 | if (process.platform === 'win32') { 38 | return { 39 | 'Powershell': getPowershellDefinition(port), 40 | 'Git Bash': getGitBashDefinition(port) 41 | } 42 | } else { 43 | return { 44 | 'Bash': getBashDefinition(port), 45 | 'Fish': getFishDefinition(port), 46 | 'Powershell': getPowershellDefinition(port) 47 | }; 48 | } 49 | } 50 | 51 | export class ExistingTerminalInterceptor implements Interceptor { 52 | 53 | private servers: { 54 | [proxyPort: number]: ServerState 55 | } = {}; 56 | 57 | id = 'existing-terminal'; 58 | version = '1.0.0'; 59 | 60 | constructor(private config: HtkConfig) { } 61 | 62 | isActivable(): Promise { 63 | return Promise.resolve(true); 64 | } 65 | 66 | isActive(proxyPort: number): boolean { 67 | return this.servers[proxyPort]?.isActive ?? false; 68 | } 69 | 70 | async activate(proxyPort: number): Promise<{ port: number, commands: { [shellName: string]: ShellDefinition } }> { 71 | if (this.servers[proxyPort]) { 72 | // Reset isActive, so we wait again for a new request 73 | this.servers[proxyPort].isActive = false; 74 | const serverPort = this.servers[proxyPort].server.port; 75 | return { 76 | port: serverPort, 77 | commands: getShellCommands(serverPort) 78 | }; 79 | } 80 | 81 | const server = getLocal(); 82 | await server.start({ startPort: proxyPort + 1, endPort: 65535 }); 83 | 84 | const serverState = { server, isActive: false }; 85 | 86 | const posixEnvVars = getTerminalEnvVars(proxyPort, this.config.https, 'posix-runtime-inherit'); 87 | 88 | // Endpoints for each of the various setup scripts: 89 | await server.forGet('/setup').thenReply(200, 90 | getBashShellScript(server.urlFor('/success'), posixEnvVars), 91 | { "content-type": "text/x-shellscript" } 92 | ); 93 | await server.forGet('/gb-setup').thenReply(200, 94 | getGitBashShellScript(server.urlFor('/success'), posixEnvVars), 95 | { "content-type": "text/x-shellscript" } 96 | ); 97 | await server.forGet('/fish-setup').thenReply(200, 98 | 
getFishShellScript(server.urlFor('/success'), posixEnvVars), 99 | { "content-type": "application/x-fish" } 100 | ); 101 | 102 | const powerShellEnvVars = getTerminalEnvVars(proxyPort, this.config.https, 'powershell-runtime-inherit'); 103 | await server.forGet('/ps-setup').thenReply(200, 104 | getPowerShellScript(server.urlFor('/success'), powerShellEnvVars), 105 | { "content-type": "text/plain" } 106 | ); 107 | 108 | // A success endpoint, so we can mark this as active (which provides some helpful UX on the frontend) 109 | await server.forPost('/success').thenCallback(() => { 110 | serverState.isActive = true; 111 | return { status: 200 }; 112 | }); 113 | 114 | this.servers[proxyPort] = serverState; 115 | return { 116 | port: server.port, 117 | commands: getShellCommands(server.port) 118 | }; 119 | } 120 | 121 | async deactivate(proxyPort: number): Promise { 122 | if (this.servers[proxyPort]) { 123 | await this.servers[proxyPort].server.stop(); 124 | delete this.servers[proxyPort]; 125 | } 126 | } 127 | 128 | deactivateAll(): Promise { 129 | return Promise.all( 130 | Object.keys(this.servers).map((port) => 131 | this.deactivate(parseInt(port, 10)) 132 | ) 133 | ).then(() => {}); 134 | } 135 | 136 | } -------------------------------------------------------------------------------- /src/message-server.ts: -------------------------------------------------------------------------------- 1 | import { getLocal, Mockttp } from 'mockttp'; 2 | import { getDeferred } from '@httptoolkit/util'; 3 | 4 | import { HtkConfig } from './config'; 5 | import { EPHEMERAL_PORT_RANGE } from './constants'; 6 | 7 | export class MessageServer { 8 | 9 | constructor( 10 | private config: HtkConfig, 11 | private message: string 12 | ) { } 13 | 14 | private server: Mockttp | undefined; 15 | 16 | private messageSeen = getDeferred(); 17 | 18 | async start() { 19 | this.server = getLocal({ https: this.config.https, cors: true }); 20 | await this.server.start(EPHEMERAL_PORT_RANGE); 21 | 22 | await 
this.server.forGet('/') 23 | .thenCallback(() => { 24 | console.log('Request to message server received'); 25 | this.messageSeen.resolve(); 26 | 27 | return { 28 | statusCode: 200, 29 | body: ` 30 | 31 | HTTP Toolkit 32 | 33 | 34 | 58 | 59 | 70 | 71 | 80 | 81 | 82 | 83 |

84 | ${this.message} 85 |

86 | 87 | 88 | 89 | ` 90 | }; 91 | }); 92 | } 93 | 94 | get host(): string { 95 | return this.server!.url 96 | .replace('https://', ''); 97 | } 98 | 99 | get url(): string { 100 | return this.server!.url.replace('https://', 'http://'); 101 | } 102 | 103 | async waitForSuccess(): Promise { 104 | await this.messageSeen.promise; 105 | } 106 | 107 | async stop() { 108 | if (this.server) { 109 | await this.server.stop(); 110 | this.server = undefined; 111 | } 112 | } 113 | } -------------------------------------------------------------------------------- /src/shutdown.ts: -------------------------------------------------------------------------------- 1 | import { delay } from '@httptoolkit/util'; 2 | import { logError } from './error-tracking'; 3 | 4 | type ShutdownHandler = () => Promise; 5 | const shutdownHandlers: Array = []; 6 | 7 | export function registerShutdownHandler() { 8 | process.on('SIGTERM', () => shutdown(0, 'SIGTERM')); 9 | process.on('SIGINT', () => shutdown(0, 'SIGINT')); 10 | } 11 | 12 | export function addShutdownHandler(handler: ShutdownHandler) { 13 | shutdownHandlers.push(handler); 14 | } 15 | 16 | export async function shutdown(code: number, cause: string) { 17 | console.log(`Shutting down after ${cause}...`); 18 | 19 | const shutdownPromises = Promise.all(shutdownHandlers.map( 20 | async (handler) => { 21 | try { 22 | await handler(); 23 | } catch (e) { 24 | logError(e); 25 | } 26 | } 27 | )); 28 | 29 | await Promise.race([ 30 | shutdownPromises, 31 | delay(3000) // After 3 seconds, we just close anyway, we're done. 
32 | ]); 33 | 34 | process.exit(code); 35 | } 36 | 37 | -------------------------------------------------------------------------------- /src/util/fs.ts: -------------------------------------------------------------------------------- 1 | import { promisify } from 'util'; 2 | import * as fs from 'fs'; 3 | import * as path from 'path'; 4 | import * as tmp from 'tmp'; 5 | import rimraf from 'rimraf'; 6 | import { lookpath } from 'lookpath'; 7 | import { isErrorLike } from '@httptoolkit/util'; 8 | 9 | export const statFile = fs.promises.stat; 10 | export const readFile = fs.promises.readFile; 11 | export const readDir = fs.promises.readdir; 12 | export const readLink = fs.promises.readlink; 13 | export const deleteFile = fs.promises.unlink; 14 | export const deleteFolder = promisify(rimraf); 15 | export const checkAccess = fs.promises.access; 16 | export const chmod = fs.promises.chmod; 17 | export const mkDir = fs.promises.mkdir; 18 | export const writeFile = fs.promises.writeFile; 19 | export const copyFile = fs.promises.copyFile; 20 | export const appendOrCreateFile = fs.promises.appendFile; 21 | 22 | export const createReadStream = fs.createReadStream; 23 | export const createWriteStream = fs.createWriteStream; 24 | 25 | export const copyRecursive = async (from: string, to: string) => { 26 | // fs.cp is only available in Node 16.7.0+ 27 | if (!fs.cp) throw new Error("fs.cp not available"); 28 | 29 | return new Promise((resolve, reject) => { 30 | fs.cp(from, to, { recursive: true }, (err) => { 31 | if (err) reject(err); 32 | else resolve(); 33 | }); 34 | }); 35 | }; 36 | 37 | export const canAccess = (path: string) => checkAccess(path).then(() => true).catch(() => false); 38 | 39 | // Takes a path, follows any links present (if possible) until we reach a non-link file. This 40 | // does *not* check that the final path is accessible - it just removes any links en route. 41 | // This will return undefined if a target path does not resolve at all. 
42 | export const getRealPath = async (targetPath: string): Promise => { 43 | while (true) { 44 | try { 45 | const linkTarget = await readLink(targetPath); 46 | // Links are often relative, so we need to resolve them against the link parent directory: 47 | targetPath = path.resolve(path.dirname(targetPath), linkTarget); 48 | } catch (e: any) { 49 | // Target file does not exist: 50 | if (e.code === 'ENOENT') return undefined; 51 | // Not a link, or some other error: 52 | else return targetPath; 53 | } 54 | } 55 | }; 56 | 57 | export const ensureDirectoryExists = (path: string) => 58 | checkAccess(path).catch(() => mkDir(path, { recursive: true })); 59 | 60 | export const resolveCommandPath = (path: string): Promise => 61 | lookpath(path); 62 | 63 | export const commandExists = (path: string): Promise => 64 | resolveCommandPath(path).then((result) => result !== undefined); 65 | 66 | export const createTmp = (options: tmp.Options = {}) => new Promise<{ 67 | path: string, 68 | fd: number, 69 | cleanupCallback: () => void 70 | }>((resolve, reject) => { 71 | tmp.file(options, (err, path, fd, cleanupCallback) => { 72 | if (err) return reject(err); 73 | resolve({ path, fd, cleanupCallback }); 74 | }); 75 | }); 76 | 77 | export const moveFile = async (oldPath: string, newPath: string) => { 78 | try { 79 | await fs.promises.rename(oldPath, newPath); 80 | } catch (e) { 81 | if (isErrorLike(e) && e.code === 'EXDEV') { 82 | // Cross-device - can't rename files across partitions etc. 83 | // In that case, we fallback to copy then delete: 84 | await copyFile(oldPath, newPath); 85 | await deleteFile(oldPath); 86 | } else { 87 | // Any other failure is unexpected - don't swallow it silently: 88 | throw e; 89 | } 90 | } 91 | }; -------------------------------------------------------------------------------- /src/util/http.ts: -------------------------------------------------------------------------------- 1 | // In some places (Docker proxy) we use req.rawHeaders plus this logic to capture a 2 | // usable header object that doesn't normalize headers to e.g.
combine duplicates and 3 | // lowercase names. 4 | export function rawHeadersToHeaders(rawHeaders: string[]) { 5 | return rawHeaders.reduce((result, next, i) => { 6 | if (i % 2 === 0) { 7 | const existingValue = result[next]; 8 | if (typeof existingValue === 'string') { 9 | result[next] = [existingValue]; 10 | } 11 | } else { 12 | const key = rawHeaders[i - 1]; 13 | const existingValue = result[key]; 14 | if (Array.isArray(existingValue)) { 15 | existingValue.push(next); 16 | } else { 17 | result[key] = next; 18 | } 19 | } 20 | return result; 21 | }, {} as { [key: string]: string | string[] | undefined }); 22 | } -------------------------------------------------------------------------------- /src/util/network.ts: -------------------------------------------------------------------------------- 1 | import * as _ from 'lodash'; 2 | import * as os from 'os'; 3 | 4 | export function getReachableInterfaces() { 5 | return _.flatMap(os.networkInterfaces(), (addresses, iface) => 6 | (addresses || []) 7 | .filter(a => 8 | !a.internal && // Loopback interfaces 9 | iface !== 'docker0' && // Docker default bridge interface 10 | !iface.startsWith('br-') && // More docker bridge interfaces 11 | !iface.startsWith('veth') // Virtual interfaces for each docker container 12 | ) 13 | ) 14 | } -------------------------------------------------------------------------------- /src/util/promise.ts: -------------------------------------------------------------------------------- 1 | import { CustomError, delay } from '@httptoolkit/util'; 2 | 3 | // Repeatedly run `test` (up to `tries` times, `delayMs` apart) until it returns a truthy value: 4 | export async function waitUntil<T>( 5 | delayMs: number, 6 | tries: number, 7 | test: () => Promise<T | false> 8 | ): Promise<Exclude<T, false>> { 9 | // Remember the configured try count - `tries` itself is decremented below: 10 | const totalTries = tries; 11 | let result = tries > 0 && await test() 12 | 13 | while (tries > 0 && !result) { 14 | tries = tries - 1; 15 | await delay(delayMs); 16 | result = await test(); 17 | } 18 | 19 | if (!result) { 20 | // Report the configured try count, not the loop counter (which is always 0 here): 21 | throw new CustomError(`Wait loop failed after ${totalTries} retries`, { 22 | code: 'wait-loop-failed' 23 | }); 24 | } 25 | else return result
as Exclude<T, false>; 26 | } 27 | 28 | export class TimeoutError extends CustomError { 29 | constructor() { 30 | super('Timeout', { code: 'timeout' }); 31 | } 32 | } 33 | 34 | export async function withTimeout<T>( 35 | timeoutMs: number, 36 | promise: Promise<T> 37 | ) { 38 | return Promise.race([ 39 | promise, 40 | delay(timeoutMs, { unref: true }) 41 | .then(() => { throw new TimeoutError(); }) 42 | ]); 43 | } -------------------------------------------------------------------------------- /src/util/snap.ts: -------------------------------------------------------------------------------- 1 | import os = require('os'); 2 | import path = require('path'); 3 | 4 | import fs = require('./fs'); 5 | import { streamToBuffer } from './stream'; 6 | 7 | export async function isSnap(bin: string) { 8 | if (os.platform() !== 'linux') return false; 9 | 10 | const binPath = await fs.resolveCommandPath(bin); 11 | if (!binPath) { 12 | throw new Error(`Can't resolve command ${bin}`); 13 | } 14 | 15 | // Most snaps directly run from the Snap bin folder: 16 | if (binPath.startsWith('/snap/bin/')) return true; 17 | 18 | // If not, the command might be a wrapper script - both chromium-browser 19 | // & firefox use these. Check the end and see if we recognize it: 20 | 21 | const fileSize = await fs.statFile(binPath); 22 | const stream = fs.createReadStream(binPath, { 23 | start: Math.max(fileSize.size - 100, 0) 24 | }); 25 | const lastChunkOfFile = (await streamToBuffer(stream)).toString('utf8'); 26 | 27 | return lastChunkOfFile.includes('exec /snap/bin/'); 28 | } 29 | 30 | // For all Snaps, any data we want to inject needs to live inside the 31 | // Snap's data directory - we put it in a .httptoolkit folder.
32 | export async function getSnapConfigPath(appName: string) { 33 | const snapDataPath = path.join( 34 | os.homedir(), 35 | 'snap', 36 | appName, 37 | 'current' 38 | ); 39 | 40 | if (!await fs.canAccess(snapDataPath)) { 41 | throw new Error(`Could not find Snap data path for ${appName}`); 42 | } 43 | 44 | return path.join(snapDataPath, '.httptoolkit'); 45 | } -------------------------------------------------------------------------------- /src/util/stream.ts: -------------------------------------------------------------------------------- 1 | import * as stream from 'stream'; 2 | 3 | export function streamToArray(input: stream.Readable) { 4 | return new Promise((resolve, reject) => { 5 | const chunks: T[] = []; 6 | input.on('data', (d) => chunks.push(d)); 7 | input.on('end', () => resolve(chunks)); 8 | input.on('error', reject); 9 | }); 10 | }; 11 | 12 | export async function streamToBuffer(input: stream.Readable) { 13 | const chunks = await streamToArray(input); 14 | return Buffer.concat(chunks); 15 | }; -------------------------------------------------------------------------------- /src/webextension.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as os from 'os'; 3 | 4 | import { deleteFile, mkDir, writeFile, copyRecursive, deleteFolder } from "./util/fs"; 5 | import { addShutdownHandler } from './shutdown'; 6 | 7 | import { OVERRIDES_DIR } from './interceptors/terminal/terminal-env-overrides'; 8 | 9 | const WEBEXTENSION_BASE_PATH = path.join(OVERRIDES_DIR, 'webextension'); 10 | 11 | // We copy the WebExtension to a temp directory the first time it's activated, so that we can 12 | // modify the config folder within to easily inject config into the extension. Without this, 13 | // the extension is usually in the app install directory, which is often not user-writable. 
14 | // We only make one copy for all sessions, but we later inject independent per-session 15 | // config files, so they can behave independently. 16 | export let WEBEXTENSION_INSTALL: { 17 | path: string; 18 | configPath: string; 19 | } | undefined; 20 | async function ensureWebExtensionInstalled() { 21 | if (WEBEXTENSION_INSTALL) return; // No-op after the first install 22 | else { 23 | const tmpDir = os.tmpdir(); 24 | 25 | const webExtensionPath = path.join(tmpDir, 'httptoolkit-webextension'); 26 | const configPath = path.join(webExtensionPath, 'config'); 27 | 28 | await copyRecursive(WEBEXTENSION_BASE_PATH, webExtensionPath); 29 | await mkDir(configPath).catch((e: any) => { 30 | if (e.code === 'EEXIST') return; // Already exists, no problem 31 | else throw e; 32 | }); 33 | 34 | WEBEXTENSION_INSTALL = { path: webExtensionPath, configPath }; 35 | console.log(`Webextension installed at ${WEBEXTENSION_INSTALL.path}`); 36 | } 37 | } 38 | 39 | // On shutdown, we delete the webextension install again. 40 | addShutdownHandler(async () => { 41 | if (WEBEXTENSION_INSTALL) { 42 | console.log(`Uninstalling webextension from ${WEBEXTENSION_INSTALL.path}`); 43 | await deleteFolder(WEBEXTENSION_INSTALL.path); 44 | WEBEXTENSION_INSTALL = undefined; 45 | } 46 | }); 47 | 48 | interface WebExtensionConfig { // Should match config in the WebExtension itself 49 | mockRtc: { 50 | peerId: string; 51 | adminBaseUrl: string; 52 | } | false; 53 | } 54 | 55 | const getConfigKey = (proxyPort: number) => 56 | `127_0_0_1.${proxyPort}`; // Filename-safe proxy address 57 | 58 | const getConfigPath = (proxyPort: number) => 59 | path.join(WEBEXTENSION_INSTALL!.configPath, getConfigKey(proxyPort)); 60 | 61 | export function clearWebExtensionConfig(httpProxyPort: number) { 62 | if (!WEBEXTENSION_INSTALL) return; 63 | 64 | return deleteFile(getConfigPath(httpProxyPort)) 65 | .catch(() => {}); // We ignore errors - nothing we can do, not very important. 
66 | } 67 | 68 | export async function updateWebExtensionConfig( 69 | sessionId: string, 70 | httpProxyPort: number, 71 | webRTCEnabled: boolean 72 | ) { 73 | if (webRTCEnabled) { 74 | await ensureWebExtensionInstalled(); 75 | 76 | const adminBaseUrl = `http://internal.httptoolkit.localhost:45456/session/${sessionId}`; 77 | await writeConfig(httpProxyPort, { 78 | mockRtc: { 79 | peerId: 'matching-peer', 80 | adminBaseUrl 81 | } 82 | }); 83 | } else { 84 | if (WEBEXTENSION_INSTALL) { 85 | // If the extension is set up, but this specific session has it disabled, we 86 | // make the config explicitly disable it, just to be clear: 87 | await writeConfig(httpProxyPort, { mockRtc: false }); 88 | } 89 | } 90 | } 91 | 92 | async function writeConfig(proxyPort: number, config: WebExtensionConfig) { 93 | return writeFile(getConfigPath(proxyPort), JSON.stringify(config)); 94 | } -------------------------------------------------------------------------------- /test/distributables-test/unix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ls -la * 6 | 7 | tar -xzf ./distributables/v*/httptoolkit-server-*-$1-$2.tar.gz 8 | 9 | echo "\nStarting server..." 10 | 11 | ./httptoolkit-server/bin/httptoolkit-server start & 12 | SERVER_PID=$! 13 | 14 | echo "Waiting for server..." 15 | for i in {1..30}; do 16 | if curl -s http://127.0.0.1:45456/ >/dev/null 2>&1; then 17 | echo "Server is up" 18 | break 19 | fi 20 | sleep 1 21 | done 22 | 23 | echo "\nTesting server..." 24 | 25 | # CSRF protection fully blocks unrecognized/missing origin requests: 26 | CURL_OPTIONS="--silent --fail -i" 27 | WITH_ORIGIN="-HOrigin: https://app.httptoolkit.tech" 28 | AS_JSON="-HContent-Type: application/json" 29 | 30 | echo "\nCan start a Mockttp server"? 
31 | # Uses the default config from the UI: 32 | curl $CURL_OPTIONS "$WITH_ORIGIN" "$AS_JSON" 'http://127.0.0.1:45456/start' \ 33 | --data '{"plugins":{"http":{"options":{"cors":false,"suggestChanges":false,"http2":"fallback","https":{"tlsPassthrough":[]}}},"webrtc":{}}}' 34 | 35 | # N.b. echo -e is required: without it bash's builtin echo prints '\n' literally 36 | echo -e "\nCan query the API server version?" 37 | curl $CURL_OPTIONS "$WITH_ORIGIN" http://127.0.0.1:45457/version 38 | 39 | echo -e "\nCan get config?" 40 | curl $CURL_OPTIONS "$WITH_ORIGIN" http://127.0.0.1:45457/config 41 | 42 | echo -e "\nCan query interceptors?" 43 | curl $CURL_OPTIONS "$WITH_ORIGIN" http://127.0.0.1:45457/interceptors 44 | 45 | echo -e "\nCan trigger update?" 46 | # (can't test that it actually updates, unfortunately) 47 | curl $CURL_OPTIONS "$WITH_ORIGIN" -X POST http://127.0.0.1:45457/update 48 | 49 | # ^ This will fail if they receive anything but a 200 result. 50 | # This ensures that the server is startable, and has minimal functionality for launch. -------------------------------------------------------------------------------- /test/distributables-test/windows.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | REM Extract the tarball. Tar doesn't support wildcards on windows, DIR only supports one wildcard, so we have to do this mess: 4 | cd distributables\v*\ 5 | set GET_TAR="dir /b httptoolkit-server-*-win32-x64.tar.gz" 6 | FOR /F "tokens=*" %%i IN (' %GET_TAR% ') DO SET TAR_PATH=%%i 7 | 8 | tar -xvzf %TAR_PATH% 9 | 10 | echo: 11 | echo: 12 | echo Starting server... 13 | 14 | START "server" /b .\httptoolkit-server\bin\httptoolkit-server start 15 | 16 | echo Waiting for server... 17 | FOR /L %%i IN (1,1,30) DO ( 18 | curl -s http://127.0.0.1:45456/ >NUL 2>&1 19 | IF NOT ERRORLEVEL 1 ( 20 | echo Server is up 21 | goto :serverup 22 | ) 23 | timeout /t 1 /nobreak >NUL 24 | ) 25 | echo Server failed to start 26 | goto :error 27 | 28 | :serverup 29 | 30 | echo: 31 | echo: 32 | echo Testing server...
33 | 34 | REM --silent (no progress), --fail on errors, --include headers in logs 35 | set CURL_OPTIONS="-sfi" 36 | REM CSRF protection fully blocks unrecognized/missing origin requests: 37 | set WITH_ORIGIN="-HOrigin: https://app.httptoolkit.tech" 38 | set AS_JSON="-HContent-Type: application/json" 39 | 40 | echo: 41 | echo: 42 | echo Can start a Mockttp server? 43 | REM Uses the default config from the UI: 44 | curl %CURL_OPTIONS% %WITH_ORIGIN% %AS_JSON% -X POST "http://127.0.0.1:45456/start" ^ 45 | --data "{\"plugins\":{\"http\":{\"options\":{\"cors\":false,\"suggestChanges\":false,\"http2\":\"fallback\",\"https\":{\"tlsPassthrough\":[]}}},\"webrtc\":{}}}" ^ 46 | || goto :error 47 | 48 | echo: 49 | echo: 50 | echo Can query the API server version? 51 | curl %CURL_OPTIONS% %WITH_ORIGIN% http://127.0.0.1:45457/version || goto :error 52 | 53 | echo: 54 | echo: 55 | echo Can get config? 56 | curl %CURL_OPTIONS% %WITH_ORIGIN% http://127.0.0.1:45457/config || goto :error 57 | 58 | echo: 59 | echo: 60 | echo Can query interceptors? 61 | curl %CURL_OPTIONS% %WITH_ORIGIN% http://127.0.0.1:45457/interceptors || goto :error 62 | 63 | echo: 64 | echo: 65 | echo Can trigger update? 66 | REM (can't test that it actually updates, unfortunately) 67 | curl %CURL_OPTIONS% %WITH_ORIGIN% -X POST http://127.0.0.1:45457/update || goto :error 68 | 69 | REM ^ This will fail if they receive anything but a 200 result. 70 | REM This ensures that the server is startable, and has minimal functionality for launch. 71 | 72 | goto :success 73 | 74 | :error 75 | set err=%errorlevel% 76 | 77 | taskkill /FI "WindowTitle eq server*" /T /F 78 | echo Test failed with error #%err%. 79 | exit /b %err% 80 | 81 | :success 82 | echo All good.
83 | 84 | REM Shut down by matching title passed to START to run in the background 85 | taskkill /FI "WindowTitle eq server*" /T /F || goto :success -------------------------------------------------------------------------------- /test/fixtures/docker/compose/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | RUN mkdir /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | COPY . . 7 | 8 | CMD node /usr/src/app/index.js 9 | -------------------------------------------------------------------------------- /test/fixtures/docker/compose/docker-compose.networks.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | # This compose file tests every network configuration I can think of, starting the same HTTP 4 | # app and checking self, remote & neighbour connectivity & interception. This is in addition 5 | # to the non-network compose file in this folder, which tests default docker-compose networks. 6 | 7 | services: 8 | host: 9 | build: . 10 | network_mode: 'host' 11 | 12 | none: 13 | build: . 14 | network_mode: 'none' 15 | environment: 16 | SKIP_REQUESTS: 'true' 17 | 18 | default-service-a: 19 | build: . 20 | network_mode: 'bridge' # Docker's default bridge network 21 | 22 | default-linked-service-b: 23 | build: . 24 | network_mode: 'bridge' # Docker's default bridge network 25 | environment: 26 | EXTRA_TARGET: 'http://a:9876' 27 | # Links are deprecated, but should still work: 28 | links: 29 | - "default-service-a:a" 30 | 31 | extra-host-service: 32 | build: . 33 | extra_hosts: 34 | - 'custom.host.address.example:host-gateway' 35 | environment: 36 | EXTRA_TARGET: 'http://custom.host.address.example:9876' # The host container 37 | 38 | multi-network-a: 39 | build: . 
40 | networks: 41 | - custom_net_1 42 | - custom_net_2 43 | extra_hosts: 44 | - 'host.docker.internal:host-gateway' 45 | environment: 46 | EXTRA_TARGET: 'http://host.docker.internal:9876' # The host container 47 | 48 | multi-network-b: 49 | build: . 50 | networks: 51 | - custom_net_2 52 | - custom_net_3 53 | environment: 54 | EXTRA_TARGET: 'http://multi-network-a:9876' # Container on some shared networks 55 | 56 | networks: 57 | custom_net_1: 58 | custom_net_2: 59 | custom_net_3: -------------------------------------------------------------------------------- /test/fixtures/docker/compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | service-a: 5 | build: . 6 | environment: 7 | - EXTRA_TARGET=http://service-b:9876 8 | service-b: 9 | build: . 10 | environment: 11 | - EXTRA_TARGET=http://service-a:9876 -------------------------------------------------------------------------------- /test/fixtures/docker/compose/index.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const https = require('https'); 3 | 4 | // The 'none' network container can just shutdown immediately - we only want 5 | // to check it starts OK, but we'll clearly never be able to proxy anything. 6 | if (process.env.SKIP_REQUESTS) { 7 | console.log('Skipping'); 8 | process.exit(0); 9 | } 10 | 11 | const makeRequest = async (url) => { 12 | const sendRequest = url.startsWith('https') ? 
https.request : http.request; 13 | 14 | const request = sendRequest(url); 15 | request.end(); 16 | 17 | return new Promise((resolve, reject) => { 18 | request.on('response', resolve); 19 | request.on('error', reject); 20 | }); 21 | }; 22 | 23 | const getBody = (response) => new Promise((resolve, reject) => { 24 | let body = ""; 25 | response.on('data', (d) => { 26 | body = body + d; 27 | }); 28 | response.on('end', () => resolve(body)); 29 | response.on('error', reject); 30 | }); 31 | 32 | const SERVER_PORT = 9876; 33 | const OUR_HOSTNAME = process.env.HOSTNAME; 34 | 35 | const isOurHostname = (v) => v === OUR_HOSTNAME; 36 | const isNotOurHostname = (v) => v !== OUR_HOSTNAME; 37 | const is = (x) => (v) => v === x; 38 | 39 | const TARGETS = [ 40 | // Can we remotely resolve our own loopback address? 41 | [`http://localhost:${SERVER_PORT}/`, isOurHostname], 42 | [`http://127.0.0.1:${SERVER_PORT}/`, isOurHostname], 43 | // (This works because Mockttp replaces localhost addresses in requests with 44 | // the client's IP) 45 | 46 | // We can remote resolve our Docker hostname? 47 | [`http://${OUR_HOSTNAME}:${SERVER_PORT}/`, isOurHostname], 48 | // (This works because our hostname is picked up by the network monitor, so the 49 | // request is sent via the tunnel, and our DNS server routes it to our IP. 50 | 51 | // Can we resolve a mocked-only URL? 52 | [`https://example.test/`, is('Mock response')], 53 | // (This will always fail normally, but works in testing because we specifically 54 | // spot this and inject the response). 
55 | ]; 56 | 57 | if (process.env.EXTRA_TARGET) { 58 | TARGETS.push([process.env.EXTRA_TARGET, isNotOurHostname]); 59 | } 60 | 61 | const server = http.createServer((req, res) => { 62 | res.writeHead(200).end(OUR_HOSTNAME); 63 | }); 64 | 65 | server.listen(SERVER_PORT, () => { 66 | console.log('Server started'); 67 | }); 68 | 69 | const pollInterval = setInterval(async () => { 70 | console.log("Sending requests to ", TARGETS); 71 | 72 | const responses = await Promise.all(TARGETS.map(([target]) => 73 | makeRequest(target).catch(e => e) 74 | )); 75 | 76 | // ^ This will always fail normally, because the external request fails. Will only pass if it's 77 | // intercepted such that both external & all internal requests are successful at the same time. 78 | 79 | if (responses.every((response) => 80 | !(response instanceof Error) && 81 | response.statusCode === 200 82 | )) { 83 | // Check the bodies, fail hard if any have the wrong content (i.e. went to the wrong host) 84 | const responseBodies = await Promise.all(responses.map(r => getBody(r))); 85 | responseBodies.forEach((body, i) => { 86 | const validateBody = TARGETS[i][1]; 87 | if (!validateBody(body)) throw new Error( 88 | `Request ${i} to ${TARGETS[i][0]} unexpectedly returned ${body}` 89 | ); 90 | }); 91 | 92 | console.log("All requests ok"); 93 | clearInterval(pollInterval); 94 | 95 | // Exit OK, but after a delay, so the other containers can still make requests to us. 96 | setTimeout(() => { 97 | process.exit(0); 98 | }, 5000); 99 | } else { 100 | console.log("Requests failed with", responses.map(r => r.message || r.statusCode)); 101 | } 102 | }, 250); -------------------------------------------------------------------------------- /test/fixtures/docker/go/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.16 2 | 3 | RUN mkdir -p /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | COPY . . 
7 | 8 | RUN go build app.go 9 | 10 | ENTRYPOINT ["./app"] -------------------------------------------------------------------------------- /test/fixtures/docker/go/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "time" 8 | ) 9 | 10 | func main() { 11 | fmt.Println("Starting Go container") 12 | 13 | targetUrl := os.Args[1] 14 | 15 | for { 16 | resp, err := http.Get(targetUrl) 17 | 18 | if err != nil { 19 | fmt.Fprintf(os.Stderr, "Error: %v\n", err) 20 | os.Exit(1) 21 | } 22 | 23 | fmt.Printf("Got %v response\n", resp.StatusCode) 24 | 25 | time.Sleep(500 * time.Millisecond) 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /test/fixtures/docker/go/go.mod: -------------------------------------------------------------------------------- 1 | module app 2 | 3 | go 1.18 4 | 5 | require github.com/samber/lo v1.10.1 // indirect 6 | -------------------------------------------------------------------------------- /test/fixtures/docker/go/go.sum: -------------------------------------------------------------------------------- 1 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 5 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 6 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 7 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 8 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 9 | github.com/samber/lo v1.10.1 
h1:0D3h7i0U3hRAbaCeQ82DLe67n0A7Bbl0/cEoWqFGp+U= 10 | github.com/samber/lo v1.10.1/go.mod h1:2I7tgIv8Q1SG2xEIkRq0F2i2zgxVpnyPOP0d3Gj2r+A= 11 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 12 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 13 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 14 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 15 | github.com/thoas/go-funk v0.9.1/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= 16 | github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 17 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 18 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 19 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 20 | golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= 21 | golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= 22 | golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= 23 | golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= 24 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 25 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 26 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 27 | golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 28 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 29 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 30 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 31 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 32 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 33 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 34 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 35 | golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 36 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 37 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 38 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 39 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 40 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 41 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 42 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 43 | golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= 44 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 45 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 46 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 47 | 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 48 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 49 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 50 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 51 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 52 | -------------------------------------------------------------------------------- /test/fixtures/docker/java/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:16-alpine3.13 2 | 3 | RUN mkdir -p /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | # We run the app with a non-root user, because this is a common pattern especially 7 | # in Java apps, and we need to ensure the injected file permissions support it: 8 | RUN adduser --uid 5000 -D appuser && chown -R appuser /usr/src/app 9 | USER appuser 10 | 11 | COPY . . 
12 | 13 | RUN javac Main.java 14 | 15 | ENTRYPOINT ["java", "Main"] -------------------------------------------------------------------------------- /test/fixtures/docker/java/Main.java: -------------------------------------------------------------------------------- 1 | import java.io.IOException; 2 | import java.net.HttpURLConnection; 3 | import java.net.URL; 4 | 5 | public class Main { 6 | public static void main(String[] argv) throws IOException, InterruptedException { 7 | String targetUrl = argv[0]; 8 | URL url = new URL(targetUrl); 9 | 10 | System.out.println("Starting Java container"); 11 | while (true) { 12 | HttpURLConnection con = (HttpURLConnection) url.openConnection(); 13 | con.setRequestMethod("GET"); 14 | int status = con.getResponseCode(); 15 | System.out.println("Got " + status + " response"); 16 | 17 | Thread.sleep(500); 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /test/fixtures/docker/js/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | RUN mkdir /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | COPY . . 
7 | 8 | ENTRYPOINT ["node", "/usr/src/app/app.js"] 9 | -------------------------------------------------------------------------------- /test/fixtures/docker/js/app.js: -------------------------------------------------------------------------------- 1 | const https = require('https'); 2 | 3 | const delay = (timeMs) => new Promise((resolve) => setTimeout(resolve, timeMs)); 4 | 5 | const targetUrl = process.argv[2]; 6 | 7 | (async function() { 8 | console.log('Starting JS container'); 9 | 10 | while (true) { 11 | const response = await new Promise((resolve, reject) => { 12 | const req = https.get(targetUrl); 13 | req.on('response', resolve); 14 | req.on('error', reject); 15 | }); 16 | console.log(`Got ${response.statusCode} response`); 17 | 18 | await delay(500); 19 | } 20 | })(); -------------------------------------------------------------------------------- /test/fixtures/docker/php/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:8.0-apache 2 | 3 | WORKDIR /var/www/html 4 | 5 | COPY index.php index.php -------------------------------------------------------------------------------- /test/fixtures/docker/php/index.php: -------------------------------------------------------------------------------- 1 | Hello world 2 | 3 | $_GET['target'], 13 | CURLOPT_RETURNTRANSFER => true, 14 | CURLOPT_HTTPHEADER => array() 15 | ); 16 | curl_setopt_array($cURL, $setopt_array); 17 | curl_exec($cURL); 18 | $httpcode = curl_getinfo($cURL, CURLINFO_HTTP_CODE); 19 | fwrite($stdout, "Got ".$httpcode." response"); 20 | curl_close($cURL); 21 | 22 | sleep(1); 23 | } 24 | 25 | ?> -------------------------------------------------------------------------------- /test/fixtures/docker/python/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | RUN mkdir /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | RUN pip install requests 7 | 8 | COPY . . 
9 | 10 | ENTRYPOINT ["python", "/usr/src/app/app.py"] -------------------------------------------------------------------------------- /test/fixtures/docker/python/app.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import requests 4 | 5 | targetUrl = sys.argv[1] 6 | 7 | print('Starting Python container') 8 | while True: 9 | resp = requests.get(targetUrl) 10 | print('Got {0} response'.format(resp.status_code)) 11 | time.sleep(0.5) -------------------------------------------------------------------------------- /test/fixtures/docker/requests-in-build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Run a series of HTTPS requests during the build, from different multistage builds. 2 | # Can HTTP Toolkit successfully hook the build and capture all of these? 3 | FROM node:14 as base-image 4 | RUN curl -s https://base-request.test 5 | 6 | FROM node:14 as base-image-2 7 | COPY . . 8 | RUN curl -s https://base2-request.test 9 | 10 | FROM base-image 11 | COPY --from=base-image-2 make-request.js . 
12 | RUN node ./make-request.js https://final-stage-request.test -------------------------------------------------------------------------------- /test/fixtures/docker/requests-in-build/make-request.js: -------------------------------------------------------------------------------- 1 | const https = require('https'); 2 | 3 | const targetUrl = process.argv[2]; 4 | 5 | (async function() { 6 | console.log(`Making request to ${targetUrl}`); 7 | 8 | const response = await new Promise((resolve) => { 9 | const req = https.get(targetUrl); 10 | req.on('response', resolve); 11 | req.on('error', () => process.exit(1)); 12 | }); 13 | 14 | console.log(`Got ${response.statusCode} response`); 15 | })(); -------------------------------------------------------------------------------- /test/fixtures/docker/ruby/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ruby:alpine3.13 2 | 3 | RUN mkdir /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | COPY . . 7 | 8 | ENTRYPOINT ["ruby", "/usr/src/app/app.rb"] -------------------------------------------------------------------------------- /test/fixtures/docker/ruby/app.rb: -------------------------------------------------------------------------------- 1 | require 'net/http' 2 | 3 | target_uri = URI(ARGV[0]) 4 | 5 | puts 'Starting Ruby container' 6 | 7 | while true do 8 | res = Net::HTTP.get_response(target_uri) 9 | puts "Got #{res.code} response" 10 | sleep(0.5) 11 | end -------------------------------------------------------------------------------- /test/fixtures/terminal/js-test-script.js: -------------------------------------------------------------------------------- 1 | // Test built-ins against their specific protocol 2 | require('node:http').get('http://example.test/js/http'); 3 | require('node:https').get('https://example.test/js/https'); 4 | require('http').get('http://example.test/js/http'); 5 | require('https').get('https://example.test/js/https'); 6 | 7 | function 
sendRequestsTo(baseUrl) { 8 | if (global.fetch) { 9 | // Only in Node 18+ so this isn't actually covered by unit tests, but 10 | // it's useful for manually testing. 11 | fetch(baseUrl + '/global-fetch'); 12 | } 13 | 14 | require('request').get(baseUrl + '/request'); 15 | require('axios').get(baseUrl + '/axios'); 16 | require('superagent').get(baseUrl + '/superagent').end(() => {}); 17 | require('node-fetch')(baseUrl + '/node-fetch'); 18 | require('got')(baseUrl + '/got'); 19 | require('bent')('GET')(baseUrl + '/bent') 20 | require('unirest').get(baseUrl + '/unirest', () => {}); 21 | require('reqwest')(baseUrl + '/reqwest'); 22 | require('needle').get(baseUrl + '/needle', () => {}); 23 | require('undici').request(baseUrl + '/undici'); 24 | } 25 | 26 | // Test all other libs against both protocols 27 | sendRequestsTo('http://example.test/js'); 28 | sendRequestsTo('https://example.test/js'); 29 | 30 | // Test libraries that need manual steps, and use their own URLs: 31 | require('stripe')('sk_test_hunter2').customers.list(); -------------------------------------------------------------------------------- /test/integration/frida-downloads.spec.ts: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import * as FridaJs from 'frida-js'; 3 | 4 | import { FRIDA_SRIS, FRIDA_VERSION } from '../../src/interceptors/frida/frida-integration'; 5 | 6 | describe("Frida download SRIs", function () { 7 | this.timeout(30000); // Can be slow, since we're doing MB downloads & disk IO 8 | 9 | Object.entries(FRIDA_SRIS).forEach(([target, sriMap]) => { 10 | Object.entries(sriMap).forEach(([arch, expectedHash]) => { 11 | it(`should have the correct SRI for ${target}-${arch}`, async () => { 12 | const correctSriHash = await FridaJs.calculateFridaSRI({ 13 | ghToken: process.env.GITHUB_TOKEN, 14 | arch: arch as any, 15 | platform: target as any, 16 | version: FRIDA_VERSION 17 | }); 18 | 19 | 
expect(expectedHash).to.equal(correctSriHash[0]); 20 | }); 21 | }); 22 | }); 23 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/electron.spec.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as os from 'os'; 3 | import * as fs from 'fs'; 4 | import { CompletedRequest } from 'mockttp'; 5 | import { setupInterceptor, itIsAvailable, itCanBeActivated } from './interceptor-test-utils'; 6 | 7 | import { expect } from 'chai'; 8 | 9 | const interceptorSetup = setupInterceptor('electron'); 10 | 11 | // We use slack as a test app. Doesn't really matter what it is, but 12 | // slack is pretty common, easy to kill & restart, and fairly representative. 13 | const electronAppPath: string | undefined = [ 14 | '/usr/local/bin/slack', // Wrapper, used in CI to inject --no-sandbox 15 | '/usr/bin/slack', 16 | '/Applications/Slack.app/Contents/MacOS/Slack', 17 | `${os.homedir()}\\AppData\\Local\\slack\\slack.exe` 18 | ].find((path) => fs.existsSync(path)); 19 | 20 | describe('Electron interception', function () { 21 | this.timeout(5000); 22 | 23 | beforeEach(async () => { 24 | const { server } = await interceptorSetup; 25 | await server.start(); 26 | await server.forAnyRequest().thenPassThrough(); 27 | }); 28 | 29 | afterEach(async () => { 30 | const { server, interceptor } = await interceptorSetup; 31 | 32 | await interceptor.deactivate(server.port); 33 | await server.stop(); 34 | }); 35 | 36 | itIsAvailable(interceptorSetup); 37 | 38 | it('can be activated', async function () { 39 | if (!electronAppPath) this.skip(); 40 | 41 | const { interceptor, server } = await interceptorSetup; 42 | 43 | expect(interceptor.isActive(server.port)).to.equal(false); 44 | 45 | await interceptor.activate(server.port, { 46 | pathToApplication: electronAppPath 47 | }); 48 | expect(interceptor.isActive(server.port)).to.equal(true); 49 | 
expect(interceptor.isActive(server.port + 1)).to.equal(false); 50 | 51 | await interceptor.deactivate(server.port); 52 | expect(interceptor.isActive(server.port)).to.equal(false); 53 | }); 54 | 55 | it('successfully makes requests', async function () { 56 | if (!electronAppPath) this.skip(); 57 | 58 | const { server, interceptor } = await interceptorSetup; 59 | 60 | const slackRequestReceived = new Promise<CompletedRequest>((resolve) => 61 | server.on('request', (req) => { 62 | if (req.url.includes('slack.com')) { 63 | resolve(req); 64 | } 65 | }) 66 | ); 67 | 68 | await interceptor.activate(server.port, { 69 | pathToApplication: electronAppPath 70 | }); 71 | 72 | // Only resolves if a slack request is intercepted 73 | await slackRequestReceived; 74 | }); 75 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/existing-terminal.spec.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as util from 'util'; 3 | import { exec } from 'child_process'; 4 | 5 | import { expect } from 'chai'; 6 | 7 | import fetch from 'node-fetch'; 8 | 9 | import { FIXTURES_DIR } from '../../test-util'; 10 | import { setupInterceptor, itIsAvailable } from './interceptor-test-utils'; 11 | 12 | const execAsync = util.promisify(exec); 13 | 14 | const interceptorSetup = setupInterceptor('existing-terminal'); 15 | 16 | describe('Existing terminal interceptor', function () { 17 | this.timeout(5000); 18 | 19 | beforeEach(async () => { 20 | const { server } = await interceptorSetup; 21 | await server.start(); 22 | }); 23 | 24 | afterEach(async () => { 25 | const { server, interceptor } = await interceptorSetup; 26 | await interceptor.deactivate(server.port); 27 | await server.stop(); 28 | }); 29 | 30 | itIsAvailable(interceptorSetup); 31 | 32 | it('can be activated', async () => { 33 | const { interceptor, server } = await interceptorSetup; 34 | 35 | 
expect(interceptor.isActive(server.port)).to.equal(false); 36 | 37 | const result = await interceptor.activate(server.port) as { port: number }; 38 | expect(interceptor.isActive(server.port)).to.equal(false); 39 | await fetch(`http://localhost:${result.port}/setup`); 40 | await fetch(`http://localhost:${result.port}/success`, { method: 'POST' }); 41 | expect(interceptor.isActive(server.port)).to.equal(true); 42 | 43 | expect(interceptor.isActive(server.port + 1)).to.equal(false); 44 | 45 | await interceptor.deactivate(server.port); 46 | expect(interceptor.isActive(server.port)).to.equal(false); 47 | }); 48 | 49 | it('can deactivate all', async () => { 50 | const { interceptor, server } = await interceptorSetup; 51 | 52 | expect(interceptor.isActive(server.port)).to.equal(false); 53 | 54 | const result = await interceptor.activate(server.port) as { port: number }; 55 | await fetch(`http://localhost:${result.port}/setup`); 56 | await fetch(`http://localhost:${result.port}/success`, { method: 'POST' }); 57 | expect(interceptor.isActive(server.port)).to.equal(true); 58 | 59 | await interceptor.deactivateAll(); 60 | expect(interceptor.isActive(server.port)).to.equal(false); 61 | }); 62 | 63 | it('can deactivate after failed activation', async () => { 64 | const { interceptor, server } = await interceptorSetup; 65 | 66 | expect(interceptor.isActive(server.port)).to.equal(false); 67 | 68 | const result = await interceptor.activate(server.port) as { port: number }; 69 | expect(interceptor.isActive(server.port)).to.equal(false); 70 | 71 | await interceptor.deactivateAll(); 72 | expect(interceptor.isActive(server.port)).to.equal(false); 73 | 74 | const setupResponse = await fetch(`http://localhost:${result.port}/setup`).catch(e => e); 75 | expect(setupResponse).to.be.instanceOf(Error); 76 | }); 77 | 78 | it("should intercept all popular JS libraries", async function () { 79 | this.timeout(10000); 80 | const { interceptor, server } = await interceptorSetup; 81 | const result 
= await interceptor.activate(server.port) as { port: number, commands: { [shell: string]: { command: string } } }; 82 | 83 | const mainRule = await server.forGet(/https?:\/\/example.test\/js\/.*/).thenReply(200); 84 | const stripeRule = await server.forGet('https://api.stripe.com/v1/customers').thenJson(200, {}); 85 | 86 | const scriptOutput = await execAsync(` 87 | ${result.commands['Bash'].command} 88 | node "${require.resolve(`${FIXTURES_DIR}/terminal/js-test-script`)}" 89 | `, { 90 | shell: '/bin/bash' 91 | }); 92 | 93 | expect(scriptOutput.stdout).to.contain("HTTP Toolkit interception enabled"); 94 | expect(interceptor.isActive(server.port)).to.equal(true); 95 | 96 | const seenRequests = _.concat(...await Promise.all([ 97 | mainRule.getSeenRequests(), 98 | stripeRule.getSeenRequests() 99 | ])).map(r => r.url.replace(':443', '').replace(':80', '')); 100 | 101 | // Built-in modules 102 | expect(seenRequests).to.include('http://example.test/js/http'); 103 | expect(seenRequests).to.include('https://example.test/js/https'); 104 | 105 | // http & https with lots of popular libraries 106 | ['http', 'https'].forEach((protocol) => 107 | [ 108 | 'request', 109 | 'axios', 110 | 'superagent', 111 | 'node-fetch', 112 | 'got', 113 | 'bent', 114 | 'unirest', 115 | 'reqwest', 116 | 'needle', 117 | 'undici' 118 | ].forEach((library) => 119 | expect(seenRequests).to.include(`${protocol}://example.test/js/${library}`) 120 | ) 121 | ); 122 | 123 | // Special case modules that need manual handling: 124 | expect(seenRequests).to.include('https://api.stripe.com/v1/customers'); 125 | }); 126 | 127 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/fresh-chrome.spec.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import { CompletedRequest } from 'mockttp'; 3 | import { setupInterceptor, itIsAvailable, itCanBeActivated } from 
'./interceptor-test-utils'; 4 | 5 | const interceptorSetup = setupInterceptor('fresh-chrome'); 6 | 7 | describe('Chrome interceptor', function () { 8 | this.timeout(5000); 9 | 10 | beforeEach(async () => { 11 | const { server } = await interceptorSetup; 12 | await server.start(); 13 | await server.forAnyRequest().thenPassThrough(); 14 | }); 15 | 16 | afterEach(async () => { 17 | const { server, interceptor: chromeInterceptor } = await interceptorSetup; 18 | 19 | await chromeInterceptor.deactivate(server.port); 20 | await server.stop(); 21 | }); 22 | 23 | itIsAvailable(interceptorSetup); 24 | itCanBeActivated(interceptorSetup); 25 | 26 | it('successfully makes requests', async function () { 27 | const { server, interceptor: chromeInterceptor } = await interceptorSetup; 28 | 29 | const exampleRequestReceived = new Promise<CompletedRequest>((resolve) => 30 | server.on('request', (req) => { 31 | if (req.url.startsWith('https://amiusing.httptoolkit.tech')) { 32 | resolve(req); 33 | } 34 | }) 35 | ); 36 | 37 | await chromeInterceptor.activate(server.port); 38 | 39 | // Only resolves if amiusing request is sent successfully 40 | await exampleRequestReceived; 41 | }); 42 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/fresh-firefox.spec.ts: -------------------------------------------------------------------------------- 1 | import { CompletedRequest } from 'mockttp'; 2 | import { setupInterceptor, itIsAvailable } from './interceptor-test-utils'; 3 | import { expect } from 'chai'; 4 | 5 | const interceptorSetup = setupInterceptor('fresh-firefox'); 6 | 7 | describe('Firefox interceptor', function () { 8 | this.timeout(10000); 9 | 10 | beforeEach(async () => { 11 | const { server } = await interceptorSetup; 12 | await server.start(); 13 | await server.forAnyRequest().thenPassThrough(); 14 | }); 15 | 16 | afterEach(async () => { 17 | const { server, interceptor: firefoxInterceptor } = await interceptorSetup; 18 | await 
firefoxInterceptor.deactivate(server.port); 19 | await server.stop(); 20 | }); 21 | 22 | itIsAvailable(interceptorSetup); 23 | 24 | it('successfully makes requests', async function () { 25 | const { server, interceptor: firefoxInterceptor } = await interceptorSetup; 26 | 27 | const exampleRequestReceived = new Promise((resolve) => 28 | server.on('request', (req) => { 29 | if (req.url.startsWith('https://amiusing.httptoolkit.tech')) { 30 | resolve(req); 31 | } 32 | }) 33 | ); 34 | 35 | await firefoxInterceptor.activate(server.port); 36 | 37 | // Only resolves if amiusing request is sent successfully 38 | await exampleRequestReceived; 39 | }); 40 | 41 | it('can deactivate all', async () => { 42 | const { interceptor, server } = await interceptorSetup; 43 | 44 | expect(interceptor.isActive(server.port)).to.equal(false); 45 | 46 | await interceptor.activate(server.port); 47 | expect(interceptor.isActive(server.port)).to.equal(true); 48 | expect(interceptor.isActive(server.port + 1)).to.equal(false); 49 | 50 | await interceptor.deactivateAll(); 51 | expect(interceptor.isActive(server.port)).to.equal(false); 52 | }); 53 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/fresh-terminal.spec.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import { fork } from 'child_process'; 3 | 4 | import { expect } from 'chai'; 5 | 6 | import { FIXTURES_DIR } from '../../test-util'; 7 | import { setupInterceptor, itIsAvailable, itCanBeActivated } from './interceptor-test-utils'; 8 | import { getTerminalEnvVars } from '../../../src/interceptors/terminal/terminal-env-overrides'; 9 | 10 | const interceptorSetup = setupInterceptor('fresh-terminal'); 11 | 12 | describe('Fresh terminal interceptor', function () { 13 | this.timeout(5000); 14 | 15 | beforeEach(async () => { 16 | const { server } = await interceptorSetup; 17 | await server.start(); 18 | }); 19 | 
20 | afterEach(async () => { 21 | const { server } = await interceptorSetup; 22 | await server.stop(); 23 | }); 24 | 25 | describe('run directly', () => { 26 | 27 | beforeEach(async () => { 28 | const { server } = await interceptorSetup; 29 | await server.forAnyRequest().thenPassThrough(); 30 | }); 31 | 32 | afterEach(async () => { 33 | const { server, interceptor } = await interceptorSetup; 34 | await interceptor.deactivate(server.port); 35 | }); 36 | 37 | itIsAvailable(interceptorSetup); 38 | itCanBeActivated(interceptorSetup); 39 | }); 40 | 41 | describe('simulated from env vars', () => { 42 | 43 | it("should intercept all popular JS libraries", async function () { 44 | this.timeout(10000); 45 | 46 | const { server, httpsConfig } = await interceptorSetup; 47 | 48 | const mainRule = await server.forGet(/https?:\/\/example.test\/js\/.*/).thenReply(200); 49 | const stripeRule = await server.forGet('https://api.stripe.com/v1/customers').thenJson(200, {}); 50 | 51 | // Spawn node, as if it was run inside an intercepted terminal 52 | const terminalEnvOverrides = getTerminalEnvVars(server.port, httpsConfig, process.env); 53 | const nodeScript = fork(require.resolve(`${FIXTURES_DIR}/terminal/js-test-script`), [], { 54 | execArgv: ['-r', require.resolve('../../../overrides/js/prepend-node.js')], 55 | env: Object.assign({}, process.env, terminalEnvOverrides) 56 | }); 57 | await new Promise((resolve, reject) => { 58 | nodeScript.on('close', resolve); 59 | nodeScript.on('error', reject); 60 | }); 61 | 62 | const seenRequests = _.concat(...await Promise.all([ 63 | mainRule.getSeenRequests(), 64 | stripeRule.getSeenRequests() 65 | ])).map(r => r.url.replace(':443', '').replace(':80', '')); 66 | 67 | // Built-in modules 68 | expect(seenRequests).to.include('http://example.test/js/http'); 69 | expect(seenRequests).to.include('https://example.test/js/https'); 70 | 71 | // http & https with lots of popular libraries 72 | ['http', 'https'].forEach((protocol) => 73 | [ 74 | 
'request', 75 | 'axios', 76 | 'superagent', 77 | 'node-fetch', 78 | 'got', 79 | 'bent', 80 | 'unirest', 81 | 'reqwest', 82 | 'needle', 83 | 'undici' 84 | ].forEach((library) => 85 | expect(seenRequests).to.include(`${protocol}://example.test/js/${library}`) 86 | ) 87 | ); 88 | 89 | // Special case modules that need manual handling: 90 | expect(seenRequests).to.include('https://api.stripe.com/v1/customers'); 91 | }); 92 | 93 | }); 94 | 95 | }); -------------------------------------------------------------------------------- /test/integration/interceptors/interceptor-test-utils.ts: -------------------------------------------------------------------------------- 1 | import _ from 'lodash'; 2 | import * as path from 'path'; 3 | import * as fs from 'fs'; 4 | import * as tmp from 'tmp'; 5 | 6 | import { expect } from 'chai'; 7 | 8 | import { getLocal, generateCACertificate, Mockttp, requestHandlers } from 'mockttp'; 9 | 10 | import { buildInterceptors, Interceptor } from '../../../src/interceptors'; 11 | import { getDnsServer } from '../../../src/dns-server'; 12 | 13 | const getCertificateDetails = _.memoize(async (configPath: string) => { 14 | const keyPath = path.join(configPath, 'ca.key'); 15 | const certPath = path.join(configPath, 'ca.pem'); 16 | 17 | const newCertPair = await generateCACertificate({ commonName: 'HTTP Toolkit CA - DO NOT TRUST' }); 18 | 19 | fs.writeFileSync(keyPath, newCertPair.key); 20 | fs.writeFileSync(certPath, newCertPair.cert); 21 | 22 | return { certPath, keyPath, certContent: newCertPair.cert, keyLength: 2048}; 23 | }); 24 | 25 | type TestSetup = { 26 | server: Mockttp, 27 | configPath: string, 28 | httpsConfig: { certPath: string, keyPath: string, certContent: string, keyLength: number }, 29 | getPassThroughOptions(): Promise<requestHandlers.PassThroughHandlerOptions>; 30 | }; 31 | 32 | export async function setupTest(): Promise<TestSetup> { 33 | const configPath = tmp.dirSync({ unsafeCleanup: true }).name; 34 | const httpsConfig = await getCertificateDetails(configPath); 35 | const server = 
getLocal({ https: httpsConfig }); 36 | 37 | const getPassThroughOptions = async (): Promise<requestHandlers.PassThroughHandlerOptions> => ({ 38 | lookupOptions: { 39 | servers: [`127.0.0.1:${(await getDnsServer(server.port)).address().port}`] 40 | } 41 | }); 42 | 43 | return { server, configPath, httpsConfig, getPassThroughOptions }; 44 | } 45 | 46 | type InterceptorSetup = TestSetup & { 47 | interceptor: Interceptor 48 | }; 49 | 50 | export async function setupInterceptor(interceptor: string): Promise<InterceptorSetup> { 51 | const testSetup = await setupTest(); 52 | const interceptors = buildInterceptors({ 53 | configPath: testSetup.configPath, 54 | https: testSetup.httpsConfig 55 | }); 56 | 57 | return { ...testSetup, interceptor: interceptors[interceptor] }; 58 | } 59 | 60 | // Various tests that we'll want to reuse across interceptors: 61 | 62 | export function itIsAvailable(interceptorSetup: Promise<InterceptorSetup>) { 63 | it('is available', async () => { 64 | const { interceptor } = await interceptorSetup; 65 | expect(await interceptor.isActivable()).to.equal(true); 66 | }); 67 | } 68 | 69 | export function itCanBeActivated(interceptorSetup: Promise<InterceptorSetup>) { 70 | it('can be activated', async () => { 71 | const { interceptor, server } = await interceptorSetup; 72 | 73 | expect(interceptor.isActive(server.port)).to.equal(false); 74 | 75 | await interceptor.activate(server.port); 76 | expect(interceptor.isActive(server.port)).to.equal(true); 77 | expect(interceptor.isActive(server.port + 1)).to.equal(false); 78 | 79 | await interceptor.deactivate(server.port); 80 | expect(interceptor.isActive(server.port)).to.equal(false); 81 | }); 82 | 83 | it('can deactivate all', async () => { 84 | const { interceptor, server } = await interceptorSetup; 85 | 86 | expect(interceptor.isActive(server.port)).to.equal(false); 87 | 88 | await interceptor.activate(server.port); 89 | expect(interceptor.isActive(server.port)).to.equal(true); 90 | expect(interceptor.isActive(server.port + 1)).to.equal(false); 91 | 92 | await interceptor.deactivateAll(); 93 | 
expect(interceptor.isActive(server.port)).to.equal(false); 94 | }); 95 | } -------------------------------------------------------------------------------- /test/no-sandbox-docker-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | trap : SIGTERM SIGINT 3 | 4 | # We need to inject a --no-sandbox option, because inside Docker Chrome/Electron apps 5 | # can't start their sandbox, so without this it refuses to start at all. 6 | 7 | # It's assumed that this is copied/linked into a file in /usr/local/bin to wrap a 8 | # binary with the same name (e.g. google-chrome) in /usr/bin: 9 | /usr/bin/$(basename $0) --no-sandbox "$@" & 10 | APP_PID=$! 11 | 12 | wait $APP_PID 13 | 14 | WAIT_EXIT_CODE=$? 15 | 16 | # If this script gets killed, kill the app with the same signal 17 | if [[ $WAIT_EXIT_CODE -gt 128 ]] 18 | then 19 | kill -$(($WAIT_EXIT_CODE - 128)) $APP_PID 20 | fi 21 | 22 | # Propagate the app's exit status: without this, a normal app exit leaves the failed 23 | # [[ ]] test as the script's last command, so the wrapper would always exit 1. 24 | exit $WAIT_EXIT_CODE -------------------------------------------------------------------------------- /test/test-util.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | 3 | export const FIXTURES_DIR = path.join(__dirname, 'fixtures'); -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "declaration": true, 4 | "importHelpers": true, 5 | "module": "commonjs", 6 | "sourceMap": true, 7 | "rootDir": "src", 8 | "outDir": "lib", 9 | "strict": true, 10 | "target": "es2020", 11 | "resolveJsonModule": true, 12 | "esModuleInterop": true, 13 | "composite": true, 14 | "typeRoots" : [ 15 | "./node_modules/@types", 16 | "./custom-typings" 17 | ] 18 | }, 19 | "include": [ 20 | "src/**/*", 21 | "custom-typings/**/*.d.ts" 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /wallaby.js: 
-------------------------------------------------------------------------------- 1 | module.exports = (wallaby) => { 2 | return { 3 | files: [ 4 | 'src/**/*.ts', 5 | 'test/**/*.ts', 6 | '!test/**/*.spec.ts' 7 | ], 8 | tests: [ 9 | 'test/unit/**/*.spec.ts' 10 | ], 11 | 12 | workers: { 13 | initial: 1, 14 | regular: 1, 15 | restart: true 16 | }, 17 | 18 | testFramework: 'mocha', 19 | env: { 20 | type: 'node' 21 | } 22 | }; 23 | }; -------------------------------------------------------------------------------- /webpack.config.js: -------------------------------------------------------------------------------- 1 | const webpack = require('webpack'); 2 | const path = require('path'); 3 | const CopyWebpackPlugin = require('copy-webpack-plugin'); 4 | const SentryPlugin = require('@sentry/webpack-plugin'); 5 | 6 | const pjson = require('./package.json'); 7 | 8 | const OUTPUT_DIR = path.resolve(__dirname, 'bundle'); 9 | 10 | console.log( 11 | process.env.SENTRY_AUTH_TOKEN 12 | ? "* Webpack will upload source map to Sentry *" 13 | : "Sentry source map upload disabled - no token set" 14 | ); 15 | 16 | module.exports = { 17 | entry: { 18 | index: './src/index.ts', 19 | 'error-tracking': './src/error-tracking.ts' 20 | }, 21 | output: { 22 | path: OUTPUT_DIR, 23 | filename: '[name].js', 24 | libraryTarget: 'commonjs2' 25 | }, 26 | mode: 'production', 27 | devtool: 'source-map', 28 | target: 'node', 29 | node: false, // Don't mess with any node built-ins or globals 30 | module: { 31 | rules: [ 32 | { 33 | test: /\.tsx?$/, 34 | use: 'ts-loader', 35 | exclude: /node_modules/ 36 | }, 37 | { 38 | // Required to build .mjs files correctly 39 | test: /\.mjs$/, 40 | include: /node_modules/, 41 | type: "javascript/auto", 42 | }, 43 | { 44 | test: /node_modules[\\|/]simple-plist[\\|/]dist/, 45 | use: { loader: 'umd-compat-loader' } 46 | }, 47 | { 48 | // Some browser launchers (Opera) use resource files from within the browser-launcher 49 | // module. 
We need to reroute that to point to the unbundled files: 50 | test: /node_modules\/@httptoolkit\/browser-launcher\/lib\/run.js$/, 51 | loader: 'string-replace-loader', 52 | options: { 53 | search: '../res/', 54 | replace: './bl-resources/', 55 | flags: 'g', 56 | strict: true 57 | } 58 | } 59 | ] 60 | }, 61 | externals: [ 62 | '@oclif/plugin-update/lib/commands/update', // Lots of complicated dynamic requires in @oclif 63 | 'registry-js', // Native module 64 | 'win-version-info', // Native module 65 | 'node-datachannel', // Native module 66 | '_stream_wrap', // Used in httpolyglot only in old Node, where it's available 67 | function (context, request, callback) { 68 | if (context !== __dirname && request.endsWith('/error-tracking')) { 69 | // Direct all requires of error-tracking to its entrypoint at the top level, 70 | // except the root require that actually builds the entrypoint. 71 | callback(null, 'commonjs ./error-tracking'); 72 | } else { 73 | callback(); 74 | } 75 | } 76 | ], 77 | plugins: [ 78 | // Optimistic require for 'iconv' in 'encoding', falls back to 'iconv-lite' 79 | new webpack.NormalModuleReplacementPlugin(/\/iconv-loader$/, /node-noop/), 80 | // Optimistically required in (various) ws versions, with fallback 81 | new webpack.IgnorePlugin({ resourceRegExp: /^bufferutil$/ }), 82 | // Optimistically required in (various) ws versions, with fallback 83 | new webpack.IgnorePlugin({ resourceRegExp: /^utf-8-validate$/ }), 84 | // Optimistically required in headless, falls back to child_process 85 | new webpack.IgnorePlugin({ resourceRegExp: /^child-killer$/ }), 86 | // GraphQL playground - never used 87 | new webpack.NormalModuleReplacementPlugin(/^\.\/renderGraphiQL$/, 'node-noop'), 88 | // SSH2 - used within Dockerode, but we don't support it and it has awkward native deps 89 | new webpack.NormalModuleReplacementPlugin(/^ssh2$/, 'node-noop'), 90 | // CDP protocol - not used without local:true (which we never use, we always 91 | // get the CDP protocol 
details from the target Electron app). 92 | new webpack.IgnorePlugin({ 93 | resourceRegExp: /^\.\/protocol.json$/, 94 | contextRegExp: /chrome-remote-interface/ 95 | }), 96 | // Copy browser-launcher's resource files into the output directory, where the 97 | // string-replace-loader rule above redirects its runtime '../res/' reads: 98 | new CopyWebpackPlugin({ 98 | patterns: [{ 99 | from: path.join('node_modules', '@httptoolkit', 'browser-launcher', 'res'), 100 | to: 'bl-resources' 101 | }] 102 | }), 103 | // If SENTRY_AUTH_TOKEN is set, upload this sourcemap to Sentry 104 | process.env.SENTRY_AUTH_TOKEN 105 | ? new SentryPlugin({ 106 | release: pjson.version, 107 | include: OUTPUT_DIR, 108 | urlPrefix: '~/bundle', 109 | validate: true 110 | }) 111 | : { apply: () => {} }, 112 | // Used to e.g. fix the relative path to the overrides directory 113 | new webpack.EnvironmentPlugin({ HTK_IS_BUNDLED: true }) 114 | ], 115 | resolve: { 116 | extensions: [ '.tsx', '.ts', '.mjs', '.js' ] 117 | } 118 | }; 119 | --------------------------------------------------------------------------------