├── .env.example
├── .gitignore
├── README.md
├── proxy.py
└── requirements.txt

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
OPENAI_API_KEY=key_goes_brrr

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.env
.idea
__pycache__
node_modules

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Update

It looks like GitHub Copilot Chat now uses GPT-4 by default; however, I will leave this repo up in case someone wants to reuse it for a different use case, such as pointing it at a local LLM.

# Custom Proxy for GitHub Copilot Chat

This custom proxy forwards HTTP requests to their original destination, except for requests to the Copilot chat endpoint. When it sees that endpoint, it rewrites the request to use the GPT-4 model and redirects it to the main OpenAI API endpoint.

## Requirements

- Python 3.9 or higher (required by the pinned mitmproxy release)
- mitmproxy
- python-dotenv

## Installation

1. Clone the repository:

```bash
git clone https://github.com/yourusername/custom-proxy.git
cd custom-proxy
```

2. Install the required packages:

```bash
pip install -r requirements.txt
```

3. Create a `.env` file in the project directory and add your OpenAI API key:

```
OPENAI_API_KEY=your_openai_api_key
```

Replace `your_openai_api_key` with your actual API key.

## Usage

1. Start the proxy server:

```bash
mitmdump -s proxy.py -p 8090
```

This command starts the proxy server on port 8090.

2. Configure your application to use the proxy server by setting the `HTTP_PROXY` and `HTTPS_PROXY` environment variables to `http://localhost:8090` (see the example after this list).

3. Run your application, and the proxy will intercept and modify the specified requests as described.
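For example, to route a single shell session through the proxy (assuming the port from the `mitmdump` command above):

```bash
# Send this shell's HTTP and HTTPS traffic through the local proxy
export HTTP_PROXY=http://localhost:8090
export HTTPS_PROXY=http://localhost:8090
```

Note that mitmproxy can only decrypt HTTPS traffic if the client trusts its CA certificate, which is generated under `~/.mitmproxy/` on first run; browsing to http://mitm.it through the proxy shows per-platform install instructions.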
## License

This project is licensed under the [MIT License](LICENSE).

--------------------------------------------------------------------------------
/proxy.py:
--------------------------------------------------------------------------------
import json
import os

from mitmproxy import http
from dotenv import load_dotenv

# Load the API key from the .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")


def request(flow: http.HTTPFlow) -> None:
    # Only rewrite Copilot chat completion requests; all other traffic
    # is forwarded to its original destination untouched.
    if (flow.request.host == "copilot-proxy.githubusercontent.com" and
            flow.request.path == "/v1/chat/completions"):
        # Modify the request JSON to ask for GPT-4
        json_data = json.loads(flow.request.get_text())
        json_data["model"] = "gpt-4"

        # Remove the Copilot-specific "intent" parameter, which the
        # OpenAI API does not accept
        json_data.pop("intent", None)

        flow.request.set_text(json.dumps(json_data))

        # Redirect the request to the OpenAI API
        flow.request.host = "api.openai.com"

        # Strip all existing headers (they carry Copilot's own auth)
        flow.request.headers.clear()

        # Add new headers, restoring the Host header removed by clear()
        flow.request.headers["Host"] = "api.openai.com"
        flow.request.headers["Content-Type"] = "application/json"
        flow.request.headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"


# TODO: streaming doesn't seem to work yet. An earlier version tried to
# forward chunks by hand from a response() hook; mitmproxy's supported
# mechanism is to set flow.response.stream in responseheaders(), which
# passes body chunks through to the client as they arrive instead of
# buffering the whole response.
def responseheaders(flow: http.HTTPFlow) -> None:
    if (flow.request.host == "api.openai.com" and
            flow.request.path == "/v1/chat/completions"):
        flow.response.stream = True

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
mitmproxy==9.0.1
python-dotenv==1.0.0

--------------------------------------------------------------------------------