├── .gitignore ├── LICENSE ├── README.md ├── debugger.py ├── demo └── average.py ├── failingJavascript.js ├── failingPython.py ├── llm.py ├── package-lock.json ├── package.json ├── requirements.txt ├── terminal.py ├── tests └── test_debugger.py └── wrapper.js /.gitignore: -------------------------------------------------------------------------------- 1 | **/tools/ 2 | **/node_modules/ 3 | package-lock.json 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | .pybuilder/ 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | # For a library or package, you might want to ignore these files since the code is 91 | # intended to run in multiple environments; otherwise, check them in: 92 | # .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # poetry 102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 103 | # This is especially recommended for binary packages to ensure reproducibility, and is more 104 | # commonly ignored for libraries. 105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 106 | #poetry.lock 107 | 108 | # pdm 109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
110 | #pdm.lock 111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 112 | # in version control. 113 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 114 | .pdm.toml 115 | .pdm-python 116 | .pdm-build/ 117 | 118 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 119 | __pypackages__/ 120 | 121 | # Celery stuff 122 | celerybeat-schedule 123 | celerybeat.pid 124 | 125 | # SageMath parsed files 126 | *.sage.py 127 | 128 | # Environments 129 | .env 130 | .venv 131 | env/ 132 | venv/ 133 | ENV/ 134 | env.bak/ 135 | venv.bak/ 136 | 137 | # Spyder project settings 138 | .spyderproject 139 | .spyproject 140 | 141 | # Rope project settings 142 | .ropeproject 143 | 144 | # mkdocs documentation 145 | /site 146 | 147 | # mypy 148 | .mypy_cache/ 149 | .dmypy.json 150 | dmypy.json 151 | 152 | # Pyre type checker 153 | .pyre/ 154 | 155 | # pytype static type analyzer 156 | .pytype/ 157 | 158 | # Cython debug symbols 159 | cython_debug/ 160 | 161 | # PyCharm 162 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 163 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 164 | # and can be added to the global gitignore or merged into this file. For a more nuclear 165 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
166 | #.idea/ 167 | 168 | *.tgz -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Samarth Aggarwal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Always-On Debugger 2 | 3 | Automatically debug code from your terminal. 4 | 5 | ## Overview 6 | 7 | Always-On Debugger is a tool that enhances your terminal experience by automatically detecting errors and providing debugging assistance using AI. It acts as a wrapper around your existing terminal, intercepting commands and their outputs to offer real-time debugging support. 8 | 9 | At present, we only support using Anthropic's Claude API. 
Let us know if you need OpenAI support. 10 | 11 | https://github.com/user-attachments/assets/2a8a6d49-8b98-4319-a8de-16ef6ea23210 12 | 13 | [Loom Demo](https://www.loom.com/share/5afa2d7fd46c470bbc884675a77aec3c) 14 | 15 | ## Features 16 | 17 | - Mimics the terminal for every command 18 | - Automatically detects errors in command outputs 19 | - Captures context and sends it to an AI language model for analysis 20 | - Provides AI-generated debugging suggestions directly in the terminal 21 | 22 | ## Installation 23 | 24 | There are two ways to set up Always-On Debugger. 25 | 26 | ### Option 1: Using npm 27 | 28 | Step 1: Install the package 29 | 30 | ```bash 31 | npm install -g aidebug 32 | ``` 33 | 34 | Step 2: Set up the API key for Anthropic 35 | 36 | ```bash 37 | export ANTHROPIC_API_KEY= 38 | ``` 39 | 40 | Step 3: Now you can use the `debug` command to debug your commands. 41 | 42 | ```bash 43 | debug python average.py 44 | ``` 45 | 46 | ### Option 2: Manual Setup 47 | 48 | _Step 1: Clone the repo_ 49 | 50 | ```bash 51 | cd ~ 52 | git clone git@github.com:samarthaggarwal/always-on-debugger.git 53 | ``` 54 | 55 | _Step 2: Update ~/.bashrc_ 56 | Add the following to your ~/.bashrc or ~/.zshrc 57 | 58 | ``` 59 | alias debug="python ~/always-on-debugger/terminal.py" 60 | export ANTHROPIC_API_KEY= 61 | ``` 62 | 63 | _Step 3: Source ~/.bashrc or ~/.zshrc . Alternatively, open a new terminal._ 64 | 65 | ```bash 66 | source ~/.bashrc 67 | ``` 68 | 69 | ## Usage 70 | 71 | Just prefix any terminal command with `debug`. That's it, the debugger will automatically kick in when an error is detected and print the error along with the suggested course of action. Here's an example: 72 | 73 | > Normally, the developer would only see the error.
74 | 75 | ```bash 76 | [14:57:19] ➜ demo git:(main) ✗ python average.py 77 | The average is: 3.0 78 | Traceback (most recent call last): 79 | File "/Users/samarthaggarwal/personal/always-on-debugger/demo/average.py", line 15, in 80 | average_of_empty = calculate_average(empty_list) 81 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 82 | File "/Users/samarthaggarwal/personal/always-on-debugger/demo/average.py", line 7, in calculate_average 83 | return total / count 84 | ~~~~~~^~~~~~~ 85 | ZeroDivisionError: division by zero 86 | ``` 87 | 88 | > Prefixing the same command with `debug` prints the error along with diagnosis and recommendations. 89 | 90 | ```bash 91 | [14:57:21] ➜ demo git:(main) ✗ debug python average.py 92 | Traceback (most recent call last): 93 | File "/Users/samarthaggarwal/personal/always-on-debugger/demo/average.py", line 15, in 94 | average_of_empty = calculate_average(empty_list) 95 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 96 | File "/Users/samarthaggarwal/personal/always-on-debugger/demo/average.py", line 7, in calculate_average 97 | return total / count 98 | ~~~~~~^~~~~~~ 99 | ZeroDivisionError: division by zero 100 | 101 | --------------------------------------------------------------------- 102 | Debugging... 103 | 104 | --------------------------------------------------------------------- 105 | To fix this error and improve the calculate_average function, follow these steps: 106 | 107 | 1. Modify the calculate_average function to handle empty lists: 108 | def calculate_average(numbers): 109 | if not numbers: # Check if the list is empty 110 | return 0 # or you could return None, or raise a custom exception 111 | total = sum(numbers) 112 | count = len(numbers) 113 | return total / count 114 | 115 | 2. This modification checks if the input list is empty before performing any calculations. If it is empty, it returns 0 (or you could choose to return None or raise a custom exception, depending on how you want to handle this case). 116 | 117 | 3. 
Test the function with both non-empty and empty lists to ensure it works correctly in all cases. 118 | 119 | 4. If you want to keep the original loop structure, you can modify it like this: 120 | def calculate_average(numbers): 121 | total = 0 122 | count = 0 123 | for num in numbers: 124 | total += num 125 | count += 1 126 | if count == 0: 127 | return 0 # or None, or raise an exception 128 | return total / count 129 | 130 | 5. Choose the implementation that best fits your needs and coding style. 131 | 132 | 133 | By implementing one of these solutions, you will prevent the ZeroDivisionError and handle empty lists gracefully. 134 | ``` 135 | 136 | ## How it works? 137 | 138 | 1. Prefix your terminal commands with `debug`. 139 | 2. If an error occurs, Always-On Debugger automatically captures the context. 140 | 3. The error context is sent to an AI language model for analysis. 141 | 4. Debugging suggestions are printed directly to your terminal. 142 | 143 | ## Project Structure 144 | 145 | - `debugger.py`: Core Python script that orchestrates the debugging flow 146 | - `llm.py`: Handles LLM interaction (prompt generation and response parsing) 147 | - (Additional files for terminal wrapping and packaging) 148 | 149 | ## License 150 | 151 | [MIT License](LICENSE) 152 | -------------------------------------------------------------------------------- /debugger.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | from typing import Dict, Any, Optional 4 | from dotenv import load_dotenv, find_dotenv 5 | from typing import Callable, Dict, List, Optional 6 | 7 | import openai 8 | import anthropic 9 | 10 | # Environment setup 11 | dotenv_path = find_dotenv() 12 | load_dotenv(dotenv_path) 13 | 14 | from llm import AnthropicLLM, Conversation 15 | 16 | class Debugger: 17 | DELIMITER = "#######" 18 | FILE_DETECTION_SYSTEM_PROMPT = f""" 19 | You are an experienced engineer who is helping a junior engineer to debug 
the error. 20 | 21 | {DELIMITER} Instruction: 22 | - Carefully read the command run by the user and the resulting error. 23 | - Identify if the error is related to a specific file. If so, return the file path. If not, return NO_FILE. 24 | 25 | {DELIMITER} Output: 26 | 27 | {{Just return the file path as a string. If not related to any file, return NO_FILE. Do not add any other information.}} 28 | 29 | """ 30 | 31 | DEBUG_SYSTEM_PROMPT = f""" 32 | You are an experienced engineer who is helping a junior engineer to debug the error. 33 | 34 | {DELIMITER} Instruction: 35 | - Carefully read the user's input that includes error and optionally code snippet. 36 | - Generate a list of hypotheses to explain the error. 37 | - Then suggest clear and concise steps users can follow to validate the error 38 | 39 | {DELIMITER}: Output 40 | 41 | {{Present your thinking and approach here}} 42 | 43 | 44 | {{Present your recommendation here. Be very specific, concise and actionable.}} 45 | 46 | """ 47 | # 48 | # {{Present your reflection on the recommendation here. 
If you think the recommendation is not enough, suggest more information you may need to solve the problem.}} 49 | # 50 | 51 | def __init__(self): 52 | self.llm = AnthropicLLM() 53 | self.llm.client.api_key = os.environ.get("ANTHROPIC_API_KEY") 54 | #print(f"Anthropic API key: {self.llm.client.api_key}") 55 | 56 | def generate_user_prompt_for_file_detection(self, command: str, error: str) -> str: 57 | return f""" 58 | Here is the command and error message: 59 | =============== 60 | 61 | {command} 62 | 63 | 64 | {error} 65 | 66 | """ 67 | 68 | def detect_file_path(self, command: str, error: str) -> Optional[str]: 69 | user_prompt = self.generate_user_prompt_for_file_detection(command, error) 70 | conversation = Conversation() 71 | conversation.add_user_message(user_prompt) 72 | messages = conversation.to_dict() 73 | 74 | llm_response = self.llm.generate_conversation(self.FILE_DETECTION_SYSTEM_PROMPT, messages, model="small") 75 | parsed_response = self.parse_response(llm_response) 76 | filepath = parsed_response["filepath"] 77 | if filepath == "NO_FILE": 78 | return None 79 | return filepath 80 | 81 | def generate_user_prompt_for_debug(self, command: str, error: str, code_snippet: Optional[str]) -> str: 82 | if code_snippet is None: 83 | return f""" 84 | Here is the command and error message: 85 | =============== 86 | 87 | {command} 88 | 89 | 90 | {error} 91 | 92 | """ 93 | else: 94 | return f""" 95 | Here is the command, error message and relevant code: 96 | =============== 97 | 98 | {command} 99 | 100 | 101 | {error} 102 | 103 | 104 | {code_snippet} 105 | 106 | =============== 107 | """ 108 | 109 | def debug(self, command: str, error: str, code_snippet: Optional[str] = None) -> Dict[str, Any]: 110 | user_prompt = self.generate_user_prompt_for_debug(command, error, code_snippet) 111 | conversation = Conversation() 112 | conversation.add_user_message(user_prompt) 113 | messages = conversation.to_dict() 114 | 115 | llm_response = 
self.llm.generate_conversation_stream_print(self.DEBUG_SYSTEM_PROMPT, messages, model="latest_large") 116 | parsed_response = self.parse_response(llm_response) 117 | 118 | return parsed_response 119 | 120 | @staticmethod 121 | def parse_response(llm_response: str) -> Dict[str, Any]: 122 | """ 123 | Parse an XML-like string LLM response to get structured output using regex. 124 | """ 125 | try: 126 | result = {} 127 | 128 | patterns = { 129 | 'thinking': r'(.*?)', 130 | 'recommendation': r'(.*?)', 131 | # 'reflection': r'(.*?)' 132 | 'filepath': r'(.*?)' 133 | } 134 | 135 | for key, pattern in patterns.items(): 136 | match = re.search(pattern, llm_response, re.DOTALL) 137 | result[key] = match.group(1).strip() if match else "" 138 | 139 | return result 140 | except Exception as e: 141 | print(f"Error parsing response: {e}") 142 | return {} -------------------------------------------------------------------------------- /demo/average.py: -------------------------------------------------------------------------------- 1 | def calculate_average(numbers): 2 | total = 0 3 | count = 0 4 | for num in numbers: 5 | total += num 6 | count += 1 7 | return total / count 8 | 9 | numbers = [1, 2, 3, 4, 5] 10 | result = calculate_average(numbers) 11 | print(f"The average is: {result}") 12 | 13 | # This line contains the bug 14 | empty_list = [] 15 | average_of_empty = calculate_average(empty_list) 16 | print(f"The average of an empty list is: {average_of_empty}") 17 | -------------------------------------------------------------------------------- /failingJavascript.js: -------------------------------------------------------------------------------- 1 | console.log('HEY from Javascript!'); 2 | throw Error("'Some error occured! 
Ooops!"); 3 | -------------------------------------------------------------------------------- /failingPython.py: -------------------------------------------------------------------------------- 1 | print("HEY FROM PYTHON !!!") 2 | raise Exception("Sorry, no numbers below zero") -------------------------------------------------------------------------------- /llm.py: -------------------------------------------------------------------------------- 1 | # Related third-party imports 2 | import abc 3 | import os 4 | import logging 5 | import tiktoken 6 | from dotenv import load_dotenv, find_dotenv 7 | from typing import Callable, Dict, List, Optional 8 | 9 | import openai 10 | import anthropic 11 | # Environment setup 12 | dotenv_path = find_dotenv() 13 | load_dotenv(dotenv_path) 14 | 15 | #OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] 16 | ANTHROPIC_API_KEY = os.environ["ANTHROPIC_API_KEY"] 17 | #print(f"Anthropic API key: {ANTHROPIC_API_KEY}") 18 | 19 | 20 | MODEL_MAPPING = { 21 | "openai": { 22 | "small": "gpt-3.5-turbo-0125", 23 | "medium": "gpt-4", 24 | "large": "gpt-4-turbo", 25 | "latest_large": "gpt-4o" 26 | }, 27 | "anthropic": { 28 | "small": "claude-3-haiku-20240307", 29 | "medium": "claude-3-sonnet-20240229", 30 | "large": "claude-3-opus-20240229", 31 | "latest_large": "claude-3-5-sonnet-20240620" 32 | } 33 | } 34 | 35 | class LLM(abc.ABC): 36 | def __init__(self, api_key: Optional[str] = None): 37 | self.api_key = api_key 38 | 39 | @abc.abstractmethod 40 | def generate_text(self, prompt: str, **kwargs) -> str: 41 | pass 42 | 43 | @abc.abstractmethod 44 | def stream_text(self, prompt: str, **kwargs): 45 | pass 46 | 47 | class OpenAILLM(LLM): 48 | def __init__(self, api_key: Optional[str] = None): 49 | super().__init__(api_key or OPENAI_API_KEY) 50 | self.client = openai.OpenAI(api_key=self.api_key) 51 | 52 | def generate_text(self, prompt: str, model: str = "large", **kwargs) -> str: 53 | model_name = MODEL_MAPPING["openai"][model] 54 | messages = [{"role": 
"user", "content": prompt}] 55 | response = self.client.chat.completions.create( 56 | model=model_name, 57 | messages=messages, 58 | temperature=kwargs.get("temperature", 0.2), 59 | ) 60 | logger.info(f"Response from OpenAI: {response.choices[0].message.content}") 61 | return response.choices[0].message.content 62 | 63 | def stream_text(self, prompt: str, model: str = "large", **kwargs): 64 | model_name = MODEL_MAPPING["openai"][model] 65 | messages = [{"role": "user", "content": prompt}] 66 | stream = self.client.chat.completions.create( 67 | model=model_name, 68 | messages=messages, 69 | temperature=kwargs.get("temperature", 0.2), 70 | stream=True 71 | ) 72 | for chunk in stream: 73 | if chunk.choices[0].message['content'] is not None: 74 | yield chunk.choices[0].message['content'] 75 | 76 | def generate_conversation(self, messages, model: str = "large", **kwargs) -> str: 77 | model_name = MODEL_MAPPING["openai"][model] 78 | response = self.client.chat.completions.create( 79 | model=model_name, 80 | messages=messages, 81 | temperature=kwargs.get("temperature", 0.2), 82 | ) 83 | logger.info(f"Response from OpenAI: {response.choices[0].message.content}") 84 | return response.choices[0].message.content 85 | 86 | def generate_conversation_stream(self, messages, model: str = "large", **kwargs): 87 | model_name = MODEL_MAPPING["openai"][model] 88 | stream = self.client.chat.completions.create( 89 | model=model_name, 90 | messages=messages, 91 | temperature=kwargs.get("temperature", 0.2), 92 | stream=True 93 | ) 94 | for chunk in stream: 95 | if chunk.choices[0].delta.content is not None: 96 | response_string = chunk.choices[0].delta.content 97 | yield response_string 98 | 99 | 100 | class AnthropicLLM(LLM): 101 | def __init__(self, api_key: Optional[str] = None): 102 | super().__init__(api_key or ANTHROPIC_API_KEY) 103 | self.client = anthropic.Anthropic(api_key=self.api_key) 104 | 105 | def generate_text(self, prompt: str, model: str = "large", **kwargs) -> str: 106 | 
model_name = MODEL_MAPPING["anthropic"][model] 107 | response = self.client.messages.create( 108 | model=model_name, 109 | temperature=kwargs.get("temperature", 0.0), 110 | max_tokens=kwargs.get("max_tokens", 4096), 111 | messages=[{"role": "user", "content": prompt}] 112 | ) 113 | logger.info(f"Response from Anthropic: {response.content[0].text}") 114 | return response.content[0].text 115 | 116 | def generate_conversation(self, system_message, messages, model: str = "large", **kwargs) -> str: 117 | model_name = MODEL_MAPPING["anthropic"][model] 118 | system = system_message 119 | response = self.client.messages.create( 120 | model=model_name, 121 | system=system, 122 | temperature=kwargs.get("temperature", 0.0), 123 | max_tokens=kwargs.get("max_tokens", 4096), 124 | messages=messages 125 | ) 126 | return response.content[0].text 127 | 128 | def stream_text(self, prompt: str, model: str = "large", **kwargs): 129 | model_name = MODEL_MAPPING["anthropic"][model] 130 | messages = [{"role": "user", "content": prompt}] 131 | with self.client.messages.stream( 132 | model=model_name, 133 | temperature=kwargs.get("temperature", 0.0), 134 | max_tokens=kwargs.get("max_tokens", 4096), 135 | messages=messages, 136 | ) as stream: 137 | for chunk in stream.text_stream: 138 | yield chunk 139 | 140 | def generate_conversation_stream(self, system_message, messages, model: str = "large", **kwargs): 141 | model_name = MODEL_MAPPING["anthropic"][model] 142 | system = system_message 143 | with self.client.messages.stream( 144 | system=system, 145 | model=model_name, 146 | temperature=kwargs.get("temperature", 0.0), 147 | max_tokens=kwargs.get("max_tokens", 4096), 148 | messages=messages, 149 | ) as stream: 150 | for chunk in stream.text_stream: 151 | if chunk is not None: 152 | yield chunk 153 | 154 | def generate_conversation_stream_print(self, system_message, messages, model: str = "large", **kwargs): 155 | model_name = MODEL_MAPPING["anthropic"][model] 156 | system = system_message 
157 | response = "" 158 | with self.client.messages.stream( 159 | system=system, 160 | model=model_name, 161 | temperature=kwargs.get("temperature", 0.0), 162 | max_tokens=kwargs.get("max_tokens", 4096), 163 | messages=messages, 164 | ) as stream: 165 | for chunk in stream.text_stream: 166 | if chunk is not None: 167 | response += chunk 168 | # print(chunk, end="", flush=True) 169 | return response 170 | 171 | class Message: 172 | def __init__( 173 | self, 174 | role: str, 175 | content: str, 176 | name: str = None, 177 | ): 178 | self.role = role 179 | self.content = content 180 | self.name = name 181 | 182 | def to_dict(self): 183 | message_dict = {"role": self.role, "content": self.content} 184 | if self.name: 185 | message_dict["name"] = self.name 186 | return message_dict 187 | 188 | class Conversation: 189 | def __init__(self, system_message=None): 190 | self.messages = [] 191 | if system_message: 192 | self.messages.append(Message(role="system", content=system_message)) 193 | 194 | def __iter__(self): 195 | for message in self.messages: 196 | yield message 197 | 198 | def to_dict(self): 199 | return [message.to_dict() for message in self.messages] 200 | 201 | def add_message(self, message: Dict[str, str]): 202 | self.messages.append(Message(**message)) 203 | 204 | def add_user_message(self, question: str): 205 | self.add_message({"role": "user", "content": question}) 206 | 207 | def add_assistant_message(self, answer: str): 208 | self.add_message({"role": "assistant", "content": answer}) 209 | 210 | def print_conversation(self): 211 | for message in self.messages: 212 | logger.info(f"{message.role}: {message.content}") -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "wrapper", 3 | "lockfileVersion": 3, 4 | "requires": true, 5 | "packages": { 6 | "": { 7 | "devDependencies": { 8 | "@types/node": "^22.5.4", 9 | 
"ts-node": "^10.9.2" 10 | } 11 | }, 12 | "node_modules/@cspotcode/source-map-support": { 13 | "version": "0.8.1", 14 | "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", 15 | "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", 16 | "dev": true, 17 | "dependencies": { 18 | "@jridgewell/trace-mapping": "0.3.9" 19 | }, 20 | "engines": { 21 | "node": ">=12" 22 | } 23 | }, 24 | "node_modules/@jridgewell/resolve-uri": { 25 | "version": "3.1.2", 26 | "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", 27 | "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", 28 | "dev": true, 29 | "engines": { 30 | "node": ">=6.0.0" 31 | } 32 | }, 33 | "node_modules/@jridgewell/sourcemap-codec": { 34 | "version": "1.5.0", 35 | "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", 36 | "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", 37 | "dev": true 38 | }, 39 | "node_modules/@jridgewell/trace-mapping": { 40 | "version": "0.3.9", 41 | "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", 42 | "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", 43 | "dev": true, 44 | "dependencies": { 45 | "@jridgewell/resolve-uri": "^3.0.3", 46 | "@jridgewell/sourcemap-codec": "^1.4.10" 47 | } 48 | }, 49 | "node_modules/@tsconfig/node10": { 50 | "version": "1.0.11", 51 | "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", 52 | "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", 53 | "dev": true 54 | }, 55 | "node_modules/@tsconfig/node12": { 56 | "version": "1.0.11", 57 | "resolved": 
"https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", 58 | "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", 59 | "dev": true 60 | }, 61 | "node_modules/@tsconfig/node14": { 62 | "version": "1.0.3", 63 | "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", 64 | "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", 65 | "dev": true 66 | }, 67 | "node_modules/@tsconfig/node16": { 68 | "version": "1.0.4", 69 | "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", 70 | "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", 71 | "dev": true 72 | }, 73 | "node_modules/@types/node": { 74 | "version": "22.5.4", 75 | "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.4.tgz", 76 | "integrity": "sha512-FDuKUJQm/ju9fT/SeX/6+gBzoPzlVCzfzmGkwKvRHQVxi4BntVbyIwf6a4Xn62mrvndLiml6z/UBXIdEVjQLXg==", 77 | "dev": true, 78 | "dependencies": { 79 | "undici-types": "~6.19.2" 80 | } 81 | }, 82 | "node_modules/acorn": { 83 | "version": "8.12.1", 84 | "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", 85 | "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", 86 | "dev": true, 87 | "bin": { 88 | "acorn": "bin/acorn" 89 | }, 90 | "engines": { 91 | "node": ">=0.4.0" 92 | } 93 | }, 94 | "node_modules/acorn-walk": { 95 | "version": "8.3.3", 96 | "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", 97 | "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", 98 | "dev": true, 99 | "dependencies": { 100 | "acorn": "^8.11.0" 101 | }, 102 | "engines": { 103 | "node": ">=0.4.0" 104 | } 105 | }, 106 | "node_modules/arg": { 107 | "version": "4.1.3", 108 | "resolved": 
"https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", 109 | "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", 110 | "dev": true 111 | }, 112 | "node_modules/create-require": { 113 | "version": "1.1.1", 114 | "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", 115 | "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", 116 | "dev": true 117 | }, 118 | "node_modules/diff": { 119 | "version": "4.0.2", 120 | "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", 121 | "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", 122 | "dev": true, 123 | "engines": { 124 | "node": ">=0.3.1" 125 | } 126 | }, 127 | "node_modules/make-error": { 128 | "version": "1.3.6", 129 | "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", 130 | "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", 131 | "dev": true 132 | }, 133 | "node_modules/ts-node": { 134 | "version": "10.9.2", 135 | "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", 136 | "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", 137 | "dev": true, 138 | "dependencies": { 139 | "@cspotcode/source-map-support": "^0.8.0", 140 | "@tsconfig/node10": "^1.0.7", 141 | "@tsconfig/node12": "^1.0.7", 142 | "@tsconfig/node14": "^1.0.0", 143 | "@tsconfig/node16": "^1.0.2", 144 | "acorn": "^8.4.1", 145 | "acorn-walk": "^8.1.1", 146 | "arg": "^4.1.0", 147 | "create-require": "^1.1.0", 148 | "diff": "^4.0.1", 149 | "make-error": "^1.1.1", 150 | "v8-compile-cache-lib": "^3.0.1", 151 | "yn": "3.1.1" 152 | }, 153 | "bin": { 154 | "ts-node": "dist/bin.js", 155 | "ts-node-cwd": "dist/bin-cwd.js", 156 | "ts-node-esm": "dist/bin-esm.js", 157 | "ts-node-script": 
"dist/bin-script.js", 158 | "ts-node-transpile-only": "dist/bin-transpile.js", 159 | "ts-script": "dist/bin-script-deprecated.js" 160 | }, 161 | "peerDependencies": { 162 | "@swc/core": ">=1.2.50", 163 | "@swc/wasm": ">=1.2.50", 164 | "@types/node": "*", 165 | "typescript": ">=2.7" 166 | }, 167 | "peerDependenciesMeta": { 168 | "@swc/core": { 169 | "optional": true 170 | }, 171 | "@swc/wasm": { 172 | "optional": true 173 | } 174 | } 175 | }, 176 | "node_modules/typescript": { 177 | "version": "5.5.4", 178 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz", 179 | "integrity": "sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==", 180 | "dev": true, 181 | "peer": true, 182 | "bin": { 183 | "tsc": "bin/tsc", 184 | "tsserver": "bin/tsserver" 185 | }, 186 | "engines": { 187 | "node": ">=14.17" 188 | } 189 | }, 190 | "node_modules/undici-types": { 191 | "version": "6.19.8", 192 | "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", 193 | "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", 194 | "dev": true 195 | }, 196 | "node_modules/v8-compile-cache-lib": { 197 | "version": "3.0.1", 198 | "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", 199 | "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", 200 | "dev": true 201 | }, 202 | "node_modules/yn": { 203 | "version": "3.1.1", 204 | "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", 205 | "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", 206 | "dev": true, 207 | "engines": { 208 | "node": ">=6" 209 | } 210 | } 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "aidebug", 3 | "version": "1.0.3", 4 | "repository": { 5 | "type": "git", 6 | "url": "https://github.com/samarthaggarwal/always-on-debugger.git" 7 | }, 8 | "homepage": "https://github.com/samarthaggarwal/always-on-debugger", 9 | "bin": { 10 | "aod": "./wrapper.js", 11 | "debug": "./wrapper.js" 12 | }, 13 | "scripts": { 14 | "start": "node wrapper.js" 15 | }, 16 | "dependencies": { 17 | "aidebug": "^1.0.0" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tiktoken 2 | openai 3 | anthropic 4 | python-dotenv 5 | -------------------------------------------------------------------------------- /terminal.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | import os 4 | from debugger import Debugger 5 | 6 | class Command: 7 | def __init__(self, args: list[str]): 8 | if len(args) == 0: 9 | print("format: python terminal.py ") 10 | exit(1) 11 | self.raw_command = " ".join(args) 12 | self.interpreter = args[0] 13 | self.filename = args[1] if len(args) > 1 else None 14 | self.args = args[2:] if len(args) > 2 else [] 15 | self.debugger = Debugger() 16 | 17 | def run(self): 18 | try: 19 | result = subprocess.run(self.raw_command, shell=True, check=True, text=True, capture_output=True) 20 | print(result.stdout) 21 | except subprocess.CalledProcessError as e: 22 | print(e.stderr) 23 | print("-" * 100 + "\n" + "Debugging..." 
+ "\n" + "-" * 100) 24 | 25 | # Detect file path 26 | filepath = self.debugger.detect_file_path(self.raw_command, e.stderr) 27 | if filepath is not None and not os.path.exists(filepath): 28 | filepath = None 29 | 30 | # Read code snippet 31 | code_snippet = None 32 | if filepath is not None: 33 | with open(filepath, 'r') as file: 34 | code_snippet = file.read() 35 | response = self.debugger.debug(self.raw_command, e.stderr, code_snippet) 36 | print(response["recommendation"]) 37 | 38 | if __name__ == "__main__": 39 | cmd = Command(sys.argv[1:]) 40 | cmd.run() 41 | -------------------------------------------------------------------------------- /tests/test_debugger.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 4 | from debugger import Debugger 5 | 6 | def test_debugger(): 7 | debugger = Debugger() 8 | result = debugger.debug("Error: File not found", "print('Hello, World!')") 9 | print(result) 10 | for key, value in result.items(): 11 | print(f"{key}: {value}") 12 | print("-" * 100) 13 | 14 | 15 | if __name__ == "__main__": 16 | test_debugger() 17 | 18 | """ 19 | Sample output: 20 | 21 | 22 | Given the error message "File not found" and the provided code snippet, there seems to be a mismatch between the error and the code. The code snippet is a simple print statement that doesn't involve any file operations. This suggests that the error might be occurring in a different part of the code or during the execution process. 23 | 24 | Possible hypotheses: 25 | 1. The error is unrelated to the provided code snippet and occurs elsewhere in the program. 26 | 2. There might be an issue with the file containing this code (e.g., it's not saved or in the wrong location). 27 | 3. The Python interpreter might be trying to execute a different file that doesn't exist. 28 | 4. 
There could be an issue with the Python environment or the way the script is being run. 29 | 30 | To validate these hypotheses, we need to gather more information about the execution context and the full code. 31 | 32 | 33 | 34 | To debug this issue, please follow these steps: 35 | 36 | 1. Verify the file location: 37 | - Ensure that the Python file containing your code is saved in the correct directory. 38 | - Check if the file has the correct .py extension. 39 | 40 | 2. Run the script directly: 41 | - Open a terminal or command prompt. 42 | - Navigate to the directory containing your Python file. 43 | - Run the script using the command: python your_file_name.py 44 | - Observe if the same error occurs or if "Hello, World!" is printed. 45 | 46 | 3. Check for other files: 47 | - Look for any other Python files in the same directory that might be causing the error. 48 | - Ensure you're running the intended file. 49 | 50 | 4. Verify Python installation: 51 | - Open a Python interactive shell by typing 'python' in the terminal. 52 | - If it opens successfully, try running the print statement directly in the shell. 53 | 54 | 5. Examine the full code: 55 | - If the error persists, please provide the complete code of your Python file, not just the single print statement. 56 | 57 | 6. Check execution method: 58 | - If you're running the script through an IDE or another tool, try running it directly from the command line as suggested in step 2. 59 | 60 | Please report back with the results of these steps and any new error messages you encounter. 61 | 62 | 63 | 64 | The provided information is insufficient to definitively solve the problem. The error message suggests a file-related issue, but the code snippet doesn't involve any file operations. This discrepancy indicates that crucial information is missing. 65 | 66 | To provide a more accurate and helpful solution, we would need: 67 | 1. The complete code of the Python file, not just the single print statement. 68 | 2. 
Information about how the script is being executed (e.g., from command line, IDE, etc.). 69 | 3. Details about the Python environment (version, how it's installed, etc.). 70 | 4. The exact command or method used to run the script that produces this error. 71 | 5. Any additional error messages or stack traces that might be available. 72 | 73 | With this additional information, we could provide a more targeted and effective solution to the problem. 74 | {'thinking': 'Given the error message "File not found" and the provided code snippet, there seems to be a mismatch between the error and the code. The code snippet is a simple print statement that doesn\'t involve any file operations. This suggests that the error might be occurring in a different part of the code or during the execution process.\n\nPossible hypotheses:\n1. The error is unrelated to the provided code snippet and occurs elsewhere in the program.\n2. There might be an issue with the file containing this code (e.g., it\'s not saved or in the wrong location).\n3. The Python interpreter might be trying to execute a different file that doesn\'t exist.\n4. There could be an issue with the Python environment or the way the script is being run.\n\nTo validate these hypotheses, we need to gather more information about the execution context and the full code.', 'recommendation': 'To debug this issue, please follow these steps:\n\n1. Verify the file location:\n - Ensure that the Python file containing your code is saved in the correct directory.\n - Check if the file has the correct .py extension.\n\n2. Run the script directly:\n - Open a terminal or command prompt.\n - Navigate to the directory containing your Python file.\n - Run the script using the command: python your_file_name.py\n - Observe if the same error occurs or if "Hello, World!" is printed.\n\n3. 
Check for other files:\n - Look for any other Python files in the same directory that might be causing the error.\n - Ensure you\'re running the intended file.\n\n4. Verify Python installation:\n - Open a Python interactive shell by typing \'python\' in the terminal.\n - If it opens successfully, try running the print statement directly in the shell.\n\n5. Examine the full code:\n - If the error persists, please provide the complete code of your Python file, not just the single print statement.\n\n6. Check execution method:\n - If you\'re running the script through an IDE or another tool, try running it directly from the command line as suggested in step 2.\n\nPlease report back with the results of these steps and any new error messages you encounter.', 'reflection': "The provided information is insufficient to definitively solve the problem. The error message suggests a file-related issue, but the code snippet doesn't involve any file operations. This discrepancy indicates that crucial information is missing.\n\nTo provide a more accurate and helpful solution, we would need:\n1. The complete code of the Python file, not just the single print statement.\n2. Information about how the script is being executed (e.g., from command line, IDE, etc.).\n3. Details about the Python environment (version, how it's installed, etc.).\n4. The exact command or method used to run the script that produces this error.\n5. Any additional error messages or stack traces that might be available.\n\nWith this additional information, we could provide a more targeted and effective solution to the problem."} 75 | thinking: Given the error message "File not found" and the provided code snippet, there seems to be a mismatch between the error and the code. The code snippet is a simple print statement that doesn't involve any file operations. This suggests that the error might be occurring in a different part of the code or during the execution process. 76 | 77 | Possible hypotheses: 78 | 1. 
The error is unrelated to the provided code snippet and occurs elsewhere in the program. 79 | 2. There might be an issue with the file containing this code (e.g., it's not saved or in the wrong location). 80 | 3. The Python interpreter might be trying to execute a different file that doesn't exist. 81 | 4. There could be an issue with the Python environment or the way the script is being run. 82 | 83 | To validate these hypotheses, we need to gather more information about the execution context and the full code. 84 | ---------------------------------------------------------------------------------------------------- 85 | recommendation: To debug this issue, please follow these steps: 86 | 87 | 1. Verify the file location: 88 | - Ensure that the Python file containing your code is saved in the correct directory. 89 | - Check if the file has the correct .py extension. 90 | 91 | 2. Run the script directly: 92 | - Open a terminal or command prompt. 93 | - Navigate to the directory containing your Python file. 94 | - Run the script using the command: python your_file_name.py 95 | - Observe if the same error occurs or if "Hello, World!" is printed. 96 | 97 | 3. Check for other files: 98 | - Look for any other Python files in the same directory that might be causing the error. 99 | - Ensure you're running the intended file. 100 | 101 | 4. Verify Python installation: 102 | - Open a Python interactive shell by typing 'python' in the terminal. 103 | - If it opens successfully, try running the print statement directly in the shell. 104 | 105 | 5. Examine the full code: 106 | - If the error persists, please provide the complete code of your Python file, not just the single print statement. 107 | 108 | 6. Check execution method: 109 | - If you're running the script through an IDE or another tool, try running it directly from the command line as suggested in step 2. 110 | 111 | Please report back with the results of these steps and any new error messages you encounter. 
112 | ---------------------------------------------------------------------------------------------------- 113 | reflection: The provided information is insufficient to definitively solve the problem. The error message suggests a file-related issue, but the code snippet doesn't involve any file operations. This discrepancy indicates that crucial information is missing. 114 | 115 | To provide a more accurate and helpful solution, we would need: 116 | 1. The complete code of the Python file, not just the single print statement. 117 | 2. Information about how the script is being executed (e.g., from command line, IDE, etc.). 118 | 3. Details about the Python environment (version, how it's installed, etc.). 119 | 4. The exact command or method used to run the script that produces this error. 120 | 5. Any additional error messages or stack traces that might be available. 121 | 122 | With this additional information, we could provide a more targeted and effective solution to the problem. 123 | ---------------------------------------------------------------------------------------------------- 124 | """ -------------------------------------------------------------------------------- /wrapper.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const { argv } = require('node:process'); 4 | const { exec, execSync, spawn, spawnSync } = require('child_process'); 5 | const { join } = require('path'); 6 | 7 | // npm run start -- "py -3.8 -m terminal" 8 | // npm pack 9 | // npm i -g aod-0.1.tgz 10 | // $env:ANTHROPIC_API_KEY = "" 11 | // npm publish 12 | 13 | async function run() { 14 | const [, , ...args] = argv; 15 | const command = args.join(' '); 16 | 17 | const terminalScript = join(__dirname, 'terminal.py'); 18 | 19 | // const terminalCommand = 20 | // process.platform === 'win32' ? 
`py -3.11 -m terminal "${command}"` : `python terminal "${command}"`; 21 | 22 | const terminalCommand = `python ${terminalScript} "${command}"`; 23 | 24 | try { 25 | const child = execSync(terminalCommand, { stdio: 'inherit' }); 26 | } catch (error) { 27 | console.log('Error happened', error); 28 | } 29 | } 30 | 31 | run(); 32 | --------------------------------------------------------------------------------