├── requirements.txt
├── 1_auto_coder_single_iteration.py
├── README.md
└── 2_auto_coder_iterative.py
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai
2 | termcolor
--------------------------------------------------------------------------------
/1_auto_coder_single_iteration.py:
--------------------------------------------------------------------------------
import os
from openai import OpenAI
# FIX: `from termcolor import termcolor` is not a valid import (termcolor has no
# `termcolor` attribute); the rest of this file calls `termcolor.colored(...)`,
# so import the module itself.
import termcolor

# Constants
USER_PROMPT = "a pygame tower defense game with only in game assets"  # Prompt sent to the model
# NOTE: the <code>/</code> tags had been stripped from this prompt (it read
# "between and tags"), which broke extraction downstream. Restored here.
SYSTEM_PROMPT = """You are a Python code generator. Return all code in between <code> and </code> tags.
Make sure the code is well-documented, follows best practices, and is ready to use. You must return the code in between <code> and </code> tags. do not use ```python or ```"""
MODEL = "deepseek/deepseek-chat"
OUTPUT_DIR = "generated_code"

try:
    # Initialize OpenAI client pointed at OpenRouter's OpenAI-compatible endpoint.
    print(termcolor.colored("Initializing OpenAI client...", "cyan"))
    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv("OPENROUTER_API_KEY")
    )

    # Create output directory if it doesn't exist
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
        print(termcolor.colored(f"Created output directory: {OUTPUT_DIR}", "green"))

    def extract_code(response_text):
        """Extract code from between <code> and </code> tags.

        Returns the stripped code string, or None if either tag is missing.
        """
        try:
            start_tag = "<code>"
            end_tag = "</code>"
            # FIX: locate both tags BEFORE offsetting by len(start_tag).
            # The original added len() to find()'s result first, which turned
            # the -1 "not found" sentinel into len-1 and made the check below
            # unreachable for the start tag. (Both tags were also empty strings,
            # so extraction always returned "".)
            start_index = response_text.find(start_tag)
            end_index = response_text.find(end_tag)

            if start_index == -1 or end_index == -1:
                raise ValueError("Code tags not found in response")

            return response_text[start_index + len(start_tag):end_index].strip()
        except Exception as e:
            print(termcolor.colored(f"Error extracting code: {str(e)}", "red"))
            return None

    def generate_code(prompt):
        """Generate code with the configured model, streaming the response.

        Extracted code is saved to a numbered file under OUTPUT_DIR.
        Returns the code string, or None on any failure.
        """
        try:
            print(termcolor.colored("Generating code...", "yellow"))
            print(termcolor.colored(f"Using prompt: {prompt}", "cyan"))
            stream = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": prompt}
                ],
                stream=True
            )

            # Collect the full response while streaming it to the terminal.
            full_response = ""
            print(termcolor.colored("\nStreaming response:", "cyan"))

            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    full_response += content

            print("\n")  # New line after streaming
            code = extract_code(full_response)

            if code:
                # Save the code to a file, numbered by current directory entry count.
                filename = f"{OUTPUT_DIR}/generated_code_{len(os.listdir(OUTPUT_DIR)) + 1}.py"
                with open(filename, "w", encoding="utf-8") as f:
                    f.write(code)
                # FIX: message previously printed a literal "(unknown)" placeholder
                # instead of the actual path.
                print(termcolor.colored(f"Code successfully generated and saved to: {filename}", "green"))
                return code
            else:
                print(termcolor.colored("No code was found in the response", "red"))
                return None

        except Exception as e:
            print(termcolor.colored(f"Error generating code: {str(e)}", "red"))
            return None

    if __name__ == "__main__":
        print(termcolor.colored(f"Using prompt: {USER_PROMPT}", "cyan"))
        generated_code = generate_code(USER_PROMPT)

        if generated_code:
            print(termcolor.colored("\nExtracted Code:", "green"))
            print(generated_code)

except Exception as e:
    print(termcolor.colored(f"Initialization error: {str(e)}", "red"))
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Auto Coder Project
2 |
3 | This project implements an AI-powered code generation system using the DeepSeek v3 model through OpenRouter's API. It consists of two implementations: a single-iteration version and an iterative version with code improvement capabilities.
4 |
5 | ## ⚠️ IMPORTANT SECURITY WARNING
6 |
7 | **This project executes AI-generated code on your machine. This can be potentially dangerous!**
8 |
9 | - AI-generated code could contain harmful operations
10 | - Consider adding a code review step before any generated code is executed
11 | - For maximum safety, run in an isolated environment (e.g. a virtual machine)
12 |
13 | **Use at your own risk. The authors are not responsible for any damage caused by AI-generated code.**
14 |
15 | ## 🤖 Model Flexibility
16 |
17 | While this project defaults to using **DeepSeek v3** through OpenRouter, it's designed to work with **any model available on OpenRouter**! You can easily switch models by changing the `MODEL` constant in either script:
18 |
19 | ```python
20 | MODEL = "deepseek/deepseek-chat" # Default
21 | ```
22 |
23 | - Or substitute any other model identifier supported by OpenRouter
24 |
25 | The code generation, error fixing, and improvement capabilities will adapt to whatever model you choose!
26 |
27 | ## Setup
28 |
29 | 1. Install dependencies:
30 |
31 | ```bash
32 | pip install -r requirements.txt
33 | ```
34 |
35 | 2. Set environment variable:
36 |
37 | ```bash
38 | export OPENROUTER_API_KEY="your_api_key_here" # Linux/Mac
39 | set OPENROUTER_API_KEY="your_api_key_here" # Windows
40 | ```
41 |
42 | ## ❤️ Support & Get 400+ AI Projects
43 |
44 | This is one of 400+ fascinating projects in my collection! [Support me on Patreon](https://www.patreon.com/c/echohive42/membership) to get:
45 |
46 | - 🎯 Access to 400+ AI projects (and growing daily!)
47 | - Including advanced projects like [2 Agent Real-time voice template with turn taking](https://www.patreon.com/posts/2-agent-real-you-118330397)
48 | - 📥 Full source code & detailed explanations
49 | - 📚 1000x Cursor Course
50 | - 🎓 Live coding sessions & AMAs
51 | - 💬 1-on-1 consultations (higher tiers)
52 | - 🎁 Exclusive discounts on AI tools & platforms (up to $180 value)
53 |
54 | ## 1. Single Iteration Auto Coder (`1_auto_coder_single_iteration.py`)
55 |
56 | A basic implementation that generates code once based on a user prompt.
57 |
58 | ### Features:
59 |
60 | - Takes user input as a constant variable (`USER_PROMPT`)
61 | - Uses DeepSeek model for code generation
62 | - Streams model responses in real-time
63 | - Extracts code from between `<code>` and `</code>` tags
64 | - Saves generated code to files in the `generated_code` directory
65 |
66 | ### Usage:
67 |
68 | 1. Set your desired prompt in the `USER_PROMPT` constant
69 | 2. Run the script:
70 |
71 | ```bash
72 | python 1_auto_coder_single_iteration.py
73 | ```
74 |
75 | ## 2. Iterative Auto Coder (`2_auto_coder_iterative.py`)
76 |
77 | An advanced implementation that iteratively improves generated code through execution feedback.
78 |
79 | ### Features:
80 |
81 | - All features from the single iteration version, plus:
82 | - Executes generated code with a 5-second timeout
83 | - Provides execution feedback to the model
84 | - Uses separate API calls for:
85 | - Initial code generation
86 | - Error fixing
87 | - Code improvements
88 | - Saves each iteration as numbered files (e.g., `generated_code_1.py`, `generated_code_2.py`)
89 | - Maximum 5 iterations for improvements
90 | - Handles timeouts gracefully (not treated as errors)
91 |
92 | ### Execution Flow:
93 |
94 | 1. Generate initial code
95 | 2. Execute the code
96 | 3. Based on execution result:
97 | - If timeout: Attempt to improve efficiency
98 | - If errors: Make fresh API call to fix errors
99 | - If success: Make fresh API call to add features
100 | 4. Save new iteration
101 | 5. Repeat until:
102 | - Max iterations reached
103 | - No more improvements needed
104 | - Error can't be fixed
105 |
106 | ### Usage:
107 |
108 | 1. Set your desired prompt in the `USER_PROMPT` constant
109 | 2. Run the script:
110 |
111 | ```bash
112 | python 2_auto_coder_iterative.py
113 | ```
114 |
115 | ## System Prompts
116 |
117 | The iterative version uses three distinct system prompts:
118 |
119 | 1. `SYSTEM_PROMPT_GENERATE`: For initial code generation
120 | 2. `SYSTEM_PROMPT_FIX`: For fixing errors (fresh context)
121 | 3. `SYSTEM_PROMPT_IMPROVE`: For adding features (fresh context)
122 |
123 | ## Output Directory Structure
124 |
125 | ```
126 | generated_code/
127 | ├── generated_code_1.py # Initial generation
128 | ├── generated_code_2.py # First improvement/fix
129 | ├── generated_code_3.py # Second improvement/fix
130 | └── ...
131 | ```
132 |
133 | ## Error Handling
134 |
135 | Both versions include comprehensive error handling:
136 |
137 | - API call errors
138 | - Code extraction errors
139 | - File saving errors
140 | - Code execution errors (iterative version)
141 | - Timeout handling (iterative version)
142 |
143 | ## Terminal Output
144 |
145 | Both versions provide colored terminal output using `termcolor`:
146 |
147 | - Cyan: Informational messages
148 | - Yellow: Processing status
149 | - Green: Success messages
150 | - Red: Error messages
151 |
152 | ## Limitations
153 |
154 | 1. 5-second execution timeout
155 | 2. Maximum 5 improvement iterations
156 | 3. No persistent context between API calls
157 | 4. Code must be complete and self-contained
158 | 5. Generated code must be valid Python
159 |
160 | ## Dependencies
161 |
162 | - `openai`: For API communication
163 | - `termcolor`: For colored terminal output
164 | - Python standard library modules:
165 | - `os`
166 | - `subprocess` (iterative version)
167 | - `time` (iterative version)
168 |
--------------------------------------------------------------------------------
/2_auto_coder_iterative.py:
--------------------------------------------------------------------------------
import os
import subprocess
import time  # currently unused here, but documented as a dependency in the README
from openai import OpenAI
from termcolor import colored

# Constants
USER_PROMPT = "a pygame tower defense game with only in game assets"  # Prompt sent to the model
# NOTE: the <code>/</code> tags had been stripped from these prompts (they read
# "between and tags"), which broke extraction downstream. Restored here.
SYSTEM_PROMPT_GENERATE = """You are a Python code generator. Return all code in between <code> and </code> tags.
Make sure the code is well-documented, follows best practices, and is ready to use. You must return the code in between <code> and </code> tags. do not use ```python or ```"""
SYSTEM_PROMPT_FIX = """You are a Python code error fixer. Analyze the error output and fix the code. Return the fixed code between <code> and </code> tags.
Do not refer to previous conversations or context. Focus only on fixing the current error."""
SYSTEM_PROMPT_IMPROVE = """You are a Python code improver. Analyze the working code and add more features or improvements. Return the improved code between <code> and </code> tags.
Do not refer to previous conversations or context. Focus only on improving the current code with new features."""
MODEL = "deepseek/deepseek-chat"
OUTPUT_DIR = "generated_code"
MAX_ITERATIONS = 5  # Maximum number of improvement iterations
EXECUTION_TIMEOUT = 5  # Maximum execution time in seconds

try:
    # Initialize OpenAI client pointed at OpenRouter's OpenAI-compatible endpoint.
    print(colored("Initializing OpenAI client...", "cyan"))
    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv("OPENROUTER_API_KEY")
    )

    # Create output directory if it doesn't exist
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
        print(colored(f"Created output directory: {OUTPUT_DIR}", "green"))

    def extract_code(response_text):
        """Extract code from between <code> and </code> tags.

        Returns the stripped code string, or None if either tag is missing.
        """
        try:
            start_tag = "<code>"
            end_tag = "</code>"
            # FIX: locate both tags BEFORE offsetting by len(start_tag).
            # The original added len() to find()'s result first, which turned
            # the -1 "not found" sentinel into len-1 and made the check below
            # unreachable for the start tag. (Both tags were also empty strings,
            # so extraction always returned "".)
            start_index = response_text.find(start_tag)
            end_index = response_text.find(end_tag)

            if start_index == -1 or end_index == -1:
                raise ValueError("Code tags not found in response")

            return response_text[start_index + len(start_tag):end_index].strip()
        except Exception as e:
            print(colored(f"Error extracting code: {str(e)}", "red"))
            return None

    def execute_code(filename):
        """Execute the generated code with a timeout and capture its output.

        Returns (output_text, has_errors). A timeout is reported but NOT
        treated as an error (games/GUIs legitimately run forever).
        """
        try:
            # FIX: message previously printed a literal "(unknown)" placeholder.
            print(colored(f"\nExecuting {filename}...", "yellow"))
            process = subprocess.Popen(
                ["python", filename],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )

            try:
                stdout, stderr = process.communicate(timeout=EXECUTION_TIMEOUT)

                # Print raw output first
                print(colored("\nRaw subprocess output:", "cyan"))
                print("=" * 50)
                if stdout:
                    print(stdout, end="")
                if stderr:
                    print(stderr, end="")
                print("=" * 50)

                # Format output for model consumption
                execution_output = "=" * 50 + "\n"
                if stdout.strip():
                    execution_output += colored("STDOUT:", "cyan") + "\n" + stdout + "\n"
                if stderr.strip():
                    execution_output += colored("STDERR:", "red") + "\n" + stderr + "\n"
                execution_output += "=" * 50

                # Heuristic: any stderr text, or "error"/"exception" appearing
                # in stdout, counts as a failure. NOTE(review): this can
                # false-positive on benign output containing those words.
                has_errors = bool(stderr.strip()) or "error" in stdout.lower() or "exception" in stdout.lower()
                return execution_output, has_errors

            except subprocess.TimeoutExpired:
                process.kill()
                # FIX: reap the killed child and drain its pipes; without this
                # the process object is left un-waited (zombie on POSIX).
                process.communicate()
                msg = f"Code execution timed out after {EXECUTION_TIMEOUT} seconds (this is not considered an error)"
                print(colored(msg, "yellow"))
                # Return timeout message but has_errors=False since timeouts are not errors
                return msg, False

        except Exception as e:
            error_msg = f"Error executing code: {str(e)}"
            print(colored(error_msg, "red"))
            return error_msg, True

    def generate_initial_code(prompt):
        """Generate the first code iteration, streaming the model response.

        Returns the extracted code string, or None on failure.
        """
        try:
            print(colored("\nGenerating initial code...", "yellow"))
            stream = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT_GENERATE},
                    {"role": "user", "content": prompt}
                ],
                stream=True
            )

            full_response = ""
            print(colored("\nStreaming response:", "cyan"))

            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    full_response += content

            print("\n")  # New line after streaming
            return extract_code(full_response)

        except Exception as e:
            print(colored(f"Error generating initial code: {str(e)}", "red"))
            return None

    def fix_code(code, error_output):
        """Ask the model to fix failing code, using a fresh (context-free) call.

        Returns the fixed code string, or None on failure.
        """
        try:
            print(colored("\nFixing code errors...", "yellow"))
            stream = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT_FIX},
                    {"role": "user", "content": f"Here is the code with errors:\n{code}\n\nError output:\n{error_output}\n\nPlease fix the code."}
                ],
                stream=True
            )

            full_response = ""
            print(colored("\nStreaming response:", "cyan"))

            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    full_response += content

            print("\n")  # New line after streaming
            return extract_code(full_response)

        except Exception as e:
            print(colored(f"Error fixing code: {str(e)}", "red"))
            return None

    def improve_code(code):
        """Ask the model to extend working code, using a fresh (context-free) call.

        Returns the improved code string, or None on failure.
        """
        try:
            print(colored("\nImproving code...", "yellow"))
            stream = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT_IMPROVE},
                    {"role": "user", "content": f"Here is the working code to improve:\n{code}\n\nPlease add more features or improvements."}
                ],
                stream=True
            )

            full_response = ""
            print(colored("\nStreaming response:", "cyan"))

            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    content = chunk.choices[0].delta.content
                    print(content, end="", flush=True)
                    full_response += content

            print("\n")  # New line after streaming
            return extract_code(full_response)

        except Exception as e:
            print(colored(f"Error improving code: {str(e)}", "red"))
            return None

    def save_code(code, iteration):
        """Save code for the given iteration number; return the filename or None."""
        try:
            filename = f"{OUTPUT_DIR}/generated_code_{iteration}.py"
            with open(filename, "w", encoding="utf-8") as f:
                f.write(code)
            # FIX: message previously printed a literal "(unknown)" placeholder.
            print(colored(f"Code saved to: {filename}", "green"))
            return filename
        except Exception as e:
            print(colored(f"Error saving code: {str(e)}", "red"))
            return None

    def iterative_code_generation(prompt):
        """Run the generate -> execute -> fix/improve loop, up to MAX_ITERATIONS."""
        iteration = 1

        # Generate initial code
        current_code = generate_initial_code(prompt)
        if not current_code:
            return

        filename = save_code(current_code, iteration)
        if not filename:
            return

        while iteration <= MAX_ITERATIONS:
            print(colored(f"\nIteration {iteration}/{MAX_ITERATIONS}", "yellow"))

            # Execute current code
            execution_output, has_errors = execute_code(filename)

            # If code timed out but didn't have errors, try to improve it.
            # NOTE(review): this detects the timeout by matching the message
            # text returned by execute_code — keep the two in sync.
            if "timed out" in execution_output.lower():
                print(colored("\nCode timed out but no errors found. Attempting to improve efficiency...", "yellow"))
                improved_code = improve_code(current_code)

                if improved_code and improved_code != current_code:
                    iteration += 1
                    current_code = improved_code
                    filename = save_code(current_code, iteration)
                    if not filename:
                        break
                else:
                    print(colored("\nNo improvements made for timeout.", "yellow"))
                    break

            # If no errors, try to improve the code
            elif not has_errors:
                print(colored("\nNo errors found. Attempting to improve code...", "green"))
                improved_code = improve_code(current_code)

                if improved_code and improved_code != current_code:
                    iteration += 1
                    current_code = improved_code
                    filename = save_code(current_code, iteration)
                    if not filename:
                        break
                else:
                    print(colored("\nNo further improvements needed.", "green"))
                    break

            # If there are errors, try to fix them
            else:
                print(colored("\nErrors found. Attempting to fix code...", "yellow"))
                fixed_code = fix_code(current_code, execution_output)

                if fixed_code:
                    iteration += 1
                    current_code = fixed_code
                    filename = save_code(current_code, iteration)
                    if not filename:
                        break
                else:
                    print(colored("Failed to fix code. Stopping iterations.", "red"))
                    break

            if iteration > MAX_ITERATIONS:
                print(colored(f"\nReached maximum iterations ({MAX_ITERATIONS})", "yellow"))
                break

    if __name__ == "__main__":
        print(colored(f"Using prompt: {USER_PROMPT}", "cyan"))
        iterative_code_generation(USER_PROMPT)

except Exception as e:
    print(colored(f"Initialization error: {str(e)}", "red"))
--------------------------------------------------------------------------------