from llama_index.core.tools import FunctionTool
import os


def code_reader_func(file_name):
    """Read a file from the local ``data`` directory and return its contents.

    Parameters
    ----------
    file_name : str
        Name of the file inside the ``data`` directory.

    Returns
    -------
    dict
        ``{"file_content": <text>}`` on success, or ``{"error": <message>}``
        when the file cannot be read.
    """
    path = os.path.join("data", file_name)
    try:
        # Explicit encoding avoids platform-dependent default decodes
        # (e.g. cp1252 on Windows) mangling or rejecting UTF-8 sources.
        with open(path, "r", encoding="utf-8") as f:
            content = f.read()
        return {"file_content": content}
    except Exception as e:
        # Deliberate best-effort: surface the failure as data so the agent
        # can react to it instead of the tool call crashing the loop.
        return {"error": str(e)}


code_reader = FunctionTool.from_defaults(
    fn=code_reader_func,
    name="code_reader",
    description="""this tool can read the contents of code files and return 
their results. Use this when you need to read the contents of a file""",
)
from flask import Flask, request, jsonify

app = Flask(__name__)

# In-memory "database" for simplicity: the list index serves as the item id.
items = []


# Create
@app.route("/items", methods=["POST"])
def create_item():
    """Append the posted JSON body as a new item; return it with 201."""
    data = request.get_json()
    items.append(data)
    return jsonify(data), 201


# Read all
@app.route("/items", methods=["GET"])
def read_items():
    """Return the full item list."""
    return jsonify(items)


# Read one
# NOTE: the route needs the <int:item_id> converter so Flask actually
# captures and passes item_id; a bare "/items/" route would raise a
# TypeError because the view expects an argument it never receives.
@app.route("/items/<int:item_id>", methods=["GET"])
def read_item(item_id):
    """Return the item at item_id, or 404 if out of range."""
    if item_id < 0 or item_id >= len(items):
        return "Item not found.", 404
    return jsonify(items[item_id])


# Update
@app.route("/items/<int:item_id>", methods=["PUT"])
def update_item(item_id):
    """Replace the item at item_id with the request's JSON body."""
    if item_id < 0 or item_id >= len(items):
        return "Item not found.", 404
    data = request.get_json()
    items[item_id] = data
    return jsonify(data)


# Delete
@app.route("/items/<int:item_id>", methods=["DELETE"])
def delete_item(item_id):
    """Remove the item at item_id; 204 on success, 404 if out of range."""
    if item_id < 0 or item_id >= len(items):
        return "Item not found.", 404
    del items[item_id]
    return "", 204


if __name__ == "__main__":
    app.run(debug=True)
from llama_index.llms.ollama import Ollama
from llama_parse import LlamaParse
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, PromptTemplate
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from pydantic import BaseModel
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.query_pipeline import QueryPipeline
from prompts import context, code_parser_template
from code_reader import code_reader
from dotenv import load_dotenv
import os
import ast

# Pull LLAMA_CLOUD_API_KEY (used by LlamaParse) from the .env file.
load_dotenv()

# General-purpose model: answers documentation queries and drives the
# JSON-parsing pipeline at the end of the file.
llm = Ollama(model="mistral", request_timeout=30.0)

# LlamaParse turns the PDFs under ./data into markdown before indexing.
# (Renamed from `parser` so it no longer shadows the output parser below.)
pdf_parser = LlamaParse(result_type="markdown")

file_extractor = {".pdf": pdf_parser}
documents = SimpleDirectoryReader("./data", file_extractor=file_extractor).load_data()

embed_model = resolve_embed_model("local:BAAI/bge-m3")
vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
query_engine = vector_index.as_query_engine(llm=llm)

tools = [
    QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(
            name="api_documentation",
            description="this gives documentation about code for an API. Use this for reading docs for the API",
        ),
    ),
    code_reader,
]

# A code-specialised model runs the ReAct agent itself.
code_llm = Ollama(model="codellama")
agent = ReActAgent.from_tools(tools, llm=code_llm, verbose=True, context=context)


class CodeOutput(BaseModel):
    # Structured shape the raw agent response is parsed into.
    code: str
    description: str
    filename: str


output_parser = PydanticOutputParser(CodeOutput)
json_prompt_str = output_parser.format(code_parser_template)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
output_pipeline = QueryPipeline(chain=[json_prompt_tmpl, llm])

while (prompt := input("Enter a prompt (q to quit): ")) != "q":
    retries = 0
    cleaned_json = None  # sentinel: stays None unless a retry succeeds

    while retries < 3:
        try:
            result = agent.query(prompt)
            next_result = output_pipeline.run(response=result)
            # The model sometimes prefixes its JSON with "assistant:";
            # strip that before evaluating the dict literal.
            cleaned_json = ast.literal_eval(str(next_result).replace("assistant:", ""))
            break
        except Exception as e:
            retries += 1
            print(f"Error occurred, retry #{retries}:", e)

    if cleaned_json is None:
        print("Unable to process request, try again...")
        continue

    print("Code generated")
    print(cleaned_json["code"])
    print("\n\nDescription:", cleaned_json["description"])

    filename = cleaned_json["filename"]

    try:
        # Ensure the output directory exists before writing into it.
        os.makedirs("output", exist_ok=True)
        with open(os.path.join("output", filename), "w", encoding="utf-8") as f:
            f.write(cleaned_json["code"])
        print("Saved file", filename)
    except OSError as e:
        # Narrow, visible failure instead of a silent bare `except:`.
        print("Error saving file:", e)
# Prompt strings shared with main.py. These are runtime strings sent to the
# LLM, so their exact wording (including the unpunctuated "doesnt") is
# preserved byte-for-byte.

# System context handed to ReActAgent.from_tools(..., context=context):
# frames the agent's overall role before any tool use.
context = """Purpose: The primary role of this agent is to assist users by analyzing code. It should 
be able to generate code and answer questions about code provided. """

# Template for the output-parsing pipeline; PydanticOutputParser appends the
# JSON schema for CodeOutput after the trailing "JSON Format: ". {response}
# is filled with the raw agent answer by QueryPipeline.run(response=...).
code_parser_template = """Parse the response from a previous LLM into a description and a string of valid code, 
also come up with a valid filename this could be saved as that doesnt contain special characters. 
Here is the response: {response}. You should parse this in the following JSON Format: """
llama-index-embeddings-openai==0.1.7 39 | llama-index-indices-managed-llama-cloud==0.1.5 40 | llama-index-legacy==0.9.48 41 | llama-index-llms-huggingface==0.1.4 42 | llama-index-llms-llama-cpp==0.1.3 43 | llama-index-llms-ollama==0.1.2 44 | llama-index-llms-openai==0.1.13 45 | llama-index-multi-modal-llms-openai==0.1.4 46 | llama-index-program-guidance==0.1.2 47 | llama-index-program-lmformatenforcer==0.1.2 48 | llama-index-program-openai==0.1.5 49 | llama-index-question-gen-openai==0.1.3 50 | llama-index-readers-file==0.1.12 51 | llama-index-readers-llama-parse==0.1.4 52 | llama-parse==0.4.0 53 | llama_cpp_python==0.2.58 54 | llamaindex-py-client==0.1.15 55 | lm-format-enforcer==0.9.3 56 | MarkupSafe==2.1.5 57 | marshmallow==3.21.1 58 | minijinja==1.0.16 59 | mpmath==1.3.0 60 | multidict==6.0.5 61 | mypy-extensions==1.0.0 62 | nest-asyncio==1.6.0 63 | networkx==3.2.1 64 | nltk==3.8.1 65 | numpy==1.26.4 66 | openai==1.14.3 67 | ordered-set==4.1.0 68 | packaging==24.0 69 | pandas==2.2.1 70 | pillow==10.2.0 71 | platformdirs==4.2.0 72 | protobuf==5.26.1 73 | psutil==5.9.8 74 | pyaml==23.12.0 75 | pydantic==2.6.4 76 | pydantic_core==2.16.3 77 | pydot==2.0.0 78 | pyformlang==1.0.9 79 | PyMuPDF==1.24.0 80 | PyMuPDFb==1.24.0 81 | pyparsing==3.1.2 82 | pypdf==4.1.0 83 | python-dateutil==2.9.0.post0 84 | python-dotenv==1.0.1 85 | pytz==2024.1 86 | PyYAML==6.0.1 87 | regex==2023.12.25 88 | requests==2.31.0 89 | retrying==1.3.4 90 | safetensors==0.4.2 91 | scikit-learn==1.4.1.post1 92 | scipy==1.12.0 93 | sentence-transformers==2.6.1 94 | six==1.16.0 95 | sniffio==1.3.1 96 | soupsieve==2.5 97 | SQLAlchemy==2.0.29 98 | starlette==0.37.2 99 | striprtf==0.0.26 100 | sympy==1.12 101 | tenacity==8.2.3 102 | threadpoolctl==3.4.0 103 | tiktoken==0.6.0 104 | tokenizers==0.15.2 105 | torch==2.2.2 106 | tqdm==4.66.2 107 | transformers==4.39.2 108 | typing-inspect==0.9.0 109 | typing_extensions==4.10.0 110 | tzdata==2024.1 111 | urllib3==2.2.1 112 | uvicorn==0.29.0 113 | wrapt==1.16.0 
114 | yarl==1.9.4 115 | --------------------------------------------------------------------------------