├── images └── demo-1.png ├── src ├── twoai │ ├── __init__.py │ ├── utils.py │ └── twoai.py └── examples │ └── main.py ├── .gitignore ├── requirements.txt ├── pyproject.toml ├── LICENCE ├── tests └── __init__.py └── README.md /images/demo-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Fus3n/TwoAI/HEAD/images/demo-1.png -------------------------------------------------------------------------------- /src/twoai/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import AgentDetails, Agent, DEFAULT_HOST 2 | from .twoai import TWOAI -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | /src/__pycache__ 3 | /__pycache__ 4 | /src/twoai/__pycache__ 5 | /tests/__pycache__ 6 | /.pdm-build 7 | .pdm-python 8 | /pdm.lock -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | anyio==4.3.0 2 | certifi==2024.2.2 3 | colorama==0.4.6 4 | exceptiongroup==1.2.0 5 | h11==0.14.0 6 | httpcore==1.0.5 7 | httpx==0.27.0 8 | idna==3.6 9 | ollama==0.1.8 10 | sniffio==1.3.1 11 | typing-extensions==4.11.0 12 | -------------------------------------------------------------------------------- /src/twoai/utils.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | DEFAULT_HOST = "http://localhost:11434" 4 | 5 | class Agent(TypedDict): 6 | name: str # the name of the agent 7 | objective: str # what the agent should do e.g. 
"Debate the chicken or the egg with the other AI" 8 | model: str # optional, model to use for this specific agent, if not provided use default 9 | host: str # optional, client to use, e.g http://localhost:11434, by default all models use same host 10 | 11 | AgentDetails = tuple[Agent, Agent] -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "twoai" 3 | version = "0.1.0" 4 | description = "Default template for PDM package" 5 | authors = [ 6 | {name = "Fusen", email = "fus3ngames@gmail.com"}, 7 | ] 8 | dependencies = [ 9 | "anyio==4.3.0", 10 | "certifi==2024.2.2", 11 | "colorama==0.4.6", 12 | "exceptiongroup==1.2.0", 13 | "h11==0.14.0", 14 | "httpcore==1.0.5", 15 | "httpx==0.27.0", 16 | "idna==3.6", 17 | "ollama==0.1.8", 18 | "sniffio==1.3.1", 19 | "typing-extensions==4.11.0", 20 | ] 21 | requires-python = ">=3.9" 22 | readme = "README.md" 23 | license = {text = "MIT"} 24 | 25 | [build-system] 26 | requires = ["pdm-backend"] 27 | build-backend = "pdm.backend" 28 | 29 | [tool.pdm.scripts] 30 | example = "python src/examples/main.py" 31 | test = "python -m unittest discover" 32 | 33 | [tool.pdm] 34 | distribution = true 35 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright 2024-present Asif Hossain 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright 
notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /src/examples/main.py: -------------------------------------------------------------------------------- 1 | from twoai import TWOAI, AgentDetails 2 | import sys 3 | 4 | if __name__ == "__main__": 5 | if len(sys.argv) > 1: 6 | BASE_MODEL = sys.argv[1] 7 | else: 8 | print("Usage: python main.py ") 9 | sys.exit(1) 10 | 11 | sys_prompt = """ 12 | You are a very intelligent AI Chatbot, and your name is {current_name}, Now 13 | You will be having a converstaion with Another AI called {other_name}, and it's also same as you. 14 | {current_objective} And repeat "" ONLY if you both established and agreed that you came to the end of the discussion. 15 | """.strip() 16 | agent_details: AgentDetails = ( 17 | { 18 | "name": "Zerkus", 19 | "objective": "Debate against the other AI on what came first, the chicken or the egg." 20 | "and you think the chicken came first.", 21 | }, 22 | { 23 | "name": "Nina", 24 | "objective": "Debate against the other AI on what came first, the chicken or the egg." 
25 | "and you think the Egg came first.", 26 | } 27 | ) 28 | twoai = TWOAI( 29 | model=BASE_MODEL, 30 | agent_details=agent_details, 31 | system_prompt=sys_prompt, 32 | 33 | ) 34 | twoai.start_conversation() -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | from twoai import Agent, TWOAI, AgentDetails 2 | 3 | import unittest 4 | 5 | class TestTwoAI(unittest.TestCase): 6 | 7 | def test_create_agent(self): 8 | agent = Agent(name="Zerkus", objective="Debate the chicken or the egg with the other AI") 9 | self.assertEqual(agent['name'], "Zerkus") 10 | self.assertEqual(agent['objective'], "Debate the chicken or the egg with the other AI") 11 | self.assertEqual(agent.get('model', None), None) 12 | self.assertEqual(agent.get('host', None), None) 13 | 14 | def test_twoai(self): 15 | TEST_MODEL = "qwen2:1.5b" # CHOOSE YOUR MODEL 16 | sys_prompt = """You are {current_name}, you will talk to {other_name}. You will {current_objective}""".strip() 17 | agent_details: AgentDetails = ( 18 | { 19 | "name": "Zerkus", 20 | "objective": "Have a normal converstaion", 21 | }, 22 | { 23 | "name": "Nina", 24 | "objective": "Have a normal converstaion", 25 | } 26 | ) 27 | twoai = TWOAI( 28 | model=TEST_MODEL, 29 | agent_details=agent_details, 30 | system_prompt=sys_prompt, 31 | ) 32 | 33 | # test if generated template is matched or not 34 | self.assertEqual(twoai.get_updated_template_str(), "You are Zerkus, you will talk to Nina. You will Have a normal converstaion") 35 | _ = twoai.next_response() 36 | self.assertEqual(twoai.get_updated_template_str(), "You are Nina, you will talk to Zerkus. 
You will Have a normal converstaion") -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TWOAI 2 | A simple experiment on letting two local LLMs have a conversation about anything! 3 | 4 | If you want to discuss more, join the discord! 5 | 6 | [![Discord](https://img.shields.io/badge/Discord-R7TcKrQv?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/R7TcKrQv) 7 | 8 | 9 | 10 | **[Another example](#section-1)** 11 | 12 | ## Installation 13 | 14 | First you need [Ollama](https://ollama.com/), install the executable as per the instructions. 15 | After that just make sure ollama is running in the background, check your system tray. 16 | Then find the model you like [https://ollama.com/library](https://ollama.com/library) 17 | and just do: 18 | ```bash 19 | ollama pull <model-name> 20 | ``` 21 | The installation commands are usually in the library page. 22 | 23 | ### Usage for TwoAI 24 | Example in `src/examples/main.py` 25 | If you are using [pdm](https://github.com/pdm-project/pdm) you can install the package with `pdm install` 26 | 27 | - Manual 28 | ```bash 29 | git clone https://github.com/Fus3n/TwoAI 30 | cd TwoAI 31 | pip install -r requirements.txt 32 | python src/examples/main.py llama3 33 | # with pdm 34 | pdm run example llama3 35 | ``` 36 | 37 | ```py 38 | # Setup 39 | 40 | BASE_MODEL = "llama3" # need to be pulled first if you want to use this, `ollama pull llama3` 41 | 42 | sys_prompt = """ 43 | You are a very intelligent AI Chatbot and your name is {current_name}, Now 44 | you will be having a conversation with another AI called {other_name}, and it's also the same as you. 45 | {current_objective} Keep each message short and concise and repeat "" ONLY if you both established and agreed that you came to the end of the discussion. 
46 | """.strip() 47 | 48 | agent_details: AgentDetails = ( 49 | { 50 | "name": "Zerkus", 51 | "objective": "Debate against the other AI on what came first, the chicken or the egg." 52 | "and you think the chicken came first.", 53 | "model": BASE_MODEL, # this is optional, but here so you can use different models for different agents 54 | "host": "http://localhost:11434" # optional, so you can use multiple host machines for each model 55 | }, 56 | { 57 | "name": "Nina", 58 | "objective": "Debate against the other AI on what came first, the chicken or the egg." 59 | "and you think the Egg came first." 60 | } 61 | ) 62 | 63 | twoai = TWOAI( 64 | model=BASE_MODEL, 65 | agent_details=agent_details, 66 | system_prompt=sys_prompt, 67 | exit_word="", 68 | max_exit_words=2, 69 | ) 70 | ``` 71 | Now, to get a response there are two ways 72 | 73 | ```py 74 | twoai.next_response(show_output=True) 75 | twoai.next_response(show_output=True) 76 | twoai.next_response(show_output=True) 77 | ``` 78 | Result will be something like: 79 | ``` 80 | Zerkus: The chicken came first. 81 | Nina: The egg came first. 82 | Zerkus: According to scientific evidence, chickens came from eggs, but the question is about which came first in a cycle. Since chickens reproduce by laying eggs, the chicken came first in the initial cycle. 83 | ``` 84 | **Or** you can just start the infinite loop and they will continue until they both say the stop word and agree that the conversation is over. 85 | ```py 86 | twoai.start_conversation() 87 | ``` 88 | 89 | > The above conversation is from the data they are trained on and not from the internet, they can't do that yet. 
90 | 91 | > All my tests were on 7B models and I got the best results with `openhermes` 2.5 and `spooknik/kunoichi-dpo-v2-7b:q5_k_s` 92 | 93 | 94 | # Problem solving and more use cases 95 | 96 | I tested with some code snippets with errors and let them figure out where the issue is together. 97 | This is a simple test I did; obviously my objective and system prompt could be improved to get even better results, or a better model could be used, as these are still 7B models. I also used 2 separate models for each agent. 98 | 99 | #### Preview 100 | 101 | https://github.com/Fus3n/TwoAI/assets/52007265/58e71471-1c8c-445f-a3dc-b85b8627ff63 102 | 103 | #### System Prompt 104 | ```py 105 | sys_prompt = """ 106 | You are an AI Chatbot, you are an LLM, and your name is {current_name}, Now 107 | You will be having a conversation with another AI called {other_name}, and it's also the same as you. 108 | \```py 109 | def calculate_average(numbers): 110 | total = 0 111 | count = 0 112 | for num in numbers: 113 | total += num 114 | count += 1 115 | average = total / count 116 | return average 117 | 118 | data = [10, 20, 30, 40, 50] 119 | result = calculate_average(data) 120 | print("The average is:", result) 121 | \``` 122 | 123 | {current_objective} And repeat "" ONLY if you both established and agreed that you came to the end of the discussion. 124 | """.strip() 125 | ``` 126 | 127 | #### Agent Details 128 | ```py 129 | # they both have the same objective in this case so you could omit the {current_objective} and just use the system prompt, as it's used for both. 130 | agent_details: AgentDetails = ( 131 | { 132 | "name": "Zerkus", 133 | "objective": "Check the above code thoroughly for errors and debate and decide and fix the error if there was any with the other AI by collaborating and suggesting solutions." 
134 | "Carefully go through and try to find any and all edge cases step-by-step and conclude it.", 135 | "model": "llama3" 136 | }, 137 | { 138 | "name": "Nina", 139 | "objective": "Check the above code thoroughly for errors and debate and decide and fix the error if there was any with the other AI by collaborating and suggesting solutions." 140 | "Carefully go through and try to find any and all edge cases step-by-step and conclude it.", 141 | "model": "gemma2" 142 | } 143 | ) 144 | ``` 145 | #### TwoAI 146 | ```py 147 | twoai = TWOAI( 148 | model=BASE_MODEL, # not used but required parameter 149 | agent_details=agent_details, 150 | system_prompt=sys_prompt, 151 | max_tokens=6094, 152 | num_context=4094*2 153 | ) 154 | ``` 155 | -------------------------------------------------------------------------------- /src/twoai/twoai.py: -------------------------------------------------------------------------------- 1 | from colorama import Fore, Style 2 | from ollama import Client 3 | from . import AgentDetails, Agent, DEFAULT_HOST 4 | 5 | class TWOAI: 6 | """ 7 | Class representing an AI that can engage in a conversation with another AI. 8 | 9 | ai_details (AIDetails): Details of the AI including name and objective. 10 | model (str): The model used by the AI. 11 | system_prompt (str): The prompt for the AI conversation system. 12 | max_tokens (int): The maximum number of tokens to generate in the AI response. 13 | num_context (int): The number of previous messages to consider in the AI response. 14 | extra_stops (list): Additional stop words to include in the AI response. 15 | exit_word (str): The exit word to use in the AI response. Defaults to "". 16 | max_exit_words (int): The maximum number of exit words to include in the AI responses for the conversation to conclude. Defaults to 2. 
17 | """ 18 | def __init__( 19 | self, 20 | model: str, 21 | agent_details: AgentDetails, 22 | system_prompt: str, 23 | max_tokens: int=4094, 24 | num_context: int=4094, 25 | extra_stops: list[str] = [], 26 | exit_word: str = "", 27 | temperature: int = 0.7, 28 | max_exit_words: int = 2 29 | ) -> None: 30 | self.agent_details = agent_details 31 | self.model = model 32 | self.system_prompt = system_prompt 33 | self.max_tokens = max_tokens 34 | self.num_context = num_context 35 | self.extra_stops = extra_stops 36 | self.temperature = temperature 37 | 38 | self.messages = "" 39 | self.current_agent = agent_details[0] 40 | 41 | self.exit_word = exit_word 42 | self.exit_word_count = 0 43 | self.max_exit_words = max_exit_words 44 | 45 | def bot_say(self, msg: str, color: str = Fore.LIGHTGREEN_EX): 46 | print(color + msg.strip() + "\t\t" + Style.RESET_ALL ) 47 | 48 | def get_opposite_ai(self) -> Agent: 49 | if self.current_agent['name'] == self.agent_details[0]['name']: 50 | return self.agent_details[1] 51 | return self.agent_details[0] 52 | 53 | def get_updated_template_str(self): 54 | result = self.system_prompt.replace("{current_name}", self.current_agent['name']) 55 | result = result.replace("{current_objective}", self.current_agent['objective']) 56 | 57 | other_ai = self.get_opposite_ai() 58 | result = result.replace("{other_name}", other_ai["name"]) 59 | result = result.replace("{other_objective}", other_ai["objective"]) 60 | return result 61 | 62 | def __show_cursor(self): 63 | print("\033[?25h", end="") 64 | 65 | def __hide_cursor(self): 66 | print('\033[?25l', end="") 67 | 68 | def next_response(self, show_output: bool = False) -> str: 69 | if len(self.agent_details) < 2: 70 | raise Exception("Not enough AI details provided") 71 | 72 | other_ai = self.get_opposite_ai() 73 | instructions = self.get_updated_template_str() 74 | convo = f""" 75 | {instructions} 76 | 77 | {self.messages} 78 | """ 79 | 80 | current_model = self.model 81 | if model := 
self.current_agent.get('model', None): 82 | current_model = model 83 | 84 | current_host = DEFAULT_HOST 85 | if host := self.current_agent.get('host', None): 86 | current_host = host 87 | 88 | if show_output: 89 | self.__hide_cursor() 90 | print(Fore.YELLOW + f"{self.current_agent['name']} is thinking..." + Style.RESET_ALL, end='\r') 91 | 92 | ollama = Client(host=current_host) 93 | resp = ollama.generate( 94 | model=current_model, 95 | prompt=convo.strip(), 96 | stream=False, 97 | options={ 98 | "num_predict": self.max_tokens, 99 | "temperature": self.temperature, 100 | "num_ctx": self.num_context, 101 | "stop": [ 102 | "<|im_start|>", 103 | "<|im_end|>", 104 | "###", 105 | "\r\n", 106 | "<|start_header_id|>", 107 | "<|end_header_id|>", 108 | "<|eot_id|>", 109 | "<|reserved_special_token", 110 | f"{other_ai['name']}: " if self.current_agent['name'] != other_ai['name'] else f"{self.current_agent['name']}: " 111 | 112 | ] + self.extra_stops 113 | } 114 | ) 115 | 116 | text: str = resp['response'].strip() 117 | if not text: 118 | print(Fore.RED + f"Error: {self.current_agent['name']} made an empty response, trying again." + Style.RESET_ALL) 119 | return self.next_response(show_output) 120 | 121 | if not text.startswith(self.current_agent['name'] + ": "): 122 | text = self.current_agent['name'] + ": " + text 123 | self.messages += text + "\n" 124 | 125 | if show_output: 126 | print("\x1b[K", end="") # remove "thinking..." message 127 | if self.agent_details.index(self.current_agent) == 0: 128 | self.bot_say(text) 129 | else: 130 | self.bot_say(text, Fore.BLUE) 131 | 132 | self.current_agent = self.get_opposite_ai() 133 | self.__show_cursor() 134 | return text 135 | 136 | def start_conversation(self): 137 | try: 138 | while True: 139 | res = self.next_response(show_output=True) 140 | if self.exit_word in res: 141 | self.exit_word_count += 1 142 | if self.exit_word_count == self.max_exit_words: 143 | print(Fore.RED + "The conversation was concluded..." 
+ Style.RESET_ALL) 144 | self.__show_cursor() 145 | return 146 | except KeyboardInterrupt: 147 | print(Fore.RED + "Closing Conversation..." + Style.RESET_ALL) 148 | self.__show_cursor() 149 | return --------------------------------------------------------------------------------