├── .gitignore ├── LICENSE ├── README.md ├── env.txt ├── misc └── logo.png ├── requirements.txt └── src ├── agent.py ├── alt_agent.py ├── alt_network.py ├── alt_run.py ├── frontend.py ├── generate_identity.py ├── mem0_utils.py ├── network.py └── run.py /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | .venv -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Jet Wu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 5 |

6 | Simulate human behavior with mass LLMs 7 |

8 |

9 | 🔗 Demo Video 10 |   •   11 | 🐦 Twitter 12 |   •   13 | Cerebras Blog 14 | 15 | # LlamaSim: 16 | 17 | LlamaSim is a multi-LLM framework that aims to simulate human behavior at scale. Given a specific environment (e.g., voters in Pennsylvania, students at CMU), we replicate target groups, and aim to provide actionable insights for important questions/events. 18 | 19 | More to come... 20 | 21 | ## Roadmap 22 | - [x] Gradio Frontend (Local Demo) 23 | - [x] Supports mem0 for memory (alt_... files) - working on stability 24 | - [ ] Rewrite Agent Generation using Cerebras instead of OpenAI 25 | - [ ] Demographically Aligned Agents on-the-fly 26 | - [ ] Live Data Feeds for Agents 27 | - [ ] Async Communication for Agents 28 | - [ ] Live Demo 29 | 30 | ## Usage: 31 | ```bash 32 | # Clone the repository 33 | git clone https://github.com/jw-source/LlamaSim 34 | ``` 35 | __NOTE:__ files that start with __"alt..."__ are mem0 implementations of the original code (currently improving stability) 36 | ```bash 37 | # Add API keys to .env 38 | mv env.txt .env 39 | 40 | # Create venv 41 | python3 -m venv .venv 42 | 43 | # Set the venv 44 | source .venv/bin/activate 45 | 46 | # Install dependencies 47 | pip install -r requirements.txt 48 | 49 | # Run 50 | cd src 51 | python run.py 52 | ``` 53 | 54 | ### Real Example 55 | ```python 56 | from network import Network 57 | agent_network = Network(population="Pennsylvania Voters", num_agents=5, max_context_size=4000) 58 | prompt = "Gas prices are an all-time high." 59 | question = "Are you voting for Kamala Harris?" 
60 | agent_network.group_chat(prompt, "random", max_rounds=1) 61 | agent_network.predict(prompt, question) 62 | ``` 63 | -------------------------------------------------------------------------------- /env.txt: -------------------------------------------------------------------------------- 1 | CEREBRAS_API_KEY = "YOUR_CEREBRAS_API_KEY" 2 | OPENAI_API_KEY = "YOUR_OPENAI_API_KEY" -------------------------------------------------------------------------------- /misc/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jw-source/LlamaSim/e06bc9274b59b43d9e2dfba10820d15fc7ae3c6c/misc/logo.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | pydantic 3 | pandas 4 | gradio 5 | openai -------------------------------------------------------------------------------- /src/agent.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | 3 | class Agent: 4 | def __init__(self, name:str, identity:str, client:OpenAI): 5 | self.client = client 6 | self.model_name = "llama3.1-8b" 7 | self.name = name 8 | self.identity = identity 9 | self.backstory = self._create_backstory() 10 | self.prompt = None 11 | 12 | def _create_backstory(self): 13 | response = self.client.chat.completions.create( 14 | messages = [{ 15 | "role": "system", 16 | "content": '''Develop a deeply realistic, human-like backstory that equally explores both 17 | the strengths and flaws of this character. 
Include raw, gritty details that reflect 18 | the complexity of real life — highlighting their habits, desires, personality traits, and quirks, 19 | while also diving into their struggles, insecurities, and imperfections.'''}, 20 | { 21 | "role": "user", 22 | "content": self.identity 23 | }], 24 | model=self.model_name, 25 | stream=False 26 | ) 27 | backstory = response.choices[0].message.content 28 | return backstory 29 | 30 | def chat(self, conversation_context:list): 31 | response = self.client.chat.completions.create( 32 | messages=[{ 33 | "role": "system", 34 | "content": f'''You are an imaginary human with {self.identity}. This is your backstory: {self.backstory}. 35 | Every response should reflect your identity, personal history, experiences, struggles, and values. 36 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 37 | align with your background. Thought Process: Respond as if you are truly living through the 38 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 39 | and imperfections.''' 40 | }, 41 | { 42 | "role": "user", 43 | "content": f"{self.prompt}. Share your thoughts and react (or not react) to {conversation_context}." 44 | }], 45 | model=self.model_name, 46 | stream=False 47 | ) 48 | agent_reply = response.choices[0].message.content 49 | return agent_reply 50 | 51 | def pre_predict(self, question:str): 52 | response = self.client.chat.completions.create( 53 | messages=[{ 54 | "role": "system", 55 | "content": f'''You are an imaginary human with {self.identity}. This is your backstory: {self.backstory}. 56 | Every response should reflect your identity, personal history, experiences, struggles, and values. 57 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 58 | align with your background. 
Thought Process: Respond as if you are truly living through the 59 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 60 | and imperfections.''' 61 | }, 62 | { 63 | "role": "user", 64 | "content": f"{question}. You can only respond with one word, either 'Yes' or 'No'." 65 | }], 66 | model=self.model_name, 67 | stream=False 68 | ) 69 | agent_reply = response.choices[0].message.content 70 | if "Yes" in agent_reply: 71 | return 1 72 | elif "No" in agent_reply: 73 | return 0 74 | else: 75 | return 2 76 | 77 | def post_predict(self, prompt:str, question:str): 78 | response = self.client.chat.completions.create( 79 | messages=[{ 80 | "role": "system", 81 | "content": f'''You are an imaginary human with {self.identity}. This is your backstory: {self.backstory}. 82 | Every response should reflect your identity, personal history, experiences, struggles, and values. 83 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 84 | align with your background. Thought Process: Respond as if you are truly living through the 85 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 86 | and imperfections.''' 87 | }, 88 | { 89 | "role": "user", 90 | "content": f"{prompt}. {question}. You can only respond in one word, with either 'Yes' or 'No'." 
91 | }], 92 | model=self.model_name, 93 | stream=False 94 | ) 95 | agent_reply = response.choices[0].message.content 96 | if "Yes" in agent_reply: 97 | return 1 98 | elif "No" in agent_reply: 99 | return 0 100 | else: 101 | return 2 102 | -------------------------------------------------------------------------------- /src/alt_agent.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from mem0_utils import add_memory, search_memory, get_all_memories 3 | class Agent: 4 | def __init__(self, memory, name:str, identity:str, client:OpenAI): 5 | self.client = client 6 | self.model_name = "llama3.1-8b" 7 | self.memory = memory 8 | self.name = name 9 | self.identity = identity 10 | self.backstory = self._create_backstory() 11 | self.prompt = None 12 | 13 | def _create_backstory(self): 14 | response = self.client.chat.completions.create( 15 | messages = [{ 16 | "role": "system", 17 | "content": '''Develop a deeply realistic, human-like backstory that equally explores both 18 | the strengths and flaws of this character. Include raw, gritty details that reflect 19 | the complexity of real life — highlighting their habits, desires, personality traits, and quirks, 20 | while also diving into their struggles, insecurities, and imperfections.'''}, 21 | { 22 | "role": "user", 23 | "content": self.identity 24 | }], 25 | model=self.model_name, 26 | stream=False 27 | ) 28 | backstory = response.choices[0].message.content 29 | add_memory(self.memory, self.identity, self.name, {"category": "identity"}) 30 | add_memory(self.memory, backstory, self.name, {"category": "backstory"}) 31 | return backstory 32 | 33 | def chat(self, conversation_context:list): 34 | user_input = f"Share your thoughts and react to this: {self.prompt}. For context, other people are saying this: {conversation_context}." 
35 | memories = search_memory(self.memory, user_input, self.name) 36 | response = self.client.chat.completions.create( 37 | messages=[{ 38 | "role": "system", 39 | "content": f'''You are {self.name}, an imaginary human with {memories}. 40 | Every response should reflect your identity, personal history, experiences, struggles, and values. 41 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 42 | align with your background. Thought Process: Respond as if you are truly living through the 43 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 44 | and imperfections.''' 45 | }, 46 | { 47 | "role": "user", 48 | "content": user_input 49 | }], 50 | model=self.model_name, 51 | stream=False 52 | ) 53 | agent_reply = response.choices[0].message.content 54 | return agent_reply 55 | 56 | def pre_predict(self, question:str): 57 | response = self.client.chat.completions.create( 58 | messages=[{ 59 | "role": "system", 60 | "content": f'''You are an imaginary human with {self.identity}. This is your backstory: {self.backstory}. 61 | Every response should reflect your identity, personal history, experiences, struggles, and values. 62 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 63 | align with your background. Thought Process: Respond as if you are truly living through the 64 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 65 | and imperfections.''' 66 | }, 67 | { 68 | "role": "user", 69 | "content": f"{question}. You can only respond with one word, either 'Yes' or 'No'." 
70 | }], 71 | model=self.model_name, 72 | stream=False 73 | ) 74 | agent_reply = response.choices[0].message.content 75 | if "Yes" in agent_reply: 76 | return 1 77 | elif "No" in agent_reply: 78 | return 0 79 | else: 80 | return 2 81 | 82 | def post_predict(self, prompt:str, question:str): 83 | response = self.client.chat.completions.create( 84 | messages=[{ 85 | "role": "system", 86 | "content": f'''You are an imaginary human with {self.identity}. This is your backstory: {self.backstory}. 87 | Every response should reflect your identity, personal history, experiences, struggles, and values. 88 | Here's what to consider: Speech Patterns: ALWAYS SPEAK IN 1ST PERSON. Adapt your tone, vocabulary, and speech style to 89 | align with your background. Thought Process: Respond as if you are truly living through the 90 | worldview in first-person. Personality and Flaws: Make sure to express your unique personality traits, quirks, 91 | and imperfections.''' 92 | }, 93 | { 94 | "role": "user", 95 | "content": f"{prompt}. {question}. You can only respond in one word, with either 'Yes' or 'No'." 
96 | }], 97 | model=self.model_name, 98 | stream=False 99 | ) 100 | agent_reply = response.choices[0].message.content 101 | if "Yes" in agent_reply: 102 | return 1 103 | elif "No" in agent_reply: 104 | return 0 105 | else: 106 | return 2 107 | -------------------------------------------------------------------------------- /src/alt_network.py: -------------------------------------------------------------------------------- 1 | from alt_agent import Agent 2 | import random 3 | import openai 4 | from generate_identity import generate_identities 5 | import time 6 | from dotenv import load_dotenv 7 | import os 8 | from collections import Counter 9 | 10 | load_dotenv() 11 | 12 | class Network: 13 | def __init__(self, memory, population:str, num_agents:int, max_context_size:int): 14 | self.client = openai.OpenAI( 15 | base_url='https://api.cerebras.ai/v1', 16 | api_key=os.getenv("CEREBRAS_API_KEY") 17 | ) 18 | self.memory = memory 19 | self.names, self.identities = self._create_identities(population, num_agents) 20 | self.num_agents = num_agents 21 | self.agents = self._init_agents() 22 | self.shared_context = [] 23 | self.conversation_logs = [] 24 | self.max_context_size = max_context_size 25 | 26 | def _create_identities(self, population:str, num_agents:int): 27 | start_time = time.time() 28 | names, identities = generate_identities(population, num_agents) 29 | end_time = time.time() 30 | elapsed_time = round(end_time - start_time, 2) 31 | print(f"Generated {num_agents} identities in: {elapsed_time} seconds") 32 | return names, identities 33 | 34 | def _init_agents(self): 35 | start_time = time.time() 36 | agents = [Agent(self.memory, name, identity, self.client) 37 | for name, identity in zip(self.names, self.identities)] 38 | end_time = time.time() 39 | elapsed_time = round(end_time - start_time, 2) 40 | print(f"Generated {self.num_agents} backstories in: {elapsed_time} seconds") 41 | return agents 42 | 43 | def _manage_context_size(self): 44 | total_length = 
sum(len(msg) for msg in self.shared_context) 45 | while total_length > self.max_context_size: 46 | removed_msg = self.shared_context.pop(0) 47 | total_length -= len(removed_msg) 48 | 49 | def group_chat(self, prompt:str, chat_type:str, max_rounds:int): 50 | round_count = 0 51 | while round_count < max_rounds: 52 | if chat_type == "round_robin": 53 | for _, agent in enumerate(self.agents): 54 | agent.prompt = prompt 55 | agent_response = agent.chat(self.shared_context) 56 | self.shared_context.append(agent.name + ": " + agent_response) 57 | self.conversation_logs.append(agent.name + ": " + agent_response) 58 | self._manage_context_size() 59 | print(f"\n{agent.name}: {agent_response}") 60 | elif chat_type == "random": 61 | for _ in range(len(self.agents)): 62 | agent = random.choice(self.agents) 63 | agent.prompt = prompt 64 | agent_response = agent.chat(self.shared_context) 65 | self.shared_context.append(agent.name + ": " + agent_response) 66 | self.conversation_logs.append(agent.name + ": " + agent_response) 67 | self._manage_context_size() 68 | print(f"\n{agent.name}: {agent_response}") 69 | round_count += 1 70 | return self.conversation_logs 71 | 72 | def predict(self, prompt:str, question:str): 73 | pre_choice = [None]*self.num_agents 74 | post_choice = [None]*self.num_agents 75 | for i, agent in enumerate(self.agents): 76 | pre_decision = int(agent.pre_predict(question)) 77 | pre_choice[i] = pre_decision 78 | for i, agent in enumerate(self.agents): 79 | post_decision = int(agent.post_predict(prompt, question)) 80 | post_choice[i] = post_decision 81 | print(f"Pre-choice: {pre_choice}") 82 | print(f"Post-choice: {post_choice}") 83 | pre_choice_counts = Counter(pre_choice) 84 | post_choice_counts = Counter(post_choice) 85 | percent_increase_in_zeros = (post_choice_counts[0] - pre_choice_counts[0])/self.num_agents*100 86 | percent_increase_in_ones = (post_choice_counts[1] - pre_choice_counts[1])/self.num_agents*100 87 | percent_increase_in_twos = 
(post_choice_counts[2] - pre_choice_counts[2])/self.num_agents*100 88 | zero_output = "" 89 | one_output = "" 90 | two_output = "" 91 | if percent_increase_in_zeros>=0: 92 | zero_output = f"+{percent_increase_in_zeros}% No" 93 | else: 94 | zero_output = f"{percent_increase_in_zeros}% No" 95 | if percent_increase_in_ones>=0: 96 | one_output = f"+{percent_increase_in_ones}% Yes" 97 | else: 98 | one_output = f"{percent_increase_in_ones}% Yes" 99 | if percent_increase_in_twos>=0: 100 | two_output = f"+{percent_increase_in_twos}% Maybe" 101 | else: 102 | two_output = f"{percent_increase_in_twos}% Maybe" 103 | print(one_output, zero_output, two_output) 104 | return one_output, zero_output, two_output 105 | -------------------------------------------------------------------------------- /src/alt_run.py: -------------------------------------------------------------------------------- 1 | from alt_network import Network 2 | from mem0_utils import config 3 | from mem0 import Memory 4 | 5 | memory = Memory.from_config(config) 6 | agent_network = Network(memory, population="Pennsylvania Voters", num_agents=2, max_context_size=4000) 7 | prompt = "Gas prices are an all-time high." 8 | question = "Are you voting for Kamala Harris?" 
9 | agent_network.group_chat(prompt, "random", max_rounds=1) 10 | agent_network.predict(prompt, question) 11 | -------------------------------------------------------------------------------- /src/frontend.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from network import Network 3 | import pandas as pd 4 | 5 | agent_network = None 6 | 7 | def generate_agents(population, num_agents, context_size): 8 | global agent_network 9 | agent_network = Network(population, num_agents, context_size) 10 | agents_df = pd.DataFrame({ 11 | "Identity": agent_network.identities 12 | }) 13 | return agents_df 14 | 15 | def start_groupchat(prompt, chat_type, rounds): 16 | global agent_network 17 | conversation_logs = agent_network.group_chat(prompt, chat_type, rounds) 18 | conversation_pairs = [] 19 | for i in range(0, len(conversation_logs), 2): 20 | user_msg = conversation_logs[i] 21 | if i+1 < len(conversation_logs): 22 | bot_msg = conversation_logs[i+1] 23 | else: 24 | bot_msg = "" 25 | conversation_pairs.append((user_msg, bot_msg)) 26 | return conversation_pairs 27 | 28 | def start_prediction(prompt, question): 29 | global agent_network 30 | percent_increase_in_zeros, percent_increase_in_ones, percent_increase_in_twos = agent_network.predict(prompt, question) 31 | return percent_increase_in_zeros, percent_increase_in_ones, percent_increase_in_twos 32 | 33 | with gr.Blocks() as demo: 34 | gr.Markdown("# LlamaSim") 35 | 36 | with gr.Tab("Generate Agents"): 37 | with gr.Column(): 38 | with gr.Row(): 39 | population_input = gr.Textbox( 40 | label="Population", 41 | value="Students at Carnegie Mellon University", 42 | lines=2 43 | ) 44 | num_agents_input = gr.Number( 45 | label="Number of Agents", value=10 46 | ) 47 | memory_size_input = gr.Number( 48 | label="Memory Size (Characters)", value=4000 49 | ) 50 | generate_agents_button = gr.Button("Generate Agents") 51 | 52 | gr.Markdown("### Agents Dashboard") 53 | 
agents_table = gr.Dataframe(headers=["Identity"], interactive=False) 54 | 55 | with gr.Tab("Groupchat"): 56 | with gr.Column(): 57 | with gr.Row(): 58 | prompt_input = gr.Textbox( 59 | label="Prompt", 60 | value="Kamala Harris is showing up to the Purnell Center today!", 61 | lines=2 62 | ) 63 | chat_type_input = gr.Radio( 64 | label="Chat Type", 65 | choices=["round_robin", "random"], 66 | value="random" 67 | ) 68 | rounds_input = gr.Number( 69 | label="Number of Rounds", value=1 70 | ) 71 | groupchat_button = gr.Button("Start Groupchat") 72 | gr.Markdown("### Groupchat History") 73 | conversation = gr.Chatbot(label="Conversation") 74 | 75 | with gr.Tab("Predict"): 76 | with gr.Column(): 77 | with gr.Row(): 78 | prompt_input_predict = gr.Textbox( 79 | label="Prompt", 80 | value="Kamala Harris is showing up to the Purnell Center today!", 81 | lines=2 82 | ) 83 | question_input_predict = gr.Textbox( 84 | label="Question", 85 | value="Are you voting for Kamala Harris?", 86 | lines=2 87 | ) 88 | predict_button = gr.Button("Start Prediction") 89 | ones_output = gr.Textbox(interactive=False) 90 | zeros_output = gr.Textbox(interactive=False) 91 | twos_output = gr.Textbox(interactive=False) 92 | 93 | generate_agents_button.click( 94 | fn=generate_agents, 95 | inputs=[ 96 | population_input, 97 | num_agents_input, 98 | memory_size_input, 99 | ], 100 | outputs=[agents_table], 101 | ) 102 | 103 | groupchat_button.click( 104 | fn=start_groupchat, 105 | inputs=[ 106 | prompt_input, 107 | chat_type_input, 108 | rounds_input, 109 | ], 110 | outputs=[conversation], 111 | ) 112 | 113 | predict_button.click( 114 | fn=start_prediction, 115 | inputs=[ 116 | prompt_input_predict, 117 | question_input_predict, 118 | ], 119 | outputs=[ones_output, zeros_output, twos_output], 120 | ) 121 | 122 | demo.launch() 123 | -------------------------------------------------------------------------------- /src/generate_identity.py: 
-------------------------------------------------------------------------------- 1 | import openai 2 | import json 3 | from pydantic import BaseModel 4 | from dotenv import load_dotenv 5 | import os 6 | load_dotenv() 7 | gpt_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY")) 8 | 9 | class IdentitySchema(BaseModel): 10 | ListOfOnlyNames: list[str] 11 | ListOfIdentities: list[str] 12 | 13 | def generate_identities(data:str, num_agents:int): 14 | completion = gpt_client.beta.chat.completions.parse( 15 | model="gpt-4o-mini", 16 | messages=[ 17 | { 18 | "role": "system", 19 | "content": f'''You are tasked with generating a list of {num_agents} hypothetical individuals using the following information as context: {data} 20 | 21 | The identities should be proportional to the actual population demographics, including: 22 | - **Name** 23 | - **Age** 24 | - **Gender** 25 | - **Race/Ethnicity** 26 | - **Nationality** 27 | - **City/Town of Residence**: Where the person is currently living. 28 | - **Country of Residence**: Where the person is currently living. 29 | - **Education Level** 30 | - **Field of Study** 31 | - **Occupation**: Include job title and industry. 32 | - **Income Level**: Include a range, e.g., $30,000-$40,000. 33 | - **Marital Status**: Single, Married, Divorced, etc. 34 | - **Number of Children** 35 | - **Housing Situation**: Owns a house, rents an apartment, etc. 36 | - **Sexual Orientation**: Heterosexual, Homosexual, Bisexual, etc. 37 | - **Gender**: Male, Female, Non-binary, etc. 38 | - **Socioeconomic Background** 39 | - **Happiness Index**: 1 to 10, indicating level of happiness. 40 | - **IQ Score**: indicating intelligence level. 41 | - **EQ Score**: indicating emotional intelligence. 42 | - **Religion/Belief**: Agnostic, Atheist, Christian, Buddhist, Muslim, etc. 43 | - **Religious Devotion Level**: 1 to 10, indicating level of devotion. 44 | - **Physical Health Status**: 1 to 10, indicating physical health. 
45 | - **Mental Health Status**: 1 to 10, indicating mental health. 46 | - **Disabilities and Health Conditions**: Include any relevant conditions, e.g., ADHD, anxiety, None. 47 | - **Political Ideology**: Republican, Democrat, Independent, etc. 48 | - **Political Engagement/Intensity**: 1 to 10, indicating strength of beliefs. 49 | - **Financial Literacy/Behavior**: 1 to 10, indicating financial knowledge and habits. 50 | - **Openness to Experience**: 1 to 10, indicating creativity, curiosity, and willingness to explore new ideas. 51 | - **Conscientiousness**: 1 to 10, indicating organization, reliability, and self-discipline. 52 | - **Extraversion**: 1 to 10, indicating sociability, assertiveness, and enthusiasm for social interaction. 53 | - **Agreeableness**: 1 to 10, indicating compassion, cooperativeness, and trust in others. 54 | - **Neuroticism**: 1 to 10, indicating emotional stability, tendency toward anxiety, depression, or mood swings. 55 | 56 | **Example Output**: 57 | Name: Jessica Ramirez 58 | Age: 29 59 | Gender: Female 60 | Race/Ethnicity: Hispanic/Latina 61 | Nationality: Mexican 62 | City/Town: New York City 63 | Country of Residence: USA 64 | Education Level: Bachelor's Degree 65 | Field of Study: Marketing 66 | Occupation: Marketing Specialist, Digital Marketing Industry 67 | Income Level: $25,000-$35,000 68 | Marital Status: Single 69 | Number of Children: 0 70 | Housing Situation: Rents an apartment 71 | Sexual Orientation: Heterosexual 72 | Socioeconomic Background: Middle class 73 | Happiness Index: 8 74 | IQ Score: 91 75 | EQ Score: 75 76 | Religion/Belief: Catholic 77 | Religious Devotion Level: 6 78 | Physical Health Status: 7 79 | Mental Health Status: 8 80 | Disabilities and Health Conditions: None 81 | Political Ideology: Independent 82 | Political Engagement/Intensity: 4 83 | Financial Literacy/Behavior: 5 84 | Openness to Experience: 9 85 | Conscientiousness: 7 86 | Extraversion: 5 87 | Agreeableness: 8 88 | Neuroticism: 10''', 89 | 
}, 90 | ], 91 | response_format=IdentitySchema) 92 | json_obj = json.loads(completion.choices[0].message.content) 93 | names = json_obj["ListOfOnlyNames"] 94 | identities = json_obj["ListOfIdentities"] 95 | print(identities) 96 | return names, identities 97 | -------------------------------------------------------------------------------- /src/mem0_utils.py: -------------------------------------------------------------------------------- 1 | from mem0 import Memory 2 | import os 3 | from dotenv import load_dotenv 4 | 5 | load_dotenv() 6 | 7 | config = { 8 | # "llm": { 9 | # "provider": "litellm", 10 | # "config": { 11 | # "model": "cerebras/llama3.1-70b", 12 | # "api_key": os.getenv("CEREBRAS_API_KEY"), 13 | # "openai_base_url": 'https://api.cerebras.ai/v1', 14 | # } 15 | # }, 16 | "llm": { 17 | "provider": "openai", 18 | "config": { 19 | "model": "gpt-4o-mini", 20 | } 21 | }, 22 | "embedder": { 23 | "provider": "openai", 24 | "config": { 25 | "model": "text-embedding-3-large", 26 | "api_key": os.getenv("OPENAI_API_KEY"), 27 | } 28 | }, 29 | "vector_store": { 30 | "provider": "chroma", 31 | "config": { 32 | "collection_name": "LlamaSim", 33 | "path": "db", 34 | } 35 | }, 36 | "version": "v1.1" 37 | } 38 | 39 | def add_memory(m, information, user_id, metadata): 40 | m.add(information, user_id=user_id, metadata=metadata) 41 | return 42 | 43 | def search_memory(m, query, user_id): 44 | related_memories = m.search(query=query, user_id=user_id) 45 | output = [m['memory'] for m in related_memories['results']] 46 | output = ' '.join(output) 47 | return output 48 | 49 | def get_all_memories(m): 50 | all_memories = m.get_all() 51 | output = [m['memory'] for m in all_memories['results']] 52 | output = ' '.join(output) 53 | return output 54 | -------------------------------------------------------------------------------- /src/network.py: -------------------------------------------------------------------------------- 1 | from agent import Agent 2 | import random 3 | import 
openai 4 | from generate_identity import generate_identities 5 | import time 6 | from dotenv import load_dotenv 7 | import os 8 | from collections import Counter 9 | 10 | load_dotenv() 11 | 12 | class Network: 13 | def __init__(self, population:str, num_agents:int, max_context_size:int): 14 | self.client = openai.OpenAI( 15 | base_url='https://api.cerebras.ai/v1', 16 | api_key=os.getenv("CEREBRAS_API_KEY") 17 | ) 18 | self.names, self.identities = self._create_identities(population, num_agents) 19 | self.num_agents = num_agents 20 | self.agents = self._init_agents() 21 | self.shared_context = [] 22 | self.conversation_logs = [] 23 | self.max_context_size = max_context_size 24 | 25 | def _create_identities(self, population:str, num_agents:int): 26 | start_time = time.time() 27 | names, identities = generate_identities(population, num_agents) 28 | end_time = time.time() 29 | elapsed_time = round(end_time - start_time, 2) 30 | print(f"Generated {num_agents} identities in: {elapsed_time} seconds") 31 | return names, identities 32 | 33 | def _init_agents(self): 34 | start_time = time.time() 35 | agents = [Agent(name, identity, self.client) 36 | for name, identity in zip(self.names, self.identities)] 37 | end_time = time.time() 38 | elapsed_time = round(end_time - start_time, 2) 39 | print(f"Generated {self.num_agents} backstories in: {elapsed_time} seconds") 40 | return agents 41 | 42 | def _manage_context_size(self): 43 | total_length = sum(len(msg) for msg in self.shared_context) 44 | while total_length > self.max_context_size: 45 | removed_msg = self.shared_context.pop(0) 46 | total_length -= len(removed_msg) 47 | 48 | def group_chat(self, prompt:str, chat_type:str, max_rounds:int): 49 | round_count = 0 50 | while round_count < max_rounds: 51 | if chat_type == "round_robin": 52 | for _, agent in enumerate(self.agents): 53 | agent.prompt = prompt 54 | agent_response = agent.chat(self.shared_context) 55 | self.shared_context.append(agent.name + ": " + agent_response) 
56 | self.conversation_logs.append(agent.name + ": " + agent_response) 57 | self._manage_context_size() 58 | print(f"\n{agent.name}: {agent_response}") 59 | elif chat_type == "random": 60 | for _ in range(len(self.agents)): 61 | agent = random.choice(self.agents) 62 | agent.prompt = prompt 63 | agent_response = agent.chat(self.shared_context) 64 | self.shared_context.append(agent.name + ": " + agent_response) 65 | self.conversation_logs.append(agent.name + ": " + agent_response) 66 | self._manage_context_size() 67 | print(f"\n{agent.name}: {agent_response}") 68 | round_count += 1 69 | return self.conversation_logs 70 | 71 | def predict(self, prompt:str, question:str): 72 | pre_choice = [None]*self.num_agents 73 | post_choice = [None]*self.num_agents 74 | for i, agent in enumerate(self.agents): 75 | pre_decision = int(agent.pre_predict(question)) 76 | pre_choice[i] = pre_decision 77 | for i, agent in enumerate(self.agents): 78 | post_decision = int(agent.post_predict(prompt, question)) 79 | post_choice[i] = post_decision 80 | print(f"Pre-choice: {pre_choice}") 81 | print(f"Post-choice: {post_choice}") 82 | pre_choice_counts = Counter(pre_choice) 83 | post_choice_counts = Counter(post_choice) 84 | percent_increase_in_zeros = (post_choice_counts[0] - pre_choice_counts[0])/self.num_agents*100 85 | percent_increase_in_ones = (post_choice_counts[1] - pre_choice_counts[1])/self.num_agents*100 86 | percent_increase_in_twos = (post_choice_counts[2] - pre_choice_counts[2])/self.num_agents*100 87 | zero_output = "" 88 | one_output = "" 89 | two_output = "" 90 | if percent_increase_in_zeros>=0: 91 | zero_output = f"+{percent_increase_in_zeros}% No" 92 | else: 93 | zero_output = f"{percent_increase_in_zeros}% No" 94 | if percent_increase_in_ones>=0: 95 | one_output = f"+{percent_increase_in_ones}% Yes" 96 | else: 97 | one_output = f"{percent_increase_in_ones}% Yes" 98 | if percent_increase_in_twos>=0: 99 | two_output = f"+{percent_increase_in_twos}% Maybe" 100 | else: 101 | 
two_output = f"{percent_increase_in_twos}% Maybe" 102 | print(one_output, zero_output, two_output) 103 | return one_output, zero_output, two_output 104 | -------------------------------------------------------------------------------- /src/run.py: -------------------------------------------------------------------------------- 1 | from network import Network 2 | agent_network = Network(population="Pennsylvania Voters", num_agents=5, max_context_size=4000) 3 | prompt = "Gas prices are an all-time high." 4 | question = "Are you voting for Kamala Harris?" 5 | agent_network.group_chat(prompt, "random", max_rounds=1) 6 | agent_network.predict(prompt, question) 7 | --------------------------------------------------------------------------------