├── .github
│   └── workflows
│       └── codeql.yml
├── .gitignore
├── .replit
├── LICENSE
├── README.md
├── images
│   └── the_storyteller.png
├── pyproject.toml
├── requirements.txt
├── voices
│   ├── Harry Potter_Narration_Voice.mp3
│   ├── Hermione Granger_Narration_Voice.mp3
│   ├── Ron Weasley_Narration_Voice.mp3
│   └── Storyteller_Narration_Voice.mp3
└── whatifgpt.py
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL Advanced"
13 |
14 | on:
15 | push:
16 | branches: [ "main" ]
17 | pull_request:
18 | branches: [ "main" ]
19 | schedule:
20 | - cron: '41 18 * * 5'
21 |
22 | jobs:
23 | analyze:
24 | name: Analyze (${{ matrix.language }})
25 | # Runner size impacts CodeQL analysis time. To learn more, please see:
26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql
27 | # - https://gh.io/supported-runners-and-hardware-resources
28 | # - https://gh.io/using-larger-runners (GitHub.com only)
29 | # Consider using larger runners or machines with greater resources for possible analysis time improvements.
30 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
31 | permissions:
32 | # required for all workflows
33 | security-events: write
34 |
35 | # required to fetch internal or private CodeQL packs
36 | packages: read
37 |
38 | # only required for workflows in private repositories
39 | actions: read
40 | contents: read
41 |
42 | strategy:
43 | fail-fast: false
44 | matrix:
45 | include:
46 | - language: python
47 | build-mode: none
48 | # CodeQL supports the following values for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
49 | # Use `c-cpp` to analyze code written in C, C++ or both
50 | # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
51 | # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
52 | # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
53 | # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
54 | # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
55 | # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
56 | steps:
57 | - name: Checkout repository
58 | uses: actions/checkout@v4
59 |
60 | # Initializes the CodeQL tools for scanning.
61 | - name: Initialize CodeQL
62 | uses: github/codeql-action/init@v3
63 | with:
64 | languages: ${{ matrix.language }}
65 | build-mode: ${{ matrix.build-mode }}
66 | # If you wish to specify custom queries, you can do so here or in a config file.
67 | # By default, queries listed here will override any specified in a config file.
68 | # Prefix the list here with "+" to use these queries and those in the config file.
69 |
70 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
71 | # queries: security-extended,security-and-quality
72 |
73 | # If the analyze step fails for one of the languages you are analyzing with
74 | # "We were unable to automatically build your code", modify the matrix above
75 | # to set the build mode to "manual" for that language. Then modify this step
76 | # to build your code.
77 | # ℹ️ Command-line programs to run using the OS shell.
78 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
79 | - if: matrix.build-mode == 'manual'
80 | shell: bash
81 | run: |
82 | echo 'If you are using a "manual" build mode for one or more of the' \
83 | 'languages you are analyzing, replace this with the commands to build' \
84 | 'your code, for example:'
85 | echo ' make bootstrap'
86 | echo ' make release'
87 | exit 1
88 |
89 | - name: Perform CodeQL Analysis
90 | uses: github/codeql-action/analyze@v3
91 | with:
92 | category: "/language:${{matrix.language}}"
93 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | *$py.class
4 |
5 | .env
6 | .env.*
7 | env/
8 | .venv
9 | venv/
10 |
11 | .vscode/
12 | .idea/
13 |
14 | models
15 | llama/
16 |
17 | images/agent*
--------------------------------------------------------------------------------
/.replit:
--------------------------------------------------------------------------------
1 | onBoot = "pip install -r requirements.txt"
2 | run = "streamlit run whatifgpt.py"
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Yohei Nakajima
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # 🤖 WhatIfGPT 🤖
3 |
4 |
5 | # Introduction
6 | This project was inspired by the [*Generative Agents: Interactive Simulacra of Human Behavior*](https://arxiv.org/abs/2304.03442) paper and evolved through LangChain's experimental [*Generative Agents in LangChain*](https://python.langchain.com/en/latest/use_cases/agent_simulations/characters.html) and [*Multi-Player Dungeons & Dragons*](https://python.langchain.com/en/latest/use_cases/agent_simulations/multi_player_dnd.html) examples. The simulation has one Storyteller Agent and multiple Generative Agents. The Storyteller Agent is a narrator/moderator with a summary memory that tracks the story and the Generative Agents' actions so far. The Generative Agents extend LangChain's experimental Generative Agents, using their time-weighted memory backed by a LangChain retriever, plus the same summary memory of the story and the other agents' actions. The System and Human messages were tweaked because the agents were losing focus and context, which is why the summary memory was needed. A key idea behind these Generative Agents is that each one has a time-weighted vector store of memories that influences its thoughts and actions. Though this memory is fairly small, the project is an experiment to see what is possible with this concept.
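
For orientation, here is a condensed sketch of how a single character agent is assembled in whatifgpt.py. The character values shown are made-up placeholders (the real ones come from a GPT-generated JSON profile), and `create_new_memory_retriever` is the helper defined near the bottom of whatifgpt.py.

````
agent = WhatIfGenerativeAgent(
    name="Hermione Granger",              # placeholder; generated at runtime
    age=13, sex="Female", race="Human",
    story="Harry Potter", traits="clever, loyal", status="Hogwarts student",
    memory=GenerativeAgentMemory(
        llm=ChatOpenAI(),
        memory_retriever=create_new_memory_retriever(),   # time-weighted FAISS retriever
    ),
    llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2),
    daily_summaries=["Revised for exams in the library."],
    icon="images/agent1.png",
)
agent.memory.add_memory("Brewed Polyjuice Potion in second year.")  # seed the agent's memories
````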
7 |
8 | It starts by setting the main theme of the story (e.g. Harry Potter, Pirates of the Caribbean), the number of agents (excluding the Storyteller), and a checkbox for random generation versus custom agent names. When the simulation runs, the Storyteller acts as the narrator and moderator, holding a dialogue with the Generative Agents. Both the Storyteller and the Generative Agents keep a summary memory that tracks the main story, the Storyteller's observations, and each agent's actions. Maintaining this was important because the agents would otherwise lose focus and start talking out of context.
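
One turn of that dialogue boils down to roughly the following (a simplified sketch of `run_story` in whatifgpt.py):

````
reaction = chosen_agent.generate_reaction(observation)    # agent acts on the latest narration
for recipient in agents + [storyteller]:
    recipient.receive(chosen_agent.name, reaction)        # everyone folds the action into their summary memory
observation, is_objective_complete = storyteller.send()   # the Storyteller narrates what happens next and checks the objective
````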
9 |
10 | Each Generative Agent also has LangChain's Time-weighted Vector Store Retriever, which holds individual memories that influence its thoughts and actions. This is the twist in the simulation: it adds extra randomness to the story through the way the agents act.
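
The retriever is created once per agent; here is a minimal sketch of how it is built, mirroring `create_new_memory_retriever` in whatifgpt.py and assuming the LangChain 0.0.x and faiss-cpu versions pinned in requirements.txt:

````
import math, faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

def relevance_score_fn(score: float) -> float:
    # Map FAISS L2 distance on unit-norm OpenAI embeddings (0..sqrt(2)) to a 0..1 similarity
    return 1.0 - score / math.sqrt(2)

index = faiss.IndexFlatL2(1536)    # dimension of OpenAI embeddings
vectorstore = FAISS(OpenAIEmbeddings().embed_query, index, InMemoryDocstore({}), {},
                    relevance_score_fn=relevance_score_fn)
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=15
)
````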
11 |
12 | 
13 |
14 | Originally this memory was configurable, but I removed that to keep the UI simple; it can be made configurable from the UI again if you want. This was a fun and great learning experience for me, and I hope you enjoy it!
15 |
16 | This project is built with:
17 | * [LangChain Python](https://python.langchain.com/en/latest/index.html)
18 | * [OpenAI API for GPT and DALL-E](https://platform.openai.com/docs/api-reference)
19 | * [ElevenLabs for Voice](https://beta.elevenlabs.io/)
20 | * [Streamlit for UI](https://streamlit.io/)
21 |
22 | [See It on Replit](https://replit.com/@minchoi76/whatifgpt)
23 |
24 | # Demo
25 | https://user-images.githubusercontent.com/67872688/236244415-a338481a-031b-4d30-acb9-a3f502672ba2.mp4
26 | # Installation
27 | Install the required packages:
28 | ````
29 | pip install -r requirements.txt
30 | ````
31 | ## Usage
32 |
33 | Run Streamlit:
34 | ````
35 | python -m streamlit run whatifgpt.py
36 | ````
37 |
38 | You can now view your Streamlit app in your browser (Streamlit also automatically launches your default browser to this location).
39 |
40 | Local URL: http://localhost:8501
41 |
42 | # Acknowledgments
43 |
44 | I would like to express my gratitude to the developers whose code I referenced in creating this repo.
45 |
46 | Special thanks go to
47 |
48 | @hwchase17 (https://github.com/hwchase17/langchain)
49 |
50 | @mbchang (https://github.com/mbchang)
51 |
52 |
53 |
--------------------------------------------------------------------------------
/images/the_storyteller.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/whatifgpt/2a19f972b47239833506b45f700be73e663a9ff5/images/the_storyteller.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "repl_nix_whatifgpt"
3 | version = "0.1.0"
4 | description = ""
5 | authors = ["Replit user <<>>"]
6 |
7 | [tool.poetry.dependencies]
8 | python = "^3.10"
9 | streamlit = "^1.22.0"
10 | faiss-cpu = "^1.7.4"
11 | Flask = "^2.3.2"
12 | openai = "^0.27.6"
13 | Pillow = "^9.5.0"
14 | playsound = "^1.3.0"
15 | pydantic = "^1.10.7"
16 | langchain = "^0.0.155"
17 | elevenlabs = "0.2.8"
18 | tiktoken = "^0.3.3"
19 |
20 | [tool.poetry.dev-dependencies]
21 |
22 | [build-system]
23 | requires = ["poetry-core>=1.0.0"]
24 | build-backend = "poetry.core.masonry.api"
25 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pydantic>=1.10.7
2 | langchain>=0.0.155
3 | faiss-cpu>=1.7.4
4 | elevenlabs>=0.2.8
5 | playsound==1.2.2
6 | streamlit>=1.22.0
7 | openai>=0.27.6
8 | tiktoken>=0.3.3
--------------------------------------------------------------------------------
/voices/Harry Potter_Narration_Voice.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/whatifgpt/2a19f972b47239833506b45f700be73e663a9ff5/voices/Harry Potter_Narration_Voice.mp3
--------------------------------------------------------------------------------
/voices/Hermione Granger_Narration_Voice.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/whatifgpt/2a19f972b47239833506b45f700be73e663a9ff5/voices/Hermione Granger_Narration_Voice.mp3
--------------------------------------------------------------------------------
/voices/Ron Weasley_Narration_Voice.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/whatifgpt/2a19f972b47239833506b45f700be73e663a9ff5/voices/Ron Weasley_Narration_Voice.mp3
--------------------------------------------------------------------------------
/voices/Storyteller_Narration_Voice.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/realminchoi/whatifgpt/2a19f972b47239833506b45f700be73e663a9ff5/voices/Storyteller_Narration_Voice.mp3
--------------------------------------------------------------------------------
/whatifgpt.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import faiss
3 | import json
4 | import math
5 | import os
6 | import openai
7 | import random
8 | from datetime import datetime
9 | from typing import Any, Dict, List, Tuple
10 | from elevenlabs import generate, play, clone, save, voices
11 | from playsound import playsound
12 |
13 | from PIL import Image
14 | import streamlit as st
15 |
16 | from langchain.chat_models import ChatOpenAI
17 | from langchain.docstore import InMemoryDocstore
18 | from langchain.embeddings import OpenAIEmbeddings
19 | from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
20 | from langchain.retrievers import TimeWeightedVectorStoreRetriever
21 | from langchain.schema import HumanMessage, SystemMessage
22 | from langchain.vectorstores import FAISS
23 | from langchain.experimental import GenerativeAgent
24 | from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
25 |
26 | class Message:
27 | def __init__(self, name: str, icon, layout: str = 'storyteller'):
28 | if layout == 'storyteller':
29 | message_col, icon_col = st.columns([10, 1], gap="medium")
30 | elif layout == 'agent':
31 | icon_col, message_col = st.columns([1, 10], gap="medium")
32 | else:
33 | raise ValueError("Invalid layout specified. Use 'storyteller' or 'agent'.")
34 |
35 | self.icon = icon
36 | icon_col.image(self.icon, caption=name)
37 | self.markdown = message_col.markdown
38 |
39 | def __enter__(self):
40 | return self
41 |
42 | def __exit__(self, exc_type, exc_value, traceback):
43 | pass
44 |
45 | def write(self, content):
46 | self.markdown(content)
47 |
48 | class StorytellerAgent():
49 |
50 | def __init__(self, name, system_message: SystemMessage, summary_history, story_main_objective, llm: ChatOpenAI,):
51 | self.name = name
52 | self.llm = llm
53 | self.system_message = system_message
54 | self.summary_history = summary_history
55 | self.story_main_objective = story_main_objective
56 | self.prefix = f'\n{self.name}:'
57 | self.voice = None
58 | self.icon = "images/the_storyteller.png"
59 |
60 | def send(self) -> Tuple[str, bool]:
61 | """
62 | Applies the chatmodel to the message history
63 | and returns the message string
64 | """
65 | summary = (f"Summary thus far: {self.summary_history}" )
66 | message = self.llm(
67 | [self.system_message,
68 | HumanMessage(content=summary)]).content
69 | return message, self.is_objective_complete(message)
70 |
71 | def receive(self, name: str, message: str) -> None:
72 | self.summary_history = get_summary_content(self.summary_history, name, message)
73 |
74 | def is_objective_complete(self, message: str) -> bool:
75 | """
76 | Checks if objective has been completed
77 | """
78 | objective_check_prompt = [
79 | SystemMessage(content="Determine if objective has been achieved."),
80 | HumanMessage(content=
81 | f"""
82 | Story Objective: {self.story_main_objective}
83 | Story thus far: {message}
84 | Based on this "Story thus far", has the main "Story Objective" been completed? If obtaining an item is part of the "Story Objective", is the item (or items) in the possession of the characters?
85 | Only answer with "Yes" or "No", do not add anything else.
86 | """
87 | )
88 | ]
89 | is_complete = ChatOpenAI(temperature=0.0)(objective_check_prompt).content
90 | return "yes" in is_complete.lower()
91 |
92 | def narrate(self, message: str):
93 | """Narrate the observation using the voice-cloned Storyteller voice (requires ElevenLabs)."""
94 | if not os.environ.get('ELEVEN_API_KEY'):
95 | return
96 | if not self.voice:
97 | for voice in voices():
98 | if voice.name == "Storyteller":
99 | self.voice = voice
100 | break
101 | else:
102 | self.voice = clone(
103 | name="Storyteller",
104 | description="An old British male voice with a strong hoarseness in his throat. Perfect for story narration",
105 | files=["./voices/Storyteller_Narration_Voice.mp3"]
106 | )
107 | audio = generate(text=message, voice=self.voice)
108 | save(audio, "narration.mpeg")
109 | playsound("narration.mpeg")
110 | os.remove("narration.mpeg")
111 |
112 | class WhatIfGenerativeAgent(GenerativeAgent):
113 | sex: str
114 | race: str
115 | age: int
116 | story: str
117 | traits: str
118 | system_message: SystemMessage = None
119 | summary_history: str = ""
120 | icon: str = None
121 | voice: Any = None
122 |
123 | def _compute_agent_summary(self) -> str:
124 | """"""
125 | prompt = PromptTemplate.from_template(
126 | "Please reply with a creative description of the character {name} in 50 words or less, "
127 | +f"also creatively include the character's traits with the description: {self.traits}."
128 | +"Also consider {name}'s core characteristics given the"
129 | + " following statements:\n"
130 | + "{relevant_memories}"
131 | + "Do not add anything else."
132 | + "\n\nSummary: "
133 | )
134 | # The agent seeks to think about their core characteristics.
135 | return (
136 | self.chain(prompt)
137 | .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
138 | .strip()
139 | )
140 |
141 | def get_stats(self, force_refresh: bool = False) -> str:
142 | """Return the character stats of the agent."""
143 | current_time = datetime.now()
144 | since_refresh = (current_time - self.last_refreshed).seconds
145 | if (
146 | not self.summary
147 | or since_refresh >= self.summary_refresh_seconds
148 | or force_refresh
149 | ):
150 | self.summary = self._compute_agent_summary()
151 | self.last_refreshed = current_time
152 | return (
153 | f"Age: {self.age}"
154 | f"\nSex: {self.sex}"
155 | f"\nRace: {self.race}"
156 | f"\nStatus: {self.status}"
157 | +f"\nInnate traits: {self.traits}\n"
158 | )
159 |
160 | def get_summary_description(self, force_refresh: bool = False) -> str:
161 | """Return a short summary of the agent."""
162 | current_time = datetime.now()
163 | since_refresh = (current_time - self.last_refreshed).seconds
164 | if (
165 | not self.summary
166 | or since_refresh >= self.summary_refresh_seconds
167 | or force_refresh
168 | ):
169 | self.summary = self._compute_agent_summary()
170 | self.last_refreshed = current_time
171 | return (f"\n{self.summary}\n"
172 | )
173 |
174 | def _generate_reaction(self, observation: str, system_message: SystemMessage) -> str:
175 | """React to a given observation or dialogue act but with a Character Agent SystemMessage"""
176 | human_prompt = HumanMessagePromptTemplate.from_template(
177 | "{agent_summary_description}"
178 | + "\nIt is {current_time}."
179 | + "\n{agent_name}'s status: {agent_status}"
180 | + "\nSummary of relevant context from {agent_name}'s memory:"
181 | + "\n{relevant_memories}"
182 | + "\nMost recent observations: {most_recent_memories}"
183 | + "\nObservation: {observation}"
184 | + "\n\n"
185 | )
186 | prompt = ChatPromptTemplate.from_messages([system_message, human_prompt])
187 | agent_summary_description = self.get_summary()
188 | relevant_memories_str = self.summarize_related_memories(observation)
189 | current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p")
190 | kwargs: Dict[str, Any] = dict(
191 | agent_summary_description=agent_summary_description,
192 | current_time=current_time_str,
193 | relevant_memories=relevant_memories_str,
194 | agent_name=self.name,
195 | observation=observation,
196 | agent_status=self.status,
197 | )
198 | consumed_tokens = self.llm.get_num_tokens(
199 | prompt.format(most_recent_memories="", **kwargs)
200 | )
201 | kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
202 | return self.chain(prompt=prompt).run(**kwargs).strip()
203 |
204 | def generate_reaction(self, observation: str) -> str:
205 | """React to a given observation."""
206 | story_summary_current = self.summary_history + "\n" + observation
207 | result = self._generate_reaction(story_summary_current, self.system_message)
208 | # Save Context to Agent's Memory
209 | self.memory.save_context(
210 | {},
211 | {
212 | self.memory.add_memory_key: f"{self.name} observed "
213 | f"{observation} and reacted by {result}"
214 | },
215 | )
216 | return result
217 |
218 | def setup_agent(self, system_message: SystemMessage, specified_story: str):
219 | """Sets the Agent post Story and Main Objective gets set"""
220 | self.system_message = system_message
221 | self.memory.add_memory(specified_story)
222 | self.summary_history = specified_story
223 |
224 | def receive(self, name: str, message: str) -> None:
225 | """Receives the current observation and summarize in to summary history"""
226 | self.summary_history = get_summary_content(self.summary_history, name, message)
227 |
228 | def narrate(self, message: str):
229 | """Narrate using ElevenLabs"""
230 | if not os.environ.get('ELEVEN_API_KEY'):
231 | return
232 | if not self.voice:
233 | for voice in voices():
234 | if voice.name == self.name:
235 | self.voice = voice
236 | break
237 | else:
238 | if self.name.lower() in ["harry potter", "hermione granger", "ron weasley"]:
239 | self.voice = clone(
240 | name=self.name,
241 | description=f"voice clone of {self.name}-like voice",
242 | files=[f"./voices/{self.name}_Narration_Voice.mp3"]
243 | )
244 | else:
245 | male_voices = ["Antoni", "Josh", "Arnold", "Adam", "Sam"]
246 | female_voices = ["Rachel", "Bella", "Elli"]
247 | # Pick the target voice name once so the lookup below can reliably match
248 | if self.sex.lower() == "male":
249 | target_voice = random.choice(male_voices)
250 | else:
251 | target_voice = random.choice(female_voices)
252 | for voice in voices():
253 | if voice.name == target_voice:
254 | self.voice = voice
255 | break
256 | audio = generate(text=message, voice=self.voice)
257 | save(audio, f"{self.name}.mpeg")
258 | playsound(f"{self.name}.mpeg")
259 | os.remove(f"{self.name}.mpeg")
260 |
261 | class WhatIfStorySimulator():
262 | def __init__(self, story, mood, num_agents, is_random, agent_names, story_setting_event):
263 | self.story = story
264 | self.mood = mood
265 | self.num_agents = num_agents
266 | self.is_random = is_random
267 | self.agent_names = agent_names
268 | self.story_setting_event = story_setting_event
269 |
270 | def generate_agent_character(self, agent_num, story: str, mood: str, **kwargs):
271 | """Generate a Character Agent."""
272 | name = kwargs["name"]
273 | age = kwargs["age"]
274 | sex = kwargs["sex"]
275 | race = kwargs["race"]
276 | st.markdown(f":blue[A wild **_{name}_** appeared.]")
277 | icon_prompt = (f"{age} years old {sex} {race} named {name} from {story}, portrait, 16-bit super nes")
278 | response = openai.Image.create(
279 | prompt=icon_prompt,
280 | n=1,
281 | size="256x256",
282 | response_format="b64_json"
283 | )
284 | binary_data = base64.b64decode(response["data"][0]["b64_json"])
285 | icon_file = f"images/agent{str(agent_num)}.png"
286 | with open(icon_file, "wb") as file:
287 | file.write(binary_data)
288 | gen_agent = WhatIfGenerativeAgent(
289 | icon=icon_file,
290 | name=name,
291 | age=kwargs["age"],
292 | race=kwargs["race"],
293 | sex=kwargs["sex"],
294 | story=story,
295 | traits=kwargs["traits"],
296 | status=kwargs["status"],
297 | memory=GenerativeAgentMemory(llm=ChatOpenAI(), memory_retriever=create_new_memory_retriever()),
298 | llm=ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=float(os.environ['OPENAI_TEMPERATURE'])),
299 | daily_summaries=[str(x) for x in kwargs["daily_summaries"]],
300 | )
301 | portrait_area, stats_area = st.columns([1,3])
302 | with portrait_area:
303 | st.image(icon_file)
304 | with stats_area:
305 | st.markdown(f"Sex: :blue[{gen_agent.sex}]")
306 | st.markdown(f"Race: :blue[{gen_agent.race}]")
307 | st.markdown(f"Status: :blue[{gen_agent.status}]")
308 | st.markdown(f"traits: :blue[{gen_agent.traits}]")
309 | for memory in [str(x) for x in kwargs["memories"]]:
310 | gen_agent.memory.add_memory(memory)
311 | summary_description = gen_agent.get_summary_description(force_refresh=True)
312 | st.markdown(f"Summary: :green[{summary_description}]")
313 |
314 | return gen_agent
315 |
316 | def generate_random_character(self, story: str, mood: str, agent_names: list):
317 | """ Generate random character with properties """
318 | character_exclusion = f" that is not in [{', '.join(agent_names)}]" if agent_names else ""
319 | prompt = (
320 | f"Generate a random {story} character {character_exclusion}. "
321 | "Based on the character possessing some basic memories and events, "
322 | "provide the following properties in JSON format:\n"
323 | "name: Name of the character\n"
324 | "race: Race of the character\n"
325 | "sex: The character's sex\n"
326 | "age: The character's age\n"
327 | "traits: 3 to 8 traits that describe the character (comma-separated)\n"
328 | f"status: The character's current status in the perspective of {story}\n"
329 | f"daily_summaries: 5 to 10 {mood}-themed daily activities that the character completed today (array of strings)\n"
330 | f"memories: 5 to 10 {mood}-themed memories from the character's life (array of strings)\n"
331 | )
332 | return json.loads(
333 | ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=1.0)(
334 | [HumanMessage(content=prompt)]
335 | ).content
336 | )
337 |
338 | def generate_random_props(self, story: str, mood: str, name: str):
339 | """ Generate random character properties """
340 | prompt = (
341 | f"Based on the {story} character {name} possessing some basic memories and events, "
342 | "provide the following properties in JSON format:\n"
343 | "name: Name of the character\n"
344 | "race: Race of the character\n"
345 | "sex: The character's sex\n"
346 | "age: The character's age\n"
347 | "traits: 3 to 8 traits that describe the character (comma-separated)\n"
348 | f"status: The character's current status in the perspective of {story}\n"
349 | f"daily_summaries: 5 to 10 {mood}-themed daily activities that the character completed today (array of strings)\n"
350 | f"memories: 5 to 10 {mood}-themed memories from the character's life (array of strings)\n"
351 | )
352 | return json.loads(
353 | ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=1.0)(
354 | [HumanMessage(content=prompt)]
355 | ).content
356 | )
357 |
358 | def generate_character_system_message(self, story_description, character_name, character_description):
359 | """Generate System Message for Generative Agents"""
360 | return (SystemMessage(content=(
361 | f"""{story_description}
362 | Your name is {character_name}.
363 | Your character description is as follows: {character_description}.
364 | You will state what specific action you are taking next, and try not to repeat any previous actions.
365 | Speak in the first person from the perspective of {character_name}, in the tone that {character_name} would speak.
366 | Do not change roles!
367 | Do not speak from the perspective of anyone else.
368 | Remember you are {character_name}.
369 | Stop speaking the moment you finish speaking from your perspective.
370 | Never forget to keep your response to {word_limit} words!
371 | Do not add anything else.
372 | """)
373 | ))
374 |
375 | def generate_storyteller_system_message(self, story_description, storyteller_name):
376 | """Generate the System Message for Storyteller"""
377 | return (SystemMessage(content=(
378 | f"""{story_description}
379 | You are the storyteller, {storyteller_name}.
380 | Taking the characters' actions into consideration, you will narrate and explain what happens when they take those actions, then narrate in detail what must be done next.
381 | Narrate in a creative and captivating manner. Do not repeat anything that has already happened.
382 | Do not change roles!
383 | Do not speak from the perspective of anyone else.
384 | Remember you are the storyteller, {storyteller_name}.
385 | Stop speaking the moment you finish speaking from your perspective.
386 | Never forget to keep your response to 50 words!
387 | Do not add anything else.
388 | """)
389 | ))
390 |
391 | def generate_agents(self, story, mood, num_agents, agent_names, is_random):
392 | """Generate Agents"""
393 | agents = []
394 | for i in range(num_agents):
395 | with st.spinner(f"Generating {story} Character Agent"):
396 | kwargs = self.generate_random_character(story, mood, agent_names) if is_random else self.generate_random_props(story, mood, agent_names[i])
397 | agent = self.generate_agent_character(i+1, story=story, mood=mood, **kwargs)
398 | agents.append(agent)
399 | agent_names.append(agent.name)
400 |
401 | return agents
402 |
403 | def define_story_details(self, story, agent_names, story_setting_event):
404 | """Define Story Details with Main Objective"""
405 | story_description = f"""This is based on {story}.
406 | The characters are: {', '.join(agent_names)}.
407 | Here is the story setting: {story_setting_event}"""
408 | story_specifier_prompt = [
409 | SystemMessage(content="You can make tasks more specific."),
410 | HumanMessage(content=
411 | f"""{story_description}
412 | Narrate a creative and thrilling background story that has never been told and sets the stage for the main objective of the story.
413 | The main objective must require a series of tasks the characters must complete.
414 | If the main objective is an item or a person, narrate a creative and cool name for it.
415 | Narrate in specific detail what the next step is to embark on this journey.
416 | No actions have been taken yet by {', '.join(agent_names)}, only provide the introduction and background of the story.
417 | Please reply with the specified quest in 100 words or less.
418 | Speak directly to the characters: {', '.join(agent_names)}.
419 | Do not add anything else."""
420 | )
421 | ]
422 | with st.spinner(f"Generating Story"):
423 | specified_story = ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=1.0)(story_specifier_prompt).content
424 |
425 | story_main_objective_prompt = [
426 | SystemMessage(content="Identify main objective"),
427 | HumanMessage(content=
428 | f"""Here is the story: {specified_story}
429 | What is the main objective of this story {', '.join(agent_names)}? Narrate the response in one line, do not add anything else."""
430 | )
431 | ]
432 | with st.spinner(f"Extracting Objective"):
433 | story_main_objective = ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=0.0)(story_main_objective_prompt).content
434 | return story_description, specified_story, story_main_objective
435 |
436 | def initialize_storyteller_and_agents(self, agent_names, story_description, specified_story, story_main_objective, agents):
437 | """Initialize Storyteller and Agents"""
438 | storyteller = StorytellerAgent(
439 | name=storyteller_name,
440 | llm=ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=0.5),
441 | system_message=self.generate_storyteller_system_message(specified_story, storyteller_name),
442 | summary_history=specified_story,
443 | story_main_objective=story_main_objective
444 | )
445 | for agent in agents:
446 | agent.setup_agent(
447 | self.generate_character_system_message(story_description, agent.name, agent.get_summary_description()),
448 | specified_story
449 | )
450 | return storyteller, agents
451 |
452 | def generate_story_finale(self, story_main_objective, final_observation):
453 | """Generate a Cliffhanger Finale"""
454 | story_finale_prompt = [
455 | SystemMessage(content="Make the finale a cliffhanger"),
456 | HumanMessage(content=
457 | f"""
458 | Story Objective: {story_main_objective}
459 | Final Observation: {final_observation}
460 | Based on this "Story Objective" and "Final Observation", narrate a grand finale cliffhanger ending.
461 | Be creative and spectacular!
462 | """
463 | )
464 | ]
465 | story_finale = ChatOpenAI(model_name=os.environ['OPENAI_API_MODEL'], temperature=1.0)(story_finale_prompt).content
466 | return story_finale
467 |
468 | def run_story(self, storyteller: StorytellerAgent, agents: List[WhatIfGenerativeAgent], observation: str) -> Tuple[str, int]:
469 | """Runs the Story"""
470 |
471 | is_objective_complete = False
472 | turns = 0
473 | prev_agent = None
474 | while True:
475 | random.shuffle(agents)
476 |
477 | for chosen_agent in agents:
478 | while chosen_agent == prev_agent:
479 | chosen_agent = random.choice(agents)
480 | prev_agent = chosen_agent
481 |
482 | with st.spinner(f"{chosen_agent.name} is reacting"):
483 | reaction = chosen_agent.generate_reaction(observation)
484 | with Message(chosen_agent.name, chosen_agent.icon, layout='agent') as m:
485 | m.write(f"{reaction}")
486 | chosen_agent.narrate(reaction)
487 |
488 | with st.spinner(f"Agents are observing"):
489 | for recipient in agents + [storyteller]:
490 | recipient.receive(chosen_agent.name, reaction)
491 |
492 | with st.spinner(f"{storyteller.name} is thinking"):
493 | observation, is_objective_complete = storyteller.send()
494 | turns += 1
495 | if is_objective_complete:
496 | return observation, turns
497 | with Message(storyteller.name, storyteller.icon, layout='storyteller') as m:
498 | m.write(f":green[{observation}]")
499 | storyteller.narrate(observation)
500 |
501 | def run_simulation(self):
502 | self.agents = self.generate_agents(self.story, self.mood, self.num_agents, self.agent_names, self.is_random)
503 | story_description, specified_story, story_main_objective = self.define_story_details(self.story, self.agent_names, self.story_setting_event)
504 | self.storyteller, self.agents = self.initialize_storyteller_and_agents(self.agent_names, story_description, specified_story, story_main_objective, self.agents)
505 | with Message(self.storyteller.name, self.storyteller.icon, layout='storyteller') as m:
506 | m.write(f":green[{specified_story}]")
507 | self.storyteller.narrate(specified_story)
508 | final_observation, turns = self.run_story(self.storyteller, self.agents, specified_story)
509 | story_finale = self.generate_story_finale(story_main_objective, final_observation)
510 |
511 | with Message(self.storyteller.name, self.storyteller.icon, layout='storyteller') as m:
512 | m.write(f":green[{story_finale}]")
513 | self.storyteller.narrate(story_finale)
514 | st.success(f"Story Objective completed in {turns} turns!", icon="✅")
515 |
516 | def relevance_score_fn(score: float) -> float:
517 | """Return a similarity score on a scale [0, 1]."""
518 | # This will differ depending on a few things:
519 | # - the distance / similarity metric used by the VectorStore
520 | # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
521 | # This function converts the euclidean norm of normalized embeddings
522 | # (0 is most similar, sqrt(2) most dissimilar)
523 | # to a similarity function (0 to 1)
524 | return 1.0 - score / math.sqrt(2)
525 |
526 | def create_new_memory_retriever():
527 | """Create a new vector store retriever unique to the agent."""
528 | # Define your embedding model
529 | embeddings_model = OpenAIEmbeddings()
530 | # Initialize the vectorstore as empty
531 | embedding_size = 1536
532 | index = faiss.IndexFlatL2(embedding_size)
533 | vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)
534 | return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=["importance"], k=15)
535 |
536 | def get_summary_content(summary_history, name, message) -> str:
537 | """Summarize What has happened thus far"""
538 | summarizer_prompt = [
539 | SystemMessage(content="Make the summary concise."),
540 | HumanMessage(content=
541 | f"""Summarize the following into a concise summary with key details including the actions that {name} has taken and the results of that action
542 | {summary_history}
543 | {name} reacts {message}
544 | """
545 | )
546 | ]
547 | return ChatOpenAI(temperature=0.0)(summarizer_prompt).content
548 |
549 | storyteller_name = "The Storyteller"
550 | word_limit = 35
551 |
552 | def main():
553 | st.set_page_config(
554 | initial_sidebar_state="expanded",
555 | page_title="WhatIfGPT",
556 | layout="centered",
557 | )
558 |
559 | with st.sidebar:
560 | openai_api_key = st.text_input("Your OpenAI API KEY", type="password")
561 | openai_api_model = st.selectbox("Model name", options=["gpt-3.5-turbo", "gpt-4"])
562 | openai_temperature = st.slider(
563 | label="Temperature",
564 | min_value=0.0,
565 | max_value=1.0,
566 | step=0.1,
567 | value=0.2,
568 | )
569 | eleven_api_key = st.text_input("Your Eleven Labs API Key", type="password")
570 |
571 | os.environ['OPENAI_API_KEY'] = openai_api_key
572 | os.environ['OPENAI_API_MODEL'] = openai_api_model
573 | os.environ['OPENAI_TEMPERATURE'] = str(openai_temperature)
574 | os.environ['ELEVEN_API_KEY'] = eleven_api_key
575 | st.title("WhatIfGPT")
576 | story = st.text_input("Enter the theme of the story", "Random Story")
577 | mood = "positive"
578 | num_agents = st.slider(
579 | label="Number of Agents",
580 | min_value=2,
581 | max_value=4,
582 | step=1,
583 | value=2,
584 | )
585 | is_random = st.checkbox("Do you want the event and agents to be created randomly?", value=True)
586 | agent_names = []
587 | story_setting_event = f"random entertaining story with a mission to complete in the theme of {story}"
588 | if not is_random:
589 | for i in range(num_agents):
590 | name = st.text_input(f"Enter Character {i + 1} name: ", "")
591 | agent_names.append(name)
592 | user_story_setting_event = st.text_input("Enter the story to have the agents participate in (or just leave blank for random): ")
593 | if user_story_setting_event:
594 | story_setting_event = user_story_setting_event
595 | button = st.button("Run")
596 | if button:
597 | try:
598 | whatifsim = WhatIfStorySimulator(
599 | story,
600 | mood,
601 | num_agents,
602 | is_random,
603 | agent_names,
604 | story_setting_event
605 | )
606 | whatifsim.run_simulation()
607 | except Exception as e:
608 | st.error(e)
609 | if __name__ == "__main__":
610 | main()
611 |
--------------------------------------------------------------------------------
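
For reference, a minimal sketch of driving the simulator with fixed inputs instead of the Streamlit sidebar. This script is not part of the repo; it just sets the same environment variables that main() normally sets (the key values are placeholders), and because the output is rendered with Streamlit it still needs to be launched with `streamlit run`.

````
import os

# Same environment variables that main() sets from the sidebar (values are placeholders)
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["OPENAI_API_MODEL"] = "gpt-3.5-turbo"
os.environ["OPENAI_TEMPERATURE"] = "0.2"
os.environ["ELEVEN_API_KEY"] = ""          # empty string skips voice narration

from whatifgpt import WhatIfStorySimulator

sim = WhatIfStorySimulator(
    story="Harry Potter",
    mood="positive",
    num_agents=2,
    is_random=True,
    agent_names=[],
    story_setting_event="random entertaining story with a mission to complete in the theme of Harry Potter",
)
sim.run_simulation()
````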