├── .gitignore
├── LICENSE
├── README.md
├── agent.py
├── chat_gpt.py
├── completions.py
├── embeddings.py
├── maths.py
├── memory.py
├── memory_stream.py
├── nodes.py
├── plan.py
├── play.py
├── questions.md
├── reflections.py
└── sandbox.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Ayo Reis
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Generative Agents: Interactive Simulacra of Human Behavior
2 |
3 | [GitHub stars](https://github.com/ayoreis/generative-agents/stargazers)
4 | [Discord](https://discord.gg/97kcgMNN)
5 |
6 | Generative agents, proposed in this [paper](https://arxiv.org/abs/2304.03442), extend LLMs (like ChatGPT) with memory, reflection, planning and a sandbox environment.
7 |
8 | We're building a Python implementation that lets you add Generative Agents to your own worlds.
9 |
10 |
11 |
12 | ## TODO and contributing
13 |
14 | We have a `#dev` channel on Discord.
15 |
16 | Answer [questions](/questions.md).
17 |
18 | Check out the Issues and Projects tabs; there are also `# TODO`s scattered around the code.
19 |
20 | - Python 3.10+
21 | - Black
22 | - isort
23 |
24 | ### Parts
25 |
26 | - 4.1 [`Memory`](/memory.py): the most basic kind of memory, an observation. Importance calculation is done here.
27 | - 4.2 [`Reflection`](/reflections.py): a higher-level thought inferred from other memories.
28 | - 4.3 [`Plan`](/plan.py): a planned action with a location, starting time and duration.
29 | - 4.1 [`MemoryStream`](/memory_stream.py): a stream of memories. Memory retrieval is done here (see the sketch below).
30 | - 4 [`Agent`](/agent.py): ties the parts together; it observes, reflects, plans and reacts.
31 | - 5 [`Sandbox`](/sandbox.py): the tick loop, simulated time and what agents can see.
32 | - Figure 2 [`World`](/nodes.py): the root of the environment tree.
33 | - Figure 2 [`Area`](/nodes.py): a named rectangle containing sub-areas and objects.
34 | - Figure 2 [`Object`](/nodes.py): a named rectangle with a state agents can observe.
35 |
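Memory retrieval scores every memory against the agent's current situation by combining recency, importance and relevance (section 4.1 of the paper). Here is a rough sketch of the scoring done in [`memory_stream.py`](/memory_stream.py); the standalone `retrieval_score` helper is only illustrative, the α weights of 1 follow the paper, and 0.99 is this repo's decay factor.

```py
from maths import cosine_similarity, min_max_scale


def retrieval_score(
    hours_since_last_access: float,
    importance: float,  # LLM-rated poignancy, 1-10
    memory_embedding: list[float],
    query_embedding: list[float],
) -> float:
    """Score one memory against the agent's current situation."""
    # Each component is scaled to [0, 1] before being combined.
    recency = 0.99**hours_since_last_access
    scaled_importance = min_max_scale(importance, 0, 10)
    relevance = min_max_scale(
        cosine_similarity(memory_embedding, query_embedding), -1, 1
    )

    # All alpha weights are 1, so the sum is effectively unweighted.
    return recency + scaled_importance + relevance
```

The highest-scoring memories are the ones fed into the agent's prompts.
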
36 | ## Usage
37 |
38 | ```sh
39 | pip install openai
40 | ```
41 |
42 | ### Authentication
43 |
44 | Create an `openai_secrets.py` file and set your API key there.
45 |
46 | > **Note** See https://platform.openai.com/docs/api-reference/authentication
47 |
48 | ```py
49 | import openai
50 |
51 | openai.api_key = 'Your OpenAI API key'
52 | ```
53 |
54 |
55 |
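### Example

A minimal end-to-end run, adapted from [`play.py`](/play.py): build a small world, hand it to an agent, tick the sandbox once and print what the agent observed.

```py
import openai_secrets  # sets openai.api_key

from agent import Agent
from nodes import Area, Object, World
from sandbox import Sandbox

table = Object("Table", (2.5, 3, 2, 1), "occupied by Ayo")
room = Area("Room", (1, 1, 5, 5), (table,))
world = World((room,))

agent = Agent(
    name="Ayo Reis",
    age=15,
    traits="Shy, curious",
    description="Table is empty",
    environment=world,
    current_location=room,
    description_of_current_action="Sitting at a table",
    interacting_sandbox_object=table,
)

sandbox = Sandbox((agent,), world)
sandbox.tick()

# The observation of the table ends up in the agent's memory stream.
print(agent.memory_stream.stream)
```

Note that creating a `Memory` calls the embeddings API, so this needs a valid API key.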
--------------------------------------------------------------------------------
/agent.py:
--------------------------------------------------------------------------------
1 | from re import compile, MULTILINE
2 |
3 | from chat_gpt import ChatGPT
4 | from memory_stream import MemoryStream
5 | from memory import Memory
6 | from reflections import Reflection
7 | from completions import complete
8 | from plan import Plan
9 | from nodes import World, Area, Object
10 | from maths import Point
11 | # Matches a Markdown list item (bulleted or numbered) and captures its text.
12 | LIST_ITEM = compile(r"^ {0,3}(?:[-+*]|[0-9]{1,9}[.)])(?: {1,4}|\t)(.*?)$", MULTILINE)
13 |
14 | NEWLINE = "\n"
15 |
16 |
17 | def parse_list_items(string: str):
18 | return (item.group(1) for item in LIST_ITEM.finditer(string))
19 |
20 |
21 | class Agent:
22 | MEMORY_SEPARATOR = "; "
23 |
24 | DECAY_FACTOR = 0.99
25 |
26 | ALPHA = 1
27 | ALPHA_RECENCY = ALPHA
28 |     ALPHA_IMPORTANCE = ALPHA
29 | ALPHA_RELEVANCE = ALPHA
30 |
31 | def __init__(
32 | self,
33 | name: str,
34 | age: float,
35 | traits: str,
36 | description: str,
37 | environment: World,
38 | # TODO should this be in Sandbox?
39 | current_location: Area,
40 | description_of_current_action: str,
41 | interacting_sandbox_object: Object,
42 | ):
43 | self.name = name
44 | self.age = age
45 |         self.traits = traits
46 |         self.environment = environment
47 |         self.chat_gpt = ChatGPT()
48 |         self.memory_stream = MemoryStream()
49 |         for memory_description in description.split(self.MEMORY_SEPARATOR):
50 |             self.memory_stream.stream.append(Memory(memory_description))
51 |
52 | self.current_location = current_location
53 | self.description_of_current_action = description_of_current_action
54 | self.interacting_sandbox_object = interacting_sandbox_object
55 |
56 | def observe(self, observation: str):
57 | self.memory_stream.stream.append(Memory(observation))
58 |
59 | # def reflect(self):
60 | # prompt = f"""{self.MEMORY_SEPARATOR.join(memory.description for memory in self.memory_stream.stream[-100:])}
61 |
62 | # Given only the information above, what are 3 most salient high-level questions we can answer about the subjects in the statements?
63 | # """
64 |
65 | # high_level_questions = parse_list_items(self.chat_gpt.message(prompt))
66 |
67 | # for high_level_question in high_level_questions:
68 | # scores = self.memory_stream.retrieve_memories(high_level_question)[:3]
69 |
70 | # for score in scores:
71 | # score.memory.access()
72 |
73 | # memories = (score.memory for score in scores)
74 |
75 | # # TODO "example format" doesn't work, need better wording
76 | # prompt = f"""Statements about {self.name}
77 | # {NEWLINE.join(f'{index}. {memory}' for index, memory in enumerate(memories, 1))}
78 | # What 5 high-level insights can you infer from the above statements? (example format: insight (because of 1, 5, 3))"""
79 |
80 | # reflection_descriptions = parse_list_items(self.chat_gpt.message(prompt))
81 |
82 | # for description in reflection_descriptions:
83 | # reflection = Reflection(description, tuple(memories))
84 |
85 | # self.memory_stream.stream.append(reflection)
86 |
87 | # def summary_description(self):
88 | # core_characteristics_memories = self.memory_stream.retrieve_memories(
89 | # f"{self.name}’s core characteristics"
90 | # )[:10]
91 |
92 | # core_characteristics = complete(
93 | # f"""How would one describe {self.name}’s core characteristics given the following statements?
94 | # - {f'{NEWLINE}- '.join(str(score.memory) for score in core_characteristics_memories)}"""
95 | # )
96 |
97 | # current_situation = "doing somethis"
98 |
99 | # def plan(self):
100 | # intial_plan_prompt = """Name: Eddy Lin (age: 19)
101 | # Innate traits: friendly, outgoing, hospitable
102 | # Eddy Lin is a student at Oak Hill College studying music theory and composition. He loves to explore different musical styles and is always looking for ways to expand his knowledge. Eddy Lin is working on a composition project for his college class. He is also taking classes to learn more about music theory. Eddy Lin is excited about the new composition he is working on but he wants to dedicate more hours in the day to work on it in the coming days
103 | # On Tuesday February 12, Eddy 1) woke up and completed the morning routine at 7:00 am, [. . . ] 6) got ready to sleep around 10 pm.
104 | # Today is Wednesday February 13. Here is Eddy’s
105 | # plan today in broad strokes: 1)"""
106 |
107 | # initial_plan = complete(intial_plan_prompt)
108 |
109 | # plan = Plan(initial_plan, "...", "6am", "12h")
110 |
111 | # def react(self):
112 | # p = f"""[Agent’s Summary Description]
113 | # It is February 13, 2023, 4:56 pm.
114 | # {self.name}’s status: John is back home early from
115 | # work.
116 | # Observation: ...
117 | # Summary of relevant context from {self.name}’s memory:
118 | # What is [observer]’s relationship
119 | # with the [observed entity]? [Observed entity] is [action status
120 | # of the observed entity]”
121 | # Should {self.name} react to the observation, and if so,
122 | # what would be an appropriate reaction?"""
123 |
124 | def __str__(self):
125 | return f"{self.name} is {self.description_of_current_action}"
126 |
--------------------------------------------------------------------------------
/chat_gpt.py:
--------------------------------------------------------------------------------
1 | from typing import Literal, TypedDict
2 | 
3 | from openai import ChatCompletion
4 | 
5 | 
6 | class Message(TypedDict):
7 |     role: Literal["user", "assistant"]
8 |     content: str
9 | 
10 | 
11 | class ChatGPT:
12 |     """Keeps a running conversation with the chat completions API."""
13 | 
14 |     MODEL = "gpt-3.5-turbo"
15 | 
16 |     def __init__(self):
17 |         self.messages: list[Message] = []
18 | 
19 |     def message(self, content: str) -> str:
20 |         """Send `content` as the user, record the assistant's reply and return it."""
21 | 
22 |         self.messages.append({"role": "user", "content": content})
23 | 
24 |         completion = ChatCompletion.create(
25 |             model=self.MODEL,
26 |             messages=self.messages,
27 |         )
28 | 
29 |         message = completion.choices[0].message
30 | 
31 |         self.messages.append({"role": message.role, "content": message.content})
32 | 
33 |         return message.content
34 | 
--------------------------------------------------------------------------------
/completions.py:
--------------------------------------------------------------------------------
1 | from openai import Completion
2 |
3 | # The legacy Completions endpoint needs a completion model; a chat-only model
4 | # like "gpt-3.5-turbo" is rejected by it.
5 | MODEL = "gpt-3.5-turbo-instruct"
6 | 
7 | 
8 | # TODO handle API errors
9 | def complete(prompt: str, max_tokens: int | None = None) -> str:
10 |     """`max_tokens` caps the tokens generated after the prompt."""
11 | 
12 |     completion = Completion.create(model=MODEL, prompt=prompt, max_tokens=max_tokens)
13 | 
14 |     return completion.choices[0].text
15 | 
--------------------------------------------------------------------------------
/embeddings.py:
--------------------------------------------------------------------------------
1 | from openai import Embedding
2 |
3 | MODEL = "text-embedding-ada-002"
4 |
5 |
6 | def embedding(text: str):
7 | return Embedding.create(model=MODEL, input=text).data[0].embedding
8 |
--------------------------------------------------------------------------------
/maths.py:
--------------------------------------------------------------------------------
1 | from math import sqrt, hypot
2 |
3 | Number = int | float
4 | Vector = tuple[Number, ...]
5 | """https://en.wikipedia.org/wiki/Coordinate_vector"""
6 | Point = tuple[Number, Number]
7 | """x, y"""
8 | Rectangle = tuple[Number, Number, Number, Number]
9 | """x, y, width, height"""
10 |
11 | SECONDS_IN_MINUTE = 60
12 | MINUTES_IN_HOUR = 60
13 |
14 | SECONDS_IN_HOUR = MINUTES_IN_HOUR * SECONDS_IN_MINUTE
15 |
16 |
17 | def min_max_scale(x: Number, min_x: Number, max_x: Number):
18 | """https://en.wikipedia.org/wiki/Feature_scaling#Rescaling_(min-max_normalization)"""
19 | return (x - min_x) / (max_x - min_x)
20 |
21 |
22 | def dot_product(a: Vector, b: Vector):
23 | """https://en.wikipedia.org/wiki/Dot_product"""
24 | return sum(i * j for i, j in zip(a, b))
25 |
26 |
27 | def cosine_similarity(a: Vector, b: Vector):
28 | """https://en.wikipedia.org/wiki/Cosine_similarity"""
29 | return dot_product(a, b) / (
30 | sqrt(sum(i**2 for i in a)) * sqrt(sum(i**2 for i in b))
31 | )
32 |
33 |
34 | # def average(a: Vector) -> Number:
35 | # """https://en.wikipedia.org/wiki/Average"""
36 | # return sum(a) / len(a)
37 |
38 |
39 | def center(rectangle: Rectangle) -> Point:
40 | return (
41 | rectangle[0] + rectangle[2] / 2,
42 | rectangle[1] + rectangle[3] / 2,
43 | )
44 |
45 |
46 | def distance(a: Point, b: Point) -> float:
47 | return hypot(a[0] - b[0], a[1] - b[1])
48 |
--------------------------------------------------------------------------------
/memory.py:
--------------------------------------------------------------------------------
1 | from re import compile
2 | from datetime import datetime
3 |
4 | from embeddings import embedding
5 | from completions import complete
6 |
7 | DIGIT = compile(r"\d+")
8 |
9 |
10 | class Memory:
11 | """4.1"""
12 |
13 | # IMPORTANCE_PROMPT = """On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following piece of memory.
14 | # Memory: {}
15 | # Rating: """
16 |
17 | def __init__(self, description: str):
18 | now = datetime.now()
19 |
20 | self.description = description
21 | self.creation_timestamp = now
22 | self.most_recent_access_timestamp = now
23 |
24 | self.importance = None
25 | self.embedding = embedding(description)
26 |
27 | # prompt = self.IMPORTANCE_PROMPT.format(description)
28 |
29 | # while self.importance == None:
30 | # try:
31 | # completion = complete(prompt, 2)
32 |
33 | # matches = DIGIT.findall(completion)
34 |
35 | # if len(matches) != 1:
36 | # raise
37 |
38 | # self.importance = float(matches[0])
39 | # except:
40 | # # For debugging
41 | # print(completion)
42 |
43 | def __repr__(self):
44 | return self.description
45 |
46 | def access(self):
47 | self.most_recent_access_timestamp = datetime.now()
48 |
--------------------------------------------------------------------------------
/memory_stream.py:
--------------------------------------------------------------------------------
1 | from memory import Memory
2 | from embeddings import embedding
3 | from maths import SECONDS_IN_HOUR, min_max_scale, cosine_similarity
4 | from datetime import datetime
5 | from typing import NamedTuple
6 |
7 |
8 | class Score(NamedTuple):
9 | score: float
10 | memory: Memory
11 |
12 |
13 | class MemoryStream:
14 | """4.1"""
15 |
16 | def __init__(self):
17 | self.stream: list[Memory] = []
18 |
19 |     # Scoring weights and recency decay for retrieval (section 4.1). These
20 |     # mirror the constants on `Agent` so the stream can score memories on
21 |     # its own; all alpha weights are 1, as in the paper.
22 |     DECAY_FACTOR = 0.99
23 | 
24 |     ALPHA_RECENCY = 1
25 |     ALPHA_IMPORTANCE = 1
26 |     ALPHA_RELEVANCE = 1
27 | 
28 |     def retrieve_memories(self, agents_current_situation: str) -> list[Score]:
29 |         situation_embedding = embedding(agents_current_situation)
30 | 
31 |         def score(memory: Memory) -> Score:
32 |             hours_since_last_retrieval = (
33 |                 datetime.now() - memory.most_recent_access_timestamp
34 |             ).total_seconds() / SECONDS_IN_HOUR
35 | 
36 |             recency = self.DECAY_FACTOR**hours_since_last_retrieval
37 |             # Importance is not computed yet (see memory.py), so treat a
38 |             # missing rating as 0.
39 |             importance = min_max_scale(memory.importance or 0, 0, 10)
40 |             relevance = min_max_scale(
41 |                 cosine_similarity(memory.embedding, situation_embedding), -1, 1
42 |             )
43 | 
44 |             return Score(
45 |                 self.ALPHA_RECENCY * recency
46 |                 + self.ALPHA_IMPORTANCE * importance
47 |                 + self.ALPHA_RELEVANCE * relevance,
48 |                 memory,
49 |             )
50 | 
51 |         return sorted(
52 |             (score(memory) for memory in self.stream),
53 |             key=lambda scored: scored.score,
54 |             reverse=True,
55 |         )
56 | 
--------------------------------------------------------------------------------
/nodes.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from maths import Rectangle
4 |
5 |
6 | class Area:
7 | """Figure 2"""
8 |
9 | def __init__(
10 | self,
11 | name: str,
12 | rectangle: Rectangle,
13 | children: tuple[Area | Object, ...],
14 | ):
15 | self.name = name
16 | self.rectangle = rectangle
17 | self.children = children
18 |
19 |
20 | class World:
21 | """Figure 2"""
22 |
23 |     def __init__(self, children: tuple[Area, ...]):
24 | self.children = children
25 |
26 |
27 | class Object:
28 | """Figure 2"""
29 |
30 | def __init__(self, name: str, rectangle: Rectangle, state: str):
31 | self.name = name
32 | self.rectangle = rectangle
33 | self.state = state
34 |
35 | def __str__(self):
36 | return f"{self.name} is {self.state}"
37 |
--------------------------------------------------------------------------------
/plan.py:
--------------------------------------------------------------------------------
1 | from memory import Memory
2 |
3 |
4 | class Plan(Memory):
5 | def __init__(
6 | self, description: str, location: str, starting_time: str, duration: str
7 | ):
8 | super().__init__(description)
9 |
10 | self.location = location
11 | self.starting_time = starting_time
12 | self.duration = duration
13 |
14 |     def __str__(self):
15 |         # Follows the format of the paper's example: "for 180 minutes from 9am,
16 |         # February 12th, 2023, at Oak Hill College Dorm: Klaus Mueller’s room:
17 |         # desk, read and take notes for research paper."
18 |         return f"for {self.duration} from {self.starting_time}, at {self.location}: {self.description}"
19 | 
--------------------------------------------------------------------------------
/play.py:
--------------------------------------------------------------------------------
1 | import openai_secrets
2 |
3 | from agent import Agent
4 | from nodes import World, Area, Object
5 | from sandbox import Sandbox
6 |
7 | table = Object("Table", (2.5, 3, 2, 1), "occupied by Ayo")
8 | room = Area("Room", (1, 1, 5, 5), (table,))
9 | place = World((room,))
10 | 
11 | agents = (
12 |     Agent(
13 |         name="Ayo Reis",
14 |         age=15,
15 |         traits="Shy, curious",
16 |         description="Table is empty",
17 |         environment=place,
18 |         current_location=room,
19 |         description_of_current_action="Sitting at a table",
20 |         interacting_sandbox_object=table,
21 |     ),
22 | )
23 | 
24 | sandbox = Sandbox(agents, place)
25 |
26 | sandbox.tick()
27 |
28 | print(agents[0].memory_stream.stream)
29 |
--------------------------------------------------------------------------------
/questions.md:
--------------------------------------------------------------------------------
1 | # Questions
2 |
3 | Feel free to answer any of these on [Discord](https://discord.gg/5dkM59gsDY) if you think you have an answer.
4 |
5 | - How does the agent's individual environment change? Like how/when do they explore new areas?
6 |
--------------------------------------------------------------------------------
/reflections.py:
--------------------------------------------------------------------------------
1 | from memory import Memory
2 |
3 |
4 | class Reflection(Memory):
5 | """4.2"""
6 |
7 |     def __init__(self, description: str, citations: tuple[Memory, ...]):
8 | super().__init__(description)
9 |
10 | self.citations = citations
11 |
--------------------------------------------------------------------------------
/sandbox.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from itertools import chain
3 |
4 | from agent import Agent
5 | from nodes import World, Object
6 | from maths import center, distance, Rectangle
7 |
8 |
9 | class Sandbox:
10 | """5"""
11 |
12 | TICKS_PER_SECOND = 1
13 | SECONDS_PER_TICK = 1 / TICKS_PER_SECOND
14 |     TIME_RATIO = 60  # sandbox time runs 60x real time: each tick advances it by one minute
15 |
16 | VISUAL_RANGE = 10
17 |
18 |     def __init__(self, agents: tuple[Agent, ...], world: World):
19 | self.agents = agents
20 | self.world = world
21 |
22 | self.start_time = time()
23 | self.time = self.start_time
24 |
25 | def tick(self):
26 | for agent in self.agents:
27 | for agent_or_object in self.sight(agent):
28 | agent.observe(str(agent_or_object))
29 |
30 | # TODO exclude reflections and plans
31 | # if (
32 | # sum(
33 | # [
34 | # event.importance
35 | # for event in agent.memory_stream.stream[EVENTS * -1 :]
36 | # ]
37 | # )
38 | # > THRESHOLD
39 | # ):
40 | # agent.reflect()
41 |
42 | # agent.plan()
43 |
44 | # if agent.should_react():
45 | # agent.react()
46 |
47 | self.time += self.SECONDS_PER_TICK * self.TIME_RATIO
48 |
49 | def in_visual_range(self, rectangle_a: Rectangle, rectangle_b: Rectangle):
50 | return distance(center(rectangle_a), center(rectangle_b)) < self.VISUAL_RANGE
51 |
52 | def sight(self, agent: Agent):
53 | return chain(
54 | (
55 | child
56 | for child in agent.current_location.children
57 | if isinstance(child, Object)
58 | and self.in_visual_range(
59 | agent.current_location.rectangle, child.rectangle
60 | )
61 | ),
62 | (
63 | other_agent
64 | for other_agent in self.agents
65 | if not other_agent == agent
66 | and self.in_visual_range(
67 | agent.current_location.rectangle,
68 | other_agent.current_location.rectangle,
69 | )
70 | ),
71 | )
72 |
--------------------------------------------------------------------------------