├── .gitignore ├── EartAgent ├── ACM │ ├── AgentCommunication.py │ ├── __init__.py │ └── __pycache__ │ │ ├── AgentCommunication.cpython-39.pyc │ │ └── __init__.cpython-39.pyc ├── Agent │ ├── Audio_Agents.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── Audio_Agents.cpython-39.pyc │ │ ├── __init__.cpython-39.pyc │ │ ├── images2text_agents.cpython-39.pyc │ │ ├── text2image_agents.cpython-39.pyc │ │ └── text_agents.cpython-39.pyc │ ├── images2text_agents.py │ ├── text2image_agents.py │ └── text_agents.py ├── WebUI │ ├── WebUI_1.py │ └── WebUI_2.py ├── __init__.py ├── __pycache__ │ └── __init__.cpython-39.pyc ├── app_packaging │ ├── Website_Cloning.py │ ├── Write_Paper.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── Website_Cloning.cpython-39.pyc │ │ ├── Write_Paper.cpython-39.pyc │ │ ├── __init__.cpython-39.pyc │ │ └── voice_dialog_assistant.cpython-39.pyc │ └── voice_dialog_assistant.py ├── game_scripts │ ├── Italy.py │ ├── Wolf.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── Italy.cpython-39.pyc │ │ ├── Wolf.cpython-39.pyc │ │ └── __init__.cpython-39.pyc │ └── moot court.py ├── log_time │ ├── log_2024-05-17 10-37-20.json │ ├── log_2024-05-17 10-38-23.json │ └── log_2024-05-17 10-39-30.json ├── thinking │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-39.pyc │ │ └── reflector.cpython-39.pyc │ └── reflector.py └── utils │ ├── UT.py │ ├── __init__.py │ └── __pycache__ │ ├── UT.cpython-39.pyc │ └── __init__.cpython-39.pyc ├── LICENSE ├── README.md ├── README_CN.md ├── assets └── 130898843 │ └── f145bbb8-ed97-4025-a40b-4260a8a75f6bno_alpha-4.png └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | share/python-wheels/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | MANIFEST 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .nox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | *.py,cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | db.sqlite3 57 | db.sqlite3-journal 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # IPython Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | # For a library or package, you might want to ignore these files since the code is 77 | # intended to run in multiple environments; otherwise, check them in: 78 | # .python-version 79 | 80 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 81 | __pypackages__/ 82 | 83 | # Celery stuff 84 | celerybeat-schedule 85 | celerybeat.pid 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # dotenv 91 | .env 92 | .env.* 93 | 94 | # virtualenv 95 | # Uncomment and modify this line to avoid committing virtual environments (e.g. when using 96 | # `pipenv`, `poetry`, or `pyenv`). 
This is useful if you have multiple environments +i.e., generated 97 | # for different versions of Python. If you only have a single environment and it is located in a 98 | # subdirectory of your project, add its path instead (e.g. `.venv/`). 99 | # .env/ 100 | # .venv/ 101 | venv/ 102 | ENV/ 103 | env.bak/ 104 | venv.bak/ 105 | 106 | # Spyder project settings 107 | .spyderproject 108 | .spyproject 109 | 110 | # Rope project settings 111 | .ropeproject 112 | 113 | # mkdocs documentation 114 | /site 115 | 116 | # mypy 117 | .mypy_cache/ 118 | .dmypy.json 119 | dmypy.json 120 | 121 | # Pyre type checker 122 | .pyre/ 123 | 124 | # pytype static type analyzer 125 | .pytype/ 126 | 127 | # ctags 128 | tags 129 | 130 | # jedi 131 | .jedi/ 132 | 133 | # Jupyter Notebook 134 | .ipynb_checkpoints 135 | 136 | # Local History 137 | .localhistory/ 138 | 139 | # Pycharm 140 | .idea/ 141 | *.iml 142 | *.iws 143 | *.ipr 144 | 145 | # VSCode 146 | .vscode/ 147 | 148 | # Logs and databases 149 | *.log 150 | *.sql 151 | *.sqlite 152 | 153 | # macOS 154 | .DS_Store 155 | 156 | # Windows 157 | Thumbs.db 158 | ehthumbs.db 159 | Desktop.ini 160 | 161 | # Custom 162 | config.json 163 | config.yaml 164 | secret_keys.py 165 | credentials/ 166 | 167 | # Log files 168 | *.log 169 | 170 | # Ignore any other file patterns or directories here 171 | -------------------------------------------------------------------------------- /EartAgent/ACM/AgentCommunication.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | 13 | class AgentCommunication: 14 | def __init__(self, agent_list=None): 15 | """Initialize the communication system with an optional list of agents.""" 16 | self.agents = agent_list if agent_list else [] 17 | 18 | def add_agent(self, agent): 19 | """Add a new agent to the communication system.""" 20 | if agent not in self.agents: 21 | self.agents.append(agent) 22 | print(f"Agent {agent.config.name} added to the system.") 23 | else: 24 | print(f"Agent {agent.config.name} is already in the system.") 25 | 26 | def remove_agent(self, agent_name): 27 | """Remove an agent from the communication system by name.""" 28 | self.agents = [agent for agent in self.agents if agent.config.name != agent_name] 29 | print(f"Agent {agent_name} removed from the system.") 30 | 31 | def broadcast_message(self, message): 32 | """Broadcast a message to all agents in the system.""" 33 | print(f"Broadcasting message: {message}") 34 | for agent in self.agents: 35 | try: 36 | agent(message) 37 | except Exception as e: 38 | print(f"Error broadcasting message to {agent.config.name}: {e}") 39 | 40 | def execute_pipeline(self): 41 | """Execute the response pipeline of agents.""" 42 | raise NotImplementedError("This method should be implemented by subclasses.") 43 | 44 | class MsgHub(AgentCommunication): 45 | def __init__(self, agent_list=None): 46 | """Initialize the message hub with an optional list of agents.""" 47 | super().__init__(agent_list) 48 | 49 | def execute_pipeline(self): 50 | """Execute each agent's response generation in sequence.""" 51 | for agent in self.agents: 52 | try: 53 | message = f"Message to {agent.config.name}" 54 | agent(message) 55 | self.broadcast_message(message) 56 | except Exception as e: 57 | print(f"Error in agent {agent.config.name} pipeline: {e}") 58 | 59 | class Pipeline(AgentCommunication): 60 | def __init__(self, agent_list=None): 61 | """Initialize the pipeline with an optional list of agents.""" 62 | super().__init__(agent_list) 63 | 64 | def execute_pipeline(self, initial_message, condition=None): 65 | """Execute each agent's response generation based on a condition, passing the message sequentially.""" 66 | message = initial_message 67 | user_input = '' 68 | while True: 69 | for agent in self.agents: 70 | try: 71 | if condition is None or condition(agent): 72 | message = agent(user_input + message) 73 | except Exception as e: 74 | print(f"Error in agent {agent.config.name} conditional pipeline: {e}") 75 | user_input = input() 76 | if user_input == 'exit': 77 | print("Discussion ended") 78 | break 79 | return message -------------------------------------------------------------------------------- /EartAgent/ACM/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ Import all agent related modules in the package. 
""" 3 | from .AgentCommunication import MsgHub 4 | from .AgentCommunication import Pipeline 5 | 6 | __all__ = [ 7 | "MsgHub", 8 | "Pipeline" 9 | ] 10 | -------------------------------------------------------------------------------- /EartAgent/ACM/__pycache__/AgentCommunication.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/ACM/__pycache__/AgentCommunication.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/ACM/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/ACM/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/Audio_Agents.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | import re 13 | from http import HTTPStatus 14 | import dashscope 15 | from typing import Optional 16 | from abc import ABC, abstractmethod 17 | from dashscope.audio.tts import SpeechSynthesizer 18 | 19 | 20 | 21 | 22 | class AudioAgentConfig: 23 | """ 24 | Basic configuration information for audio agents 25 | """ 26 | 27 | def __init__(self, system_prompt: str, model_name: str = 'qwen-audio-turbo'): 28 | self.system_prompt = system_prompt 29 | self.model_name = model_name 30 | 31 | 32 | class AudioAgent(ABC): 33 | """ 34 | Abstract base class for audio recognition agents 35 | """ 36 | 37 | def __init__(self, config: AudioAgentConfig): 38 | self.config = config 39 | 40 | @abstractmethod 41 | def chat(self, audio_file: str) -> str: 42 | """ 43 | Abstract method for processing audio recognition requests 44 | """ 45 | pass 46 | 47 | def __call__(self, audio_file: str) -> str: 48 | response = self.chat(audio_file) 49 | # self.speak(response) 50 | return response 51 | 52 | def speak(self, response: str): 53 | """ 54 | Output the agent's response 55 | """ 56 | print(f"Audio content: {response}") 57 | 58 | 59 | class QwenAudioAgent(AudioAgent): 60 | """ 61 | Agent using Dashscope's Qwen model for audio recognition 62 | """ 63 | api_key = None # Class attribute to store API key 64 | 65 | def __init__(self, config: AudioAgentConfig): 66 | super().__init__(config) 67 | 68 | def chat(self, audio_file: str) -> str: 69 | dashscope.api_key = QwenAudioAgent.api_key # Use class attribute as API key 70 | messages = [ 71 | { 72 | "role": "user", 73 | "content": [ 74 | {"audio": f"file://{audio_file}"}, 75 | {"text": self.config.system_prompt} 76 | ] 77 | } 78 | ] 79 | response = dashscope.MultiModalConversation.call(model=self.config.model_name, messages=messages) 80 | 81 | if response.status_code == HTTPStatus.OK: 82 | text = 
response.output.choices[0].message.content[0]['text'] 83 | match = re.search(r'"(.*?)"', text) 84 | 85 | if match: 86 | extracted_text = match.group(1) 87 | return extracted_text 88 | else: 89 | print("No content matched") 90 | else: 91 | print(response.code) 92 | print(response.message) 93 | 94 | return "" 95 | 96 | 97 | class SambertAgent(AudioAgent): 98 | """ 99 | Agent using Dashscope's Sambert speech synthesis 100 | """ 101 | api_key = None # Class attribute to store API key 102 | 103 | def __init__(self, config: AudioAgentConfig): 104 | super().__init__(config) 105 | 106 | def chat(self, sys_prompt: str): 107 | sys_prompt=self.config.system_prompt+sys_prompt 108 | # print(sys_prompt) 109 | dashscope.api_key = SambertAgent.api_key 110 | result = SpeechSynthesizer.call(model='sambert-zhichu-v1', 111 | text=sys_prompt, 112 | sample_rate=48000) 113 | if result.get_audio_data() is not None: 114 | return result.get_audio_data() -------------------------------------------------------------------------------- /EartAgent/Agent/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ Import all agent related modules in the package. """ 3 | from .text_agents import QwenAgent,KimiAgent,DeepSeekAgent,ClaudeAgent,ChatGPTAgent,AgentConfig,BaiChuanAgent,PhiAgent,LlamaAgent,MixtralAgent 4 | from .text2image_agents import WanxAgent 5 | from .Audio_Agents import QwenAudioAgent,SambertAgent 6 | from .images2text_agents import ImageAnalyzer 7 | 8 | __all__ = [ 9 | "QwenAgent", 10 | "KimiAgent", 11 | "DeepSeekAgent", 12 | "ClaudeAgent", 13 | "ChatGPTAgent", 14 | "AgentConfig", 15 | "BaiChuanAgent", 16 | "PhiAgent", 17 | "LlamaAgent", 18 | "MixtralAgent", 19 | "WanxAgent", 20 | "QwenAudioAgent", 21 | "SambertAgent", 22 | "ImageAnalyzer", 23 | ] -------------------------------------------------------------------------------- /EartAgent/Agent/__pycache__/Audio_Agents.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/Agent/__pycache__/Audio_Agents.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/Agent/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/__pycache__/images2text_agents.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/Agent/__pycache__/images2text_agents.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/__pycache__/text2image_agents.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/Agent/__pycache__/text2image_agents.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/__pycache__/text_agents.cpython-39.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/Agent/__pycache__/text_agents.cpython-39.pyc -------------------------------------------------------------------------------- /EartAgent/Agent/images2text_agents.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | import abc 14 | from dataclasses import dataclass 15 | from http import HTTPStatus 16 | import dashscope 17 | from dashscope import MultiModalConversation 18 | 19 | @dataclass 20 | class IAgentConfig: 21 | """ 22 | Agent configuration information 23 | """ 24 | name: str 25 | system_prompt: str 26 | model_name: str 27 | 28 | class Agent(abc.ABC): 29 | """ 30 | Base agent class, defining the basic interface and behavior of an agent 31 | """ 32 | 33 | def __init__(self, config: IAgentConfig): 34 | self.config = config 35 | 36 | @abc.abstractmethod 37 | def analyze(self, image_path_or_url: str, sys_prompt: str) -> str: 38 | """ 39 | Analyze an image and return a description result 40 | """ 41 | raise NotImplementedError 42 | 43 | class QwenVLPlusAgent(Agent): 44 | """ 45 | Use Dashscope's QwenVLPlus model for image analysis 46 | """ 47 | 48 | def analyze(self, image_path_or_url: str, sys_prompt: str) -> str: 49 | messages = [ 50 | { 51 | "role": "user", 52 | "content": [ 53 | {"image": image_path_or_url}, 54 | {"text": sys_prompt} 55 | ] 56 | } 57 | ] 58 | response = MultiModalConversation.call(model='qwen-vl-plus', messages=messages) 59 | if response.status_code == HTTPStatus.OK: 60 | return response["output"]["choices"][0]["message"]["content"][0]["text"] 61 | else: 62 | raise Exception(f"Request failed: {response.request_id}, {response.status_code}, {response.code}, {response.message}") 63 | 64 | class QwenVLMaxAgent(Agent): 65 | """ 66 | Use Dashscope's QwenVLMax model for image analysis 67 | """ 68 | 69 | def analyze(self, image_path_or_url: str, sys_prompt: str) -> str: 70 | messages = [ 71 | { 72 | 'role': 'system', 73 | 'content': [{'text': self.config.system_prompt}] 74 | }, 75 | { 76 | 'role': 'user', 77 | 'content': [ 78 | {'image': image_path_or_url}, 79 | {'text': sys_prompt} 80 | ] 81 | } 82 | ] 83 | response = MultiModalConversation.call(model='qwen-vl-max', messages=messages) 84 | if response.status_code == HTTPStatus.OK: 85 | return response["output"]["choices"][0]["message"]["content"][0]["text"] 86 | else: 87 | raise Exception(f"Request failed: {response.request_id}, {response.status_code}, {response.code}, {response.message}") 88 | 89 | class ImageAnalyzer: 90 | """ 91 | Example usage: 92 | api_key = "your_api_key_here" # Replace with your API key 93 | image_analyzer = ImageAnalyzer(api_key) 94 | # Analyze a web image 95 | description = image_analyzer.analyze_web_image("https://example.com/image.jpg", "Please describe this image") 96 | print(description) 97 | # Analyze a local image 98 | description = 
image_analyzer.analyze_local_image('file://D:/python_project_yeah/NLP/EartAgentV0.1/Agent/images/b99849f6d4244dac5d20c426445b8ec.png', "Please help me describe this image") 99 | print(description) 100 | """ 101 | 102 | def __init__(self, api_key): 103 | dashscope.api_key = api_key 104 | 105 | def analyze_web_image(self, image_url, sys_prompt): 106 | """ 107 | Analyze a web image and return a description result. 108 | """ 109 | agent = QwenVLPlusAgent(IAgentConfig(name="QwenVLPlus", system_prompt="You are a helpful image analyzer.", model_name="qwen-vl-plus")) 110 | return agent.analyze(image_url, sys_prompt) 111 | 112 | def analyze_local_image(self, image_path, sys_prompt): 113 | """ 114 | Analyze a local image and return a description result. 115 | """ 116 | agent = QwenVLMaxAgent(IAgentConfig(name="QwenVLMax", system_prompt="You are a helpful image analyzer.", model_name="qwen-vl-plus")) 117 | return agent.analyze(image_path, sys_prompt) -------------------------------------------------------------------------------- /EartAgent/Agent/text2image_agents.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | import abc 14 | from dataclasses import dataclass 15 | from http import HTTPStatus 16 | from urllib.parse import urlparse, unquote 17 | from pathlib import PurePosixPath 18 | 19 | import dashscope 20 | import requests 21 | from dashscope import ImageSynthesis 22 | 23 | 24 | @dataclass 25 | class TAgentConfig: 26 | """ 27 | Agent configuration information 28 | """ 29 | name: str 30 | 31 | 32 | class ImageAgent(abc.ABC): 33 | """ 34 | Image generation proxy base class 35 | """ 36 | 37 | def __init__(self, config: TAgentConfig, api_key: str): 38 | self.config = config 39 | self.api_key = api_key 40 | 41 | @abc.abstractmethod 42 | def generate_image(self, prompt: str, **kwargs) -> list: 43 | 44 | raise NotImplementedError 45 | 46 | 47 | class WanxAgent(ImageAgent): 48 | 49 | def generate_image(self, prompt: str): 50 | dashscope.api_key = self.api_key 51 | rsp = ImageSynthesis.call(model=ImageSynthesis.Models.wanx_v1, 52 | prompt=prompt, 53 | n=4, 54 | size='1024*1024') 55 | if rsp.status_code == HTTPStatus.OK: 56 | 57 | # save file to current directory 58 | for result in rsp.output.results: 59 | file_name = PurePosixPath(unquote(urlparse(result.url).path)).parts[-1] 60 | with open('./%s' % file_name, 'wb+') as f: 61 | f.write(requests.get(result.url).content) 62 | print(f"Image saved as {file_name}") 63 | 64 | else: 65 | print('Failed, status_code: %s, code: %s, message: %s' % 66 | (rsp.status_code, rsp.code, rsp.message)) 67 | 68 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /EartAgent/Agent/text_agents.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with 
the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | import abc 14 | from dataclasses import dataclass, field 15 | from http import HTTPStatus 16 | from typing import List, Dict, Optional 17 | import dashscope 18 | import anthropic 19 | from openai import OpenAI 20 | from EartAgent.utils.UT import UtilityTools 21 | 22 | """ 23 | @EartAgentV1.0 24 | Includes ali qwen series, kimi series, baichuan series, 01AI, Deepseek, llama3, chatgpt series, claude series, phi-3 25 | """ 26 | 27 | 28 | @dataclass 29 | class AgentConfig: 30 | name: str 31 | system_prompt: str 32 | model_name: Optional[str] = None 33 | temperature: Optional[float] = None 34 | max_tokens: Optional[int] = None 35 | remember: bool = False 36 | tool_use: List[Dict[str, str]] = field(default_factory=list) 37 | 38 | 39 | class Agent(abc.ABC): 40 | remember_flag = False 41 | 42 | def __init__(self, config: AgentConfig): 43 | self.config = config 44 | self.memory: List[str] = [] 45 | self.remember_flag = config.remember 46 | self.utility_tools = UtilityTools() 47 | self.utility_tools.log_agent(self) 48 | 49 | def integrate_tools(self, sys_prompt: str) -> list: 50 | context = [] 51 | for tool_config in self.config.tool_use: 52 | tool_name = tool_config['name'] 53 | api_key = tool_config['api_key'] 54 | tool_method = getattr(self.utility_tools, tool_name, None) 55 | if callable(tool_method): 56 | params = self.extract_params_for_tool(sys_prompt, tool_name) 57 | params['api_key'] = api_key 58 | result = tool_method(params['query'], params['api_key']) 59 | context.append(result) 60 | return context 61 | 62 | def extract_params_for_tool(self, prompt: str, tool_name: str) -> dict: 63 | if tool_name == 'serpapi_search': 64 | return {'query': prompt} 65 | return {} 66 | 67 | def retrieve_context_from_url(self, url: str) -> str: 68 | if url.startswith("http"): 69 | return self.utility_tools.web_crawler_all(url) 70 | elif url.endswith(".pdf"): 71 | return " ".join(self.utility_tools.read_pdf(url)) 72 | elif url.endswith(".docx"): 73 | return " ".join(self.utility_tools.read_docx(url)) 74 | elif url.endswith(".pptx"): 75 | return " ".join(self.utility_tools.read_ppt(url)) 76 | elif url.endswith(".txt"): 77 | return " ".join(self.utility_tools.read_txt(url)) 78 | else: 79 | raise ValueError("Unsupported URL or file type") 80 | 81 | def build_rag_prompt(self, user_input: str, context: str) -> str: 82 | return f"{user_input}\n\nAdditional Context:\n{context}" 83 | 84 | @abc.abstractmethod 85 | def chat(self, sys_prompt: str) -> str: 86 | raise NotImplementedError 87 | 88 | def __call__(self, sys_prompt: str, url: Optional[str] = None) -> str: 89 | context = "" 90 | if url: 91 | context = self.retrieve_context_from_url(url) 92 | response = self.chat(self.build_rag_prompt(sys_prompt, context)) 93 | self.speak(response) 94 | return response 95 | 96 | def speak(self, response: str): 97 | print(f"{self.config.name}:{response}") 98 | 99 | def remember(self, message: str): 100 | if self.remember_flag: 101 | self.memory.append(message) 102 | if len(self.memory) > 5: 103 | self.memory.pop(0) 104 | self.utility_tools.log_agent(self) 105 | 106 | def 
recall(self) -> List[str]: 107 | return self.memory 108 | 109 | def build_messages(self, sys_prompt: str) -> List[dict]: 110 | messages = [ 111 | {"role": "system", 112 | "content": f"You are a helpful assistant.{self.config.system_prompt} and this is your recall {self.recall()} if you need you can find something ,You are {self.config.name}"}, 113 | {"role": "user", "content": sys_prompt} 114 | ] 115 | return messages 116 | 117 | 118 | class QwenAgent(Agent): 119 | """ 120 | Agents for the Qwen model using Dashscope 121 | """ 122 | api_key: str = None 123 | default_model_name = 'qwen_turbo' 124 | 125 | def __init__(self, config: AgentConfig): 126 | super().__init__(config) 127 | self.config.model_name = config.model_name or self.default_model_name 128 | 129 | def chat(self, sys_prompt: str) -> str: 130 | dashscope.api_key = self.api_key 131 | # Output of the integration tool 132 | tool_context = self.integrate_tools(sys_prompt) 133 | context = [] 134 | # Passing the output of system tips and tools to the model 135 | if tool_context != context: 136 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 137 | else: 138 | full_prompt = sys_prompt 139 | self.remember(f"users say that:{sys_prompt}") 140 | messages = self.build_messages(full_prompt) 141 | response = dashscope.Generation.call( 142 | dashscope.Generation.Models.qwen_turbo, 143 | messages=messages, 144 | result_format='message', 145 | ) 146 | self.remember(f"You said.:{response.output.choices[0].message.content}") 147 | if response.status_code == HTTPStatus.OK: 148 | return response.output.choices[0].message.content 149 | else: 150 | raise Exception( 151 | f"Request failed: {response.request_id}, {response.status_code}, {response.code}, {response.message}") 152 | 153 | 154 | class KimiAgent(Agent): 155 | api_key: str = None 156 | default_model_name = 'moonshot-v1-8k' 157 | 158 | def __init__(self, config: AgentConfig): 159 | super().__init__(config) 160 | self.config.model_name = config.model_name or self.default_model_name 161 | 162 | def chat(self, sys_prompt: str) -> str: 163 | 164 | tool_context = self.integrate_tools(sys_prompt) 165 | context = [] 166 | 167 | if tool_context != context: 168 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 169 | else: 170 | full_prompt = sys_prompt 171 | self.remember(f"users say that:{sys_prompt}") 172 | messages = self.build_messages(full_prompt) 173 | client = OpenAI( 174 | api_key=self.api_key, 175 | base_url="https://api.moonshot.cn/v1", 176 | ) 177 | completion = client.chat.completions.create( 178 | model=self.config.model_name, 179 | messages=messages, 180 | temperature=self.config.temperature or 0.3, 181 | ) 182 | return completion.choices[0].message.content 183 | 184 | class YiAgent(Agent): 185 | api_key: str = None 186 | default_model_name = "yi-large" 187 | 188 | def __init__(self, config: AgentConfig): 189 | super().__init__(config) 190 | self.config.model_name = config.model_name or self.default_model_name 191 | 192 | def chat(self, sys_prompt: str) -> str: 193 | 194 | tool_context = self.integrate_tools(sys_prompt) 195 | context = [] 196 | 197 | if tool_context != context: 198 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 199 | else: 200 | full_prompt = sys_prompt 201 | self.remember(f"users say that:{sys_prompt}") 202 | messages = self.build_messages(full_prompt) 203 | client = OpenAI( 204 | api_key=self.api_key, 205 | 
base_url="https://api.lingyiwanwu.com/v1" 206 | ) 207 | completion = client.chat.completions.create( 208 | model=self.config.model_name, 209 | messages=messages, 210 | # temperature=self.config.temperature or 0.3, 211 | ) 212 | return completion.choices[0].message.content 213 | 214 | class ZhipuAgent(Agent): 215 | api_key: str = None 216 | default_model_name = "glm-4" 217 | 218 | def __init__(self, config: AgentConfig): 219 | super().__init__(config) 220 | self.config.model_name = config.model_name or self.default_model_name 221 | 222 | def chat(self, sys_prompt: str) -> str: 223 | 224 | tool_context = self.integrate_tools(sys_prompt) 225 | context = [] 226 | 227 | if tool_context != context: 228 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 229 | else: 230 | full_prompt = sys_prompt 231 | self.remember(f"users say that:{sys_prompt}") 232 | messages = self.build_messages(full_prompt) 233 | client = ZhipuAI( 234 | api_key=self.api_key 235 | ) 236 | completion = client.chat.completions.create( 237 | model=self.config.model_name, 238 | messages=messages, 239 | # temperature=self.config.temperature or 0.3, 240 | ) 241 | return completion.choices[0].message.content 242 | 243 | 244 | class BaiChuanAgent(Agent): 245 | 246 | api_key: str = None 247 | default_model_name = 'baichuan2-7b-chat-v1' 248 | 249 | def __init__(self, config: AgentConfig): 250 | super().__init__(config) 251 | self.config.model_name = config.model_name or self.default_model_name 252 | 253 | def chat(self, sys_prompt: str) -> str: 254 | 255 | tool_context = self.integrate_tools(sys_prompt) 256 | 257 | context = [] 258 | if tool_context != context: 259 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 260 | else: 261 | full_prompt = sys_prompt 262 | dashscope.api_key = self.api_key 263 | self.remember(f"users say that:{sys_prompt}") 264 | messages = self.build_messages(full_prompt) 265 | response = dashscope.Generation.call( 266 | dashscope.Generation.Models.qwen_turbo, 267 | messages=messages, 268 | result_format='message', 269 | ) 270 | self.remember(f"你说的:{response}") 271 | if response.status_code == HTTPStatus.OK: 272 | return response.output.choices[0].message.content 273 | else: 274 | raise Exception( 275 | f"Request failed: {response.request_id}, {response.status_code}, {response.code}, {response.message}") 276 | 277 | 278 | class DeepSeekAgent(Agent): 279 | 280 | api_key: str = None 281 | default_model_name = "deepseek-chat" 282 | 283 | def __init__(self, config: AgentConfig): 284 | super().__init__(config) 285 | self.config.model_name = config.model_name or self.default_model_name 286 | 287 | def chat(self, sys_prompt: str) -> str: 288 | 289 | tool_context = self.integrate_tools(sys_prompt) 290 | context = [] 291 | 292 | if tool_context != context: 293 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 294 | else: 295 | full_prompt = sys_prompt 296 | self.remember(f"users say that:{sys_prompt}") 297 | messages = self.build_messages(full_prompt) 298 | client = OpenAI( 299 | api_key=self.api_key, 300 | base_url="https://api.deepseek.com", 301 | ) 302 | completion = client.chat.completions.create( 303 | model=self.config.model_name, 304 | messages=messages, 305 | temperature=self.config.temperature or 0.3, 306 | ) 307 | return completion.choices[0].message.content 308 | 309 | 310 | class LlamaAgent(Agent): 311 | """ 312 | Agents using Nvidia's Llama model 313 | """ 314 | api_key: str = 
None 315 | default_model_name = 'meta/llama3-70b-instruct' 316 | 317 | def __init__(self, config: AgentConfig): 318 | super().__init__(config) 319 | self.config.model_name = config.model_name or self.default_model_name 320 | 321 | def chat(self, sys_prompt: str) -> str: 322 | tool_context = self.integrate_tools(sys_prompt) 323 | context = [] 324 | if tool_context != context: 325 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 326 | else: 327 | full_prompt = sys_prompt 328 | self.remember(f"user say that {sys_prompt}") 329 | messages = self.build_messages(full_prompt) 330 | print(messages) 331 | client = OpenAI( 332 | base_url="https://integrate.api.nvidia.com/v1", 333 | api_key=self.api_key 334 | ) 335 | completion = client.chat.completions.create( 336 | model=self.config.model_name, 337 | messages=messages, 338 | temperature=self.config.temperature or 0.7, 339 | top_p=1, 340 | max_tokens=self.config.max_tokens or 1024, 341 | stream=True 342 | ) 343 | response = "" 344 | for chunk in completion: 345 | if chunk.choices[0].delta.content is not None: 346 | response += chunk.choices[0].delta.content 347 | return response 348 | 349 | 350 | class MixtralAgent(Agent): 351 | """ 352 | Agents using Nvidia's Mixtral model 353 | """ 354 | api_key: str = None 355 | default_model_name = "mistralai/mixtral-8x22b-instruct-v0.1" 356 | 357 | def __init__(self, config: AgentConfig): 358 | super().__init__(config) 359 | self.config.model_name = config.model_name or self.default_model_name 360 | 361 | def chat(self, sys_prompt: str) -> str: 362 | 363 | tool_context = self.integrate_tools(sys_prompt) 364 | context = [] 365 | 366 | if tool_context != context: 367 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 368 | else: 369 | full_prompt = sys_prompt 370 | self.remember(f"user say that {sys_prompt}") 371 | messages = self.build_messages(full_prompt) 372 | client = OpenAI( 373 | base_url="https://integrate.api.nvidia.com/v1", 374 | api_key=self.api_key 375 | ) 376 | completion = client.chat.completions.create( 377 | model=self.config.model_name, 378 | messages=messages, 379 | temperature=self.config.temperature or 0.7, 380 | top_p=1, 381 | max_tokens=self.config.max_tokens or 1024, 382 | stream=True 383 | ) 384 | response = "" 385 | for chunk in completion: 386 | if chunk.choices[0].delta.content is not None: 387 | response += chunk.choices[0].delta.content 388 | return response 389 | 390 | 391 | class PhiAgent(Agent): 392 | """ 393 | Agents using Nvidia's microsoft model 394 | """ 395 | api_key: str = None 396 | default_model_name = "microsoft/phi-3-mini-128k-instruct" 397 | 398 | def __init__(self, config: AgentConfig): 399 | super().__init__(config) 400 | self.config.model_name = config.model_name or self.default_model_name 401 | 402 | def chat(self, sys_prompt: str) -> str: 403 | 404 | tool_context = self.integrate_tools(sys_prompt) 405 | context = [] 406 | 407 | if tool_context != context: 408 | full_prompt = f"{sys_prompt}+'The results of the web search are as follows:'+{tool_context}" 409 | else: 410 | full_prompt = sys_prompt 411 | self.remember(f"user say that {sys_prompt}") 412 | messages = self.build_messages(full_prompt) 413 | client = OpenAI( 414 | base_url="https://integrate.api.nvidia.com/v1", 415 | api_key=self.api_key 416 | ) 417 | completion = client.chat.completions.create( 418 | model=self.config.model_name, 419 | messages=messages, 420 | temperature=self.config.temperature or 0.7, 421 | top_p=1, 
422 | max_tokens=self.config.max_tokens or 1024, 423 | stream=True 424 | ) 425 | response = "" 426 | for chunk in completion: 427 | if chunk.choices[0].delta.content is not None: 428 | response += chunk.choices[0].delta.content 429 | return response 430 | 431 | 432 | class ChatGPTAgent(Agent): 433 | """ 434 | Agents using OpenAI's ChatGPT model 435 | """ 436 | api_key: str = None 437 | default_model_name = 'gpt-3.5-turbo' 438 | 439 | def __init__(self, config: AgentConfig): 440 | super().__init__(config) 441 | self.config.model_name = config.model_name or self.default_model_name 442 | 443 | def chat(self, sys_prompt: str) -> str: 444 | self.remember(f"users say that:{sys_prompt}") 445 | messages = self.build_messages(sys_prompt) 446 | 447 | try: 448 | client = OpenAI() 449 | completion = client.chat.completions.create( 450 | model=self.config.model_name, 451 | messages=messages, 452 | temperature=self.config.temperature or 0.7 453 | ) 454 | response_content = completion.choices[0].message.content 455 | except Exception as e: 456 | raise Exception(f"OpenAI API request failed: {e}") 457 | 458 | self.remember(response_content) 459 | return response_content 460 | 461 | 462 | class ClaudeAgent(Agent): 463 | """ 464 | Agents using Anthropic's Claude model 465 | """ 466 | api_key: str = None 467 | default_model_name = 'claude-3-sonnet-20240229' 468 | 469 | def __init__(self, config: AgentConfig): 470 | super().__init__(config) 471 | self.config.model_name = config.model_name or self.default_model_name 472 | 473 | def chat(self, sys_prompt: str) -> str: 474 | self.remember(f"users say that:{sys_prompt}") 475 | messages = self.build_messages(sys_prompt) 476 | 477 | client = anthropic.Anthropic(api_key=self.api_key) 478 | 479 | message = client.messages.create( 480 | model=self.config.model_name, 481 | messages=messages, 482 | temperature=self.config.temperature or 0.7, 483 | max_tokens=self.config.max_tokens or 1000, 484 | system=sys_prompt 485 | ) 486 | 487 | response_content = message.content 488 | self.remember(response_content) 489 | return response_content 490 | 491 | -------------------------------------------------------------------------------- /EartAgent/WebUI/WebUI_1.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | import os 13 | import dashscope 14 | from openai import OpenAI 15 | from dashscope import MultiModalConversation 16 | from flask import Flask, request, render_template_string 17 | dashscope.api_key = "api_key" 18 | app = Flask(__name__) 19 | """ 20 | Developers just need to modify call_with_messages themselves 21 | """ 22 | def call_with_messages(user_input): 23 | # example 24 | client = OpenAI( 25 | base_url="https://integrate.api.nvidia.com/v1", 26 | api_key="" 27 | ) 28 | 29 | completion = client.chat.completions.create( 30 | model="meta/llama3-70b-instruct", 31 | messages=[{"role": "user", "content": '你必须用简体中文回答我'+user_input}], 32 | temperature=0.5, 33 | top_p=1, 34 | max_tokens=1024, 35 | stream=True 36 | ) 37 | 38 | re = '' 39 | for chunk in completion: 40 | if chunk.choices[0].delta.content is not None: 41 | re += chunk.choices[0].delta.content 42 | return re 43 | 44 | 45 | 46 | """ 47 | 以下函数不需要修改 48 | """ 49 | 50 | HTML_TEMPLATE = ''' 51 | 52 | 53 |
[Lines 54-346: the body of HTML_TEMPLATE and most of the surrounding route handler were stripped when this dump was rendered as HTML and cannot be recovered here; only the fragment of a comment about wrapping data in <pre> tags survives.]
347 | processed_data = processed_data.replace('```', '<pre>').replace('```', '</pre>')  # the <pre>/</pre> arguments are reconstructed; the original tags were stripped by the HTML rendering of this dump
348 | return processed_data
349 | else:
350 | return render_template_string(HTML_TEMPLATE)
351 |
352 |
353 | @app.route('/process_image', methods=['POST'])
354 | def process_image():
355 | image_file = request.files['imageFile']
356 | sys_prompt = request.form['sysPrompt']
357 | processed_response = process_multimodal_input(image_file, sys_prompt)
358 | return processed_response
359 |
360 |
361 | @app.route('/process_inputs', methods=['POST'])
362 | def process_inputs():
363 | user_input = request.form['userInput']
364 | image_file = request.files['imageFile']
365 | sys_prompt = request.form['sysPrompt']
366 |
367 | # If the user provided text input, merge it with the system prompt
368 | if user_input:
369 | sys_prompt = f"{user_input} {sys_prompt}"
370 |
371 | processed_response = process_multimodal_input(image_file, sys_prompt)
372 | return processed_response
373 |
374 |
375 | def process_multimodal_input(image_file, sys_prompt):
376 | """
377 | Process the uploaded image file and system prompt, and generate a response.
378 |
379 | Args:
380 | image_file (werkzeug.datastructures.FileStorage): The uploaded image file.
381 | sys_prompt (str): The system prompt for the analysis.
382 |
383 | Returns:
384 | str: The generated response.
385 | """
386 | # Build the full path for the uploaded image
387 | image_path = os.path.join('D:/images', image_file.filename)
388 |
389 | # Save the uploaded image to that path
390 | image_file.save(image_path)
391 |
392 | # Build the local file URI
393 | local_file_path = f'file://{image_path}'
394 |
395 | messages = [
396 | {
397 | 'role': 'system',
398 | 'content': [{
399 | 'text': '你是我的好助手'  # "You are my helpful assistant"
400 |
401 | }]
402 | },
403 | {
404 | 'role': 'user',
405 | 'content': [
406 | {
407 | 'image': local_file_path
408 | },
409 | {
410 | 'text': sys_prompt
411 | },
412 | ]
413 | }
414 | ]
415 |
416 | response = MultiModalConversation.call(model='qwen-vl-max', messages=messages)
417 |
418 | if response["status_code"] == 200:
419 | res = response["output"]["choices"][0]["message"]["content"][0]["text"]
420 | else:
421 | raise Exception("Failed to process the input. Please check your network connection or image URL.")
422 |
423 | return res
424 |
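For reference, a minimal hedged sketch of exercising process_multimodal_input outside a request context; it assumes dashscope.api_key has been set to a real key, that the hard-coded 'D:/images' directory exists, and that a local test image is available (the path and filename below are placeholders):

from werkzeug.datastructures import FileStorage

with open("test.png", "rb") as fh:  # hypothetical local image
    upload = FileStorage(stream=fh, filename="test.png")
    # Returns the model's text description of the uploaded image.
    print(process_multimodal_input(upload, "Please describe this image"))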
425 |
426 | # Text handling function
427 | def process_input(input_text):
428 | """处理函数,这里简单地将输入文本"""
429 | return call_with_messages(input_text)
430 |
431 |
432 | if __name__ == '__main__':
433 | app.run(debug=True)
434 |
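Since the module docstring above notes that developers only need to swap out call_with_messages, here is a minimal, hedged sketch of backing it with the framework's own QwenAgent instead of the NVIDIA endpoint; the API key and prompt below are placeholders, not part of the original file:

from EartAgent.Agent.text_agents import QwenAgent, AgentConfig

QwenAgent.api_key = "your_dashscope_api_key"  # placeholder
_webui_agent = QwenAgent(AgentConfig(name="WebUI", system_prompt="You are a helpful assistant."))

def call_with_messages(user_input):
    # Delegate the turn to the agent; chat() returns the reply text.
    return _webui_agent.chat(user_input)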
--------------------------------------------------------------------------------
/EartAgent/WebUI/WebUI_2.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from flask import Flask, request, render_template_string, jsonify
14 | from openai import OpenAI
15 |
16 | app = Flask(__name__)
17 | """
18 | Users only need to modify call_with_messages themselves
19 | """
20 |
21 |
22 | def call_with_messages(query):
23 | # example
24 | client = OpenAI(
25 | base_url="https://integrate.api.nvidia.com/v1",
26 | api_key="your_api_key"
27 | )
28 | completion = client.chat.completions.create(
29 | model="meta/llama3-70b-instruct",
30 | messages=[{"role": "user", "content": '你必须用简体中文回复我!' + query}],  # prefix: "You must reply to me in Simplified Chinese!"
31 | temperature=0.7,
32 | top_p=1,
33 | max_tokens=1024,
34 | stream=True
35 | )
36 | result = 'llama-3:'
37 | for chunk in completion:
38 | if chunk.choices[0].delta.content is not None:
39 | result += chunk.choices[0].delta.content
40 | return result
41 |
42 |
43 | """
44 | The functions below do not need to be modified
45 | """
46 | # The front-end HTML, kept as a template string
47 | HTML_TEMPLATE = '''
[Lines 48-251: the markup, CSS, and JavaScript of HTML_TEMPLATE were stripped when this dump was rendered as HTML; only two text nodes survive, the page title "Gemini" and the heading "EartAgent WebUI".]
252 | '''
253 |
254 |
255 | # Route handlers
256 | @app.route('/', methods=['GET'])
257 | def index():
258 | return render_template_string(HTML_TEMPLATE)
259 |
260 |
261 | # Route that handles queries
262 | @app.route('/query', methods=['POST'])
263 | def query():
264 | try:
265 | user_input = request.json.get('query')
266 | print(user_input)
267 | if user_input:
268 | result = call_with_messages(user_input)
269 | print(result)
270 | return jsonify({'result': result})
271 | else:
272 | return jsonify({'error': 'No query provided'}), 400
273 | except Exception as e:
274 | return jsonify({'error': str(e)}), 500
275 |
276 |
277 | if __name__ == '__main__':
278 | app.run(debug=True)
279 |
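For reference, a small hedged client sketch for the /query route above; it assumes the app is running locally on Flask's default port 5000 and that the requests package is installed:

import requests

resp = requests.post("http://127.0.0.1:5000/query",
                     json={"query": "Introduce EartAgent in one sentence."})
resp.raise_for_status()
print(resp.json()["result"])  # the route returns {'result': ...} on success, {'error': ...} otherwise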
--------------------------------------------------------------------------------
/EartAgent/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Import all modules in the package. """
3 | from . import Agent
4 | from . import ACM
5 | from . import app_packaging
6 | from . import game_scripts
7 | from . import thinking
8 | from . import utils
9 | from . import WebUI
10 | __all__ = [
11 | "Agent",
12 | "ACM",
13 | "app_packaging",
14 | "game_scripts",
15 | "thinking",
16 | "utils",
17 | "WebUI"
18 | ]
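Given the re-exports above and those in the subpackage __init__ files shown earlier, a typical import in user code might look like this hedged sketch; the key and prompts are placeholders:

from EartAgent.Agent import QwenAgent, AgentConfig
from EartAgent.ACM import Pipeline

QwenAgent.api_key = "your_dashscope_api_key"  # placeholder
writer = QwenAgent(AgentConfig(name="Writer", system_prompt="Draft text."))
critic = QwenAgent(AgentConfig(name="Critic", system_prompt="Critique the draft."))
flow = Pipeline([writer, critic])  # agents respond in order; see ACM/AgentCommunication.py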
--------------------------------------------------------------------------------
/EartAgent/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/app_packaging/Website_Cloning.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from EartAgent.Agent.text_agents import *
14 | from EartAgent.Agent.images2text_agents import *
15 | from EartAgent.utils.UT import UtilityTools
16 |
17 | class WebsiteClone:
18 | def __init__(self, qwen_api_key: str, claude_api_key: str):
19 | self.qwen_api_key = qwen_api_key
20 | self.claude_api_key = claude_api_key
21 | self.utility_tools = UtilityTools()
22 |
23 | def clone_website(self, image_path: str, qwen_iterations: int, claude_iterations: int) -> str:
24 | dashscope.api_key = self.qwen_api_key
25 | qwen_agent = QwenVLMaxAgent(
26 | IAgentConfig(name="QwenVLMax", system_prompt="Analyze this image and generate HTML css js code.",
27 | model_name="qwen-vl-max"))
28 | initial_code = qwen_agent.analyze(image_path, "Please look carefully at the layout and typography of the site without errors,Generate HTML css js code based on this image,")
29 | print("The first analyze-->", initial_code)
30 | refined_code = self.reflect_and_optimize1(initial_code, "Improve and optimize HTML css js code", qwen_agent, qwen_iterations, image_path)
31 | ClaudeAgent.api_key = self.claude_api_key
32 | claude_agent = ClaudeAgent(
33 | config=AgentConfig(name="Claude",
34 | system_prompt="Refine this HTML code.",
35 | model_name='claude-3-sonnet-20240229'))
36 | final_code = self.reflect_and_optimize2(refined_code, "Improve and optimize HTML css js code", claude_agent, claude_iterations)
37 | return final_code
38 |
39 | def reflect_and_optimize1(self, code: str, scenario: str, agent, iterations: int, image_path: str) -> str:
40 | current_code = code
41 | for i in range(iterations):
42 | prompt = f"Improve the following code for the scenario '{scenario}': '{current_code}'. Consider the functionality, aesthetics, and correctness.You look at the code based on the image to see if the code restores the image, if not it must be modified and you must write out all the code, writing the html, css, js code together"
43 | reflection = agent.analyze(image_path, prompt)
44 | current_code = reflection
45 | print(f"The {i+1} reflect by qwen-->", current_code)
46 | return current_code
47 |
48 | def reflect_and_optimize2(self, code: str, scenario: str, agent, iterations: int) -> str:
49 | current_code = code
50 | for i in range(iterations):
51 | prompt = f"Improve the following code for the scenario '{scenario}': '{current_code}'. Consider the functionality, aesthetics, and correctness.You continue to modify and optimize according to the code I give you and must write the entire code, putting together html, css, js code"
52 | reflection = agent.chat(prompt)
53 | current_code = reflection
54 | print(f"The {i + 1} reflect by claude-->", current_code)
55 | return current_code
56 |
57 |
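A hedged usage sketch for WebsiteClone, based only on the constructor and clone_website signature above; the keys and the screenshot path are placeholders:

from EartAgent.app_packaging.Website_Cloning import WebsiteClone

cloner = WebsiteClone(qwen_api_key="your_dashscope_key",    # placeholder
                      claude_api_key="your_anthropic_key")  # placeholder
# One Qwen-VL refinement pass, then one Claude refinement pass, over a page screenshot.
html = cloner.clone_website("file://path/to/screenshot.png", qwen_iterations=1, claude_iterations=1)
print(html)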
--------------------------------------------------------------------------------
/EartAgent/app_packaging/Write_Paper.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from EartAgent.Agent.text_agents import *
14 | from typing import List
15 | import logging
16 |
17 |
18 | def write_paper(question: str, search: str, serpapi_api_key: str, agent_list: List):
19 | """
20 | Function to write a paper using AI agents.
21 |
22 | :param question: The user's question to guide the paper writing.
23 | :param search: Search keywords to retrieve relevant web information.
24 | :param serpapi_api_key: SerpAPI API key for web scraping.
25 | :param agent_list: List of AI agents used for paper writing.
26 | :return: None
27 | """
28 | try:
29 | # Create agent instances
30 | agents = agent_list
31 | primary_agent = agents[3]
32 | del agents[3]
33 |
34 | # Search for related information
35 | tool = UtilityTools()
36 | web_search_results = tool.search_crawler(
37 | api_key=serpapi_api_key,
38 | query=search,
39 | max_results=1,
40 | agent=primary_agent
41 | )
42 |
43 | # Initialize system prompt
44 | system_prompt = f'The user\'s question is: {question}\n'
45 | system_prompt += f'The relevant search results are: {web_search_results}\n'
46 | system_prompt += 'Based on this information, please help me complete a 5000-word paper.'
47 |
48 | # Agent interaction loop
49 | while True:
50 | agent_responses = []
51 | for agent in agents:
52 | response = agent.generate_response(system_prompt)
53 | agent_responses.append(response)
54 | system_prompt = response
55 |
56 | # User input
57 | user_input = input("Type 'exit' to end the discussion, or press Enter to continue: ").strip()
58 | if user_input.lower() == 'exit':
59 | logging.info("Discussion ended.")
60 | break
61 |
62 | # Add user input to system prompt
63 | system_prompt = f"User input: {user_input}\n" + system_prompt
64 |
65 | # Generate paper using primary agent
66 | paper_content = primary_agent.generate_paper(system_prompt)
67 |
68 | # Save the text as a Word document
69 | tool.write_docx(content=paper_content, file_path='From_the_paper_generated_by_Eartagent.docx')
70 |
71 | logging.info('The paper has been saved, file name: From_the_paper_generated_by_Eartagent.docx')
72 |
73 | except Exception as e:
74 | logging.error(f"An error occurred: {e}")
75 |
76 |
77 |
78 |
79 |
--------------------------------------------------------------------------------
/EartAgent/app_packaging/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Import all app_packaging modules in the package. """
3 | from .voice_dialog_assistant import VoiceAssistant
4 | from .Write_Paper import write_paper
5 | from .Website_Cloning import WebsiteClone
6 |
7 | __all__ = [
8 | "VoiceAssistant",
9 | "write_paper",
10 | "WebsiteClone",
11 | ]
--------------------------------------------------------------------------------
/EartAgent/app_packaging/__pycache__/Website_Cloning.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/app_packaging/__pycache__/Website_Cloning.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/app_packaging/__pycache__/Write_Paper.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/app_packaging/__pycache__/Write_Paper.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/app_packaging/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/app_packaging/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/app_packaging/__pycache__/voice_dialog_assistant.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/app_packaging/__pycache__/voice_dialog_assistant.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/app_packaging/voice_dialog_assistant.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | import os
14 | import random
15 | import tempfile
16 | import emoji
17 | import wave
18 | import pyaudio
19 | import threading
20 | import sounddevice as sd
21 | import numpy as np
22 | from EartAgent.Agent.text_agents import QwenAgent, AgentConfig
23 | from EartAgent.Agent.Audio_Agents import AudioAgentConfig, QwenAudioAgent, SambertAgent
24 |
25 |
26 | class VoiceAssistant:
27 | def __init__(self, api_key: str):
28 | QwenAudioAgent.api_key = api_key
29 | QwenAgent.api_key = api_key
30 | SambertAgent.api_key = api_key
31 |
32 | self.audio_config = AudioAgentConfig(system_prompt="The output format is: The audio says..")
33 | self.audio_agent = QwenAudioAgent(self.audio_config)
34 |
35 | self.text_config = AgentConfig(name='Xiao Li', system_prompt='Please note you are having a real-time conversation with a person, keep the text concise',
36 | remember=True)
37 | self.text_agent = QwenAgent(self.text_config)
38 |
39 | self.speech_synthesis_config = AudioAgentConfig(system_prompt="")
40 | self.speech_synthesis_agent = SambertAgent(self.speech_synthesis_config)
41 |
42 | self.audio = pyaudio.PyAudio()
43 | self.stream = None
44 | self.frames = []
45 | self.is_recording = False
46 | self.lock = threading.Lock()
47 |
48 |     # Recording control: start/stop the microphone stream and buffer audio frames
49 | def start_recording(self):
50 |         self.frames = []  # discard audio buffered from any previous recording
51 |         self.stream = self.audio.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
52 | self.is_recording = True
53 | print("Recording started...")
54 | threading.Thread(target=self.record_audio).start()
55 |
56 | def record_audio(self):
57 | while self.is_recording:
58 | data = self.stream.read(1024)
59 | self.frames.append(data)
60 |
61 | def stop_recording(self):
62 | self.is_recording = False
63 | self.stream.stop_stream()
64 | self.stream.close()
65 | print("Recording stopped...")
66 |
67 | with self.lock:
68 | with tempfile.TemporaryDirectory() as temp_dir:
69 | temp_wav = os.path.join(temp_dir, 'temp.wav')
70 | waveFile = wave.open(temp_wav, 'wb')
71 | waveFile.setnchannels(1)
72 | waveFile.setsampwidth(self.audio.get_sample_size(pyaudio.paInt16))
73 | waveFile.setframerate(16000)
74 | waveFile.writeframes(b''.join(self.frames))
75 | waveFile.close()
76 |
77 | # Add a delay before releasing the file
78 |
79 | # time.sleep(0.5)
80 | audio_response = self.audio_agent(temp_wav)
81 | print(f"User: {audio_response}")
82 | emoji_list = ['😀', '😃', '😁', '😄', '🙂', '😘', '😊', '🤗', '🤪', '😋', '😍']
83 | rdn = random.randint(0, len(emoji_list) - 1)
84 | text_response = self.text_agent.chat(audio_response)
85 | self.text_agent.speak(text_response + emoji.emojize(emoji_list[rdn]))
86 | # Synthesize speech response
87 | speech_data = self.speech_synthesis_agent.chat(text_response)
88 | if speech_data:
89 | self.play_audio(speech_data)
90 |
91 | # Ensure enough time before deleting the temporary directory
92 | # time.sleep(1)
93 |
94 | # The temporary directory will be automatically deleted when exiting the context manager
95 | def play_audio(self, audio_data):
96 | sample_rate = 44100
97 | bit_depth = 16
98 | # Determine the type of audio data (8-bit, 16-bit, 32-bit)
99 | dtype = {
100 | 8: np.uint8,
101 | 16: np.int16,
102 | 32: np.int32
103 | }[bit_depth]
104 |
105 | # Convert byte data to a numpy array
106 | audio_data = np.frombuffer(audio_data, dtype=dtype)
107 |
108 | # If the audio is stereo, convert to mono
109 | if audio_data.ndim == 2 and audio_data.shape[1] == 2:
110 | audio_data = audio_data.mean(axis=1)
111 |
112 | # Play the audio data
113 | sd.play(audio_data, sample_rate)
114 |
115 | # Wait for the audio to finish playing
116 | sd.wait()
117 |
118 | def run(self):
119 | while True:
120 | try:
121 | print("Press Enter to start recording, press Enter again to stop recording...")
122 | input()
123 | self.start_recording()
124 | input()
125 | self.stop_recording()
126 | except KeyboardInterrupt:
127 | break
128 |
129 |
130 |
--------------------------------------------------------------------------------
/EartAgent/game_scripts/Italy.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from dataclasses import dataclass
14 | from typing import Dict, List, Type
15 | import random
16 | from EartAgent.Agent.text_agents import *
17 |
18 | @dataclass
19 | class Resident:
20 | name: str
21 | occupation: str
22 | agent_cls: Type[Agent]
23 | agent_config: AgentConfig
24 |
25 | @dataclass
26 | class TownConfig:
27 | num_residents: int = 8
28 | main_character_agent_cls: Type[Agent] = QwenAgent
29 | main_character_model_name: str = "qwen-turbo"
30 | resident_agent_cls: Type[Agent] = QwenAgent
31 | resident_model_name: str = "qwen-turbo"
32 | temperature: float = 0.7
33 | max_tokens: int = 256
34 |
35 | class ItalianTown:
36 | def __init__(self, config: TownConfig):
37 | self.config = config
38 | self.residents: Dict[str, Resident] = {}
39 | self.setup_residents()
40 |
41 | def setup_residents(self):
42 | occupations = ["farmer", "fisherman", "merchant", "artisan", "chef"]
43 | self.residents = {
44 | "Main Character": Resident(
45 | name="Main Character",
46 | occupation="artist",
47 | agent_cls=self.config.main_character_agent_cls,
48 | agent_config=AgentConfig(
49 | name="Main Character",
50 | system_prompt="You are an adventurous artist who loves to travel and experience different cultures. You are vacationing in a small town in Italy, hoping to gain inspiration for your work.",
51 | model_name=self.config.main_character_model_name,
52 | temperature=self.config.temperature,
53 | max_tokens=self.config.max_tokens,
54 | ),
55 | ),
56 | **{
57 | f"Resident{i}": Resident(
58 | name=f"Resident{i}",
59 |                     occupation=resident.occupation,  # reuse the occupation drawn in the comprehension below so it matches the system prompt
60 | agent_cls=self.config.resident_agent_cls,
61 | agent_config=AgentConfig(
62 | name=f"Resident{i}",
63 | system_prompt=f"You are a {resident.occupation} living in this small town. You love the lifestyle here and are happy to share your experiences and insights with visiting travelers.",
64 | model_name=self.config.resident_model_name,
65 | temperature=self.config.temperature,
66 | max_tokens=self.config.max_tokens,
67 | ),
68 | )
69 | for i, resident in enumerate(
70 | [
71 | Resident(name=f"Resident{i + 1}", occupation=random.choice(occupations),
72 | agent_cls=self.config.resident_agent_cls, agent_config=None)
73 | for i in range(self.config.num_residents)
74 | ],
75 | start=1,
76 | )
77 | },
78 | }
79 |
80 | def run_day(self):
81 | for resident in self.residents.values():
82 | agent = resident.agent_cls(resident.agent_config)
83 | print(f"\n{resident.name} ({resident.occupation}):")
84 | agent.remember_flag = True
85 |
86 | # Interaction between main character and residents
87 | if resident.name == "Main Character":
88 | print("You are wandering around the town, admiring the surroundings...")
89 | for other_resident in [r for r in self.residents.values() if r.name != "Main Character"]:
90 | encounter = agent.chat(
91 | f"You encounter a {other_resident.occupation}, {other_resident.name}. Do you want to talk to them and learn about life here?"
92 | )
93 | print(f"\nMain Character: {encounter}")
94 | if "yes" in encounter.lower():
95 | other_agent = other_resident.agent_cls(other_resident.agent_config)
96 | other_agent.remember_flag = True
97 | chat = agent.chat(
98 | f"Nice to meet you! As a visitor, I am very interested in the lifestyle here. Can you tell me something about your daily life as a {other_resident.occupation}?"
99 | )
100 | print(f"\nMain Character: {chat}")
101 | response = other_agent.chat(chat)
102 | print(f"\n{other_resident.name}: {response}")
103 |
104 | # Interaction between residents
105 | else:
106 | daily_routine = agent.chat(f"As a {resident.occupation}, how do you spend your day?")
107 | print(f"\n{resident.name}: {daily_routine}")
108 | for other_resident in [
109 | r for r in self.residents.values() if r.name != resident.name
110 | ]:
111 | interact = agent.chat(
112 | f"You encounter {other_resident.name}, a {other_resident.occupation}. Do you want to greet them and have a conversation?"
113 | )
114 | if "yes" in interact.lower():
115 | other_agent = other_resident.agent_cls(other_resident.agent_config)
116 | other_agent.remember_flag = True
117 | chat = agent.chat(f"What do you plan to say to {other_resident.name}?")
118 | print(f"\n{resident.name}: {chat}")
119 | response = other_agent.chat(f"{resident.name} says to you: '{chat}'. What is your response?")
120 | print(f"\n{other_resident.name}: {response}")
121 |
122 | agent.remember_flag = False
123 |
124 | if __name__ == "__main__":
125 | QwenAgent.api_key = "your_api_key"
126 | town_config = TownConfig()
127 | italian_town = ItalianTown(town_config)
128 |
129 | for day in range(1, 4):
130 | print(f"\n=========== Day {day} ===========")
131 | italian_town.run_day()
132 |
--------------------------------------------------------------------------------
/EartAgent/game_scripts/Wolf.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | import abc
14 | from dataclasses import dataclass
15 | from typing import List, Optional, Type
16 | import random
17 |
18 | from EartAgent.Agent.text_agents import *
19 |
20 |
21 | @dataclass
22 | class GameConfig:
23 | num_villagers: int = 2
24 | num_wolves: int = 1
25 | num_seers: int = 1
26 | agent_cls: Type[Agent] = ClaudeAgent # Default to using ClaudeAgent
27 |
28 |
29 | class WerewolfGame:
30 | def __init__(self, config: GameConfig):
31 | self.config = config
32 | self.players = []
33 | self.dead_players = []
34 | self.day = True
35 | self.discussion_history = []
36 | self.setup_players()
37 |
38 | def setup_players(self):
39 | villagers = [self.config.agent_cls(config=AgentConfig(name=f'Villager{i}',
40 | system_prompt="You are a villager. Your goal is to collaborate with other villagers to identify and expel the werewolves from the village."))
41 | for i in range(self.config.num_villagers)]
42 | wolves = [self.config.agent_cls(
43 | config=AgentConfig(name=f'Wolf{i}', system_prompt="You are a werewolf. Your goal is to secretly kill all the villagers without being identified."))
44 | for i in range(self.config.num_wolves)]
45 | seers = [self.config.agent_cls(
46 | config=AgentConfig(name=f'Seer{i}', system_prompt="You are a seer. You can verify the identity of other players to help the villagers identify the werewolves."))
47 | for i in range(self.config.num_seers)]
48 | self.players = villagers + wolves + seers
49 | random.shuffle(self.players)
50 |
51 | def night_phase(self):
52 | for player in self.players:
53 | if 'Wolf' in player.config.name:
54 | victim = random.choice([p for p in self.players if 'Villager' in p.config.name])
55 | wolf_action = player.chat(
56 | f'Night has fallen. Who should we attack? Available villagers: {[p.config.name for p in self.players if "Villager" in p.config.name]}')
57 | self.discussion_history.append(f'{player.config.name} says: {wolf_action}')
58 | print(wolf_action)
59 | elif 'Seer' in player.config.name:
60 | checked_player = random.choice(self.players)
61 | seer_action = player.chat(
62 | f'Night has fallen. Whose identity do you want to verify? Available players: {[p.config.name for p in self.players]}')
63 | self.discussion_history.append(f'{player.config.name} says: {seer_action}')
64 | print(seer_action)
65 | if 'Wolf' in checked_player.config.name:
66 | role = 'werewolf'
67 | else:
68 | role = 'villager'
69 | seer_reveal = player.chat(f'You checked {checked_player.config.name}. Their identity is {role}.')
70 | self.discussion_history.append(f'{player.config.name} says: {seer_reveal}')
71 | print(seer_reveal)
72 |
73 | def day_phase(self):
74 | for player in self.players:
75 | player_action = player.chat(
76 | f'A new day begins. What happened last night? {" ".join(self.discussion_history)} Based on this information, who do you suspect is a werewolf?')
77 | self.discussion_history.append(f'{player.config.name} says: {player_action}')
78 | print(player_action)
79 |
80 | # Simulate voting results
81 | vote_counts = {}
82 | for player in [p for p in self.players if 'Villager' in p.config.name]:
83 | suspect = player.chat(
84 | f'It is time to vote to expel a suspected werewolf. Based on previous discussions, who do you suspect is a werewolf? Available players: {[p.config.name for p in self.players]}')
85 | self.discussion_history.append(f'{player.config.name} votes: {suspect}')
86 | vote_counts[suspect] = vote_counts.get(suspect, 0) + 1
87 |
88 | to_be_expelled = max(vote_counts, key=vote_counts.get)
89 | expelled_player = next((p for p in self.players if p.config.name == to_be_expelled), None)
90 | if expelled_player:
91 | self.players.remove(expelled_player)
92 | self.dead_players.append(expelled_player)
93 | print(f'After the vote, {expelled_player.config.name} was expelled from the village.')
94 |
95 | def game_over(self):
96 | wolves = [p for p in self.players if 'Wolf' in p.config.name]
97 | villagers = [p for p in self.players if 'Villager' in p.config.name]
98 | if not wolves:
99 | print("Villagers win!")
100 | return True
101 | elif len(wolves) >= len(villagers):
102 | print("Werewolves win!")
103 | return True
104 | return False
105 |
106 | def main(game_config):
107 | game = WerewolfGame(config=game_config)
108 | round_counter = 1
109 | while not game.game_over():
110 | print(f"\n---------- Round {round_counter} ----------")
111 | if game.day:
112 | print("Day Phase - Villagers discuss and vote to expel suspected werewolves")
113 | game.day_phase()
114 | game.day = False # End day, start night
115 | else:
116 | print("Night Phase - Werewolves choose their target, seers verify identities")
117 | game.night_phase()
118 | game.day = True # End night, start day
119 |
120 | round_counter += 1 # Increment round counter
121 |
122 | print("\nGame Over")
123 | # Print final surviving players
124 | print("Surviving players:")
125 | for player in game.players:
126 | print(f"{player.config.name}")
127 | print("Eliminated players:")
128 | for dead_player in game.dead_players:
129 | print(f"{dead_player.config.name}")
130 |
131 |
132 | if __name__ == '__main__':
133 | # Run the game
134 | # Using QwenAgent
135 | QwenAgent.api_key = "your_api_key"
136 | qwen_game_config = GameConfig(num_villagers=3, num_wolves=2, num_seers=1, agent_cls=QwenAgent)
137 | main(qwen_game_config)
138 |
--------------------------------------------------------------------------------
/EartAgent/game_scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Import all game_scripts modules in the package. """
3 | from .Italy import ItalianTown,TownConfig
4 | from .Wolf import WerewolfGame,GameConfig
5 |
6 | __all__ = [
7 | "ItalianTown",
8 | "TownConfig",
9 | "WerewolfGame",
10 | "GameConfig",
11 | ]
--------------------------------------------------------------------------------
/EartAgent/game_scripts/__pycache__/Italy.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/game_scripts/__pycache__/Italy.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/game_scripts/__pycache__/Wolf.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/game_scripts/__pycache__/Wolf.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/game_scripts/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/game_scripts/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/game_scripts/moot court.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from EartAgent.Agent.text_agents import YiAgent, AgentConfig
14 | from EartAgent.ACM.AgentCommunication import MsgHub, Pipeline
15 |
16 | # Set API key
17 | YiAgent.api_key = "api_key"
18 |
19 | # Initialize the judge agent
20 | judge_agent = YiAgent(config=AgentConfig(model_name="yi-medium",
21 | name='Judge',
22 | system_prompt=(
23 | "You are an impartial judge, responsible for presiding over the court session and ultimately making a judgment."
24 | "Please remain neutral, control the process of the trial, ensure everyone has the opportunity to speak, and make a fair judgment in the end."
25 | "Your replies should be concise and clear, and ensure that all participants understand."
26 | ),
27 | remember=True
28 | ))
29 |
30 | # Initialize the defense attorney agent
31 | defense_lawyer_agent = YiAgent(config=AgentConfig(model_name="yi-medium", name='DefenseLawyer',
32 | system_prompt=(
33 | "You are the defense attorney, responsible for defending the defendant."
34 | "Please question the witness's testimony in detail and provide strong counter-evidence."
35 | "Your replies should be logically clear and well-founded, "
36 | "with the goal of proving the defendant's innocence."
37 | ),
38 | remember=True
39 | ))
40 |
41 | # Initialize the prosecutor agent
42 | prosecutor_agent = YiAgent(
43 | config=AgentConfig(model_name="yi-medium",
44 | name='Prosecutor',
45 | system_prompt=(
46 | "You are the prosecutor, responsible for charging the defendant and providing evidence."
47 | "Please provide strong evidence and detail the defendant's criminal behavior."
48 | "Your replies should include facts and logic, with the goal of proving the defendant's "
49 | "guilt."),
50 | remember=True))
51 |
52 | # Initialize the witness agent
53 | witness_agent = YiAgent(
54 | config=AgentConfig(model_name="yi-medium",
55 | name='Witness',
56 | system_prompt=(
57 | "You are a witness, answer questions based on facts."
58 | "Please describe in detail what you saw, be as specific as possible in your answers, and provide all the details you know."
59 | "Please note that this is just a simulated court game, do not mention "
60 | "AI."),
61 | remember=True))
62 |
63 | # Create a message hub and pipeline
64 | msg_hub = MsgHub(agent_list=[judge_agent, defense_lawyer_agent, prosecutor_agent, witness_agent])
65 | pipeline = Pipeline(agent_list=[judge_agent, defense_lawyer_agent, prosecutor_agent, witness_agent])
66 |
67 | def court_proceedings():
68 | # Opening statement
69 | initial_message = "Ladies and gentlemen, please be quiet, the court session is now beginning."
70 |
71 | # Define the specific case background story
72 | case_background = (
73 | "In a busy shopping center, a theft of a mobile phone occurred."
74 | "The defendant is a young man who is accused of stealing a smartphone worth $1000 in an electronics store."
75 | "There is surveillance footage that recorded part of the incident, and there is also an eyewitness."
76 | )
77 |
78 | # Broadcast the case background
79 | msg_hub.broadcast_message(case_background)
80 |
81 | def condition(agent):
82 | # Define specific conditions here, if there are no conditions just return True
83 | return True
84 |
85 | # Start the court session
86 | msg_hub.broadcast_message(initial_message)
87 | pipeline.execute_pipeline(initial_message, condition=condition)
88 |
89 | # Simulate user input for the verdict
90 | user_verdict = input("Please enter your verdict (guilty/innocent): ")
91 | judge_verdict = judge_agent(f"The jury finds the defendant {user_verdict}.")
92 | print(f"Judge: {judge_verdict}")
93 | # Provide feedback and suggestions for improvement
94 | feedback = judge_agent(f"Based on your verdict, we suggest you pay attention to the following points in future court sessions...")
95 | print(f"Judge: {feedback}")
96 |
97 | if __name__ == "__main__":
98 | court_proceedings()
--------------------------------------------------------------------------------
/EartAgent/log_time/log_2024-05-17 10-37-20.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "timestamp": "2024-05-17 10:37:20",
4 | "agent_config": {
5 | "name": "\u5c0f\u674e",
6 | "model_name": null,
7 | "temperature": null,
8 | "max_tokens": null,
9 | "remember": false
10 | }
11 | }
12 | ]
--------------------------------------------------------------------------------
/EartAgent/log_time/log_2024-05-17 10-38-23.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "timestamp": "2024-05-17 10:38:23",
4 | "agent_config": {
5 | "name": "Kerry",
6 | "model_name": null,
7 | "temperature": null,
8 | "max_tokens": null,
9 | "remember": false
10 | }
11 | }
12 | ]
--------------------------------------------------------------------------------
/EartAgent/log_time/log_2024-05-17 10-39-30.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "timestamp": "2024-05-17 10:39:30",
4 | "agent_config": {
5 | "name": "Kerry",
6 | "model_name": null,
7 | "temperature": null,
8 | "max_tokens": null,
9 | "remember": false
10 | }
11 | }
12 | ]
--------------------------------------------------------------------------------
/EartAgent/thinking/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Import all thinking modules in the package. """
3 | from .reflector import AgentReflector
4 |
5 | __all__ = [
6 | "AgentReflector"
7 | ]
--------------------------------------------------------------------------------
/EartAgent/thinking/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/thinking/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/thinking/__pycache__/reflector.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/thinking/__pycache__/reflector.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/thinking/reflector.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | from EartAgent.Agent.text_agents import *
14 | from typing import List, Optional, Union
15 | import openai
16 | from openai import OpenAI
17 |
18 |
19 | class AgentReflector:
20 | """
21 | Pass in an instantiated agent. The AgentReflector is used for reflecting and improving the content generated by the agent.
22 | """
23 |
24 | def __init__(self, agent: Agent):
25 | self.agent = agent
26 |
27 | def self_reflect(self, reflection_count: int = 1, prompt: Optional[str] = None) -> str:
28 | """
29 | Reflect on the content generated by the agent.
30 |
31 | Args:
32 | reflection_count (int): The number of reflections, default is 1.
33 | prompt (str, optional): The prompt to guide reflection. If not provided, a default prompt will be used.
34 |
35 | Returns:
36 | str: The result after reflection.
37 | """
38 | content = "\n".join(self.agent.recall())
39 | if not prompt:
40 | prompt = f"Please reflect on and improve your previous content: {content}"
41 |
42 | for i in range(reflection_count):
43 |             content = self.agent(prompt)
44 |
45 | return content
46 |
47 | def reflect(self, reflection_count: int = 1, prompt: Optional[str] = None) -> str:
48 | """
49 | Reflect on the content generated by the agent.
50 |
51 | Args:
52 | reflection_count (int): The number of reflections, default is 1.
53 | prompt (str, optional): The prompt to guide reflection. If not provided, a default prompt will be used.
54 |
55 | Returns:
56 | str: The result after reflection.
57 | """
58 | content = "\n".join(self.agent.recall())
59 | if not prompt:
60 | prompt = f"Please reflect on and improve your previous content: {content}"
61 |
62 | for i in range(reflection_count):
63 |             content = self.agent(prompt)
64 |
65 | return content
66 |
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/EartAgent/utils/UT.py:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 | from EartAgent.Agent.text_agents import *
13 | import logging
14 | from datetime import datetime
15 | import serpapi
16 | import docx
17 | from docx import Document
18 | import PyPDF2
19 | from pptx import Presentation
20 | from langchain_community.document_loaders import AsyncHtmlLoader
21 | from langchain_community.document_transformers import Html2TextTransformer
22 | import re
23 | import os
24 | import json
25 | import mammoth
26 | import textract
27 |
28 |
29 | class UtilityTools:
30 | def __init__(self):
31 | logging.basicConfig(level=logging.INFO)
32 | self.init_log_dir()
33 | self.log_file_path = self.init_log_file()
34 | self.initialized = False # Initialization flag to prevent duplicate logging
35 |
36 | def init_log_dir(self):
37 | # Create a log directory in the 'log_time' folder within the current working directory
38 | self.log_dir = os.path.join(os.getcwd(), 'log_time')
39 | if not os.path.exists(self.log_dir):
40 | os.makedirs(self.log_dir)
41 |
42 | def init_log_file(self):
43 | # Create a base log file for appending content later
44 | timestamp = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
45 | log_file_path = os.path.join(self.log_dir, f"log_{timestamp}.json")
46 | return log_file_path
47 |
48 | def log_agent(self, agent_instance):
49 | # Log only during the first instantiation
50 | if not self.initialized:
51 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
52 | log_data = {
53 | "timestamp": timestamp,
54 | "agent_config": {
55 | "name": agent_instance.config.name,
56 | "model_name": agent_instance.config.model_name,
57 | "temperature": agent_instance.config.temperature,
58 | "max_tokens": agent_instance.config.max_tokens,
59 | "remember": agent_instance.config.remember
60 | }
61 | }
62 | with open(self.log_file_path, 'a') as file:
63 | if os.stat(self.log_file_path).st_size == 0:
64 | file.write("[\n")
65 | else:
66 | file.seek(file.tell() - 2, os.SEEK_SET)
67 | file.write(",\n")
68 | json.dump(log_data, file, indent=4)
69 | file.write("\n]")
70 | # Set log format and level
71 | logging.basicConfig(format='%(message)s', level=logging.INFO)
72 |
73 | # ANSI escape code - Blue
74 | BLUE = '\033[94m'
75 | ENDC = '\033[0m'
76 | # Set httpx log level to WARNING or higher to avoid logging every request and response
77 | logging.getLogger("httpx").setLevel(logging.WARNING)
78 |
79 | logging.info(f"{BLUE}Logged agent initialization data at {timestamp}{ENDC}")
80 | self.initialized = True # Ensure logging happens only once
81 |
82 | def get_date(self):
83 | """
84 | Get the current date and time
85 | """
86 | return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
87 |
88 | def serpapi_search(self, query, api_Key):
89 | """
90 | Perform a search using SerpAPI (default is Google)
91 | """
92 | print(">>>>>>>>>>>performing serpapi_search")
93 | params = {
94 | "engine": "google",
95 | "q": query,
96 | "api_key": api_Key
97 | }
98 |
99 | search = serpapi.search(params)
100 | dict_search = dict(search)
101 | text_collection_ready_to_LLM = ''
102 | dict_collection = {}
103 |
104 | try:
105 | answer_box = dict_search['answer_box']
106 | answer_text = answer_box['title']
107 | answer_link = answer_box['link']
108 | text_collection_ready_to_LLM += answer_text
109 | dict_collection[answer_text] = answer_link
110 | except KeyError:
111 | print("No answer box in the response, returning organic_results only")
112 |
113 | organic_results = dict_search.get('organic_results', [])
114 | for result in organic_results:
115 | result_title = result['title']
116 | result_link = result['link']
117 | text_collection_ready_to_LLM += " " + result_title
118 | dict_collection[result_title] = result_link
119 |
120 | print(">>>>>>>>>>>serpapi_search completed")
121 | return text_collection_ready_to_LLM, dict_collection
122 |
123 | def read_docx(self, file_path):
124 | """
125 | Read content from a docx file
126 | :param file_path: Path to the docx file
127 | :return: List of file content, each element corresponds to a paragraph
128 | """
129 | doc = docx.Document(file_path)
130 | text = [para.text for para in doc.paragraphs]
131 | return text
132 |
133 | def read_doc(self, file_path):
134 | """
135 | Read content from a doc file
136 | :param file_path: Path to the doc file
137 | :return: List of file content, each element corresponds to a line
138 | """
139 | # Read document content
140 | text = textract.process(file_path)
141 | return text.decode('utf-8')
142 |
143 | def read_txt(self, file_path):
144 | """
145 | Read content from a txt file
146 | :param file_path: Path to the txt file
147 | :return: List of file content, each element corresponds to a line
148 | """
149 | with open(file_path, 'r', encoding='utf-8') as f:
150 | text = f.read().splitlines()
151 | return text
152 |
153 | def write_docx(self, file_path, content):
154 | """
155 | Write content to a Word document
156 | """
157 | doc = Document()
158 | for line in content:
159 | doc.add_paragraph(line)
160 | doc.save(file_path)
161 |
162 | def read_pdf(self, file_path):
163 | """
164 | Read content from a PDF file
165 | """
166 | with open(file_path, 'rb') as file:
167 | pdf_reader = PyPDF2.PdfReader(file)
168 | text = [page.extract_text() for page in pdf_reader.pages]
169 | return text
170 |
171 | def read_ppt(self, file_path):
172 | """
173 | Read content from a PowerPoint file
174 | """
175 | ppt = Presentation(file_path)
176 | text = []
177 | for slide in ppt.slides:
178 | for shape in slide.shapes:
179 | if shape.has_text_frame:
180 | text.append(shape.text)
181 | return text
182 |
183 | def web_crawler_all(self, url):
184 | """
185 | Crawl all information from a webpage
186 | """
187 | headers = {}
188 | loader = AsyncHtmlLoader([url])
189 | docs = loader.load()
190 | html2text_transformer = Html2TextTransformer()
191 | docs_transformed = html2text_transformer.transform_documents(docs)
192 | print(docs_transformed)
193 |         text = docs_transformed[0].page_content if docs_transformed else ""
194 |         clean_text = re.sub(r'[^\w\s]', '', text)
195 |         clean_text = re.sub(r'\s+', ' ', clean_text)  # collapse newlines and repeated whitespace instead of stripping the letter 'n'
196 | return clean_text
197 |
198 | def web_crawler_by_LLM(self, url, prompt, agent):
199 | """
200 | Adaptive crawler that leverages LLM for enhanced performance
201 | """
202 | loader = AsyncHtmlLoader([url])
203 | docs = loader.load()
204 | html2text_transformer = Html2TextTransformer()
205 | docs_transformed = html2text_transformer.transform_documents(docs)
206 |         text = docs_transformed[0].page_content if docs_transformed else ""
207 |         clean_text = re.sub(r'[^\w\s]', '', text)
208 |         clean_text = re.sub(r'\s+', ' ', clean_text)  # collapse newlines and repeated whitespace instead of stripping the letter 'n'
209 | transformed_text = agent(
210 | f'Process this text and extract relevant content based on the prompt "{prompt}" while summarizing it succinctly within 500 words: {clean_text}')
211 | return transformed_text
212 |
213 | def search_crawler(self, api_key, q, max_crawler_count, agent):
214 | """
215 | Integration of search and crawling
216 | """
217 | if isinstance(q, str):
218 | text_collection_ready_to_LLM, dict_collection = self.serpapi_search(q, api_key)
219 | else:
220 | raise ValueError(
221 |                 "The query 'q' must be passed as a string.")
222 |
223 | crawler_res = ''
224 | count = 0
225 | for key in dict_collection:
226 | if count < max_crawler_count:
227 | url = dict_collection[key]
228 | if url:
229 | print('url:', url)
230 | crawler_res += self.web_crawler_by_LLM(url, q, agent)
231 | count += 1
232 | else:
233 |                         raise ValueError('No URL was found for this search result in dict_collection')
234 | else:
235 | print("The maximum number of iterations for crawling has been reached!")
236 | break
237 | return crawler_res
238 |
239 | def reflect_on_response(self, scenario: str, agent_response: str, agent, iterations: int = 1) -> str:
240 | """
241 | Let the Agent reflect on its response in a specific scenario, can iterate multiple times for deeper reflection.
242 | Args:
243 | scenario (str): Describes the scenario the agent needs to reflect on, e.g., "code explanation" or "answering questions".
244 | agent_response (str): The agent's original response to the scenario.
245 | agent (Agent): The agent instance providing reflection capability.
246 | iterations (int): Number of reflection iterations.
247 |
248 | Returns:
249 | str: Reflection report, describing the strengths and weaknesses of the original response and suggestions for improvement.
250 | """
251 | current_response = agent_response
252 | reflection_report = ""
253 |
254 | for i in range(iterations):
255 | # Define a prompt for the scenario, including the current response
256 |             prompt = f"Reflect on the following response to the scenario '{scenario}': '{current_response}'. Consider the clarity, relevance, and accuracy of the response, and suggest improvements to refine the answer."
257 |
258 | # Using Agents to Generate Reflection Reports
259 | reflection = agent.chat(prompt)
260 | agent.speak(reflection)
261 | reflection_report += f"Iteration {i + 1}: {reflection}\n\n"
262 |
263 | # Update the current answer as a reflective suggestion so that the next iteration can be improved further
264 | current_response = reflection
265 |
266 | return reflection_report
267 |
268 | def some_other_utility_function(self):
269 | """
270 | Add other useful utility functions here
271 |
272 | """
273 | pass
274 |
275 |
276 |
277 |
--------------------------------------------------------------------------------
/EartAgent/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """ Import all utils modules in the package. """
3 | from .UT import UtilityTools
4 |
5 | __all__ = [
6 | "UtilityTools"
7 | ]
--------------------------------------------------------------------------------
/EartAgent/utils/__pycache__/UT.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/utils/__pycache__/UT.cpython-39.pyc
--------------------------------------------------------------------------------
/EartAgent/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/EartAgent/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | ## 💡 What is EartAgent?
19 |
20 | EartAgent (Efficient and real-time Agent) is a multimodal multi-agent framework. By staying compatible with the broader large-model ecosystem, it targets common weaknesses of today's large language models, such as outdated information, a lack of external validation, and frequent erroneous predictions, and aims to improve their efficiency and accuracy. The framework is lightweight and easy to use, ships with a suite of tools such as real-time search, and improves answer accuracy through multi-agent collaboration and a reflection mechanism. It is highly compatible and provides packaged examples for out-of-the-box use.
21 | We also provide ready-to-use wrapper examples for developers:
22 | - 😃 Examples
23 |   - Real-time voice dialog
24 |   - Website cloning
25 |   - Paper writing
26 | - 😁 Games
27 |   - Werewolf
28 |   - Italian Town Life
29 |   - Moot court
30 | ## 📌 Recent Updates
31 | - 2024-07-25 Coming soon~
32 | - 2024-05-31 We are working hard to build better, more complete function calling.
33 | - 2024~~~~~~ One-click setup for a simpler RAG is in the works, stay tuned!☺️
34 | - 2024-05-22 Integrated the ZhipuAI model and added the moot court game
35 | - 2024-05-18 Integrated the Yi large model!
36 | - 2024-05-17 EartAgent was released on GitHub on May 17th!
37 | - 2024-05-15 Integrated the OpenAI GPT-4o and Gemini 1.5 Pro models.
38 |
39 | ## 🎬 Give it a Try
40 |
41 | ### 📝 Prerequisites
42 |
43 | - python >= 3.9
44 | > If you do not have pytorch installed locally (Windows, Mac, or Linux), you can install it yourself by referring to the documentation [Install pytorch](https://pytorch.org/).
45 | > We recommend using conda as the environment manager; you can create a new Python 3.9 virtual environment with the following commands:
46 | > Create a conda environment
47 | > ```bash
48 | > conda create -n EartAgent python=3.9
49 | > ```
50 | > ```bash
51 | > conda activate EartAgent
52 | > ```
53 | > Clone the repository
54 | > ```bash
55 | > git clone https://github.com/haijian-wang/EartAgent.git
56 | > ```
57 | > ```bash
58 | > cd EartAgent
59 | > ```
60 | > Install the dependencies
61 | > ```bash
62 | > pip install -r requirements.txt
63 | > ```
64 |
65 | ### 🚀 Getting Started Quickly
66 |
67 | 1. The following examples use the Tongyi Qianwen (Qwen) model:
68 |
69 | > You need to apply for an api_key in advance; [apply](https://dashscope.console.aliyun.com/apiKey) according to your needs:
70 | >```python
71 | > import EartAgent
72 | >
73 | > from EartAgent.Agent.text_agents import *
74 | >
75 | > QwenAgent.api_key = "your_api_key"
76 | > agent = QwenAgent(
77 | > config=AgentConfig(name='kerry', system_prompt=""))
78 | > x = 'Hi kerry'
79 | > agent(x)
80 | > ```
81 | > Memory is off by default because it consumes more tokens, but it is easy to turn on:
82 | > ```python
83 | > agent = QwenAgent(
84 | > config=AgentConfig(name='kerry', system_prompt="",remember=True)
85 | > )
86 | > ```
87 | > We also support passing files and URLs so that the Agent's replies better match what you expect.
88 | > ```python
89 | > agent(x,url='')
90 | > ```
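> For instance, a minimal sketch (the URL below is a placeholder; `url` is the keyword argument shown above):
> ```python
> agent("Summarize the key points of this page", url="https://example.com/article")
> ```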
91 | 2. Multi-agent coordination and communication:
92 | >Multi-agent collaboration can greatly improve the accuracy of responses; MsgHub and Pipeline are the main means of communication between agents in EartAgent.
93 | >If we want agent_1 and agent_2 to communicate, it looks like this:
94 | >```python
95 | >while True:
96 | >    x = agent_1(x)
97 | >    x = agent_2(x)
98 | >
99 | >    # If the user types "exit", terminate the dialog.
100 | >    if x.content == "exit":
101 | >        print("Exiting the conversation.")
102 | >        break
103 | >```
104 | >Even simpler, EartAgent provides a Pipeline to maintain the flow of messages between agents:
105 | >```python
106 | >pipeline = Pipeline(agent_list=[agent_1, agent_2])
107 | >final_response = pipeline.execute_pipeline(initial_message="Initial message to pipeline")
108 | >```
109 | > Agents can also communicate in group chats:
110 | >```python
111 | >hub = MsgHub(agent_list)
112 | >hub.broadcast_message("Hello, everyone.")
113 | >hub.execute_pipeline()
114 | >```
115 | >You can also freely add members to and remove members from the group chat:
116 | >```python
117 | >hub.add_agent(agent_3)
118 | >hub.remove_agent(agent_3)
119 | >```
120 | 3. We have prepared a rich set of tools for Agents to use, such as giving an agent internet search access.
121 | > This requires requesting a [search_api_key](https://serpapi.com/).
122 | >```python
123 | >agent_1 = QwenAgent(
124 | > config=AgentConfig(name='Kerry',
125 | >                       system_prompt="You're a good helper.",
126 | > tool_use=[
127 | > {'name': 'serpapi_search', 'api_key': 'your_search_api_key'}
128 | > ]))
129 | > ```
130 | 5. There are many more tools available, as shown below; UtilityTools is full of utilities for you to explore.
131 | >Instantiate the toolbox:
132 | >```python
133 | >from EartAgent.utils.UT import UtilityTools
134 | >tools = UtilityTools()
135 | >```
136 | >
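>A minimal sketch of a few of the utilities defined in `EartAgent/utils/UT.py` (the file paths and URL below are placeholders):
>```python
>from EartAgent.utils.UT import UtilityTools
>
>tools = UtilityTools()
>print(tools.get_date())                                   # current date and time as a string
>paragraphs = tools.read_docx("notes.docx")                # read a Word document, one string per paragraph
>pages = tools.read_pdf("paper.pdf")                       # read a PDF, one string per page
>page_text = tools.web_crawler_all("https://example.com")  # crawl a web page into plain text
>```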
137 | ### 🚀 Encapsulation Examples
138 |
139 | 1. EartAgent provides many out-of-the-box wrapper examples for developers, such as website cloning, paper writing, and real-time voice dialog.
140 | How do you use them? Let's take real-time voice dialog as an example.
141 | > You still need to apply for an api_key in advance; [apply](https://dashscope.console.aliyun.com/apiKey) according to your needs:
142 | > ```python
143 | >from EartAgent.app_packaging.voice_dialog_assistant import VoiceAssistant
144 | >assistant = VoiceAssistant(api_key="your_api_key")
145 | >assistant.run()
146 | > ```
147 | ### 🤗 Reflection mechanism
148 |
149 | 1. In EartAgent we can have the Agent reflect on its own output to produce better answers:
150 | > ```python
151 | >from EartAgent.thinking.reflector import AgentReflector
152 | >qwen_reflector = AgentReflector(qwen_agent)
153 | ># Here you can define the number of reflections
154 | >reflected_content = qwen_reflector.reflect(reflection_count=3)
155 | > ```
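> You can also pass a custom prompt to guide the reflection; a sketch based on the `reflect` signature in `EartAgent/thinking/reflector.py`:
> ```python
>reflected_content = qwen_reflector.reflect(
>    reflection_count=2,
>    prompt="Review your previous answer for factual errors and rewrite it more concisely."
>)
> ```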
156 | ### 🤪 There's more to the framework
157 | ### 🙂 Feel free to contact me for a discussion
158 | - 😃 email:wanghaijian05@gmail.com
159 | - 🫡 Wechat:AI_nlp_john
160 | - 🤓 Thank you to everyone who helped me.
161 | ## ⭐ Star History
162 |
163 | [Star History Chart](https://star-history.com/#Haijian06/EartAgent&Date)
164 |
--------------------------------------------------------------------------------
/README_CN.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | ## 💡 EartAgent 是什么?
18 |
19 | EartAgent(Efficient and real-time Agent) EartAgent是一款多模态多智能体框架,通过兼容大模型生态,针对当前大模型存在的诸如信息过时、缺乏外部验证、错误预测频繁等问题,EartAgent框架旨在提升大语言模型的效率和准确性。框架轻量易用,具备实时搜索能力等一系工具,通过多智能体协作和反思机制提高回答精度。该框架兼容性强,提供封装实例,实现开箱即用。
20 | 在高容错下,我们还提供封装实例给开发者开箱即用:
21 | - 😃实例
22 | - 实时语音对话
23 | - 网站克隆
24 | - 论文书写
25 | - 😁游戏:
26 | - 狼人杀
27 | - 意大利小镇生活
28 | - 模拟法庭
29 |
30 | ## 📌 近期更新
31 | - 2024-05-31 我们也在尽最大的努力构建更加完善,完备的function call
32 | - 2024~~~~~~ 一键构建更简单的RAG,我们正在努力,敬请期待☺️
33 | - 2024-05-22 集成大模型智谱AI,添加游戏模拟法庭
34 | - 2024-05-18 集成大模型零一万物
35 | - 2024-05-17 EartAgent5月17号正式上线github
36 | - 2024-05-15 集成大模型 OpenAI GPT-4o、Gemini1.5pro。
37 |
38 | ## 🎬 试一试
39 |
40 | ### 📝 前提条件
41 |
42 | - python >= 3.9
43 | > 如果你并没有在本机安装 pytorch(Windows、Mac,或者 Linux), 可以参考文档 [Install pytorch](https://pytorch.org/) 自行安装。
44 | > 推荐使用conda作为管理工具,您可以使用以下命令创建一个新的Python 3.9虚拟环境:
45 | >
46 | > ```bash
47 | > conda create -n EartAgent python=3.9
48 | > ```
49 | > 进入环境
50 | > ```bash
51 | > conda activate EartAgent
52 | > ```
53 | > ```bash
54 | > git clone https://github.com/haijian-wang/EartAgent.git
55 | > ```
56 | > ```bash
57 | > cd EartAgent
58 | > ```
59 | > 安装依赖包
60 | > ```bash
61 | > pip install -r requirements.txt
62 | > ```
63 |
64 | ### 🚀 快速开始
65 |
66 | 1. 以下实例我们使用中国通义大模型为示例:
67 |
68 | > 需要提前申请api_key,根据你的需求进行[申请](https://dashscope.console.aliyun.com/apiKey):
69 | > ```python
70 | > import EartAgent
71 | >
72 | > from EartAgent.Agent.text_agents import *
73 | >
74 | > QwenAgent.api_key = "your_api_key"
75 | > agent = QwenAgent(
76 | > config=AgentConfig(name='kerry', system_prompt=""))
77 | > x = 'Hi kerry'
78 | > agent(x)
79 | >
80 | > ```
81 | > 默认不开启记忆因为这会耗费你更多的token,当然开启也很方便
82 | > ```python
83 | > agent = QwenAgent(
84 | > config=AgentConfig(name='kerry', system_prompt="",remember=True))
85 | > ```
86 | > 我们还支持上传所有文件和网址,让Agent回复更加是你希望的
87 | > ```python
88 | > agent(x,url='')
89 | > ```
90 | 2. 多智能体协调工作与交流:
91 | >多智能体协作能够大大的提示回复的准确性,MsgHub和Pipeline是EartAgent中智能体之间的主要通信手段
92 | >如果我们希望agent_1和agent_2进行交流那么会是
93 | >```python
94 | >while True:
95 | >    x = agent_1(x)
96 | >    x = agent_2(x)
97 | >
98 | >    # 如果用户输入"exit",则终止对话
99 | >    if x.content == "exit":
100 | >        print("Exiting the conversation.")
101 | >        break
102 | >```
103 | >更简单的你只需要,EartAgent提供了Pipeline来维护智能体之间消息流的选项
104 | >```python
105 | >pipeline = Pipeline(agent_list=[agent_1, agent_2])
106 | >final_response = pipeline.execute_pipeline(initial_message="Initial message to pipeline")
107 | >```
108 | >其实agent还可以进行群聊沟通
109 | >```python
110 | >hub = MsgHub(agent_list)
111 | >hub.broadcast_message("Hello, everyone.")
112 | >hub.execute_pipeline()
113 | >```
114 | >你还可以随意的在群聊里面增删成员
115 | >```python
116 | >hub.add_agent(agent_3)
117 | >hub.remove_agent(agent_3)
118 | >```
119 | 3. 我们准备了丰富的工具提供给Agent进行使用比如智能体联网
120 | >但是在这里需要申请[search_api_key](https://serpapi.com/)
121 | >```python
122 | >agent_1 = QwenAgent(
123 | > config=AgentConfig(name='Kerry',
124 | > system_prompt="You're a good helper.",
125 | > tool_use=[
126 | > {'name': 'serpapi_search', 'api_key': 'your_search_api_key'}
127 | > ]))
128 | > ```
129 | 5. 我们还有更多的工具可以使用,具体的如下,UtilityTools里面有很多工具等待你去探索
130 | >```python
131 | >from EartAgent.utils.UT import UtilityTools
132 | >tools = UtilityTools()
133 | >```
134 | >
135 | ### 🚀 封装实例
136 |
137 | 1. 在EartAgent中我们提供了很多让开发者开箱即用的封装实例,例如(网站克隆、论文写作、实时语音对话等等)
138 | 如何使用?我们以实时语音对话为例
139 | > 仍然需要提前申请api_key,根据你的需求进行[申请](https://dashscope.console.aliyun.com/apiKey):
140 | > ```python
141 | >from EartAgent.app_packaging.voice_dialog_assistant import VoiceAssistant
142 | >assistant = VoiceAssistant(api_key="your_api_key")
143 | >assistant.run()
144 | > ```
145 | ### 🤗 反思机制
146 |
147 | 1. 在EartAgent中我们可以让Agent进行自我反思,以输出更好的回答
148 | > ```python
149 | >from EartAgent.thinking.reflector import AgentReflector
150 | >qwen_reflector = AgentReflector(qwen_agent)
151 | ># 这里可以定义反思次数
152 | >reflected_content = qwen_reflector.reflect(reflection_count=3)
153 | > ```
154 | ### 🤪 对于框架的我们做的内容还有很多,大家可以跟着使用
155 | ### 🙂 欢迎和我一起交流和探讨
156 | - 😃 邮箱:wanghaijian05@gmail.com
157 | - 🫡 微信:AI_nlp_john
158 | - 🤓 感谢所有帮助过我的人!
159 | ## ⭐ Star History
160 |
161 | [Star History Chart](https://star-history.com/#haijian-wang/EartAgent&Date)
162 |
163 |
164 |
165 |
166 |
--------------------------------------------------------------------------------
/assets/130898843/f145bbb8-ed97-4025-a40b-4260a8a75f6bno_alpha-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Haijian06/EartAgent/7758342e1c596e546c8e5b998e22e962b00624c5/assets/130898843/f145bbb8-ed97-4025-a40b-4260a8a75f6bno_alpha-4.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | sounddevice
2 | numpy
3 | dashscope
4 | requests
5 | openai
6 | serpapi
7 | python-docx
8 | PyPDF2
9 | python-pptx
10 | langchain-community
11 | mammoth
12 | textract
13 | flask
14 | emoji
15 | pyaudio
16 | zhipuai
17 |
--------------------------------------------------------------------------------