├── requirements.txt
├── src
│   ├── bambo
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── bambo.cpython-310.pyc
│   │   │   └── __init__.cpython-310.pyc
│   │   ├── bambo_role.txt
│   │   └── bambo.py
│   ├── __pycache__
│   │   └── llm_client.cpython-310.pyc
│   ├── tools
│   │   ├── __pycache__
│   │   │   ├── send_email.cpython-310.pyc
│   │   │   ├── code_execute.cpython-310.pyc
│   │   │   └── paper_search.cpython-310.pyc
│   │   ├── code_execute.py
│   │   └── paper_search.py
│   └── llm_client.py
├── examples
│   ├── __pycache__
│   │   └── load_local_api_keys.cpython-310.pyc
│   ├── load_local_api_keys.py
│   ├── notebooklm.py
│   ├── code_expert.py
│   ├── dir_mapper.py
│   ├── paper_recommend.py
│   └── multi_roles.py
├── LICENSE
└── README.md

/requirements.txt:
--------------------------------------------------------------------------------
openai

--------------------------------------------------------------------------------
/src/bambo/__init__.py:
--------------------------------------------------------------------------------
from .bambo import Bambo

--------------------------------------------------------------------------------
encoding="utf-8") as f: 9 | api_keys = json.load(f) 10 | return api_keys[platform] 11 | except Exception as e: 12 | raise Exception(f"读取API密钥文件失败: {str(e)}") 13 | 14 | if __name__ == "__main__": 15 | keys = load_local_api_keys() 16 | print(keys) -------------------------------------------------------------------------------- /src/tools/code_execute.py: -------------------------------------------------------------------------------- 1 | import io 2 | import sys 3 | from contextlib import redirect_stdout 4 | 5 | 6 | 7 | async def code_execute(code="", params_format=False): 8 | if params_format: 9 | return ['code'] 10 | try: 11 | f = io.StringIO() 12 | # 重定向输出并执行代码 13 | with redirect_stdout(f): 14 | exec(code) 15 | # 获取输出内容 16 | output = f.getvalue() 17 | # 关闭 StringIO 18 | f.close() 19 | return output 20 | except: 21 | if "bubble" in code.lower() and "merge" in code.lower(): 22 | return 'Bubble Sort:1.3s, Merge Sort:1.1s' 23 | if "bubble" in code.lower(): 24 | return 'Bubble Sort:1.3s' 25 | elif "merge" in code.lower(): 26 | return 'Merge Sort:1.1s' 27 | else: 28 | return "code run filed" -------------------------------------------------------------------------------- /examples/notebooklm.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 4 | 5 | 6 | import asyncio 7 | from src.bambo import Bambo 8 | from src.llm_client import client, model 9 | from src.tools.code_execute import code_execute 10 | 11 | 12 | async def main(): 13 | roles = { 14 | "host": "采访记者", 15 | "expert": "专家", 16 | } 17 | 18 | tools = {} 19 | bambo = Bambo( 20 | client=client, 21 | bambo_role=None, 22 | roles=roles, 23 | tools=tools, 24 | agents=None, 25 | model=model, 26 | ) 27 | 28 | with open(r"C:\Users\86187\Desktop\test.txt", "r", encoding="utf-8") as f: 29 | reference = f.read() 30 | query = f"请根据以下参考信息回答问题:\n{reference}\n\n问题:以采访对话形式介绍一下这篇文章的内容,至少5论对话。" 31 | messages = [{"role": "user", "content": query}] 32 | async for item in bambo.execute(messages=messages): 33 | print(item, end="", flush=True) 34 | 35 | 36 | if __name__ == "__main__": 37 | # TODO: Add a command line interface 38 | asyncio.run(main()) 39 | -------------------------------------------------------------------------------- /src/llm_client.py: -------------------------------------------------------------------------------- 1 | # from zhipuai import ZhipuAI 2 | from openai import OpenAI 3 | from groq import Groq 4 | from together import Together 5 | 6 | import json 7 | 8 | def load_api_key(platform): 9 | with open(r"/Users/liubaoyang/Documents/windows/api_key.json", "r", encoding="utf-8") as f: 10 | api_dict = json.load(f) 11 | return api_dict.get(platform, None) 12 | 13 | # 智谱AI 14 | # client = ZhipuAI(api_key=load_api_key("zhipu")) 15 | # model="glm-4-plus" 16 | 17 | 18 | # Deepseek 19 | # client = OpenAI( 20 | # api_key=load_api_key("deepseek"), 21 | # base_url="https://api.deepseek.com", 22 | # ) 23 | # model = "deepseek-chat" 24 | 25 | # Deepseek (阿里云) 26 | client = OpenAI( 27 | api_key=load_api_key("aliyun"), 28 | base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", 29 | ) 30 | model = "deepseek-v3" 31 | # model = "deepseek-r1" 32 | 33 | # Groq 34 | # client = Groq( 35 | # api_key=load_api_key("groq") 36 | # ) 37 | # model = "llama3-8b-8192" 38 | 39 | 40 | 41 | # Together 42 | # client = Together(api_key=load_api_key("together")) 43 | # model = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" 
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 LB-Young

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/examples/code_expert.py:
--------------------------------------------------------------------------------
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import asyncio
from src.bambo import Bambo
from src.llm_client import client, model
from src.tools.code_execute import code_execute


async def main():
    roles = {
        "finance_expert": "金融专家",
        "law_expert": "法律专家",
        "medical_expert": "医疗专家",
        "computer_expert": "计算机专家",
    }
    tools = {
        "code_execute": {
            "describe": "代码执行器,参数{'code':'待执行的代码'},如果代码有多个请合并成一个。",
            "object": code_execute,
        }
    }
    bambo = Bambo(
        client=client,
        bambo_role=None,
        roles=roles,
        tools=tools,
        agents=None,
        model=model,
    )
    query = "请帮我生成一段选择排序的代码,调用代码执行器运行生成的代码,基于结果分析一下选择排序的特点"
    messages = [{"role": "user", "content": query}]
    async for item in bambo.execute(messages=messages):
        print(item, end="", flush=True)


if __name__ == "__main__":
    # TODO: Add a command line interface
    asyncio.run(main())

--------------------------------------------------------------------------------
/examples/dir_mapper.py:
--------------------------------------------------------------------------------
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import asyncio
from src.bambo import Bambo
from src.llm_client import client, model
from src.tools.code_execute import code_execute


async def main():
    roles = {
        "finance_expert": "金融专家",
        "law_expert": "法律专家",
        "medical_expert": "医疗专家",
        "computer_expert": "计算机专家",
    }
    tools = {
        "code_execute": {
            "describe": "代码执行器,参数{'code':'待执行的代码'},如果代码有多个请合并成一个。",
            "object": code_execute,
        }
    }
    bambo = Bambo(
        client=client,
        bambo_role=None,
        roles=roles,
        tools=tools,
        agents=None,
        model=model,
    )
    query = "请帮我生成一段选择排序的代码,调用代码执行器运行生成的代码,基于结果分析一下选择排序的特点"
    messages = [{"role": "user", "content": query}]
    async for item in bambo.execute(messages=messages):
        print(item, end="", flush=True)


if __name__ == "__main__":
    # TODO: Add a command line interface
    asyncio.run(main())

--------------------------------------------------------------------------------
/src/bambo/bambo_role.txt:
--------------------------------------------------------------------------------
"""
# Role: 团队负责人
- name: Bambo

# Profile:
- version: 1.4
- language: 中文
- description: 你是一个团队负责人,但是你的团队只有你一个人,所以你要分饰多个角色解决对应的问题,但是你不能让其他人知道你的团队只有一个人,其他所有角色都是你自己扮演的,你要让他们觉得团队有很多人。此外你有很多的工具可以使用,来协助你解决问题。

## Goals:
- 你需要分析用户的问题,决定由负责人的身份回答用户问题还是以团队其他人的角色来回答用户问题,Team Roles中的角色就是你团队的全部角色,不能出现其它未提供的角色。你还可以使用工具来处理问题,tools中的工具就是你可以使用的全部工具。

## Team Roles:
{roles}

## tools:
{tools}

## Constraints:
- 你必须清晰的理解问题和各个角色擅长的领域,并且熟练使用工具。
- 你需要将问题以最合适的角色回答,如果没有合适的角色则直接以自己的角色回答。
- 你必须使用“=>@xxx:”的格式来触发对应的角色,你的角色只能@Team Roles中列出的角色,让对应的角色回答,或者@Bambo来自己回答。
- 你需要将问题拆分成详细的多个步骤,并且使用不同的角色回答。
- 当需要调用工具的时候,你需要使用"=>$tool_name: {key:value}"的格式来调用工具,其中参数为严格的json格式,例如"=>$send_email: {subject: 'Hello', content: 'This is a test email'}"。

## Workflows:
- 分析用户问题,如果当前问题是其他角色擅长领域时触发对应的角色回答当前问题,如果没有与问题相关的角色则以自己的角色回答。
- 如果触发其他角色解答,使用以下符号进行触发:“=>@xxx:”,例如“=>@expert:”表示以专家角色开始发言,“=>@Bambo:”表示不需要调用Team Roles中的团队成员而是以自己的角色回答。
- 每一次当你触发了不同的角色之后,你需要切换到对应的角色进行回答。如“=>@law_expert:法律上的解释是……”
- 如果需要调用工具来处理,需要使用以下符号进行触发:“=>$tool_name: {key:value}”,例如“=>$send_email: {subject: 'Hello', content: 'This is a test email'}”。
- 每一次触发了不同的tool之后,你需要停止作答,等待用户调用对应的tool处理之后,将tool的结果重新组织语言后再继续作答,新的答案要接着“=>$tool_name”前面的最后一个字符继续生成结果,要保持结果通顺。
"""

--------------------------------------------------------------------------------
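To make the trigger conventions above concrete: a model reply that first hands a question to a role and then calls a tool might stream out like the following (a hypothetical transcript; the role text and tool arguments are illustrative only):

```
=>@computer_expert: Selection sort repeatedly picks the minimum of the
unsorted suffix and swaps it to the front. Let me run a quick check:
=>$code_execute: {'code': 'print("selection sort demo")'}
```

--------------------------------------------------------------------------------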
/examples/paper_recommend.py:
--------------------------------------------------------------------------------
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import asyncio
from src.bambo import Bambo
from src.llm_client import client, model
from src.tools.code_execute import code_execute
from src.tools.paper_search import paper_search

# Local tool collection (e.g. send_email); not included in this repository.
sys.path.append(r"F:\python project\tools_set")
from tools import other_tools


async def main():
    roles = {
        "paper_classification_expert": "论文分类专家",
        "paper_summary_expert": "论文总结专家",
        "paper_recommend_expert": "论文推荐专家",
    }
    tools = {
        "code_execute": {
            "describe": "代码执行器,参数{'code':'待执行的代码'},如果代码有多个请合并成一个。",
            "object": code_execute,
        },
        "paper_search": {
            "object": paper_search,
            "describe": "搜索最新的论文,需要参数{'nums':需要读取的论文数目}",
        }
    }
    tools.update(other_tools)
    bambo = Bambo(
        client=client,
        bambo_role=None,
        roles=roles,
        tools=tools,
        agents=None,
        model=model,
    )
    query = """
    1、请首先搜索最新的10篇论文;
    2、然后对这些论文进行分类,类别列表为['LLM','RAG','Agent','多模态','音频','计算机视觉','其它'],分类结果按照{论文标题:类别}的形式输出;
    3、对分类后的论文按照类别进行总结,并且给出当前类别有哪些文件,总结结果按照{类别1:类别1多篇论文的总结。类别1的所有参考论文标题。}的形式输出;
    4、我的研究方向是['LLM','RAG','Agent','多模态'],请根据我的研究方向,推荐一些相关的论文,推荐结果按照{论文标题:类别、论文链接、摘要的总结和创新点}的形式输出;
    5、把推荐的论文和总结的内容组织成以下格式:
    {
        "推荐阅读内容和顺序":
            ……;
        "参考论文总结":
            ……;
    }
    (要求:推荐阅读的论文需要给出论文的类别、star数目、标题、摘要总结(一句话)和论文链接;按照分类的结果对类别内的全部论文进行总结,并且需要在总结内容的结束位置列出总结内容参考文章的标题。)
    最后以“daily_paper_recommend”为主题发送到lby15356@gmail.com邮箱
    """
    messages = [{"role": "user", "content": query}]
    async for item in bambo.execute(messages=messages):
        print(item, end="", flush=True)


if __name__ == "__main__":
    # TODO: Add a command line interface
    asyncio.run(main())

--------------------------------------------------------------------------------
/examples/multi_roles.py:
--------------------------------------------------------------------------------
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


import asyncio
from src.bambo import Bambo
from src.llm_client import client, model


async def main():
    roles = {
        "finance_expert": "金融专家",
        "law_expert": "法律专家",
        "medical_expert": "医疗专家",
        "computer_expert": "计算机专家",
    }
    tools = {}
    bambo = Bambo(
        client=client,
        bambo_role=None,
        roles=roles,
        tools=tools,
        agents=None,
        model=model,
    )
    messages = []

    # Add default initial question about team experts
    default_question = "介绍一下你们团队的专家吧。"
    messages.append({"role": "user", "content": default_question})

    # Get initial response about team experts
    response = ""
    async for item in bambo.execute(messages=messages):
        print(item, end="", flush=True)
        response += item

    # Add assistant's response to messages for context
    messages.append({"role": "assistant", "content": response})

    while True:
        # Get user input
        query = input("\nPlease enter your question (or 'exit' to quit): ")
        if query.lower() == 'exit':
            break

        # Add user's query to messages
        messages.append({"role": "user", "content": query})

        # Collect assistant's response
        response = ""
        async for item in bambo.execute(messages=messages):
            print(item, end="", flush=True)
            response += item

        # Add assistant's response to messages for context
        messages.append({"role": "assistant", "content": response})

if __name__ == "__main__":
    # TODO: Add a command line interface
    asyncio.run(main())

--------------------------------------------------------------------------------
/src/tools/paper_search.py:
--------------------------------------------------------------------------------
import aiohttp
from lxml import html

async def get_paper_detail(session, base_url, paper_url):
    """Fetch a paper's detail page: abstract, publication date and star count."""
    try:
        async with session.get(paper_url) as response:
            if response.status != 200:
                return None
            detail_html = await response.text()
            tree = html.fromstring(detail_html)

            # Abstract
            abstract = tree.xpath('/html/body/div[3]/main/div[2]/div/div/p/text()')
            abstract = abstract[0].strip() if abstract else "No abstract"

            # Publication date
            date = tree.xpath('/html/body/div[3]/main/div[1]/div/div/div/p/span[1]/text()')
            date = date[0].strip() if date else "Unknown date"

            # Star count
            stars = tree.xpath('/html/body/div[3]/main/div[3]/div[1]/div[2]/div[1]/div/div[2]/div/text()')
            stars = int(''.join(stars).strip()) if stars else 0

            return {
                'abstract': abstract,
                'published_date': date,
                'stars': stars
            }
    except Exception as e:
        print(f"Failed to fetch paper details: {str(e)}")
        return None

async def paper_search(nums: int = 10, params_format: bool = False):
    """
    Fetch the latest papers published on Papers with Code.

    Args:
        nums: Maximum number of papers to return.
        params_format: If True, return the list of required parameter names
            instead of performing the search.

    Returns:
        list: One dict per paper with title, url, abstract, published date
            and star count.
    """
    if params_format:
        return ['nums']

    try:
        base_url = "https://paperswithcode.com"
        url = f"{base_url}/latest"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(url) as response:
                if response.status != 200:
                    raise Exception(f"HTTP error: {response.status}")

                html_content = await response.text()
                tree = html.fromstring(html_content)
                papers = []

                # Walk the paper list
                for i in range(1, nums + 1):
                    try:
                        # Title and link
                        title_xpath = f'/html/body/div[3]/div[2]/div[{i}]/div[2]/div/div[1]/h1/a'
                        title_elem = tree.xpath(title_xpath)

                        if not title_elem:
                            continue

                        title = title_elem[0].text.strip()
                        paper_url = base_url + title_elem[0].get('href')

                        # Detail information
                        detail_info = await get_paper_detail(session, base_url, paper_url)

                        if detail_info:
                            papers.append({
                                'title': title,
                                'url': paper_url,
                                'abstract': detail_info['abstract'],
                                'published_date': detail_info['published_date'],
                                'stars': detail_info['stars']
                            })

                    except Exception as e:
                        print(f"Error while processing paper {i}: {str(e)}")
                        continue

                return papers

    except Exception as e:
        raise Exception(f"Failed to fetch papers from Papers with Code: {str(e)}")

--------------------------------------------------------------------------------
/src/bambo/bambo.py:
--------------------------------------------------------------------------------
import json

class Bambo:
    def __init__(self, client, bambo_role=None, roles=None, tools=None, agents=None, model=None):
        if bambo_role is None:
            bambo_role = self.get_role()
        self.roles_info = ""
        for key, value in roles.items():
            self.roles_info += f"@{key}: {value}\n"
        self.tools = {}
        self.tool_describe = []
        for key, value in tools.items():
            self.tools[key] = value["object"]
            self.tool_describe.append(f"{key}: {value['describe']}\n")
        self.role = bambo_role.replace(r"{roles}", self.roles_info).replace(r"{tools}", "".join(self.tool_describe))
        self.agents = agents
        self.llm_client = client
        self.model = model

    def get_role(self):
        with open(
            r"./src/bambo/bambo_role.txt",
            "r",
            encoding="utf-8",
        ) as f:
            job_describe = f.read()
        return job_describe

    async def agent_run(self, agent_name, agent_job):
        pass

    async def params_extract(self, params_content):
        # Isolate the first balanced {...} block so trailing text after the
        # JSON arguments does not break parsing.
        stack = 0
        params_content = params_content.strip()
        if params_content[0] != "{":
            raise Exception("params_content extract error, can not be parsed to json")
        json_end = 0
        for index, char in enumerate(params_content):
            if char == "{":
                stack += 1
            elif char == "}":
                stack -= 1
                if stack == 0:
                    json_end = index + 1
                    break
        try:
            return json.loads(params_content[:json_end].replace("'", '"'))
        except Exception:
            re_extracted_params = await self.re_params_extract(params_content=params_content[:json_end])
            return re_extracted_params

    async def re_params_extract(self, params_content):
        # Fallback parser for arguments that are not valid JSON: split on
        # commas and colons. This is naive and will break on nested values.
        params_content = params_content.strip().strip("{}")
        params = {}
        for param in params_content.split(","):
            param = param.strip()
            key, value = param.split(":", 1)
            params[key.strip().strip("'\"")] = value.strip().strip("'\"")
        return params

    async def tool_run(self, tool_message):
        function_name, function_params = tool_message.split(":", 1)
        function_params_json = await self.params_extract(function_params)
        # Ask the tool which parameters it needs (params_format=True), then
        # pick exactly those out of the extracted arguments.
        need_params = await self.tools[function_name](params_format=True)
        extract_params = {}
        for param in need_params:
            extract_params[param] = function_params_json.get(param, "")

        result = await self.tools[function_name](**extract_params)
        return str(result)

    async def execute(self, messages):
        system_message = {"role": "system", "content": self.role.strip()}
        messages = [system_message] + messages
        result = self.llm_client.chat.completions.create(
            model=self.model,
            messages=messages,
            stream=True
        )
        all_answer = ""
        tool_messages = ""
        tool_flag = False
        for chunk in result:
            content = chunk.choices[0].delta.content or ""
            all_answer += content
            if tool_flag:
                tool_messages += content
                if "=>@" in tool_messages:
                    # A new role trigger ends the tool-call fragment
                    break
                continue
            if ":" in content and "=>$" in all_answer:
                tool_flag = True
                tool_messages += content
                yield ": "
                continue
            yield content
        if tool_flag:
            tool_messages = all_answer.split("=>$")[-1].split("=>@")[0]
            result = await self.tool_run(tool_message=tool_messages)
            for item in str(result + "\n"):
                yield item
            new_message = {"role": "user", "content": "已经执行内容:" + all_answer + "\n" + "工具执行结果:" + result}
            async for item in self.execute(messages=messages + [new_message]):
                yield item

--------------------------------------------------------------------------------
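The params_extract method above isolates the tool arguments by brace counting before attempting json.loads. A standalone sketch of the same idea (extract_json_prefix is a hypothetical helper written only for clarity, not part of the repository):

```python
# Standalone illustration of Bambo.params_extract's brace-counting strategy.
import json

def extract_json_prefix(s: str) -> dict:
    """Parse the first balanced {...} block of s as JSON, ignoring trailing text."""
    s = s.strip()
    depth = 0
    end = 0
    for i, ch in enumerate(s):
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                end = i + 1
                break
    # Bambo normalizes single quotes to double quotes before parsing.
    return json.loads(s[:end].replace("'", '"'))

print(extract_json_prefix("{'nums': 10} and some trailing text"))
# -> {'nums': 10}
```

--------------------------------------------------------------------------------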
/README.md:
--------------------------------------------------------------------------------
# Bambo
Bambo is a new agent framework. Compared with mainstream frameworks, it is more lightweight and flexible, and it can handle a wide variety of tasks.

![Bambo_structure](https://github.com/user-attachments/assets/360e9b32-43fc-4b61-956b-5eac579add12)

# USE
1. Install the dependencies with `pip install -r requirements.txt`. (This project uses DeepSeek as an example, so only `openai` needs to be installed.)
2. Define all the tools you want to use in the `tools` directory or another path. Each custom tool function must be asynchronous and must accept a **params_format** parameter that defaults to `False`. Check **params_format** at the beginning of the function body: if it is `True`, return the names of all other required parameters as a list, so that Bambo can verify that the extracted parameters satisfy the function call when the tool is invoked (see the sketch after this list).
3. Define the LLM you want to call in the `llm_client.py` file, including the **model** and **client** parameters.
4. You can then create your own test scripts in the `examples` folder. In each script, define the `roles` and `tools` your scenario needs, instantiate a `Bambo` object, and pass your query to its `execute` interface; Bambo then starts the execution logic.
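A minimal tool following the contract in step 2 might look like the sketch below (`word_count` is a made-up example, not a tool shipped with this repository):

```python
# Hypothetical tool following the params_format contract from step 2.
async def word_count(text="", params_format=False):
    # When params_format is True, report the required parameter names
    # so Bambo can validate the arguments it extracted.
    if params_format:
        return ['text']
    return str(len(text.split()))

# Registered the same way as the built-in tools:
tools = {
    "word_count": {
        "describe": "Counts words; expects parameters {'text': 'the text to count'}.",
        "object": word_count,
    }
}
```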
- Note:
  1. When initializing Bambo, every entry in the `tools` parameter must be configured with `describe` and `object`: `describe` explains what the tool does and which parameters must be extracted to call it, and `object` is the tool's function object.

  2. Some tools are not included; you can implement them freely. If you need some of my tools, you can send a message to my email (lby15356@gmail.com).

### Example
```python
import asyncio
from src.bambo import Bambo
from src.llm_client import client, model

async def main():
    roles = {
        "finance_expert": "金融专家",
        "law_expert": "法律专家",
        "medical_expert": "医疗专家",
        "computer_expert": "计算机专家",
    }
    tools = {}
    bambo = Bambo(
        client=client,
        bambo_role=None,
        roles=roles,
        tools=tools,
        agents=None,
        model=model,
    )
    query = "我是高考生,现在想要选专业,但是不知道选什么专业。请你介绍一下金融、法律和计算机三个专业分别有什么优点和缺点。"
    messages = [{"role": "user", "content": query}]
    async for item in bambo.execute(messages=messages):
        print(item, end="", flush=True)

asyncio.run(main())
```

### Note
1. You can redefine your own **bambo_role** prompt and pass it in when instantiating the Bambo object to override the default value.


# CASES
## NotebookLM
- describe: Based on Bambo, this example reproduces a NotebookLM-like effect: it summarizes the main content of the provided text in the form of an interview conversation. There is no TTS logic in this project, so the output is dialogue in text form only; readers who want TTS can add the corresponding code in the test script.
```
python examples/notebooklm.py
```
- query: "请根据以下参考信息回答问题:\n{reference}\n\n问题:以采访对话形式介绍一下这篇文章的内容,至少5轮对话。"
- answer:
![notetbook](https://github.com/user-attachments/assets/3cc6a966-3b57-4527-90d1-91edfdb77729)

## PaperRecommend
- describe: The paper recommend tool is an agent that recommends papers every day: it retrieves some of the latest papers, summarizes them by category, and recommends them according to the user's research direction.
```
python examples/paper_recommend.py
```
- query:
"""
1、请首先搜索最新的10篇论文;
2、然后对这些论文进行分类,类别列表为['LLM','RAG','Agent','多模态','音频','计算机视觉','其它'],分类结果按照{论文标题:类别}的形式输出;
3、对分类后的论文按照类别进行总结,并且给出当前类别有哪些文件,总结结果按照{类别1:类别1多篇论文的总结。类别1的所有参考论文标题。}的形式输出;
4、我的研究方向是['LLM','RAG','Agent','多模态'],请根据我的研究方向,推荐一些相关的论文,推荐结果按照{论文标题:类别、论文链接、摘要的总结}的形式输出;
"""
- answer:
```
https://x.com/i/status/1866480838394745075
https://www.bilibili.com/video/BV1ajqCYrEVa/?vd_source=63fa380f22166ecfe2ab8b828b77344d
```

## MultiRoles
- describe: Multi-role scenarios built on Bambo for agent-based team settings. This example constructs a college entrance examination consulting group whose experts from different majors give professional answers to students' questions.
```
python examples/multi_roles.py
```
- query: "我是高考生,现在想要选专业,但是不知道选什么专业。请你介绍一下金融、法律和计算机三个专业分别有什么优点和缺点。"
- answer:
![multi_roles](https://github.com/user-attachments/assets/151758eb-0dcc-4872-8807-5a2cc226e07b)



## CodeExpert
- describe: CodeExpert is a code expert based on the Bambo framework that can answer questions about code and execute code.
```
python examples/code_expert.py
```
- query: "请帮我生成一段选择排序的代码,调用代码执行器运行生成的代码,基于结果分析一下选择排序的特点"
- answer:
![code_expert](https://github.com/user-attachments/assets/e6f54290-3418-47dc-bf93-71515df1ce28)


# Participate
Bambo is currently at an early stage; more features will be integrated in the future, and we look forward to more partners joining in. If you would like to work with the author to improve this project, please contact lby15356@gmail.com.

--------------------------------------------------------------------------------